text
stringlengths 56
7.94M
|
---|
\begin{document}
\begin{center}
\Large {\bfseries
Uniqueness of W*-tensor Products}
\\
\end{center}
\begin{flushright}
\large \bfseries
Corneliu Constantinescu
\end{flushright}
\begin{abstract}
In contrast to C*-algebras, distinct C*-norms on the algebraic tensor product of two W*-algebras produce isomorphic W*-tensor products.
\end{abstract}
AMS classification code: 46L06, 46L10
Keywords: W*-tensor products
We use the notation and terminology of [C]. For W*-tensor products of W*-algebras we use [T].
In the sequel we give a list of some notation used in this paper.
\renewcommand{\labelenumi}{\arabic{enumi}.}
\begin{enumerate}
\item $\ensuremath{\mathrm{I\! K}}$ denotes the field of real or the field of complex numbers. The whole theory is developed in parallel for the real and complex case (but the proofs coincide).
\item If $f$ is a map defined on a set $X$ and $Y$ is a subset of $X$ then $f|Y$ denotes the restriction of $f$ to $Y$.
\item If $E,F$ are vector spaces in duality then $E_F$ denotes the vector space $E$ endowed with the locally convex topology of pointwise convergence on $F$, i.e. with the weak topology $\sigma (E,F)$.
\item If $E$ is a Banach space then $E'$ denotes its dual, $E''$ its bi-dual, and $E^{\#}$ its unit ball :
$$ E^{\#}:= \me{x\in E}{\| x\| \leq 1}.$$
We put for every $x\in E$,
$$\ma{j_Ex}{E'}{\ensuremath{\mathrm{I\! K}}}{x'}{\sa{x}{x'}}$$
and call the map $\mb{j_E}{E}{E''}$ the evaluation map of $E$.
If $F$ is a vector subspace of $E$ then we set
$$F^0:=\me{x'\in E'}{x'|F=0}. $$
If $F$ is a vector subspace of $E'$ then we define
$$^0F:=\me{x\in E}{x'\in F\Longrightarrow \sa{x}{x'}=0}.$$
\item Let $E,F$ be Banach spaces and let $\mb{\varphi }{E}{F}$ be a continuous linear map. $\varphi $ is called an isometry if it preserves the norms and if it is surjective. We put $Im\,\varphi :=\varphi (E)=\me{\varphi x}{x\in E}$ and denote by
$$\ma{\varphi '}{F'}{E'}{y'}{y'\circ \varphi }$$
the transpose of $\varphi $.
\item If $E$ is a C*-algebra then we denote by $Pr\,E$ the set of orthogonal projections of $E$. If in addition $E$ is unital then we denote by $1_E$ its unit. If $F$ is a subset of $E$ then we put
$$F^c:=\me{x\in E}{y\in F\Longrightarrow xy=yx}.$$
We put for all $(x,x')\in E\times E'$
$$\ma{xx'}{E}{\ensuremath{\mathrm{I\! K}}}{y}{\sa{yx}{x'}},$$
$$\ma{x'x}{E}{\ensuremath{\mathrm{I\! K}}}{y}{\sa{xy}{x'}};$$
then $xx',x'x\in E'$.
\item If $E$ is a W*-algebra then $\ddot{E} $ denotes its predual.
\item $\odot$ denotes the algebraic tensor product. If $E,F$ are C*-algebras and if $\alpha $ is a C*-norm on $E\odot F$ then $E\otimes _\alpha F$ denotes the C*-algebra obtained by the completion of $E\odot F$ with respect to $\alpha $.
\end{enumerate}
\renewcommand{\labelenumi}{\alph{enumi})}
\begin{p}\label{8085}
Let $E$ be a W*-algebra and $F$ a closed vector subspace of $\ddot{E} $ such that $xF\subset F$ and $Fx\subset F$ for all $x\in E$.
\begin{enumerate}
\item There is a $p\in Pr\, E^c$ such that $F=p\ddot{E} $ and $F^0=(1_E-p)E$.
\item For every $x\in pE$ put
$$\ma{\tilde{x} }{F}{\ensuremath{\mathrm{I\! K}}}{a}{\sa{x}{a}}.$$
Then $\tilde{x}\in F' $ for all $x\in pE$ and the map
$$\map{pE}{F'}{x}{\tilde{x} }$$
is an isometry of Banach spaces.
\end{enumerate}
\end{p}
a) follows from [T] Theorem III.2.7 c).
b) For $a\in \ddot{E} $,
$$\sa{x}{a}=\sa{px}{a}=\sa{x}{ap}=\sa{\tilde{x} }{ap},$$
so $\tilde{x}\in F' $ and $\n{x}=\n{\tilde{x} }$. Let $a'\in F'$ and put
$$\ma{y}{\ddot{E} }{\ensuremath{\mathrm{I\! K}}}{a}{\sa{a'}{pa}}.$$
Then $y\in E$ and for $a\in \ddot{E} $,
$$\sa{py}{a}=\sa{y}{ap}=\sa{y}{a},$$
so $y=py\in pE$ and $\tilde{y}=a' $, i.e. the map is surjective.\qed
\begin{p}\label{8086}
Let $E$ be a C*-algebra and $F$ a closed vector subspace of $E'$ such that $xF\subset F$ and $Fx\subset F$ for all $x\in E$.
\begin{enumerate}
\item There is a $p\in Pr\,(E'')^c$ such that $F=pE'$ and $F^0=(1_{E''}-p)E''$.
\item The map
$$\map{pE''}{F'}{x''}{x''|F}$$
is an isometry of Banach spaces.
\end{enumerate}
\end{p}
By [C] Corollary 1.3.6.5, $Im\,j_E$ is dense in $E''_{E'}$ so $x''F\subset F$ and $Fx''\subset F$ for all $x''\in E''$. By [C] Theorem 6.3.2.1 b), $E''$ is a W*-algebra and the assertions follow from Proposition \ref{8085}.\qed
\begin{de}\label{8088}
Let $E,F$ be W*-algebras. We define the (bilinear) maps
$$\map{(E\odot F)\times (\ddot{E}\odot \ddot{F} )}{\ddot{E}\odot \ddot{F} }{(x\otimes y,a\otimes b)}{(x\otimes y)(a\otimes b):=(xa)\otimes (yb)},$$
$$\map{(\ddot{E}\odot \ddot{F} )\times (E\odot F)}{\ddot{E}\odot \ddot{F} }{(a\otimes b,x\otimes y)}{(a\otimes b)(x\otimes y):=(ax)\otimes (by)}.$$
and similarly the (bilinear) maps
$$(E\odot F)\times (E'\odot F')\longrightarrow E'\odot F',$$
$$(E'\odot F')\times (E\odot F)\longrightarrow E'\odot F'.$$
\end{de}
\begin{p}\label{8087}
Let $E,F$ be W*-algebras, $\alpha $ a C*-norm on $E\odot F$, and
$$G:=\ddot{E}\bar{\otimes }_\alpha \ddot{F}$$
the closure of $Im\,j_{\ddot{E} }\odot Im\,j_{\ddot{F} }$ in $(E\otimes _\alpha F)'$.
\begin{enumerate}
\item There is a $p\in Pr\,((E\otimes _\alpha F)'')^c$ such that $G=p(E\otimes _\alpha F)'$ and such that the map
$$\map{p(E\otimes _\alpha F)''}{G'}{x''}{x''|G}$$
is an isometry of Banach spaces.
\item If $\mb{j}{E\otimes _\alpha F}{(E\otimes _\alpha F)''}$ denotes the evaluation map then $Im\,j$ is a C*-subalgebra of $p(E\otimes _\alpha F)''$ generating it as a W*-algebra.
\end{enumerate}
\end{p}
a) For $(u,v),(x,y)\in E\times F$ and $(a,b)\in \ddot{E}\times \ddot{F} $,
$$\sa{u\otimes v}{(x\otimes y)((j_{\ddot{E} }a)\otimes (j_{\ddot{F}}b))}=\sa{(u\otimes v)(x\otimes y)}{(j_{\ddot{E}}a)\otimes (j_{\ddot{F} }b)}=$$
$$=\sa{(ux)\otimes (vy)}{(j_{\ddot{E}}a)\otimes (j_{\ddot{F} }b)}=\sa{ux}{j_{\ddot{E} }a}\sa{vy}{j_{\ddot{F} }b}=$$
$$=\sa{ux}{a}\sa{vy}{b}=\sa{u}{xa}\sa{v}{yb}
=\sa{u}{j_{\ddot{E} }(xa)}\sa{v}{j_{\ddot{F} }(yb)}=$$
$$=\sa{u\otimes v}{j_{\ddot{E}}(xa)\otimes j_{\ddot{F} }(yb)},$$
so
$$(x\otimes y)((j_{\ddot{E}}a)\otimes (j_{\ddot{F} }b))=j_{\ddot{E}}(xa)\otimes j_{\ddot{F} }(yb)\in G.$$
It follows $(x\otimes y)G\subset G$ and $zG\subset G$ for all $z\in E\otimes _\alpha F$. Similarly $Gz\subset G$ for all $z\in E\otimes _\alpha F$. By Proposition \ref{8086}, there is a $p\in Pr\,((E\otimes _\alpha F)'')^c$ such that $G=p(E\otimes _\alpha F)'$ and such that the map
$$\map{p(E\otimes _\alpha F)''}{G'}{x''}{x''|G}$$
is an isometry of Banach spaces.
b) Let $(x,y)\in E\times F$ and $\varepsilon >0$. There are $a\in E^{\#}$ and $b\in F^{\#}$ such that
$$\n{x}\n{y}-\varepsilon <\sa{x}{a}\sa{y}{b}=\sa{x}{j_{\ddot{E} }a}\sa{y}{j_{\ddot{F} }b}=$$
$$=\sa{x\otimes y}{(j_{\ddot{E} }a)\otimes (j_{\ddot{F} }b)}=\sa{j(x\otimes y)}{(j_{\ddot{E} }a)\otimes (j_{\ddot{F} }b)}\leq \n{j(x\otimes y)|G}.$$
Since $\varepsilon $ is arbitrary,
$$\n{j(x\otimes y)|G}=\n{x}\n{y},$$
so by a), $j(x\otimes y)\in p(E\otimes _\alpha F)''$. It follows $j(E\odot F)\subset p(E\otimes _\alpha F)''$ and $j(E\otimes _\alpha F)\subset p(E\otimes _\alpha F)''$. Let
$$z\in (Im\,j_{\ddot{E}}\odot Im\,j_{\ddot{F} })\cap \,^0(j(E\otimes _\alpha F)).$$
Then $z|(E\otimes _\alpha F)=0$ and so $z=0$. Thus
$$G\cap \,^0(j(E\otimes _\alpha F))=\{0\},$$
so by a),
$$^0(j(E\otimes _\alpha F))\subset (1_{(E\otimes _\alpha F)''}-p)(E\otimes _\alpha F)',$$
$$p(E\otimes _\alpha F)''\subset (\,^0(j(E\otimes _\alpha F)))^0,$$
$$p(E\otimes _\alpha F)''= (\,^0(j(E\otimes _\alpha F)))^0.$$
Hence $p(E\otimes _\alpha F)''$ is the closure of $j(E\otimes _\alpha F)$ in $(E\otimes _\alpha F)''_{(E\otimes_ \alpha F)'}$ ([C] Proposition 1.3.5.4). By [C] Corollary 4.4.4.12 a), $p(E\otimes _\alpha F)''$ is the W*-subalgebra of $(E\otimes _\alpha F)''$ generated by $j(E\otimes _\alpha F)$.\qed
\begin{de}\label{8090}
We put \emph{(with the notation of Proposition \ref{8087})}
$$E\bar{\otimes }_\alpha F:=p(E\otimes _\alpha F)'' .$$
It is a W*-algebra with $\ddot{E}\bar{\otimes }_\alpha \ddot{F}$ as predual.
\end{de}
\begin{theo}\label{8091}
If $E,F$ are W*-algebras and $\alpha ,\beta $ are C*-norms on $E\odot F$ then $E\bar{\otimes }_\alpha F$ and $E\bar{\otimes }_\beta F$ are isomorphic.
\end{theo}
We may assume $\alpha \leq \beta $. By [W] Proposition T.6.24, there is a surjective C*-homomorphism
$$\mb{\varphi }{E\otimes _\beta F}{E\otimes _\alpha F}.$$
Then
$$\mb{\varphi '}{(E\otimes _\alpha F)'}{(E\otimes _\beta F)'}$$
preserves the norms ([C] Proposition 1.3.5.2). Thus $\ddot{E}\bar{\otimes }_\alpha \ddot{F}=\ddot{E}\bar{\otimes }_\beta \ddot{F}$ and $\varphi ''$ is an isometry of Banach spaces. By [C] Corollary 6.3.2.3, $\varphi ''$ is a W*-isomorphism.\qed
\begin{center}
{\bfseries REFERENCES}
\end{center}
\begin{flushleft}
[C] Constantinescu, Corneliu, C*-algebras. Elsevier, 2001. \newline
[T] Takesaki, Masamichi, Theory of Operator Algebras I. Springer, 2002. \newline
[W] Wegge-Olsen, N. E., K-theory and C*-algebras. Oxford University Press, 1993. \newline
\end{flushleft}
\begin{flushright}
{\scriptsize Corneliu Constantinescu$\quad$\\
Bodenacherstr. 53$\qquad\;$\\
CH 8121 Benglen$\qquad\;\;$\\
e-mail: [email protected] }
\end{flushright}
\end{document}
\begin{document}
\title[Dihedral symmetry of periodic chain]{Dihedral symmetry
of periodic chain: \\ quantization and coherent states}
\author{P Luft, G Chadzitaskos and J Tolar}
\address{Department of Physics\\
Faculty of Nuclear Sciences and Physical Engineering \\
Czech Technical University \\ B\v rehov\'a 7, CZ - 115 19 Prague,
Czech Republic}
\ead{[email protected]}
\begin{abstract}
Our previous work on quantum kinematics and coherent states over
finite configuration spaces is extended: the configuration space is,
as before, the cyclic group $\mathbf{Z_{n}}$ of arbitrary order
$n=2,3,\ldots$, but a larger group --- the non-Abelian dihedral
group $\mathbf{D_{n}}$ --- is taken as its symmetry group. The
corresponding group related coherent states are constructed and
their overcompleteness proved. Our approach based on geometric
symmetry can be used as a kinematic framework for matrix methods in
quantum chemistry of ring molecules.
\end{abstract}
\pacs{03.65.Fd, 31.15.-p, 31.15.Hz}
\submitto{J. Phys. A: Math. Theor.}
\noindent Keywords: dihedral group, periodic chain, Mackey
quantization, finite-dimensional Hilbert space, coherent states
\section{Introduction}
The mathematical arena for ordinary quantum mechanics is, due to
Heisenberg's commutation relations, the infinite-dimensional Hilbert
space. A useful model for quantum mechanics in a Hilbert space of
finite dimension $n$ is due to H. Weyl \cite{Weyl}. Its geometric
interpretation as the simplest quantum kinematic on a finite
discrete configuration space formed by a periodic chain of $n$
points, was elaborated by J.~Schwinger \cite{Schwinger}. In
\cite{Tolar, StovTolar} we proposed a group theoretical formulation
of this quantum model in terms of Mackey's quantization
\cite{Mackey, HDDTolar}. It is based on Mackey's system of
imprimitivity which represents a group theoretical generalization of
Heisenberg's commutation relations.
The geometrical picture behind the group theoretical approach is the
following \cite{HDDStovTolar}: one has a discrete or continuous
configuration space together with a geometrical symmetry group
acting transitively on it, i.e. the configuration space is a
homogeneous space of the group. In particular, Weyl's model is based
on configuration space $\mathbf{Z_{n}}$ (where $\mathbf{Z_{n}}$ is
the cyclic group of order $n=2,3,\ldots $) with symmetry
$\mathbf{Z_{n}}$ acting on the periodic chain $\mathbf{Z_{n}}$ by
discrete translations. In this paper our formulation of Weyl's model
is generalized by extending the Abelian symmetry group
$\mathbf{Z_{n}}$ of the periodic chain to the dihedral group
$\mathbf{D_{n}}$ --- the non-Abelian symmetry group of a regular
$n$-sided polygon.
Coherent states belong to the most important tools in many
applications of quantum physics. They found numerous applications in
quantum optics, quantum field theory, condensed matter physics,
atomic physics etc. There are various definitions and approaches to
the coherent states dependent on author and application. Our main
reference is \cite{Perelomov}, where the systems of coherent states
related to Lie groups are described. The basic feature of such
systems is that they are overcomplete. As shown for instance in
\cite{TCh}, Perelomov's method can be equally well applied to
discrete groups. Starting with irreducible systems of imprimitivity
we shall construct irreducible sets of generalized Weyl operators,
whose action on properly chosen vacuum states will produce the
resulting families of coherent states.
In section 2 after recalling Mackey's Imprimitivity Theorem for
finite groups \cite{Coleman} the construction of systems of
imprimitivity is described. Then necessary notations for the
dihedral groups are introduced in section 3. Section 4 is devoted to
the construction of the two irreducible systems of imprimitivity for
$\mathbf{D_{n}}$ based on $\mathbf{Z_{n}}$, each consisting of a
projection--valued measure and an induced unitary representation.
From them, the corresponding quantum position and momentum
observables are constructed in section 5. This is the starting point
for construction of the set of generalized Weyl operators and
generalized coherent states in section 6. We apply the method of
paper \cite{TCh}, where quantization on $\mathbf{Z}_{n}$ with
Abelian symmetry group $\mathbf{Z}_{n}$ and the corresponding
coherent states were investigated. Concluding section 7 contains
remarks concerning the replacement of the Abelian cyclic symmetry
group $\mathbf{Z}_{n}$ by the non-Abelian dihedral group
$\mathbf{D}_{n}$ as the group of motions of the configuration space
$\mathbf{Z}_{n}$. The interesting feature of our construction is the
fact that, even if the group property of the set of Weyl operators
is lost, the families of coherent states still possess the required
overcompleteness property.
\section{Systems of imprimitivity for finite groups}
We consider the case when the configuration space $\mathbf{M}$ and
its symmetry group $\mathbf{G}$ are finite. Our configuration space
will be a finite set $\mathbf{M}= \{m_{1}, m_{2},...,m_{n}\}$,
$n=|\mathbf{M}|$. Let $\mathbf{G}$ be a finite group acting
transitively on $\mathbf{M}$, and let $\mathbf{H}$ be the stability
subgroup. Let $\mathbf{L}$ be an irreducible unitary representation
of subgroup $\mathbf{H}$ on Hilbert space
$\mathcal{H}^{\mathbf{L}}$.
System of imprimitivity is a pair $(\mathbf{V},\mathbf{E})$, where
$\mathbf{E}$ is a projection-valued measure on configuration space
$\mathbf{G}/\mathbf{H}$ and $\mathbf{V}$ is a unitary representation
of the symmetry group $\mathbf{G}$ such that
\begin{equation}
\mathbf{V}(g)\mathbf{E}(S)\mathbf{V}(g)^{-1}=\mathbf{E}(g.S) \quad
\text{for all} \quad g\in\mathbf{G}, S \subset
\mathbf{G}/\mathbf{H}.
\end{equation}
In a finite-dimensional Hilbert space $\mathcal{H}= \mathbb{C}^{n}$
the standard projection-valued measure is given by finite sums of
diagonal matrices
\begin{equation}
\mathbf{E}(m_{i}) := \text{diag}(0,0,...,1,...,0), \; i =1,2,...,n.
\end{equation}
The Imprimitivity Theorem for finite groups has the following form
\cite{Coleman}: \\
{\bf Theorem :} {\it A unitary representation
$\mathbf{V}$ of a finite group $\mathbf{G}$ in Hilbert space
$\mathcal{H}$ belongs to the imprimitivity system
$(\mathbf{V},\mathbf{E})$ with standard projection-valued measure
based on $\mathbf{G}/\mathbf{H}$, if and only if $\mathbf{V}$ is
equivalent to an induced representation
$Ind_{\mathbf{H}}^{\mathbf{G}}(\mathbf{L})$ for some unitary
representation $\mathbf{L}$ of subgroup $\mathbf{H}$. The system of
imprimitivity is irreducible, if and only if $\mathbf{L}$ is
irreducible.}
Thus a unitary representation $\mathbf{V}$ for a system of
imprimitivity is constructed directly as an induced representation.
Let $\mathbf{G}$ be a finite group of order $r$, $\mathbf{H}$ its
subgroup of order $s$. Suppose that $\mathbf{L}$ is a representation
of the subgroup $\mathbf{H}$. Let us decompose the group
$\mathbf{G}$ into left cosets
\begin{equation}\label{cosets}
\mathbf{G}=\{\bigcup_{j=1}^{r/s}t_{j}\cdot\mathbf{H}
\; \vert \; t_{j} \in \mathbf{G}, \; t_{1}=e \}.
\end{equation}
Group elements $t_{j}$ are arbitrarily chosen representatives of
left cosets. If the dimension of the representation $\mathbf{L}$ is
$l$, then the induced representation $\mathbf{V}$ of $\mathbf{G}$ is
given by
\begin{eqnarray}
(\mathbf{V}(g))_{ij}& = & \mathbf{L}(h) \quad \text{if} \quad
t^{-1}_{i}\cdot g \cdot t_{j} = h \quad \text{for some} \quad
h \in \mathbf{H}, \\
& = & 0 \quad \text{otherwise};
\end{eqnarray}
here $(\mathbf{V}(g))_{ij}$ are $l \times l$ matrices which serve as
building blocks for
\begin{equation}
\mathbf{V}(g) = Ind_{\mathbf{H}}^{\mathbf{G}}(\mathbf{L})
\end{equation}
and the subscript $ij$ denotes the position of the block in
$\mathbf{V}(g)$.
\section{Structure of dihedral groups}
The dihedral group $\mathbf{D_{n}}$, where $n=2,3,\ldots$, is a
non-Abelian finite group of order $2n$ with the structure of a
semidirect product of two cyclic groups:
\begin{equation}\label{semi}
\mathbf{D_{n}} = \mathbf{Z_{n}} \triangleright \mathbf{Z_{2}}.
\end{equation}
It arises as the symmetry group of a regular polygon and is
generated by discrete rotations and reflections. The elements of the
subgroups $\mathbf{Z_{2}}$ and $\mathbf{Z_{n}}$ will be denoted
\begin{equation}\label{ZN}
\mathbf{Z_{2}} = \{+1,-1\}; \quad \mathbf{Z_{n}} =
\{e=r_{0},r_{1},...,r_{n-1}\}.
\end{equation}
The group operation in $\mathbf{Z_{2}}$ is multiplication;
in $\mathbf{Z_{n}}$ it is given by $r_{i}\cdot r_{j}=r_{i+j \pmod{n}}$.
The multiplication law of the semidirect product \eref{semi} is
determined by a fixed homomorphism $f$ from $\mathbf{Z_{2}}$ to the
group of all automorphisms of the group $\mathbf{Z_{n}}$, $
f:\mathbf{Z_{2}} \rightarrow Aut(\mathbf{Z_{n}})$:
\begin{equation}\label{multlaw}
(r_{i},x)\cdot (r_{j},y) = (r_{i}\cdot f(x)(r_{j}),x\cdot y), \;
x,y \in \mathbf{Z_{2}}, \; r_{i},r_{j} \in \mathbf{Z_{n}}.
\end{equation}
Under this multiplication law, $\mathbf{Z_{n}}$ is a normal
subgroup. Specifically for $\mathbf{D_{n}}$, the mapping $f$ is
simply
\begin{equation}\label{}
f:+1 \mapsto Id, \qquad f:-1 \mapsto Inv,
\end{equation}
where $Id$ is the identical mapping on $\mathbf{Z_{n}}$, $Inv$ is an
automorphism of $\mathbf{Z_{n}}$ which maps an element of
$\mathbf{Z_{n}}$ into its inverse:
\begin{equation}\label{}
Inv: r_{k}\mapsto r_{k}^{-1}=r_{-k \pmod{n}}, \qquad r_k \in \mathbf{Z_{n}}.
\end{equation}
We shall need the explicit form of the multiplication law:
\begin{equation}\label{nasa}
(r_{i},+1)\cdot (r_{j},x) = (r_{i}\cdot r_{j},x) =
(r_{i+j \pmod{n}},x),
\end{equation}
\begin{equation}\label{nasb}
(r_{i},-1)\cdot (r_{j},x) = (r_{i}\cdot r_{j}^{-1},-x) =
(r_{i-j \pmod{n}},-x).
\end{equation}
Thus the elements of $\mathbf{D_{n}}$ can be divided in two
disjoint subsets:
\begin{enumerate}
\item The subset $ \{(r_{k},+1), \, k = 0,1,...,n-1 \}$ forms the subgroup
isomorphic to $\mathbf{Z_{n}}$ and the elements $(r_{k},+1)$ have
the geometrical meaning of integral multiples of a clockwise
rotation of an $n$-sided regular polygon through an angle $2\pi /n$.
\item The subset $ \{(r_{k},-1), \, k = 0,1,...,n-1 \}$ consists of
mirror symmetries with respect to axes in the $n$--sided polygon: if
$n$ is odd, then all axes of mirror symmetries pass through vertices
of the $n$--sided polygon; if $n$ is even, then only one half of
mirror symmetries have axes passing through opposite vertices, the
remaining axes are symmetry axes of two opposite sides of the
polygon.
\end{enumerate}
Summarizing, the group $\mathbf{D_{n}}$ consists of $n$ rotation
symmetries $\mathbf{R}_{k} = (r_{k},+1)$ and $n$ mirror symmetries
$\mathbf{M}_{k} = (r_{k},-1)$ obeying the following multiplication
rules (with $i,j = 0,1,...,n-1$):
\begin{equation}\label{nas1}
\mathbf{R}_{i}\cdot\mathbf{R}_{j} = \mathbf{R}_{i+j \pmod{n}},
\qquad
\mathbf{R}_{i}\cdot\mathbf{M}_{j} = \mathbf{M}_{i+j \pmod{n}},
\end{equation}
\begin{equation}\label{nas2}
\mathbf{M}_{i}\cdot\mathbf{R}_{j} = \mathbf{M}_{i-j \pmod{n}},
\qquad
\mathbf{M}_{i}\cdot\mathbf{M}_{j} = \mathbf{R}_{i-j \pmod{n}}.
\end{equation}
\section{Quantization on $\mathbf{Z_{n}}$ with $\mathbf{D_{n}}$ as
a symmetry group}
The configuration space $\mathbf{Z_{n}}$ will be identified with the
set of vertices of a regular $n$--sided polygon. We have seen that
$\mathbf{D_{n}}$ acts on $\mathbf{Z_{n}}$ transitively as a group of
discrete rotations and mirror symmetries. The stability subgroup
$\mathbf{H_{n}}$ of $\mathbf{D_{n}}$ is $\mathbf{Z_{2}}$ for all
$n$, hence we can write $ \mathbf{Z_{n}} \cong
\mathbf{D_{n}}/\mathbf{Z_{2}}$.
The stability subgroup $\mathbf{Z_{2}}$ is independent of the order
of symmetry group $\mathbf{D_{n}}$ and it has exactly two
inequivalent irreducible unitary representations, the trivial
representation
\begin{equation}\label{}
\mathbf{T_{1}}:\mathbf{Z_{2}}\rightarrow \mathbb{C}:\pm 1 \mapsto 1,
\end{equation}
and the alternating representation
\begin{equation}\label{}
\mathbf{T_{2}}:\mathbf{Z_{2}}\rightarrow \mathbb{C}:+1 \mapsto +1,
\quad -1 \mapsto -1.
\end{equation}
Now the inequivalent quantum kinematics on the configuration space
$\mathbf{Z_{n}}$ are determined by inequivalent systems of
imprimitivity on $\mathbf{Z_{n}}$ with the symmetry group
$\mathbf{D_{n}}$. We require irreducibility of systems of
imprimitivity in order that the corresponding kinematical
observables act irreducibly in the Hilbert space. There will be
exactly two inequivalent irreducible systems of imprimitivity
$(\mathbf{V_1},\mathbf{E_1})$ and $(\mathbf{V_2},\mathbf{E_2})$ with
representations induced from irreducible unitary representations
$\mathbf{T_{1}}$ and $\mathbf{T_{2}}$.
In both cases the Hilbert space $\mathcal H$ of quantum mechanics is the
space of complex functions on the configuration space
$\mathbf{Z_{n}}$ and it is isomorphic to $n$--dimensional complex
vector space $\mathbb{C}^{n}$ with standard inner product
\begin{equation}\label{}
\langle z_1,z_2\rangle=\sum_{i=0}^{n-1}\bar{z}_{1i}z_{2i}.
\end{equation}
The standard projection-valued measure $\mathbf{E}$ is common to
both systems of imprimiti\-vi\-ty $(\mathbf{V_1},\mathbf{E})$ and
$(\mathbf{V_2},\mathbf{E})$. It is diagonal and generated by sums of
one-dimensional orthogonal projectors on $\mathbb{C}^{n}$ of the
form
\begin{equation}\label{proj1}
\mathbf{E}(r_{i}) = {}_{i}\left(
\begin{array}{ccccc}
{} & {} & {}^{i} & {} & {} \, {} \\
{} & {} & \cdot & {} & {} \, {} \\
\cdot & \cdot & 1 & \cdot & \cdot \, {} \\
{} & {} & \cdot & {} & {} \, {}\\
{} & {} & \cdot & {} & {} \, {}
\end{array}\right), \quad i=0,1,...,n-1;
\end{equation}
Measure of an empty set in $\mathbf{Z_{n}}$ is the vanishing operator
on $\mathbb{C}^{n}$, measure of the whole configuration space is the
unit operator.
In order to obtain the two irreducible systems of imprimitivity, we
shall construct the representations induced from $\mathbf{T_{1}}$
and $\mathbf{T_{2}}$ on $\mathbb{C}^{n}$,
\begin{equation}\label{}
\mathbf{V_{1}} =
Ind_{\mathbf{Z_{2}}}^{\mathbf{D_{n}}}(\mathbf{T_{1}}), \qquad
\mathbf{V_{2}} =
Ind_{\mathbf{Z_{2}}}^{\mathbf{D_{n}}}(\mathbf{T_{2}}).
\end{equation}
According to \eref{cosets} the symmetry group $\mathbf{D_{n}}$ is
decomposed into left cosets,
\begin{equation}\label{}
\mathbf{D_{n}}=
\{\bigcup_{m=0}^{n-1}t_{m}\cdot\mathbf{Z_{2}} \; \vert \; t_{m}\in \mathbf{D_{n}}, \; t_{0} =
e\}.
\end{equation}
In our case we have $\mathbf{Z_{2}} = \{\mathbf{R}_{0},\,
\mathbf{M}_{0}\}$; with the choice of coset representatives $t_{m} =
\mathbf{R}_{m}, \; m=0,1,...,n-1$, we obtain the decomposition
\begin{equation}\label{}
\mathbf{D_{n}} = \{ \{\mathbf{R}_{0}, \mathbf{M}_{0}\} \cup \,
\{\mathbf{R}_{1}, \mathbf{M}_{1}\} \cup
\,... \cup \{\mathbf{R}_{n-1}, \mathbf{M}_{n-1}\} \}.
\end{equation}
Matrices of induced representations are then constructed in block
form: dimensions of both representations $\mathbf{V_{1}}$ and
$\mathbf{V_{2}}$ are equal to $n$,
\begin{equation}\label{}
\text{dim}(\mathbf{V_{l}}) =
\frac{|\mathbf{D_{n}}|}{|\mathbf{Z_{2}}|}\cdot
\text{dim}(\mathbf{T_{l}})=\frac{2n}{2}\cdot 1 = n, \quad l=1,2,
\end{equation}
and matrix elements ($1\times 1$--blocks) have the following form:
\begin{eqnarray}\label{ind1}
\mathbf{V_{l}}(g)_{ij}&=&
\mathbf{T_{l}}(h) \quad \text{if}\quad
t^{-1}_{i}\cdot g \cdot t_{j} = h \quad
\text{for some}\quad h \in \mathbf{Z_{2}}, \cr
&=& 0 \quad \text{otherwise}.
\end{eqnarray}
In our case $t_{i} = \mathbf{R}_{i}$, so the matrix element
$(\mathbf{V_{l}}(g))_{ij}$ does not vanish if and only if
\begin{equation}\label{ind2}
\mathbf{R}_{-i \pmod{n}}\cdot g \cdot \mathbf{R}_{j} \in
\{\mathbf{R}_{0}, \, \mathbf{M}_{0}\}.
\end{equation}
To construct the induced representation $\mathbf{V_{1}}$ --- first
for the subgroup of discrete rotations $g=\mathbf{R}_{k}$ ---
condition \eref{ind2}
\begin{equation}\label{}
\mathbf{R}_{-i \pmod{n}}\cdot \mathbf{R}_{k} \cdot \mathbf{R}_{j}
= \mathbf{R}_{-i+j+k \pmod{n}}\in \{\mathbf{R}_{0}, \, \mathbf{M}_{0}\}
\end{equation}
is equivalent to $i=j+k {\pmod{n}}$, hence matrix elements
\eref{ind1} of discrete rotations are
\begin{equation}\label{v1op}
\ (\mathbf{V_{1}}(\mathbf{R}_{k}))_{ij}=\delta_{i,j+k \pmod{n}}.
\end{equation}
So the entire matrix is
\begin{equation}\label{v1rk}
\mathbf{V_{1}}(\mathbf{R}_{k})= {}_{k}
\left(\begin{array}{cccccccccc}
& & & & ^{k} & 1 & & & & \\
& & & & & & 1 & & & \\
& & & & & & & \cdot & & \\
& & & & & & & & \cdot & \\
& & & & & & & & & 1 \\
1 & & & & & & & & & \\
& 1 & & & & & & & & \\
& & \cdot & & & & & & & \\
& & & \cdot & & & & & & \\
& & & & 1 & & & & & \
\end{array}\right).
\end{equation}
For the representation $\mathbf{V_{1}}$ of mirror symmetries
$g=\mathbf{M}_{k}$ condition (\ref{ind2}) acquires the form
\begin{equation}\label{}
\mathbf{R}_{-i \pmod{n}}\cdot \mathbf{M}_{k} \cdot
\mathbf{R}_{j} = \mathbf{M}_{-i-j+k \pmod{n}}
\in \{\mathbf{R}_{0}, \, \mathbf{M}_{0}\} \Leftrightarrow i = k-j
\end{equation}
due to (\ref{nas1}) - (\ref{nas2}), so the matrix elements
(\ref{ind1}) of mirror symmetries are
\begin{equation}\label{}
(\mathbf{V_{1}}(\mathbf{M}_{k}))_{ij} = \delta_{i,k-j \pmod{n}}.
\end{equation}
The matrix $\mathbf{V_{1}}(\mathbf{M}_{k})$ has the explicit form
\begin{equation}\label{v1mk}
\mathbf{V_{1}}(\mathbf{M}_{k}) =
\left(\begin{array}{ccccccccc}
& & & & 1 & & & & \\
& & & \cdot & & & & & \\
_{k} & & 1 & & & & & & \\
& \cdot & & & & & & & \\
1 & & & & & & & & \\
& & & & & & & & 1 \\
& & & & & & & \cdot & \\
& & & & & & \cdot & & \\
& & & & & 1 & & & \
\end{array}\right)
\end{equation}
The second representation $\mathbf{V_{2}}$ is obtained similarly via
(\ref{ind1}) as the representation induced from $\mathbf{T_{2}}$
with the result
\begin{equation}\label{jd}
\mathbf{V_{2}}(\mathbf{R}_{k}) = \mathbf{V_{1}}(\mathbf{R}_{k}),
\quad \mathbf{V_{2}}(\mathbf{M}_{k}) =
-\mathbf{V_{1}}(\mathbf{M}_{k}).
\end{equation}
The representations $\mathbf{V_{1}}$ and $\mathbf{V_{2}}$ are
unitary, reducible and inequivalent; as could be expected,
the two systems of imprimitivity
differ only on reflections in $\mathbf{D_{n}}$.
\section{Quantum observables}
The basic quantum observables --- position and momentum operators
--- defining quantum kinematics on a configuration space have
natural definition if a system of imprimitivity is given.
Classical position observable is a Borel mapping from the
configuration space, in our case from $\mathbf{Z_{n}}$, to the set
of real numbers. For the classical position observable counting
the points in $\mathbf{Z_{n}}$,
\begin{equation}\label{}
f:\mathbf{Z_{n}}\rightarrow \mathbb{R}:r_{k}\mapsto k, \quad
k=0,1,...,n-1,
\end{equation}
the corresponding {\it quantized position operator}
$\widehat{\mathbf{Q}}$ is expressed in terms of the
projection-valued measure (\ref{proj1}) as follows
\cite{HDDStovTolar}:
\begin{equation}\label{posop}
\widehat{\mathbf{Q}}:=\sum_{k=0}^{n-1}k\cdot\mathbf{E}(f^{-1}(k))
=\sum_{k=0}^{n-1}k\cdot\mathbf{E}(r_{k})
= \text{diag}(0,1,\ldots,n-1).
\end{equation}
Note that the position operator is the same for both
systems of imprimitivity constructed in
previous section, i.e. in both quantum kinematics.
In the continuous case, {\it quantized momentum operators}
are obtained from unitary representation $\mathbf{V}$
by means of Stone's theorem \cite{BEH}:
{\it To each one-parameter subgroup $\gamma(t)$ of a symmetry group
there exists a self-adjoint operator
$\widehat{\mathbf{P}}$ such that}
\begin{equation}\label{}
\mathbf{V}(\gamma(t))=\exp(-it\widehat{\mathbf{P}}), \;\; t \in
\mathbb{R}.
\end{equation}
However, this is not possible in the discrete case. One has to look
for self--adjoint operators $\widehat{\mathbf{P_l}}_{g}$ on
$\mathbb{C}^{n}$ such that
\begin{equation}\label{exp}
\mathbf{V_{l}}(g)=\exp(-i\widehat{\mathbf{P_l}}_{g}),
\qquad l=1,2, \quad g \in \mathbf{D_{n}}.
\end{equation}
One may try to compute the operators
$\widehat{\mathbf{P_l}}_{g}$ by inverting the exponential
(\ref{exp}),
\begin{equation}\label{ln}
\widehat{\mathbf{P_l}}_{g}= i\cdot \ln(\mathbf{V_{l}}(g)),
\end{equation}
but then has to face the problem that the complex exponential is not
invertible, so the operators $\widehat{\mathbf{P_l}}_{g}$ will not
be determined uniquely.
Computation of functions of matrices is possible via the
Lagrange--Sylvester theorem (see the Appendix).
However, the spectral data needed there have their own physical
importance in quantum mechanics, so they will be
determined below for the operators $\mathbf{V_{1}}(\mathbf{R_{k}})$ and
$\mathbf{V_{1}}(\mathbf{M}_{k})$, $k=0,1,...,n-1$.
Because of (\ref{jd}) they are applicable to the other
system of imprimitivity, too.
Let us start with discrete rotations.
The eigenvalues of operator $\mathbf{V_{1}}(\mathbf{R_{1}})$ are
solutions of the secular equation
\begin{equation}\label{det}
\det(\lambda\mathbb{I}-\mathbf{V_{1}}(\mathbf{R_{1}}))=0
\qquad \text{or} \qquad \lambda^{n}-1=0,
\end{equation}
hence the spectrum is
\begin{equation}\label{}
\sigma(\mathbf{V_{1}}(\mathbf{R}_{1}))=\{\lambda_{j}=e^{\frac{2\pi
ij}{n}}|j=0,1,...,n-1\}.
\end{equation}
Then the eigenvalues of operators $\mathbf{V_{1}}(\mathbf{R_{k}})$ are
simply the powers of those of $\mathbf{V_{1}}(\mathbf{R_{1}})$,
\begin{equation}\label{spek1}
\sigma(\mathbf{V_{1}}(\mathbf{R}_{k}))=
\sigma(\mathbf{V_{1}}((\mathbf{R}_{1})^{k}))=
\{\lambda_{j}^k=e^{\frac{2\pi ijk}{n}}|j=0,1,...,n-1\}.
\end{equation}
Similarly the spectra of operators $\mathbf{V_{1}}(\mathbf{M}_{k})$
for mirror symmetries are obtained by solving
\begin{equation}\label{det1}
\det(\lambda\mathbb{I}-\mathbf{V_{1}}(\mathbf{M}_{k}))=0,
\end{equation}
but here two cases should be distinguished.
\begin{enumerate}
\item If $n$ is {\it odd}, then
(\ref{det1}) becomes
\begin{equation}\label{spm}
(1-\lambda)(\lambda^{2}-1)^{\frac{n-1}{2}}=0
\qquad \Rightarrow \qquad
\sigma(\mathbf{V_{1}}(\mathbf{M}_{k})) = \{+1,-1\}
\end{equation}
and the multiplicities of eigenvalues $\pm 1$ are $\frac{n\pm
1}{2}$.
\item If $n$ is {\it even}, then the
characteristic polynomial of operator
$\mathbf{V_{1}}(\mathbf{M}_{k})$ depends, in addition to dimension
$n$, also on parameter $k$. At this point we have also to
distinguish if $k$ is odd or even. In the geometric picture
we have to distinguish if the axis of mirror
symmetry $\mathbf{M}_{k}$ passes through opposite vertices of the
$n$--sided regular polygon ($k$ even), or if it is an axis of two
opposite sides of the polygon ($k$ odd). So if $n$ is even, then
(\ref{det1}) has following form:
\begin{eqnarray}\label{}
0&=&
(1-\lambda)^{\frac{n}{2}+1}(1+\lambda)^{\frac{n}{2}-1}
\text{ if $k$ is even } , \\
0&=& (1-\lambda)^{\frac{n}{2}}(1+\lambda)^{\frac{n}{2}}
\text{ if $k$ is odd }.
\end{eqnarray}
The spectra for both cases are the same as for odd $n$, but the
multiplicities of eigenvalues are different. If $k$ is even, the
multiplicity of eigenvalue $+1$ is $\frac{n}{2}+1$, the multiplicity
of eigenvalue $-1$ is $\frac{n}{2}-1$; if $k$ is odd, then the
multiplicity of both eigenvalues is $\frac{n}{2}$.
\end{enumerate}
The evaluation of operators
$\widehat{\mathbf{P_1}}_{\mathbf{R}_{k}}$ for discrete rotations can
be done using the fact that rotations $\mathbf{R}_{k}$ form an
Abelian subgroup $\mathbf{Z_{n}}$ of $\mathbf{D_{n}}$. Thus we have
simply
\begin{equation}\label{}
\exp(-i\widehat{\mathbf{P}}_{\mathbf{R}_{k}})=
\mathbf{V_{1}}(\mathbf{R}_{k})=(\mathbf{V_{1}}(\mathbf{R}_{1}))^{k}
= \exp(-ik\widehat{\mathbf{P}})
\end{equation}
where
$ \widehat{\mathbf{P}}=\widehat{\mathbf{P_1}}_{\mathbf{R}_{1}}$
can be interpreted as self--adjoint momentum operator.
The spectrum (\ref{spek1}) of $\mathbf{V_{1}}(\mathbf{R}_{1})$
has $n$ different simple eigenvalues
$\lambda_{k}=e^{\frac{2\pi ik}{n}}$, so it remains to find
the corresponding one--dimensional spectral projectors
\begin{equation}\label{proj2}
\mathbb{P}_{k}=|k\rangle \langle k|.
\end{equation}
Here $|k\rangle$ are normalized eigenvectors of operator
$\mathbf{V_{1}}(\mathbf{R}_{1})$ belonging to eigenvalues
$\lambda_{k}$ \cite{StovTolar}:
\begin{equation}\label{}
|k\rangle = \frac{1}{\sqrt{n}}\left(\begin{array}{c}
\lambda_{k}^{n-1} \\
\lambda_{k}^{n-2} \\
\cdot \\
\cdot \\
\lambda_{k} \\
1
\end{array}\right).
\end{equation}
Using (\ref{proj2}), matrix elements of $\mathbb{P}_{k}$ can be written as
\begin{equation}\label{}
(\mathbb{P}_{k})_{lm}=
\frac{1}{n}\lambda_{k}^{n-l}\overline{\lambda_{k}^{n-m}}=
\frac{1}{n}e^{\frac{2\pi ik(m-l)}{n}}.
\end{equation}
Then, using (\ref{ln}) for simple eigenvalues, we have
\begin{equation}\label{}
(\widehat{\mathbf{P}})_{lm}=
i (\ln \mathbf{V_{1}}(\mathbf{R}_{1}))_{lm}
= i \sum_{j=0}^{n-1}\ln (\lambda_j)(\mathbb{P}_{j})_{lm},
hence matrix elements of the momentum operator are obtained:
\begin{eqnarray}\label{gr}
(\widehat{\mathbf{P}})_{lm}
&=& \frac{2\pi}{n}\frac{1}{1-e^{\frac{2\pi i(m-l)}{n}}} \quad m \neq l, \\
&=& -\pi\frac{n-1}{n} \quad m = l.
\end{eqnarray}
Note that this result was obtained in \cite{TCh} by finite Fourier
transform of the position operator. For the analysis of
operators of mirror symmetries see the Appendix.
From the physical point of view unitary operators
$\mathbf{V_{1,2}}(\mathbf{M}_{k})$ play the role of parity operators.
\section{Coherent states parametrized by
$\mathbf{Z_{n}}\times \mathbf{D_{n}}$}
In this section generalized coherent states will be determined for
each of the two quantum kinematics.
{\it A family of generalized coherent states of type $\{\Gamma(g),
\vert\psi_{0}\rangle\}$ in~the sense of Perelomov \cite{Perelomov}
is defined for a~representation $\Gamma(g)$ of a~group $\mathbf{G}$
as a~family of states $\{\vert\psi_{g}\rangle\}$,
$\vert\psi_{g}\rangle=\Gamma(g) \vert\psi_{0}\rangle$, where $g$
runs over the whole group $\mathbf{G}$ and $\vert\psi_{0}\rangle$
is the `vacuum' vector.}
First take quantum kinematics defined by the system of
imprimitivity $(\mathbf{V_1},\mathbf{E})$. To construct
group--related coherent states of Perelomov type parametrized by
$(a,g) \in \mathbf{Z_{n}}\times \mathbf{D_{n}}$,
we define generalized Weyl operators
\begin{equation}\label{}
\widehat{\mathbf{W_1}}(a,g)=
\exp(\frac{2\pi ia}{n}\widehat{\mathbf{Q}})
\exp(-i\widehat{\mathbf{P_1}}_{g})=
e^{\frac{2\pi ia}{n}\widehat{\mathbf{Q}}}\mathbf{V_{1}}(g);
\quad a\in \mathbf{Z_{n}},\, g\in \mathbf{D_{n}}.
\end{equation}
Here
\begin{equation}\label{eiaq}
(e^{\frac{2\pi ia}{n}\widehat{\mathbf{Q}}})_{jk}=
\delta_{j,k}e^{\frac{2\pi iaj}{n}},\quad
\exp(\frac{2\pi ia}{n}\widehat{\mathbf{Q}})=
\left( \begin{array}{ccccc}
1 & & & & \\
& e^{2\pi \frac{ia}{n}} & & & \\
& & \cdot & & \\
& & & \cdot & \\
& & & & e^{\frac{2\pi ia(n-1)}{n}}
\end{array}\right).
\end{equation}
Note that, if the system of imprimitivity is irreducible, also the
set of generalized Weyl operators defined above acts irreducibly in
the Hilbert space $\cal H$.
Restricting $g$ to the subgroup $\mathbf{Z_{n}}$ of discrete
rotations, the unitary operators satisfy
\begin{equation}\label{xyz}
e^{\frac{2\pi ia}{n}\widehat{\mathbf{Q}}}e^{im\widehat{\mathbf{P}}}=
e^{\frac{2\pi iam}{n}}e^{im\widehat{\mathbf{P}}}
e^{\frac{2\pi ia}{n}\widehat{\mathbf{Q}}}
\end{equation}
and operators $\widehat{\mathbf{W_1}}(a,g)$ form the well--known
projective unitary representation of the group $\mathbf{Z_{n}}\times
\mathbf{Z_{n}}$, which acts irreducibly in the Hilbert space
${\cal H} =\mathbb{C}^{n}$ \cite{Weyl,StovTolar}.
Unfortunately, if we want to derive a relation similar to \eref{xyz}
for operators $\widehat{\mathbf{P_1}}_{\mathbf{M}_{k}}$, by
performing the same computation as for $\widehat{\mathbf{P}}$ we
obtain
\begin{equation}\label{prus}
(e^{\frac{2\pi ia}{n}\widehat{\mathbf{Q}}}
e^{i\widehat{\mathbf{P_1}}_{\mathbf{M}_{m}}})_{jk}=
e^{\frac{2\pi ia}{n}(2m-2k)}(e^{i\widehat{\mathbf{P_1}}_{\mathbf{M}_{m}}}
e^{\frac{2\pi ia}{n}\widehat{\mathbf{Q}}})_{jk}.
\end{equation}
Here the multiplier is $k$--dependent, hence there is neither an
operator equality similar to (\ref{xyz}) nor a projective
representation property of operators $\widehat{\mathbf{W_1}}(a,g)$.
To construct the system of coherent states in $\mathbb{C}^{n}$,
besides the system of operators $\widehat{\mathbf{W_1}}(a,g)$
a properly defined 'vacuum' vector $|0\rangle$ is needed. Then
generalized coherent states of type
$\{\widehat{\mathbf{W_1}}(a,g),|0\rangle\}$ are given by
\begin{equation}\label{dks}
|a,g\rangle_1 = \widehat{\mathbf{W_1}}(a,g)|0\rangle, \quad a \in
\mathbf{Z}_{n}, \, g \in \mathbf{D}_{n},
\end{equation}
and $|0\rangle= |0,e\rangle_1$.
In analogy with continuous case where the coherent states are
eigenvectors of the annihilation operator and the vacuum vector
belongs to eigenvalue $0$ one would like to have a similar condition
\cite{TCh}
\begin{equation}\label{en1}
e^{\frac{2\pi}{n}\widehat{\mathbf{Q}}}
e^{i\widehat{\mathbf{P}}}|0\rangle=|0\rangle.
\end{equation}
But (\ref{en1}) cannot hold true since $1$ is not an eigenvalue of the
operator. So our admissible vacuum vectors are required to satisfy
(\ref{en1}) up to a non--zero multiplier \cite{TCh},
\begin{equation}\label{en2}
e^{\frac{2\pi}{n}\widehat{\mathbf{Q}}}
e^{i\widehat{\mathbf{P}}}|0\rangle= \lambda|0\rangle.
\end{equation}
For $n$ spectral values
\begin{equation}\label{}
\sigma(e^{\frac{2\pi}{n}\widehat{\mathbf{Q}}}e^{i\widehat{\mathbf{P}}})
= \{\lambda_{k}=e^{\frac{\pi (n-1)}{n}}e^{\frac{2\pi ik}{n}}\;
\vert \; k=0,1,..,n-1\}
\end{equation}
we obtain a system of $n$ admissible (normalized) vacuum vectors
$|0\rangle^{(k)}$ labeled by $k=0,1,..,n-1$,
\begin{equation}\label{}
|0\rangle^{(k)}=\mathcal{A}_{n} \left(
\begin{array}{c}
1 \\
e^{\frac{\pi(3-n)}{n}}e^{\frac{-2\pi ik}{n}}\\
\cdot \\
\cdot \\
e^{\frac{\pi(n-1)}{n}}e^\frac{-2\pi ik(n-1)}{n}
\end{array}\right);
\end{equation}
here the $j$--th component
\begin{equation}\label{g}
(|0\rangle^{(k)})_j=g_{j}^{(k)}= \mathcal{A}_{n}
e^{\frac{\pi j(j-n+2)}{n}}e^{-j\frac{2\pi ik}{n}},
\end{equation}
where $j=0,1,\ldots,n-1$ and $\mathcal{A}_{n}$ is the normalization
constant
\begin{equation}\label{A}
\mathcal{A}_{n}=\frac{1}{\sqrt{\sum_{j=0}^{n-1}e^{\frac{2\pi}{n}j(j-n+2)}}}.
\end{equation}
Now we are able to construct $n$ families of coherent states in the
first quantum kinematics which are labeled by parameter $k$.
Applying (\ref{dks}) for $\mathbf{R}_{m}$, we obtain
\begin{eqnarray}\label{vacr}
(|a,\mathbf{R}_{m}\rangle^{(k)}_{1})_{j} & = &
(\widehat{\mathbf{W_{1}}}(a,\mathbf{R}_{m})|0\rangle^{(k)})_{j} =\\
\nonumber = (e^{\frac{2\pi ia}{n}\widehat{\mathbf{Q}}}\widehat{\mathbf{V_{1}}}
(\mathbf{R}_{m})|0\rangle^{(k)})_{j} & = & e^{\frac{2\pi iaj}{n}}
g^{(k)}_{j-m \pmod{n}};
\end{eqnarray}
for $\mathbf{M}_{m}$ we obtain
\begin{eqnarray}\label{vacm}
(|a,\mathbf{M}_{m}\rangle^{(k)}_{1})_{j}& = &
(\widehat{\mathbf{W_{1}}}(a,\mathbf{M}_{m})|0\rangle^{(k)})_{j} =\\
\nonumber = (e^{\frac{2\pi ia}{n}\widehat{\mathbf{Q}}}\widehat{\mathbf{V_{1}}}
(\mathbf{M}_{m})|0\rangle^{(k)})_{j}
& = & e^{\frac{2\pi iaj}{n}}g^{(k)}_{m-j \pmod{n}}.
\end{eqnarray}
Coherent states for the second quantum mechanics with
representation $\mathbf{V_{2}}$ are equivalent to those of the first
one because they differ on $\mathbf{M}_{m}$ by an unessential
phase factor $-1$:
\begin{equation}
|a,\mathbf{R}_{m}\rangle^{(k)}_{2}=|a,\mathbf{R}_{m}\rangle^{(k)}_{1},
\qquad
|a,\mathbf{M}_{m}\rangle^{(k)}_{2}=-|a,\mathbf{M}_{m}\rangle^{(k)}_{1}.
\end{equation}
\section{Properties of coherent states}
One of the most important properties of coherent states is their
overcompleteness expressed by a resolution of unity
\begin{equation}\label{}
\sum_{(a,g)\in \mathbf{Z_{n}}\times \mathbf{D_{n}}}
|a,g\rangle^{(k)} \langle a,g|^{(k)}=c_{k}\widehat{\mathbb{I}},
\end{equation}
where $c_{k}$ is some non--zero complex number. Let us check this
property for our coherent states. From (\ref{vacr}) and (\ref{vacm})
we get
\begin{equation*}
\sum_{(a,g)\in \mathbf{Z_{n}}\times \mathbf{D_{n}}}
|a,g\rangle^{(k)}_{1,2} \langle a,g|^{(k)}_{1,2}
=\sum_{a\in \mathbf{Z_{n}},m = 0,..,n-1}
|a,\mathbf{R}_{m}\rangle^{(k)}_{1} \langle a,\mathbf{R}_{m}|^{(k)}_{1}
\end{equation*}
\begin{equation}\label{rou}
+ \sum_{a \in \mathbf{Z_{n}},m=0,..,n-1}
|a,\mathbf{M}_{m}\rangle^{(k)}_{1} \langle a,\mathbf{M}_{m}|^{(k)}_{1}.
\end{equation}
Matrix element of the first sum on the right--hand side of
(\ref{rou}) is, due to (\ref{g}) and (\ref{A}),
\begin{equation*}
(\sum_{a,m}|a,\mathbf{R}_{m}\rangle^{(k)}_{1} \langle
a,\mathbf{R}_{m}|^{(k)}_{1})_{jl}
=\sum_{a,m}(|a,\mathbf{R}_{m}\rangle^{(k)}_{1})_{j} (\langle
a,\mathbf{R}_{m}|^{(k)}_{1})_{l} = \end{equation*}
\begin{equation}\label{rour}
=\sum_{a,m}e^{\frac{2\pi ia}{n}(j-l)}
g^{(k)}_{j-m \pmod{n}}\overline{g^{(k)}_{l-m \pmod{n}}}
= n\delta_{j,l}\langle 0|0\rangle^{(k)}=n\delta_{j,l}.
\end{equation}
Exactly the same result is obtained for the second sum on the
right--hand side of (\ref{rou}):
\begin{eqnarray}\label{roum}
\nonumber (\sum_{a,m}|a,\mathbf{M}_{m}\rangle^{(k)}_{1} \langle
a,\mathbf{M}_{m}|^{(k)}_{1})_{jl} =\sum_{a,m}e^{\frac{2\pi ia}{n}(j-l)}
g^{(k)}_{m-j \pmod{n}}\overline{g^{(k)}_{m-l \pmod{n}}} = \\
= n\delta_{j,l}\sum_{m}g^{(k)}_{m-j \pmod{n}}
\overline{g^{(k)}_{m-l \pmod{n}}} =n\delta_{j,l}.
\end{eqnarray}
So we have proved that the resolution of unity is fulfilled:
\begin{equation}\label{rouf}
\sum_{(a,g)\in \mathbf{Z_{n}}\times \mathbf{D_{n}}}|a,g\rangle^{(k)}_{1,2}
\langle a,g|^{(k)}_{1,2}=2n\widehat{\mathbb{I}}
\end{equation}
and this result holds for both representations $\mathbf{V_{1}}$ and
$\mathbf{V_{2}}$.
For the inner product (overlap) of two coherent states we have the
formulae
\begin{eqnarray}\label{sumsum}
\langle
a,\mathbf{R}_{p}|b,\mathbf{R}_{q}\rangle^{(k)}_{1,2}
&=&\sum_{j=1}^{n}e^{\frac{2\pi ij}{n}(b-a)}
\overline{g^{(k)}_{j-p \pmod{n}}}g^{(k)}_{j-q \pmod{n}},\\
\nonumber
\langle a,\mathbf{M}_{p}|b,\mathbf{M}_{q}\rangle^{(k)}_{1,2}
&=&\sum_{j=1}^{n}e^{\frac{2\pi ij}{n}(b-a)}
\overline{g^{(k)}_{p-j \pmod{n}}}g^{(k)}_{q-j \pmod{n}},\\
\nonumber \langle
a,\mathbf{R}_{p}|b,\mathbf{M}_{q}\rangle^{(k)}_{1,2}
&=&\sum_{j=1}^{n}e^{\frac{2\pi ij}{n}(b-a)}
\overline{g^{(k)}_{j-p \pmod{n}}}g^{(k)}_{q-j \pmod{n}}.
\end{eqnarray}
Note that the inner products yield the reproducing kernel
$ \langle x \vert x' \rangle = K(x,x')$ \cite{AAG}.
If the system is prepared in the coherent state
$|a,g\rangle^{(k)}_{1,2}$, then the probability to measure the
eigenvalue $j$ of position operator is given by $\vert \langle j
|a,g\rangle^{(k)}_{1,2}\vert^2$. It is independent of $k$ and is the
same in both quantum kinematics, namely,
\begin{eqnarray}
\vert\langle j|a,\mathbf{R}_{m}\rangle^{(k)}_{1,2}\vert^2 &=&
\mathcal{A}_{n}^{2} e^{\frac{2\pi}{n}(j-m)(j-m-n+2)}, \cr
\vert\langle j|a,\mathbf{M}_{m}\rangle^{(k)}_{1,2}\vert^2 &=&
\mathcal{A}_{n}^{2}e^{\frac{2\pi}{n}(m-j)(m-j-n+2)}.
\end{eqnarray}
\section{Concluding remarks}
In this paper we have constructed systems of imprimitivity on the
finite configuration space $\mathbf{Z}_{n}$ considered as a
homogeneous space of the dihedral group $\mathbf{D}_{n}$. We have
shown that there exist two inequivalent irreducible systems of
imprimitivity $(\mathbf{V_{1}},\mathbf{E})$ and
$(\mathbf{V_{2}},\mathbf{E})$. Unitary representations
$\mathbf{V_{1}}$ and $\mathbf{V_{2}}$ have clear physical
significance of symmetry transformations.
Using these systems of imprimitivity, we have constructed the
corresponding families of group related coherent states in the sense
of Perelomov. They are connected with the group
$\mathbf{Z}_{n}\times \mathbf{D}_{n}$ acting on the discrete phase
space $\mathbf{Z}_{n}\times \mathbf{Z}_{n}$. Unfortunately, due to
(\ref{prus}) we have lost the group property of the set of operators
$\widehat{\mathbf{W}}(a,g)$, i.e. these operators do not form a
projective unitary representation of the group $\mathbf{Z}_{n}
\times \mathbf{D}_{n}$. In spite of this fact for the first system
of imprimitivity $n$ families of coherent states were obtained,
generated from $n$ admissible vacuum vectors (\ref{g}). It turned
out that the coherent states for the second system of imprimitivity
differ from the first only by an unessential phase factor, i.e.,
they are physically equivalent. For all $n$ families of coherent
states the overcompleteness property was demonstrated. We have also
evaluated the overlaps of pairs of coherent states in the form of
finite sums (\ref{sumsum}). The only physical difference between the
two quantum kinematics can be observed in the difference between
unitary representations $\mathbf{V_{1}}$ and $\mathbf{V_{2}}$ on
mirror symmetries, which have the meaning of parity operators.
Let us note that in quantum optics, discrete phase space
$\mathbf{Z}_{n}\times \mathbf{Z}_{n}$ is employed in connection with
the quantum description of phase conjugated to number operator
\cite{PeggBarnett}. Our approach can also provide a suitable
starting point for the approximate solution of the continuous
Schr\"odinger equation. In this connection we found instructive the
paper \cite{Digernes} on finite approximation of continuous Weyl
systems inspired by an approximation scheme due to J. Schwinger
\cite{Schwinger*}.
Another interesting application is offered by quantum chemistry,
viz. H\"uckel's treatment of delocalized $\pi$-electrons and its
generalizations in various kinds of molecules, where molecular
orbitals are expressed as linear combinations of atomic orbitals
\cite{Rouvray,Ruedenberg}. In this respect our approach seems
especially suitable for the treatment of ring molecules with $n$
equivalent carbon atoms called annulenes. In our notation, the set
of atomic orbitals would correspond to the standard basis in
$\mathcal{H} = \mathbb{C}^n$ and unitary representations
$\mathbf{V_{1}}$ and $\mathbf{V_{2}}$ realize the geometric symmetry
transformations.
\section*{Appendix}
For computation of matrix functions the Lagrange--Sylvester theorem
is useful:
\noindent {\bf Theorem} \cite{LSF}.{\it Let $\mathbb{A}$ be an
$n\times n$ matrix with spectrum $\sigma(\mathbb{A})=\{
\lambda_{1},\lambda_{2},...,\lambda_{s}\}$, $s\leq n$. Let $q_{j}$
be the multiplicity of eigenvalue $\lambda_{j}$, $j=1,2,...,s$. Let
$\Omega \subset \mathbb{C}$ be an open subset of the complex plane
such that $\sigma(\mathbb{A})\subset \Omega$. Then the formula
\begin{equation}\label{LSF}
f(\mathbb{A}) =
\sum_{j=1}^{s}\sum_{k=0}^{q_{j}-1}\frac{f^{(k)}(\lambda_{j})}{k!}
(\mathbb{A}-\lambda_{j}\mathbb{I})^{k}\mathbb{P}_{j}
\end{equation}
holds for every function $f$ holomorphic on $\Omega$. Here
$\mathbb{P}_{j}$ is the orthogonal projector onto the subspace of
$\mathbb{C}^{n}$ which is spanned by the set of all eigenvectors
with eigenvalue $\lambda_{j}$:
\begin{equation}\label{proj}
\mathbb{P}_{j}:=\prod_{l=1,l\neq
j}^{s}\frac{\lambda_{l}\mathbb{I}-\mathbb{A}}{\lambda_{l}-\lambda_{j}}.
\end{equation}
}
The formula (\ref{LSF}) can be applied to equation (\ref{ln}) to
evaluate operators $\widehat{\mathbf{P_1}}_{g}$ for mirror
symmetries. Since the multiplicities of spectral values
$\pm 1$ have already been determined, we
have only to find the spectral projectors $\mathbb{P}_{k}$ for each
representation element $\mathbf{V_{1}}(\mathbf{M}_{k})$.
From equation
(\ref{ln})
\begin{equation}\label{}
\widehat{\mathbf{P_1}}_{\mathbf{M}_{k}}= i\cdot
\ln(\mathbf{V_{1}}(\mathbf{M}_{k})),
\end{equation}
we get, using the Lagrange--Sylvester formula (\ref{LSF})
with spectrum (\ref{spm}), the spectral decomposition
\begin{eqnarray}\label{ppp}\nonumber
\widehat{\mathbf{P_1}}_{\mathbf{M}_{k}}=i\cdot
\sum_{j=0}^{q_{(+)}-1}\frac{\ln^{(j)}(+1)}{j!}(\mathbf{V_{1}}
(\mathbf{M}_{k})-\mathbb{I})^{j}\widehat{\mathbb{P}}_{+1} \\
+i\cdot \sum_{j=0}^{q_{(-)}-1}\frac{\ln^{(j)}(-1)}{j!}(\mathbf{V_{1}}
(\mathbf{M}_{k})+\mathbb{I})^{j}\widehat{\mathbb{P}}_{-1},
\end{eqnarray}
where $q_{(\pm)}$ are multiplicities of eigenvalues $\pm 1$.
Strictly said the assumption of the Lagrange--Sylvester formula
(\ref{LSF}) is not satisfied since the complex logarithm is not
holomorphic on the non--positive part of the real axis and $-1$
belongs to the spectrum of $\mathbf{V_{1}}(\mathbf{M}_{k})$. We will
express $\widehat{\mathbf{P}}_{\mathbf{M}_{k}}$ in a formal way and
verify (\ref{exp}) using (\ref{LSF}), where function $\exp$ is
holomorphic.
Using formula \eref{proj} for the projectors projecting
on $q_{(\pm)}$-dimensional subspaces of $\mathbb{C}^n$
\begin{equation}\label{}
\widehat{\mathbb{P}}_{+1} =
\frac{(\mathbf{V_{1}}(\mathbf{M}_{k})+\mathbb{I})}{2}, \qquad
\widehat{\mathbb{P}}_{-1} =
-\frac{(\mathbf{V_{1}}(\mathbf{M}_{k})-\mathbb{I})}{2},
\end{equation}
and the property
\begin{equation}\label{}
(\mathbf{V_{1}}(\mathbf{M}_{k})-\mathbb{I})
(\mathbf{V_{1}}(\mathbf{M}_{k})+\mathbb{I})=
(\mathbf{V_{1}}(\mathbf{M}_{k}))^{2}-\mathbb{I}=\widehat{0},
\end{equation}
all elements in the sum \eref{ppp} vanish except $j=0$:
\begin{equation}\label{gz}
\widehat{\mathbf{P_1}}_{\mathbf{M}_{k}} = i\cdot
(\frac{\ln(+1)}{2}(\mathbf{V_{1}}(\mathbf{M}_{k})+
\mathbb{I})-\frac{\ln(-1)}{2}(\mathbf{V_{1}}(\mathbf{M}_{k})-\mathbb{I})).
\end{equation}
Taking the value $-\pi$ for $\ln (-1)$
\begin{equation}
\widehat{\mathbf{P_1}}_{\mathbf{M}_{k}}
= \frac{\pi}{2}(\mathbf{V_{1}}(\mathbf{M}_{k})-\mathbb{I});
\end{equation}
similar calculation leads to
\begin{equation}\label{}
\widehat{\mathbf{P_2}}_{\mathbf{M}_{k}}=
\frac{\pi}{2}(\mathbf{V_{2}}(\mathbf{M}_{k})-\mathbb{I}).
\end{equation}
Note that momentum operators are not uniquely determined. This is
caused by the property of exponential mapping which is not
one-to-one.
\section*{References}
\end{document} |
\begin{document}
\title{Spin-structures on real Bott manifolds}
\author{A. G\c{a}sior\footnote{Author is supported by the Polish National Science Center grant DEC-2013/09/B/ST1/04125}}
\maketitle
\hskip5mm
\section{Introduction}
Let $M^n$ be a flat manifold of dimension $n$, i.e. a compact connected Riemannian manifold without boundary
with zero sectional curvature. From the theorem of Bieberbach (\cite{Ch}, \cite{S3})
the fundamental group
$\pi_{1}(M^{n}) = \Gamma$ determines a short exact sequence:
\begin{equation}\label{ses}
0 \rightarrow {\mathbb Z}^{n} \rightarrow \Gamma \stackrel{p}\rightarrow
G \rightarrow 0,
\end{equation}
where
${\mathbb Z}^{n}$ is a torsion free abelian group of rank $n$ and
$G$ is a finite group which
is isomorphic to the holonomy group of $M^{n}.$
The universal covering of $M^{n}$ is the Euclidean space ${\mathbb R}^{n}$
and hence $\Gamma$
is isomorphic to a discrete cocompact subgroup
of the isometry group $\operatorname{Isom}({\mathbb R}^{n}) = \operatorname{O}(n)\ltimes{\mathbb R}^n = E(n).$ In that case $p:\Gamma\to G$ is the projection on the first component of the semidirect product $O(n)\ltimes \mathbb R^n$ and $\pi_1(M^n)=\Gamma$ is a subgroup of $O(n)\ltimes \mathbb R^n$.
Conversely, given a short exact sequence of the form (\ref{ses}), it is known that
the group $\Gamma$ is (isomorphic to) the fundamental group of a flat manifold if and only if
$\Gamma$ is torsion free.
In this case $\Gamma$ is called a Bieberbach group.
We can define a holonomy representation $\phi:G\to \operatorname{GL}(n,{\mathbb Z})$ by the formula:
\begin{equation}\label{holonomyrep}
\forall e\in\mathbb Z^n\;\forall g\in G,\phi(g)(e) = \tilde{g}e(\tilde{g})^{-1},
\end{equation}
where $p(\tilde{g})=g.$ In this article we shall consider Bieberbach groups of rank $n$ with holonomy group ${\mathbb Z}_{2}^{k}$, $1\leq k\leq n-1$,
and $\phi({\mathbb Z}_{2}^{k})\subset D\subset \operatorname{GL}(n,{\mathbb Z})$.
Here $D$ is the group of matrices with $\pm1$ on the diagonal.
Let
\begin{equation}\label{tower}
M_{n}\stackrel{{\mathbb R} P^1}\to M_{n-1}\stackrel{{\mathbb R} P^1}\to...\stackrel{{\mathbb R} P^1}\to M_{1}\stackrel{{\mathbb R} P^1}\to M_0 =
\{ \bullet\}
\end{equation}
be a sequence of real projective bundles such that $M_i\to M_{i-1}$, $i=1,2,\ldots,n$, is a projective
bundle of a Whitney sum of a real line bundle $L_{i-1}$ and the trivial line bundle over $M_{i-1}$.
The sequence (\ref{tower}) is called the real Bott tower and the top manifold $M_n$ is called the real
Bott manifold, \cite{CMO}.
Let $\gamma_i$ be the canonical line bundle over $M_i$ and we set $x_i = w_1(\gamma_i)$ ($w_1$ is the first Stiefel-Whitney class).
Since $H^1(M_{i-1},{\mathbb Z}_2)$ is additively generated by $x_1,x_2,..,x_{i-1}$ and $L_{i-1}$
is a line bundle over $M_{i-1},$ we can uniquely write
\begin{equation}\label{w1}
w_1(L_{i-1}) = \sum_{k=1}^{i-1} a_{ki}x_k
\end{equation}
where $a_{ki}\in {\mathbb Z}_2$ and $i = 2,3,...,n.$
From above we obtain the matrix $A = [a_{ki}]$ which is a $n\times n$ strictly upper triangular matrix
whose diagonal entries are $0$ and remaining entries are either
$0$ or $1.$
One can observe (see \cite{KM}) that the tower (\ref{tower}) is completely determined by the matrix $A$ and therefore we may denote the real Bott manifold $M_n$ by $M(A)$.
From \cite[Lemma 3.1]{KM} we can consider $M(A)$ as the orbit space $M(A) = {\mathbb R}^n/\Gamma(A),$
where $\Gamma(A)\subset E(n)$ is generated by elements
\begin{equation}\label{gener}
s_{i} = \left(\left[
\begin{matrix}
1&0&0&.&.&...&0\\
0&1&0&.&.&...&0\\
.&.&.&.&.&...&\\
0&...&0&1&0&...&0\\
0&...&0&0&(-1)^{a_{i,i+1}}&...&0\\
.&.&.&.&.&...&\\
0&...&0&0&0&...&(-1)^{a_{i,n}}
\end{matrix}\right], \begin{pmatrix}
0\\
.\\
0\\
\frac{1}{2}\\
0\\
.\\
0\\
0
\end{pmatrix}\right)\in E(n),
\end{equation}
where $(-1)^{a_{i,i+1}}$ is in the $(i+1, i+1)$ position and $\frac{1}{2}$ is the $i-$th coordinate of the column, $i = 1,2,...,n-1.$
$s_{n} = \left(I,\left(0,0,...,0,\frac12\right)\right)\in E(n).$
From \cite[Lemma 3.2, 3.3]{KM} $s_{1}^{2},s_{2}^{2},...,s_{n}^{2}$ commute with each
other and generate a free abelian subgroup ${\mathbb Z}^n.$ In other words $M(A)$ is a flat manifold with holonomy group ${\mathbb Z}_{2}^{k}$ of
diagonal type. Here $k$ is the number of nonzero rows of the matrix $A$.
We have the following two lemmas.
\begin{lm}[\cite{KM}, Lemma 2.1]
The cohomology ring $H^*(M(A),\mathbb Z_2)$ is generated by degree one elements $x_1,\ldots,x_n$ as a graded ring with $n$ relations
$$x_j^2=x_j\sum_{i=1}^na_{ij}x_i,$$
for $j=1,\ldots,n$.
\end{lm}
\begin{lm}[\cite{KM}, Lemma 2.2]\label{lemma12}
The real Bott manifold $M(A)$ is orientable if and only if the sum of entries is $0 \pmod{2}$ for each row of the matrix $A$.
\end{lm}
There are a few ways to decide whether there exists a Spin-structure
on an oriented flat manifold $M^n$. We start with
\begin{defi}[\cite{Fr}]
An oriented flat manifold $M^n$ has a Spin-structure if and only if there exists a homomorphism $\epsilon\colon\Gamma\to\operatorname{Spin}(n)$ such that $\lambda_n\epsilon=p$, where $\lambda_n:\operatorname{Spin}(n)\to\operatorname{SO}(n)$ is the covering map.
\end{defi}
There is an equivalent condition for existence of Spin-structure. This is well known (\cite{Fr}) that the closed oriented
differential manifold $M$ has a Spin-structure if and only if the second Stiefel-Whitney class vanishes.
The $k$-th Stiefel-Whitney class \cite[ page 3, (2.1) ]{LS} is given by the formula
\begin{equation}
w_k(M(A)) = (B(p))^{\ast}\sigma_{k}(y_1,y_2,...,y_{n})\in H^{k}(M(A);{\mathbb Z}_2) ,
\end{equation}
where $\sigma_k$ is the $k$-th elementary symmetric function, $B(p)$ is a map
induced by $p$ on the classification space and
\begin{equation}
y_i : = w_1(L_{i-1})\label{y}\end{equation}
for $i=2,3,\ldots,n$.
Hence,
\begin{equation}\label{sw1}
w_{2}(M(A)) = \sum_{1< i< j\leq n} y_{i}y_{j}\in H^{2}(M(A);{\mathbb Z}_2).
\end{equation}
\begin{defi} {(\cite{CMO}, page 4)}
A binary square matrix $A$ is a Bott matrix if $A = PBP^{-1}$ for a permutation
matrix $P$ and a strictly upper triangular binary matrix $B.$
\end{defi}
Our paper is a sequel of \cite{GS1}. There are given some conditions of the existence of Spin-structures.
\begin{theo}{\rm{(\cite{GS1}, page 1021)}}
Let $A$ be a matrix of an orientable real Bott manifold $M(A)$.
\begin{enumerate}
\item Let $l\in\mathbb N$ be an odd number. If there exist $1\leq i<j\leq n$ and rows $A_{i,*}$, $A_{j,*}$ such that
$$\sharp\{m:a_{i,m}=a_{j,m}=1\}=l$$
and
$$a_{ij}=0$$
then $M(A)$ has no Spin-structure.
\item If $a_{ij}=1$ and there exist $1\leq i<j\leq n$ and rows
$$\begin{aligned}
A_{i,*}&=(0,\ldots,0,a_{i,i_1},\ldots,a_{i,i_{2k}},0,\ldots,0),\\
A_{j,*}&=(0,\ldots,0,a_{j,i_{2k+1}},\ldots,a_{j,i_{2k+2l}},0,\ldots,0)
\end{aligned}$$
such that $a_{i,i_1}=\ldots=a_{i,i_{2k}}=1$, $a_{i,m}=0$ for $m\not\in\{i_1,\ldots,i_{2k}\}$, $a_{j,i_{2k+1}}=\ldots=a_{j,i_{2k+2l}}=1$, $a_{j,r}=0$ for $r\not\in\{i_{2k+1},\ldots,i_{2k+2l}\}$ and $l$, $k$ are odd then $M(A)$ has no Spin-structure.
\end{enumerate}
\end{theo}
In this paper we extend this theorem and we formulate necessary and sufficient conditions for the existence of a Spin-structure on real Bott manifolds. Here is our main result for Bott manifolds with holonomy group ${\mathbb Z}_{2}^{k}$, $k$ even.
\begin{theo}\label{theolast}
Let $A$ be a Bott matrix with $k$ nonzero rows, where $k$ is an even number. Then the real Bott manifold $M(A)$ has a Spin-structure if and only if for all $1\leq i<j\leq n$ the manifolds $M(A_{ij})$ have a Spin-structure, where $A_{ij}$ is the matrix whose only nonzero rows are the $i$-th and $j$-th rows of $A$.
\end{theo}
The structure of the paper is as follows. In Section 2 we give three lemmas. The first of them gives a decomposition of the $n\times n$ integer matrix $A$ into $n\times n$ integer
matrices $A_{ij}$ with $i$-th and $j$-th nonzero rows. In Lemmas 2.2 and 2.3 we examine the dependence of $y_i$ and $w_2$ of a real Bott manifold $M(A)$ on the values $y_i^{jk}$ and $w_2(M(A_{jk}))$ of the manifolds
$M(A_{jk})$. Then the proof of Theorem 1.2 follows from Lemmas 2.2 and 2.3. Section 3 has a very technical character. In this section we give a complete characterization of the existence of a Spin-structure
on the manifolds $M(A_{ij})$, $1\leq i<j\leq n$. Almost all statements in Sections 2 and 3 are illustrated by examples.
The author is grateful to Andrzej Szczepa\'{n}ski for his valuable suggestions and help.
\section{Proof of the Main Theorem}
At the beginning we give formula for the decomposition of real Bott matrix $A$ into the sum of the real Bott matrices with two nonzero rows.
\begin{lm}
Let $A$ be $n\times n-$Bott matrix and let $A_{ij}$, $1\leq i<j\leq n$, be $n\times n$-matrices with $i-$th and $j-$th nonzero rows. Then, if $k$ is even, we have the following decomposition
\begin{equation}A=\sum_{1\leq i<j\leq n}A_{ij}.\label{rozklad}\end{equation}
\end{lm}
\noindent
{\bf Proof.}
Let $A$ be an $n\times n$ Bott matrix with $k$ nonzero rows, where $k$ is an even number. Without loss of generality we can assume that the nonzero rows have numbers from 1 to $k$. We shall consider the matrix $A$ as a sum of matrices $A_{ij}$, $1\leq i<j\leq n$. The number of matrices $A_{ij}$ is equal to
$\binom{k}{2}$. For $1\leq i\leq k$ there are $(k-1)$ two-element subsets of $\{1,2,\ldots,k\}$ containing $i$.
Thus having summed matrices $A_{ij}$ we obtain
\begin{equation}
(k-1)\cdot A=\sum_{1\leq i<j\leq n}A_{ij}.\label{rozklad_1}
\end{equation}
Since $A$ is Bott matrix and $k$ is an even number we get
the formula (\ref{rozklad}).
\hskip 142mm $\Box$
\begin{ex}\label{ex1}
Let
$$A=\left[
\begin{matrix}
0&1&1&0&0&0\\
0&0&1&1&0&0\\
0&0&0&1&1&0\\
0&0&0&0&1&1\\
0&0&0&0&0&0\\
0&0&0&0&0&0
\end{matrix}\right]$$
Thus $n=6$, $k=4$, so we have
$$\begin{aligned}
A=&
\underbrace{\left[
\begin{matrix}
0&1&1&0&0&0\\
0&0&1&1&0&0\\
0&0&0&0&0&0\\
0&0&0&0&0&0\\
0&0&0&0&0&0\\
0&0&0&0&0&0
\end{matrix}\right]}_{A_{12}}
+\underbrace{\left[
\begin{matrix}
0&1&1&0&0&0\\
0&0&0&0&0&0\\
0&0&0&1&1&0\\
0&0&0&0&0&0\\
0&0&0&0&0&0\\
0&0&0&0&0&0
\end{matrix}\right]}_{A_{13}}
+\underbrace{\left[
\begin{matrix}
0&1&1&0&0&0\\
0&0&0&0&0&0\\
0&0&0&0&0&0\\
0&0&0&0&1&1\\
0&0&0&0&0&0\\
0&0&0&0&0&0
\end{matrix}\right]}_{A_{14}}\\
&+\underbrace{\left[
\begin{matrix}
0&0&0&0&0&0\\
0&0&1&1&0&0\\
0&0&0&1&1&0\\
0&0&0&0&0&0\\
0&0&0&0&0&0\\
0&0&0&0&0&0
\end{matrix}\right]}_{A_{23}}
+\underbrace{\left[
\begin{matrix}
0&0&0&0&0&0\\
0&0&1&1&0&0\\
0&0&0&0&0&0\\
0&0&0&0&1&1\\
0&0&0&0&0&0\\
0&0&0&0&0&0
\end{matrix}\right]}_{A_{24}}
+\underbrace{\left[
\begin{matrix}
0&0&0&0&0&0\\
0&0&0&0&0&0\\
0&0&0&1&1&0\\
0&0&0&0&1&1\\
0&0&0&0&0&0\\
0&0&0&0&0&0
\end{matrix}\right]}_{A_{34}}
\end{aligned}$$
\end{ex}
\vskip5mm
Before we start a proof of the main theorem we give an example.
\begin{ex}
For the manifold $M(A)$ from Example \ref{ex1} we get
$$
y_2=x_1,\;
y_3=x_1+x_2,\;
y_4=x_2+x_3,\;
y_5=x_3+x_4,\;
y_6=x_4.$$
Hence
$$\omega_2(M(A))=x_1x_3+x_2x_4.$$
We compute second Stiefel-Whitney classes for real Bott manifolds $M(A_{ij})$ from Example \ref{ex1}. For these purpose we put
$y_l^{ij}=w_1(L_{l-1})$
for manifolds $M(A_{ij})$ and we obtain
$$\begin{array}{lllllll}
y_2^{12}=x_1&y_2^{13}=x_1&y_2^{14}=x_1&y_2^{23}=0&y_2^{24}=0&y_2^{34}=0\\
y_3^{12}=x_1+x_2&y_3^{13}=x_1&y_3^{14}=x_1&y_3^{23}=x_2&y_3^{24}=x_2&y_3^{34}=0\\
y_4^{12}=x_2&y_4^{13}=x_3&y_4^{14}=0&y_4^{23}=x_2+x_3&y_4^{24}=x_2&y_4^{34}=x_3\\
y_5^{12}=0&y_5^{13}=x_3&y_5^{14}=x_4&y_5^{23}=x_3&y_5^{24}=x_4&y_5^{34}=x_3+x_4\\
y_6^{12}=0&y_6^{13}=0&y_6^{14}=x_4&y_6^{23}=0&y_6^{24}=x_4&y_6^{34}=x_4
\end{array}$$
With the above notation we get
$$\begin{aligned}
\sum_{1\leq i<j\leq k}y_2^{ij}&=3x_1=x_1\Rightarrow \sum_{1\leq i<j\leq k}y_2^{ij}=y_2,\\
\sum_{1\leq i<j\leq k}y_3^{ij}&=3x_1+3x_2=x_1+x_2\Rightarrow \sum_{1\leq i<j\leq k}y_3^{ij}=y_3,\\
\sum_{1\leq i<j\leq k}y_4^{ij}&=3x_2+3x_3=x_2+x_3\Rightarrow\sum_{1\leq i<j\leq k}y_4^{ij}=y_4,\\
\sum_{1\leq i<j\leq k}y_5^{ij}&=3x_3+3x_4=x_3+x_4\Rightarrow\sum_{1\leq i<j\leq k}y_5^{ij}=y_5,\\
\sum_{1\leq i<j\leq k}y_6^{ij}&=3x_4=x_4\Rightarrow\sum_{1\leq i<j\leq k}y_6^{ij}=y_6
\end{aligned}$$
and the second Stiefel-Whitney classes for the manifolds $M(A_{ij})$ are as follows
$$\begin{aligned}
w_2(M(A_{12}))&=0,\\
w_2(M(A_{13}))&=x_1x_3,\\
w_2(M(A_{14}))&=0,\\
w_2(M(A_{23}))&=0,\\
w_2(M(A_{24}))&=x_2x_4,\\
w_2(M(A_{34}))&=0.
\end{aligned}$$
Hence
$$
\sum_{1\leq i<j\leq 4}\omega_2(M(A_{ij}))=x_1x_3+x_2x_4=\omega_2(M(A)).
$$
\end{ex}
Following the method described in the above example we have lemmas.
\begin{lm}\label{lemat1}
Let $A$ be an $n\times n$ Bott matrix with $k>3$ nonzero rows, where $k$ is an even number.
Then
\begin{equation}y_l=\sum_{1\leq i<j\leq k}y_l^{ij},\label{wzor_y}\end{equation}
where $y_l=\omega_1(L_{l-1}(M(A)))$ and $y_l^{ij}=\omega_1(L_{l-1}(M(A_{ij})))$.
\end{lm}
\noindent
{\bf Proof.}
We have
$$y_l=w_1(L_{l-1})=\sum_{k=1}^{l-1}a_{kl}x_k=x\cdot A^l$$
where $x=[x_1,\ldots,x_n]$, $A=[a_{ij}]$, $A^l$ is the $l-$th column
of the matrix $A$ and $\cdot$ is multiplication of matrices. Let us multiply (\ref{rozklad})
on the left by $x$
$$\begin{aligned}
x\cdot A&=\sum_{1\leq i<j\leq k} x\cdot A_{ij}.\\
\end{aligned}$$
Since $x\cdot A=[y_1,y_2,\ldots,y_n]$ and $x\cdot A_{ij}=[y^{ij}_1,y^{ij}_2,\ldots,y^{ij}_n]$, we get (\ref{wzor_y}).
\hskip 142mm $\Box$
\begin{lm}\label{lemat2}
Let $A$ be $n\times n$ Bott matrix with $k-$nonzero rows, $k\geq4$, $k$ is an even number.
Then
$$w_2(M(A))=\sum_{1\leq i<j\leq k}w_2(M(A_{ij})).$$
\end{lm}
\noindent
{\bf Proof.}
From (\ref{sw1}) and (\ref{wzor_y})
$$\begin{aligned}
\omega_2(M(A))&=\sum_{l<r}y_ly_r\\
&=\sum_{l<r}\left[\left(\sum_{i<j}y_l^{ij}\right)\right]\left[\left(\sum_{i<j}y_r^{ij}\right)\right]
=\sum_{l<r}\left(\sum_{i<j}y_l^{ij}y_r^{ij}\right)\\
&=\sum_{i<j}\left(\sum_{l<r}y_l^{ij}y_r^{ij}\right)
=\sum_{i<j}\omega_2(M(A_{ij})).
\end{aligned}$$
\hfill $\Box$
From the proofs of Lemma \ref{lemat1} and Lemma \ref{lemat2} we obtain a proof of the Main Theorem \ref{theolast}.
\noindent
{\bf Proof of Theorem \ref{theolast}}
Let us recall the manifold $M$ has a Spin-structure if and only if $w_2(M)=0$.
At the beginning let us assume, for each pair $1\leq i<j\leq n$, we have $w_2(M(A_{ij}))=0$. Then from Lemma \ref{lemat2} we have
$$w_2(M(A))=\sum_{1\leq i<j\leq k}w_2(M(A_{ij}))=0,$$
so the real Bott manifold $M(A)$ has a Spin-structure.
On the other hand, suppose the manifold $M(A)$ admits a Spin-structure; then
$$0=w_2(M(A))=\sum_{1\leq i<j\leq k}w_2(M(A_{ij})).$$
The second Stiefel--Whitney classes of the manifolds $M(A_{ij})$ are non-negative, so
$$\forall_{1\leq i<j\leq n}w_2(M(A_{ij}))=0.$$
\hfill $\Box$
\begin{rem}
We do not know how to prove the main theorem for odd $k$.
On the other hand, we are not sure whether we can formulate it as a conjecture
in this case.
\end{rem}
In the next section of our paper we concentrate on calculations of the Spin-structure on the manifolds $M(A_{ij})$.
\vskip 4mm
\section{Existence of Spin-structure on manifolds $M(A_{ij})$}
From now on, let $A$ be a matrix of an orientable real Bott manifold $M(A)$ of dimension $n$ with two non-zero rows. From Lemma \ref{lemma12} we have that the number of entries 1, in each row, is an odd number, and we have the following three cases:
\newline {\bf CASE I.} There are no columns with double entries 1,
\newline{\bf CASE II.} The number of columns with double entries 1 is an odd number,
\newline {\bf CASE III.} The number of columns with double entries 1 is an even number,
\vskip5mm
\noindent
We give conditions for an existence of the Spin-structure on $M(A_{ij})$. In the further part of the paper we adopt the notation $0_{p}=(\underbrace{0,\ldots,0}_{p\text{ - times}})$. From the definition, rows of number $i$ and $j$ correspond to generators $s_i,s_j$ which define a finite index abelian subgroup $H\subset\pi_1(M(A))$ (see \cite{GK}).
\begin{theo}\label{theo1}
Let $A$ be a matrix of an orientable real Bott manifold $M(A)$ from the above case I. If there exist $1\leq i<j\leq n$ such that
\noindent {\bf 1.}
$$\begin{matrix}
A_{i,\ast} &=(0_{i_1},a_{i,i_{1}+1},\ldots,a_{i,i_{1}+2k},0_{i_{2l}},0_{i_{p}})\\
A_{j,\ast} &=(0_{i_1},0_{i_{2k}},a_{j,i_{1}+2k+1},\ldots,a_{j,i_{1}+2k+2l},0_{i_{p}}),\end{matrix}$$
where $a_{i,i_{1}+1} = \ldots = a_{i,i_{1}+2k} = 1, a_{i,m} = 0$ for $m\notin\{i_1,\ldots,i_{1}+2k\}$,
$a_{j,i_{1}+2k+1}= \ldots = a_{j,i_{1}+2k+2l} = 1, a_{j,r} = 0$ for
$r\notin\{i_{1}+2k+1,\ldots,i_{1}+2k+2l\}$.
\newline Then $M(A)$ admits the Spin-structure if and only if either $l$ is an even number or $l$ is an odd number and
$j\notin\{i_1+1,\ldots,i_{1}+2k\}$.
\noindent
{\bf 2.}
$$\begin{matrix}
A_{i,\ast} &=(0_{i_{1}},0_{i_{2k}},a_{i,i_{2k}+1},\ldots,a_{i,i_{2k}+2l},0_{i_{p}})\\
A_{j,\ast} &=(0_{i_{1}},a_{j,i_{1}+1},\ldots,a_{j,i_{1}+2k},0_{i_{2l}},0_{i_{p}}),\end{matrix}$$
where
$a_{j,i_{1}+1} = \ldots = a_{j,i_{1}+2k} = 1, a_{j,m} = 0$ for $m\notin\{i_1,\ldots,i_{1}+2k\}$,
$a_{i,i_{2k}+1} = \ldots = a_{i,i_{2k}+2l} = 1, a_{i,r} = 0$ for
$r\notin\{i_{2k}+1,\ldots,i_{2k}+2l\}$,
then $M(A)$ has the Spin-structure.
\end{theo}
\noindent
{\bf Proof.}
{\bf 1.}
From (\ref{y}) we have
$$\begin{aligned}
y_{i_1+1}&=\ldots=y_{i_{1}+2k}=x_i,\\
y_{i_{1}+2k+1}&=\ldots=y_{i_{1}+2k+2l}=x_j.\end{aligned}$$
Using (\ref{sw1}) and
$x_i^2=x_i\sum_{j=1}^na_{ji}x_j$
we get
$$\begin{aligned}w_2(M(A))&=k(2k-1)x_i^2+4klx_ix_j+l(2l-1)x_j^2\\
&=k(2k-1)x_i^2+l(2l-1)x_j^2=l(2l-1)x_j^2=lx_j^2.
\end{aligned}$$
Summing up, we have to consider the following cases
\begin{enumerate}
\item if $l=2b$, then $w_2(M(A))=2bx_j^2=0$. Hence $M(A)$ has a Spin-structure,
\item if $l=2b+1$, then
$$\begin{aligned}
w_2(M(A))&=(2b+1)x_j^2=x_j^2\\
&=\begin{cases}0,&\text{if }j\notin\{i_1+1,\ldots,i_{1}+2k\},M(A)\text{ has a Spin-structure,}\\x_ix_j,&\text{if }j\in\{i_1+1,\ldots,i_{1}+2k\},M(A)\text{ has no Spin-structure}.\end{cases}
\end{aligned}$$
\end{enumerate}
{\bf 2.}
From {\rm(}\ref{y}{\rm)}
$$\begin{aligned}
y_{i_1+1}&=\ldots=y_{i_{1}+2k}=x_j\\
y_{i_{1}+2k+1}&=\ldots=y_{i_{1}+2k+2l}=x_i.
\end{aligned}$$
Moreover, from (\ref{sw1}) and since $i_1>j>i$
$$\begin{aligned}
w_2(M(A))&=k(2k-1)x_j^2+4klx_ix_j+l(2l-1)x_i^2\\
&=k(2k-1)\underbrace{x_j^2}_{=0}+l(2l-1)\underbrace{x_i^2}_{=0}=0.\end{aligned}$$
Hence $M(A)$ has the Spin-structure.
\hfill $\Box$
\begin{theo}\label{theo2} Let $A$ be a matrix of an orientable real Bott manifold $M(A)$ from the above case II. If there exist $1\leq i<j\leq n$ such that
\newline{\bf 1.}
$$\begin{footnotesize}\begin{matrix}
A_{i,\ast} = (0_{i_1},a_{i,i_{1}+1},\ldots,a_{i,i_{1}+2k},a_{i,i_{1}+2k+1},\ldots,a_{i,i_{1}+2k+2l},0_{i_{2m}},0_{i_p})\\
A_{j,\ast} = (0_{i_1},0_{i_{2k}},a_{j,i_{1}+2k+1},\ldots,a_{j,i_{1}+2k+2l},a_{j,i_{1}+2k+2l+1},\ldots,a_{j,i_{1}+2k+2l+2m},0_{i_p}),\end{matrix}\end{footnotesize}$$
where
$a_{i,i_{1}+1} = \ldots = a_{i,i_{1}+2k} =a_{i,i_{1}+2k+1}=\ldots=a_{i,i_{1}+2k+2l}=1, a_{i,r} = 0$ for $r\notin\{i_1+1,\ldots,i_{1}+2k+2l\}$,
$a_{j,i_{1}+2k+1} = \ldots= a_{j,i_{1}+2k+2l+2m} = 1, a_{j,s} = 0$ for
$s\notin\{i_{1}+2k+1,\ldots,i_{1}+2k+2l+2m\}$.
Then $M(A)$ has the Spin-structure if and only if either $l$ and $m$ are numbers of the same parity or $l$ and $m$ are numbers of different parity and
$j\notin\{i_1+1,\ldots,i_{1}+2k\}$.
\noindent {\bf 2.}
$$\begin{footnotesize}\begin{matrix} A_{i,\ast} &=(0_{i_1},0_{i_{1}+2k},a_{i,i_{1}+2k+1},\ldots,a_{i,i_{1}+2k+2l},a_{i,i_{1}+2k+2l+1},\ldots,a_{i,i_{1}+2k+2l+2m},0_{i_p}) ,\\
A_{j,\ast} &=(0_{i_1},a_{j,i_{1}+1},\ldots,a_{j,i_{1}+2k},a_{j,i_{1}+2k+1},\ldots,a_{j,i_{1}+2k+2l},0_{i_{2m}},0_{i_p}) \end{matrix}\end{footnotesize}$$
where
$a_{j,i_{1}+1}= \ldots = a_{j,i_{1}+2k}=a_{j,i_{1}+2k+1}=\ldots=a_{j,i_{1}+2k+2l} = 1, a_{j,m} = 0$ for $m\notin\{i_1+1,\ldots,i_{1}+2k+2l\}$,
$a_{i,i_{1}+2k+1}= \ldots = a_{i,i_{1}+2k+2l}=a_{i,i_{1}+2k+2l+1}=\ldots=a_{i,i_{1}+2k+2l+2m} = 1, a_{i,r} = 0$ for
$r\notin\{i_{1}+2k+1,\ldots,i_{1}+2k+2l+2m\}$, then $M(A)$ has the Spin-structure
\end{theo}
\noindent{\bf Proof.}
{\bf 1.} From (\ref{y}) we have
$$\begin{aligned}
y_{i_1+1}&=\ldots=y_{i_{1}+2k}=x_i,\\
y_{i_{1}+2k+1}&=\ldots=y_{i_{1}+2k+2l}=x_i+x_j\\
y_{i_{1}+2k+2l+1}&=\ldots=y_{i_{1}+2k+2l+2m}=x_j.\end{aligned}$$
From (\ref{sw1}) and $x_i^2=x_i\sum_{j=1}^na_{ji}x_j$
we get
$$\begin{aligned}w_2(M(A))&=k(2k-1)x_i^2+4klx_i(x_i+x_j)+l(2l-1)(x_i+x_j)^2+m(2m-1)x_j^2\\
&=l(2l-1)x_j^2+m(2m-1)x_j^2=(l+m)x_j^2.
\end{aligned}$$
We have to consider the following cases:
\begin{enumerate}
\item If $l+m$ is an even number then $w_2(M(A))=0.$ Hence $M(A)$ has a Spin-structure.
\item If $l+m$ is an odd number then
$$\begin{aligned}
w_2(M(A))&=x_j^2\\
&=\begin{cases}0,&\text{if }j\notin\{i_1+1,\ldots,i_{1}+2k\},M(A)\text{ has a Spin-structure}\\x_ix_j,&\text{if }j\in\{i_1+1,\ldots,i_{1}+2k\},M(A)\text{ has no Spin-structure}.\end{cases}
\end{aligned}$$
\end{enumerate}
{\bf 2.} Using (\ref{y}) we get
$$\begin{aligned}
y_{i_1+1}&=\ldots=y_{i_{1}+2k}=x_j\\
y_{i_{1}+2k+1}&=\ldots=y_{i_{1}+2k+2l}=x_i+x_j\\
y_{i_{1}+2k+2l+1}&=\ldots=y_{i_{1}+2k+2l+2m}=x_i.
\end{aligned}$$
Moreover, from {\rm(}\ref{sw1}{\rm)} and since $i_1>j>i$
$$\begin{aligned}
&w_2(M(A))=k(2k-1)x_j^2+l(2l-1)x_i^2+4klx_j(x_i+x_j)+4kmx_ix_j\\
&+4lmx_i(x_i+x_j)+l(2l-1)(x_i+x_j)^2+m(2m-1)x_i^2\\
&=k(2k-1)\underbrace{x_j^2}_{=0}+l(2l-1)\underbrace{x_i^2}_{=0}+l(2l-1)\underbrace{x_j^2}_{=0}+m(2m-1)\underbrace{x_i^2}_{=0}=0.
\end{aligned}$$
Hence $M(A)$ has a Spin-structure.
\hfill $\Box$
\begin{theo}\label{theo3} Let $A$ be a matrix of an orientable real Bott manifold $M(A)$ from the above case III. If there exist $1\leq i<j\leq n$ such that
\newline {\bf 1.}
$$\begin{footnotesize}\begin{matrix} A_{i,\ast} &= (0_{i_1},a_{i,i_{1}+1},\ldots,a_{i,i_{1}+2k+1},a_{i,i_{1}+2k+2},\ldots,a_{i,i_{1}+2k+2l+2},0_{i_{2m+1}},0_{i_p})\\
A_{j,\ast} &= (0_{i_1},0_{i_{2k+1}},a_{j,i_{2k+2}},\ldots,a_{j,i_{1}+2k+2l+2},a_{j,i_{1}+2k+2l+3},\ldots,a_{j,i_{1}+2k+2l+2m+3},0_{i_p}),\end{matrix}\end{footnotesize}$$
where
$a_{i,i_{1}+1} = \ldots = a_{i,i_{1}+2k} =\ldots=a_{i,i_{1}+2k+2l+2}=1, a_{i,r} = 0$ for $r\notin\{i_1+1,\ldots,i_{1}+2k+2l+2\}$,
$a_{j,i_{1}+2k+2} = \ldots= a_{j,i_{1}+2k+2l+2m+3} = 1, a_{j,s} = 0$ for
$s\notin\{i_{1}+2k+2,\ldots,i_{1}+2k+2l+2m+3\}$.
Then $M(A)$ admits the Spin-structure if and only if $l$ and $m$ are numbers of the same parity and
$j\in\{i_1+1,\ldots,i_{1}+2k+2\}$.
{\bf 2.}
$$\begin{footnotesize}\begin{matrix} A_{i,\ast} &=(0,_{i_1},0_{i_{2l+1}},a_{i,i_{1}+2k+2},\ldots,a_{i,i_{1}+2k+2l+2},a_{i,i_{1}+2k+2l+3},\ldots,a_{i,i_{1}+2k+2l+2m+3},0_{i_p})\\
A_{j,\ast} &=(0_{i_1},a_{j,i_{1}+1},\ldots,a_{j,i_{1}+2k+1},a_{j,i_{1}+2k+2},\ldots,a_{j,i_{1}+2k+2l+2},0_{i_{2m}},0_{i_p}) \end{matrix}\end{footnotesize}$$
where
$a_{j,i_{1}+1}= \ldots = a_{j,i_{1}+2k}=a_{j,i_{1}+2k+1}=\ldots=a_{j,i_{1}+2k+2l+2} = 1, a_{j,m} = 0$ for $m\notin\{i_1+1,\ldots,i_{1}+2k+2l+2\}$,
$a_{i,i_{1}+2k+2}= \ldots = a_{i,i_{1}+2k+2l+2}=a_{i,i_{1}+2k+2l+3}=\ldots=a_{i,i_{1}+2k+2l+2m+3} = 1, a_{i,r} = 0$ for
$r\notin\{i_{1}+2k+2,\ldots,i_{1}+2k+2l+2m+3\}$.
Then $M(A)$ has no Spin-structure.
\end{theo}
\noindent
{\bf Proof.} {\bf 1.}
From (\ref{y})
$$\begin{aligned}
y_{i_1+1}&=\ldots=y_{i_{1}+2k+1}=x_i,\\
y_{i_{1}+2k+2}&=\ldots=y_{i_{1}+2k+2l+2}=x_i+x_j\\
y_{i_{1}+2k+2l+3}&=\ldots=y_{i_{1}+2k+2l+2m+3}=x_j.\end{aligned}$$
From (\ref{sw1}) and $x_i^2=x_i\sum_{j=1}^na_{ji}x_j$
we obtain
$$\begin{aligned}w_2(M(A))&=k(2k+1)x_i^2+(2k+1)(2l+1)x_i(x_i+x_j)+(2k+1)(2m+1)x_ix_j\\
&+l(2l+1)(x_i+x_j)^2+(2l+1)(2m+1)x_j(x_i+x_j)+m(2m+1)x_j^2\\
&=(l+m+1)x_j^2+(2l+1)(2m+1)x_ix_j=(l+m+1)x_j^2+x_ix_j.
\end{aligned}$$
Now, if $l$ and $m$ are numbers of the same parity, we have
$$\begin{aligned}&w_2(M(A))=x_ix_j+x_j^2\\&=
\begin{cases}x_ix_j,&\text{ if } j\notin\{i_1+1,\ldots,i_{1}+2k+2\}, M(A)\text { has no Spin-structure},\\
0,&\text{ if } j\in\{i_1+1,\ldots,i_{1}+2k+2\}, M(A)\text { has a Spin-structure}.\end{cases}
\end{aligned}$$
{\bf 2.} From {\rm(}\ref{y}{\rm)}
$$\begin{aligned}
y_{i_1+1}&=\ldots=y_{i_{1}+2k+1}=x_j\\
y_{i_{1}+2k+2}&=\ldots=y_{i_{1}+2k+2l+2}=x_i+x_j\\
y_{i_{1}+2k+2l+3}&=\ldots=y_{i_{1}+2k+2l+2m+3}=x_i.
\end{aligned}$$
From (\ref{sw1}) and since $i_1>j>i$ we get
$$\begin{aligned}
w_2(M(A))&=k(2k+1)x_j^2+m(2m+1)x_i^2+(2k+1)(2l+1)x_j(x_i+x_j)\\
&+(2k+1)(2m+1)x_ix_j+l(2l+1)(x_i+x_j)^2\\
&+(2l+1)(2m+1)x_i(x_i+x_j)+m(2m-1)x_i^2\\
&=k(2k+1)\underbrace{x_j^2}_{=0}+l(2l+1)\underbrace{(x_i+x_j)^2}_{=0}+m(2m+1)\underbrace{x_i^2}_{=0}\\
&+x_j(x_i+x_j)+x_ix_j+x_i(x_i+x_j)=x_ix_j\ne0,
\end{aligned}$$
so $M(A)$ has no Spin-structure.
\hfill $\Box$
Now, we give examples which illustrate Theorems \ref{theo1} - \ref{theo3}.
\begin{ex}
{\bf 1.} Let
$$A=\left[\begin{matrix}
0&0&0&0&0&0&0&0\\
0&0&1&1&0&0&0&0\\
0&0&0&0&1&1&1&1\\
0&0&0&0&0&0&0&0\\
0&0&0&0&0&0&0&0\\
0&0&0&0&0&0&0&0\\
0&0&0&0&0&0&0&0\\
0&0&0&0&0&0&0&0
\end{matrix}\right].$$
Here $2l=4 \Rightarrow l=2$. Hence from Theorem \ref{theo1}, part 1.1, the manifold $M(A)$ has the Spin-structure.
\newline{\bf 2.}
$$A=\left[\begin{matrix}
0&0&0&0&0&0\\
0&0&1&1&0&0\\
0&0&0&0&1&1\\
0&0&0&0&0&0\\
0&0&0&0&0&0\\
0&0&0&0&0&0
\end{matrix}\right].$$
Here
$
l=1,\{i_1,i_2,\ldots,i_n\}=\{3,4\},j=3\in\{3,4\}.$
Hence, from Theorem \ref{theo1}, part 1.2, the real Bott manifold $M(A)$ has no Spin-structure.
\newline{\bf 3.}
$$A=\left[\begin{matrix}
0&1&1&1&1&0&0&0&0\\
0&0&0&1&1&1&1&1&1\\
0&0&0&0&0&0&0&0&0\\
0&0&0&0&0&0&0&0&0\\
0&0&0&0&0&0&0&0&0\\
0&0&0&0&0&0&0&0&0\\
0&0&0&0&0&0&0&0&0\\
0&0&0&0&0&0&0&0&0\\
0&0&0&0&0&0&0&0&0
\end{matrix}\right].$$
From Theorem \ref{theo2}, part 1.4 and since $l=1, m=2, \{i_1,\ldots,i_{2k}\}=\{2,3\}, j=2\in\{2,3\}$
the real Bott manifold has no Spin-structure.
\newline{\bf 4.}
$$A=\left[\begin{array}{ccccccccccccc}
0&0&0&0&0&0&0&0&0&0&0&0&0\\
0&0&1&1&1&1&1&1&0&0&0&0&0\\
0&0&0&0&0&1&1&1&1&1&1&1&1\\
0&0&0&0&0&0&0&0&0&0&0&0&0\\0&0&0&0&0&0&0&0&0&0&0&0&0\\
0&0&0&0&0&0&0&0&0&0&0&0&0\\0&0&0&0&0&0&0&0&0&0&0&0&0\\
0&0&0&0&0&0&0&0&0&0&0&0&0\\0&0&0&0&0&0&0&0&0&0&0&0&0\\
0&0&0&0&0&0&0&0&0&0&0&0&0\\0&0&0&0&0&0&0&0&0&0&0&0&0\\
0&0&0&0&0&0&0&0&0&0&0&0&0\\0&0&0&0&0&0&0&0&0&0&0&0&0
\end{array}\right].$$
In this case $l=1, m=2, $
and from Theorem \ref{theo3} we have that $M(A)$ has no Spin-structure.
\end{ex}
\vskip 2mm
\noindent
Maria Curie-Sk{\l}odowska University,\\
Institute of Mathematics\\
pl. Marii Curie-Sk{\l}odowskiej 1\\
20-031 Lublin, Poland\\
E-mail: [email protected]
\end{document} |
\begin{document}
\title{Structure invariant wave packets}
\author{V. Arrizon, F. Soto-Eguibar and H.M. Moya-Cessa}
\address{Instituto Nacional de Astrof\'{\i}sica, \'Optica y Electr\'onica,\\ Calle Luis Enrique Erro 1, Santa Mar\'{\i}a Tonantzintla, Puebla, 72840 Mexico}
\begin{abstract}
We show that by adding a quadratic phase to an initial arbitrary wavefunction, its free evolution maintains an invariant structure while it spreads by the action of an squeeze operator. Although such invariance is an approximation, we show that it matches perfectly the exact evolution.
\end{abstract}
\maketitle
\section{Introduction}
The Schr\"odinger equation for a free particle has attracted the search for wave functions that evolve without distortion. Berry and Balazs have shown that an Airy wave function keeps its form under evolution, just showing some acceleration \cite{Berry}. However, Airy wave functions are not square integrable functions and therefore are not proper wave functions. If one wants to use them, they need to be apodized, either by cutting them or by superimposing a Gaussian function; i.e., instead considering a Gauss-Airy beam. In such a case, it is too much to say that they lose their shape as they evolve, and therefore, their beauty. Effects such as focusing of waves may occur when particles go through a single slit \cite{Schleich2}, as it has been shown by studying the time dependent wave function in position space and its Wigner function \cite{Schleich1}.\\
In this contribution, we want to show that by adding a positive quadratic phase to an initial arbitrary wavefunction, its free evolution maintains an invariant structure, while it spreads by the action of an squeeze operator. That means, that the effect of passing a beam of particles (for instance electrons \cite{elec}, neutrons \cite{neut} or atoms \cite{atom}) through a negative lens, provides the wave function with the property of evolution invariance, while it diffracts by the application of a squeeze operator to the initial state \cite{Yuen,Caves,Satya,Vidiella,Knight,Schleich}.\\
In the following, we will revisit Airy beams and Airy-Gauss beams in order to show that the later ones deform as they evolve. In Section III, we show that the acquisition of a quadratic phase helps any field to become invariant under free evolution; in Section IV, we give some examples, namely initial Sinc and Bessel functions, while Section V is left for conclusions.
\section{Revisiting Airy beams}
Berry and Balazs \cite{Berry} have shown that an initial wave function of the form (for simplicity we set $\hbar=1$)
\begin{equation}\label{Airy0}
\psi(x,0)=\mathrm{Ai}(\epsilon x),
\end{equation}
where $\epsilon$ is an arbitrary real constant, evolves according to the Schr\"odinger equation for a free particle of mass $m=1$
\begin{equation}\label{schr0}
i\frac{\partial \psi(x,t)}{\partial t}=\frac{\hat{p}^2}{2}\psi(x,t),
\end{equation}
as
\begin{equation}
\psi(x,t)=\mathrm{Ai}\left[\epsilon \left(x-\frac{\epsilon^3t^2}{4}\right)\right]
\exp\left[ i \frac{\epsilon^3t}{2}\left( x-\frac{\epsilon^3t^2}{6}\right)\right],
\end{equation}
as can be verified by substitution into (\ref{schr0}). It is clear from this solution that the Airy wave packet is conserved, meaning that it evolves without spreading. Besides, the evolution shows an acceleration which may be obtained also in some other initial distributions of wave packets, like half Bessel functions \cite{Aleahmad}. Propagation of Airy wavelet-related patterns has also been considered in \cite{Torre0} and it has been shown they provide \textquotedblleft source functions\textquotedblright \; for freely propagating paraxial fields. The acceleration may be corrected by propagating the Airy function in a linear potential \cite{Chavez}. Unfortunately, the Airy wave packet is not a proper wave function as it is not square integrable. A possibility for making it normalizable would be to cut it (have a window) or to {\it apodize} it by multiplying it by a Gauss function, and effectively cutting it. If instead of the initial state (\ref{Airy0}), we consider as initial condition the normalizable wave function
\begin{equation}\label{Airy1}
\psi(x,0)=\textrm{Ai}(\epsilon x)\exp\left( -\beta x^2\right),
\end{equation}
with $\beta$ another arbitrary real constant, the solution then reads
\begin{equation}\label{Airy1Sol}
\psi(x,t)=\frac{1}{\sqrt{1-2i\beta t}}\textrm{Ai}\left[ \zeta(x,t)\right] \exp\left( \frac{\beta x^2}{2i\beta t -1}\right) \exp\left[i\gamma(x,t)\right]
\end{equation}
with
\begin{equation}
\zeta(x,t)=\frac{\epsilon^4t^2+\epsilon x(2i\beta t -1)}{(2\beta t+i)^2}, \qquad
\gamma(x,t)=\frac{3\epsilon^3xt(2\beta t+i)-2i\epsilon^6t^3}{3(2\beta t+i)^3};
\end{equation}
again, this can be proved by direct substitution into Eq.(\ref{schr0}). In Figure 1, we plot the probability density $|\psi(x,t)|^2$ for Eq.(\ref{Airy1Sol}) for different times. We can see that for $\beta=0.01$, the Airy-Gauss beam still accelerates, but it looses its shape.
\begin{figure}
\caption{Plot of the probability density $|\psi(x,t)|^2$ of the wavefunction in equation (\ref{Airy1Sol}) for different times.}
\label{fig1}
\end{figure}
\section{Evolution invariant beams}
Now consider an initial condition of the form
\begin{equation}
\psi(x,0)=\exp\left( i\alpha x^2\right) \phi(x,0),
\end{equation}
where $\alpha$ is a real parameter which must be set in each specific case \cite{victor18}. The solution of the Schr\"odinger equation then reads
\begin{equation}\label{sol}
\psi(x,t)=\exp\left( -i\frac{t}{2}\hat{p}^2\right) \exp\left( i\alpha x^2\right) \phi(x,0).
\end{equation}
Writing the identity operator as $\hat{I}=\exp\left( i\frac{t}{2}\hat{p}^2\right) \exp\left( -i\frac{t}{2}\hat{p}^2\right)$, the previous equation can be cast as
\begin{equation}\label{ec090}
\psi(x,t)=\exp\left( -i\frac{t}{2}\hat{p}^2\right)
\exp\left( i\alpha x^2\right)
\exp \left( i\frac{t}{2}\hat{p}^2\right) \exp\left( -i\frac{t}{2}\hat{p}^2\right) \phi(x,0).
\end{equation}
As is well known, $\exp\left( -i\frac{t}{2}\hat{p}^2\right) x \exp\left( i\frac{t}{2}\hat{p}^2\right) =x-t\hat{p}$, and this implies that
\begin{eqnarray}
\exp\left( -i\frac{t}{2}\hat{p}^2\right)
\exp\left( i\alpha x^2\right)
\exp \left( i\frac{t}{2}\hat{p}^2\right)&=&\exp \left[ i \alpha \left( x-t\hat{p}\right) ^2 \right]
\\ \nonumber
&=&\exp\left\lbrace i\alpha [x^2-t(x\hat{p}+\hat{p}x)+t^2\hat{p}^2]\right\rbrace,
\end{eqnarray}
which substituted in equation (\ref{ec090}) gives us
\begin{equation}
\psi(x,t)=\exp\left\lbrace i\alpha [x^2-t(x\hat{p}+\hat{p}x)+t^2\hat{p}^2]\right\rbrace \exp\left( -i\frac{t}{2}\hat{p}^2\right) \phi(x,0).
\end{equation}
It is not difficult to show that the first exponential above may be factorized as \cite{metop}
\begin{equation}
\exp \left[ if_1(t)x^2\right] \exp\left[ i f_2(t)(x\hat{p}+\hat{p}x)\right] \exp\left[ i f_3(t)\hat{p}^2\right] ,
\end{equation}
with
\begin{equation}
f_1(t)= \frac{\alpha}{1+2\alpha t}, \qquad f_2(t)=-\frac{1}{2} \ln(1+2\alpha t), \qquad f_3(t)= \frac{\alpha t^2}{1+2\alpha t}.
\end{equation}
This allows us to give a final form for equation (\ref{sol}) as
\begin{equation}
\psi(x,t)=\exp\left[ if_1(t)x^2\right] \exp\left[ if_2(t)(x\hat{p}+\hat{p}x)\right] \exp\left[ i f_4(t)\hat{p}^2\right] \phi(x,0)
\end{equation}
with $f_4(t)=f_3(t)-t/2$.\\
We now examine the behaviour of $f_4(t)$ as a function of the parameter $\alpha$. The Taylor series of $f_4(t)$ for $\alpha \approx 0$ is
\begin{equation}\label{0150}
f_4(t) = -\frac{t}{2}+t^2 \alpha-2t^3 \alpha^2+\mathrm{O}(\alpha^3)
\end{equation}
and for $\alpha \approx \infty$ is
\begin{equation}\label{0160}
f_4(t) = -\frac{1}{4\alpha}+\frac{1}{8t\alpha^2}+\mathrm{O}\left( \alpha^{-3}\right).
\end{equation}
In Figure 2, we plot $f_4(t)$ as a function of time for different values of the $\alpha$ parameter. It may be seen that for small values of $\alpha$ it remains close to zero, and for large values of $\alpha$ it becomes very small, as expected from the approximation in Equation (16).
\begin{figure}
\caption{Plot of the function $f_4(t)$ for $\alpha=10$ (dotted line), $\alpha=5.0$ (dashed line) and $\alpha=0.5$ (continuous line).}
\label{fig2}
\end{figure}
Thus, for small values of $\alpha$, we take the first two terms in the Taylor development of the operator
$\exp\left[ i f_4(t)\hat{p}^2\right]$ and we get
\begin{equation} \label{appsol}
\psi_1(x,t)\approx \exp\left[ if_1(t)x^2\right] \exp\left[ if_2(t)(x\hat{p}+\hat{p}x)\right] \left[1+if_4(t)\hat{p}^2\right]\phi(x,0).
\end{equation}
For $\alpha$ large enough, we completely disregard the term $\exp\left[ i f_4(t)p^2\right] $ and, to a very good approximation (as will be seen below), we write simply the zeroth order solution
\begin{equation}\label{appsol2}
\psi_0(x,t)\approx \exp\left[ if_1(t)x^2\right] \exp\left[ i f_2(t)(x\hat{p}+\hat{p}x)\right] \phi(x,0).
\end{equation}
The operator $\exp\left[i f_2(t)(x\hat{p}+\hat{p}x)\right]$ is the squeeze operator, and by its application to the initial function, the equation above may be cast into
\begin{equation}\label{appsol3}
\psi_0(x,t)=\frac{1}{\sqrt{1+2\alpha t}} \exp\left[ if_1(t)x^2\right] \phi\left(\frac{x}{1+2\alpha t},0\right).
\end{equation}
It is clear that the above wave function gives a probability density that remains invariant during evolution
\begin{equation}\label{invariant}
|\psi_0(x,t)|^2=\frac{1}{{1+2\alpha t}}\left\vert \phi\left(\frac{x}{1+2\alpha t},0\right)\right\vert^2.
\end{equation}
The choice of the $\alpha$ parameter depends of the problem that is being studied and on the propagation distance that must be considered, as will be shown in the examples below. From Eqs. (\ref{0150}) and (\ref{0160}), it is also clear that different values of $\alpha$ must be considered if the zeroth order or the first order solutions are going to be used. In \cite{victor18} we present a discussion on the election of this parameter in the realm of classical optics.
\section{Some examples}
In this section, we study some examples where we apply our approximation and compare it with the exact solution.
\subsection{Sinc function}
We start with an initial (unnormalized, but normalizable) wave packet of the form
\begin{equation}
\psi(x,0)= \exp\left( i\alpha x^2\right) \mathrm{Sinc}(bx),
\end{equation}
where $b$ is an arbitrary real constant and where we define the Sinc function as
\begin{equation}
\mathrm{Sinc}(bx)=\frac{1}{b}\int_{-b}^b \exp\left( i u x\right) du.
\label{sinc}
\end{equation}
We write the approximations to zeroth and first order as
\begin{equation}
\psi_0(x,t)=\frac{\exp\left[ if_1(t)x^2\right]} {b\sqrt{1+2\alpha t}} \int_{-b}^b \exp\left( iu\frac{x}{1+2\alpha t}\right) du,
\end{equation}
and
\begin{equation}
\psi_1(x,t)=\psi_0(x,t)+i\frac{f_4(t) \exp\left[ if_1(t)x^2\right]} {b\sqrt{1+2\alpha t}} \int_{-b}^b u^2 \exp\left( iu\frac{x}{1+2\alpha t}\right) du,
\end{equation}
respectively. For the sake of comparison, we can also write the exact solution as
\begin{equation}
\psi(x,t)=\frac{\exp\left[ if_1(t)x^2\right] }{b\sqrt{1+2\alpha t}} \int_{-b}^b \exp\left[ if_4(t)u^2\right] \exp\left( iu\frac{x}{1+2\alpha t}\right) du.
\end{equation}
We plot in Figure \ref{fig3} (a) and (c) the probability densities for the zeroth order and exact solutions, showing that they match very well for a value of $\alpha=0.3$ and have an excellent agreement for a greater value ($\alpha=3$). In Figure \ref{fig3} (b) and (d), the quantities $|\psi_0(x,t)|^2$ (dashed line) and $|\psi_1(x,t)-\psi_0(x,t)|^2$ (solid line) are plotted in order to show that the first order corrections are negligible, already for such small values of $\alpha$.
\begin{figure}
\caption{Probability densities for the zeroth order and exact solutions for the initial Sinc wave packet.}
\label{fig3}
\end{figure}
\subsection{Bessel function}
We consider now the initial wave function given by a Bessel function \cite{Leija,Optica}
\begin{equation}
\psi(x,0)=\exp \left( i\alpha x^2\right) J_n(x),
\end{equation}
with $J_n(x)$ a Bessel function of order $n$, defined as \cite{Arfken}
\begin{equation}
J_n(x)=\frac{1}{2\pi}\int_{-\pi}^{\pi} \exp\left( in\theta\right) \exp\left( -ix\sin\theta\right) d\theta.
\end{equation}
It is not difficult to show that the zeroth order solution is given by
\begin{equation}
\psi_0(x,t)=\frac{\exp\left[ if_1(t)x^2\right]} {\sqrt{1+2\alpha t}} J_n\left(\frac{x}{1+2\alpha t}\right),
\end{equation}
while the solution to first order reads
\begin{eqnarray}
& & \psi_1 \left( x,t\right)= \frac{\exp\left[ i f_1\left( t \right) x^2\right]}{\sqrt{1+2\alpha t}} \times
\nonumber \\
& & \left\lbrace
\left[1+\frac{f_4\left( t \right) }{2}\right]
J_n\left( \frac{x}{1+2\alpha t} \right)
-i \frac{f_4\left( t \right) }{4}
\left[ J_{n+2}\left( \frac{x}{1+2\alpha t} \right)+
J_{n-2}\left( \frac{x}{1+2\alpha t} \right)
\right]
\right\rbrace.
\end{eqnarray}
In order to show that the approximation is good, we write also the exact solution as
\begin{eqnarray}
\psi(x,t)=\frac{ \exp\left[ if_1(t)x^2\right] }{2\pi\sqrt{1+2\alpha t}}\int_{-\pi}^{\pi} \exp\left( in\theta\right) \exp\left( -ix\sin\theta\right) \exp\left[ if_4(t)\sin^2\theta\right] d\theta,
\end{eqnarray}
which is a so-called generalized Bessel function \cite{Leija,Dattoli,Torre}. In Figure \ref{fig4}, we plot the probability densities for the exact (solid lines) and zeroth order solutions (dashed lines) which again show an excellent agreement.
\begin{figure}
\caption{Probability densities for the exact (solid lines) and zeroth order (dashed lines) solutions for the initial Bessel wave packet.}
\label{fig4}
\end{figure}
\section{Conclusions}
We have shown that by adding a quadratic phase to an initial wave packet, its structure may be kept invariant through free evolution. The main result of this contribution is equation (\ref{invariant}), which shows clearly this fact. Although the invariance is an approximation, it was shown that it perfectly matches the exact evolution. The price that has to be paid is the usual spread of the wave function due to free evolution, which is given here by the application of the squeeze operator to the initial wave function.\\
\end{document} |
\begin{document}
\title{Detection of Gravitational Wave - \\
An Application of Relativistic Quantum Information Theory}
\author{Ye Yeo}
\affiliation{Department of Physics, National University of Singapore, 10 Kent Ridge Crescent, Singapore 119260, Singapore}
\author{Chee Leong Ching}
\affiliation{Department of Physics, National University of Singapore, 10 Kent Ridge Crescent, Singapore 119260, Singapore}
\author{Jeremy Chong}
\affiliation{Department of Physics, National University of Singapore, 10 Kent Ridge Crescent, Singapore 119260, Singapore}
\author{Wee Kang Chua}
\affiliation{Department of Physics, National University of Singapore, 10 Kent Ridge Crescent, Singapore 119260, Singapore}
\author{Andreas Dewanto}
\affiliation{Department of Physics, National University of Singapore, 10 Kent Ridge Crescent, Singapore 119260, Singapore}
\author{Zhi Han Lim}
\affiliation{Department of Physics, National University of Singapore, 10 Kent Ridge Crescent, Singapore 119260, Singapore}
\begin{abstract}
We show that a passing gravitational wave may influence the spin entropy and spin negativity of a system of $N$ massive spin-$1/2$ particles, in a way that is characteristic of the radiation. We establish the specific conditions under which this effect may be nonzero. The change in spin entropy and negativity, however, is extremely small. Here, we propose and show that this effect may be amplified through entanglement swapping. Relativistic quantum information theory may have a contribution towards the detection of gravitational wave.
\end{abstract}
\maketitle
Relativity and quantum mechanics are the two pillars of 20th century physics. Einstein's general relativity \cite{Hartle} is the classical theory of gravity. Many of its predictions have been experimentally confirmed via very precise measurements. One of its most intriguing predictions is the propagation of ripples in spacetime curvature at the speed of light ($c = 1$) called {\em gravitational waves}. Gravity is a long-range interaction and it is not possible to shield this interaction. Gravitational waves thus provide a new window for exploring astronomical phenomena. However, gravity is the weakest of the four fundamental interactions; this means that gravitational waves are not easily detected. In fact, they have not yet been detected on Earth. Quantum mechanics, in combination with computation and information, leads to unexpected new ways that information can be processed and transmitted, extending the known capabilities in the field of classical information to previously unsuspected limits \cite{Nielsen}. One of the greatest challenges faced by quantum information scientists is the fragility of quantum coherence and entanglement in the presence of environmental decoherence \cite{Zurek}. In particular, multipartite entangled states such as the GHZ states \cite{Greenberger} become more susceptible under certain kinds of noise as the number of particles increases \cite{Carvalho}. Motivated by its fundamental importance in gravitational wave detection and several recent developments in relativistic quantum information theory \cite{Peres1}, we explore, in this paper, the possibility of turning this fragility into a quantum means to detect gravitational radiation.
Peres, {\em et al.} \cite{Peres2} were the first to study the relativistic properties of spin entropy for a single, free particle of spin 1/2 and nonzero mass in flat spacetime. They showed that even if the initial quantum state of the particle is a direct product of a function of momentum and a function of spin, the state under a Lorentz boost is in general not a direct product. This is because the spin undergoes a Wigner rotation \cite{Wigner} whose direction and magnitude depend on the momentum of the particle. Spin and momentum appear to be ``entangled''. As a result, the reduced density matrix for spin becomes mixed and the corresponding entropy becomes nonzero. Slightly later, Gingrich and Adami \cite{Gingrich} showed that Lorentz boost can also affect the entanglement between spins. Namely, a maximally entangled Bell state of two massive spin-1/2 particles loses entanglement under a Lorentz boost.
More recently, Terashima and Ueda \cite{Tera1} (see also \cite{Tera2}) extended the original investigation by Peres {\em et al.}, by considering the relativistic quantum mechanics of a massive spin-1/2 particle moving in curved spacetime, which entails a breakdown of the global $SO(3, 1)$ symmetry associated with flat spacetime. In this case, spin can only be defined locally at each spacetime point by invoking the $SO(3, 1)$ symmetry of some local inertial frame. Specifically, a spin-1/2 particle in curved spacetime is defined as a particle whose one-particle states furnish the spin-1/2 representation of the {\em local Lorentz transformation}. Terashima and Ueda showed that, as a consequence of this local definition, the motion of the particle is accompanied by a continuous succession of local Lorentz transformations, which gives rise to spin entropy production that is unique to the curved spacetime. They illustrated their ideas with the Schwarzschild spacetime in \cite{Tera1, Tera2}.
In this paper, we study the effects on the quantum states of one, two, and in general $N$ spin-1/2 particles due to a plane gravitational wave spacetime propagating in the positive $z$-direction \cite{Hartle}:
\begin{equation}
ds^2 = -dt^2 + [1 + f(t - z)]dx^2 + [1 - f(t - z)]dy^2 + dz^2.
\end{equation}
The size and shape of the propagating ripple in curvature are determined by some dimensionless function $f$ ($|f(t - z)| \ll 1$). For example, for a Gaussian wave packet with width $\omega$ and maximum height $A$,
\begin{equation}
f(t - z) = A\exp\left[-\frac{(t - z)^2}{\omega^2}\right].
\end{equation}
And, for a gravitational wave of amplitude $A$ and definite frequency $\varpi$,
\begin{equation}
f(t - z) = A\sin[\varpi(t - z)].
\end{equation}
The only nonvanishing Christoffel symbols for the above metric are $2\Gamma^t_{xx} = -2\Gamma^t_{yy} = \partial f/\partial t$, $-2\Gamma^z_{xx} = 2\Gamma^z_{yy} = \partial f/\partial z$, $\Gamma^x_{tx} = \partial\ln\sqrt{1 + f}/\partial t$, $\Gamma^x_{xz} = \partial\ln\sqrt{1 + f}/\partial z$, $\Gamma^y_{ty} = \partial\ln\sqrt{1 - f}/\partial t$, and $\Gamma^y_{yz} = \partial\ln\sqrt{1 - f}/\partial z$.
We begin with a single spin-1/2 particle $A$ of mass $m$ in a local inertial frame at the spacetime point $x^{\mu}_i$. This particle is initially prepared at proper time $\tau_i$ in the state
\begin{equation}
|\psi\rangle_A = \int N(k^a)d^3\vec{k}\sum_{\lambda}C(k^a, \lambda)|k^a, \lambda\rangle_A,
\end{equation}
where $N(k^a)d^3\vec{k} = md^3\vec{k}/\sqrt{\vec{k}\cdot\vec{k} + m^2}$ is the Lorentz-invariant volume element. From here on, it is assumed that Latin and Greek letters run over the four inertial-coordinate labels 0, 1, 2, 3 and the four general-coordinate labels, respectively. $|k^a, \lambda\rangle_A$ is the momentum eigenstate of the particle, labeled by the four-momentum $k^a = (\sqrt{\vec{k}\cdot\vec{k} + m^2}, \vec{k})$ and by the $z$-component $\lambda$ ($= \uparrow$ or 0, $\downarrow$ or 1) of the spin. We consider, in particular, the case where the coefficient $C(k^a, \lambda) = D(k^a)\delta_{\lambda 0}$,
\begin{equation}
D(k^a) = \frac{1}{\sqrt{N(k^a)}\sqrt{\pi}w}
\prod_{a = 1, 3}\exp\left[-\frac{(k^a - q^a(x_i))^2}{2w^2}\right]\sqrt{\delta(k^2)}.
\end{equation}
We assume that the spacetime curvature does not change drastically within the spacetime scale of the wave packet. $q^a(x_i)$ is as given in Eq.(18). Together with the orthogonality condition ${_A}\langle k'^a, \lambda'|k^a, \lambda\rangle_A = \delta^3(\vec{k}' - \vec{k})\delta_{\lambda'\lambda}/N(k^a)$ we clearly have ${_A}\langle\psi|\psi\rangle_A = \int N(k^a)d^3\vec{k}\sum_{\lambda}|C(k^a, \lambda)|^2 = 1$, i.e., $|\psi\rangle_A$ is normalized. To ease calculations, we set $k^2$ to zero with no loss of generality. It follows that at $\tau_i$ the reduced density matrix for spin,
\begin{equation}
\rho_A(\tau_i) \equiv \int N(k^a)d^3\vec{k}\ {_A}\langle k^a|\psi\rangle_A\langle\psi|k^a\rangle_A = |0\rangle_A\langle 0|,
\end{equation}
and the corresponding entropy $S_A(\tau_i) \equiv -{\rm tr}[\rho_A(\tau_i)\log_2\rho_A(\tau_i)] = 0$. We will show that at a later proper time $\tau_f$, $\rho_A(\tau_i)$ evolves to
\begin{equation}
\rho'_A(\tau_f) \equiv {\cal E}[\rho_A(\tau_i)]
= \frac{1}{2}\left(\begin{array}{cc} 1 + \bar{c} & \bar{s} \\ \bar{s} & 1 - \bar{c}\end{array}\right),
\end{equation}
with spin entropy $S'_A(\tau_f) = -P\log_2P - (1 - P)\log_2(1 - P)$, $P = (1 - |\bar{u}|)/2$. Here,
\begin{equation}
\bar{u} = \int N(k^a)d^3\vec{k}|D(k^a)|^2\exp(i\Omega),
\end{equation}
with $\Omega = \Omega(k^a; \tau_i, \tau_f, \xi, \vartheta)$ as given in Eq.(22), $\bar{c} = {\rm Re}(\bar{u})$ and $\bar{s} = {\rm Im}(\bar{u})$. It will be useful to note
\begin{eqnarray}
{\cal E}[R^{00}] \equiv {\cal E}[|0\rangle\langle 0|]
= \frac{1}{2}\left(\begin{array}{cc} 1 + \bar{c} & \bar{s} \\ \bar{s} & 1 - \bar{c}\end{array}\right), & &
{\cal E}[R^{01}] \equiv {\cal E}[|0\rangle\langle 1|]
= \frac{1}{2}\left(\begin{array}{cc} -\bar{s} & 1 + \bar{c} \\ -1 + \bar{c} & \bar{s}\end{array}\right), \nonumber \\
{\cal E}[R^{10}] \equiv {\cal E}[|1\rangle\langle 0|]
= \frac{1}{2}\left(\begin{array}{cc} -\bar{s} & -1 + \bar{c} \\ 1 + \bar{c} & \bar{s}\end{array}\right), & &
{\cal E}[R^{11}] \equiv {\cal E}[|1\rangle\langle 1|]
= \frac{1}{2}\left(\begin{array}{cc} 1 - \bar{c} & -\bar{s} \\ -\bar{s} & 1 + \bar{c}\end{array}\right).
\end{eqnarray}
Next, we consider two spin-1/2 particles $A$ and $B$ with equal mass $m$, initially prepared in the state
\begin{equation}
|\Psi\rangle_{AB} = \int\int N(k^a)N(p^b)d^3\vec{k}d^3\vec{p}\sum_{\lambda, \sigma}C(k^a, \lambda; p^b, \sigma)
|k^a, \lambda\rangle_A \otimes |p^b, \sigma\rangle_B,
\end{equation}
with $C(k^a, \lambda; p^b, \sigma) = D(k^a)D(p^b)\delta_{\lambda\sigma}$. By writing $|\Psi\rangle_{AB}$ as a density matrix and tracing over the momentum degrees of freedom, we obtain, at $\tau_i$, a maximally entangled Bell state
\begin{equation}
\chi_{AB}(\tau_i) = |\Psi^0_{Bell}\rangle_{AB}\langle\Psi^0_{Bell}| = \frac{1}{2}\sum^1_{j, k = 0} R^{jk}_A \otimes R^{jk}_B,
\end{equation}
where $|\Psi^0_{Bell}\rangle \equiv (|00\rangle + |11\rangle)/\sqrt{2}$, $S_{AB}(\tau_i) \equiv -{\rm tr}[\chi_{AB}(\tau_i)\log_2\chi_{AB}(\tau_i)] = 0$, and (spin) {\em negativity} ${\cal N}[\chi_{AB}(\tau_i)] = 1$. Consider a density matrix $\chi_{AB}$ and its partial transposition $\chi^{T_A}_{AB}$ for a two spin-1/2 system $AB$. $\chi_{AB}$ is entangled if and only if $\chi^{T_A}_{AB}$ has any negative eigenvalues \cite{Peres3, Horodecki}. The negativity \cite{Vidal} is a computable measure of entanglement defined by ${\cal N}[\chi_{AB}] \equiv \max\{-2\sum_i\eta_i,\ 0\}$, where $\eta_i$ is a negative eigenvalue of $\chi^{T_A}_{AB}$. At $\tau_f$, we will show that
\begin{equation}
\chi'_{AB}(\tau_f) = \frac{1}{2}\sum^1_{j, k = 0} {\cal E}[R^{jk}_A] \otimes {\cal E}[R^{jk}_B]
= \frac{1}{4}\left(\begin{array}{cccc}
1 + |\bar{u}|^2 & 0 & 0 & 1 + |\bar{u}|^2 \\
0 & 1 - |\bar{u}|^2 & -(1 - |\bar{u}|^2) & 0 \\
0 & -(1 - |\bar{u}|^2) & 1 - |\bar{u}|^2 & 0 \\
1 + |\bar{u}|^2 & 0 & 0 & 1 + |\bar{u}|^2
\end{array}\right),
\end{equation}
with $S'_{AB}(\tau_f) = -P\log_2P - (1 - P)\log_2(1 - P)$ but $P = (1 - |\bar{u}|^2)/2$ (see FIG. 1 and 2), and ${\cal N}[\chi'_{AB}(\tau_f)] = |\bar{u}|^2$ (see FIG. 3 and 4). Generalization to a system of $N$ spin-$1/2$ particles is straightforward:
\begin{eqnarray}
\chi_{A_1\cdots A_N}(\tau_i) & = & |\Psi^0_{GHZ}\rangle_{A_1\cdots A_N}\langle\Psi^0_{GHZ}|
= \frac{1}{2}\sum^1_{j, k = 0} R^{jk}_{A_1} \otimes \cdots \otimes R^{jk}_{A_N}, \nonumber \\
\chi'_{A_1\cdots A_N}(\tau_f) & = & \frac{1}{2}\sum^1_{j, k = 0} {\cal E}[R^{jk}_{A_1}] \otimes \cdots \otimes {\cal E}[R^{jk}_{A_N}].
\end{eqnarray}
Here, $|\Psi^0_{GHZ}\rangle \equiv (|0\cdots 0\rangle + |1\cdots 1\rangle)/\sqrt{2}$ \cite{Greenberger}.
In order to measure the effects described by Eqs.(7) and (12), we introduce a {\em static} observer at each spacetime point along the ``trajectory'' of the particle(s). Each observer is assigned a local inertial frame defined by the following convenient choice of {\em vierbein} $e^{\mu}_a(x)$:
\begin{equation}
e^t_0(x) = 1, e^x_1(x) = \frac{1}{\sqrt{1 + f}}, e^y_2(x) = \frac{1}{\sqrt{1 - f}}, e^z_3(x) = 1,
\end{equation}
with all the other components being zero. Furthermore, we demand that the particle(s) be moving with four-velocity
\begin{equation}
u^{\mu}(x) = (\cosh\xi, \frac{\sinh\xi\sin\vartheta}{\sqrt{1 + f}}, 0, \sinh\xi\cos\vartheta)
\end{equation}
or four-momentum $q^{\mu}(x) = mu^{\mu}(x)$. Here, $\tanh\xi \equiv v$ ($=$ constant $< 1$), i.e., $\xi$ is the rapidity in the local inertial frame, and $0 < \vartheta < \pi/2$. In order for the particle(s) to move in this way, which is not a geodesic motion, we must apply an external force. The acceleration due to this external force is given by $a^{\mu}(x) = u^{\lambda}(x)\nabla_{\lambda}u^{\mu}(x)$:
\begin{equation}
a^{\mu}(x) =
(\sinh^2\xi\sin^2\vartheta\frac{\partial}{\partial t}\ln\sqrt{1 + f}, \frac{F\sinh\xi\sin\vartheta}{\sqrt{1 + f}}, 0, -\sinh^2\xi\sin^2\vartheta\frac{\partial}{\partial z}\ln\sqrt{1 + f}),
\end{equation}
where
\begin{equation}
F = F(t, z; \xi, \vartheta) \equiv \left(\cosh\xi\frac{\partial}{\partial t} + \sinh\xi\cos\vartheta\frac{\partial}{\partial z}\right)\ln\sqrt{1 + f} = \frac{d}{d\tau}\ln\sqrt{1 + f(t - z)}.
\end{equation}
The inverse of the vierbein $e^a_{\mu}(x)$ in Eq.(14) is given by $e^0_t(x) = 1, e^1_x(x) = \sqrt{1 + f}, e^2_y(x) = \sqrt{1 - f}, e^3_z(x) = 1$. The vierbein transforms a tensor in a general coordinate system $x^{\mu}$ into that in a local inertial frame $x^a$. For instance,
\begin{equation}
q^a(x) = e^a_{\mu}(x)q^{\mu}(x) = (m\cosh\xi, m\sinh\xi\sin\vartheta, 0, m\sinh\xi\cos\vartheta),
\end{equation}
and similarly, $a^a(x) = e^a_{\mu}(x)a^{\mu}(x)$ yields
\begin{equation}
a^a(x) =
(\sinh^2\xi\sin^2\vartheta\frac{\partial}{\partial t}\ln\sqrt{1 + f}, F\sinh\xi\sin\vartheta, 0, -\sinh^2\xi\sin^2\vartheta\frac{\partial}{\partial z}\ln\sqrt{1 + f}).
\end{equation}
A straightforward calculation shows that the nonzero components of the spin connection $\omega^a_{\mu b}(x) \equiv e^a_{\lambda}(x)\nabla_{\mu}e^{\lambda}_b(x)$ are $\omega^0_{x1}(x) = \omega^1_{x0}(x) = \sqrt{1 + f}\partial\ln\sqrt{1 + f}/\partial t$, $\omega^1_{x3}(x) = -\omega^3_{x1}(x) = \sqrt{1 + f}\partial\ln\sqrt{1 + f}/\partial z$, $\omega^0_{y2}(x) = \omega^2_{y0}(x) = \sqrt{1 - f}\partial\ln\sqrt{1 - f}/\partial t$, and $\omega^2_{y3}(x) = -\omega^3_{y2}(x) = \sqrt{1 - f}\partial\ln\sqrt{1 - f}/\partial z$.
Suppose at proper time $\tau$ the particle(s) is at $x^{\mu}$. After an infinitesimal proper time $d\tau$, the particle(s) moves to a new local inertial frame at the new point $x'^{\mu} = x^{\mu} + u^{\mu}d\tau$. $q^a(x)$ changes to $q^a(x') = q^a(x) + \delta q^a(x) = \Lambda^a_{\ b}(x)q^b(x)$, where the infinitesimal local Lorentz transformation $\Lambda^a_{\ b}(x) \equiv \delta^a_{\ b} + \lambda^a_{\ b}(x)d\tau$, with $\lambda^a_{\ b}(x) \equiv -[a^a(x)q_b(x) - q^a(x)a_b(x)]/m + \chi^a_{\ b}(x)$ and $\chi^a_{\ b}(x) \equiv -u^{\mu}(x)\omega^a_{\mu b}(x)$. For our case, we have $\lambda^0_{\ 1}(x) = \lambda^1_{\ 0}(x) = \sinh^2\xi\cos\vartheta\sin\vartheta G(t, z; \xi, \vartheta)$, $\lambda^0_{\ 3}(x) = \lambda^3_{\ 0}(x) = -\sinh^2\xi\sin^2\vartheta G(t, z; \xi, \vartheta)$, and $\lambda^1_{\ 3}(x) = -\lambda^3_{\ 1}(x) = -\cosh\xi\sinh\xi\sin\vartheta G(t, z; \xi, \vartheta)$; where $G(t, z; \xi, \vartheta) \equiv (\sinh\xi\cos\vartheta\partial/\partial t + \cosh\xi\partial/\partial z)\ln\sqrt{1 + f(t - z)} = -F(t, z; \xi, \vartheta)$. Corresponding to $\Lambda^a_{\ b}(x)$ is the infinitesimal local Wigner rotation $W^a_{\ b}(x) \equiv \delta^a_{\ b} + \varphi^a_{\ b}(x)d\tau$, where $\varphi^0_{\ 0}(x) = \varphi^0_{\ i}(x) = \varphi^i_{\ 0}(x) = 0$ and $\varphi^i_{\ j}(x) = \lambda^i_{\ j}(x) + [\lambda^i_0(x)k_j - k^i\lambda_{j0}(x)]/(\sqrt{\vec{k}\cdot\vec{k} + m^2} + m)$. Its spin-1/2 representation is $D^{(1/2)}(W(x)) = \sigma^0 + i[\varphi_{23}(x)\sigma^1 + \varphi_{31}(x)\sigma^2 + \varphi_{12}(x)\sigma^3]d\tau/2$, with the identity matrix $\sigma^0$ and the Pauli matrices $\{\sigma^1, \sigma^2, \sigma^3\}$. It follows that $\varphi^1_{\ 3}(x) = -G(t, z; \xi, \vartheta)H(k^a; \xi, \vartheta)$, with
\begin{equation}
H(k^a; \xi, \vartheta) \equiv
\left(1 - \frac{k^1\sin\vartheta + k^3\cos\vartheta}{\sqrt{\vec{k}\cdot\vec{k} + m^2} + m}\tanh\xi\right)\cosh\xi\sinh\xi\sin\vartheta.
\end{equation}
Hence, for a finite proper time interval, $\tau_f - \tau_i$, we have
\begin{equation}
D^{(1/2)}(W(x_f, x_i)) = \exp\left[-\frac{i}{2}\sigma^2\Omega(k^a; \tau_i, \tau_f, \xi, \vartheta)\right],
\end{equation}
where
\begin{eqnarray}
\Omega(k^a; \tau_i, \tau_f, \xi, \vartheta)
& \equiv & \int^{\tau_f}_{\tau_i}\varphi^1_{\ 3}(x)d\tau \nonumber \\
& = & H(k^a; \xi, \vartheta)\left[\ln\sqrt{1 + f(t_f - z_f)} - \ln\sqrt{1 + f(t_i - z_i)}\right] \nonumber \\
& \approx & \frac{1}{2}[f(t_f - z_f) - f(t_i - z_i)]H(k^a; \xi, \vartheta).
\end{eqnarray}
Consequently, $|\psi\rangle_A$ evolves to
\begin{equation}
|\psi'\rangle_A = \int N(k^a)d^3\vec{k}\sum_{\lambda, \lambda'}C(k^a, \lambda)
D^{(1/2)}_{\lambda'\lambda}(W(x_f, x_i))|\Lambda(x_f, x_i)k^a, \lambda'\rangle_A,
\end{equation}
and similarly $|\Psi\rangle_{AB}$ to $|\Psi'\rangle_{AB} = \int N(k^a)N(p^b)d^3\vec{k}d^3\vec{p}\sum_{\lambda, \lambda', \sigma, \sigma'}C(k^a, \lambda; p^b, \sigma) \times D^{(1/2)}_{\lambda'\lambda}(W(x_f, x_i))|\Lambda(x_f, x_i)k^a, \lambda'\rangle_A \otimes D^{(1/2)}_{\sigma'\sigma}(W(x_f, x_i))|\Lambda(x_f, x_i)p^b, \sigma'\rangle_B$. We obtain Eqs.(7) and (12) by writing $|\psi'\rangle_A$ and $|\Psi'\rangle_{AB}$ respectively as density matrices, and tracing over the momentum degrees of freedom. This completes what we set out to do.
In summary, we have shown that the spin entropy of a single massive spin-1/2 particle may change under the influence of a passing gravitational wave. Interestingly, this change has a dependence on the shape of the wave [see Eqs.(8) and (22)]. In other words, by determining the entropy change, one could in principle deduce $f$. To measure this change, one could prepare an identical ensemble of many particles in the state $|\psi\rangle$ [Eq.(4)] and subject them to an external force that produces the acceleration in Eq.(16). The observers at each spacetime point then select a subensemble of particles to determine as accurately as possible its spin state. The variation of the spin entropy with proper time can then be determined. We may also consider the same experimental setup for two- or $N$-particle state $|\Psi\rangle$ [Eq.(10) or its generalization, which gives Eq.(13)]. In this case, we can analyze the entanglement properties of the resulting states. Specifically, we have ${\cal N}[\chi'_{AB}(\tau_f)] = |\bar{u}|^2$.
We have to emphasize that the above effect, even though nonzero, is extremely tiny, especially in light of the fact that the height or amplitude $A$ of a gravitational wave may be of the order of $10^{-21}$. Consequently, $|\bar{u}|^2$ would be extremely close to $1$. So, in order to measure such a minute effect, we need to ``amplify'' or ``concentrate'' it. Our preliminary analysis of the 3- to 7-particle states shows that although a passing gravitational wave may have a greater effect on the 3-particle state compared to a 2-particle one, the 4-, 5-, 6-, and 7-particle states are surprisingly robust. Thus, it seems, considering $N$-particle states (with $N \geq 4$) does not help. Here, we turn to another well-known phenomenon in quantum information science, {\em entanglement swapping} \cite{Zukowski}. Briefly, we analyze the negativity of the resulting two-particle state
\begin{equation}
\Xi^{(4)}_{A_1A_2} \equiv \frac{1}{p_i}{\rm tr}_{B_1B_2}
[(I_{A_1A_2} \otimes |\Psi^i_{Bell}\rangle_{B_1B_2}\langle\Psi^i_{Bell}|)(\chi'_{A_1B_1}(\tau_f) \otimes \chi'_{A_2B_2}(\tau_f))],
\end{equation}
where $|\Psi^i_{Bell}\rangle = (\sigma^i \otimes \sigma^0)|\Psi^0_{Bell}\rangle$ ($i = 0, 1, 2, 3$) and $p_i = {\rm tr}[(I_{A_1A_2} \otimes |\Psi^i_{Bell}\rangle_{B_1B_2}\langle\Psi^i_{Bell}|) \times (\chi'_{A_1B_1}(\tau_f) \otimes \chi'_{A_2B_2}(\tau_f))] = 1/4$ is the probability of obtaining outcome $i$ from the Bell basis measurement. Particles $B_1$ and $B_2$ become maximally entangled, but $\Xi^{(4)}_{A_1A_2}$ yields ${\cal N}[\Xi^{(4)}_{A_1A_2}] = |\bar{u}|^4$. This is therefore an amplification of the decoherence effect due to a gravitational wave. We repeat the procedure with $\chi'_{A_jB_j}(\tau_f)$ in Eq.(24) replaced by $\Xi^{(4)}_{A_jA_j}$ to obtain $\Xi^{(8)}_{A_1A_2}$, which has negativity ${\cal N}[\Xi^{(8)}_{A_1A_2}] = |\bar{u}|^8$. It is not difficult to see how one can achieve ${\cal N}[\Xi^{(n)}_{A_1A_2}] = |\bar{u}|^n$, with $n$ the number of particles. Hence, instead of a direct measurement on the spin states, we subject the particles to the above cycles of entanglement swapping, obtaining a smaller number of pairs of particles with negativities, which differ appreciably from $1$.
In conclusion, we have established the specific conditions under which the spin entropy or negativity of massive spin-$1/2$ particles may change due to a passing gravitational wave. This very small change may be amplified via the above entanglement swapping scheme, and may be measurable. It is therefore, a potentially viable means of gravitational wave detection. More generally, our results demonstrate the exciting possibility of detecting measurable effects due to spacetime curvature using ideas and tools developed in quantum information science. Effects including those due to our expanding universe will be discussed in a longer paper in preparation \cite{ZhiHan}.
\end{document}
\begin{document}
\begin{abstract}
We generalize the concept of localization of a star operation to flat overrings; subsequently, we investigate the possibility of representing the set $\mathrm{Star}(R)$ of star operations on $R$ as the product of $\mathrm{Star}(T)$, as $T$ ranges in a family of overrings of $R$ with special properties. We then apply this method to study the set of star operations on a Pr\"ufer domain $R$, in particular the set of stable star operations and the star-class groups of $R$.
\end{abstract}
\title{Jaffard families and localizations of star operations}
\section{Introduction}
Recently, the study of star operations, initiated by the works of Krull \cite{krull_idealtheorie} and Gilmer \cite[Chapter 32]{gilmer}, has focused on studying the whole set $\mathrm{Star}(R)$ of star operations on $R$, and in particular its cardinality. Using as a starting point the characterization of domains with $|\mathrm{Star}(R)|=1$ due to Heinzer \cite{heinzer_d=v}, Houston, Mimouni and Park have devoted a series of papers \cite{twostar,houston_noeth-starfinite,hmp_finite,starnoeth_resinfinito} to this study, obtaining, among other results, a characterization of Pr\"ufer domains with two star operations \cite[Theorem 3.3]{twostar} and the precise determination of $|\mathrm{Star}(R)|$ on some classes of one-dimensional Noetherian domains \cite{houston_noeth-starfinite,starnoeth_resinfinito}. Their work is based -- at least partly -- on the concept of \emph{localization} of finite-type star operations to localizations of the ring.
The purpose of this paper is to generalize the concept of localization of a star operation $\ast$, by avoiding (when possible) the hypothesis that $\ast$ is of finite type and by considering, instead of localizations, flat overrings of the base ring $R$. In particular, we will prove that, if $R$ admits a family of overrings with certain properties (precisely, a \emph{Jaffard family} \cite[Section 6.3]{fontana_factoring}) then $\mathrm{Star}(R)$ can be represented as a cartesian product of $\mathrm{Star}(T)$, as $T$ ranges in this family, and that this representation preserves the main properties of the star operations.
We then specialize to the case of Pr\"ufer domains, when this approach is complemented by the possibility, in certain cases, to link star operations on $R$ with star operations on a quotient of $R$. This method allows one to obtain a better grasp of several properties, like being a stable operation (Proposition \ref{prop:prufer-stab}), and to describe the star-class group of $R$ in terms of the class groups of some localizations of $R$.
\section{Preliminaries and notation}
Let $R$ be an integral domain with quotient field $K$, and denote by $\mathcal{F}(R)$ the set of fractional ideals of $R$. A \emph{star operation} on $R$ is a map $\ast:\mathcal{F}(R)\longrightarrow\mathcal{F}(R)$, $I\mapsto I^\ast$ such that, for every $I,J\in\mathcal{F}(R)$ and $x\in K$,
\begin{enumerate}[(a)]
\item $I\subseteq I^\ast$;
\item $(I^\ast)^\ast=I^\ast$;
\item if $I\subseteq J$, then $I^\ast\subseteq J^\ast$;
\item $R^\ast=R$;
\item $(xI)^\ast=x\cdot I^\ast$.
\end{enumerate}
The set of star operations on $R$ is denoted by $\mathrm{Star}(R)$. An ideal $I$ is a \emph{$\ast$-ideal} if $I=I^\ast$.
Similarly, a \emph{semistar operation} on $R$ is a map $\ast:\mathbf{F}(R)\longrightarrow\mathbf{F}(R)$ (where $\mathbf{F}(R)$ is the set of $R$-submodules of $K$) satisfying the previous properties, except for $R^\ast=R$; if $\ast$ verifies also the latter, then it is said to be a \emph{(semi)star operation}. We indicate the sets of semistar and (semi)star operations by $\mathrm{SStar}(R)$ and $\mathrm{(S)Star}(R)$, respectively. A \emph{semiprime operation} is a map $c$, from the set of integral ideals of $R$ to itself, that satisfies the first four properties of star operations and, moreover, such that $xI^c\subseteq(xI)^c$ for every $x\in R$.
A star operation is said to be:
\begin{itemize}
\item \emph{of finite type} if, for every $I$,
\begin{equation*}
I^\ast=\bigcup\{J^\ast\mid J\subseteq I,~J\text{~is finitely generated}\};
\end{equation*}
\item \emph{semifinite} if any proper $\ast$-ideal $I$ is contained in a prime $\ast$-ideal;
\item \emph{stable} if $(I\cap J)^\ast=I^\ast\cap J^\ast$ for all ideals $I,J$;
\item \emph{spectral} if it is in the form $I^\ast=\bigcap\{IR_P\mid P\in\Delta\}$ for some $\Delta\subseteq\mathrm{Spec}(R)$; equivalently, $\ast$ is spectral if and only if it is stable and semifinite \cite[Theorem 4]{anderson_overrings_1988};
\item \emph{endlich arithmetisch brauchbar} (\emph{eab} for short) if, for every nonzero finitely generated ideals $F,G,H$ such that $(FG)^\ast\subseteq(FH)^\ast$, we have $G^\ast\subseteq H^\ast$; if this property holds for arbitrary nonzero fractional ideals $G,H$ (but $F$ still finitely generated) then $\ast$ is said to be \emph{arithmetisch brauchbar} (\emph{ab} for short);
\item \emph{Noetherian} if any nonempty set $\{I_\alpha\mid \alpha\in A\}$ of proper $\ast$-ideals has a maximal element or, equivalently, if every ascending chain of $\ast$-closed ideals stabilizes. (More commonly, under this hypothesis $R$ is said to be \emph{$\ast$-Noetherian} \cite{zaf_ACCstar}.)
\end{itemize}
The set of star operations has a natural order, such that $\ast_1\leq\ast_2$ if and only if $I^{\ast_1}\subseteq I^{\ast_2}$ for every ideal $I$, or equivalently if and only if every $\ast_2$-closed ideal is also $\ast_1$-closed. Under this order, $\mathrm{Star}(R)$ becomes a complete lattice, where the minimum is the identity (usually denoted by $d$) and the maximum the $v$-operation (or \emph{divisorial closure}) $I\mapsto(R:(R:I))$.
If $R$ is an integral domain, an \emph{overring} of $R$ is a ring $T$ contained between $R$ and its quotient field $K$. A family $\Theta$ of overrings of $R$ is \emph{locally finite} (or \emph{of finite character}) if every $x\in K\setminus\{0\}$ (or, equivalently, every $x\in R\setminus\{0\}$) is a nonunit in only finitely many $T\in\Theta$. The ring $R$ itself is said to be of finite character if $\{R_M:M\in\mathrm{Max}(R)\}$ is a family of finite character.
A flat overring of $R$ is an overring that is flat as an $R$-module. If $T$ is a flat overring, then $(I_1\cap\cdots\cap I_n)T=I_1T\cap\cdots\cap I_nT$ for every $I_1,\ldots,I_n\in\mathbf{F}(R)$, and $(I:J)T=(IT:JT)$ for every $I,J\in\mathbf{F}(R)$ with $J$ finitely generated \cite[Theorem 7.4]{matsumura} (see also \cite[Proposition 2]{compact-intersections}).
\section{Extendable star operations}\label{sect:extension}
The starting point is the notion of localization of a star operation, originally defined in \cite{twostar}. We shall adopt a more general and more abstract approach.
\begin{defin}\label{defin:starloc}
Let $R$ be an integral domain and $T$ a flat overring of $R$. We say that a star operation $\ast\in\mathrm{Star}(R)$ is \emph{extendable to $T$} if the map
\begin{equation}
\begin{aligned}
\ast_T\colon \mathcal{F}(T) & \longrightarrow \mathcal{F}(T)\\
IT & \longmapsto I^\ast T
\end{aligned}
\end{equation}
is well-defined (where $I$ is a fractional ideal of $R$).
\end{defin}
\begin{oss}\label{oss:def}
~\begin{enumerate}
\item If $T$ is flat over $R$, then every fractional ideal of $T$ is an extension of a fractional ideal of $R$ (since, if $J$ is an integral ideal of $T$, $J=(J\cap R)T$); therefore, $\ast_T$ is (potentially) defined on all of $\mathcal{F}(T)$.
\item\label{oss:def:primi} If $T$ is flat over $R$ and $P$ is a prime of $R$ such that $PT\neq T$, then $PT$ is a prime ideal of $T$. Indeed, let $Q$ be a minimal prime of $PT$. By the previous point, $Q=(Q\cap R)T$; suppose $P\subsetneq Q\cap R$. By \cite[Theorem 2]{richamn_generalized-qr}, $T_Q=R_{Q\cap R}$, and thus $PT_Q$ is not minimal over $QT_Q=(Q\cap R)T_Q$, a contradiction. Note that the equality $T_Q=R_{Q\cap R}$ also shows that there is at most one $Q\in\mathrm{Spec}(T)$ over any $P\in\mathrm{Spec}(R)$.
\item When $T=S^{-1}R$ is a localization of $R$ and $\ast$ is of finite type, Definition \ref{defin:starloc} coincides with the definition of $\ast_S$ given in \cite[Proposition 2.4]{twostar}.
\item When $T=R_P$ for some $P\in\mathrm{Spec}(R)$, we will sometimes denote $\ast_T$ with $\ast_P$.
\end{enumerate}
\end{oss}
The following proposition shows the basic properties of extendability.
\begin{prop}\label{prop:starloc:basic}
Let $R$ be an integral domain, let $\ast\in\mathrm{Star}(R)$ and let $T$ be a flat overring of $R$.
\begin{enumerate}[(a)]
\item\label{prop:starloc:basic:star} If $\ast$ is extendable to $T$, then $\ast_T$ is a star operation.
\item\label{prop:starloc:basic:equiv} $\ast$ is extendable to $T$ if and only if $I^\ast T=J^\ast T$ whenever $IT=JT$.
\item\label{prop:starloc:basic:id} The identity star operation $d$ is always extendable, and $d_T$ is the identity on $T$.
\item\label{prop:starloc:basic:ft} If $\ast$ is of finite type, then it is extendable to $T$, and $\ast_T$ is of finite type.
\end{enumerate}
\end{prop}
Note that, if $T$ is a localization of $R$, point \ref{prop:starloc:basic:ft} is proved in \cite[Proposition 2.4]{twostar}.
\begin{proof}
\ref{prop:starloc:basic:star} and \ref{prop:starloc:basic:id} are obvious, while \ref{prop:starloc:basic:equiv} is just a reformulation of Definition \ref{defin:starloc}.
For \ref{prop:starloc:basic:ft}, by symmetry it is enough to show that $J^\ast T\subseteq I^\ast T$, or equivalently that $1\in(I^\ast T:J^\ast T)$. Since $\ast$ is of finite type,
\begin{equation*}
(I^\ast T:J^\ast T)=\left(I^\ast T:\left(\sum_{\substack{L\subseteq J\\ L\text{~finitely generated}}}L^\ast\right)T\right)= \left(I^\ast T:\sum_{\substack{L\subseteq J\\ L\text{~fin. gen.}}}L^\ast T\right)=\end{equation*}\begin{equation*}
=\bigcap_{\substack{L\subseteq J\\ L\text{~fin. gen.}}}(I^\ast T:L^\ast T)\supseteq\bigcap_{\substack{L\subseteq J\\ L\text{~fin. gen.}}}(I^\ast:L^\ast)T.
\end{equation*}
By properties of star operations, $(I^\ast:L^\ast)=(I^\ast:L)$; since $L$ is finitely generated and $T$ is flat, it follows that, for every $L$,
\begin{equation*}
(I^\ast:L^\ast)T=(I^\ast:L)T=(I^\ast T:LT)
\end{equation*}
which contains 1 since $LT\subseteq JT=IT\subseteq I^\ast T$. Hence, $1\in(I^\ast T:J^\ast T)$, as requested.
\end{proof}
\begin{ex}\label{ex:ad}
Not every star operation is extendable: let $R$ be an almost Dedekind domain which is not Dedekind (i.e., a one-dimensional non-Noetherian domain such that $R_M$ is a discrete valuation ring for every $M\in\mathrm{Max}(R)$), and let $P$ be a non-finitely generated prime ideal of $R$. Then $P$ is not divisorial \cite[Lemma 4.1.8]{fontana_libro}, and thus the $v$-operation is not extendable to $R_P$, since otherwise $(PR_P)^{v_P}=P^vR_P=R_P$, while the unique star operation on $R_P$ is the identity.
\end{ex}
Besides being of finite type, extension preserves the main properties of a star operation.
\begin{prop}\label{prop:estensione-prop}
Let $R$ be a domain and $T$ be a flat overring of $R$; suppose $\ast\in\mathrm{Star}(R)$ is extendable to $T$. If $\ast$ is stable (respectively, spectral, Noetherian) then so is $\ast_T$.
\end{prop}
\begin{proof}
Suppose $\ast$ is stable, and let $I_1:=J_1T$, $I_2:=J_2T$ be ideals of $T$, where $J_1$ and $J_2$ are ideals of $R$. Then,
\begin{equation*}
\begin{array}{rcl}
(I_1\cap I_2)^{\ast_T} & = & (J_1T\cap J_2T)^{\ast_T}=[(J_1\cap J_2)T]^{\ast_T}=\\
& = & (J_1\cap J_2)^\ast T=(J_1^\ast\cap J_2^\ast)T=J_1^\ast T\cap J_2^\ast T=I_1^{\ast_T}\cap I_2^{\ast_T}
\end{array}
\end{equation*}
and thus $\ast_T$ is stable.
If $\ast$ is spectral, it is stable, and thus so is $\ast_T$. Let now $I$ be a proper $\ast_T$-closed ideal of $T$, and let $J:=I\cap R$; then, $JT=(I\cap R)T=I$, and thus $J^\ast\subseteq I^{\ast_T}\cap R=I\cap R=J$, so that $J$ is a $\ast$-ideal. By definition, there is a $\Delta\subseteq\mathrm{Spec}(R)$ such that $\ast=\ast_\Delta$; hence,
\begin{equation*}
J=J^\ast=\bigcap_{P\in\Delta}JR_P=\bigcap_{P\in\Delta}(I\cap R)R_P=\bigcap_{P\in\Delta}IR_P\cap R_P.
\end{equation*}
In particular, there is a $P\in\Delta$ such that $1\notin IR_P=ITR_P$; hence, there is a $Q\in\mathrm{Spec}(TR_P)$ such that $ITR_P\subseteq Q$. We claim that $Q_0:=Q\cap T$ is a prime $\ast_T$-ideal containing $I$. Indeed, $I\subseteq ITR_P\cap T\subseteq Q\cap T=Q_0$; moreover, since $Q\cap R=Q_0\cap R\subseteq P$, $Q_0=LT$ for some prime ideal $L$ of $T$ contained in $P$ (Remark \ref{oss:def}\ref{oss:def:primi}), and thus
\begin{equation*}
Q_0^{\ast_T}=L^\ast T\subseteq (LR_P\cap R)T=LT=Q_0.
\end{equation*}
Therefore, $\ast_T$ is also semifinite, and by \cite[Theorem 4]{anderson_overrings_1988} it is spectral.
Suppose $\ast$ is Noetherian, and let $\{I_\alpha T\mid\alpha\in A\}$ be an ascending chain of $\ast_T$-ideals. Then, $\{I_\alpha^\ast\mid\alpha\in A\}$ is an ascending chain of $\ast$-ideals, which has to stabilize at $I_{\overline{\alpha}}$. Hence, the original chain stabilizes at $I_{\overline{\alpha}}T$, and $\ast_T$ is Noetherian.
\end{proof}
Extendability works well with the order structure of $\mathrm{Star}(R)$.
\begin{prop}\label{prop:starloc:ordine}
Let $R$ be an integral domain and $T$ be a flat overring of $R$. Let $\ast_1,\ast_2,\{\ast_\lambda\mid\lambda\in\Lambda\}$ be star operations that are extendable to $T$.
\begin{enumerate}[(a)]
\item\label{prop:starloc:ordine:leq} If $\ast_1\leq\ast_2\in\mathrm{Star}(R)$, then $(\ast_1)_T\leq(\ast_2)_T$.
\item\label{prop:starloc:ordine:wedge} $\ast_1\wedge\ast_2$ is extendable to $T$ and $(\ast_1\wedge\ast_2)_T=(\ast_1)_T\wedge(\ast_2)_T$.
\item\label{prop:starloc:ordine:supft} If each $\ast_\lambda$ is of finite type, then $\sup_\lambda\ast_\lambda$ is extendable to $T$ and $(\sup_\lambda\ast_\lambda)_T=\sup_\lambda(\ast_\lambda)_T$.
\end{enumerate}
\end{prop}
\begin{proof}
\ref{prop:starloc:ordine:leq} If $\ast_1\leq\ast_2$, then $I^{\ast_1}\subseteq I^{\ast_2}$ for every fractional ideal $I$, and thus $(I^{\ast_1}T)\subseteq(I^{\ast_2}T)$. Using the definition of $\ast_T$, we get $(\ast_1)_T\leq(\ast_2)_T$.
\ref{prop:starloc:ordine:wedge} Let $I$ be an ideal of $R$. By definition, $I^{\ast_1\wedge\ast_2}=I^{\ast_1}\cap I^{\ast_2}$, so that
\begin{equation*}
(IT)^{(\ast_1\wedge\ast_2)_T}=(I^{\ast_1\wedge\ast_2})T=(I^{\ast_1}\cap I^{\ast_2})T=\end{equation*}\begin{equation*}
=I^{\ast_1}T\cap I^{\ast_2}T=(IT)^{(\ast_1)_T}\cap (IT)^{(\ast_2)_T}=(IT)^{(\ast_1)_T\wedge(\ast_2)_T}
\end{equation*}
and thus $(\ast_1\wedge\ast_2)_T=(\ast_1)_T\wedge(\ast_2)_T$.
\ref{prop:starloc:ordine:supft} Let $\ast:=\sup_\lambda\ast_\lambda$. Since each $\ast_\lambda$ is of finite type, so is $\ast$ \cite[p.1628]{anderson_examples}, and thus $\ast$ is extendable to $T$ by Proposition \ref{prop:starloc:basic}\ref{prop:starloc:basic:ft}. Moreover, again by \cite[p.1628]{anderson_examples}, $I^\ast=\sum I^{\ast_1\circ\cdots\circ\ast_n}$, as $(\ast_1,\ldots,\ast_n)$ ranges among the finite strings of elements of $\{\ast_\lambda\mid\lambda\in\Lambda\}$ (here $\ast_1\circ\cdots\circ\ast_n$ indicates simply the composition of $\ast_1,\ldots,\ast_n$); therefore,
\begin{equation*}
I^\ast T=\left(\sum I^{\ast_1\circ\cdots\circ\ast_n}\right)T=\sum I^{\ast_1\circ\cdots\circ\ast_n}T.
\end{equation*}
We claim that $I^{\ast_1\circ\cdots\circ\ast_n}T=(IT)^{(\ast_1)_T\circ\cdots\circ(\ast_n)_T}$; we proceed by induction. The case $n=1$ is just the definition of the extension; suppose the claim holds for $m<n$. Then,
\begin{equation*}
I^{\ast_1\circ\cdots\circ\ast_n}T=(I^{\ast_1})^{\ast_2\circ\cdots\circ\ast_n}T= (I^{\ast_1}T)^{(\ast_2)_T\circ\cdots\circ(\ast_n)_T}=(IT)^{(\ast_1)_T\circ\cdots\circ(\ast_n)_T}
\end{equation*}
as claimed. Thus,
\begin{equation*}
I^\ast T=\sum(IT)^{(\ast_1)_T\circ\cdots\circ(\ast_n)_T}=(IT)^{\sup_\lambda(\ast_\lambda)_T}
\end{equation*}
the last equality coming from \cite[p.1628]{anderson_examples} and Proposition \ref{prop:starloc:basic}\ref{prop:starloc:basic:ft}. Hence, $\ast_T=\sup_\lambda(\ast_\lambda)_T$.
\end{proof}
Extendability is also transitive:
\begin{prop}\label{prop:ext-transitivo}
Let $R$ be a domain and $T_1\subseteq T_2$ be two flat overrings of $R$. If $\ast\in\mathrm{Star}(R)$ is extendable to $T_1$ and $\ast_{T_1}$ is extendable to $T_2$, then $\ast$ is extendable to $T_2$, and $\ast_{T_2}=(\ast_{T_1})_{T_2}$.
\end{prop}
\begin{proof}
Note first that if $T_2$ is flat over $R$ then it is flat over $T_1$, and thus it makes sense to speak of the extendability of $\ast_{T_1}$. For every ideal $I$ of $R$, we have
\begin{equation*}
I^\ast T_2=(I^\ast T_1)T_2=(IT_1)^{\ast_{T_1}}T_2=(IT_1T_2)^{(\ast_{T_1})_{T_2}}=(IT_2)^{(\ast_{T_1})_{T_2}}
\end{equation*}
and thus if $IT_2=JT_2$ then $I^\ast T_2=J^\ast T_2$, so that $\ast$ is extendable to $T_2$. The previous calculation also shows that $\ast_{T_2}=(\ast_{T_1})_{T_2}$.
\end{proof}
\begin{prop}
Let $R$ be an integral domain and $T$ be a flat overring of $R$. Let $\Delta:=\{M\cap R\mid M\in\mathrm{Max}(T)\}$. If $\ast\in\mathrm{Star}(R)$ is extendable to $R_P$, for every $P\in\Delta$, then it is extendable to $T$.
\end{prop}
\begin{proof}
Let $I,J$ be ideals of $R$ such that $IT=JT$. Let $P\in\Delta$ and let $M$ be the (necessarily unique -- see Remark \ref{oss:def}(\ref{oss:def:primi})) maximal ideal of $T$ such that $M\cap R=P$. Then, $T_M=R_P$, and since $\ast$ is extendable to $R_P$ we have $I^\ast R_P=J^\ast R_P$. It follows that
\begin{equation*}
I^\ast T=\bigcap_{P\in\Delta}I^\ast R_P=\bigcap_{P\in\Delta}J^\ast R_P=J^\ast T,
\end{equation*}
and thus $\ast$ is extendable to $T$.
\end{proof}
\begin{cor}\label{cor:def-extstar}
Let $R$ be a domain, and let $\ast\in\mathrm{Star}(R)$. The following are equivalent:
\begin{enumerate}[(i)]
\item $\ast$ is extendable to $R_P$, for every $P\in\mathrm{Spec}(R)$;
\item $\ast$ is extendable to every flat overring of $R$.
\end{enumerate}
\end{cor}
Note that condition (i) of the above corollary cannot be replaced by the version that considers only maximal ideals of $R$: indeed, if $(R,M)$ is local, then clearly every star operation is extendable to $R_M$, but it is not true that every star operation is extendable to every localization. We can build an explicit counterexample by slightly tweaking \cite[Remark 2.5(3)]{twostar}. Let $R:=\ins{Z}_{p\ins{Z}}+X\ins{Q}(\sqrt{2})[[X]]$ (where $p$ is a prime number). Then, $R$ is a two-dimensional local domain, with maximal ideal $M:=p\ins{Z}_{p\ins{Z}}+X\ins{Q}(\sqrt{2})[[X]]$; let $P:=X\ins{Q}(\sqrt{2})[[X]]$. We claim that the $v$-operation is not extendable to $R_P=\ins{Q}+P$. Let $A:=X(\ins{Q}+P)$ and $B:=XR$: then, $AR_P=BR_P=A$, but $A^vR_P=P$ while $B^vR_P=BR_P\neq P$.
\section{Jaffard families}\label{sect:Jaffard}
The concept of Jaffard family was introduced and studied in \cite[Section 6.3]{fontana_factoring}.
\begin{defin}
Let $R$ be a domain and $\Theta$ be a set of overrings of $R$ such that the quotient field of $R$ is not in $\Theta$. We say that $\Theta$ is a \emph{Jaffard family on $R$} if, for every integral ideal $I$ of $R$,
\begin{itemize}
\item $R=\bigcap_{T\in\Theta}T$;
\item $\Theta$ is locally finite;
\item $I=\prod_{T\in\Theta}(IT\cap R)$;
\item if $T\neq S$ are in $\Theta$, then $(IT\cap R)+(IS\cap R)=R$.
\end{itemize}
We say that an overring $T$ of $R$ is a \emph{Jaffard overring} of $R$ if $T$ belongs to a Jaffard family of $R$.
\end{defin}
Note that, by the second axiom, if $I\neq(0)$ then $IT=T$ for all but finitely many $T\in\Theta$, so that the product $I=\prod_{T\in\Theta}(IT\cap R)$ is finite.
The next propositions collect the properties of Jaffard families that we will be using.
\begin{prop}[{\protect\cite[Theorem 6.3.1]{fontana_factoring}}]\label{prop:jaffard:basic}
Let $R$ be an integral domain with quotient field $K$, and let $\Theta$ be a Jaffard family on $R$. For each $T\in\Theta$, let $\ortog{\Theta}(T):=\bigcap\{U\in\Theta\mid U\neq T\}$.
\begin{enumerate}[(a)]
\item\label{prop:jaffard:basic:complete} $\Theta$ is complete (i.e., $I=\bigcap\{IT\mid T\in\Theta\}$ for every ideal $I$ of $R$).
\item\label{prop:jaffard:basic:partitionSpec} For each $P\in\mathrm{Spec}(R)$, $P\neq(0)$, there is a unique $T\in\Theta$ such that $PT\neq T$.
\item\label{prop:jaffard:basic:flat} For each $T\in\Theta$, both $T$ and $\ortog{\Theta}(T)$ are flat over $R$.
\item\label{prop:jaffard:basic:complindip} For each $T\in\Theta$, we have $T\cdot\ortog{\Theta}(T)=K$.
\end{enumerate}
\end{prop}
\begin{prop}\label{prop:caratt-jaffard}
Let $\Theta$ be a family of flat overrings of the domain $R$, and let $K$ be the quotient field of $R$. Then, $\Theta$ is a Jaffard family if and only if it is complete, locally finite and $TS=K$ for all $T,S\in\Theta$, $T\neq S$.
\end{prop}
\begin{proof}
If $\Theta$ is a Jaffard family, the properties follow by the definition and Proposition \ref{prop:jaffard:basic}. Conversely, suppose $\Theta$ verifies the three properties, let $I\neq(0)$ be an ideal of $R$ and let $T\neq S$ be members of $\Theta$. If $IT\cap R$ and $IS\cap R$ are not coprime, then there would be a prime $P$ of $R$ containing both; since $\Theta$ is complete, it would follow that both $IT\cap R$ and $IS\cap R$ survive in some $A\in\Theta$. In particular, without loss of generality, $A\neq T$; however,
\begin{equation*}
(IT\cap R)A=ITA\cap A=IK\cap A=A,
\end{equation*}
a contradiction. Therefore, $(IT\cap R)+(IS\cap R)=R$. Moreover, $I=\bigcap\{IT\cap R\mid T\in\Theta\}=(IT_1\cap R)\cap\cdots\cap(IT_n\cap R)$ by local finiteness; since the $IT_i\cap R$ are coprime, their intersection is equal to their product, and thus $I=(IT_1\cap R)\cdots(IT_n\cap R)$.
\end{proof}
\begin{oss}\label{oss:Matlis}
Any Jaffard family $\Theta$ defines a partition on $\mathrm{Max}(R)$, where each class is composed of the $M\in\mathrm{Max}(R)$ such that $MT\neq T$ for some fixed $T\in\Theta$. In particular, $T=\bigcap R_M$, as $M$ ranges in the class relative to $T$; hence, different Jaffard families define different partitions. In particular, a local domain has only one Jaffard family, namely $\{R\}$, and a semilocal domain has only a finite number of Jaffard families.
However, not every partition of $\mathrm{Max}(R)$ can arise in this way. For example, let $\Theta$ be a Jaffard family and let $M,N\in\mathrm{Max}(R)$; by Proposition \ref{prop:jaffard:basic}\ref{prop:jaffard:basic:partitionSpec}, there are unique overrings $T,U\in\Theta$ such that $MT\neq T$ and $NU\neq U$. If there is a nonzero prime $P\subseteq M\cap N$, then $PT\neq T$ and $PU\neq U$; therefore, again by Proposition \ref{prop:jaffard:basic}\ref{prop:jaffard:basic:partitionSpec}, it must be $T=U$.
\end{oss}
A \emph{$h$-local domain} is an integral domain $R$ such that $\mathrm{Max}(R)$ is locally finite and such that every prime ideal $P$ is contained in only one maximal ideal. In this case, $\{R_M\mid M\in\mathrm{Max}(R)\}$ is a Jaffard family of $R$; conversely, if $\{R_M\mid M\in\mathrm{Max}(R)\}$ is a Jaffard family, then $\mathrm{Max}(R)$ is locally finite (by definition) and each prime is contained in only one maximal ideal (by Proposition \ref{prop:jaffard:basic}\ref{prop:jaffard:basic:partitionSpec}), and thus $R$ is $h$-local. Many properties of the Jaffard families can be seen as generalizations of the corresponding properties of $h$-local domains; the following proposition is an example (compare \cite[Proposition 3.1]{olberding_globalizing}).
\begin{prop}\label{prop:integintersect}
Let $R$ be a domain and $T$ be a Jaffard overring of $R$. Then:
\begin{enumerate}[(a)]
\item\label{prop:integintersect:a} for every family $\{X_\alpha:\alpha\in A\}$ of $R$-submodules of $K$ with nonzero intersection, we have $\left(\bigcap_{\alpha\in A}X_\alpha\right)T=\bigcap_{\alpha\in A}X_\alpha T$;
\item\label{prop:integintersect:b} if $\{I_\alpha:\alpha\in A\}$ is a family of integral ideals of $R$ with nonzero intersection such that $\left(\bigcap_{\alpha\in A}I_\alpha\right)T\neq T$, then $I_{\overline{\alpha}}T\neq T$ for some $\overline{\alpha}\in A$.
\end{enumerate}
\end{prop}
\begin{proof}
\ref{prop:integintersect:a} Let $\Theta$ be a Jaffard family of $R$ such that $T\in\Theta$. Then, by the flatness of $T$,
\begin{align*}
\left(\bigcap_{\alpha\in A}X_\alpha\right)T&=
\left(\bigcap_{\alpha\in A}\bigcap_{U\in\Theta}X_\alpha U\right)T=
\left(\bigcap_{U\in\Theta}\bigcap_{\alpha\in A}X_\alpha U\right)T\\
&=\left(\bigcap_{U\in\Theta\setminus\{T\}}\bigcap_{\alpha\in A}X_\alpha U\right)T\cap \bigcap_{\alpha\in A}X_\alpha T=K\cap\bigcap_{\alpha\in A}X_\alpha T
\end{align*}
since $\bigcap_{U\in\Theta\setminus\{T\}}\bigcap_{\alpha\in A}X_\alpha U$ is a $\ortog{\Theta}(T)$-module, and thus its product with $T$ is equal to $K$ by Proposition \ref{prop:jaffard:basic}\ref{prop:jaffard:basic:complindip}.
\ref{prop:integintersect:b} Suppose $\left(\bigcap_{\alpha\in A}I_\alpha\right)T\neq T$. Since $\left(\bigcap_{\alpha\in A}I_\alpha\right)T\subseteq T$, then $1$ is not contained in the left hand side. By \ref{prop:integintersect:a}, $1$ is not contained in $\bigcap_{\alpha\in A}I_\alpha T$, i.e., there is an $\overline{\alpha}$ such that $1\notin I_{\overline{\alpha}} T$, and thus $I_{\overline{\alpha}} T\neq T$.
\end{proof}
\section{Jaffard families and star operations}\label{sect:jaff-star}
The reason why we introduced Jaffard families is that they provide a way to decompose $\mathrm{Star}(R)$ as a product of spaces of star operations of overrings of $R$. Before reaching this objective (Theorem \ref{teor:star-jaffard}) we show that weaker properties can lead to a decomposition of at least a subset of $\mathrm{Star}(R)$.
\begin{prop}\label{prop:indip-rhoinj}
Let $R$ be an integral domain with quotient field $K$. Let $\Theta$ be a set of flat overrings of $R$ such that $R=\bigcap\{T\mid T\in\Theta\}$ and such that $AB=K$ whenever $A,B\in\Theta$ and $A\neq B$. Then, there is an injective order-preserving map
\begin{equation*}
\begin{aligned}
\rho_\Theta\colon\prod_{T\in\Theta}\mathrm{Star}(T) & \longrightarrow\mathrm{Star}(R) \\
(\ast^{(T)})_{T\in\Theta} & \longmapsto\bigwedge_{T\in\Theta}\ast^{(T)},
\end{aligned}
\end{equation*}
where $\bigwedge_{T\in\Theta}\ast^{(T)}$ is the map such that
\begin{equation*}
I\mapsto\bigcap_{T\in\Theta}(IT)^{\ast^{(T)}}
\end{equation*}
for every fractional ideal $I$ of $R$.
\end{prop}
\begin{proof}
Let $(\ast^{(T)})_{T\in\Theta}\in\prod_{T\in\Theta}\mathrm{Star}(T)$, and let $\ast:=\rho_\Theta((\ast^{(T)})_{T\in\Theta})$. Since $\bigcap_{T\in\Theta}T=R$, the map $\ast$ is a star operation; moreover, it is clear that if $\ast_1^{(T)}\leq\ast_2^{(T)}$ for all $T$ then $\rho_\Theta(\ast_1^{(T)})\leq\rho_\Theta(\ast_2^{(T)})$. Hence, $\rho_\Theta$ is well-defined and order-preserving; we need to show that it is injective.
Suppose it is not; then, $\ast:=\rho_\Theta(\ast_1^{(T)})=\rho_\Theta(\ast_2^{(T)})$ for some families of star operations such that $\ast_1^{(U)}\neq\ast_2^{(U)}$ for some $U\in\Theta$. There is an integral ideal $J$ of $U$ such that $J^{\ast_1^{(U)}}\neq J^{\ast_2^{(U)}}$; let $I:=J\cap R$. Since $U$ is flat, for both $i=1$ and $i=2$ we have
\begin{equation*}
I^\ast U=\left[\bigcap_{T\in\Theta}(IT)^{\ast_i^{(T)}}\right]U=(IU)^{\ast_i^{(U)}}U\cap \left[\bigcap_{T\in\Theta\setminus\{U\}}(IT)^{\ast_i^{(T)}}\right]U.
\end{equation*}
If $T\neq U$, then, since $T$ is flat,
\begin{equation*}
(IT)^{\ast_i^{(T)}}=((J\cap R)T)^{\ast_i^{(T)}}=(JT\cap T)^{\ast_i^{(T)}}.
\end{equation*}
However, $JT=JUT=K$ since $UT=K$ (by hypothesis); therefore, $(IT)^{\ast_i^{(T)}}=T$, and (since $I\subseteq U$)
\begin{align*}
I^\ast U&=(IU)^{\ast_i^{(U)}}U\cap\left[\bigcap_{T\in\Theta\setminus\{U\}}T\right]U= (IU)^{\ast_i^{(U)}}U\cap\left[\bigcap_{T\in\Theta}T\right]U\\
&=(IU)^{\ast_i^{(U)}}\cap RU=(IU)^{\ast_i^{(U)}}=J^{\ast_i^{(U)}}
\end{align*}
for both $i=1$ and $i=2$. However, this contradicts the choice of $J$; hence, $\rho_\Theta$ is injective.
\end{proof}
If $\Theta$ is a Jaffard family, the previous proposition can be strengthened. We need two lemmas.
\begin{lemma}\label{lemma:intersez-ritorno}
Let $R$ be a domain with quotient field $K$, and let $\Theta$ be a Jaffard family on $R$. For every $U\in\Theta$, let $J_U$ be a $U$-submodule of $K$, and define $J:=\bigcap_{U\in\Theta}J_U$. If $J\neq(0)$, then for every $T\in\Theta$ we have $JT=J_T$.
\end{lemma}
\begin{proof}
By Proposition \ref{prop:integintersect}\ref{prop:integintersect:a}, we have
\begin{equation*}
JT=\left(\bigcap_{U\in\Theta}J_U\right)T= \bigcap_{U\in\Theta}J_UT.
\end{equation*}
If $U\neq T$, then $J_UT=J_UUT=J_UK=K$; therefore, $JT=J_TT=J_T$.
\end{proof}
The next lemma can be seen as a generalization of \cite[Theorem 6.2.2(2)]{fontana_factoring} and \cite[Lemma 2.3]{warfield}.
\begin{lemma}\label{lemma:Tcolon}
Let $R$ be an integral domain, $T$ be a Jaffard overring of $R$, and let $I,J\in\mathbf{F}(R)$ such that $(I:J)\neq(0)$. Then, $(I:J)T=(IT:JT)$.
\end{lemma}
\begin{proof}
It is enough to note that $(I:J)=\bigcap_{j\in J}j^{-1}I\neq(0)$, and apply Proposition \ref{prop:integintersect}\ref{prop:integintersect:a}.
\end{proof}
\begin{teor}\label{teor:star-jaffard}
Let $R$ be an integral domain and let $\Theta$ be a Jaffard family on $R$. Then, every $\ast\in\mathrm{Star}(R)$ is extendable to every $T\in\Theta$, and the maps
\begin{equation*}
\begin{aligned}
\lambda_\Theta\colon \mathrm{Star}(R) & \longrightarrow \prod_{T\in\Theta}\mathrm{Star}(T)\\
\ast & \longmapsto (\ast_T)_{T\in\Theta}
\end{aligned}
\quad\text{and}\quad
\begin{aligned}
\rho_\Theta\colon \prod_{T\in\Theta}\mathrm{Star}(T) & \longrightarrow \mathrm{Star}(R)\\
(\ast^{(T)})_{T\in\Theta} & \longmapsto \bigwedge_{T\in\Theta}\ast^{(T)}
\end{aligned}
\end{equation*}
(where $\bigwedge_{T\in\Theta}\ast^{(T)}$ is defined as in Proposition \ref{prop:indip-rhoinj}) are order-preserving bijections between $\mathrm{Star}(R)$ and $\prod\{\mathrm{Star}(T)\mid T\in\Theta\}$.
\end{teor}
\begin{proof}
We first show that every $\ast\in\mathrm{Star}(R)$ is extendable. Let $T\in\Theta$ and let $I,J$ be ideals of $R$ such that $IT=JT$. Then, using Lemma \ref{lemma:Tcolon}, we have
\begin{equation*}
(I^\ast T:J^\ast T)=(I^\ast:J^\ast)T=(I^\ast:J)T=(I^\ast T:JT)
\end{equation*}
and, since $JT=IT\subseteq I^\ast T$, we have $1\in(I^\ast T:J^\ast T)$, so that $J^\ast T\subseteq I^\ast T$. Symmetrically, $I^\ast T\subseteq J^\ast T$, and hence $J^\ast T=I^\ast T$. By Proposition \ref{prop:starloc:basic}\ref{prop:starloc:basic:equiv}, $\ast_T$ is well-defined, and $\ast$ is extendable to $T$; in particular, $\lambda_\Theta$ is well-defined.
Moreover, for every $\ast\in\mathrm{Star}(R)$, we have
\begin{equation*}
I^\ast=\bigcap_{T\in\Theta}I^\ast T=\bigcap_{T\in\Theta}(IT)^{\ast_T}
\end{equation*}
using the completeness of $\Theta$ in the first equality and the definition of extension in the second. Thus, $\ast=\rho_\Theta\circ\lambda_\Theta(\ast)$, i.e., $\rho_\Theta\circ\lambda_\Theta$ is the identity. It follows that $\lambda_\Theta$ is injective and $\rho_\Theta$ is surjective. But $\rho_\Theta$ is injective by Proposition \ref{prop:indip-rhoinj}, so $\lambda_\Theta$ and $\rho_\Theta$ must be bijections.
\end{proof}
The second part of the following corollary is a generalization of \cite[Theorem 2.3]{houston_noeth-starfinite}.
\begin{cor}\label{cor:1dim}
Let $R$ be a one-dimensional integral domain.
\begin{enumerate}[(a)]
\item $|\mathrm{Star}(R)|\geq\prod\{|\mathrm{Star}(R_M)|:M\in\mathrm{Max}(R)\}$;
\item if $R$ is of finite character (for example, if $R$ is Noetherian), then $|\mathrm{Star}(R)|=\prod\{|\mathrm{Star}(R_M)|:M\in\mathrm{Max}(R)\}$.
\end{enumerate}
\end{cor}
\begin{proof}
If $M\neq N$ are maximal ideals of $R$, then $R_MR_N=K$, since both $M$ and $N$ have height 1. By Proposition \ref{prop:indip-rhoinj}, there is an injective map from $\mathrm{Star}(R)$ to the product $\prod\mathrm{Star}(R_M)$, which in particular implies the first inequality.
If, moreover, $R$ is one-dimensional and of finite character, then $\{R_M\mid M\in\mathrm{Max}(R)\}$ is a Jaffard family, and the claim follows by applying Theorem \ref{teor:star-jaffard}.
\end{proof}
The bijections $\rho_\Theta$ and $\lambda_\Theta$ respect the properties of star operations; see the following Proposition \ref{prop:jaffard-eab} for the eab case.
\begin{teor}\label{teor:jaffard-corresp}
Let $R$ be a domain and $\Theta$ be a Jaffard family on $R$, and let $\ast\in\mathrm{Star}(R)$. Then, $\ast$ is of finite type (respectively, semifinite, stable, spectral, Noetherian) if and only if $\ast_T$ is of finite type (resp., semifinite, stable, spectral, Noetherian) for every $T\in\Theta$.
\end{teor}
\begin{proof}
By Propositions \ref{prop:starloc:basic}\ref{prop:starloc:basic:ft} and \ref{prop:estensione-prop}, if $\ast$ is of finite type, stable, spectral or Noetherian so is $\ast_T$. If $\ast$ is semifinite, let $I$ be a $\ast_T$-closed ideal of $T$, and let $J:=I\cap R$. Then $JT=I$, and $J^\ast\subseteq I^{\ast_T}\cap R=J$, so that there is a prime ideal $Q\supseteq J$ such that $Q^\ast=Q$. For every $U\in\Theta$, $U\neq T$, we have $JU=U$; hence $QU=U$, and thus $QT\neq T$; moreover, since $T$ is flat, $QT$ is prime (Remark \ref{oss:def}(\ref{oss:def:primi})). Therefore, $(QT)^{\ast_T}=Q^\ast T=QT$ is a proper prime $\ast_T$-ideal containing $I$, and $\ast_T$ is semifinite.
Let now $\ast:=\rho_\Theta(\ast^{(T)})$.
If each $\ast^{(T)}$ is of finite type, then $\ast$ is of finite type by \cite{anderson_examples}.
Suppose each $\ast^{(T)}$ is semifinite and $I=I^\ast$ is a proper ideal of $R$. Then, $1\notin I$, so there is a $T\in\Theta$ such that $(IT)^{\ast^{(T)}}\neq T$, and thus there is a prime ideal $P$ of $T$ containing $IT$ such that $P=P^{\ast^{(T)}}$. If $Q:=P\cap R$, then
\begin{equation*}
Q^\ast\subseteq(QT)^{\ast^{(T)}}\cap R\subseteq P^{\ast^{(T)}}\cap R=Q,
\end{equation*}
so that $Q$ is a $\ast$-prime ideal of $R$ containing $I$.
If each $\ast^{(T)}$ is stable, then, given ideals $I,J$ of $R$, we have
\begin{equation*}
(I\cap J)^\ast=\bigcap_{T\in\Theta}((I\cap J)T)^{\ast^{(T)}}=\bigcap_{T\in\Theta}(IT)^{\ast^{(T)}}\cap \bigcap_{T\in\Theta}(JT)^{\ast^{(T)}}=I^\ast\cap J^\ast.
\end{equation*}
Hence, $\ast$ is stable. The case of spectral star operation follows since $\ast$ is spectral if and only if it is stable and semifinite \cite[Theorem 4]{anderson_overrings_1988}.
Suppose now $\ast^{(T)}$ is Noetherian for every $T\in\Theta$ and let $\{I_\alpha:\alpha\in A\}$ be an ascending chain of $\ast$-ideals. If $I_\alpha=(0)$ for every $\alpha$ we are done. Otherwise, there is a $\overline{\alpha}$ such that $I_{\overline{\alpha}}\neq(0)$, and thus $I_{\overline{\alpha}}$ (and, consequently, every $I_\alpha$ for $\alpha>\overline{\alpha}$) survives in only a finite number of elements of $\Theta$, say $T_1,\ldots,T_n$. For each $i\in\{1,\ldots,n\}$, the set $\{I_\alpha T_i\}$ is an ascending chain of $\ast^{(T_i)}$-ideals, and thus there is a $\alpha_i$ such that $I_\alpha T_i=I_{\alpha_i}T_i$ for every $\alpha\geq\alpha_i$.
Let thus $\widetilde{\alpha}:=\max\{\overline{\alpha},\alpha_i: 1\leq i\leq n\}$. For every $\beta\geq\widetilde{\alpha}$, we have $I_\beta T_i=I_{\alpha_i}T_i=I_{\widetilde{\alpha}}T_i$, while, if $T\neq T_i$ for every $i$, then $I_\beta T=T=I_{\widetilde{\alpha}}T$ since $\beta\geq\overline{\alpha}$. Therefore, $I_\beta=\bigcap_{T\in\Theta}I_\beta T=\bigcap_{T\in\Theta}I_{\widetilde{\alpha}}T=I_{\widetilde{\alpha}}$ and the chain $\{I_\alpha\}$ stabilizes.
\end{proof}
\begin{cor}\label{cor:trasfnoeth}
Let $R$ be a domain and $\Theta$ be a Jaffard family on $R$. If every $T\in\Theta$ is Noetherian, so is $R$.
\end{cor}
\begin{proof}
A domain $A$ is Noetherian if and only if the identity star operation $d^{(A)}$ is Noetherian. If every $T\in\Theta$ is Noetherian, each $d^{(T)}$ is a Noetherian star operation, and thus (by Theorem \ref{teor:jaffard-corresp}) $\rho_\Theta((d^{(T)})_{T\in\Theta})$ is Noetherian. However, by Theorem \ref{teor:star-jaffard}, $\rho_\Theta((d^{(T)})_{T\in\Theta})=d^{(R)}$, and thus $R$ is a Noetherian domain.
\end{proof}
\begin{lemma}\label{lemma:IcapRJcapR}
Let $R$ be an integral domain and let $T$ be a Jaffard overring of $R$. For all nonzero integral ideals $I,J$ of $T$,
\begin{equation*}
(I\cap R)(J\cap R)=IJ\cap R.
\end{equation*}
\end{lemma}
\begin{proof}
Let $\Theta$ be a Jaffard family containing $T$. Since $\Theta$ is complete, it is enough to show that they are equal when localized on every $U\in\Theta$. We have
\begin{equation*}
(I\cap R)(J\cap R)U=(IU\cap U)(JU\cap U)=\begin{cases}
IJ & \text{~if~}U=T\\
U & \text{~if~}U\neq T
\end{cases}
\end{equation*}
while
\begin{equation*}
(IJ\cap R)U=IJU\cap U=\begin{cases}
IJ & \text{~if~}U=T\\
U & \text{~if~}U\neq T
\end{cases}
\end{equation*}
and thus $(I\cap R)(J\cap R)=IJ\cap R$.
\end{proof}
\begin{lemma}\label{lemma:IcapR}
Let $R$ be an integral domain, $T$ a Jaffard overring of $R$, and let $I$ be a finitely generated integral ideal of $T$. Then, $I\cap R$ is finitely generated (over $R$).
\end{lemma}
\begin{proof}
Let $S:=\ortog{\Theta}(T)$, where $\Theta$ is a Jaffard family to which $T$ belongs. Then, by Proposition \ref{prop:jaffard:basic}, $(I\cap R)S=IS\cap S=ITS\cap S=S$, and thus there are $i_1,\ldots,i_n\in I\cap R$, $s_1,\ldots,s_n\in S$ such that $1=i_1s_1+\cdots+i_ns_n$; let $I_0:=(i_1,\ldots,i_n)$.
Let $x_1,\ldots,x_m$ be the generators of $I$ in $T$. Since $(I\cap R)T=IT=I$, for every $x_i$ there are $j_{1i},\ldots,j_{n_ii}\in I\cap R$, $t_{1i},\ldots,t_{n_ii}\in T$ such that $x_i=j_{1i}t_{1i}+\cdots+j_{n_ii}t_{n_ii}$; let $I_i:=(j_{1i},\ldots,j_{n_ii})$. Then, $J:=I_0+I_1+\cdots+I_m$ is a finitely generated ideal contained in $I\cap R$ (since it is generated by elements of $I\cap R$) such that $(I\cap R)T\subseteq JT$ and $(I\cap R)S\subseteq JS$; thus, $I\cap R\subseteq J$. Therefore, $I\cap R=J$ is finitely generated, as claimed.
\end{proof}
\begin{prop}\label{prop:jaffard-eab}
Let $R$ be an integral domain and let $\Theta$ be a Jaffard family on $R$. A $\ast\in\mathrm{Star}(R)$ is eab (resp., ab) if and only if $\ast_T$ is eab (resp., ab) for every $T\in\Theta$.
\end{prop}
\begin{proof}
$(\Longrightarrow)$. Suppose $(IJ)^{\ast_T}\subseteq(IL)^{\ast_T}$ for some finitely generated ideals $I,J,L$ of $T$ (which we can suppose contained in $T$). Since
\begin{equation*}
(IJ\cap R)^\ast T=((IJ\cap R)T)^{\ast_T}=(IJ)^{\ast_T}
\end{equation*}
(and the same happens for $IL$), we have $(IJ\cap R)^\ast T\subseteq (IL\cap R)^\ast T$, and so
\begin{equation*}
(IJ\cap R)^\ast T\cap R\subseteq (IL\cap R)^\ast T\cap R.
\end{equation*}
However, both $IJ\cap R$ and $IL\cap R$ survive (among the ideals of $\Theta$) only in $T$, so that
\begin{equation*}
(IJ\cap R)^\ast T\cap R=(IJ\cap R)^\ast=((I\cap R)(J\cap R))^\ast
\end{equation*}
by Lemma \ref{lemma:IcapRJcapR}, and thus
\begin{equation*}
((I\cap R)(J\cap R))^\ast\subseteq ((I\cap R)(L\cap R))^\ast.
\end{equation*}
Since $I$ is finitely generated, by Lemma \ref{lemma:IcapR} so is $I\cap R$; the same happens for $J\cap R$ and $L\cap R$. Hence, since $\ast$ is eab, $(J\cap R)^\ast\subseteq(L\cap R)^\ast$, and thus
\begin{equation*}
J^{\ast_T}=(J\cap R)^\ast T\subseteq(L\cap R)^\ast T=L^{\ast_T}.
\end{equation*}
Hence, $\ast_T$ is eab.
$(\Longleftarrow)$. Suppose $(IJ)^\ast\subseteq(IL)^\ast$ for some finitely generated ideals $I,J,L$ of $R$. Then, $(IJ)^\ast T\subseteq(IL)^\ast T$, i.e., $(IJT)^{\ast_T}\subseteq(ILT)^{\ast_T}$ for every $T\in\Theta$. Since $\ast_T$ is eab, this implies that $(JT)^{\ast_T}\subseteq(LT)^{\ast_T}$ for every $T\in\Theta$; since $H^\ast=\bigcap_{T\in\Theta}(HT)^{\ast_T}$, it follows that $J^\ast\subseteq L^\ast$, and $\ast$ is eab.
The same reasoning applies to the ab case.
\end{proof}
Following \cite{hhp_m-canonical}, we say that an ideal $A$ is $m$-canonical if $I=(A:(A:I))$ for every fractional ideal $I$ of $R$. The following proposition can be seen as a generalization of \cite[Theorem 6.7]{hhp_m-canonical} to domains that are not necessarily integrally closed.
\begin{prop}\label{prop:starloc:mcan}
Let $R$ be a domain. Then $R$ admits an $m$-canonical ideal if and only if $R$ is $h$-local, $R_M$ admits an $m$-canonical ideal for every $M\in\mathrm{Max}(R)$ and $|\mathrm{Star}(R_M)|\neq 1$ for only a finite number of maximal ideals $M$ of $R$.
\end{prop}
\begin{proof}
Suppose $A$ is $m$-canonical. Then $R$ is $h$-local by \cite[Proposition 2.4]{hhp_m-canonical}; moreover, if $I$ is a $R_M$-fractional ideal, then $I=JR_M$ for some $R$-fractional ideal, and thus
\begin{align*}
(AR_M:(AR_M:I))&=(AR_M:(AR_M:JR_M))\\
&=(AR_M:(A:J)R_M)=(A:(A:J))R_M=JR_M=I
\end{align*}
applying Lemma \ref{lemma:Tcolon} (which is applicable since $R$ $h$-local implies that $R_M$ is a Jaffard overring of $R$). If $AR_M=R_M$, it follows that $R_M$ is an $m$-canonical ideal for $R_M$, and thus that the $v$-operation on $R_M$ is the identity, or equivalently that $|\mathrm{Star}(R_M)|=1$; hence, if $|\mathrm{Star}(R_M)|\neq 1$ then $AR_M\neq R_M$. But this can happen only for a finite number of $M$, since $R$ is $h$-local and thus of finite character.
Conversely, suppose that the three hypotheses hold. For every $M\in\mathrm{Max}(R)$, let $J_M$ be an $m$-canonical ideal of $R_M$, and define
\begin{equation*}
I_M:=\begin{cases}
R_M & \text{~if~}|\mathrm{Star}(R_M)|=1\\
J_M & \text{~if~}|\mathrm{Star}(R_M)|>1
\end{cases}
\end{equation*}
Note that, if $|\mathrm{Star}(R_M)|=1$, then $R_M$ is $m$-canonical for $R_M$, and thus $I_M$ is $m$-canonical for every $M$.
The ideal $J:=\bigcap_{P\in\mathrm{Max}(R)}I_P$ of $R$ is nonzero, and by Lemma \ref{lemma:intersez-ritorno} $JR_M=I_M$ for every maximal ideal $M$. If $L$ is an ideal of $R$ then, for every maximal ideal $M$,
\begin{equation*}
(J:(J:L))R_M=(JR_M:(JR_M:LR_M))=(I_M:(I_M:LR_M))=LR_M,
\end{equation*}
so that
\begin{equation*}
(J:(J:L))=\bigcap_{M\in\mathrm{Max}(R)}(J:(J:L))R_M=\bigcap_{M\in\mathrm{Max}(R)}LR_M=L.
\end{equation*}
Therefore, $J$ is an $m$-canonical ideal of $R$.
\end{proof}
\begin{oss}
The results in Sections \ref{sect:extension} and \ref{sect:jaff-star} can be generalized in two different directions.
On the one hand, we can consider, instead of star operations, other classes of closure operations, for example semiprime or semistar operations. In both cases, the definitions of extendability and the results in Section \ref{sect:extension} carry over without modifications, noting that the equalities $(I^c:J^c)=(I^c:J)$ and $(I^\ast:J^\ast)=(I^\ast:J)$ hold when $c$ and $\ast$ are, respectively, a semiprime or a semistar operation.
However, the behaviour of these two classes differs when we come to Jaffard families. In one case there is no problem: with the obvious modifications, all results of Section \ref{sect:jaff-star} hold for the set $\mathrm{Sp}(R)$ of semiprime operations. For example, this means that we can analyze the structure of the semiprime operations on a Dedekind domain $D$ almost directly from the structure of $\mathrm{Sp}(V)$, for $V$ a discrete valuation ring, shortening the analysis done in \cite[Section 3]{vassilev_structure_2009}.
The case of semistar operations is much more delicate: indeed, the result corresponding to Theorem \ref{teor:star-jaffard} is \emph{not} true for $\mathrm{SStar}(R)$, meaning that a semistar operation on $R$ may not be extendable to a Jaffard overring $T$ of $R$. For example, let $\ast$ be the semistar operation defined by
\begin{equation*}
I^\ast=\begin{cases}
I & \text{if~}I\in\mathcal{F}(R)\\
K & \text{otherwise}.
\end{cases}
\end{equation*}
If $T\neq R$ is a Jaffard overring of $R$, then it is not a fractional ideal of $R$ (for otherwise $T\cdot\ortog{\Theta}(T)=K$ would imply $\ortog{\Theta}(T)=K$); however, we have $RT=TT$, while
\begin{equation*}
R^\ast T=T\neq K=T^\ast T.
\end{equation*}
Hence, $\ast$ is not extendable to $T$. The exact point at which the proof of Theorem \ref{teor:star-jaffard} fails is the possibility of using Lemma \ref{lemma:Tcolon}, because the equality $IT=JT$ does not imply that $(I:J)\neq(0)$. However, if we restrict to finite-type semistar operations, the analogue of Theorem \ref{teor:star-jaffard} does hold: indeed, a proof analogous to the one of Proposition \ref{prop:starloc:basic}\ref{prop:starloc:basic:ft} shows that finite-type operations are extendable, and thus the proof of Theorem \ref{teor:star-jaffard} continues without problems.
A second way of generalizing these results is by considering, beyond the order structure, also a \emph{topological} structure on $\mathrm{Star}(R)$: mimicking the definition of the Zariski topology on $\mathrm{SStar}(R)$ given in \cite{topological-cons}, we can define a topology on $\mathrm{Star}(R)$ by declaring open the sets of the form
\begin{equation*}
V_I:=\{\ast\in\mathrm{Star}(R)\mid 1\in I^\ast\},
\end{equation*}
as $I$ ranges among the fractional ideals of $R$. In particular, Theorem \ref{teor:star-jaffard} can be interpreted at the topological level: if $\Theta$ is a Jaffard family of $R$, then $\lambda_\Theta$ and $\rho_\Theta$ are homeomorphisms between $\mathrm{Star}(R)$ and the space $\prod_{T\in\Theta}\mathrm{Star}(T)$ endowed with the product topology.
\end{oss}
\section{Application to Pr\"ufer domains}\label{sect:starloc:prufer}
Theorem \ref{teor:star-jaffard} allows one to split the study of the set $\mathrm{Star}(R)$ of star operations on $R$ into the study of the sets $\mathrm{Star}(T)$, as $T$ ranges among the members of a Jaffard family $\Theta$. Obviously, this result is not very useful if we do not know how to find Jaffard families, or if studying $\mathrm{Star}(T)$ is as complex as studying $\mathrm{Star}(R)$. The purpose of this section is to show that, in the case of (some classes of) Pr\"ufer domains, we can resolve the first question, and we can at least make some progress on the second, proving more explicit results on $\mathrm{Star}(R)$. We shall employ a method similar to the one used in \cite[Sections 3-5]{hmp_finite}.
Let now $R$ be a Pr\"ufer domain with quotient field $K$. We say that two maximal ideals $M,N$ are \emph{dependent} if $R_MR_N\neq K$, or equivalently if $M\cap N$ contains a nonzero prime ideal. Since the spectrum of $R$ is a tree, being dependent is an equivalence relation; we indicate the equivalence classes by $\Delta_\lambda$, as $\lambda$ ranges in $\Lambda$. We also define $T_\lambda:=\bigcap\{R_P\mid P\in\Delta_\lambda\}$. We call the set $\Theta:=\{T_\lambda\mid \lambda\in\Lambda\}$ the \emph{standard decomposition} of $R$.
\begin{lemma}\label{lemma:dimfin}
Let $R$ be a finite-dimensional Pr\"ufer domain. Then, $\Delta\subseteq\mathrm{Max}(R)$ is an equivalence class with respect to dependence if and only if $\Delta=V(P)\cap\mathrm{Max}(R)$ for some height-one prime $P$ of $R$.
\end{lemma}
\begin{proof}
Suppose $\Delta=V(P)\cap\mathrm{Max}(R)$. If $M,N\in\Delta$, then $P\subseteq M\cap N$, so that $M$ and $N$ are dependent; conversely, since $P$ has height 1 and the spectrum of $R$ is a tree, $M\in\Delta$ and $(0)\neq Q\subseteq M\cap N$ imply that $P\subseteq Q\subseteq N$, and thus $N\in\Delta$.
On the other hand, suppose $\Delta=\Delta_\lambda$ for some $\lambda$, and let $M,N\in\Delta$. Since $\mathrm{Spec}(R)$ is a tree and $\dim(R)<\infty$, both $M$ and $N$ contain a unique height-one prime, respectively (say) $P_M$ and $P_N$; if $P_M\neq P_N$, then $M\cap N$ cannot contain a nonzero prime, and thus $M$ and $N$ are not dependent, against the hypothesis $M,N\in\Delta$. Therefore, the height-1 prime contained in the members of $\Delta$ is unique, and $\Delta=V(P)\cap\mathrm{Max}(R)$.
\end{proof}
\begin{prop}\label{prop:Tlambda}
Let $R$ be a Pr\"ufer domain, and suppose that
\begin{enumerate}[(a)]
\item\label{prop:Tlambda:noeth} $\mathrm{Max}(R)$ is a Noetherian space; or
\item\label{prop:Tlambda:semiloc} $R$ is semilocal.
\end{enumerate}
Then, the standard decomposition $\Theta$ of $R$ is a Jaffard family of $R$.
\end{prop}
\begin{proof}
Since $R$ is Pr\"ufer, every overring of $R$ is flat \cite[Theorem 1.1.1]{fontana_libro}, and this in particular applies to the $T\in\Theta$.
We claim that, under both hypotheses, if $T=T_\lambda\in\Theta$, then $\mathrm{Spec}(T)=\{PT\mid P\subseteq M\text{~for some~}M\in\Delta_\lambda\}$. Indeed, in both cases every $\Delta_\lambda$ is compact: if $\mathrm{Max}(R)$ is Noetherian this is immediate, while if $R$ is semilocal they are finite and thus compact. Hence, the semistar operation $\ast_\Delta$ is of finite type \cite[Corollary 4.6]{localizing-semistar}, and $R^{\ast_\Delta}=T$; since the unique finite-type (semi)star operation on a Pr\"ufer domain is the identity (since all finitely generated ideals are invertible), it follows that $\ast_\Delta$ is just the map $I\mapsto IT$, and thus $QT=T$ if $Q$ is not contained in any $M\in\Delta$. Therefore, no prime ideal $P$ of $R$ survives in two different members of $\Theta$; thus, $PT_\lambda T_\mu=T_\lambda T_\mu$ if $\lambda\neq\mu$ are in $\Lambda$. Hence, $T_\lambda T_\mu=K$.
We need to show that $\Theta$ is locally finite. If $R$ is semilocal then $\Theta$ is finite, and in particular locally finite; suppose $\mathrm{Max}(R)$ is Noetherian. For every $x\in R$, $x\neq 0$, the ideal $xR$ has only a finite number of minimal primes (this follows, for example, from the proof of \cite[Chapter 4, Corollary 3, p.102]{bourbaki_ac} or \cite[Chapter 6, Exercises 5 and 7]{atiyah}); in particular, since each prime survives in only one $T\in\Theta$, the family $\Theta$ is of finite character.
Hence, in both cases $\Theta$ is a Jaffard family by Proposition \ref{prop:caratt-jaffard}.
\end{proof}
\begin{oss}\label{oss:prufjaff}
~\\
\begin{enumerate}
\item If $R$ is a Pr\"ufer domain that is both of finite character and finite-dimensional, then $\mathrm{Spec}(R)$ (and so $\mathrm{Max}(R)$) is Noetherian. Indeed, if $I$ is a nonzero radical ideal of $R$, then $V(I)$ is finite, and thus every ascending chain of radical ideals must stop; by \cite[Chapter 6, Exercise 5]{atiyah}, this implies Noetherianity.
\item\label{oss:prufjaff:finerJaff}
The standard decomposition $\Theta$ of $R$ is the ``finest'' Jaffard family of $R$, in the sense that the partition of $\mathrm{Max}(R)$ determined by $\Theta$ (see Remark \ref{oss:Matlis}) is the finest partition that can be induced by a Jaffard family; this follows exactly from the definition of the dependence relation.
\item\label{oss:prufjaff:nonminimalbranch} In general, the standard decomposition of $R$ need not be a Jaffard family of $R$. For example, let $R$ be an almost Dedekind domain which is not Dedekind. Since $R$ is one-dimensional, no two maximal ideals are dependent, and thus each $T_\lambda$ has the form $R_M$ for some maximal ideal $M$. However, $\Theta$ is not a Jaffard family, since it is not locally finite (if it were, $R$ would be a Dedekind domain). Indeed, Example \ref{ex:ad} shows that not every star operation is extendable to every $R_M$.
\end{enumerate}
\end{oss}
\subsection{Cutting the branch}
Let $R$ be a finite-dimensional Pr\"ufer domain whose standard decomposition $\Theta$ is a Jaffard family. By Lemma \ref{lemma:dimfin}, every $T\in\Theta$ will have a nonzero prime ideal $P$ contained in all its maximal ideals; moreover, by Remark \ref{oss:prufjaff}(\ref{oss:prufjaff:finerJaff}), $T$ does not admit a further decomposition. On the other hand, it may be possible that $T/P$ has a nontrivial standard decomposition that is still a Jaffard family; thus, if we could relate $\mathrm{Star}(T)$ with $\mathrm{Star}(T/P)$, we could (in principle) simplify the study of $\mathrm{Star}(T)$.
\begin{lemma}\label{lemma:prufer-jac}
Let $R$ be a Pr\"ufer domain whose Jacobson radical $\mathrm{Jac}(R)$ contains a nonzero prime ideal. Then, there is a prime ideal $Q\subseteq\mathrm{Jac}(R)$ such that $\mathrm{Jac}(R/Q)$ does not contain nonzero prime ideals.
\end{lemma}
\begin{proof}
Let $\Delta:=\{P\in\mathrm{Spec}(R)\mid P\subseteq\mathrm{Jac}(R)\}$. By hypothesis, $\Delta$ contains nonzero prime ideals. Let $Q:=\bigcup_{P\in\Delta}P$.
Since $R$ is treed, $\Delta$ is a chain; hence, $Q$ is itself a prime ideal, and it is contained in every maximal ideal of $R$. Suppose $\mathrm{Jac}(R/Q)$ contains a nonzero prime ideal $\overline{Q}$. Then, $\overline{Q}=Q'/Q$ for some prime ideal $Q'$ of $R$, and $Q'$ is contained in every maximal ideal of $R$. It follows that $Q\subsetneq Q'\subseteq\mathrm{Jac}(R)$, against the construction of $Q$.
\end{proof}
Suppose now that $R$ is a Pr\"ufer domain with quotient field $K$, and suppose there is a nonzero prime ideal $P$ contained in every maximal ideal of $R$. Then, we have a quotient map $\phi:R_P\longrightarrow R_P/PR_P=k$ that, for every star operation $\ast$ on $R$, induces a semistar operation $\ast_\phi$ on $D:=R/P$ defined by
\begin{equation*}
I^{\ast_\phi}:=\phi\left(\phi^{-1}(I)^\ast\right),
\end{equation*}
such that $D^{\ast_\phi}=D$. Conversely, if $\sharp$ is a star operation on $D$, then we can construct a star operation $\sharp^\phi$ on $R$: indeed, if $I$ is a fractional ideal of $R$, then $I$ is either divisorial (and so we define $I^{\sharp^\phi}:=I$) or there is an $\alpha\in K$ such that $R\subseteq \alpha I\subseteq R_P$ \cite[Proposition 2.2(5)]{hmp_finite}: in the latter case, we define
\begin{equation*}
I^{\sharp^\phi}:=\alpha^{-1}\phi^{-1}\left(\phi(\alpha I)^\sharp\right).
\end{equation*}
\begin{prop}\label{prop:star-semistar}
Let $R,P,D,\phi$ as above. Then, the maps
\begin{equation*}
\begin{aligned}
\mathrm{Star}(R) & \longrightarrow \mathrm{(S)Star}(R/P)\\
\ast & \longmapsto \ast_\phi
\end{aligned}
\quad\text{~and~}\quad
\begin{aligned}
\mathrm{(S)Star}(R/P) & \longrightarrow \mathrm{Star}(R)\\
\ast & \longmapsto \ast^\phi
\end{aligned}
\end{equation*}
are well-defined order-preserving bijections.
\end{prop}
\begin{proof}
The fact that they are well-defined bijections follows from \cite[Lemmas 2.3 and 2.4]{hmp_finite}; the fact that they are order-preserving is immediate from the definitions.
\end{proof}
\subsection{$h$-local Pr\"ufer domains}\label{sect:hloc-prufer}
If $R$ is both a Pr\"ufer domain and an $h$-local domain, then its standard decomposition $\Theta:=\{R_M\mid M\in\mathrm{Max}(R)\}$ is composed of valuation domains, and star operations behave particularly well. We start by re-proving \cite[Theorem 3.1]{twostar} using our general theory.
\begin{prop}\label{prop:hloc-prufer}
Let $R$ be an $h$-local Pr\"ufer domain, and let $\mathcal{M}$ be the set of nondivisorial maximal ideals of $R$. Then, $|\mathrm{Star}(R)|=2^{|\mathcal{M}|}$.
\end{prop}
\begin{proof}
By Theorem \ref{teor:star-jaffard}, there is an order-preserving bijection between $\mathrm{Star}(R)$ and $\prod\{\mathrm{Star}(R_M)\mid M\in\mathrm{Max}(R)\}$, and a maximal ideal $M$ is divisorial (in $R$) if and only if $MR_M$ is divisorial (in $R_M$). Since $R_M$ is a valuation domain, $|\mathrm{Star}(R_M)|$ is equal to 1 if $MR_M$ is divisorial, and to 2 if $MR_M$ is not; the claim follows.
\end{proof}
It is noted in the proof of \cite[Theorem 3.10]{olberding_globalizing} that, if $R$ is an $h$-local Pr\"ufer domain and $I,J$ are divisorial ideals of $R$, then $I+J$ is also divisorial. We can extend this result to arbitrary star operations; we shall see a similar result in Proposition \ref{prop:pruf-somma-invt}.
\begin{prop}\label{prop:hloc-pruf-somma}
Let $R$ be an $h$-local Pr\"ufer domain, let $\ast\in\mathrm{Star}(R)$ and let $I,J$ be $\ast$-closed ideals. Then, $I+J$ is $\ast$-closed.
\end{prop}
\begin{proof}
Since $R$ is $h$-local, $I+J$ is $\ast$-closed if and only if $(I+J)R_M$ is $\ast_M$-closed for every $M\in\mathrm{Max}(R)$. However, since $R_M$ is a valuation domain, either $IR_M\subseteq JR_M$ or $JR_M\subseteq IR_M$; hence, $(I+J)R_M=IR_M+JR_M$ is equal either to $IR_M$ or to $JR_M$, both of which are $\ast_M$-closed.
\end{proof}
This result does not hold if we drop the hypothesis that $R$ is $h$-local: in fact, let $R=\ins{Z}+X\ins{Q}[[X]]$ and let $R_p:=\ins{Z}[1/p]+X\ins{Q}[[X]]$ for each prime number $p$. Consider the star operation
\begin{equation*}
\ast:I\mapsto(R:(R:I))\cap(R_2:(R_2:I))\cap(R_3:(R_3:I)).
\end{equation*}
Then, $R_2$ and $R_3$ are $\ast$-closed; we claim that $R_2+R_3$ is not. Indeed, if $T$ is equal to $R$, $R_2$ or $R_3$, then $(T:(R_2+R_3))=X\ins{Q}[[X]]$, and thus $(R_2+R_3)^\ast=\ins{Q}[[X]]$; however, $R_2+R_3=(\ins{Z}[1/2]+\ins{Z}[1/3])+X\ins{Q}[[X]]$ does not contain rationals whose denominator is divisible by a prime different from 2 and 3 (for example, $1/5\notin R_2+R_3$), and thus $R_2+R_3\neq\ins{Q}[[X]]$.
The following can be seen as a sort of converse to Proposition \ref{prop:hloc-pruf-somma}.
\begin{prop}\label{prop:sommaintersez-prufer}
Let $R$ be a Pr\"ufer domain and suppose that $R$ is either:
\begin{enumerate}[(a)]
\item semilocal; or
\item locally finite and finite-dimensional.
\end{enumerate}
Then, the following are equivalent:
\begin{enumerate}[(i)]
\item $R$ is $h$-local;
\item for every $\ast\in\mathrm{Star}(R)$, $I\in\mathcal{F}(R)\setminus\mathcal{F}^\ast(R)$ and $J\in\mathcal{F}(R)$, at least one of $I\cap J$ and $I+J$ is not $\ast$-closed;
\item for every $I\in\mathcal{F}(R)\setminus\mathcal{F}^v(R)$ and $J\in\mathcal{F}(R)$, at least one of $I\cap J$ and $I+J$ is not divisorial.
\end{enumerate}
\end{prop}
\begin{proof}
(i $\Longrightarrow$ ii) For every $M\in\mathrm{Max}(R)$, $(I+J)R_M=IR_M+JR_M=\max\{IR_M,JR_M\}$, while $(I\cap J)R_M=IR_M\cap JR_M=\min\{IR_M,JR_M\}$. Since $I$ is not $\ast$-closed, and $\{R_M\mid M\in\mathrm{Max}(R)\}$ is a Jaffard family of $R$, there is a maximal ideal $N$ such that $IR_N$ is not $\ast_N$-closed; however, at least one of $(I+J)R_N$ and $(I\cap J)R_N$ is equal to $IR_N$, and thus at least one is not $\ast_N$-closed. Therefore, at least one between $I+J$ and $I\cap J$ is not $\ast$-closed.
(ii $\Longrightarrow$ iii) is obvious.
(iii $\Longrightarrow$ i) Consider the standard decomposition $\Theta$ of $R$; then, (iii) holds for every member of $\Theta$ but, if $R$ is not $h$-local, there must be a $T\in\Theta$ that is not local. By Lemma \ref{lemma:prufer-jac}, there is a prime ideal $P$ of $T$ such that $\mathrm{Jac}(T/P)$ does not contain nonzero primes. Let $\Lambda$ be the standard decomposition of $D:=T/P$, let $Z\in\Lambda$ and define $Z':=\bigcap_{W\in\Lambda\setminus\{Z\}}W=\ortog{\Lambda}(Z)$. We have $Z\cap Z'=D$ and, for every maximal ideal $M$ of $D$, either $ZD_M=K$ or $Z'D_M=K$. Therefore, $Z+Z'=\bigcap_{M\in\mathrm{Max}(D)}(Z+Z')D_M=K$.
By Proposition \ref{prop:star-semistar}, the $v$-operation on $T$ corresponds to a (semi)star operation on $D$ such that $A^\ast=K$ if $A$ is not a fractional ideal of $D$; therefore, both $\phi^{-1}(Z)$ and $\phi^{-1}(Z')$ are not divisorial, but both $\phi^{-1}(Z\cap Z')=T$ and $\phi^{-1}(Z+Z')=T_P$ are (where $\phi:T\longrightarrow D$ is the quotient map). This is a contradiction, and $R$ must be $h$-local.
\end{proof}
\subsection{Stability}
Recall that a star operation $\ast$ is \emph{stable} if it distributes over finite intersections, i.e., if $(I\cap J)^\ast=I^\ast\cap J^\ast$. In this section, we study stable operations on Pr\"ufer domains; we start with an analogue of Proposition \ref{prop:star-semistar}.
\begin{prop}\label{prop:star-semistar_stab}
Preserve the notation and the hypotheses of Proposition \ref{prop:star-semistar}. There is a bijection between $\mathrm{Star}_{\mathrm{stab}}(R)$ and $\mathrm{Star}_{\mathrm{stab}}(R/P)$.
\end{prop}
\begin{proof}
We first show that the bijections of Proposition \ref{prop:star-semistar} become bijections on the subsets of stable operations; let thus $\ast$ be a semistar operation in the first set and $\sharp$ be the corresponding operation on $\mathrm{(S)Star}(R/P)$. Let $\phi:R\longrightarrow R/P$ be the quotient map.
Suppose that $\ast$ is stable and let $I,J\in\mathbf{F}(R/P)$. Then, since $\phi$ is a bijection between the ideals comprised between $P$ and $R_P$ and $\mathbf{F}(R/P)$,
\begin{equation*}
\begin{array}{rcl}
(I\cap J)^\sharp & = & \phi\left[\phi^{-1}(I\cap J)^\ast\right]= \phi\left[\left(\phi^{-1}(I)\cap\phi^{-1}(J)\right)^\ast\right]=\\
& = & \phi\left[\phi^{-1}(I)^\ast\cap\phi^{-1}(J)^\ast\right]= \phi\left(\phi^{-1}(I)^\ast\right)\cap\phi\left(\phi^{-1}(J)^\ast\right)=\\
& = & I^\sharp\cap J^\sharp.
\end{array}
\end{equation*}
Therefore, $\sharp$ is stable.
Conversely, suppose $\sharp$ is stable and let $I,J\in\mathcal{F}(R)$. If $I$ and $J$ are divisorial, so is $I\cap J$; hence, $(I\cap J)^\ast=I\cap J=I^\ast\cap J^\ast$. Suppose (without loss of generality) that $I\neq I^v$. Then, there is an $\alpha$ such that $P\subseteq \alpha I\subseteq R_P$. Moreover, since $R$ is Pr\"ufer and $P$ is contained in every maximal ideal of $R$, every fractional ideal must be comparable with both $P$ and $R_P$: more precisely, if $\mathbf{v}$ is the valuation relative to $R_P$, and $L$ is an ideal, then either $\inf\mathbf{v}(L)=0$ (so that $P\subseteq L\subseteq R_P$), $\inf\mathbf{v}(L)$ exists and is nonzero (if positive, $L\subseteq P$, if negative, $R_P\subseteq L$) or $\mathbf{v}(L)$ has no infimum (so that if $\mathbf{v}(L)$ contains negative values then $R_P\subseteq L$, while $L\subseteq P$ in the other case). Therefore, we can distinguish three cases:
\begin{itemize}
\item $\alpha J\subseteq P$: then, $\alpha J\subseteq \alpha I$, and thus $(I\cap J)^\ast=J^\ast=I^\ast\cap J^\ast$;
\item $R_P\subseteq\alpha J$: then, $\alpha I\subseteq\alpha J$, and thus $(I\cap J)^\ast=I^\ast=I^\ast\cap J^\ast$;
\item $P\subseteq\alpha J\subseteq R_P$. Let $I_0:=\alpha I$ and $J_0:=\alpha J$. Then,
\begin{equation*}
\begin{array}{rcl}
(I_0\cap J_0)^\ast & = &\phi^{-1}\left(\phi(I_0\cap J_0)^\sharp\right)= \phi^{-1}\left(\phi(I_0)^\sharp\cap\phi(J_0)^\sharp\right)=\\
& = & \phi^{-1}(\phi(I_0)^\sharp)\cap\phi^{-1}(\phi(J_0)^\sharp)=I_0^\ast\cap J_0^\ast.
\end{array}
\end{equation*}
Hence,
\begin{equation*}
\begin{array}{rcl}
(I\cap J)^\ast & = & \alpha^{-1}(\alpha(I\cap J)^\ast)=\alpha^{-1}(I_0\cap J_0)^\ast=\\
& = & \alpha^{-1}(I_0^\ast\cap J_0^\ast)=\alpha^{-1}I_0^\ast\cap\alpha^{-1}J_0^\ast=I^\ast\cap J^\ast.
\end{array}
\end{equation*}
\end{itemize}
In all cases, $\ast$ distributes over finite intersection, and thus $\ast$ is stable.
Therefore, there is an order-preserving bijection between $\mathrm{Star}_{\mathrm{stab}}(R)$ and $\mathrm{(S)Star}_{\mathrm{st}}(R/P)$. However, for every domain $D$, the restriction map $\mathrm{(S)Star}_{\mathrm{st}}(D)\longrightarrow\mathrm{Star}_{\mathrm{stab}}(D)$ is a bijection (see \cite[Discussion after Proposition 3.10]{surveygraz} or \cite[Proposition 3.4]{spettrali-eab}), and thus $\mathrm{Star}_{\mathrm{stab}}(R)$ corresponds bijectively with $\mathrm{Star}_{\mathrm{stab}}(R/P)$. The claim follows.
\end{proof}
We say that a star (or semistar) operation $\ast$ \emph{distributes over arbitrary intersections} if, whenever $\{I_\alpha\}_{\alpha\in A}$ is a family of ideals with nonzero intersection, we have $\left(\bigcap_{\alpha\in A}I_\alpha\right)^\ast=\bigcap_{\alpha\in A}I_\alpha^\ast$.
\begin{lemma}\label{lemma:intersez-valuation}
If $V$ is a valuation domain, the $v$-operation distributes over arbitrary intersections.
\end{lemma}
\begin{proof}
Let $\mathcal{A}:=\{I_\alpha\}_{\alpha\in A}$ be a family of ideals of $V$ with nonzero intersection. If $\mathcal{A}$ has a minimum $I_{\overline{\alpha}}$, then $I_{\overline{\alpha}}^v\subseteq I_\beta^v$ for every $\beta\in A$, and thus $\left(\bigcap_{\alpha\in A}I_\alpha\right)^v=I_{\overline{\alpha}}^v=\bigcap_{\alpha\in A}I_\alpha^v$.
Suppose $\mathcal{A}$ does not have a minimum: since $\left(\bigcap_{\alpha\in A}I_\alpha\right)^v\subseteq I_\alpha^v$ for every $\alpha\in A$, we have $\left(\bigcap_{\alpha\in A}I_\alpha\right)^v\subseteq\bigcap_{\alpha\in A}I_\alpha^v$.
Let $x\in\bigcap_{\alpha\in A}I_\alpha^v$: if $x\in\bigcap_{\alpha\in A}I_\alpha$ then $x\in\left(\bigcap_{\alpha\in A}I_\alpha\right)^v$. On the other hand, if $x\notin\bigcap_{\alpha\in A}I_\alpha$, then there is an $\overline{\alpha}$ such that $x\in I_{\overline{\alpha}}^v\setminus I_{\overline{\alpha}}$, i.e., $\mathbf{v}(x)=\inf \mathbf{v}(I_{\overline{\alpha}})$ (where $\mathbf{v}$ is the valuation associated to $V$ and $\mathbf{v}(J):=\{\mathbf{v}(j)\mid j\in J\}$). However, since $\mathcal{A}$ has no minimum, there are $\beta,\gamma\in A$ such that $I_{\overline{\alpha}}\supsetneq I_\beta\supsetneq I_\gamma$; in particular, $\mathbf{v}(x)>\inf \mathbf{v}(I_\gamma)$, and thus $x\notin I_\gamma^v$, which is absurd. Therefore, $x\in\bigcap_{\alpha\in A}I_\alpha$.
\end{proof}
The following proposition may also be proved, in a slightly more general setting, using a different, more direct, approach; see \cite{stable_prufer}.
\begin{prop}\label{prop:prufer-stab}
Let $R$ be a Pr\"ufer domain and suppose that $R$ is either:
\begin{enumerate}[(a)]
\item semilocal; or
\item locally finite and finite-dimensional.
\end{enumerate}
Then, every stable star operation $\ast$ on $R$ is in the form
\begin{equation}\label{eq:stablepruf}
I\mapsto\bigcap_{P\in\mathrm{Max}(R)}(IR_P)^{\ast^{(P)}},
\end{equation}
where each $\ast^{(P)}\in\mathrm{Star}(R_P)$. In particular, $\mathrm{Star}_{\mathrm{stab}}(R)$ is order-isomorphic to $\prod\{\mathrm{Star}(R_P)\mid P\in\mathrm{Max}(R)\}$.
\end{prop}
\begin{proof}
For any ring $A$, let $\mathcal{M}_A$ be the set of maximal ideals of $A$ that are not divisorial.
Suppose first that $R$ is semilocal, and let $\Delta$ be the set of star operations defined as in \eqref{eq:stablepruf}. By Lemma \ref{lemma:intersez-valuation}, every star operation in $\Delta$ is stable; moreover, a maximal ideal $P$ is $\ast$-closed if and only if $\ast^{(P)}$ is the identity, and thus $|\Delta|=2^{|\mathcal{M}_R|}$. Since $\mathrm{Star}(R)$ is finite \cite[Theorem 5.3]{hmp_finite}, it is enough to show that the cardinalities of $\Delta$ and $\mathrm{Star}_{\mathrm{stab}}(R)$ are equal.
We proceed by induction on $n:=|\mathrm{Max}(R)|$; if $n=1$ the claim follows from Lemma \ref{lemma:intersez-valuation}. Suppose it holds up to $n-1$.
Let $\Theta$ be the standard decomposition of $R$. If $\Theta$ is not trivial, then by the inductive hypothesis the claim holds for every member of $\Theta$; by Theorem \ref{teor:star-jaffard}, $M\in\mathrm{Max}(R)$ is divisorial over $R$ if and only if $MT$ is divisorial over $T$ (where $T\in\Theta$ is such that $MT\neq T$), and thus $|\mathcal{M}_R|=\sum_{T\in\Theta}|\mathcal{M}_T|$. Since, by Theorem \ref{teor:jaffard-corresp}, we have $\mathrm{Star}_{\mathrm{stab}}(R)\simeq\prod\{\mathrm{Star}_{\mathrm{stab}}(T)\mid T\in\Theta\}$, it follows that the claim holds also for $R$.
Suppose $\Theta$ is trivial: then, $\mathrm{Jac}(R)$ must contain a nonzero prime ideal $P$ (and, by Lemma \ref{lemma:prufer-jac}, we can suppose $P$ is maximal with these properties). By Proposition \ref{prop:star-semistar_stab}, $|\mathrm{Star}_{\mathrm{stab}}(R)|=|\mathrm{Star}_{\mathrm{stab}}(R/P)|$; moreover, by Proposition \ref{prop:star-semistar} $\mathcal{M}_R$ and $\mathcal{M}_{R/P}$ have the same cardinality. By the maximality of $P$, $R/P$ has a nontrivial standard decomposition; by induction, the claim holds for every member of the decomposition, and thus, with the same reasoning as above, we see that $|\mathrm{Star}_{\mathrm{stab}}(R/P)|=2^{|\mathcal{M}_{R/P}|}$. Putting all together we have $|\mathrm{Star}_{\mathrm{stab}}(R)|=2^{|\mathcal{M}_R|}$ and so $\mathrm{Star}_{\mathrm{stab}}(R)=\Delta$ holds for every semilocal Pr\"ufer domain.
If $R$ is locally finite and finite-dimensional, then $\mathrm{Star}_{\mathrm{stab}}(R)=\prod\{\mathrm{Star}_{\mathrm{stab}}(T)\mid T\in\Theta\}$, where $\Theta$ is the standard decomposition of $R$. Each $T\in\Theta$ is semilocal, and thus we can apply the previous part of the proof; the claim follows.
\end{proof}
\begin{prop}\label{prop:intersez-prufer}
Let $R$ be a Pr\"ufer domain and suppose that $R$ is either:
\begin{enumerate}[(a)]
\item semilocal; or
\item locally finite and finite-dimensional.
\end{enumerate}
Then, the following are equivalent:
\begin{enumerate}[(i)]
\item $R$ is $h$-local;
\item every star operation on $R$ distributes over arbitrary intersections;
\item every star operation on $R$ distributes over finite intersections;
\item the $v$-operation on $R$ distributes over arbitrary intersections;
\item the $v$-operation on $R$ distributes over finite intersections;
\item for every fractional ideal $I$ of $R$, $I^v=\bigcap\{(IR_M)^{v^{(R_M)}}\mid M\in\mathrm{Max}(R)\}$.
\end{enumerate}
\end{prop}
\begin{proof}
(i $\Longrightarrow$ ii) follows from Theorem \ref{teor:star-jaffard}, Lemma \ref{lemma:intersez-ritorno} and Lemma \ref{lemma:intersez-valuation}, since $\{R_M\mid M\in\mathrm{Max}(R)\}$ is a Jaffard family if $R$ is $h$-local. (ii $\Longrightarrow$ iii $\Longrightarrow$ v) and (ii $\Longrightarrow$ iv $\Longrightarrow$ v) are clear, while (v $\iff$ vi) follows from Proposition \ref{prop:prufer-stab}; we only have to show that (v $\Longrightarrow$ i).
Suppose (v) holds and let $\Theta$ be the standard decomposition of $R$. If $R$ is not $h$-local, then a branch $T\in\Theta$ is not local; the hypotheses on $R$ guarantee that there is a nonzero prime ideal of $T$ contained in every maximal ideal. Therefore, we can apply Lemma \ref{lemma:prufer-jac} and find a prime ideal $Q$ such that $\mathrm{Jac}(T/Q)$ contains no nonzero prime ideals. By Proposition \ref{prop:star-semistar}, there is an order-preserving bijection between $\mathrm{Star}(T)$ and $\mathrm{(S)Star}(T/Q)$, where the $v$-operation on $T$ corresponds to the semistar operation $\ast$ which is the trivial extension of the $v$-operation on $T/Q$.
Since $\mathrm{Jac}(T/Q)$ does not contain nonzero primes, $T/Q$ admits a nontrivial Jaffard family $\Lambda$; let $Z\in\Lambda$, and define $Z':=\bigcap_{W\in\Lambda\setminus\{Z\}}W=\ortog{\Lambda}(Z)$. Then, $Z$ and $Z'$ are not fractional ideals of $T/Q$, and thus $Z^\ast=Z'^\ast=F$, where $F$ is the quotient field of $T/Q$; on the other hand, $Z\cap Z'=T/Q$ and thus $(Z\cap Z')^\ast=T/Q$.
If $\pi:T_Q\longrightarrow T_Q/QT_Q$ is the canonical quotient, it follows that $\pi^{-1}(Z)^v=\pi^{-1}(Z')^v=T_Q$, while $\pi^{-1}(Z\cap Z')^v=\pi^{-1}(T/Q)^v=T^v=T$. Since $T$ is not local, $T\neq T_Q$, and thus $v$ does not distribute over finite intersections, against the hypothesis.
\end{proof}
\section{The class group}\label{sect:jaff-InvCl}
Let $\ast$ be a star operation on $R$. An ideal $I$ of $R$ is \emph{$\ast$-invertible} if $(I(R:I))^\ast=R$; the set of $\ast$-invertible $\ast$-ideals, indicated with $\mathrm{Inv}^\ast(R)$, is a group under the natural ``$\ast$-product'' $I\times_\ast J:=(IJ)^\ast$ \cite{jaffard_systeme,griffin_vmultiplication_1967,zafrullah_tinvt,halterkoch_libro}. Any $\ast$-invertible $\ast$-ideal is divisorial \cite[Theorem 1.1 and Observation C]{zafrullah_tinvt} and, if $\ast_1\leq\ast_2$, there is a natural inclusion $\mathrm{Inv}^{\ast_1}(R)\subseteq\mathrm{Inv}^{\ast_2}(R)$.
\begin{prop}\label{prop:jaffard-invt}
Let $R$ be an integral domain and $\Theta$ be a Jaffard family on $R$. The map
\begin{equation*}
\begin{aligned}
\Gamma\colon \mathrm{Inv}^\ast(R) & \longrightarrow \bigoplus_{T\in\Theta}\mathrm{Inv}^{\ast_T}(T)\\
I & \longmapsto (IT)_{T\in\Theta}
\end{aligned}
\end{equation*}
is well-defined and a group isomorphism.
\end{prop}
\begin{proof}
Define a map
\begin{equation*}
\begin{aligned}
\widehat{\Gamma}\colon \mathcal{F}(R) & \longrightarrow \prod_{T\in\Theta}\mathcal{F}(T)\\
I & \longmapsto (IT)_{T\in\Theta}
\end{aligned}
\end{equation*}
For every $\ast$-ideal $I$, $\widehat{\Gamma}(I)=(IT)$ is a sequence such that $IT$ is $\ast_T$-closed. Moreover, if $I$ is $\ast$-invertible, then $(I(R:I))^\ast=R$ and thus $(I(R:I)T)^{\ast_T}=T$, so that $IT$ is $\ast_T$-invertible. Thus $\widehat{\Gamma}(\mathrm{Inv}^\ast(R))\subseteq\prod_{T\in\Theta}\mathrm{Inv}^{\ast_T}(T)$, and indeed $\widehat{\Gamma}(\mathrm{Inv}^\ast(R))\subseteq\bigoplus_{T\in\Theta}\mathrm{Inv}^{\ast_T}(T)$ since $\Theta$ is locally finite, by Theorem \ref{teor:star-jaffard}. Hence, $\Gamma$ is well-defined, since it is the restriction of $\widehat{\Gamma}$ to $\mathrm{Inv}^\ast(R)$.
It is straightforward to verify that $\Gamma$ is a group homomorphism, and since $I=\bigcap_{T\in\Theta}IT$, we have that $\Gamma$ (or even $\widehat{\Gamma}$) is injective.
We need only to show that $\Gamma$ is surjective. Let $(I_T)\in\bigoplus_{T\in\Theta}\mathrm{Inv}^{\ast_T}(T)$, and define $I:=\bigcap I_T$. Since $I_T=T$ for all but a finite number of elements of $\Theta$, say $T_1,\ldots,T_n$, there are $d_1,\ldots,d_n\in R$ such that $d_iI_{T_i}\subseteq T_i$. Defining $d:=d_1\cdots d_n$, we have $dI_T\subseteq T$ for every $T$, and thus $dI\subseteq\bigcap_{T\in\Theta}T=R$, so that $I$ is indeed a fractional ideal of $R$. Moreover, since $I_T$ is $\ast_T$-closed, $I_T\cap R$ is $\ast$-closed, and thus $I$, being the intersection of a family of $\ast$-closed ideals, is $\ast$-closed. It is also $\ast$-invertible, since
\begin{equation*}
(I(R:I))^\ast=\bigcap_{T\in\Theta}(I(R:I)T)^{\ast_T}=\bigcap_{T\in\Theta}(IT(T:IT))^{\ast_T}=\bigcap_{T\in\Theta}T=R.
\end{equation*}
Therefore, $(I_T)=\Gamma(I)\in\Gamma(\mathrm{Inv}^\ast(R))$, and thus $\Gamma$ is an isomorphism.
\end{proof}
The set of nonzero principal fractional ideals forms a subgroup of $\mathrm{Inv}^\ast(R)$, denoted by $\mathrm{Prin}(R)$. The quotient between $\mathrm{Inv}^\ast(R)$ and $\mathrm{Prin}(R)$ is called the \emph{$\ast$-class group} of $R$ \cite{anderson_generalCG_1988}, and it is denoted by $\mathrm{Cl}^\ast(R)$. If $\ast_1\leq\ast_2$, there is an injective homomorphism $\mathrm{Cl}^{\ast_1}(R)\longrightarrow\mathrm{Cl}^{\ast_2}(R)$. Of particular interest are the class group of the identity star operation (usually called the \emph{Picard group} of $R$, denoted by $\mathrm{Pic}(R)$) and the $t$-class group, which is linked to the factorization properties of the domain (see for example \cite{samuel_factoriel,bouvier_zaf_1988,zafrullah_tinvt}). The quotient between $\mathrm{Cl}^\ast(R)$ and $\mathrm{Pic}(R)$ is called the \emph{$\ast$-local class group} of $R$, and it is indicated by $G_\ast(R)$ \cite{anderson_generalCG_1988}.
\begin{teor}\label{teor:jaffard-clgroup}
Let $R$ be an integral domain and let $\Theta$ be a Jaffard family on $R$. Then, the map
\begin{equation*}
\begin{aligned}
\Lambda\colon G_\ast(R) & \longrightarrow \bigoplus_{T\in\Theta}G_{\ast_T}(T)\\
[I] & \longmapsto ([IT])_{T\in\Theta}
\end{aligned}
\end{equation*}
is well-defined and a group isomorphism.
\end{teor}
\begin{proof}
By Proposition \ref{prop:jaffard-invt}, there are two isomorphisms $\Gamma^\ast:\mathrm{Inv}^\ast(R)\longrightarrow \bigoplus_{T\in\Theta}\mathrm{Inv}^{\ast_T}(T)$ and $\Gamma^d:\mathrm{Inv}^d(R)\longrightarrow \bigoplus_{T\in\Theta}\mathrm{Inv}^{d_T}(T)$.
Consider the chain of maps
\begin{equation*}
\mathrm{Inv}^\ast(R)\xrightarrow{\Gamma^\ast}\bigoplus_{T\in\Theta}\mathrm{Inv}^{\ast_T}(T) \xrightarrow{\pi}\bigoplus_{T\in\Theta}\frac{\mathrm{Inv}^{\ast_T}(T)}{\mathrm{Inv}^{d_T}(T)}
\end{equation*}
where $\pi$ is the componentwise quotient; then, the kernel of $\pi$ is exactly $\bigoplus_{T\in\Theta}\mathrm{Inv}^{d_T}(T)$. However, $\Gamma^\ast$ and $\Gamma^d$ coincide on $\mathrm{Inv}^d(R)\subseteq\mathrm{Inv}^\ast(R)$; hence,
\begin{equation*}
\ker(\pi\circ\Gamma^\ast)=(\Gamma^d)^{-1}(\ker\pi)=\mathrm{Inv}^d(R).
\end{equation*}
Therefore, there is an isomorphism $\displaystyle{\frac{\mathrm{Inv}^\ast(R)}{\mathrm{Inv}^d(R)}\simeq\bigoplus_{T\in\Theta}\frac{\mathrm{Inv}^{\ast_T}(T)}{\mathrm{Inv}^{d_T}(T)}}$. However, for an arbitrary domain $A$ and an arbitrary $\sharp\in\mathrm{Star}(A)$, we have $\mathrm{Prin}(A)\subseteq\mathrm{Inv}^d(A)\subseteq\mathrm{Inv}^\sharp(A)$, and thus
\begin{equation*}
\frac{\mathrm{Inv}^\sharp(A)}{\mathrm{Inv}^d(A)}\simeq\frac{\mathrm{Inv}^\sharp(A)/\mathrm{Prin}(A)}{\mathrm{Inv}^d(A)/\mathrm{Prin}(A)}\simeq\frac{\mathrm{Cl}^\sharp(A)}{\mathrm{Pic}(A)}=G_\sharp(A)
\end{equation*}
so that $\Lambda$ becomes an isomorphism between $G_\ast(R)$ and $\bigoplus_{T\in\Theta}G_{\ast_T}(T)$, as claimed.
\end{proof}
\subsection{The class group of a Pr\"ufer domain}\label{sect:prufer-Cl}
If $\ast$ is a (semi)star operation, we can define the $\ast$-class group by mirroring the definition of the case of star operations: we say that $I$ is $\ast$-invertible if $(I(R:I))^\ast=R$, and we define $\mathrm{Cl}^\ast(R)$ as the quotient between the group of the $\ast$-invertible $\ast$-ideals (endowed with the $\ast$-product) and the subgroup of principal ideals. Since $(R:I)=(0)$ if $I\in\mathbf{F}(R)\setminus\mathcal{F}(R)$, every $\ast$-invertible ideal is a fractional ideal, and thus $\mathrm{Cl}^\ast(R)$ coincides with $\mathrm{Cl}^{\ast'}(R)$, where $\ast':=\ast|_{\mathcal{F}(R)}$ is the restriction of $\ast$.
The first result of this section is that Proposition \ref{prop:star-semistar} can be extended to the class group.
\begin{prop}\label{prop:star-semistar-clgroup}
Let $R$ be a Pr\"ufer domain and let $P$ be a nonzero prime ideal of $R$ contained in every maximal ideal. Suppose also that $P\notin\mathrm{Max}(R)$. Let $\ast\in\mathrm{Star}(R)$ and let $\sharp$ be the corresponding (semi)star operation on $D:=R/P$. Then, $\mathrm{Cl}^\ast(R)$ is naturally isomorphic to $\mathrm{Cl}^\sharp(D)$.
\end{prop}
\begin{proof}
Let $\pi:R_P\longrightarrow F=Q(D)$ be the quotient map, and let $I$ be a fractional ideal of $R$ contained between $P$ and $R_P$. We claim that $\pi((R:I))=(D:\pi(I))$. In fact, if $y\in\pi((R:I))$ then $y=\pi(x)$ for some $x\in(R:I)$, and thus $y\pi(I)=\pi(x)\pi(I)=\pi(xI)\subseteq\pi(R)=D$, and thus $y\in(D:\pi(I))$. Conversely, if $y\in(D:\pi(I))$ and $y=\pi(x)$ then $y\pi(I)\subseteq D$, i.e., $\pi(xI)\subseteq D$. By the correspondence between $R$-submodules of $R_P$ and $D$-submodules of $F$ we have $xI\subseteq R$ and $y\in\pi((R:I))$.
Let $J=\pi(I)$ be a $\sharp$-invertible ideal of $D$. Then, $(J(D:J))^\sharp=D$, and thus
\begin{equation*}
\begin{array}{rcl}
R & = & \pi^{-1}\left((J(D:J))^\sharp\right)=\pi^{-1}(J(D:J))^\ast=\\
& = & \left(\pi^{-1}(J)\pi^{-1}(D:J)\right)^\ast=(I(R:I))^\ast.
\end{array}
\end{equation*}
Therefore, $I$ is $\ast$-invertible, and there is an injective map $\theta:\mathrm{Inv}^\sharp(D)\longrightarrow \mathrm{Inv}^\ast(R)$. It is also straightforward to see that $\theta$ is a group homomorphism.
The well-definedness of the map $\ast\mapsto\ast_\phi$ implies that, if $J,J'$ are $D$-submodules of $F$, and $I:=\pi^{-1}(J)$, $I':=\pi^{-1}(J')$, then $J=zJ'$ for some $z\in F$ if and only if $I=wI'$ for some $w\in K$. Therefore, $\theta$ induces an injective map $\overline{\theta}:\mathrm{Cl}^\sharp(D)\longrightarrow \mathrm{Cl}^\ast(R)$, which is clearly a group homomorphism.
Let now $I$ be a $\ast$-invertible ideal of $R$. Then, $I$ is $v$-invertible, and thus $(I:I)=R$ \cite[Proposition 34.2(2)]{gilmer}. In particular, $I$ is not an $R_P$-module, and thus the set $\mathbf{v}(I)$ has an infimum $\alpha$, where $\mathbf{v}$ is the valuation associated to $R_P$. If $a$ is an element of valuation $\alpha$, then $P\subsetneq a^{-1}I\subsetneq R_P$; hence, $a^{-1}I=\pi^{-1}(\pi(a^{-1}I))$ and $[I]=\overline{\theta}([\pi(a^{-1}I)])$, and in particular $[I]$ is in the image of $\overline{\theta}$. Since $I$ was arbitrary, $\overline{\theta}$ is surjective and $\mathrm{Cl}^\sharp(D)\simeq\mathrm{Cl}^\ast(R)$.
\end{proof}
\begin{teor}\label{teor:clgroup-prufer}
Let $R$ be a Pr\"ufer domain, and suppose that $R$ is either:
\begin{enumerate}[(a)]
\item semilocal; or
\item locally finite and finite-dimensional.
\end{enumerate}
Consider a star operation $\ast$ on $R$. Then,
\begin{equation*}
G_\ast(R)\simeq\bigoplus_{\substack{M\in\mathrm{Max}(R)\\ M\neq M^\ast}}\mathrm{Cl}^v(R_M).
\end{equation*}
\end{teor}
\begin{proof}
We start by considering the case of $R$ semilocal, and we proceed by induction on the number $n$ of maximal ideals of $R$. Note that, in this case, $\mathrm{Pic}(R)=(0)$ and so $G_\ast(R)=\mathrm{Cl}^\ast(R)$. If $n=1$, the conclusion is trivial, since $\ast\neq v$ if and only if $M\neq M^\ast$.
Suppose $n>1$ and let $\Theta$ be the standard decomposition of $R$ (which is a Jaffard family by Proposition \ref{prop:Tlambda}). By Theorem \ref{teor:jaffard-clgroup}, and using the fact that $\mathrm{Pic}(R)=(0)=\mathrm{Pic}(T)$ for every $T\in\Theta$, we have $\mathrm{Cl}^\ast(R)\simeq\bigoplus_{T\in\Theta}\mathrm{Cl}^{\ast_T}(T)$. Moreover, since a maximal ideal $M$ of $R$ is $\ast$-closed if and only if $MT$ is $\ast_T$-closed, by induction it suffices to prove the theorem when the standard decomposition of $R$ is $\{R\}$.
In this case, $\mathrm{Jac}(R)$ contains nonzero primes, and by Lemma \ref{lemma:prufer-jac} we can find a prime ideal $Q\subseteq\mathrm{Jac}(R)$ such that $\mathrm{Jac}(R/Q)$ does not contain nonzero prime ideals. Let $A:=R/Q$.
The standard decomposition $\Theta'$ of $A$ is nontrivial, and thus every $B\in\Theta'$ is a semilocal Pr\"ufer domain with less than $n$ maximal ideals. Moreover, by Proposition \ref{prop:star-semistar-clgroup}, $\mathrm{Cl}^\ast(R)\simeq\mathrm{Cl}^\sharp(A)$, where $\sharp$ is the restriction to $\mathcal{F}(A)$ of the (semi)star operation corresponding to $\ast$. Therefore, by the inductive hypothesis,
\begin{equation*}
\mathrm{Cl}^\sharp(A)\simeq\bigoplus_{B\in\Theta'}\mathrm{Cl}^v(B)\simeq \bigoplus_{B\in\Theta'}\bigoplus_{\substack{N\in\mathrm{Max}(B)\\ N\neq N^{\sharp_B}}}\mathrm{Cl}^v(B_N)\simeq\bigoplus_{\substack{N\in\mathrm{Max}(A)\\ N\neq N^\sharp}}\mathrm{Cl}^v(A_N).
\end{equation*}
Thus,
\begin{equation*}
\mathrm{Cl}^\ast(R)\simeq\mathrm{Cl}^\sharp(A)\simeq\bigoplus_{\substack{N\in\mathrm{Max}(A)\\ N\neq N^\sharp}}\mathrm{Cl}^v(A_N).
\end{equation*}
However, if $M$ is the maximal ideal of $R$ which corresponds to the maximal ideal $N$ of $A$, then $R_M/QR_M\simeq A_N$, and thus by \cite[Theorem 3.5]{afz_vclass} we have $\mathrm{Cl}^v(R_M)\simeq\mathrm{Cl}^v(A_N)$; the claim follows.
Suppose now $R$ is finite-dimensional and of finite character, and let $\Theta$ be the standard decomposition of $R$. By Lemma \ref{lemma:dimfin}, there is a bijective correspondence between $\Theta$ and the height 1 prime ideals of $R$, and every $T\in\Theta$ is semilocal. Hence, by Proposition \ref{prop:Tlambda} and by the previous case,
\begin{equation*}
G_\ast(R)\simeq\bigoplus_{T\in\Theta}G_{\ast_T}(T)\simeq \bigoplus_{T\in\Theta}\mathrm{Cl}^{\ast_T}(T)\simeq \bigoplus_{T\in\Theta}\bigoplus_{\substack{M\in\mathrm{Max}(T)\\ M\neq M^{\ast_T}}}\mathrm{Cl}^v(T_M).
\end{equation*}
The conclusion now follows since $T_M=R_N$ (where $N:=M\cap R$) and $N=N^\ast$ if and only if $M=M^{\ast_T}$.
\end{proof}
\begin{cor}\label{cor:clgroup-bezout}
Let $R$ be a Bézout domain, and suppose that $R$ is either:
\begin{enumerate}[(a)]
\item semilocal; or
\item finite-dimensional and of finite character.
\end{enumerate}
Let $\ast$ be a star operation on $R$. Then,
\begin{equation*}
\mathrm{Cl}^\ast(R)\simeq\bigoplus_{\substack{M\in\mathrm{Max}(R)\\ M\neq M^\ast}}\mathrm{Cl}^v(R_M).
\end{equation*}
\end{cor}
\begin{proof}
It is enough to note that $\mathrm{Pic}(R)=0$ if $R$ is a Bézout domain, so that $G_\ast(R)=\mathrm{Cl}^\ast(R)$ for every $\ast\in\mathrm{Star}(R)$, and then apply Theorem \ref{teor:clgroup-prufer}.
\end{proof}
\begin{cor}
Let $R$ be a Bézout domain, and suppose that $R$ is either
\begin{enumerate}[(a)]
\item semilocal; or
\item finite-dimensional and of finite character.
\end{enumerate}
Let $S$ be a multiplicatively closed subset of $R$. Then, there is a natural surjective group homomorphism $\mathrm{Cl}^v(R)\longrightarrow \mathrm{Cl}^v(S^{-1}R)$, $[I]\mapsto[S^{-1}I]$.
\end{cor}
\begin{proof}
Let $\Delta:=\{M\in\mathrm{Max}(R):M\cap S=\emptyset\}$. Then, for every $M\in\Delta$, $R_M=(S^{-1}R)_{S^{-1}M}$, and thus the isomorphism of Theorem \ref{teor:clgroup-prufer} reduces to a surjective map $\mathrm{Cl}^v(R)\longrightarrow \bigoplus_{M\in\Delta}\mathrm{Cl}^v(R_M)\simeq\mathrm{Cl}^v(S^{-1}R)$, where the last equality comes from the fact that the maximal ideals of $S^{-1}R$ are the extensions of the ideals belonging to $\Delta$.
\end{proof}
Therefore, under each case of Theorem \ref{teor:clgroup-prufer}, the determination of $G_\ast(R)$ is reduced to the calculation of $\mathrm{Cl}^v(V)$, where $V$ is a valuation domain. In the case where the maximal ideal $M$ of $V$ is \emph{branched} (that is, if there is an $M$-primary ideal of $V$ different from $M$, or equivalently if there is a prime ideal $P\subsetneq M$ such that there is no prime ideal properly contained between $P$ and $M$ \cite[Theorem 17.3]{gilmer}), this group has been calculated in \cite[Corollaries 3.6 and 3.7]{afz_vclass}. Indeed, if $P$ is the prime ideal directly below $M$, and $H$ is the value group of $V/P$ (represented as a subgroup of $\ins{R}$), then
\begin{equation*}
\mathrm{Cl}^v(V)\simeq\begin{cases}
0 & \text{~if~}H\simeq\ins{Z}\\
\ins{R}/H & \text{~otherwise}.
\end{cases}
\end{equation*}
In particular, we have the following.
\begin{cor}
Let $R$ be a Bézout domain, and suppose that $R$ is either:
\begin{enumerate}[(a)]
\item semilocal; or
\item finite-dimensional and of finite character.
\end{enumerate}
For every $\ast\in\mathrm{Star}(R)$, $\mathrm{Cl}^\ast(R)$ is an injective group (equivalently, an injective $\ins{Z}$-module).
\end{cor}
\begin{proof}
By Corollary \ref{cor:clgroup-bezout} and the previous discussion, $\mathrm{Cl}^\ast(R)\simeq\bigoplus\ins{R}/H_\alpha$, for a family $\{H_\alpha:\alpha\in A\}$ of additive subgroups of $\ins{R}$. Each $\ins{R}/H_\alpha$ is a divisible group, and thus so is their direct sum; however, a divisible group is injective, and thus so is $\mathrm{Cl}^\ast(R)$.
\end{proof}
We end with a result similar in spirit to Proposition \ref{prop:hloc-pruf-somma}.
\begin{prop}\label{prop:pruf-somma-invt}
Let $R$ be a Pr\"ufer domain and suppose that $R$ is either:
\begin{enumerate}[(a)]
\item semilocal; or
\item finite-dimensional and of finite character.
\end{enumerate}
Let $\ast\in\mathrm{Star}(R)$. If $I,J\in\mathrm{Inv}^\ast(R)$, then $I+J\in\mathrm{Inv}^\ast(R)$.
\end{prop}
\begin{proof}
Suppose first that $R$ is semilocal, and proceed by induction on $n:=|\mathrm{Max}(R)|$. If $n=1$, then $R$ is a valuation domain and $I+J$ is equal either to $I$ or to $J$, and the claim is proved.
Suppose the claim is true up to rings with $n-1$ maximal ideals, let $|\mathrm{Max}(R)|=n$ and consider the standard decomposition $\Theta$ of $R$. By Proposition \ref{prop:jaffard-invt}, $I+J\in\mathrm{Inv}^\ast(R)$ if and only if $(I+J)T\in\mathrm{Inv}^{\ast_T}(T)$ for every $T\in\Theta$; therefore, if $\Theta$ is not trivial, we can use the inductive hypothesis. Suppose $\Theta$ is trivial: then, $\mathrm{Jac}(R)$ contains nonzero prime ideals, and by Lemma \ref{lemma:prufer-jac} there is a nonzero prime ideal $Q\subseteq\mathrm{Jac}(R)$ such that $\mathrm{Jac}(R/Q)$ does not contain nonzero primes. By Proposition \ref{prop:star-semistar-clgroup}, $I/Q$ and $J/Q$ are $\sharp$-invertible $\sharp$-ideals of $R/Q$ (where $\sharp$ is the (semi)star operation induced by $\ast$), and in particular $I/Q$ and $J/Q$ are fractional ideals of $R/Q$.
By construction, $R/Q$ admits a nontrivial Jaffard family $\Lambda$: for every $U\in\Lambda$, $(I/Q)U$ and $(J/Q)U$ are $\sharp_U$-invertible $\sharp_U$-ideals, and thus by the inductive hypothesis so is $(I/Q)U+(J/Q)U=((I+J)/Q)U$. Hence $(I+J)/Q$ is a $\sharp$-invertible $\sharp$-ideal, and so $I+J$ is a $\ast$-invertible $\ast$-ideal, i.e., $I+J\in\mathrm{Inv}^\ast(R)$.
If now $R$ is locally finite and finite-dimensional, we see that if $\Theta$ is the standard decomposition of $R$ then every $T\in\Theta$ is semilocal. The ideal $I+J$ is $\ast$-invertible if and only if $(I+J)T$ is $\ast_T$-invertible for every $T\in\Theta$; however, since $IT$ and $JT$ are $\ast_T$-invertible $\ast_T$-ideals, the previous part of the proof shows that so is $IT+JT=(I+J)T$. Therefore, $I+J\in\mathrm{Inv}^\ast(R)$.
\end{proof}
\end{document} |
\begin{document}
\date{}
\title[Dynamics of epidemic models]{Dynamics of epidemic models with asymptomatic infection and seasonal succession}
\author{\sc Yilei Tang $^{\dag}$ \ Dongmei Xiao $^{\dag*}$
\ Weinian Zhang $^{\ddag}$ \ Di Zhu $^{\dag}$}
\thanks{$^*$ Corresponding author.}
\thanks{{\bf Funding}: The first author is partially supported by the National Natural
Science Foundation of China (No. 11431008) and the European Union's Horizon 2020 research and innovation
programme under the Marie Sklodowska-Curie grant agreement (No. 655212). The second author is supported by the National Natural
Science Foundation of China (No. 11431008 \& 11371248). The third author is supported by the National Natural
Science Foundation of China (No. 11521061 \& 11231001). }
\thanks{$^{\dag}$ School of Mathematical Science, Shanghai Jiao Tong University, Shanghai, 200240, P. R. China
([email protected] (D. Xiao), [email protected] (Y. Tang), [email protected] (D. Zhu)) }
\thanks{$^{\ddag}$ Yangtze Center of Mathematics and Department of Mathematics, Sichuan University, Chengdu, Sichuan 610064, P. R. China ([email protected] (W. Zhang))}
\keywords{Epidemic model, asymptomatic infection, seasonal succession, basic reproduction number, threshold dynamics}
\subjclass{Primary 92D25, 34C23; Secondary 34D23}
\maketitle
{\bf Abstract~~} In this paper, we consider a compartmental SIRS epidemic model with asymptomatic infection and seasonal succession,
which is a periodic discontinuous differential system.
The basic reproduction number $\mathcal{R}_0$ is defined and evaluated directly for this model, and the uniform persistence of the disease
and threshold dynamics are obtained. In particular, the global dynamics
of the model without seasonal force are studied. It is shown that
the model has only a disease-free equilibrium which is globally stable if $\mathcal{R}_0\le 1$,
and as $\mathcal{R}_0>1$ the disease-free equilibrium is unstable and the model has an endemic equilibrium, which is globally stable
if the recovery rates of the asymptomatic infective and symptomatic infective individuals are close. These theoretical results provide an intuitive basis for
understanding that the asymptomatic infective individuals and the seasonal disease transmission promote the evolution of the epidemic,
which allows us to predict the outcomes of control strategies during the course of the epidemic.
\section{Introduction}
Since Kermack and McKendrick in \cite{Ker-McK} proposed the classical deterministic compartmental
model (called SIR model) to describe epidemic outbreaks and spread,
mathematical models have become important tools in analyzing the
spread and control of infectious diseases, see \cite{AleM2005, May, Brauer, SeasonalScience, Heth2000, Hsi2014, THRZ08, Tow2012, xiaoruan}
and references therein. The number of infected individuals used in these models is usually calculated via data in the hospitals. However,
some studies on influenza show that some individuals of the population who
are infected never develop symptoms, i.e. being asymptomatic infective. The asymptomatic infected individuals will not go to hospital but they can infect the susceptible by contact, then go to the recovered stage, see for instance
\cite{wujh, Long, Feng}. Hence, if we only use the data from hospitals in mathematical models to assess the epidemic,
it seems that we will underestimate the infection risks of the epidemic.
On the other hand, seasonality is very common in ecological and human social systems (cf. \cite{xiao}). For example, variation patterns in climate are repeated every year,
birds migrate according to the variation of season, opening and closing of schools are almost periodic, and so on. These seasonal factors significantly influence
the survival of pathogens in the environment, host behavior, and abundance of vectors and non-human hosts.
A number of papers have suggested that seasonality plays
an important role in epidemic outbreaks and the evolution of disease transmissions, see
\cite{PeriodWu, periodC, SeasonalD, SeasonalScience, seasonalS, Smi1983, seasonalNature, Tow2012, zhang2007}. However, it is still
challenging to understand the mechanisms of seasonality and
their impacts on the dynamics of infectious diseases.
Motivated by the studies above on asymptomatic infectivity or seasonality, we develop a compartmental model with asymptomatic infectivity and seasonal factors in this paper.
This model is a periodic discontinuous differential system.
We try to establish the theoretical analysis on the periodic discontinuous differential systems and obtain the dynamics of the model. This will allow us to draw both qualitative and quantitative conclusions on effect of the asymptomatic infectivity and seasonality on the epidemic.
The rest of the paper is organized as follows.
In section 2, we formulate the SIRS model with asymptomatic infective and seasonal factors, then discuss the existence and regularity of non-negative solutions
for this model.
In section 3, we define the basic reproduction number $\mathcal{R}_0$ for the model, give the evaluation of $\mathcal{R}_0$
and investigate the threshold dynamics of the model (or the uniformly persistent of the disease). It is shown that the length of the season, the transmission rate
and the existence of asymptomatic infective affect the basic reproduction number $\mathcal{R}_0$. In section 4, we study the global dynamics of the model ignoring seasonal factor.
We prove that there is a unique disease-free equilibrium and the disease always dies out when $\mathcal{R}_0\le 1$;
while when $\mathcal{R}_0> 1$ there is an endemic equilibrium which is global stable if the recovering rates of asymptomatic infective and symptomatic infective are close.
A brief discussion is given in the last section.
\section{Model formulation}
In this section, we first extend the classic SIRS model to a model which incorporates with
the asymptomatic infective and seasonal features of epidemics, and then study the regularity of solutions of the model.
Because there are asymptomatic infectious and
symptomatic infectious individuals in the evolution of epidemic, the whole population is divided into four compartments: susceptible, asymptomatic infectious,
symptomatic infectious and recovered individuals.
More precisely, we let $S$, $I_a$, $I_s$ and $R$ denote the numbers of
individuals in the susceptible, asymptomatic,
symptomatic and recovered compartments, respectively,
and $N$ be the total population size. Let $\mathbb{R}_+=[0, +\infty)$, $\mathbb{Z}_+$ be the set of all nonnegative integers, and $\omega>0$ be given as the period of the disease transmissions. In addition to the assumptions of the classical SIRS model, we list the following assumptions on seasonal factors,
asymptomatic infectivity and symptomatic infectivity.
\begin{itemize}
\item[(A1)] Due to the opening and closing of schools or migration of birds, each period of the disease transmission is simply divided into two seasons with high and low
transmission rates, which are called high season $J_2$ and low season $J_1$, respectively. The seasonality is described by a piecewise constant function with high transmission rate $\beta_2$ in $J_2$ and low transmission rate $\beta_1$ in $J_1$, respectively, where $J_1=[m\omega, m\omega+(1-\theta)\omega )$ and $J_2=[ m\omega+(1-\theta)\omega, (m+1)\omega)$.
Here $m\in \mathbb{Z}_+$, and $0<\theta<1$ which measures the fraction of the high season to the whole infection cycle.
\item[(A2)] There are two classes of infective individuals: asymptomatic infective ones and symptomatic infective ones. Both of them are able to infect susceptible individuals by contact.
A fraction $\mu$ of infective individuals proceeds to the
asymptomatic infective compartment and the remainder (i.e. a fraction $1-\mu$ of infective individuals) goes directly to the symptomatic infective compartment. And the asymptomatic infective and symptomatic infective individuals recover from disease at rate $r_a$ and $r_s$, respectively.
\item[(A3)] The symptomatic infective individuals will get treatment in hospital or be quarantined. Hence, the symptomatic infective individuals reduce their contact
rate by a fraction $\alpha$.
\end{itemize}
Based on these assumptions, the classical SIRS model can be extended to the following system
\begin{equation}\label{model}
\begin{cases}
\dot{S}(t)=dN(t)-dS(t)-\beta(t) S(t)(I_{a}(t)+\alpha I_{s}(t))+\sigma R(t), \\
\dot{I_{a}}(t)=\mu\beta(t) S(t)(I_{a}(t)+\alpha I_{s}(t))-(d+r_{a})I_{a}(t), \\
\dot{I_{s}}(t)=(1-\mu)\beta(t) S(t)(I_{a}(t)+\alpha I_{s}(t))-(d+r_{s})I_{s}(t), \\
\dot{R}(t)=r_{a}I_{a}(t)+r_{s}I_{s}(t)-(d+\sigma)R(t),
\end{cases}
\end{equation}
where $N(t)=S(t)+I_a(t)+I_s(t)+R(t)$, all parameters $d$, $\alpha$, $\sigma$, $\mu$, $r_a$ and $r_s$ are nonnegative, and
$$
\beta(t)=\left\{ \begin{array}{ll}
\beta_1, \ & t\in J_1=[m\omega, m\omega+(1-\theta)\omega ),\\
\beta_2, \ & t\in J_2=[ m\omega+(1-\theta)\omega, (m+1)\omega ).\\
\end{array}
\right.
$$
Parameters $\beta_2$ and $\beta_1$ are the rates of contact transmission of the disease in high season and low season respectively for which $\beta_2\ge \beta_1\ge 0$.
Besides, $d$ is both birth rate and death rate, $\alpha$, $0\le\alpha\le 1$, is the fraction of the symptomatic infective individuals reducing their contact
rate with susceptible, the fraction of infective individuals becoming asymptomatic infective $\mu$ satisfies $0\le\mu\le 1$,
parameter $\sigma$ is the rate of recovered population loss of the immunity and reentering the susceptible group,
and $r_a$ and $r_s$ are the rates of asymptomatic infective and symptomatic infective recovering with immunity, respectively.
From the biological point of view, we focus on the solutions of system (\ref{model}) with initial conditions
\begin{equation}\label{initial}
S(0)=S_0 \ge 0, I_a(0)=I_{a0} \ge 0, I_s(0)=I_{s0}\ge 0, R(0)=R_0 \ge 0
\end{equation}
in the first octant $\mathbb{R}_+^4$.
Note that
\[
\dot{ N}(t)=\dot{S}(t) + \dot{ I}_a(t) + \dot{I}_s(t) + \dot{ R}(t) \equiv 0, \ t\in J_1 \ \rm{or}\ t\in J_2.
\]
Hence, $N(t)=S_0+I_{a0}+I_{s0}+R_0$, which is a constant for almost all $t\in \mathbb{R}_+$.
Since the total population does not change by the assumption, we let
\[
S(t) + I_a(t) +I_s(t) + R(t) \equiv N
\]
for almost all $t\in \mathbb{R}_+$. Therefore, system \eqref{model} with the initial condition \eqref{initial} in $\mathbb{R}_+^4$ can be reduced to
\begin{equation}\label{SIRS3}
\begin{cases}
\dot{S}=(d+\sigma)(N-S)-\beta (t) S(I_a+\alpha I_s)-\sigma (I_a+I_s), \\
\dot{I_a}=\mu\beta (t) S(I_a+\alpha I_s)-(d+r_a)I_a, \\
\dot{I_s}=(1-\mu)\beta (t) S(I_a+\alpha I_s)-(d+r_s)I_s, \\
S(0)=S_0, I_a(0)=I_{a0}, I_s(0)=I_{s0},\\
P_0=(S_0, I_{a0}, I_{s0})\in \mathcal{D}_0,
\end{cases}
\end{equation}
where $\mathcal{D}_0\subset \mathbb{R}_+^3$ and
\begin{equation}\label{D}
\mathcal{D}_0:=\{(S, I_a, I_s)|\; S\ge 0, I_a\ge 0, I_s\ge 0, ~0\le S+ I_a+I_s\le N \}.
\end{equation}
Clearly, the right hand side of system \eqref{SIRS3} is not continuous on the
domain $\mathbb{R}_+\times\mathcal{D}_0$. We claim that the solution of system \eqref{SIRS3} exists globally on the interval $\mathbb{R}_+=[0, +\infty)$ and is unique.
\begin{theorem}\label{existenceUni}
For any $P_0\in \mathcal{D}_0$, system \eqref{SIRS3} has a unique global solution $\varphi(t, P_0)=(S(t,P_0), I_a(t,P_0), I_s(t,P_0))$
in $\mathbb{R}_+$, which is continuous with respect to $t$ and all parameters of this system.
Moreover, $\varphi(t, P_0)\subseteq \mathcal{D}_0$ for any $t\in \mathbb{R}_+$ and the solution $\varphi(t, P_0)$ is differentiable with respect to $P_0$, where
some derivatives are one-sided if $P_0$ is on the domain boundary.
\end{theorem}
\begin{proof}
Assume that $\varphi(t,P_0)$ is a solution of system \eqref{SIRS3}. We first
consider the two systems
\begin{equation}\label{SIR-dim32}
\begin{cases}
\dot{S}=(d+\sigma)(N-S)-\beta_i S(I_a+\alpha I_s)-\sigma (I_a+I_s),\\
\dot{I}_a=\mu\beta_i S(I_a+\alpha I_s)-(d+r_a)I_a,\\
\dot{I}_s=(1-\mu)\beta_i S(I_a+\alpha I_s)-(d+r_s)I_s,\\
S(t_*)=S_*,\; I_{a}(t_*)=I_{a*},\; I_{s}(t_*)=I_{s*},\\
P_*=(S_*, I_{a*},I_{s*})\in \mathbb{R}_+^3
\end{cases}
\end{equation}
in the domain $\mathbb{R}_+\times\mathbb{R}_+^3$, $i=1, 2,$ respectively.
It is clear that for each $i$ the solution of system \eqref{SIR-dim32} exists and is unique on its maximal interval of existence, and the solution of system \eqref{SIR-dim32} is differentiable with respect to the initial value $P_*$ by the fundamental theory of ordinary differential equations.
Note that
the bounded closed set $\mathcal{D}_0$ in $\mathbb{R}_+^3$ is a positive compact invariant set of system \eqref{SIR-dim32} since the vector field of system \eqref{SIR-dim32} on
the boundary $\partial\mathcal{D}_0$ of $\mathcal{D}_0$ is directed toward to the interior of $\mathcal{D}_0$ or lies on $\partial\mathcal{D}_0$, where
\begin{eqnarray*}
\begin{split}
\partial\mathcal{D}_0=&\{(S,I_a,I_s):\ (S,I_a,I_s)\in \mathbb{R}_+^3, S=0, \ 0\le I_a+I_s\le N\}\\
&\cup\{(S,I_a,I_s):\ (S,I_a,I_s)\in \mathbb{R}_+^3, I_s =0,
\ 0\le S+I_a\le N\} \\
& \cup\{(S,I_a,I_s): \ (S,I_a,I_s)\in \mathbb{R}_+^3, I_a=0,\ 0\le S+I_s\le N\}\\
&\cup \{(S,I_a,I_s): \ (S,I_a,I_s)\in \mathbb{R}_+^3, S+I_s+I_a=N\}.
\end{split}
\end{eqnarray*}
Therefore, the solution of system \eqref{SIR-dim32} exists globally for any $P_*\in \mathcal{D}_0\subset\mathbb{R}_+^3$, and these solutions are in $\mathcal{D}_0$
for all $t> 0$.
Let $\phi_i(t,t_*, P_*)$ for $i=1, 2$
be the solution semiflow of the following system
\begin{equation}\label{SIRS1}
\begin{cases}
\dot{S}=(d+\sigma)(N-S)-\beta_i S(I_a+\alpha I_s)-\sigma (I_a+I_s),\\
\dot{I}_a=\mu\beta_i S(I_a+\alpha I_s)-(d+r_a)I_a,\\
\dot{I}_s=(1-\mu)\beta_i S(I_a+\alpha I_s)-(d+r_s)I_s,\\
\phi_i(t_*,t_*, P_*)=P_*, \ P_*\in \mathcal{D}_0,
\end{cases}
\end{equation}
respectively, that is, $\phi_i(t,t_*, P_*) = (S(t,t_*, P_*), I_a(t,t_*, P_*), I_s(t,t_*, P_*))$ for $t\ge t_*$ is the
solution of system \eqref{SIRS1} with the initial condition $\phi_i(t_*,t_*, P_*)=(S_*, I_{a*},I_{s*})\in \mathcal{D}_0$, respectively.
It follows that the solution
$\varphi(t,P_0)$ for $t\ge 0$ of system \eqref{SIRS3} can be determined uniquely by induction.
For simplicity, we let $s_m=(m-1)\omega$ and $t_m=s_m+(1-\theta)\omega$ for $m\in \mathbb{Z}_+$.
Hence,
$$
[0,\infty )=\bigcup _{m=1}^\infty [s_m, s_{m+1}]
=\bigcup _{m=1}^\infty
([s_{m}, t_m]\cup [t_m, s_{m+1}]),
$$
and $\varphi(t,P_0)$ can be written as follows.
\begin{equation}\label{solution}
\varphi(t,P_0)=\left\{
\begin{array}{ll}
\phi _1(t,s_1, P_0) &\textrm{when}\;t\in [s_1, t_1],
\\[1ex]
\phi _2(t, t_1, \phi_1(t_1, s_1, P_0)) &\textrm{when}\;\;t\in [t_1, s_2], \\[1ex]
...
\\[1ex]
\phi _1(t, s_m, u_m) & \textrm{when}\;\;t\in [s_m , t_m], \\[1ex]
\phi _2(t, t_m, v_m) & \textrm{when}\;\;t\in [t_m, s_{m+1} ],
\end{array}
\right.
\end{equation}
where $u_m$ and $v_m$ are determined by letting $u_1=P_0$, $v_1=\phi_1(t_1, s_1, u_1)$ and
$$
u_m=\phi _2(s_m, t_{m-1}, v_{m-1}),\; v_m=\phi _1(t_m, s_m, u_m) \;\;\textrm{for}\;\;m\geq 2.
$$
This implies that the solution $\varphi(t,P_0)$ of system \eqref{SIRS3} exists globally in $\mathbb{R}_+$ and is unique for any $P_0\in \mathcal{D}_0$,
and it is continuous with respect to $t$ and all parameters.
By the expression \eqref{solution},
it is easy to see that the solution $\varphi(t,P_0)$ lies in $\mathcal{D}_0$ for all $t\ge 0$ and $\varphi(t,P_0)$ is differentiable with respect to $P_0$.
The proof is completed.
\end{proof}
Theorem \ref{existenceUni} tells us that system \eqref{SIRS3} is $\omega$-periodic with respect to $t$ in $\mathbb{R}_+\times \mathcal{D}_0$, and it suffices to investigate the dynamics of its associated period map $\mathcal{P}$ on $\mathcal{D}_0$ for the dynamics of system \eqref{SIRS3}, where
\begin{equation}\label{poincaremap}
\begin{split}
\mathcal{P}:& \ \mathcal{D}_0 \to \mathcal{D}_0,\\
\mathcal{P}(P_0)&=\varphi(\omega,P_0)=\phi _2(\omega, (1-\theta)\omega, \phi_1((1-\theta)\omega, 0, P_0)),
\end{split}
\end{equation}
which is continuous in $\mathcal{D}_0$.
\section{ Basic reproduction number and threshold dynamics}
In epidemiology, the basic reproduction number (or basic reproduction ratio) $\mathcal{R}_0$ is an important quantity,
defined as the average number of secondary infections produced when an infected individual is introduced into a host population where everyone is susceptible. It is often considered as the threshold quantity that determines whether
an infection can invade a new host population and persist. More precisely, if $\mathcal{R}_0<1$, the disease dies out
and the disease cannot invade the population; but if $\mathcal{R}_0>1$, then the disease is established in the population. There have been some successful approaches for
the calculations of basic reproduction number for different epidemic models. For example, Diekmann {\it et al} in \cite{Die1990}
and van den Driessche and Watmough in \cite{Van2002} presented general approaches of $\mathcal{R}_0$ for autonomous continuous epidemic models. And for periodic continuous epidemic models,
Wang and Zhao in \cite{Wang2008}
defined the basic reproduction number.
Under some assumptions on the discontinuous state functions,
Guo, Huang and Zou \cite{Guo2012} determined the basic reproduction number for an SIR epidemic model with discontinuous treatment strategies.
To our knowledge, there is no theoretic approach to calculate the basic reproduction number for periodic discontinuous epidemic models such as system \eqref{SIRS3}.
In this section, we use the idea and some notations given in \cite{Wang2008} to define and calculate the basic reproduction numbers for system \eqref{SIRS3},
and discuss the uniform persistence of the disease and threshold dynamics.
We define ${\bf X}$ to be the set of all disease free states of system \eqref{SIRS3}, that is
$${\bf X}=\{(S,I_a,I_s):\ 0\le S\le N, I_a=I_s=0\}.$$
Clearly, the disease free subspace ${\bf X}$ is positive invariant for system \eqref{SIRS3}.
It can be checked that the period map $\mathcal{P}(P_0)$ in ${\bf X}$ has a unique fixed point at $(N,0,0)$, which is the unique disease-free equilibrium
of system \eqref{SIRS3}, denoted by $E_0$. We now consider a population near the disease-free equilibrium $E_0$.
For simplicity, we let $\mathbf{x}=(S,I_a,I_s)^T$, and for $i=1,2$ set
\begin{equation*}\label{FV}
\begin{array}{ll}
\ \mathbf{F_i}&=\left(
\begin{array}{rrr}
0 & 0 & 0
\\
0 & \mu\beta_iN & \alpha\mu\beta_iN
\\
0 & (1-\mu)\beta_iN & \alpha(1-\mu)\beta_iN
\end{array}
\right):=\left(
\begin{array}{rr}
0 & {\bf 0 }
\\
{\bf 0} & F_i
\end{array}
\right),\\ \mathbf{V_i}&=\left(
\begin{array}{rrr}
d+\sigma & \beta_iN+\sigma & \alpha\beta_iN+\sigma
\\
0 & d+r_a & 0
\\
0 & 0 & d+r_s
\end{array}
\right):=\left(
\begin{array}{rr}
d+\sigma & {\bf b_i }
\\
{\bf 0} & V
\end{array}
\right).
\end{array}
\end{equation*}
Then the linearized system of \eqref{SIRS3} at $E_0$ can be rewritten as
\begin{equation}\label{real_linear}
\frac{d\mathbf{x}}{dt}=(\mathbf{F}(t)-\mathbf{V}(t))\mathbf{x},
\end{equation}
where $\mathbf{F}(t)=\chi_{J_1}(t)\mathbf{F}_{1}\ +\chi_{J_2}(t)\mathbf{F}_{2}\ $, $\mathbf{V}(t)=\chi_{J_1}(t)\mathbf{V}_{1}+\chi_{J_2}(t)\mathbf{V}_{2}$, and
$$
\chi_{J_i}(t)=\left\{\begin{array}{ll}
1 & \ \text{as}\ t\in J_i,
\\
0 & \ \text{as}\ t\notin J_i.
\end{array}
\right.
$$
System \eqref{real_linear} is a piecewise continuous periodic linear system with period $\omega$ in $t\in \mathbb{R}_+$.
In order to determine the fate of a small number of infective individuals introduced into a disease free population,
we first extend system \eqref{real_linear} from $t\in \mathbb{R}_+$ to $t\in \mathbb{R}$, and introduce some new notations.
When $t\in \cup_{m=-\infty}^{+\infty}(J_1\cup J_2)=(-\infty, +\infty),$ we set $\mathbb{I}(t)=(I_a(t),I_s(t))^T$, and $$
\mathbb{F}(t)=
\chi_{J_1}(t){F}_{1}\ +\chi_{J_2}(t){F}_{2}=\left(
\begin{array}{rr}
\mu N\beta(t) & \alpha\mu N\beta(t)
\\
(1-\mu) N\beta(t) & \alpha(1-\mu) N\beta(t)
\end{array}
\right),
$$
where
\[ \beta(t)=\left\{ \begin{array}{ll}
\beta_1, \ & t\in J_1=[m\omega, m\omega+(1-\theta)\omega ),\\
\beta_2, \ & t\in J_2=[ m\omega+(1-\theta)\omega, (m+1)\omega), \ m\in \mathbb{Z}.\\
\end{array}
\right.\]
Clearly,
$\mathbb{F}(t)$ is a $2\times 2$ piecewise continuous periodic matrix with period $\omega$ in $\mathbb{R}$, and it is non-negative. And
$$
-V=\left(
\begin{array}{rr}
-(d+r_a) & 0
\\
0 & -(d+r_s)
\end{array}
\right),
$$
which is cooperative in the sense that the off-diagonal
elements of $-V$ are non-negative.
Let $Y(t,s)$, $t\ge s$, be the evolution operator of the linear system
\begin{equation}\label{vv}
\frac{d\mathbb{I}(t)}{dt}=-V\mathbb{I}(t).
\end{equation}
Since $V$ is a constant matrix, for each $s\in \mathbb{R}$ the matrix $Y(t,s)$ satisfies
\begin{equation}\label{v2eq}
\frac{d}{dt}Y(t,s)=-V Y(t,s), \ t\ge s, \ Y(s,s)=E^2,
\end{equation}
where $E^2$ is a $2\times 2$ identity matrix, and $Y(t,s)=e^{-V(t-s)}$. Hence, the monodromy matrix $\Phi_{-V}(t)$ of system \eqref{vv} is $Y(t,0)$, that is,
$$
\Phi_{-V}(t)=e^{-Vt}=\left(
\begin{array}{ll}
e^{-(d+r_a)t} & 0
\\
0 & e^{-(d+r_s)t}
\end{array}
\right),
$$
where $d$, $r_a$ and $r_s$ are positive numbers.
We denote $\|\cdot\|_1$ the $1$-norm of vector and matrix.
Thus, there exist $K>0$ and $\kappa >0$ such that
$$
\|Y(t,s)\|_1\le Ke^{-\kappa (t-s)}, \ \forall t\ge s, \ s\in \mathbb{R}.
$$
And since $\mathbb{F}(t)$ is bounded, there exists a constant $K_1>0$ such that $\|\mathbb{F}(t)\|_1<K_1$ for all $t$; it follows that
\begin{equation}\label{Lineq}
\|Y(t,t-a)\mathbb{F}(t-a)\|_1\le K K_1e^{-\kappa a},\ \forall t\in \mathbb{R},\ a\in [0, +\infty).
\end{equation}
We now consider the distribution of infected individuals in the periodic environment. Assume that $\mathbb{I}(s)$ is the initial distribution of infected individuals in infectious compartments. Then $\mathbb{F}(s)\mathbb{I}(s)$
is the distribution of new infections produced by the infected individuals who were introduced at time $s$.
Given $t\ge s$, $Y(t,s)\mathbb{F}(s)\mathbb{I}(s)$
is the distribution of
those infected individuals who were newly infected at time $s$ and still remain in the infected
compartments at time $t$.
Thus, the integration of this distribution from $-\infty$ to $t$
$$
\int_{-\infty}^tY(t,s)\mathbb{F}(s)\mathbb{I}(s)ds
=\int_0^{\infty}Y(t,t-a)\mathbb{F}(t-a)\mathbb{I}(t-a)da
$$
gives the distribution of cumulative new infections at time $t$ produced by all those
infected individuals introduced at times earlier than $t$.
Let $\mathbb{C}_{\omega}=\mathbb{C}(\mathbb{R},\mathbb{R}^2)$ be the ordered Banach space of $\omega$-periodic continuous functions from $\mathbb{R}$ to $\mathbb{R}^2$, which is equipped with the norm $\left \lVert \cdot \right \rVert _c$,
$$
\left \lVert \mathbb{I}(s) \right \rVert_c=\max _{s\in [0,\omega ]}\left \lVert\mathbb{I}(s)\right \rVert_1,
$$
and the generating positive cone
$$
\mathbb{C}^+_{\omega}=\{\mathbb{I}(s)\in \mathbb{C}_{\omega}:\ \mathbb{I}(s)\ge 0, \ s\in \mathbb{R}\}.
$$
Define a linear
operator $\mathcal{L}: \ \mathbb{C}_{\omega}\to \mathbb{C}_{\omega}$ by
\begin{equation}\label{operate}
(\mathcal{L}\mathbb{I})(t)=\int_{-\infty}^tY(t,s)\mathbb{F}(s)\mathbb{I}(s)ds=\int_0^{\infty}Y(t,t-a)\mathbb{F}(t-a)\mathbb{I}(t-a)da.
\end{equation}
It can be checked that the linear operator $\mathcal{L}$ is well defined.
\begin{lemma}\label{operatorC}
The operator $\mathcal{L}$ is positive, continuous and compact on $\mathbb{C}_{\omega}$.
\end{lemma}
\begin{proof}
Since $Y(t,s)=e^{-V(t-s)}$ and $\mathbb{F}(t)$ is a nonnegative bounded matrix,
we get that $\mathcal{L}(\mathbb{C}^+_{\omega})\subset \mathbb{C}^+_{\omega}$. This implies that the
linear operator $\mathcal{L}$ is positive.
We now prove the continuity of $\mathcal{L}$. For each $t\in \mathbb{R}$, we have
\begin{eqnarray*}
\begin{split}
\|\mathcal{L}\mathbb{I}(t)\|_{1}&=\left \|\int_0^{\infty}Y(t,t-a)\mathbb{F}(t-a)\mathbb{I}(t-a)da\right \|_{1}\\
&=\left \|\sum_{j=0}^\infty\int_{j\omega}^{(j+1)\omega}Y(t,t-a)\mathbb{F}(t-a)\mathbb{I}(t-a)da\right \|_{1}\\
&\leq \sum_{j=0}^\infty\int_{j\omega}^{(j+1)\omega}\|Y(t,t-a)\mathbb{F}(t-a)\mathbb{I}(t-a)\|_{1}da\\
&\leq \sum_{j=0}^\infty\int_{j\omega}^{(j+1)
\omega}KK_1e^{-\kappa a}\|\mathbb{I}(t-a)\|_1da\\
&\leq \omega K K_1\sum_{j=0}^\infty e^{-\kappa\omega j}\cdot\|\mathbb{I}\|_c
\end{split}
\end{eqnarray*}
by \eqref{Lineq}. Hence,
\begin{equation*}
\|\mathcal{L}\mathbb{I}(t)\|_{c}=\max_{t\in[0,\omega]}\|\mathcal{L}\mathbb{I}(t)\|_{1}\leq \omega K K_1\sum_{j=0}^\infty e^{-\kappa\omega j}\cdot\|\mathbb{I}\|_c,
\end{equation*}
which implies that $\mathcal{L}$ is continuous and uniformly bounded since $\sum_{j=0}^\infty e^{-\kappa\omega j}$ is convergent.
In the following we prove the compactness of $\mathcal{L}$. We first claim that $\mathcal{L}\mathbb{I}(t)$ is equicontinuous.
Consider $\mathbb{I}(t)\in \mathbb{C}_{\omega}$ and $\forall t_1,t_2\in [0,\omega]$ with $t_1<t_2$. Then
\begin{eqnarray*}
\begin{split}
&\|\mathcal{L}\mathbb{I}(t_2)-\mathcal{L}\mathbb{I}(t_1)\|_1=\left\|\int_{-\infty}^{t_2}Y(t_2,s)\mathbb{F}(s)\mathbb{I}(s)ds-
\int_{-\infty}^{t_1}Y(t_1,s)\mathbb{F}(s)\mathbb{I}(s)ds\right \|_1\\
&=\left \|\int_{-\infty}^{t_2}(Y(t_2,s)-Y(t_1,s))\mathbb{F}(s)\mathbb{I}(s)ds
+\int_{t_1}^{t_2}Y(t_1,s)\mathbb{F}(s)\mathbb{I}(s)ds\right \|_1\\
&\leq \int_{-\infty}^{t_2}\|Y(t_2,s)-Y(t_1,s)\|_1\|\mathbb{F}(s)\|_1\|\mathbb{I}(s)\|_1ds+\int_{t_1}^{t_2}\|Y(t_1,s)\|_1\|\mathbb{F}(s)\|_1\|\mathbb{I}(s)\|_1ds\\
&\leq \int_{-\infty}^{\omega}\|Y(t_2,s)-Y(t_1,s)\|_1\|\mathbb{F}(s)\|_1\|\mathbb{I}(s)\|_1ds+\int_{t_1}^{t_2}Ke^{-\kappa (t_1-s)}
\|\mathbb{F}(s)\|_1\|\mathbb{I}(s)\|_1ds\\
&\leq\|e^{-Vt_2}-e^{-Vt_1}\|_1\sum_{i=-\infty}^0\int_{i\omega}^{(i+1)\omega}K_1\|e^{Vs}\|_1\|\mathbb{I}(s)\|_1ds+\int_{t_1}^{t_2}Ke^{-\kappa (t_1-s)}
K_1\|\mathbb{I}(s)\|_1ds\\
&\leq\sum_{i=-\infty}^0e^{\tilde{d}_1(i+1)\omega}\cdot K_1\|\mathbb{I}\|_c\|e^{-Vt_2}-e^{-Vt_1}\|_1+KK_1e^{\kappa \omega }\|\mathbb{I}\|_c(t_2-t_1),
\end{split}
\end{eqnarray*}
where $\tilde{d}_1=\max\{d+r_a, d+r_s\}$.
Notice that $\sum_{i=-\infty}^0e^{\tilde{d}_1(i+1)\omega}$ is convergent and $e^{-Vt}$ is continuous on $[0,\omega]$.
Thus, if $\{\mathbb{I}(t)\}$ is bounded, then for any $\epsilon>0$ there exists a $\delta>0$ such that $\|\mathcal{L}\mathbb{I}(t_2)-\mathcal{L}\mathbb{I}(t_1)\|_c<\epsilon$ whenever $|t_2-t_1|<\delta$. This implies that $\{(\mathcal{L}\mathbb{I})(t)\}$ is equicontinuous. According to the Arzel\`a--Ascoli theorem, we know that $\mathcal{L}$ is compact. The proof of this lemma is completed.
\end{proof}
$\mathcal{L}$ is called the next infection operator, and the spectral radius of $\mathcal{L}$ can be defined as the
basic reproduction number (or ratio)
\begin{equation}\label{R_0}
\mathcal{R}_0:= \rho(\mathcal{L})
\end{equation}
of system \eqref{SIRS3}.
Following \cite{Wang2008}, we consider how to calculate $\mathcal{R}_0$ and whether the basic reproduction ratio (or number)
$\mathcal{R}_0$ characterizes the threshold of disease invasion, i.e., the disease-free periodic
solution $(N,0,0)$ of system \eqref{SIRS3} is locally asymptotically stable if $\mathcal{R}_0 < 1$ and unstable if $\mathcal{R}_0 > 1$.
It is clear that the disease-free periodic
solution $(N,0,0)$ of system \eqref{SIRS3} is locally asymptotically stable if all characteristic multipliers of periodic system \eqref{real_linear} are less than one, and
it is unstable if at least one of characteristic multipliers of periodic system \eqref{real_linear} is greater than one. By straightforward calculation,
we obtain that the characteristic multipliers of periodic system \eqref{real_linear} consist of $e^{-(d+\sigma)\omega}$ and the eigenvalues of the following
matrix
$$
\Phi_{F-V}(\omega)=e^{(F_2-V)\theta\omega}e^{(F_1-V)(1-\theta)\omega},
$$
where
\[
F_i-V=\left(
\begin{array}{rr}
\mu\beta_iN -(d+r_a) & \alpha\mu\beta_iN
\\
(1-\mu)\beta_iN & \alpha(1-\mu)\beta_iN -(d+r_s)
\end{array}
\right), \ \ i=1,2.
\]
Note that $e^{-(d+\sigma)\omega}<1$ because $d+\sigma>0$. Therefore, all characteristic multipliers of periodic system \eqref{real_linear} are less than one
if and only if the largest eigenvalue of $\Phi_{F-V}(\omega)$, denoted by $\rho (\Phi_{F-V}(\omega))$, is less than one (i.e. $\rho (\Phi_{F-V}(\omega))<1$), and at least one of the characteristic multipliers of periodic system \eqref{real_linear} is greater than one if and only if $\rho (\Phi_{F-V}(\omega))>1$, where $\rho (\Phi_{F-V}(\omega))$ is called
{\it the spectral radius} of matrix $\Phi_{F-V}(\omega)$.
On the other hand, it is easy to check that all assumptions (A2)-(A7) in \cite{Wang2008} are valid for system \eqref{real_linear} except the assumption (A1).
Using the notations in \cite{Wang2008}, we
define a matrix $V_{\varepsilon}=V-\varepsilon P$, here $P=\left( {\begin{array}{cc}
1 & 1 \\ 1 & 1 \end{array} } \right)$ and $\varepsilon$ is a very small positive number.
Thus, $-V_{\varepsilon}$ is cooperative and irreducible for each $t\in \mathbb R$. Let $Y_{\varepsilon} (t,s)$ be the evolution operator of
the linear system \eqref{v2eq} with $V$ replaced by $V_{\varepsilon}$. For some small $\varepsilon_0$, as $\varepsilon\in [0,\ \varepsilon_0)$
we can define the linear operator
${\mathcal L}_{\varepsilon}$ by replacing $Y (t,s)$ in \eqref{operate} with $Y_{\varepsilon} (t,s)$ such that the operator
${\mathcal L}_{\varepsilon}$ is positive, continuous and compact on $\mathbb{C}_{\omega}$. Let $\mathcal{R}_0^{\varepsilon}:= \rho(\mathcal{L}_{\varepsilon})$
for $\varepsilon\in [0,\ \varepsilon_0)$.
By the proof of Theorem \ref{existenceUni}, we know that the solutions of the following system
\begin{equation}\label{linearEq2}
\frac{dx}{dt}=({\mathbb F}(t)-V_{\varepsilon})x
\end{equation}
are continuous with respect to all parameters. Thus,
\[
\lim_{\varepsilon\to 0}\Phi_{F-V_{\varepsilon}}(\omega)=\Phi_{F-V}(\omega),
\]
where $\Phi_{F-V_{\varepsilon}}(\omega)$ is the monodromy matrix of system \eqref{linearEq2}, and $\Phi_{F-V}(\omega)$ is the monodromy matrix of system \eqref{linearEq2}
as $\varepsilon=0$.
According to the continuity of the spectrum of matrices, we have
\[
\lim_{\varepsilon\to 0}\rho(\Phi_{F-V_{\varepsilon}}(\omega))=\rho(\Phi_{F-V}(\omega)).
\]
From Lemma \ref{operatorC}, we apply arguments similar to those in \cite{Wang2008} to the two linear operators
${\mathcal L}_{\varepsilon}$ and ${\mathcal L}$, and obtain
\[
\lim_{\varepsilon\to 0}\mathcal{R}_0^{\varepsilon}=\mathcal{R}_0.
\]
We now easily follow the arguments in \cite{Wang2008} to characterize $\mathcal{R}_0$. Let $W_{\lambda}(t, s), t\geq s$ be the fundamental
solution matrix of the following linear periodic system
\begin{equation*}\label{test}
\frac{dw}{dt}=\left(-V+\frac{\mathbb F(t)}{\lambda}\right)w,
\end{equation*}
where the parameter $\lambda\in (0, +\infty)$. Consider an equation of $\lambda$
\begin{equation}\label{need}
\rho(W_{\lambda}(\omega, 0))=1.
\end{equation}
Then $\mathcal{R}_0$ can be calculated as follows.
\begin{theorem}\label{R0Characterize}
\begin{itemize}
\item[(i)] If equation \eqref{need} has a solution $\lambda_0>0$, then $\lambda_0$ is an eigenvalue of $\mathcal{L}$, which implies that $\mathcal{R}_0>0$;
\item[(ii)] If $\mathcal{R}_0>0$, then $\lambda=\mathcal{R}_0$ is the only solution of equation \eqref{need};
\item[(iii)] $\mathcal{R}_0=0$ if and only if $\rho(W_{\lambda}(\omega, 0))<1$ for all positive $\lambda$.
\end{itemize}
\end{theorem}
Note that $\rho(W_1(\omega,0))=\rho(\Phi_{F-V}(\omega))$.
Using similar arguments in \cite{Wang2008}, we can prove that the basic reproduction ratio (or number)
$\mathcal{R}_0$ can characterize the threshold of disease invasion.
\begin{theorem}\label{threshold}
\begin{itemize}
\item[(i)]$\mathcal{R}_0>1$ if and only if $\rho (\Phi_{F-V}(\omega))>1$;
\item[(ii)] $\mathcal{R}_0=1$ if and only if $\rho (\Phi_{F-V}(\omega))=1$;
\item[(iii)] $\mathcal{R}_0<1$ if and only if $\rho (\Phi_{F-V}(\omega))<1$.
\end{itemize}
Hence, the disease-free periodic
solution $(N,0,0)$ of system \eqref{SIRS3} is locally asymptotically stable if $\mathcal{R}_0<1$, and it is unstable if $\mathcal{R}_0>1$.
\end{theorem}
To save space, the proofs of the above theorems are omitted. From Theorem \ref{threshold}, we can see that $\mathcal{R}_0$ is a threshold parameter
for local stability of the disease-free periodic
solution $(N,0,0)$. We next show $\mathcal{R}_0$ is also a threshold parameter for dynamics of system \eqref{SIRS3} in $\mathcal{D}_0$.
\begin{theorem}\label{th-E02}
When $\mathcal{R}_{0}<1$, solutions
$(S(t),I_{a}(t),I_{s}(t))$ of system (\ref{SIRS3}) with initial points in $\mathcal{D}_0$
satisfy
$$
\lim_{t \to +\infty}(S(t),I_{a}(t),I_{s}(t))=(N, 0, 0).
$$
Moreover, the disease-free periodic solution $(N,0,0)$ of system (\ref{SIRS3}) is globally asymptotically stable in $\mathcal{D}_0$.
\end{theorem}
\begin{proof}
In the invariant pyramid $\mathcal{D}_0$ as shown in \eqref{D}, we consider a subsystem by the last two equations of system (\ref{SIRS3})
\begin{equation}
\begin{cases}
\label{compare-smaller}
\dot{I_{a}}(t)&=\mu\beta(t)S(I_{a}+\alpha I_{s})-(d+r_{a})I_{a}
\\
&\le \mu\beta(t) N(I_{a}+\alpha I_{s})-(d+r_{a})I_{a},
\\
\dot{I_{s}}(t)&= (1-\mu)\beta(t)S(I_{a}+\alpha I_{s})-(d+r_{s})I_{s}
\\
&\le (1-\mu)\beta(t)N(I_{a}+\alpha I_{s})-(d+r_{s})I_{s}.
\end{cases}
\end{equation}
Thus, the auxiliary system of \eqref{compare-smaller} is
\begin{eqnarray}
\label{compare-bigger}
\begin{cases}
\dot{I_{a}}(t)=\mu\beta(t) N(I_{a}+\alpha I_{s})-(d+r_{a})I_{a},
\\
\dot{I_{s}}(t)=(1-\mu)\beta(t)N(I_{a}+\alpha I_{s})-(d+r_{s})I_{s},
\end{cases}
\end{eqnarray}
which is a periodic linear discontinuous system with period $\omega$. The periodic map associated with system \eqref{compare-bigger} is defined by
$\Phi_{F-V}(\omega)$, which is a linear continuous map.
When $\mathcal{R}_{0}<1$, we have $\rho(\Phi_{F-V}(\omega))<1$, which implies that $(0,0)$ is a globally asymptotically stable solution of system \eqref{compare-bigger}.
Note that systems \eqref{compare-smaller} and \eqref{compare-bigger} are cooperative.
Using arguments similar to those in \cite{Smi1995}, we can prove that the comparison principle holds. Hence,
$$
\lim_{t \to +\infty}(I_{a}(t),I_{s}(t))=(0, 0).
$$
So, for arbitrarily small constant $\varepsilon>0$, there exists $T>0$
such that $I_{a}(t)+\alpha I_{s}(t)<\varepsilon$ as $t>T$.
From the first equation of system (\ref{SIRS3}),
\begin{equation*}
\begin{split}
\dot{S}&=dN-dS-\beta(t)S(I_{a}+\alpha I_{s})+\sigma(N-S-I_a-I_s)
\\
&> dN-dS-\beta_2S\varepsilon.
\end{split}
\label{SS1}
\end{equation*}
Therefore, $\liminf_{t\rightarrow+\infty}S(t)\geqslant\frac{dN}{d+\beta_2\varepsilon}$. Letting $\varepsilon\to 0$, we have
$$
\liminf_{t\rightarrow+\infty}S(t)\geqslant N.
$$
On the other hand, $S(t)\leqslant N$ in $\mathcal{D}_0$, which implies
$$
\lim_{t\rightarrow+\infty}S(t)=N.
$$
In summary, we have $\lim_{t \to +\infty}(S(t),I_{a}(t),I_{s}(t))=(N, 0, 0)$.
Moreover, from Theorem \ref{threshold} we know that $(N,0,0)$ of system \eqref{SIRS3} is globally asymptotically stable.
\end{proof}
In the following, we show that the disease is
uniformly persistent when $\mathcal{R}_0 > 1$.
\begin{theorem}
If $\mathcal{R}_{0}>1$, $0<\mu<1$ and $0<\alpha\beta_1$, then
there exists a constant $\delta_{0}>0$ such that every solution
$(S(t),I_{a}(t),I_{s}(t))$ of system (\ref{SIRS3}) with initial value in $\mathcal{D}_0$
satisfies
$$
\liminf_{t \to +\infty}I_{a}(t)\geqslant\delta_{0},\quad \liminf_{t \to +\infty}I_{s}(t)\geqslant\delta_{0}.
$$
\end{theorem}
\begin{proof}
Since system \eqref{SIRS3} is $\omega$-periodic with respect to $t$ in $\mathbb{R}_+\times \mathcal{D}_0$, it suffices to investigate the dynamics of its associated period map $\mathcal{P}$ defined by \eqref{poincaremap} on $\mathcal{D}_0$ for the dynamics of system \eqref{SIRS3}, where the map $\mathcal{P}$ is continuous. Clearly, $\mathcal{P}(\mathcal{D}_0)\subset \mathcal{D}_0$.
Define
$$
X_{0}=\{(S,I_a,I_s)\in\mathcal{D}_0: I_a>0,I_s>0\},\ \partial{X_{0}}=\mathcal{D}_0 \backslash X_{0}.
$$
Set
$$
M_{\partial}=\{P_0\in \partial X_{0} :\mathcal{P}^k(P_0)\in\partial X_{0}, \forall k\ge 0 \},
$$
which is a positive invariant set of $\mathcal{P}$ in $\partial X_{0}$. We claim
\begin{equation}
\label{M-partial}
M_{\partial}=\{(S,0,0):0\leqslant S\leqslant N\}.
\end{equation}
In fact, $\{(S,0,0):0\leqslant S\leqslant N\}\subset M_{\partial}$ by \eqref{poincaremap}.
On the other hand, for any
$P_0\in\partial{X_{0}}\setminus \{(S,0,0):0\leqslant S\leqslant N\}$, that is either
$I_{a0}=0, I_{s0}>0, S_0\ge0$ or $I_{a0}>0,I_{s0}=0, S_0\ge0$.
In the case $I_{a0}=0, I_{s0}>0, S_0>0$ (resp. $I_{a0}>0, I_{s0}=0, S_0>0$), we calculate by the last two equations
of system \eqref{SIRS3} and obtain that
\begin{eqnarray*}
I_{a}'(0)=\mu\alpha\beta(0) S(0)I_{s}(0)>0\quad ({\rm resp.}\ I_{s}'(0)=(1-\mu)\beta(0) S(0)I_{a}(0)>0),
\end{eqnarray*}
if $0<\mu<1$ and $0<\alpha\beta_1$. This implies that $\mathcal{P}^{k_0}(P_0)\not\in\partial{X_{0}}\setminus \{(S,0,0):0\leqslant S\leqslant N\}$ for some $k_0\ge0$ since the subsystem by the last two equations
of system \eqref{SIRS3} is cooperative. If $S(0)=0, I_{a0}=0, I_{s0}>0$ (or $S(0)=0, I_{a0}>0,I_{s0}=0$), then
$S'(0)=(d+\sigma)N-\sigma I_s(0)>0$ (or $S'(0)=(d+\sigma)N-\sigma I_a(0)>0$),
which implies that $\mathcal{P}^{k_1}(P_0)\not\in\partial{X_{0}}\setminus \{(S,0,0):0\leqslant S\leqslant N\}$ for some $k_1\ge 0$.
Therefore, \eqref{M-partial} is proved and $M_{\partial}$ is the maximal compact invariant set of $\mathcal{P}$ in $\partial X_{0}$.
Note that $E_0(N, 0, 0)$ is the unique fixed point of $\mathcal{P}$ in $M_{\partial}$ and it is an attractor of $\mathcal{P}$ in $M_{\partial}$
by the first equation of \eqref{SIRS3}.
Since $\mathcal{R}_{0}>1$, the stable set $W^s(E_0)$ of $E_0$ satisfies that $W^s(E_0) \cap X_0 =\emptyset$.
Applying \cite[Theorem 1.3.1]{Zhao2003}, we obtain that $\mathcal{P}$ is uniformly persistent with respect to $(X_0, \partial X_0)$.
Moreover, from \cite[Theorem 3.1.1]{Zhao2003}, it can be seen that the conclusion of this theorem is true. The proof is completed.
\end{proof}
\section{Global dynamics of system \eqref{SIRS3} without seasonal force}
In this section, we study the effects of asymptomatic infection on dynamics of system \eqref{SIRS3} if there are not seasonal factors, that is,
$\beta_1=\beta_2=\beta$. Then system \eqref{SIRS3} becomes
\begin{equation}\label{SIR-dim3}
\begin{cases}
\dot{S}=(d+\sigma)(N-S)-\beta S(I_a+\alpha I_s)-\sigma (I_a+I_s),\\
\dot{I_a}=\mu\beta S(I_a+\alpha I_s)-(d+r_a)I_a,\\
\dot{I_s}=(1-\mu)\beta S(I_a+\alpha I_s)-(d+r_s)I_s
\end{cases}
\end{equation}
in the domain $\mathbb{R}_+^3$.
By the formula \eqref{R_0}, we let $\beta_1=\beta_2$ and obtain the basic reproduction number
$\mathcal{R}_0$ of system \eqref{SIR-dim3} as follows.
\begin{eqnarray}\label{cons-R0}
\mathcal{R}_0=\beta N \left( \frac{\mu}{d+r_{a}}+\frac{\alpha(1-\mu)}{d+r_{s}} \right),
\end{eqnarray}
which is consistent with the number calculated using the approach of basic reproduction number in \cite{Die1990} and \cite{Van2002}.
From the expression \eqref{cons-R0}, we can see that there is still a risk of infectious disease outbreaks due to the existence of asymptomatic infection
even if all symptomatic infective individuals have been quarantined, that is, $\alpha=0$. This provides an intuitive basis for
understanding that the asymptomatic infective individuals promote the evolution of epidemic.
In the following we study dynamics of system \eqref{SIR-dim3}.
By a straightforward calculation, we obtain the existence of equilibrium for system \eqref{SIR-dim3}.
\begin{lemma}\label{existen}
\label{L-equils}(Existence of equilibrium) System (\ref{SIR-dim3}) has the following equilibria in $\mathbb{R}_+^3$.
\begin{itemize}
\item[(i)]
If $\mathcal{R}_0\le 1$, then system (\ref{SIR-dim3}) has a unique equilibrium, which is the disease-free equilibrium $E_0(N,0,0)$.
\item[(ii)] If $\mathcal{R}_0>1$ and $0<\mu<1$, then system (\ref{SIR-dim3}) has two equilibria: the disease-free equilibrium $E_0(N,0,0)$ and the endemic equilibrium $E_1(S^*, I_a^*, I_s^*)$ in the interior of $\mathcal{D}_0$,
where
$S^* = \frac{N}{\mathcal{R}_0},
I_a^* = \frac{\mu(d+\sigma) (d+r_s)N}
{(d+r_a)(d+r_s)+\sigma(d+\mu r_s)+\sigma r_a(1-\mu)} (1-\frac{1}{\mathcal{R}_0}),
I_s^* =\frac{(1-\mu)(d+r_a)}{\mu(d+r_s)}I_a^*.$
\item[(iii)] If $\mathcal{R}_0>1$ and $\mu=0$, then system (\ref{SIR-dim3}) has two equilibria: the disease-free equilibrium $E_0(N,0,0)$ and the asymptomatic-free equilibrium $E_2 (S_2^*,0,I_{s2}^*)$, where
$S_2^*= \frac{N}{\mathcal{R}_0},
I_{s2}^*= \frac{d+\sigma}{d+\sigma+r_s}N(1-\frac{1}{\mathcal{R}_0}).$
\item[(iv)] If $\mathcal{R}_0>1$ and $\mu=1$, the system (\ref{SIR-dim3}) has two equilibria: the disease-free equilibrium $E_0(N,0,0)$ and the symptomatic-free equilibrium $E_3 (S_3^*,I_{a3}^*,0)$, where
$S_3^*= \frac{N}{\mathcal{R}_0},
I_{a3}^*= \frac{d+\sigma}{d+\sigma+r_a}N(1-\frac{1}{\mathcal{R}_0}).$
\end{itemize}
\end{lemma}
We now discuss the local stability and topological classification of these equilibria in $\mathbb{R}_+^3$, respectively. We first study the disease-free equilibrium $E_0(N, 0, 0)$
and have the following lemma.
\begin{lemma}
\label{localstablity}
The disease-free equilibrium $E_0(N, 0, 0)$ of system (\ref{SIR-dim3}) in $\mathbb{R}_+^3$ is asymptotically stable if $\mathcal{R}_0<1$;
$E_0(N, 0, 0)$ is a saddle-node with one dimensional center manifold and two dimensional stable manifold if
$\mathcal{R}_0=1$; and $E_0(N, 0, 0)$ is a saddle with two dimensional stable manifold and one dimensional unstable manifold if $\mathcal{R}_0>1$.
\end{lemma}
\begin{proof}
A routine computation shows that the characteristic
polynomial of system (\ref{SIR-dim3}) at $E_0$ is
\begin{eqnarray}\label{ChEq}
f_1(\lambda) = (\lambda+d+\sigma)(\lambda^{2}-a_1\lambda+a_0),
\end{eqnarray}
where $a_0=(d+r_{a})(d+r_{s})(1-\mathcal{R}_0),$
$$
a_1=(d+r_{a})(\beta N\frac{\mu}{d+r_{a}}-1)+(d+r_{s})(\alpha\beta N\frac{1-\mu}{d+r_{s}}-1) .
$$
It is clear that
$-(d+\sigma)<0$ is always one root of \eqref{ChEq}.
We divide three cases: $\mathcal{R}_0<1$, $\mathcal{R}_0=1$ and $\mathcal{R}_0>1$ to discuss the other roots of \eqref{ChEq}.
If $\mathcal{R}_0<1$, then $a_1<0$ and $a_0>0$ by $\beta N\frac{\mu}{d+r_{a}}<\mathcal{R}_0$ and $\beta N\frac{\alpha(1-\mu)}{d+r_{s}}<\mathcal{R}_0$.
Thus, all three roots of \eqref{ChEq} have negative real parts, which leads to the local asymptotic stability of the disease-free equilibrium $E_0$.
If $\mathcal{R}_0=1$, then $a_0=0$ and $a_1<0$. Hence, the characteristic equation $f_1(\lambda) =0$ has three roots: $\lambda_1=-(d+\sigma)<0$, $\lambda_2=a_1<0$ and $\lambda_3=0$.
For calculating the associated eigenvectors $v_i$ of $\lambda_i$, $i=1,2,3$, we consider $J(E_0)$ with respect to $\mu$ in three cases: (i) $0<\mu<1$, (ii) $\mu=0$ and (iii) $\mu=1$, and we can obtain that $E_0$ is a saddle-node with one dimensional center manifold and two dimensional stable manifold by tedious calculations of normal form.
Summarizing the above analysis, we complete the proof of this lemma.
\end{proof}
From Lemma \ref{existen} and Lemma \ref{localstablity}, we can see that system \eqref{SIR-dim3} undergoes a saddle-node bifurcation in a small neighborhood of $E_0(N,0,0)$
as $\mathcal{R}_0$ increases passing through $\mathcal{R}_0=1.$
About the endemic equilibria, we have the following local stability.
\begin{lemma}
\label{L-E1}
The endemic equilibrium $E_1(S^*, I_a^*, I_s^*)$ of system (\ref{SIR-dim3}) is asymptotically stable if $\mathcal{R}_0>1$ and $0<\mu<1$; the asymptomatic-free equilibrium $E_2 (S_2^*,0,I_{s2}^*)$ of system (\ref{SIR-dim3}) is asymptotically stable if $\mathcal{R}_0>1$ and $\mu=0$; and the symptomatic-free equilibrium $E_3 (S_3^*,I_{a3}^*,0)$ of system (\ref{SIR-dim3}) is asymptotically stable if $\mathcal{R}_0>1$ and $\mu=1$.
\end{lemma}
\begin{proof}
For either $\mu=0$ or $\mu=1$, it is easy to compute the eigenvalues of the Jacobian matrix of system (\ref{SIR-dim3}) at $E_2$ or $E_3$, respectively, and
find that all eigenvalues have negative real parts. Hence, $E_2 (S_2^*,0,I_{s2}^*)$ or $E_3 (S_3^*,I_{a3}^*,0)$ is asymptotically stable if $\mathcal{R}_0>1$, respectively.
In the following we only
prove that $E_1(S^*, I_a^*, I_s^*)$ is asymptotically stable if $\mathcal{R}_0>1$ and $0<\mu<1$.
To make the calculation easier, we use the variables change
\begin{equation*}\label{change1}
S= \frac{ (d+r_s)}{\mu\beta} \hat{S}, ~I_a= \frac{ (d+r_s)}{\beta} \hat{I}_a, ~~I_s= \frac{ (d+r_s)}{\beta}\hat{I}_s, ~dt= \frac{d\tau}{(d+r_s)},
\end{equation*}
which reduces system (\ref{SIR-dim3}) into the following system,
\begin{equation}\label{SIR-1}
\begin{cases}
\frac{dS}{d\tau}=N_1-d_1S-\sigma_1 I_a-\sigma_1I_s - S(I_a+\alpha I_s),\\
\frac{dI_a}{d\tau}=-r I_a + S(I_a+\alpha I_s),\\
\frac{dI_s}{d\tau}=-I_s +\mu_1 S(I_a+\alpha I_s),
\end{cases}
\end{equation}
where
\begin{eqnarray*}\label{Pchange1}
\begin{split}
N_1 &=N (d+\sigma) \mu \beta/(d+r_s)^2 , ~d_1=(d+\sigma)/(d+r_s),
\\
\sigma_1 &=\sigma\mu/(d+r_s), ~r=(d+r_a)/(d+r_s), ~\mu_1=(1-\mu)/\mu
\end{split}
\end{eqnarray*}
and for simplicity we denote $\hat{S}, \hat{I}_a, \hat{I}_s$ by $S, I_a, I_s$ respectively.
When $\mathcal{R}_0>1$, the disease-free equilibrium $E_0(N, 0, 0)$ and endemic equilibrium $E_1(S^*, I_a^*, I_s^*)$ of system (\ref{SIR-dim3}) are transformed into the disease-free equilibrium $\hat{E}_0(N_1/d_1, 0, 0)$ and endemic equilibrium $\hat{E}_1(\hat{S}^*, \hat{I}_a^*, \hat{I}_s^*)$ of system (\ref{SIR-1}) respectively, where
\begin{eqnarray*}
\begin{split}
\hat{S}^* =\frac{N_1/d_1}{\hat{R}_0}, ~\hat{I}_a^* = \frac{N_1}{\sigma_1+r \sigma_1 \mu_1+r}(1-\frac{1}{\hat{R}_0}),
~\hat{I}_s^* = \mu_1 r I_a^*.
\end{split}
\end{eqnarray*}
Notice that $\hat{R}_0:=\frac{N_1}{d_1}(\frac{1}{r}+\alpha\mu_1)>1$ if and only if $\mathcal{R}_0>1$.
The characteristic
equation of system (\ref{SIR-1}) at $\hat{E}_1$ is
\begin{eqnarray*}\label{Ch-E1}
f_2(\lambda) ={\rm det} (\lambda I-J(\hat{E}_1) ) = \lambda^3+ \xi_2 \lambda^2 + \xi_1 \lambda +\xi_0,
\end{eqnarray*}
where
\begin{eqnarray*}
\begin{split}
\xi_2 &=\{\sigma_1+r \sigma_1 \mu_1+r+r^2 \mu_1 \alpha \sigma_1+r^3 \mu_1^2 \alpha \sigma_1+r^3 \mu_1 \alpha+d_1 \sigma_1 \mu_1 r \alpha+d_1 \sigma_1 \mu_1^2 r^2 \alpha+N_1
\\
~~ & +d_1 \sigma_1+d_1 r \sigma_1 \mu_1+2 N_1 \mu_1 r \alpha+r^2 \mu_1^2 \alpha^2 N_1\}/\{(\sigma_1+r \sigma_1 \mu_1+r)(r \mu_1 \alpha+1)\},
\\
\xi_1 &= d_1 (1+r^2 \mu_1 \alpha)/(r \mu_1 \alpha+1) +(\sigma_1 \mu_1+1+r+\sigma_1) (r \mu_1 \alpha+1) \hat{I}_a^*,
\\
\xi_0 &=N_1\mu_1 r\alpha+N_1-rd_1=rd_1(\hat{R}_0-1).
\end{split}
\end{eqnarray*}
It can be seen that all coefficients $\xi_j$ of polynomial $f_2(\lambda)$ are positive if $\hat{R}_0>1$, where $j=0, 1, 2$.
Moreover, we claim that $\xi_2\xi_1-\xi_0>0$. In fact,
\begin{eqnarray*}
\begin{split}
\xi_2\xi_1-\xi_0 =c_0+c_1 \hat{I}_a^* +c_2 (\hat{I}_a^*)^2,
\end{split}
\end{eqnarray*}
where
\begin{eqnarray*}
\begin{split}
c_0= & ~ \frac{d_1(1+r^2\mu_1 \alpha) (r^2 \mu_1 \alpha+ d_1\mu_1 r\alpha+1+d_1)}{(r \mu_1 \alpha+1)^2},
\\
c_1= & ~ d_1 \mu_1^2 r \alpha \sigma_1+r^3 \mu_1 \alpha+r^2 \mu_1 \alpha \sigma_1+2 d_1 r^2 \mu_1 \alpha+d_1 \sigma_1 \mu_1 r \alpha+\sigma_1 \mu_1
\\
&~+d_1 \sigma_1 \mu_1+1+2 d_1+d_1 \sigma_1 +r(d_1-\sigma_1 \mu_1) + \mu_1 r \alpha(d_1-\sigma_1),
\\
c_2= & ~ (r \mu_1 \alpha+1)^2 (\sigma_1\mu_1+1+r+\sigma_1).
\end{split}
\end{eqnarray*}
It is easy to see that $c_0>0$ and $c_2>0$. Note that $ d_1-\sigma_1 \mu_1=\frac{d+\sigma \mu}{d+r_s}>0$ and $d_1-\sigma_1=\frac{d+\sigma (1- \mu)}{d+r_s}>0$ since $0<\mu<1$. This implies that $c_1>0$. Moreover, $\hat{I}_a^*>0$ yields that $\xi_2\xi_1-\xi_0>0$ and what we claimed is proved.
By the Routh-Hurwitz Criterion, we know that all eigenvalues of the characteristic
polynomial $f_2(\lambda)$ have negative real parts. Thus, endemic equilibrium $\hat{E}_1$ of system (\ref{SIR-1})
is asymptotically stable. This implies that the endemic equilibrium $E_1(S^*, I_a^*, I_s^*)$ of system (\ref{SIR-dim3}) is also asymptotically stable.
\end{proof}
From Lemma \ref{localstablity} and Lemma \ref{L-E1}, we can see that $\mathcal{R}_0$ is the threshold quantity
of local dynamics of system \eqref{SIR-dim3} in $\mathbb{R}^3_+$.
By Theorem \ref{existenceUni}, we only need to consider system \eqref{SIR-dim3}
for its global dynamics in $\mathcal{D}_0$.
The following theorems will show that $\mathcal{R}_0$ is also the threshold quantity of global dynamics of system \eqref{SIR-dim3} in $\mathcal{D}_0$.
\begin{theorem}
\label{globalE0}
If $\mathcal{R}_0\le 1$, then the disease-free equilibrium $E_0(N, 0, 0)$ of system (\ref{SIR-dim3}) is globally asymptotically stable in $\mathcal{D}_0$.
\end{theorem}
The proof of this theorem can be finished by
constructing a Liapunov function
\begin{equation*}
\label{Lia1}
L(S, I_a,I_s) =I_a(t) +\frac{d+r_a}{d+r_s}\alpha I_s(t)
\end{equation*}
in $\mathcal{D}_0$. For saving space, we bypass it.
\begin{theorem}
\label{globalE1}
If $\mathcal{R}_0> 1$ and $\mu=0$ (resp. $\mu=1$), then $E_2(S_2^*,0,I_{s2}^*)$
(resp. $E_3(S_3^*,I_{a3}^*,0)$) attracts all orbits of system (\ref{SIR-dim3}) in $\mathcal{D}_0$ except both $E_0(N, 0, 0)$
and a positive orbit $\gamma$ in its two dimensional stable manifold,
where
$$
\gamma=\{(S,I_a,I_s)\in \mathcal{D}_0:\ I_a=0,\ I_s=0, \ 0<S<N \} .
$$
\end{theorem}
\begin{proof}
We first prove the case that $\mathcal{R}_0> 1$ and $\mu=0$. When $\mu=0$, system (\ref{SIR-dim3}) becomes
\begin{equation}\label{mu0}
\begin{cases}
\dot{S}=(d+\sigma)(N-S)-\beta S(I_a+\alpha I_s)-\sigma (I_a+I_s),\\
\dot{I_a}=-(d+r_a)I_a,\\
\dot{I_s}=\beta S(I_a+\alpha I_s)-(d+r_s)I_s.
\end{cases}
\end{equation}
It is clear that $\lim_{t\to +\infty}I_a(t)=0$. Hence, the limit system of system \eqref{mu0} in $\mathcal{D}_0$ is
\begin{equation}\label{mu0limit}
\begin{cases}
\dot{S}=(d+\sigma)(N-S)-\alpha\beta S I_s-\sigma I_s,\\
\dot{I_s}=\alpha\beta S I_s-(d+r_s)I_s
\end{cases}
\end{equation}
in $\mathcal{D}_{1}=\{(S,I_s):\ 0\le S\le N,\ 0\le I_s\le N\}$, which has two equilibria: $(N,0)$ and $(S_2^*,I_{s2}^*)$.
Equilibrium $(N,0)$ is a saddle and $(S_2^*,I_{s2}^*)$ is locally asymptotically stable if $\mathcal{R}_0>1$.
In the following we prove that $(S_2^*,I_{s2}^*)$ attracts all orbits of system \eqref{mu0limit} in $\mathcal{D}_{1}$ except both $(N,0)$ and its one dimensional stable manifold.
Let $x=S+\frac{\sigma}{\alpha\beta}$ and $y=I_s$. Then system \eqref{mu0limit} becomes
\begin{equation}\label{mu0xy}
\begin{cases}
\dot{x}=(d+\sigma)(N+\frac{\sigma}{\alpha\beta})-(d+\sigma)x-\alpha\beta xy,\\
\dot{y}=\alpha\beta xy-(d+r_s+\sigma)y.
\end{cases}
\end{equation}
Hence, $(x_0,y_0)=(S_2^*+\frac{\sigma}{\alpha\beta},I_{s2}^*)$ is the unique positive equilibrium of system \eqref{mu0xy} if $\mathcal{R}_0>1$.
Consider the Liapunov function of system \eqref{mu0xy}
$$
V(x,y)=\frac{1}{2}(x-x_0)^2+x_0\left(y-y_0-y_0\ln\frac{y}{y_0}\right)
$$
in $\tilde{\mathcal{D}}_{1}=\{(x,y):\ \frac{\sigma}{\alpha\beta}\le x\le N+\frac{\sigma}{\alpha\beta},\ 0\le y\le N\}$.
It is clear that $V(x,y)\ge 0$ and $V(x,y)=0$ if and only if $x=x_0$ and $y=y_0$ in $\tilde{\mathcal{D}}_{1}$. And
$$
\frac{dV(x(t),y(t))}{dt}|_{\eqref{mu0xy}}=-(x-x_0)^2(\alpha\beta y+d+\sigma)\le 0
$$
in $\tilde{\mathcal{D}}_{1}$.
By LaSalle's Invariance Principle, we know that $(x_0,y_0)$ attracts all orbits of system \eqref{mu0xy}
in $\tilde{\mathcal{D}}_{1}$ except both equilibrium $(N+\frac{\sigma}{\alpha\beta},0)$ and its one dimensional stable manifold $\{(x,y):\ y=0, 0<x<N+\frac{\sigma}{\alpha\beta}\}$. This leads to the conclusion, $E_2(S_2^*,0,I_{s2}^*)$
attracts all orbits of system (\ref{SIR-dim3}) in $\mathcal{D}_0$ except both $E_0(N, 0, 0)$ and a positive orbit $\gamma $ if $\mathcal{R}_0> 1$ and $\mu=0$.
Using the similar arguments, we can prove that $E_3(S_3^*,I_{a3}^*,0)$ attracts all orbits of system (\ref{SIR-dim3}) in $\mathcal{D}_0$
except both $E_0(N, 0, 0)$ and a positive orbit $\gamma $ if $\mathcal{R}_0> 1$ and $\mu=1$.
\end{proof}
\begin{theorem}
\label{th-globE1}
If $\mathcal{R}_0> 1$, $0<\mu<1$ and $r_a=r_s$, then the endemic equilibrium $E_1(S^*, I_a^*, I_s^*)$
attracts all orbits of system (\ref{SIR-dim3}) in $\mathcal{D}_0$ except $E_0(N, 0, 0)$.
\end{theorem}
\begin{proof}
Let
$I=I_a+\alpha I_s, ~~~N_1=S+I_a+I_s$.
Then under the assumption $r_a=r_s=r$, system (\ref{SIR-dim3}) in $\mathcal{D}_0$ can be written as
\begin{equation}\label{SIN}
\begin{cases}
\dot{S}=(d+\sigma)N -\sigma N_1 -dS -\beta SI,\\
\dot{I}=\tilde{\mu}SI-(d+r)I,\\
\dot{N_1}=(d+\sigma)N -(d+r+\sigma) N_1+rS
\end{cases}
\end{equation}
in $\tilde{\mathcal{D}}_0:=\{(S, I, N_1)|\; S\ge 0, I\ge 0, ~N\ge N_1\ge 0 \}$,
where $\tilde{\mu}=(\mu+\alpha(1-\mu))\beta$.
Thus, equilibrium $E_1(S^*, I_a^*, I_s^*)$
of system (\ref{SIR-dim3}) becomes equilibrium $\tilde{E}_1(S^*, I^*, N_1^*)$ of system (\ref{SIN}) and $\tilde{E}_1$ is locally asymptotically stable,
where $I^*=I_a^*+\alpha I_s^*, N_1^*=S^* + I_a^* + I_s^*$.
Applying a typical approach of Liapunov functional,
we define
\begin{equation*}\label{gx}
g(x) = x- 1- \ln x,
\end{equation*}
and construct a Liapunov functional of system (\ref{SIN})
\begin{equation*}\label{V1}
V_1(S, I, N_1)=\frac{\nu_1}{2} (S-S^*)^2+\nu_2 I^*g(\frac{I}{I^*}) + \frac{\nu_3}{2} (N_1-N_1^*)^2,
\end{equation*}
where arbitrary constant $\nu_1>0$, $\nu_2=\nu_1\beta S^*/\tilde{\mu}$ and $\nu_3=\nu_1\sigma/r$.
Note that $g (x)\ge g (1) = 0$ for all $x > 0$ and the global minimum $g (x) = 0$ is attained if and only if $x = 1$.
Thus, $V_1(S, I, N_1)\ge 0$ and $V_1(S, I, N_1)=0$ if and only if $S=S^*$, $I=I^*$ and $N_1=N_1^*$ in
$\tilde{\mathcal{D}}_0$.
The derivative of $V_1$ along the trajectories of system \eqref{SIN} is
\begin{equation*}\label{dV1}
\begin{split}
\frac{dV_1(S,I, N_1)}{dt}=& -\nu_1 d {S^*}^2(x-1)^2 -\nu_3(d+r+\sigma) {N_1^*}^2(z-1)^2
\\
& - \nu_1\beta {S^*}^2I^*y(x-1)^2\le 0,
\end{split}
\end{equation*}
where
$x=\frac{S}{S^*}, ~~y=\frac{I}{I^*}, ~~ z=\frac{N_1}{N_1^*}$.
Note that the only compact invariant subset of the set $\{(S,I, N_1):\ \frac{dV_1(S,I, N_1)}{dt}=0\}$ is the singleton $\tilde{E}_1(S^*, I^*, N_1^*)$
in $\tilde{\mathcal{D}}_0$. Consequently, we can conclude that $E_1(S^*, I_a^*, I_s^*)$ is globally asymptotically stable
and attracts all orbits of system (\ref{SIR-dim3}) in $\mathcal{D}_0$ except $E_0(N, 0, 0)$.
\end{proof}
From Theorem \ref{th-globE1} and the continuity of solutions with respect to parameters $r_a$ and $r_s$, we obtain
the following results.
\begin{theorem}
\label{th-globE1-2}
If $\mathcal{R}_0> 1$ and $0<\mu<1$, then the endemic equilibrium $E_1(S^*, I_a^*, I_s^*)$
is globally asymptotically stable in the interior of $\mathcal{D}_0$ for $ 0<|r_s-r_a|\ll 1$.
\end{theorem}
\section{Discussion}
In this model, we divide the period of the disease transmission into two seasons. In fact, it can be divided into $n$ seasons for any given $n\in\mathbb{Z}_+$.
Compared with continuous periodic systems, our piecewise continuous periodic model can provide a straightforward method to evaluate the basic reproduction
number $\mathcal{R}_0$, that is to calculate the spectral radius of matrix $\Phi_{F-V}(\omega)=e^{(F_2-V)\theta\omega}e^{(F_1-V)(1-\theta)\omega}$.
It is shown that the length of the season, the transmission rate
and the existence of asymptomatic infective affect the basic reproduction number $\mathcal{R}_0$, and there is still the risks of infectious disease outbreaks due to the existence of asymptomatic infection
even if all symptomatic infective individuals has been quarantined, that is, $\alpha=0$. This provides an intuitive basis for
understanding that the asymptomatic infective individuals and the disease seasonal transmission promote the evolution of epidemic.
Moreover, the theoretical dynamics of the model allow us to predict the outcomes of control strategies during the course of the epidemic.
\end{document} |
\begin{document}
\begin{center}
{\large{\textbf{Optimal designs for experiments for scalar-on-function linear models}} } \\[1ex]
D. Michaelides\footnote{Damianos Michaelides; [email protected]; School of Mathematical Sciences, University of Southampton, Southampton, SO17 1BJ, UK.}
, M. Adamou, D. C. Woods \& A. M. Overstall \\[1ex]
Southampton Statistical Sciences Research Institute \\
University of Southampton, United Kingdom
\end{center}
\normalsize
The aim of this work is to extend the usual optimal experimental design paradigm to experiments where the settings of one or more factors are functions. For these new experiments, a design consists of combinations of functions for each run of the experiment along with settings for non-functional variables. After briefly introducing the class of functional variables, basis function systems are described. Basis function expansion is applied to a functional linear model consisting of both functional and scalar factors, reducing the problem to an optimisation problem of a single design matrix.
\begin{flushleft}
Keywords: Design of Experiments; Functional Linear Model; Profile Factors; Basis functions
\end{flushleft}
\section{Introduction} \label{int}
In science and engineering, an increasing number of experiments involve the investigation of the relationship between a response and functional or profile variables, i.e. variables whose values can be varied as a function, usually of time, within a single run of an experiment. A common example of a profile variable is temperature, being varied monotonically or as a step function through the run. The statistical design problem then becomes choosing suitable functions that determine how each profile variable varies during each run. For ease of exposition, throughout this paper we shall assume time $t\in [0,\mathcal{T}]$ is the continuous single input to functional variables. The methods extend naturally to situations where there are functional variables with multiple inputs, e.g. spatio-temporal studies.
Statistical modelling with functional data is well established in the statistics literature, see \citet{ramsay1997} and \citet{ramsay2005} for an introduction. However, the design of experiments for such models has received much less attention, with two main approaches being proposed: response surface methods using dimension-reduction techniques (\citealp{georgakis2013}, \citealp{roche2015}, \citealp{roche2018} and \citealp{klebanov2016}) and optimal design for dynamic models, typically derived from differential equations (\citealp{balsa2007} and \citealp{ucinski2007}). We develop methodology related to the response surface approach to find optimal functions for profile variables assuming a scalar-on-function linear model. Our approach leverages the power of standard linear model optimal design methodology and its flexibility allows designs to be obtained for various different scenarios, including common experiments in the pharmaceutical industry, and using different optimality criteria.
In Section \ref{flm}, with a view to model and design experiments involving profile factors, a functional linear model along with an optimal design strategy is developed. Models involving both profile and scalar factors are tackled and a basis function expansion is used for both the profile factors and functional parameters. Functional models turn out to be an extension of the traditional linear model and the model developed is capable of incorporating an unlimited number of profile and scalar factors. In Section \ref{ex}, examples with only profile and both profile and scalar variables are presented, for which it is assumed that control of the profile variables is represented by step functions. In Section \ref{bs}, the set of possible functions for the profile variables is extended by the use of spline basis expansions and the performance of the designs is compared to the designs found under the step function basis. Section \ref{conc} provides a conclusion of the findings and future research goals.
\section{The scalar-on-function linear model and optimal design} \label{flm}
\subsection{The scalar-on-function linear model}
Let $y_i$ denote a scalar response observed at time $\mathcal{T}$, the end of the experiment, for the $i$th run of the experiment ($i=1,\ldots,n$). An additive scalar-on-function linear model of the following form is assumed for $y_i$:
\begin{equation}
\label{eq:fullmodel}
y_{i} = \beta_{0} + \sum_{j=1}^{f_{1}} \int_{0}^{\mathcal{T}} \beta_{j}(t) x_{ij}(t) \; dt + \sum_{k=f_{1}+1}^{f_{1}+f_{2}} \beta_{k} x_{ik} + \varepsilon_{i}\,,
\end{equation}
where $f_{1}$ is the number of functional variables involved in the experiment, $f_{2}$ is the number of scalar variables involved in the experiment, $\beta_{j}(t) : [0,\mathcal{T}] \rightarrow \mathbb{R} $ is an unknown function of time $ 0 \leq t \leq \mathcal{T}$, $x_{ij}(t) : [0,\mathcal{T}] \rightarrow [u,v] $ the function controlling the $j$th functional variable in the interval $[u,v]$, $\beta_{0}, \beta_{f_{1}+1}, ..., \beta_{f_{1}+f_{2}}$ are unknown constant parameters, $x_{ik} \in [u_{s},v_{s}] $ is the value for the $k$th scalar factor lying in the interval $[u_{s},v_{s}]$ and $\varepsilon_{i} \sim\mathcal{N} (0, \sigma^{2}) $, with errors from different experiment runs assumed independent. The scalars $u, v, u_{s}, v_{s}$ are constants representing the bounds for the functional and scalar variables, respectively.
Viewed via an increasingly fine discretisation of $t$, $t_1,\dots, t_m$ with $m\rightarrow\infty$, model~\eqref{eq:fullmodel} has essentially an infinite number of unknowns $\beta_j(t_k)$. Hence, as the experiment only returns a finite amount of data, the system is underdetermined and there will be an infinite number of solutions $\hat{\boldsymbol{\beta}}^{\rm{T}}(t) = (\hat{\beta}_0, \hat{\beta}_1(t), \ldots, \hat{\beta}_{f_1}(t), \hat{\beta}_{f_1+1}, \ldots, \hat{\beta}_{f_1+f_2})$ that each give a perfect fit to the data. Hence, inference from a given experiment requires some restriction on the function space. We implicitly define spaces of functions $\beta_{j}(t)\in\mathcal{B}_j[0,\mathcal{T}]$ and $x_{ij}(t)\in\mathcal{X}_j[0,\mathcal{T}]$ via basis function expansions, with $\mathcal{B}_j[0,\mathcal{T}]$ and $\mathcal{X}_j[0,\mathcal{T}]$ the function spaces of the functional parameters and profile variables respectively, from time 0 to time $\mathcal{T}$. For the functional parameters
\begin{eqnarray}
\beta_j(t) & = & \sum_{l=1}^{n_{\beta,j}} \theta_{jl}b_{jl}(t)\nonumber \\
& = & \boldsymbol{b}_j(t)^{\rm{T}}\boldsymbol{\theta}_j\,,\label{eq:betaex1}
\end{eqnarray}
where $\boldsymbol{b}_j(t)^{\rm{T}} = [b_{j1}(t),\ldots,b_{jn_{\beta,j}}(t)]$ are known basis functions with corresponding (unknown) coefficients $\boldsymbol{\theta}_j^{\rm{T}} = (\theta_{j1},\ldots,\theta_{jn_{\beta,j}})$.
Hence, we have reduced the problem of estimating an unknown function to the problem of estimating $n_{\beta,j}$ coefficients.
We can define the functions $x_{ij}(t)$ via a similar expansion,
\begin{equation}\label{eq:xex}
x_{ij}(t) = \sum_{l=1}^{n_{x,j}} \gamma_{ijl}c_l(t)\,,
\end{equation}
and for $\boldsymbol{x}_j(t)^{\rm{T}} = [x_{j1}(t),\ldots,x_{jn}(t)]$ we have that $\boldsymbol{x}_j(t) = \Gamma_j\boldsymbol{c}_j(t)$, where $\boldsymbol{c}_j(t)^{\rm{T}}=[c_{j1}(t),\ldots,c_{jn_{x,j}}(t)]$ are known basis functions, $\Gamma_j$ is a $n\times n_{x,j}$ coefficient matrix with $il$th entry $\gamma_{ijl}$. Where necessary, linear transformations of the basis expansions are used to ensure the desired image of the function $x_{ij}(t)$ is obtained.
A substitution of the basis expansions into model~\eqref{eq:fullmodel} gives a familiar model form for the response vector $\boldsymbol{y}^{\rm{T}} = (y_1,\ldots, y_n)$:
\begin{equation}
\label{eq:extendedmodel}
\begin{split}
\bm{y} & = \beta_{0}\bm{1}_{n} + \sum_{j=1}^{f_{1}} \int_{0}^{\mathcal{T}} \beta_{j}(t) \bm{x}_{j}(t) \; dt + \sum_{k=f_{1}+1}^{f_{1}+f_{2}} \beta_{k} \bm{x}_{k} + \boldsymbol{\varepsilon} \\
& = \beta_{0}\bm{1}_{n} + \sum_{j=1}^{f_{1}} \int_{0}^{\mathcal{T}} \bm{\Gamma}_{j} \bm{c}_{j}(t) \bm{b}_{j}(t)^{T} \bm{\theta}_{j} \; dt + \sum_{k=f_{1}+1}^{f_{1}+f_{2}} \beta_{k} \bm{x}_{k} + \boldsymbol{\varepsilon}\\
& = \beta_{0}\bm{1}_{n} + \bm{J}_{\gamma cb} \bm{\theta} + \bm{X\beta} + \bm{\varepsilon} \\
\end{split}
\end{equation}
where $\bm{1}_{n}$ is the $n$-vector with every entry equal to one, $\bm{\theta}^{\rm{T}} = (\boldsymbol{\theta}_1^{\rm{T}}, \ldots, \boldsymbol{\theta}_{f_1}^{\rm{T}})$, $\bm{X}$ the $n\times f_2$ model matrix for the scalar factors, $\bm{\beta}^{\rm{T}} = (\beta_{f_1+1}, \ldots, \beta_{f_1+f_2})$ and $\boldsymbol{\varepsilon}^{\rm{T}} = (\varepsilon_1,\ldots, \varepsilon_n)$. The $n \times \sum n_{\beta,j}$ matrix $\bm{J}_{\gamma cb}$ is formed by column binding the $f_1$ sub-matrices
\begin{equation}
\begin{split}
\label{eq:jgcb}
\bm{J}_{\gamma cb_{j}} & = \int_{0}^{\mathcal{T}} \bm{\Gamma}_{j} \; \bm{c}_{j}(t) \; \bm{b}_{j}(t)^{T} \; dt\,\quad j=1,\ldots,f_1 \\
& = \bm{\Gamma}_{j} \int_{0}^{\mathcal{T}} \bm{c}_{j}(t) \; \bm{b}_{j}(t)^{T} \; dt,
\end{split}
\end{equation}
with the integral of the matrix defined as the matrix of integrals, i.e. the integral of a vector $\bm{\alpha}(t)^{T} = [\alpha_{1}(t), . . . , \alpha_{n}(t)]$ is defined as
\begin{equation*}
\int_{0}^{\mathcal{T}} \bm{\alpha}(t) \; dt = \Bigg[ \int_{0}^{\mathcal{T}} \alpha_{1}(t) \; dt, ..., \int_{0}^{\mathcal{T}} \alpha_{n}(t) \; dt \Bigg]^{T}.
\end{equation*}
Hence model~\eqref{eq:extendedmodel} takes the form of the familiar linear model,
\begin{equation*}\label{eq:finallm}
\boldsymbol{y} = \bm{Z\nu} +\boldsymbol{\varepsilon}\,,
\end{equation*}
with $n \times (\sum n_{\beta,j} + f_{2} + 1)$ model matrix
\begin{equation}
\label{eq:z}
\bm{Z} = [ \bm{1 \: \; J_{\gamma cb} \: \; X} ]\,,
\end{equation}
and ($\sum n_{\beta,j} + f_{2} + 1$)-vector $\bm{\nu}$ of coefficients
\begin{equation}
\label{eq:nu}
\bm{\nu}^{T} = [ \beta_{0} \: \; \bm{\theta}^{T} \: \; \bm{\beta}^{T} ]\,.
\end{equation}
The least squares estimator for $\bm{\nu}$ hence takes the form
\begin{equation}
\label{eq:nuhat}
\hat{\bm{\nu}} = (\bm{Z}^{T}\bm{Z})^{-1} \bm{Z}^{T}\bm{y}\,.
\end{equation}
with variance-covariance matrix
\begin{equation}
\begin{split}
\label{eq:varnuhat}
\text{Var}(\hat{\bm{\nu}}) & = \text{Var}\Big[(\bm{Z}^{T}\bm{Z})^{-1} \bm{Z}^{T}\bm{y} \Big] \\
& = \sigma^{2} (\bm{Z}^{T}\bm{Z})^{-1}.
\end{split}
\end{equation}
\subsection{The reduced design problem}\label{flm2}
Representation of the functional variables and parameters via basis function expansions in model~\eqref{eq:finallm} reduces the problem of finding a functional optimal design to one of choosing coefficient matrices $\bm{\Gamma}_{j}$, together with values for the scalar inputs, to minimise a functional of equation~\eqref{eq:varnuhat}. Here, we focus on point estimation of $\bm{\theta}$ using $A$-optimality, and find designs that lead to information matrices $\bm{M} = \bm{Z}^{\rm{T}}\bm{Z}$ that minimise
\begin{equation}
\label{eq:aopt}
\begin{split}
\Psi_{A}(\bm{M}) & = \text{tr} \big\{ \text{Var}(\hat{\bm{\nu}}) / \sigma^2 \big\} \\
& = \text{tr} \big\{ \bm{M}^{-1} \big\}\,.
\end{split}
\end{equation}
Clearly, an $A$-optimal design will depend on the choice of bases for both the functional parameters and inputs, as well as the number of functions in each of these bases. In what follows, we investigate the impact of these choices for a number of different scenarios.
For two designs with the same size $n$ resulting in information matrices $M_1$ and $M_2$, we define the relative $A$-efficiency of design 2 with respect to design 1 as $\Psi_A(M_1) / \Psi_A(M_2)$.
\section{Optimal designs using step functions} \label{ex}
For the examples to be considered in this section, control of a functional variable is obtained via choice of step functions defined as
$$
x_{ij}(t) = \sum_{l=1}^{n_{x,j}}\gamma_{ijl}\bm{1}_{\lambda_l}(t)\,,
$$
for breakpoints $\lambda_0 = 0 < \lambda_1 < \cdots < \lambda_{n_{x,j}} = \mathcal{T}$ and
$$
\bm{1}_{\lambda_l}(t) =
\begin{cases}
1 & \mbox{if } \lambda_{l-1} < t < \lambda_l \\
0 & \mbox{otherwise}\,.
\end{cases}
$$
An example of such a function with two interior break points, and hence $n_{x, j} = 3$, is given in Figure~\ref{fig:stepknots}.
\begin{figure}
\caption{Step function for $x_{ij}(t)$ with two interior break points ($n_{x,j}=3$).}
\label{fig:stepknots}
\end{figure}
$A$-optimal designs are found for two choices of power series bases for $\beta_j(t)$, a linear basis
\begin{equation}\label{eq:linb}
\beta_j(t) = \alpha_{0} + \alpha_{1}t\,,
\end{equation}
and a quadratic basis
\begin{equation}\label{eq:quadb}
\beta_j(t) = \alpha_{0} + \alpha_{1}t + \alpha_{2}t^{2}\,.
\end{equation}
See Figure~\ref{fig:linearquadratic}. Designs are sought for two experiments: (i) with a single functional variable, and (ii) with one functional variable and three scalar variables.
\begin{figure}
\caption{(a) Linear and (b) Quadratic basis functions for $\beta (t)$.}
\label{fig:linearquadratic}
\end{figure}
\subsection{Example 1: Single profile factor } \label{ex1}
Under this example, $f_{1}=1$ and $f_{2}=0$ and model~\eqref{eq:extendedmodel} is simplified such that
\begin{equation}
\label{eq:example1}
y_{i} = \beta_{0} + \int_{0}^{\mathcal{T}} \beta (t) x_{i}(t) \; dt + \varepsilon_{i}\,,
\end{equation}
for $i = 1, ..., n$. We enforce the constraint $-1 \leq x_{i}(t) \leq 1$ via linear transformation and construct step functions assuming $n_x+2$ equally-spaced breakpoints on $[0,1]$. To ensure identifiability of model~\eqref{eq:example1}, we restrict the number of basis functions in the expansion of $x_i(t)$ to be $n_{x} \ge 2$ when finding designs for the linear $\beta(t)$ basis and $n_{x} \ge 3$ when considering the quadratic $\beta(t)$ basis.
Designs for different combinations of $n$ and $n_x$ are found under both basis expansions~\eqref{eq:linb} and~\eqref{eq:quadb} using 1000 random starts of a coordinate exchange algorithm (\citealp{goos2011}). The results are given in Table~\ref{table:linear} for the linear basis and Table~\ref{table:quadratic} for the quadratic basis.
Some general findings can be observed from the results. For both functional parameter bases, the $A$-optimality objective function values decrease with experiment size $n$, as expected, with typically, the designs for $n=8$ and $n=12$ containing replicates of the functions from the $n=4$ design. Also, the $A$-efficiency of the designs (within each experiment size) generally increases with $n_x$. The exception is for the case $n_x = 3$ for the linear functional parameter basis, where the inability to make a step change in the centre of the interval $[0,1]$ (as $\lambda = 0.5$ is not included in the set of breakpoints) leads to an increase in the average variance compared to the design for $n_x = 2$. Again, increasing design performance with $n_x$ is generally expected, as increasing the number of breakpoints enlarges the set of possible functions from which the design can be chosen. Changes in design performance with $n_x$ are greater for the quadratic $\beta(t)$ basis, as more complex functions are chosen in the $A$-optimal design. Representative example designs are shown in Figures~\ref{fig:linearstep1} and~\ref{fig:quadraticstep1} for the linear and quadratic bases respectively.
\begin{table}[H]
\centering
\begin{tabular}{ ccccccc }
\toprule
\midrule
\multicolumn{1}{c}{} & \multicolumn{2}{c}{ $n=4$} & \multicolumn{2}{c}{ $n=8$} & \multicolumn{2}{c}{ $n=12$} \\
\midrule
$n_{x}$ & A-opt & A-eff & A-opt & A-eff & A-opt & A-eff \\
\hline
2 & 8.750 & 0.961 & 3.958 & 0.981 & 2.583 & 0.972 \\
3 & 8.828 & 0.952 & 4.287 & 0.906 & 2.778 & 0.904 \\
4 & 8.750 & 0.961 & 3.903 & 0.995 & 2.570 & 0.977 \\
8 & 8.493 & 0.990 & 3.902 & 0.995 & 2.539 & 0.989 \\
16 & 8.427 & 0.997 & 3.887 & 0.999 & 2.520 & 0.997 \\
100 & 8.404 & 1.000 & 3.882 & 1.000 & 2.512 & 1.000 \\
\midrule
\bottomrule
\end{tabular}
\caption{Objective function values and A-efficiency values with $n=4,8,12$ for the linear basis for $\beta(t)$ and step basis for $x(t)$.}
\label{table:linear}
\end{table}
\begin{table}[H]
\centering
\begin{tabular}{ ccccccc }
\toprule
\midrule
\multicolumn{1}{c}{} & \multicolumn{2}{c}{ $n=4$} & \multicolumn{2}{c}{ $n=8$} & \multicolumn{2}{c}{ $n=12$} \\
\midrule
$n_{x}$ & A-opt & A-eff & A-opt & A-eff & A-opt & A-eff \\
\hline
3 & 386.408 & 0.535& 189.766 & 0.510 & 126.409 & 0.499 \\
4 & 246.869 & 0.838 & 103.553 & 0.934 & 67.735 & 0.931 \\
8 & 218.479 & 0.947 & 99.109 & 0.976 & 65.217 & 0.966 \\
16 & 208.843 & 0.991 & 97.408 & 0.993 & 63.610 & 0.991 \\
100 & 206.884 & 1.000 & 96.709 & 1.000 & 63.028 & 1.000 \\
\midrule
\bottomrule
\end{tabular}
\caption{Objective function values and A-efficiency values with $n=4,8,12$ for the quadratic basis for $\beta(t)$ and step basis for $x(t)$.}
\label{table:quadratic}
\end{table}
\begin{figure}
\caption{Four run optimal design under the linear basis for $\beta(t)$ and the step function basis for $x(t)$.}
\label{fig:linearstep1}
\end{figure}
\begin{figure}
\caption{Four run optimal design under the quadratic basis for $\beta(t)$ and the step function basis for $x(t)$.}
\label{fig:quadraticstep1}
\end{figure}
\subsection{Example 2: Single profile and three scalar factors}
For this example, the model is extended to include a single profile factor and $f_{2}=3$ scalar factors. The functional linear model takes the form,
\begin{equation}
\label{eq:example2}
y_{i} = \beta_{0} + \int_{0}^{\mathcal{T}} \beta_{1} (t) x_{i1}(t) \; dt + \bm{f}^{T}(\bm{x}_{i})\bm{\beta} + \epsilon_{i},
\end{equation}
for $i = 1, ..., n$, assuming $-1 \leq x_{i}(t) \leq 1$ and,
\[
\bm{f}^{T}(\bm{x}_{i})\bm{\beta} =
\begin{cases}
\beta_{2}x_{i2} + \beta_{3}x_{i3} + \beta_{4}x_{i4} & \text{Case } 1 \\
\beta_{2}x_{i2} + \beta_{3}x_{i3} + \beta_{4}x_{i4} + \beta_{5}x_{i2}^{2} + \beta_{6}x_{i3}^{2} + \beta_{7}x_{i4}^{2} & \text{Case } 2
\end{cases}
\]
being the model for scalar factors, i.e. main effects or main and quadratic effects. Control of the single profile factor is assumed to be represented by a step function basis, the functional parameter $\beta_{1}(t)$ is assumed to be represented by both a linear and quadratic basis, similar to Section \ref{ex1}, and $n=12$ runs is chosen. In this example, the sensitivity study raised is partly similar to the study in Section \ref{ex1}. The number of basis functions for the profile factor and functional parameters are varied as before and additionally the scalar part with main and main and quadratic effects is considered. The optimal designs for the single profile factor are exactly the same as the designs found in Section \ref{ex1}, so no further elaboration is required.
What is interesting to investigate is how the designs for the scalar factors behave from main to main and quadratic effects. For the Case 1 model, i.e. the model with main effects, the optimal choices of the scalar factors are those at the boundaries. This is similar to the profile factor behaviour with linear basis for the parameters, where the function of the profile factor changes at most once. For the Case 2 model, i.e. the model with both main and quadratic effects, the optimal design for the scalar factors include boundary points and centre points in order to be able to estimate the curvature. This is similar to the profile factor behaviour with quadratic basis for the parameters, where the function of the profile factor changes at most twice. For the Case 2 model, the columns for the scalar factors are orthogonal. The optimal designs for the scalar factors with a linear basis function for the functional parameter and $n_{x}=4$ are given in Table~\ref{table:oneprofilethreescalar}.
\begin{table}[H]
\setlength{\tabcolsep}{8pt}
\hspace{10pt}
\begin{minipage}{0.5\textwidth}
\centering
\begin{tabular}{r|rrr}
\multicolumn{4}{c}{Case 1} \\
\midrule
\toprule
$i$ & $x_{i2}$ & $x_{i3}$ & $x_{i4}$ \\
\midrule
1 & -1 & 1 & -1 \\
2 & -1 & 1 & 1 \\
3 & 1 & 1 & -1 \\
4 & -1 & -1 & 1 \\
5 & -1 & -1 & -1 \\
6 & -1 & 1 & 1 \\
7 & 1 & -1 & 1 \\
8 & 1 & -1 & -1 \\
9 & 1 & 1 & 1 \\
10 & 1 & -1 & 1 \\
11 & 1 & 1 & -1 \\
12 & -1 & -1 & -1 \\
\midrule
\bottomrule
\end{tabular}
\end{minipage}
\hspace{-50pt}
\begin{minipage}{0.5\textwidth}
\centering
\begin{tabular}{r|rrr}
\multicolumn{4}{c}{Case 2} \\
\midrule
\toprule
$i$ & $x_{i2}$ & $x_{i3}$ & $x_{i4}$ \\
\midrule
1 & 1 & 1 & 1 \\
2 & 0 & 0 & -1 \\
3 & 1 & 0 & 0 \\
4 & 0 & -1 & 1 \\
5 & -1 & 0 & 1 \\
6 & 0 & 1 & 1 \\
7 & -1 & -1 & 0 \\
8 & -1 & 0 & 0 \\
9 & 0 & 0 & 0 \\
10 & 0 & 1 & 0 \\
11 & 0& 0 & 0 \\
12 & -1 & 1 & -1 \\
\midrule
\bottomrule
\end{tabular}
\end{minipage}
\caption{Optimal designs for main effects (Case 1) and main \& quadratic effects (Case 2) for linear basis function for the functional linear model.}
\label{table:oneprofilethreescalar}
\end{table}
\section{Spline basis functions} \label{bs}
To assess whether design performance is improved by widening the set of possible functions from which we choose the runs for the functional variable, we now use a spline basis expansion for $x_{ij}(t)$. A spline is a piecewise polynomial with continuity, and possibly continuity of derivatives, defined at the breakpoints. We use a B-spline basis expansion \citep[see][]{deboor1978} of degree 1 on the interval $[0,1]$. B-splines have a number of desirable properties as a spline basis, see \citet[Chapter 1]{dierckx1995}, including ease of computation through recursive formulae, they sum to unity for any $t$ in the domain, and they are more numerically stable than alternative bases, e.g. the truncated power basis.
We repeat the example of Section~\ref{ex1} and find $A$-optimal designs for model~\eqref{eq:example1} with a single functional variable but assuming a linear B-spline for $x_i(t)$. We find designs for a linear functional parameter, $n = 4, 8, 12$ runs and choose the breakpoint vector such that $n_x = 3, 4, 8, 16, 100, 201$, assuming equally spaced breakpoints. Results are given in Table~\ref{table:bslinear}, and example designs are displayed in Figures~\ref{fig:bslinear0} and~\ref{fig:bslinear1}. As in the previous section, design performance increases with $n_x$ for all values of $n$, but with much smaller increases for $n>16$. The functions chosen for the design actually become somewhat simpler for larger values of $n_x$, e.g. for $n_x = 8$ the chosen functions commonly have four changes in slope (see Figure~\ref{fig:bslinear0}) whereas each function has only two changes when $n_x=16$ (Figure~\ref{fig:bslinear1}). This difference demonstrates the importance of the location of the breakpoints, as with larger values of $n_x$ there is more flexibility in the location of the changes in slope. For larger $n_x$, each chosen function consists of two constant sections, at the extremes of the design space, joined by a linear component with finite slope. However, comparison of the objective function values for the designs found assuming step functions (Table~\ref{table:linear}) shows that the added complexity of the linear spline basis did not result in better designs.
\begin{table}
\centering
\begin{tabular}{ ccccccc }
\toprule
\midrule
\multicolumn{1}{c}{} & \multicolumn{2}{c}{ $n=4$} & \multicolumn{2}{c}{ $n=8$} & \multicolumn{2}{c}{ $n=12$} \\
\midrule
$n_{x}$ & A-opt & A-eff & A-opt & A-eff & A-opt & A-eff \\
\hline
3 & 12.471 & 0.674 & 6.224 & 0.624 & 4.123 & 0.609 \\
4 & 9.314 & 0.902 & 4.168 & 0.931 & 2.759 & 0.910 \\
8 & 8.594 & 0.978 & 3.940 & 0.985 & 2.571 & 0.977 \\
16 & 8.433 & 0.997 & 3.895 & 0.997 & 2.528 & 0.994 \\
100 & 8.405 & 1.000 & 3.882 & 1.000 & 2.512 & 1.000 \\
201 & 8.404 & 1.000 & 3.882 & 1.000 & 2.512 & 1.000 \\
\midrule
\bottomrule
\end{tabular}
\caption{Objective function values and A-efficiency values with $n=4,8,12$ for the linear basis for $\beta(t)$ and B-spline basis for $x(t)$.}
\label{table:bslinear}
\end{table}
\begin{figure}
\caption{Four run optimal design for $n_{x}=8$ under the linear basis for $\beta(t)$ and the B-spline basis for $x(t)$.}
\label{fig:bslinear0}
\end{figure}
\begin{figure}
\caption{Four run optimal design for $n_{x}=16$ under the linear basis for $\beta(t)$ and the B-spline basis for $x(t)$.}
\label{fig:bslinear1}
\end{figure}
\section{Conclusions} \label{conc}
We have demonstrated a general methodology for finding optimal designs for experiments involving functional variables and a scalar response. The methodology uses basis function expansions of the functional variable and the functional parameters in a scalar-on-function linear model. It is flexible and can be applied assuming a variety of different bases for both functional variables and parameters. For simple functional parameters, e.g. linear, only simple forms for the functional variable are required.
Ongoing work is extending the results to different criteria tailored to estimation of the functional parameters, finding designs for scalar-on-function generalised linear models, and developing methods for choosing optimal functions for inputs into dynamic models.
\end{document} |
\begin{document}
\title{Lempert Theorem for strongly linearly convex domains}
\author{\L ukasz Kosi\'nski and Tomasz Warszawski}
\subjclass[2010]{32F45}
\keywords{Lempert Theorem, strongly linearly convex domains, Lempert extremals}
\address{Instytut Matematyki, Wydzia\l\ Matematyki i Informatyki, Uniwersytet Jagiello\'nski, ul. Prof. St. \L ojasiewicza 6, 30-348 Krak\'ow, Poland}
\email{[email protected], [email protected]}
\begin{abstract}
In 1984 L.~Lempert showed that the Lempert function and the Carath\'eodory distance coincide on non-planar bounded strongly linearly convex domains with real analytic boundaries. Following this paper, we present a~slightly modified and more detailed version of the proof. Moreover, the Lempert Theorem is proved for non-planar bounded ${\mathcal C}^2$-smooth strongly linearly convex domains.
\end{abstract}
\maketitle
The aim of this paper is to present a detailed version of the proof of the Lempert Theorem in the case of non-planar bounded strongly linearly convex domains with smooth boundaries. The original Lempert's proof is presented only in proceedings of a conference (see \cite{Lem1}) with a very limited access and at some places it was quite sketchy. We were encouraged by some colleagues to prepare an extended version of the proof in which all doubts could be removed and some of details of the proofs could be simplified. We hope to have done it below. Certainly, \textbf{the idea of the proof belongs entirely to Lempert}. The main differences, we would like to draw attention to, are
\begin{itemize}
\item results are obtained in $\mathcal C^2$-smooth case;
\item the notion of stationary mappings and $E$-mappings is separated;
\item a geometry of domains is investigated only in neighborhoods of boundaries of stationary mappings (viewed as boundaries of analytic discs) --- this allows us to obtain localization properties for stationary mappings;
\item boundary properties of strongly convex domains are expressed in terms of the squares of their Minkowski functionals.
\end{itemize}
Additional motivation for presenting the proof is the fact, showed recently in \cite{Pfl-Zwo}, that the so-called symmetrized bidisc may be exhausted by strongly linearly convex domains. On the other hand it cannot be exhausted by domains biholomorphic to convex ones (\cite{Edi}). Therefore, the equality of the Lempert function and the Carath\'eodory distance for strongly linearly convex domains does not follow directly from \cite{Lem2}.
\section{Introduction and results}
Let us recall the objects we will deal with. Throughout the paper $\mathbb{D}$ denotes the unit open disc on the complex plane, $\mathbb{T}$ is the unit circle and $p$ --- the Poincar\'e distance on $\mathbb{D}$.
Let $D\subset\mathbb{C}^{n}$ be a domain and let $z,w\in D$, $v\in\mathbb{C}^{n}$. The {\it Lempert function}\/ is defined as
\begin{equation}\label{lem}
\widetilde{k}_{D}(z,w):=\inf\{p(0,\xi):\xi\in[0,1)\textnormal{ and }\exists f\in \mathcal{O}(\mathbb{D},D):f(0)=z,\ f(\xi)=w\}.
\end{equation} The {\it Kobayashi-Royden \emph{(}pseudo\emph{)}metric}\/ we define as
\begin{equation}\label{kob-roy}
\kappa_{D}(z;v):=\inf\{\lambda^{-1}:\lambda>0\text{ and }\exists f\in\mathcal{O}(\mathbb{D},D):f(0)=z,\ f'(0)=\lambda v\}.
\end{equation}
Note that
\begin{equation}\label{lem1}
\widetilde{k}_{D}(z,w)=\inf\{p(\zeta,\xi):\zeta,\xi\in\mathbb{D}\textnormal{ and }\exists f\in \mathcal{O}(\mathbb{D},D):f(\zeta)=z,\ f(\xi)=w\},
\end{equation}
\begin{multline}\label{kob-roy1}
\kappa_{D}(z;v)=\inf\{|\lambda|^{-1}/(1-|\zeta|^2):\lambda\in\mathbb{C}_*,\,\zeta\in\mathbb{D}\text{ and }\\ \exists f\in\mathcal{O}(\mathbb{D},D):f(\zeta)=z,\ f'(\zeta)=\lambda v\}.
\end{multline}
If $z\neq w$ (respectively $v\neq 0$), a mapping $f$ for which the infimum in \eqref{lem1} (resp. in \eqref{kob-roy1}) is attained, we call a $\widetilde{k}_D$-\textit{extremal} (or a \textit{Lempert extremal}) for $z,w$ (resp. a $\kappa_D$-\textit{extremal} for $z,v$). A mapping being a $\widetilde k_D$-extremal or a $\kappa_D$-extremal we will call just an \textit{extremal} or an \textit{extremal mapping}.
We shall say that $f:\mathbb{D}\longrightarrow D$ is a unique $\widetilde{k}_D$-extremal for $z,w$ (resp. a unique $\kappa_D$-extremal for $z,v$) if any other $\widetilde{k}_D$-extremal $g:\mathbb{D}\longrightarrow D$ for $z,w$ (resp. $\kappa_D$-extremal for $z,v$) satisfies $g=f\circ a$ for some M\"obius function $a$.
In general, $\widetilde{k}_{D}$ does not satisfy a triangle inequality --- take for example $D_{\alpha}:=\{(z,w)\in\mathbb{C}^{2}:|z|,|w|<1,\ |zw|<\alpha\}$, $\alpha\in(0,1)$. Therefore, it is natural to consider the so-called \textit{Kobayashi \emph{(}pseudo\emph{)}distance} given by the formula \begin{multline*}k_{D}(w,z):=\sup\{d_{D}(w,z):(d_{D})\text{ is a family of holomorphically invariant} \\\text{pseudodistances less than or equal to }\widetilde{k}_{D}\}.\end{multline*}
It follows directly from the definition that $$k_{D}(z,w)=\inf\left\{\sum_{j=1}^{N}\widetilde{k}_{D}(z_{j-1},z_{j}):N\in\mathbb{N},\ z_{1},\ldots,z_{N}\in
D,\ z_{0}=z,\ z_{N}=w\right\}.$$
The next objects we are dealing with, are the \textit{Carath\'eodory \emph{(}pseudo\emph{)}distance}
$$c_{D}(z,w):=\sup\{p(F(z),F(w)):F\in\mathcal{O}(D,\mathbb{D})\}$$
and the \textit{Carath\'eodory-Reiffen \emph{(}pseudo\emph{)}metric}
$$\gamma_D(z;v):=\sup\{|F'(z)v|:F\in\mathcal{O}(D,\mathbb{D}),\ F(z)=0\}.$$
A holomorphic mapping $f:\mathbb{D}\longrightarrow D$ is said to be a \emph{complex geodesic} if $c_D(f(\zeta),f(\xi))=p(\zeta,\xi)$ for any $\zeta,\xi\in\mathbb{D}$.
Here is some notation. Let $z_1,\ldots,z_n$ be the standard complex coordinates in $\mathbb{C}^n$ and $x_1,\ldots,x_{2n}$ --- the standard real coordinates in $\mathbb{C}^n=\mathbb{R}^n+i\mathbb{R}^n\simeq\mathbb{R}^{2n}$. We use $T_{D}^\mathbb{R}(a)$, $T_{D}^\mathbb{C}(a)$ to denote a real and a complex tangent space to a ${\mathcal C}^1$-smooth domain $D$ at a point $a\in\partial D$, i.e. the sets \begin{align*}T_{D}^\mathbb{R}(a):&=\left\{X\in\mathbb{C}^{n}:\re\sum_{j=1}^n\frac{\partial r}{\partial z_j}(a)X_{j}=0\right\},\\ T_{D}^\mathbb{C}(a):&=\left\{X\in\mathbb{C}^{n}:\sum_{j=1}^n\frac{\partial r}{\partial z_j}(a)X_{j}=0\right\},\end{align*}
where $r$ is a defining function of $D$. Let $\nu_D(a)$ be the outward unit normal vector to $\partial D$ at $a$.
Let $\mathcal{C}^{k}(\overline{\DD})$, where $k\in(0,\infty]$, denote a class of continuous functions on $\overline{\DD}$, which are of class ${\mathcal C}^k$ on $\mathbb{D}$ and
\begin{itemize}
\item if $k\in\mathbb{N}\cup\{\infty\}$ then derivatives up to the order $k$ extend continuously on~$\overline{\DD}$;
\item if $k-[k]=:c>0$ then derivatives up to the order $[k]$ are $c$-H\"older continuous on $\mathbb{D}$.
\end{itemize}
By $\mathcal{C}^\omega$ class we shall denote real analytic functions. Further, saying that $f$ is of class $\mathcal{C}^{k}(\mathbb{T})$, $k\in(0,\infty]\cup\{\omega\}$, we mean that the function $t\longmapsto f(e^{it})$, $t\in\mathbb{R}$, is in $\mathcal{C}^{k}(\mathbb R)$. For a compact set $K\subset\mathbb{C}^n$ let ${\mathcal O}(K)$ denote the set of functions extending holomorphically on a neighborhood of $K$ (we assume that all neighborhoods are open). In that case we shall sometimes say that a given function is of class ${\mathcal O}(K)$. Note that $\mathcal{C}^{\omega}(\mathbb{T})={\mathcal O}(\mathbb{T})$.
Let $|\cdot|$ denote the Euclidean norm in $\mathbb{C}^{n}$ and let $\dist(z,S):=\inf\{|z-s|:s\in S\}$ be a distance of the point $z\in\mathbb{C}^n$ to the set $S\subset\mathbb{C}^n$. For such a set $S$ we define $S_*:=S\setminus\{0\}$. Let $\mathbb{B}_n:=\{z\in\mathbb{C}^n:|z|<1\}$ be the unit ball and $B_n(a,r):=\{z\in\mathbb{C}^n:|z-a|<r\}$ --- an open ball with a center $a\in\mathbb{C}^n$ and a radius $r>0$. Put $$z\bullet w:=\sum_{j=1}^nz_{j}{w}_{j}$$ for $z,w\in\mathbb{C}^{n}$ and let $\langle\cdotp,-\rangle$ be a hermitian inner product on $\mathbb{C}^n$. The real inner product on $\mathbb{C}^n$ is denoted by $\langle\cdotp,-\rangle_{\mathbb{R}}=\re\langle\cdotp,-\rangle$.
We use $\nabla$ to denote the gradient $(\partial/\partial x_1,\ldots,\partial/\partial x_{2n})$. For real-valued functions the gradient is naturally identified with $2(\partial/\partial\overline z_1,\ldots,\partial/\partial\overline z_n)$. Recall that $$\nu_D(a)=\frac{\nabla r(a)}{|\nabla r(a)|}.$$ Let $\mathcal{H}$ be the Hessian matrix $$\left[\frac{\partial^2}{\partial x_j\partial x_k}\right]_{1\leq j,k\leq 2n}.$$ Sometimes, for a ${\mathcal C}^2$-smooth function $u$ and a vector $X\in\mathbb{R}^{2n}$ the Hessian $$\sum_{j,k=1}^{2n}\frac{\partial^2 u}{\partial x_j\partial x_k}(a)X_{j}X_{k}=X^T{\mathcal H} u(a)X$$ will be denoted by ${\mathcal H} u(a;X)$. By $\|\cdot\|$ we denote the operator norm.
\begin{df}\label{29}
Let $D\subset\mathbb{C}^{n}$ be a domain.
We say that $D$ is \emph{linearly convex} (resp. \emph{weakly linearly convex}) if through any point $a\in\mathbb C^n\setminus D$ (resp. $a\in \partial D$) there goes an $(n-1)$-dimensional complex hyperplane disjoint from $D$.
A domain $D$ is said to be \emph{strongly linearly convex} if
\begin{enumerate}
\item $D$ has $\mathcal{C}^{2}$-smooth boundary;
\item there exists a defining function $r$ of $D$ such that
\begin{equation}\label{48}\sum_{j,k=1}^n\frac{\partial^2 r}{\partial z_j\partial\overline z_k}(a)X_{j}\overline{X}_{k}>\left|\sum_{j,k=1}^n\frac{\partial^2 r}{\partial z_j\partial z_k}(a)X_{j}X_{k}\right|,\ a\in\partial D,\ X\in T_{D}^\mathbb{C}(a)_*.\end{equation}
\end{enumerate}
More generally, any point $a\in\partial D$ for which there exists a defining function $r$ satisfying \eqref{48}, is called a \emph{point of the strong linear convexity} of $D$.
Furthermore, we say that a domain $D$ has \emph{real analytic boundary} if it possesses a real analytic defining function.
\end{df}
Note that the condition \eqref{48} does not depend on the choice of a defining function of $D$.
\begin{rem}
Let $D\subset\mathbb{C}^{n}$ be a strongly linearly convex domain. Then
\begin{enumerate}
\item any $(n-1)$-dimensional complex tangent hyperplane intersects $\partial{D}$ at precisely one point; in other words $$\overline D\cap(a+T_{D}^\mathbb{C}(a))=\{a\},\ a\in\partial D;$$
\item for $a\in\partial D$ the equation $\langle w-a, \nu_D(a)\rangle=0$ describes the $(n-1)$-dimensional complex tangent hyperplane $a+T_{D}^\mathbb{C}(a)$, consequently $$\langle z-a, \nu_D(a)\rangle\neq 0,\ z\in D,\ a\in\partial D.$$
\end{enumerate}
\end{rem}
The main aim of the paper is to present a detailed proof of the following
\begin{tw}[Lempert Theorem]\label{lem-car}
Let $D\subset\mathbb{C}^{n}$, $n\geq 2$, be a bounded strongly linearly convex domain. Then $$c_{D}=k_{D}=\widetilde{k}_{D}\text{\,\ and\,\, }\gamma_D=\kappa_D.$$
\end{tw}
An important role will be played by strongly convex domains and strongly convex functions.
\begin{df}
A domain $D\subset\mathbb{C}^{n}$ is called \emph{strongly convex} if
\begin{enumerate}
\item $D$ has $\mathcal{C}^{2}$-smooth boundary;
\item there exists a defining function $r$ of $D$ such that
\begin{equation}\label{sc}\sum_{j,k=1}^{2n}\frac{\partial^2 r}{\partial x_j\partial x_k}(a)X_{j}X_{k}>0,\ a\in\partial D,\ X\in T_{D}^\mathbb{R}(a)_*.\end{equation}
\end{enumerate}
Generally, any point $a\in\partial D$ for which there exists a defining function $r$ satisfying \eqref{sc}, is called a \emph{point of the strong convexity} of $D$.
\end{df}
\begin{rem}
A strongly convex domain $D\subset\mathbb{C}^{n}$ is convex and strongly linearly convex. Moreover, it is strictly convex, i.e. for any different points $a,b\in\overline D$ the interior of the segment $[a,b]=\{ta+(1-t)b:t\in [0,1]\}$ is contained in $D$ (i.e. $ta+(1-t)b\in D$ for any $t\in(0,1)$).
Observe also that any bounded convex domain with a real analytic boundary is strictly convex. Actually, if a domain $D$ with a real analytic boundary were not strictly convex, then we would be able to find two distinct points $a,b\in\partial D$ such that the segment $[a,b]$ lies entirely in $\partial D$. On the other hand, the identity principle would imply that the set $\{t\in\mathbb R:\exists\varepsilon>0:sa+(1-s)b\in\partial D\text{ for }|s-t|<\varepsilon\}$ is open-closed in $\mathbb R$. Therefore it has to be empty. This immediately gives a contradiction.
\end{rem}
\begin{rem}
It is well-known that for any convex domain $D\subset\mathbb{C}^{n}$ there is a sequence $\{D_m\}$ of bounded strongly convex domains with real analytic boundaries, such that $D_m\subset D_{m+1}$ and $\bigcup_m D_m=D$.
In particular, Theorem~\ref{lem-car} holds for convex domains.
\end{rem}
\begin{df}
Let $U\subset\mathbb{C}^n$ be a domain. A function $u:U\longrightarrow\mathbb{R}$ is called \emph{strongly convex} if
\begin{enumerate}
\item $u$ is $\mathcal{C}^{2}$-smooth;
\item $$\sum_{j,k=1}^{2n}\frac{\partial^2 u}{\partial x_j\partial x_k}(a)X_{j}X_{k}>0,\ a\in U,\ X\in(\mathbb{R}^{2n})_*.$$
\end{enumerate}
\end{df}
\begin{df} The degree of a continuous function (treated as a curve) $\varphi:\mathbb T\longrightarrow\mathbb T$ is called its winding number. The fundamental group is a homotopy invariant. Thus the definition of the \emph{winding number of a continuous function} $\varphi:\mathbb T\longrightarrow\mathbb C_*$ is the same. We denote it by $\wind\varphi$.
In the case of a ${\mathcal C}^1$-smooth function $\varphi:\mathbb{T}\longrightarrow\mathbb{C}_*$, its winding number is just the index of $\varphi$ at 0, i.e. $$\wind\varphi=\frac{1}{2\pi i}\int_{\varphi(\mathbb{T})}\frac{d\zeta}{\zeta}=\frac{1}{2\pi i}\int_{0}^{2\pi}\frac{\frac{d}{dt}\varphi(e^{it})}{\varphi(e^{it})}dt.$$
\end{df}
\begin{rem}\label{49}
\begin{enumerate}
\item\label{51} If $\varphi\in{\mathcal C}(\mathbb{T},\mathbb{C}_*)$ extends to a function $\widetilde{\varphi}\in{\mathcal O}(\mathbb{D})\cap \mathcal C(\overline{\DD})$ then $\wind\varphi$ is the number of zeroes of $\widetilde{\varphi}$ in $\mathbb{D}$ counted with multiplicities;
\item\label{52} $\wind(\varphi\psi)=\wind\varphi+\wind\psi$, $\varphi,\psi\in{\mathcal C}(\mathbb{T},\mathbb{C}_*)$;
\item\label{53} $\wind\varphi=0$ if $\varphi\in{\mathcal C}(\mathbb{T})$ and $\re\varphi>0$.
\end{enumerate}
\end{rem}
\begin{df}
The boundary of a domain $D$ of $\mathbb C^n$ is \emph{real analytic in a neighborhood} $U$ of the set $S\subset\partial D$ if there exists a function $r\in\mathcal C^{\omega}(U,\mathbb{R})$ such that $D\cap U=\{z\in U:r(z)<0\}$ and $\nabla r$ does not vanish in $U$.
\end{df}
\begin{df}\label{21}
Let $D\subset\mathbb{C}^{n}$ be a domain. We call a holomorphic mapping $f:\mathbb{D}\longrightarrow D$ a \emph{stationary mapping} if
\begin{enumerate}
\item $f$ extends to a holomorphic mapping in a neighborhood of $\overline{\DD}$ $($denoted by the same letter$)$;
\item $f(\mathbb{T})\subset\partial D$;
\item there exists a real analytic function
$\rho:\mathbb{T}\longrightarrow\mathbb{R}_{>0}$ such that the mapping $\mathbb{T}\ni\zeta\longmapsto\zeta
\rho(\zeta)\overline{\nu_D(f(\zeta))}\in\mathbb{C}^{n}$ extends to a mapping holomorphic in a neighborhood of $\overline{\DD}$ $($denoted by $\widetilde{f}${$)$}.
\end{enumerate}
Furthermore, we call a holomorphic mapping $f:\mathbb{D}\longrightarrow D$ a \emph{weak stationary mapping} if
\begin{enumerate}
\item[(1')] $f$ extends to a ${\mathcal C}^{1/2}$-smooth mapping on $\overline{\DD}$ $($denoted by the same letter$)$;
\item[(2')] $f(\mathbb{T})\subset\partial D$;
\item[(3')] there exists a ${\mathcal C}^{1/2}$-smooth function
$\rho:\mathbb{T}\longrightarrow\mathbb{R}_{>0}$ such that the mapping $\mathbb{T}\ni\zeta\longmapsto\zeta
\rho(\zeta)\overline{\nu_D(f(\zeta))}\in\mathbb{C}^{n}$ extends to a mapping $\widetilde{f}\in{\mathcal O}(\mathbb{D})\cap{\mathcal C}^{1/2}(\overline{\DD})$.
\end{enumerate}
The definition of a $($weak$)$ stationary mapping $f:\mathbb D\longrightarrow D$ extends naturally to the case when $\partial D$ is real analytic in a neighborhood of $f(\mathbb{T})$.
\end{df}
Directly from the definition of a stationary mapping $f$, it follows that $f$ and $\widetilde f$ extend holomorphically on some neighborhoods of $\overline{\DD}$. By $\mathbb{D}_f$ we shall denote their intersection.
\begin{df}\label{21e}
Let $D\subset\mathbb{C}^n$, $n\geq 2$, be a bounded strongly linearly convex domain with real analytic boundary. A holomorphic mapping $f:\mathbb{D}\longrightarrow D$ is called a (\emph{weak}) $E$-\emph{mapping} if it is a (weak) stationary mapping and
\begin{enumerate}
\item[(4)] setting $\varphi_z(\zeta):=\langle z-f(\zeta),\nu_D(f(\zeta))\rangle,\ \zeta\in\mathbb{T}$, we have $\wind\varphi_z=0$ for some $z\in D$.
\end{enumerate}
\end{df}
\begin{rem}
The strong linear convexity of $D$ implies $\varphi_z(\zeta)\neq 0$ for any $z\in D$ and $\zeta\in\mathbb{T}$. Therefore, $\wind\varphi_z$ vanishes for all $z\in D$ if it vanishes for some $z\in D$.
Additionally, any stationary mapping of a convex domain is an $E$-mapping (as $\re \varphi_z<0$).
\end{rem}
We shall prove that in a class of non-planar bounded strongly linearly convex domains with real analytic boundaries weak stationary mappings are just stationary mappings, so there is no difference between $E$-mappings and weak $E$-mappings.
We have the following result describing extremal mappings, which is very interesting in its own.
\begin{tw}\label{main} Let $D\subset\mathbb{C}^n$, $n\geq 2$, be a bounded strongly linearly convex domain.
Then a holomorphic mapping $f:\mathbb{D}\longrightarrow D$ is an extremal if and only if $f$ is a weak $E$-mapping.
For a domain $D$ with real analytic boundary, a holomorphic mapping $f:\mathbb D\longrightarrow D$ is an extremal if and only if $f$ is an $E$-mapping.
If $\partial D$ is of class ${\mathcal C}^k$, $k=3,4,\ldots,\infty$, then any weak $E$-mapping $f:\mathbb{D}\longrightarrow D$ and its associated mappings $\widetilde f,\rho$ are $\mathcal C^{k-1-\varepsilon}$-smooth for any $\varepsilon>0$.
\end{tw}
The idea of the proof of the Lempert Theorem is as follows. In real analytic case we shall show that $E$-mappings are complex geodesics (because they have left inverses). Then we shall prove that for any different points $z,w\in D$ (resp. for a point $z\in D$ and a vector $v\in(\mathbb{C}^n)_*$) there is an $E$-mapping passing through $z,w$ (resp. such that $f(0)=z$ and $f'(0)=v$). This will give the equality between the Lempert function and the Carath\'eodory distance. In the general case, we exhaust a ${\mathcal C}^2$-smooth domain by strongly linearly convex domains with real analytic boundaries.
To prove Theorem \ref{main} we shall additionally observe that (weak) $E$-mappings are unique extremals.
\begin{center}{\sc Real analytic case}\end{center}
In what follows and if not mentioned otherwise, $D\subset\mathbb{C}^n$, $n\geq 2$, is a \textbf{bounded strongly linearly convex domain with real analytic boundary}.
\section{Weak stationary mappings of strongly linearly convex domains with real analytic boundaries are stationary mappings}\label{55}
Let $M\subset\mathbb{C}^m$ be a totally real $\mathcal{C}^{\omega}$ submanifold of the real dimension $m$. Fix a point $z\in M$. There are neighborhoods $U,V\subset\mathbb{C}^m$ of $0$ and $z$ respectively and a biholomorphic mapping $\Phi:U\longrightarrow V$ such that $\Phi(\mathbb{R}^m\cap U)=M\cap V$ (for the proof see Appendix).
\begin{prop}\label{6}
A weak stationary mapping of $D$ is a stationary mapping of $D$ with the same associated mappings.
\end{prop}
\begin{proof}
Let $f:\mathbb{D}\longrightarrow D$ be a weak stationary mapping. Our aim is to prove that $f,\widetilde{f}\in{\mathcal O}(\overline{\DD})$ and $\rho\in\mathcal C^{\omega}(\mathbb{T})$. Choose a point $\zeta_0\in\mathbb{T}$. Since $\widetilde{f}(\zeta_0)\neq 0$, we can assume that $\widetilde{f}_1(\zeta)\neq 0$ in $\overline{\DD}\cap U_0$, where $U_0$ is a neighborhood of $\zeta_0$. This implies
$\nu_{D,1}(f(\zeta_0))\neq 0$, so $\nu_{D,1}$ does not vanish on some set $V_0\subset\partial D$, relatively open in
$\partial D$, containing the point $f(\zeta_0)$. Shrinking $U_0$, if necessary, we may assume that $f(\mathbb{T}\cap U_0)\subset V_0$.
Define $\psi:V_0\longrightarrow\mathbb{C}^{2n-1}$ by
$$\psi(z)=\left(z_1,\ldots,z_n,
\overline{\left(\frac{\nu_{D,2}(z)}{\nu_{D,1}(z)}\right)},\ldots,\overline{\left(\frac{\nu_{D,n}(z)}{\nu_{D,1}(z)}\right)}\right).$$ The set $M:=\psi(V_0)$ is the graph of a $\mathcal{C}^{\omega}$ function defined on the local $\mathcal{C}^{\omega}$ submanifold $V_0$, so it is a local $\mathcal{C}^{\omega}$ submanifold in $\mathbb{C}^{2n-1}$ of the real dimension $2n-1$. Assume for a moment that $M$ is totally real.
Let $$g(\zeta):=\left(f_1(\zeta),\ldots,f_n(\zeta),
\frac{\widetilde{f}_2(\zeta)}{\widetilde{f}_1(\zeta)},\ldots,\frac{\widetilde{f}_n(\zeta)}{\widetilde{f}_1(\zeta)}\right),\ \zeta\in\overline{\DD}\cap U_0.$$ If $\zeta\in\mathbb{T}\cap U_0$ then
$\widetilde{f}_k(\zeta)\widetilde{f}_1(\zeta)^{-1} =
\overline{\nu_{D,k}(f(\zeta))}\ \overline{\nu_{D,1}(f(\zeta))}^{-1}$, so
$g(\zeta)=\psi(f(\zeta))$. Therefore, $g(\mathbb{T}\cap U_0)\subset M$. Thanks to the Reflection
Principle (see Appendix), $g$ extends holomorphically past $\mathbb{T}\cap U_0$, so $f$ extends holomorphically on a neighborhood of $\zeta_0$.
The mapping $\overline{\nu_D\circ f}$ is real analytic on $\mathbb{T}$, so it extends to a mapping $h$ holomorphic in a neighborhood $W$ of $\mathbb{T}$. For $\zeta\in\mathbb{T}\cap U_0$ we have $$\frac{\zeta
h_1(\zeta)}{\widetilde{f}_1(\zeta)}=\frac{1}{\rho(\zeta)}.$$ The function on the
left side is holomorphic in $\mathbb{D}\cap U_0\cap W$ and continuous in $\overline{\DD}\cap U_0\cap W$. Since it
has real values on $\mathbb{T}\cap U_0$, the Reflection Principle implies that it is holomorphic in a neighborhood of $\mathbb{T}\cap U_0$. Hence $\rho$ and $\widetilde{f}$ are holomorphic in a neighborhood of $\zeta_0$. Since $\zeta_0$ is arbitrary, we get the assertion.
It remains to prove that $M$ is totally real. Let $r$ be a defining function of $D$. Recall that for any point $z\in V_0$ $$\frac{\overline{\nu_{D,k}(z)}}{\overline{\nu_{D,1}(z)}}=\frac{\partial r}{\partial z_k}(z)\left(\frac{\partial r}{\partial z_1}(z)\right)^{-1},\,k=1,\ldots,n.$$
Consider the mapping $S=(S_1,\ldots,S_n):V_0\times\mathbb{C}^{n-1}\longrightarrow\mathbb{R}\times\mathbb{C}^{n-1}$
given by $$S(z,w):=\left(r(z),\frac{\partial r}{\partial z_2}(z)-w_{1}\frac{\partial r}{\partial z_1}(z),\ldots,\frac{\partial r}{\partial z_n}(z)-w_{n-1}\frac{\partial r}{\partial z_1}(z)\right).$$ Clearly, $M=S^{-1}(\{0\})$. Hence
\begin{equation}\label{tan} T_{M}^{\mathbb{R}}(z,w)\subset\ker\nabla S(z,w),\ (z,w)\in M,\end{equation} where
$\nabla S:=(\nabla S_1,\ldots,\nabla S_n)$.
Fix a point $(z,w)\in M$. Our goal is to prove that $T_{M}^{\mathbb{C}}(z,w)=\lbrace 0\rbrace$. Take an arbitrary vector $(X,Y)=(X_1,\ldots,X_n,Y_1,\ldots,Y_{n-1})\in T_{M}^{\mathbb{C}}(z,w)$. Then we infer from \eqref{tan} that $$\sum_{k=1}^n\frac{\partial r}{\partial z_k}(z)X_k=0,$$ i.e. $X\in T_{D}^{\mathbb{C}}(z)$. Denoting $v:=(z,w)$, $V:=(X,Y)$ and making use of \eqref{tan} again we find that
$$0=\nabla S_k(v)(V)=\sum_{j=1}^{2n-1}\frac{\partial S_k}{\partial v_j}(v)V_j+\sum_{j=1}^{2n-1}\frac{\partial S_k}{\partial\overline v_j}(v)\overline V_j$$ for $k=2,\ldots,n$.
But $V\in T_{M}^{\mathbb{C}}(v)$, so $iV\in T_{M}^{\mathbb{C}}(v)$. Thus $$0=\nabla S_k(v)(iV)=i\sum_{j=1}^{2n-1}\frac{\partial S_k}{\partial v_j}(v)V_j-i\sum_{j=1}^{2n-1}\frac{\partial S_k}{\partial\overline v_j}(v)\overline V_j.$$ In particular, \begin{multline*}0=\sum_{j=1}^{2n-1}\frac{\partial S_k}{\partial\overline v_j}(v)\overline V_j=\sum_{j=1}^{n}\frac{\partial S_k}{\partial\overline z_j}(z,w)\overline X_j+\sum_{j=1}^{n-1}\frac{\partial S_k}{\partial\overline w_j}(z,w)\overline Y_j=\\=\sum_{j=1}^n\frac{\partial^2r}{\partial z_k\partial\overline{z}_j}(z)\overline X_j-w_{k-1}\sum_{j=1}^n\frac{\partial^2r}{\partial z_1\partial\overline{z}_j}(z)\overline X_j.
\end{multline*}
The equality $M=S^{-1}(\{0\})$ gives $$w_{k-1}=\frac{\partial r}{\partial z_k}(z)\left(\frac{\partial r}{\partial z_1}(z)\right)^{-1},$$ so $$\frac{\partial r}{\partial z_1}(z)\sum_{j=1}^n\frac{\partial^2r}{\partial z_k\partial\overline{z}_j}(z)\overline X_j=\frac{\partial r}{\partial z_k}(z)\sum_{j=1}^n\frac{\partial^2r}{\partial z_1\partial\overline{z}_j}(z)\overline X_j,\ k=2,\ldots,n.$$ Note that the last equality holds also for $k=1$. Therefore, \begin{multline*}
\frac{\partial r}{\partial z_1}(z)\sum_{j,k=1}^n\frac{\partial^2r}{\partial z_k\partial\overline{z}_j}(z)\overline X_jX_k=\sum_{k=1}^n\frac{\partial r}{\partial z_k}(z)\sum_{j=1}^n\frac{\partial^2r}{\partial z_1\partial\overline{z}_j}(z)\overline X_jX_k =\\=\left(\sum_{k=1}^n\frac{\partial r}{\partial z_k}(z)X_k\right)\left(\sum_{j=1}^n\frac{\partial^2r}{\partial z_1\partial\overline{z}_j}(z)\overline X_j\right)=0.
\end{multline*}
By the strong linear convexity of $D$ we have $X=0$. This implies $Y=0$, since $$0=\nabla S_k(z,w)(0,Y)=\sum_{j=1}^{n-1}\frac{\partial S_k}{\partial w_j}(v)Y_j+\sum_{j=1}^{n-1}\frac{\partial S_k}{\partial\overline w_j}(v)\overline Y_j=-\frac{\partial r}{\partial z_1}(z)Y_{k-1}$$ for $k=2,\ldots,n$.
\end{proof}
\section{(Weak) $E$-mappings vs. extremal mappings and complex geodesics}
In this section we will prove important properties of (weak) $E$-mappings. In particular, we will show that they are complex geodesics and unique extremals.
\subsection{Weak $E$-mappings are complex geodesics and unique extremals}
The results of this subsection are related to weak $E$-mappings of bounded strongly linearly convex domains $D\subset\mathbb{C}^n$, $n\geq 2$.
Let $$G(z,\zeta):=(z-f(\zeta))\bullet\widetilde{f}(\zeta),\ z\in\mathbb{C}^n,\ \zeta\in\mathbb{D}_f.$$
\begin{propp}\label{1}
Let $D\subset\mathbb{C}^n$, $n\geq 2$, be a bounded strongly linearly convex domain and let $f:\mathbb{D}\longrightarrow D$ be a weak $E$-mapping. Then there exist an open set $W\supset\overline D\setminus f(\mathbb{T})$ and a holomorphic mapping $F:W\longrightarrow\mathbb{D}$ such that for any $z\in W$ the number $F(z)$ is a unique solution of the equation $G(z,\zeta)=0,\ \zeta\in\mathbb{D}$. In particular, $F\circ f=\id_{\mathbb{D}}$.
\end{propp}
In the sequel we will strengthen the above proposition for domains with real analytic boundaries (see Proposition~\ref{34}).
\begin{proof}[Proof of Proposition~\ref{1}]
Set $A:=\overline{D}\setminus f(\mathbb{T})$. Since $D$ is strongly linearly convex, $\varphi_z$ does not vanish on $\mathbb{T}$ for any $z\in A$, so by a continuity argument the condition (4) of Definition~\ref{21e} holds for every $z$ in some open set $W\supset A$. For a fixed $z\in W$ we have $$G(z,\zeta)=\zeta\rho(\zeta)\varphi_z(\zeta),\ \zeta\in\mathbb{T},$$ so $\wind G(z,\cdotp)=1$. Since $G(z,\cdotp)\in{\mathcal O}(\mathbb{D})$, it has in $\mathbb{D}$ exactly one simple root $F(z)$. Hence $G(z,F(z))=0$ and $\frac{\partial G}{\partial\zeta}(z,F(z))\neq 0$. By the Implicit Function Theorem, $F$ is holomorphic in $W$. The equality $F(f(\zeta))=\zeta$ for $\zeta\in\mathbb{D}$ is clear.
\end{proof}
From the proposition above we immediately get the following
\begin{corr}\label{5}
A weak $E$-mapping $f:\mathbb{D}\longrightarrow D$ of a bounded strongly linearly convex domain $D\subset\mathbb{C}^n$, $n\geq 2$, is a complex geodesic. In particular,
$$c_{D}(f(\zeta),f(\xi))=\widetilde k_D(f(\zeta),f(\xi))\text{\,\ and\,\, }\gamma_D(f(\zeta);f'(\zeta))=\kappa_D(f(\zeta);f'(\zeta)),$$ for any $\zeta,\xi\in\mathbb{D}$.
\end{corr}
Using left inverses of weak $E$-mappings we may prove the uniqueness of extremals.
\begin{propp}\label{2}
Let $D\subset\mathbb{C}^n$, $n\geq 2$, be a bounded strongly linearly convex domain and let $f:\mathbb{D}\longrightarrow D$ be a weak $E$-mapping. Then for any $\xi\in(0,1)$ the mapping $f$ is a unique $\widetilde{k}_D$-extremal for $z=f(0)$, $w=f(\xi)$ \emph{(}resp. a unique $\kappa_D$-extremal for $z=f(0)$, $v=f'(0)$\emph{)}.
\end{propp}
\begin{proof}
Suppose that $g$ is a $\widetilde{k}_D$-extremal for $z,w$ (resp. a $\kappa_D$-extremal for $z,v$) such that $g(0)=z$, $g(\xi)=w$ (resp. $g(0)=z$, $g'(0)=v$). Our aim is to show that $f=g$. Proposition~\ref{1} provides us with the mapping $F$, which is a left inverse for $f$. By the Schwarz Lemma, $F$ is a left inverse for $g$, as well, that is $F\circ g=\text{id}_{\mathbb{D}}$. We claim that $\lim_{\mathbb{D}\ni\zeta\to\zeta_0}g(\zeta)=f(\zeta_0)$ for any $\zeta_0\in\mathbb{T}$ (in particular, we shall show that the limit does exist).
Assume the contrary. Then there are $\zeta_0\in\mathbb{T}$ and a sequence $\{\zeta_m\}\subset\mathbb{D}$ convergent to $\zeta_0$ such that the limit $Z:=\lim_{m\to\infty}g(\zeta_m)\in\overline{D}$ exists and is not equal to $f(\zeta_0)$. We have $G(z,F(z))=0$, so putting $z=g(\zeta_m)$ we infer that $$0=(g(\zeta_m)-f(F(g(\zeta_m))))\bullet \widetilde{f}(F(g(\zeta_m)))=(g(\zeta_m)-f(\zeta_m))\bullet\widetilde{f}(\zeta_m).
$$ Passing with $m$ to the infinity we get $$0=(Z-f(\zeta_0))\bullet \widetilde{f}(\zeta_0)=\zeta_0\rho(\zeta_0)\langle Z-f(\zeta_0),\nu_D(f(\zeta_0))\rangle.$$ This means that $Z-f(\zeta_0)\in T^{\mathbb{C}}_D(f(\zeta_0))$. Since $D$ is strongly linearly convex, we deduce that $Z=f(\zeta_0)$, which is a contradiction.
Hence $g$ extends continuously on $\overline{\DD}$ and, by the maximum principle, $g=f$.
\end{proof}
\begin{propp}\label{3}
Let $D\subset\mathbb{C}^n$, $n\geq 2$, be a bounded strongly linearly convex domain, let $f:\mathbb{D}\longrightarrow D$ be a weak $E$-mapping and let $a$ be an automorphism of $\mathbb{D}$. Then $f\circ a$ is a weak $E$-mapping of $D$.
\end{propp}
\begin{proof}
Set $g:=f\circ a$.
Clearly, the conditions (1') and (2') of Definition~\ref{21} are satisfied by $g$.
To prove that $g$ satisfies the condition (4) of Definition~\ref{21e} fix a point $z\in D$. Let $\varphi_{z,f}$, $\varphi_{z,g}$ be the functions appearing in the condition (4) for $f$ and $g$ respectively. Then $\varphi_{z,g}=\varphi_{z,f}\circ a$. Since $a$ maps $\mathbb{T}$ to $\mathbb{T}$ diffeomorphically, we have $\wind\varphi_{z,g}=\pm\wind\varphi_{z,f}=0$.
It remains to show that the condition (3') of Definition~\ref{21} is also satisfied by $g$. Note that the function $\widetilde a(\zeta):=\zeta/a(\zeta)$ has a holomorphic branch of the logarithm in the neighborhood of $\mathbb{T}$. This follows from the fact that $\wind \widetilde a=0$; however, the existence of the holomorphic branch may be shown in a quite elementary way. Actually, it would suffice to prove that $\widetilde a(\mathbb{T})\neq\mathbb{T}$. Expand $a$ as $$a(\zeta)=e^{it}\frac{\zeta-b}{1-\overline b\zeta}$$ with some $t\in\mathbb{R}$, $b\in\mathbb{D}$ and observe that $\widetilde a$ does not attain the value $-e^{-it}$. Indeed, if $\zeta/a(\zeta)=-e^{-it}$ for some $\zeta\in\mathbb{T}$, then $$\frac{1-\overline b\zeta}{1-b\overline\zeta}=-1,$$ so $2=2\re(b\overline\zeta)\leq 2|b|$, which is impossible.
Concluding, there exists a function $v$ holomorphic in a neighborhood of $\mathbb{T}$ such that $$\frac{\zeta}{a(\zeta)}=e^{i v(\zeta)}.$$ Note that $v(\mathbb{T})\subset\mathbb{R}$. Expanding $v$ in Laurent series $$v(\zeta)=\sum_{k=-\infty}^{\infty}a_k\zeta^k,\ \zeta\text{ near }\mathbb{T},$$ we infer that $a_{-k}=\overline a_k$, $k\in\mathbb{Z}$. Therefore, $$v(\zeta)=a_0+\sum_{k=1}^\infty 2\re(a_k\zeta^k)=\re\left(a_0+2\sum_{k=1}^\infty a_k\zeta^k\right),\ \zeta\in\mathbb{T}.$$ Hence, there is a function $h$ holomorphic in the neighborhood of $\overline{\DD}$ such that $v=\im h$. Put $u:=h-iv$. Then $u\in{\mathcal O}(\mathbb{T})$ and $u(\mathbb{T})\subset\mathbb{R}$.
Take $\rho$ as in the condition (3') of Definition~\ref{21} for $f$ and define $$r(\zeta):=\rho(a(\zeta))e^{u(\zeta)},\ \zeta\in\mathbb{T}.$$ Let us compute
\begin{eqnarray*}\zeta r(\zeta)\overline{\nu_D(g(\zeta))}=\zeta e^{u(\zeta)}\rho(a(\zeta))\overline{\nu_D(f(a(\zeta)))}&=&\\=a(\zeta)e^{h(\zeta)}\rho(a(\zeta))\overline{\nu_D(f(a(\zeta)))}
&=&e^{h(\zeta)}\widetilde{f}(a(\zeta)),\quad\zeta\in\mathbb{T}.
\end{eqnarray*} Thus $\zeta\longmapsto\zeta r(\zeta)\overline{\nu_D(g(\zeta))}$ extends holomorphically to a function of class ${\mathcal O}(\mathbb{D})\cap{\mathcal C}^{1/2}(\overline{\DD})$.
\end{proof}
\begin{corr}\label{28}
A weak $E$-mapping $f:\mathbb{D}\longrightarrow D$ of a bounded strongly linearly convex domain $D\subset\mathbb{C}^n$, $n\geq 2$, is a unique $\widetilde{k}_D$-extremal for $f(\zeta),f(\xi)$ \emph{(}resp. a unique $\kappa_D$-extremal for $f(\zeta),f'(\zeta)$\emph{)}, where $\zeta,\xi\in\mathbb{D}$, $\zeta\neq\xi$.
\end{corr}
\subsection{Generalization of Proposition~\ref{1}}
The results obtained in this subsection will play an important role in the sequel.
We start with
\begin{propp}\label{4}
Let $f:\mathbb{D}\longrightarrow D$ be an $E$-mapping. Then the function $f'\bullet\widetilde{f}$ is a positive constant.
\end{propp}
\begin{proof}
Consider the curve $$\mathbb{R}\ni t\longmapsto f(e^{it})\in\partial D.$$ Any of its tangent vectors $ie^{it}f'(e^{it})$ belongs to $T_{D}^\mathbb{R}(f(e^{it}))$, i.e. $$\re\langle ie^{it}f'(e^{it}),\nu_D(f(e^{it}))\rangle=0.$$ Thus for $\zeta\in\mathbb{T}$ $$0=\rho(\zeta)\re\langle i\zeta f'(\zeta),\nu_D(f(\zeta))\rangle=-\im f'(\zeta)\bullet\widetilde{f}(\zeta),$$ so the holomorphic function $f'\bullet\widetilde{f}$ is a real constant $C$.
Considering the curve $$[0,1+\varepsilon)\ni t\longmapsto f(t)\in\overline D$$ for small $\varepsilon>0$ and noting that $f([0,1))\subset D$, $f(1)\in\partial D$, we see that the derivative of $r\circ f$ at a point $t=1$ is non-negative, where $r$ is a defining function of $D$. Hence $$0\leq\re\langle f'(1),\nu_D(f(1))\rangle =\frac{1}{\rho(1)} \re( f'(1)\bullet\widetilde{f}(1))=
\frac{C}{\rho(1)},$$ i.e. $C\geq 0$. For $\zeta\in\mathbb{T}$
$$\frac{f(\zeta)-f(0)}{\zeta}\bullet\widetilde{f}(\zeta)=\rho(\zeta)\langle f (\zeta)-f(0),\nu_D(f(\zeta))\rangle.$$ This function has the winding number equal to $0$. Therefore, the function $$g(\zeta):=\frac{f(\zeta)-f(0)}{\zeta}\bullet\widetilde{f}(\zeta),$$ which is holomorphic in a neighborhood of $\overline{\DD}$, does not vanish
in $\mathbb{D}$. In particular, $C=g(0)\neq 0$.
\end{proof}
The function $\rho$ is defined up to a constant factor. \textbf{We choose $\rho$ so that $ f'\bullet\widetilde{f}\equiv 1$}, i.e. \begin{equation}\label{rho}\rho(\zeta)^{-1}=\langle\zeta f'(\zeta),\nu_D(f(\zeta))\rangle,\ \zeta\in\mathbb{T}.\end{equation} In that way $\widetilde{f}$ and $\rho$ are uniquely determined by $f$.
\begin{propp}
An $E$-mapping $f:\mathbb{D}\longrightarrow D$ is injective in $\overline{\DD}$.
\end{propp}
\begin{proof}The function $f$ has the left-inverse in $\mathbb{D}$, so it suffices to check the injectivity on $\mathbb{T}$. Suppose that $f(\zeta_1)=f(\zeta_2)$ for some $\zeta_1,\zeta_2\in\mathbb{T}$, $\zeta_1\neq\zeta_2$, and consider the curves $$\gamma_j:[0,1]\ni t\longmapsto f(t\zeta_j)\in\overline D,\ j=1,2.$$ Since $$\re\langle\gamma_j'(1),\nu_D(f(\zeta_j))\rangle=\re\langle\zeta_jf'(\zeta_j),\nu_D(f(\zeta_j))\rangle
=\rho(\zeta_j)^{-1}\neq 0,$$ the curves $\gamma_j$ hit $\partial D$ transversally at their common point $f(\zeta_1)$. We claim that there exists $C>0$ such that for $t\in(0,1)$ close to $1$ there is $s_t\in(0,1)$ satisfying $\widetilde k_D(f(t\zeta_1),f(s_t\zeta_2))<C$. It will finish the proof since $$\widetilde k_D(f(t\zeta_1),f(s_t\zeta_2))=p(t\zeta_1,s_t\zeta_2)\to\infty,\ t\to 1.$$ We may assume that $f(\zeta_1)=0$ and $\nu_D(0)=(1,0,\ldots,0)=:e_1$. There exists a ball $B\subset D$ tangent to $\partial D$ at $0$. Using a homothety, if necessary, one can assume that $B=\mathbb{B}_n-e_1$. From the transversality of $\gamma_1,\gamma_2$ to $\partial D$ there exists a cone $$A:=\{z\in\mathbb{C}^n:-\re z_1>k|z|\},\quad k>0,$$ such that $\gamma_1(t),\gamma_2(t)\in A\cap B$ if $t\in(0,1)$ is close to $1$. For $z\in A$ let $k_z>k$ be a positive number satisfying the equality $$|z|=\frac{-\re z_1}{k_z}.$$
Note that for any $a\in\gamma_1((0,1))$ sufficiently close to $0$ one may find $b\in\gamma_2((0,1))\cap A\cap B$ such that $\re b_1=\re a_1$. To get a contradiction it suffices to show that $\widetilde k_D(a,b)$ is bounded from above by a constant independent on $a$ and $b$.
We have the following estimate \begin{multline*}\widetilde k_D(a,b)\leq\widetilde k_{\mathbb{B}_n-e_1}(a,b)=\widetilde k_{\mathbb{B}_n}(a+e_1,b+e_1)=\\=\tanh^{-1}\sqrt{1-\frac{(1-|a+e_1|^2)(1-|b+e_1|^2)}{|1-\langle a+e_1,b+e_1 \rangle|^2}}.\end{multline*} The last expression is bounded from above if and only if $$\frac{(1-|a+e_1|^2)(1-|b+e_1|^2)}{|1-\langle a+e_1,b+e_1\rangle|^2}$$ is bounded from below by some positive constant. We estimate $$\frac{(1-|a+e_1|^2)(1-|b+e_1|^2)}{|1-\langle a+e_1,b+e_1\rangle|^2}=\frac{(2\re a_1+|a|^2)(2\re b_1+|b|^2)}{|\langle a, b\rangle+a_1+\overline b_1|^2}=$$$$=\frac{\left(2\re a_1+\frac{(\re a_1)^2}{k^2_a}\right)\left(2\re a_1+\frac{(\re a_1)^2}{k^2_b}\right)}{|\langle a, b\rangle+2\re a_1+i\im a_1-i\im b_1|^2}\geq\frac{(\re a_1)^2\left(2+\frac{\re a_1}{k^2_a}\right)\left(2+\frac{\re a_1}{k^2_b}\right)}{2|\langle a, b\rangle+i\im a_1-i\im b_1|^2+2|2\re a_1|^2}$$$$\geq\frac{(\re a_1)^2\left(2+\frac{\re a_1}{k^2_a}\right)\left(2+\frac{\re a_1}{k^2_b}\right)}{2(|a||b|+|a|+|b|)^2+8(\re a_1)^2}=\frac{(\re a_1)^2\left(2+\frac{\re a_1}{k^2_a}\right)\left(2+\frac{\re a_1}{k^2_b}\right)}{2\left(\frac{(-\re a_1)^2}{k^2_ak^2_b}-\frac{\re a_1}{k_a}-\frac{\re a_1}{k_b}\right)^2+8(\re a_1)^2}$$$$=\frac{\left(2+\frac{\re a_1}{k^2_a}\right)\left(2+\frac{\re a_1}{k^2_b}\right)}{2\left(\frac{-\re a_1}{k^2_ak^2_b}+\frac{1}{k_a}+\frac{1}{k_b}\right)^2+8}>\frac{1}{2(1+2/k)^2+8}.$$ This finishes the proof.
\end{proof}
Assume that we are in the settings of Proposition~\ref{1} and $D$ has real analytic boundary. Our aim is to replace $W$ with a neighborhood of $\overline D$.
\begin{remm}\label{przed34}
For $\zeta_0\in\mathbb{D}_f$ we have $G(f(\zeta_0),\zeta_0)=0$ and $\frac{\partial G}{\partial\zeta}(f(\zeta_0),\zeta_0)=-1$. By the Implicit Function Theorem there exist neighborhoods $U_{\zeta_0},V_{\zeta_0}$ of $f(\zeta_0),\zeta_0$ respectively and a holomorphic function $F_{\zeta_0}:U_{\zeta_0}\longrightarrow V_{\zeta_0}$ such that for any $z\in U_{\zeta_0}$ the point $F_{\zeta_0}(z)$ is the unique solution of the equation $G(z,\zeta)=0$, $\zeta\in V_{\zeta_0}$.
In particular, if $\zeta_0\in\mathbb{D}$ then $F_{\zeta_0}=F$ near $f(\zeta_0)$.
\end{remm}
\begin{propp}\label{34}
Let $f:\mathbb{D}\longrightarrow D$ be an $E$-mapping. Then there exist arbitrarily small neighborhoods $U$, $V$ of $\overline D$, $\overline{\DD}$ respectively such that for any $z\in U$ the equation $G(z,\zeta)=0$, $\zeta\in V$, has exactly one solution.
\end{propp}
\begin{proof} In view of Proposition~\ref{1} and Remark~\ref{przed34} it suffices to prove that there exist neighborhoods $U$, $V$ of $\overline D$, $\overline{\DD}$ respectively such that for any $z\in U$ the equation $G(z,\cdotp)=0$ has at most one solution $\zeta\in V$.
Assume the contrary. Then for any neighborhoods $U$ of $\overline D$ and $V$ of $\overline{\DD}$ there are $z\in U$, $\zeta_1,\zeta_2\in V$, $\zeta_1\neq\zeta_2$ such that $G(z,\zeta_1)=G(z,\zeta_2)=0$. For $m\in\mathbb{N}$ put $$U_m:=\{z\in\mathbb{C}^n:\dist(z,D)<1/m\},$$ $$V_m:=\{\zeta\in\mathbb{C}:\dist(\zeta,\mathbb{D})<1/m\}.$$ There exist $z_m\in U_m$, $\zeta_{m,1},\zeta_{m,2}\in V_m$, $\zeta_{m,1}\neq\zeta_{m,2}$ such that $G(z_m,\zeta_{m,1})=G(z_m,\zeta_{m,2})=0$. Passing to a subsequence we may assume that $z_m\to z_0\in\overline D$. Analogously we may assume $\zeta_{m,1}\to\zeta_1\in \overline{\DD}$ and $\zeta_{m,2}\to\zeta_2\in\overline{\DD}$. Clearly, $G(z_0,\zeta_1)=G(z_0,\zeta_2)=0$. Let us consider a few cases.
1) If $\zeta_1,\zeta_2\in\mathbb{T}$, then $G(z_0,\zeta_j)=0$ is equivalent to $$\langle z_0-f(\zeta_j), \nu_D(f(\zeta_j))\rangle=0,\ j=1,2,$$ consequently $z_0-f(\zeta_j)\in T^{\mathbb{C}}_D(f(\zeta_j))$. By the strong linear convexity of $D$ we get $z_0=f(\zeta_j)$. But $f$ is injective in $\overline{\DD}$, so $\zeta_1=\zeta_2=:\zeta_0$. It follows from Remark~\ref{przed34} that in a sufficiently small neighborhood of $(z_0,\zeta_0)$ all solutions of the equation $G(z,\zeta)=0$ are of the form $(z,F_{\zeta_0}(z))$. Points $(z_m,\zeta_{m,1})$ and $(z_m,\zeta_{m,2})$ belong to this neighborhood for large $m$, which gives a contradiction.
2) If $\zeta_1\in\mathbb{T}$ and $\zeta_2\in\mathbb{D}$, then analogously as above we deduce that $z_0=f(\zeta_1)$. Let us take an arbitrary sequence $\{\eta_m\}\subset\mathbb{D}$ convergent to $\zeta_1$. Then $f(\eta_m) \in D$ and $f(\eta_m)\to z_0$, so the sequence $G(f(\eta_m),\cdotp)$ converges to $G(z_0,\cdotp)$ uniformly on $\mathbb{D}$. Since $G(z_0,\cdotp)\not\equiv 0$, $G(z_0,\zeta_2)=0$ and $\zeta_2\in\mathbb{D}$, we deduce from Hurwitz Theorem that for large $m$ the functions $G(f(\eta_m),\cdotp)$ have roots $\theta_m\in\mathbb{D}$ such that $\theta_m\to\zeta_2$. Hence $G(f(\eta_m),\theta_m)=0$ and from the uniqueness of solutions in $D\times\mathbb{D}$ (Proposition~\ref{1}) we have $$\theta_m=F(f(\eta_m))=\eta_m.$$ This is a contradiction, because the left side tends to $\zeta_2$ and the right one to $\zeta_1$, as $m\to\infty$.
3) We are left with the case $\zeta_1,\zeta_2\in\mathbb{D}$.
If $z_0\in\overline{D}\setminus f(\mathbb{T})$ then $z_0\in W$. In $W\times\mathbb{D}$ all solutions of the equation $G=0$ are of the form $(z,F(z))$, $z\in W$. But for large $m$ the points $(z_m,\zeta_{m,1})$, $(z_m,\zeta_{m,2})$ belong to $W\times\mathbb{D}$, which is a contradiction with the uniqueness.
If $z_0\in f(\mathbb{T})$, then $z_0=f(\zeta_0)$ for some $\zeta_0\in\mathbb{T}$. Clearly, $G(f(\zeta_0),\zeta_0)=0$, whence $G(z_0,\zeta_0)=G(z_0,\zeta_1)=0$ and $\zeta_0\in\mathbb{T}$, $\zeta_1\in \mathbb{D}$. This is just the case 2), which has been already considered.
\end{proof}
\begin{corr} There are neighborhoods $U$, $V$ of $\overline D$ and $\overline{\DD}$ respectively with $V\Subset\mathbb{D}_f$, such that the function $F$ extends holomorphically on $U$. Moreover, all solutions of the equation $G|_{U\times V}=0$ are of the form $(z,F(z))$, $z\in U$.
In particular, $F\circ f=\id_{V}$.
\end{corr}
\section{H\"older estimates}\label{22}
\begin{df}\label{30} For a given $c>0$ let the family $\mathcal{D}(c)$ consist of all pairs $(D,z)$, where $D\subset\mathbb{C}^n$, $n\geq 2$, is a bounded pseudoconvex domain with real $\mathcal C^2$ boundary and $z\in D$, satisfying
\begin{enumerate}
\item $\dist(z,\partial D)\geq 1/c$;
\item the diameter of $D$ is not greater than $c$ and $D$ satisfies the interior ball condition with a radius $1/c$;
\item for any $x,y\in D$ there exist $m\leq 8 c^2$ and open balls $B_0,\ldots,B_m\subset D$ of radius $1/(2c)$ such that $x\in B_0$, $y\in B_m$ and the distance between the centers of the balls $B_j$, $B_{j+1}$ is not greater than $1/(4c)$ for $j=0,\ldots,m-1$;
\item for any open ball $B\subset\mathbb{C}^n$ of radius not greater than $1/c$, intersecting non-emptily with $\partial D$, there exists a mapping $\Phi\in{\mathcal O}(\overline{D},\mathbb{C}^n)$ such that
\begin{enumerate}
\item for any $w\in\Phi(B\cap\partial D)$ there is a ball of radius $c$ containing $\Phi(D)$ and tangent to $\partial\Phi(D)$ at $w$ (let us call it the ``exterior ball condition'' with a radius $c$);
\item $\Phi$ is biholomorphic in a neighborhood of $\overline B$ and $\Phi^{-1}(\Phi(B))=B$;
\item entries of all matrices $\Phi'$ on $B\cap\overline D$ and $(\Phi^{-1})'$ on $\Phi(B\cap\overline{D})$ are bounded in modulus by $c$;
\item $\dist(\Phi(z),\partial\Phi(D))\geq 1/c$;
\end{enumerate}
\item the normal vector $\nu_D$ is Lipschitz with a constant $2c$, that is $$|\nu_D(a)-\nu_D(b)|\leq 2c|a-b|,\ a,b\in \partial D;$$
\item the $\varepsilon$-hull of $D$, i.e. a domain $D_{\varepsilon}:=\{w\in\mathbb C^n:\dist (w,D)<\varepsilon\}$, is strongly pseudoconvex for any $\varepsilon\in (0,1/c).$
\end{enumerate}
\end{df}
Recall that the {\it interior ball condition} with a radius $r>0$ means that for any point $a\in\partial D$ there is $a'\in D$ and a ball $B_n(a',r)\subset D$ tangent to $\partial D$ at $a$. Equivalently $$D=\bigcup_{a'\in D'}B_n(a',r)$$ for some set $D'\subset D$.
It may be shown that (2) and (5) may be expressed in terms of boundedness of the normal curvature, boundedness of a domain and the condition (3). This however lies beyond the scope of this paper and needs some very technical arguments so we omit the proof of this fact. The reasons why we decided to use (2) in such a form is its connection with the condition (3) (this allows us to simplify the proof in some places).
\begin{rem}\label{con}
Note that any convex domain satisfying conditions (1)--(4) of Definition~\ref{30} satisfies conditions (5) and (6), as well.
Actually, it follows from (2) that for any $a\in\partial D$ there exists a ball $B_n(a',1/c)\subset D$ tangent to $\partial D$ at $a$. Then $$\nu_D(a)=\frac{a'-a}{|a'-a|}=c(a'-a).$$ Hence $$|\nu_D(a)-\nu_D(b)|=c|a'-a-b'+b|=c|a'-b'-(a-b)|\leq c|a'-b'|+c|a-b|.$$ Since $D$ is convex, we have $|a'-b'|\leq|a-b|$, which gives (5).
The condition (6) is also clear --- for any $\varepsilon>0$ an $\varepsilon$-hull of a strongly convex domain is strongly convex.
\end{rem}
\begin{rem}
For a convex domain $D$ the condition (3) of Definition \ref{30} amounts to the condition (2).
Indeed, for two points $x,y\in D$ take two balls of radius $1/(2c)$ containing them and contained in $D$. Then divide the interval between the centers of the balls into $[4c^2]+1$ equal parts and take balls of radius $1/(2c)$ with centers at the points of the partition.
Note also that if $D$ is strongly convex and satisfies the interior ball condition with a radius $1/c$ and the exterior ball condition with a radius $c$, one can take $\Phi:=\id_{\mathbb{C}^n}$.
\end{rem}
\begin{rem}\label{D(c),4}
For a strongly pseudoconvex domain $D$, $c'>0$, and any $z\in D$ such that $\dist(z,\partial D)>1/c'$ there exists $c=c(c')>0$ satisfying $(D,z)\in\mathcal{D}(c)$.
Indeed, the conditions (1)--(3) and (5)--(6) are clear. Only (4) is non-trivial.
The construction of the mapping $\Phi$ amounts to the construction of Forn\ae ss peak functions. Actually, apply directly Proposition 1 from \cite{For} to any boundary point of $\partial D$ (obviously $D$ has a Stein neighborhood basis). This gives a covering of $\partial D$ with a finite number of balls $B_j$, maps $\Phi_j\in{\mathcal O}(\overline{D},\mathbb{C}^n)$ and strongly convex $C^\infty$-smooth domains $C_j$, $j=1,\ldots, N$, such that
\begin{itemize}\item $\Phi_j(D)\subset C_j$;
\item $\Phi_j(\overline D)\subset\overline C_j$;
\item $\Phi_j(B_j\setminus\overline D)\subset\mathbb C^n\setminus\overline C_j$;
\item $\Phi_j^{-1}(\Phi_j(B_j))=B_j$;
\item $\Phi_j|_{B_j}: B_j\longrightarrow \Phi_j(B_j)$ is biholomorphic.
\end{itemize} Therefore, one may choose $c>0$ such that every $C_j$ satisfies the exterior ball condition with $c$, i.e. for any $x\in \partial C_j$ there is a ball of radius $c$ containing $C_j$ and tangent to $\partial C_j$ at $x$, every ball of radius $1/c$ intersecting non-emptily with $\partial D$ is contained in some $B_j$ (here one may use a standard argument invoking the Lebesgue number) and the conditions (c), (d) are also satisfied (with $\Phi:=\Phi_j$).
\end{rem}
In this section we use the words `uniform', `uniformly' if $(D,z)\in \mathcal D(c)$. This means that estimates will depend only on $c$ and will be independent of $D$ and $z$ if $(D,z)\in\mathcal{D}(c)$ and of $E$-mappings of $D$ mapping $0$ to $z$. Moreover, in what follows we assume that $D$ is a strongly linearly convex domain with real-analytic boundary.
\begin{prop}\label{7}
Let $f:(\mathbb{D},0)\longrightarrow(D,z)$ be an $E$-mapping. Then $$\dist(f(\zeta),\partial D)\leq C(1-|\zeta|),\ \zeta\in\overline{\DD}$$ with $C>0$ uniform if $(D,z)\in\mathcal{D}(c)$.
\end{prop}
\begin{proof} There exists a uniform $C_1$ such that $$\text{if }\dist(w,\partial D)\geq 1/c\text{ then }k_D(w,z)<C_1.$$ Indeed, let $\dist(w,\partial D)\geq 1/c$ and let balls $B_0,\ldots,B_m$ with centers $b_0,\ldots,b_m$ be chosen to the points $w$, $z$ as in the condition (3) of Definition~\ref{30}. Then
\begin{multline*}k_D(w,z)\leq
k_D(w,b_0)+\sum_{j=0}^{m-1}k_D(b_j,b_{j+1})+k_D(b_m,z)\leq\\\leq k_{B_n(w,1/c)}(w,b_0)+\sum_{j=0}^{m-1}k_{B_j}(b_j,b_{j+1})+k_{B_n(z,1/c)}(b_m,z)=\\=p\left(0,\frac{|w-b_0|}{1/c}\right)+\sum_{j=0}^{m-1}p\left(0,\frac{|b_j-b_{j+1}|}{1/(2c)}\right)+
p\left(0,\frac{|b_m-z|}{1/c}\right)\leq\\\leq(m+2)p\left(0,\frac{1}{2}\right)\leq(8c^2+2)p\left(0,\frac{1}{2}\right)=:C_1.
\end{multline*}
If $\zeta\in\mathbb{D}$ is such that
$\dist(f(\zeta),\partial D)\geq 1/c$ then $$k_D(f(0),f(\zeta))\leq
C_2-\frac{1}{2}\log\dist(f(\zeta),\partial D)$$ with a uniform $C_2:=C_1+\frac{1}{2}\log c$.
In the other case, i.e. when $\dist(f(\zeta),\partial D)<1/c$, denote by $\eta$ the nearest point to
$f(\zeta)$ lying on $\partial D$. Let $w\in D$ be a center of a ball $B$ of radius $1/c$
tangent to $\partial D$ at $\eta$. By the condition (2) of Definition~\ref{30} we have $B\subset D$. Hence
\begin{multline*}k_D(f(0),f(\zeta))\leq k_D(f(0),w)+k_D(w,f(\zeta))\leq\\\leq
C_1+k_B(w,f(\zeta))\leq C_1+\frac{1}{2}\log 2-\frac{1}{2}\log\left(1-\frac{|f(\zeta)-w|}{1/c}\right)=\\=C_1+\frac{1}{2}\log 2-\frac{1}{2}\log(c\dist(f(\zeta),\partial B))=C_3-\frac{1}{2}\log\dist(f(\zeta),\partial D)
\end{multline*}
with a uniform $C_3:=C_1+\frac{1}{2}\log\frac{2}{c}$.
We have obtained the same type estimates in both cases. On the other side, by
Corollary~\ref{5} $$k_D(f(0),f(\zeta))=p(0,\zeta)\geq-\frac{1}{2}\log(1-|\zeta|),$$ which finishes the proof.
\end{proof}
Recall that we have assumed that $\rho$ is of the form~\eqref{rho}.
\begin{prop}\label{9}
Let $f:(\mathbb{D},0)\longrightarrow(D,z)$ be an $E$-mapping. Then $$C_1<\rho(\zeta)^{-1}<C_2,\ \zeta\in\mathbb{T},$$ where
$C_1,C_2$ are uniform if $(D,z)\in\mathcal{D}(c)$.
\end{prop}
\begin{proof} For the upper estimate fix $\zeta_0\in\mathbb{T}$. Set $B:=B_n(f(\zeta_0),1/c)$ and let $\Phi\in{\mathcal O}(\overline{D},\mathbb{C}^n)$ be as in the condition (4) of Definition~\ref{30} for $B$. One can assume that $f(\zeta_0)=\Phi(f(\zeta_0))=0$ and $\nu_D(0)=\nu_{\Phi(D)}(0)=(1,0,\ldots,0)$. Then $\Phi(D)$ is contained in the half-space $\{w\in\mathbb{C}^n:\re w_1<0\}$. Putting $h:=\Phi\circ f$ we have $$h_1(\mathbb{D})\subset\{w_1\in\mathbb{C}:\re w_1<0\}.$$ In virtue of the Schwarz Lemma on the half-plane
\begin{equation}\label{schh1}|h_1'(t\zeta_0)|\leq\frac{-2\re h_1(t\zeta_0)}{1-|t\zeta_0|^2}.\end{equation}
Let $\delta$ be the signed boundary distance of $\Phi(D)$, i.e. $$\delta(x):=\begin{cases}-\dist(x,\partial\Phi(D)),\ x\in\Phi(D)\\\ \ \ \dist(x,\partial\Phi(D)),\ x\notin\Phi(D).\end{cases}$$ It is a defining function of $\Phi(D)$ in a neighborhood of $0$ (recall that $\Phi^{-1}(\Phi(B))=B$). Observe that $$\delta(x)=\delta(0)+\re\langle\nabla\delta(0), x\rangle+O(|x|^2)=\re x_1+O(|x|^2).$$
If $x\in\Phi(D)$ tends transversally to $0$, then the angle between the vector $x$ and the hyperplane $\{w\in\mathbb{C}^n:\re w_1=0\}$ is separated from $0$, i.e. its sine $(-\re x_1)/|x|>\varepsilon$ for some $\varepsilon>0$ independent of $x$. Thus $$\frac{\delta(x)}{\re x_1}=1+O(|x|)\text{ as }x\to 0\text{ transversally. }$$ Consequently \begin{equation}\label{50}-\re x_1\leq 2\dist(x,\partial\Phi(D))\text{ as }x\to 0\text{ transversally. }\end{equation}
We know that $t\longmapsto f(t\zeta_0)$ hits $\partial D$ transversally. Therefore, $t\longmapsto h(t\zeta_0)$ hits $\partial \Phi(D)$ transversally, as well. Indeed, we have \begin{multline}\label{hf}\left\langle\left.\frac{d}{dt}h(t\zeta_0)\right|_{t=1},\nu_{\Phi(D)}(h(\zeta_0))\right\rangle=\left\langle \Phi'(0)f'(\zeta_0)\zeta_0,\frac{(\Phi^{-1})'(0)^*\nabla r(0)}{|(\Phi^{-1})'(0)^*\nabla r(0)|}\right\rangle=\\=\frac{\langle\zeta_0 f'(\zeta_0),\nabla r(0)\rangle}{|(\Phi'(0)^{-1})^*\overline{\nabla r(0)}|}=\frac{\langle\zeta_0 f'(\zeta_0),\nu_D(f(\zeta_0))|\nabla r(0)|\rangle}{|(\Phi'(0)^{-1})^*\overline{\nabla r(0)}|},
\end{multline}
where $r$ is a defining function of $D$. In particular,
\begin{multline*} \re \left\langle\left.\frac{d}{dt}h(t\zeta_0)\right|_{t=1},\nu_{\Phi(D)}(h(\zeta_0))\right\rangle=\re \frac{\langle\zeta_0 f'(\zeta_0),\nu_D(f(\zeta_0))|\nabla r(0)|\rangle}{|(\Phi'(0)^{-1})^*\overline{\nabla r(0)}|}=\\=\frac{\rho(\zeta_0)^{-1}|\nabla r(0)|}{|(\Phi'(0)^{-1})^*\overline{\nabla r(0)}|}\neq 0.\end{multline*} This proves that $t\longmapsto h(t\zeta_0)$ hits $\partial\Phi(D)$ transversally.
Consequently, we may put $x=h(t\zeta_0)$ into \eqref{50} to get \begin{equation}\label{hf1}\frac{-2\re h_1(t\zeta_0)}{1-|t\zeta_0|^2}\leq\frac{4\dist(h(t\zeta_0),\partial\Phi(D))}
{1-|t\zeta_0|^2},\ t\to 1.\end{equation}
But $\Phi$ is a biholomorphism near $0$, so \begin{equation}\label{nfr}\frac{4\dist(h(t\zeta_0),\partial\Phi(D))}{1-|t\zeta_0|^2}\leq C_3\frac{\dist(f(t\zeta_0),\partial D)}{1-|t\zeta_0|},\ t\to 1,\end{equation} where $C_3$ is a uniform constant depending only on $c$ (thanks to the condition (4)(c) of Definition~\ref{30}). By Proposition \ref{7}, the term on the right side of~\eqref{nfr} does not exceed some uniform constant.
It follows from \eqref{hf} that \begin{multline*}\rho(\zeta_0)^{-1}=|\langle f'(\zeta_0)\zeta_0,\nu_D(f(\zeta_0))\rangle|\leq C_4|\langle h'(\zeta_0), \nu_{\Phi(D)}(h(\zeta_0))\rangle|=\\=C_4|h_1'(\zeta_0)|=\lim_{t\to 1}C_4|h_1'(t\zeta_0)|\end{multline*} with a uniform $C_4$ (here we use the condition (4)(c) of Definition~\ref{30} again).
Combining \eqref{schh1}, \eqref{hf1} and \eqref{nfr} we get the upper estimate for $\rho(\zeta_0)^{-1}.$
Now we prove the lower estimate. Let $r$ be the signed boundary distance to $\partial D$. For $\varepsilon=1/c$ the function $$\varrho(w):=-\log(\varepsilon-r(w))+\log\varepsilon,\ w\in
D_\varepsilon,$$ where $D_\varepsilon$ is an $\varepsilon$-hull of $D$, is plurisubharmonic and defining for $D$. Indeed, we have $$-\log(\varepsilon-r(w))=-\log\dist(w,\partial D_\varepsilon),\ w\in D_\varepsilon$$ and $D_\varepsilon$ is pseudoconvex.
Therefore, a function $$v:=\varrho\circ f:\overline{\mathbb{D}}\longrightarrow(-\infty,0]$$ is subharmonic on $\mathbb{D}$. Moreover, since $f$ maps $\mathbb{T}$ in $\partial D$ we infer that $v=0$ on $\mathbb{T}$. Moreover, since $|f(\lambda)-z|<c$ for $\lambda\in\mathbb{D}$, we have $$|f(\lambda)-z|<\frac{1}{2c}\text{ if }|\lambda|\leq\frac{1}{2c^2}.$$ Therefore, for a fixed $\zeta_0\in\mathbb{T}$ $$M_{\zeta_0}(x):=\max_{t\in[0,2\pi]}v(\zeta_0 e^{x+it})\leq-\log\left(1+\frac{1}{2c\varepsilon}\right)=:-C_5\text{ if }x\leq-\log(2c^2).$$ Since $M_{\zeta_0}$ is convex for $x\leq 0$ and $M_{\zeta_0}(0)=0$, we get $$v(\zeta_0 e^x)\leq M_{\zeta_0}(x)\leq\frac{C_5x}{\log(2c^2)}\text{\ \ \
for \ }-\log(2c^2)\leq x\leq 0.$$ Hence (remember that $v(\zeta_0)=0$)
\begin{multline}\label{wk}\frac{C_5}{\log(2c^2)}\leq\left.\frac{d}{dx}v(\zeta_0 e^x)\right|_{x=0}=\sum_{j=1}^n\frac{\partial\varrho}{\partial z_j}(f(\zeta_0))f_j'(\zeta_0)\zeta_0=\\=\langle\zeta_0 f'(\zeta_0),\nabla\varrho(f(\zeta_0))\rangle=\rho(\zeta_0)^{-1}|\nabla\varrho(f(\zeta_0))|.\end{multline}
Moreover,
\begin{multline*}|\nabla\varrho(f(\zeta_0))|= \left\langle\nabla\varrho(f(\zeta_0)),\frac{\nabla\varrho(f(\zeta_0))}{|\nabla\varrho(f(\zeta_0))|}\right\rangle_\mathbb{R}
=\langle\nabla\varrho(f(\zeta_0)),\nu_D(f(\zeta_0))\rangle_\mathbb{R}=\\=\frac{\partial\varrho}{\partial\nu_D}(f(\zeta_0))=\lim_{t\to 0}\frac{\varrho(f(\zeta_0)+t\nu_D(f(\zeta_0)))-\varrho(f(\zeta_0))}{t}=\frac{1}{\varepsilon}=c,
\end{multline*} as $r(a+t\nu_D(a))=t$ if $a\in \partial D$ and $t\in\mathbb R$ is small enough.
This, together with \eqref{wk}, finishes the proof of the lower estimate.
\end{proof}
\begin{prop}\label{8}
Let $f:(\mathbb{D},0)\longrightarrow (D,z)$ be an $E$-mapping.
Then $$|f(\zeta_1)-f(\zeta_2)|\leq C\sqrt{|\zeta_1-\zeta_2|},\ \zeta_1,\zeta_2\in\overline{\DD},$$ where $C$ is uniform if $(D,z)\in\mathcal{D}(c)$.
\end{prop}
\begin{proof}
Let $\zeta_0\in\mathbb{D}$ be such that $1-|\zeta_0|<1/(cC)$, where $C$ is as in Proposition~\ref{7}. Then $B:=B_n(f(\zeta_0),1/c)$ intersects $\partial D$. Take $\Phi$ for the ball $B$ from the condition (4) of Definition~\ref{30}. Let $w$ denote the nearest point to $\Phi(f(\zeta_0))$ lying on $\partial\Phi(D)$. From the conditions (4)(b)-(c) of Definition~\ref{30} we find that there is a uniform constant $r<1$ such that the point $w$ belongs to $\Phi(B\cap\partial D)$ provided that $|\zeta_0|\geq r$.
From the condition (4)(a) of Definition~\ref{30} we get that there is $w_0$ such that $\Phi(D)\subset B_n(w_0,c)$ and the ball $B_n(w_0,c)$ is tangent to $\Phi(D)$ at $w$. Let $$h(\zeta):=(\Phi\circ f)\left(\frac{\zeta_0-\zeta}{1-\overline{\zeta_0}\zeta}\right),\ \zeta\in\mathbb{D}.$$
Then $h$ is holomorphic, $h(\mathbb{D})\subset B_n(w_0,c)$ and $h(0)=\Phi(f(\zeta_0))$. Using Lemma \ref{schw} we get \begin{multline*}|h'(0)|\leq\sqrt{c^2-|h(0)-w_0|^2}\leq\sqrt{2c(c-|\Phi(f(\zeta_0))-w_0|)}=\\
=\sqrt{2c(|w_0-w|-|\Phi(f(\zeta_0))-w_0|)}\leq\sqrt{2c}\sqrt{|\Phi(f(\zeta_0))-w|}=\\
=\sqrt{2c}\sqrt{\dist(\Phi(f(\zeta_0)),\partial\Phi(D))}.\end{multline*} Since $$h'(0)=\Phi'(f(\zeta_0))f'(\zeta_0)\left.\frac{d}{d\zeta}\frac{\zeta_0-\zeta}{1-\overline{\zeta_0}\zeta}\right|_{\zeta=0},$$ by the condition (4)(c) of Definition~\ref{30} we get $$|h'(0)|\geq C_1|f'(\zeta_0)|(1-|\zeta_0|^2)$$ with a uniform $C_1$, so $$|f'(\zeta_0)|\leq\frac{|h'(0)|}{C_1(1-|\zeta_0|^2)}\leq\frac{\sqrt{2c}}{C_1}\frac{\sqrt{\dist(\Phi(f(\zeta_0)),\partial\Phi(D))}}{1-|\zeta_0|^2}\leq C_2\frac{\sqrt{\dist(f(\zeta_0),\partial D)}}{1-|\zeta_0|^2},$$ where $C_2$ is uniform. Combining with Proposition \ref{7} \begin{equation}\label{46}|f'(\zeta_0)|\leq C_3\frac{\sqrt{1-|\zeta_0|}}{1-|\zeta_0|^2}=\frac{C_3}{\sqrt{1-|\zeta_0|}},\end{equation} where a constant $C_3$ is uniform.
We have shown that \eqref{46} holds for $r\leq |\zeta_0|<1$ with a uniform $r<1$. For $|\zeta_0|<r$ we estimate in the following way $$|f'(\zeta_0)|\leq\max_{|\zeta|=r}|f'(\zeta)|\leq\frac{C_3}{\sqrt{1-r}}\leq\frac{C_4}{\sqrt{1-|\zeta_0|}}$$ with a uniform $C_4:=C_3/\sqrt{1-r}$.
Using Theorems \ref{lit1} and \ref{lit2} with $\alpha=1/2$ we finish the proof.
\end{proof}
\begin{prop}\label{10a}
Let $f:(\mathbb{D} ,0)\longrightarrow (D,z)$ be an $E$-mapping.
Then $$|\rho(\zeta_1)-\rho(\zeta_2)|\leq C\sqrt{|\zeta_1-\zeta_2|},\ \zeta_1,\zeta_2\in\mathbb{T},$$ where
$C$ is uniform if $(D,z)\in\mathcal{D}(c)$.
\end{prop}
\begin{proof}It suffices to prove that there exist uniform $C,C_1>0$ such that $$|\rho(\zeta_1)-\rho(\zeta_2)|\leq C\sqrt{|\zeta_1-\zeta_2|},\ \zeta_1,\zeta_2\in\mathbb{T},\ |\zeta_1-\zeta_2|<C_1.$$
Fix $\zeta_1\in\mathbb{T}$. Without loss of generality we may assume that $\nu_{D,1}(f(\zeta_1))=1$. Let $0<C_1\leq 1/4$ be uniform and such that $$|\nu_{D,1}(f(\zeta))-1|<1/2,\ \zeta\in\mathbb{T}\cap B_n(\zeta_1,3C_1).$$ It is possible, since by Proposition \ref{8} $$|{\nu_D(f(\zeta))}-{\nu_D(f(\zeta'))}|\leq 2c|f(\zeta)-f(\zeta')|\leq C'\sqrt{|\zeta-\zeta'|},\ \zeta,\zeta'\in\mathbb{T},$$ with a uniform $C'>0$. There exists a function $\psi\in{\mathcal C}^1(\mathbb{T},[0,1])$ such that $\psi=1$ on $\mathbb{T}\cap B_n(\zeta_1,2C_1)$ and $\psi=0$ on $\mathbb{T}\setminus B_n(\zeta_1,3C_1)$. Then the function $\varphi:\mathbb{T}\longrightarrow\mathbb{C}$ defined by $$\varphi:=(\overline{\nu_{D,1}\circ f}-1)\psi+1$$ satisfies
\begin{enumerate}
\item $\varphi(\zeta)=\overline{\nu_{D,1}(f(\zeta))}$, $\zeta\in\mathbb{T}\cap B_n(\zeta_1,2C_1)$;
\item $|\varphi(\zeta)-1|<1/2$, $\zeta\in\mathbb{T}$;
\item $\varphi$ is uniformly $1/2$-H\"older continuous on $\mathbb{T}$, i.e. it is $1/2$-H\"older continuous with a uniform constant (remember that $\psi$ was chosen uniformly).
\end{enumerate}
First observe that $\log\varphi$ is well-defined. Using the properties listed above we deduce that $\log\varphi$ and $\im\log\varphi$ are uniformly $1/2$-H\"older continuous on $\mathbb{T}$, as well. The function $\im\log\varphi$ can be extended continuously to a function $v:\overline{\DD}\longrightarrow\mathbb{R}$, harmonic in $\mathbb{D}$. There is a function $h\in\mathcal O(\mathbb{D})$ such that $v=\im h$ in $\mathbb{D}$. Taking $h-\re h(0)$ instead of $h$, one can assume that $\re h(0)=0$. By Theorem \ref{priv} applied to $ih$, we get that the function $h$ extends continuously on $\overline{\DD}$ and $h$ is uniformly $1/2$-H\"older continuous in $\overline{\DD}$. Hence the function $u:=\re h:\overline{\DD}\longrightarrow\mathbb{R}$ is uniformly $1/2$-H\"older continuous in $\overline{\DD}$ with a uniform constant $C_2$. Furthermore, $u$ is uniformly bounded in $\overline{\DD}$, since $$|u(\zeta)|=|u(\zeta)-u(0)|\leq C_2\sqrt{|\zeta|},\ \zeta\in\overline{\DD}.$$
Let $g(\zeta):=\widetilde{f}_1(\zeta)e^{-h(\zeta)}$ and $G(\zeta):=g(\zeta)/\zeta$. Then $g\in\mathcal O(\mathbb{D})\cap\mathcal C(\overline{\mathbb{D}})$ and $G\in\mathcal O(\mathbb{D}_*)\cap\mathcal C((\overline{\mathbb{D}})_*)$. Note that for $\zeta\in\mathbb{T}$ $$|g(\zeta)|=|\zeta
\rho(\zeta)\overline{\nu_{D,1}(f(\zeta))}e^{-h(\zeta)}|\leq\rho(\zeta)e^{-u(\zeta)},$$ which, combined with
Proposition \ref{9}, the uniform boundedness of $u$ and the maximum principle, gives a uniform boundedness of $g$ in $\overline{\DD}$. The function $G$ is uniformly bounded in $\overline{\mathbb{D}}\cap B_n(\zeta_1,2C_1)$. Moreover, for $\zeta\in\mathbb{T}\cap B_n(\zeta_1,2C_1)$ \begin{eqnarray*} G(\zeta)&=&\rho(\zeta)\overline{\nu_{D,1}(f(\zeta))}e^{-u(\zeta)-i\im\log \varphi(\zeta)}=\\&=&\rho(\zeta)\overline{\nu_{D,1}(f(\zeta))}e^{-u(\zeta)+\re\log\varphi(\zeta)}e^{-\log\varphi(\zeta)}
=\rho(\zeta)e^{-u(\zeta)+\re\log\varphi(\zeta)}\in\mathbb{R}.\end{eqnarray*} By the Reflection Principle one can extend $G$ holomorphically past $\mathbb{T}\cap B_n(\zeta_1,2C_1)$ to a function (denoted by the same letter) uniformly bounded in $B_n(\zeta_1,2C_2)$, where a constant $C_2$ is uniform. Hence, from the Cauchy formula, $G$ is uniformly Lipschitz continuous in $B_n(\zeta_1,C_2)$, consequently uniformly $1/2$-H\"older continuous in $B_n(\zeta_1,C_2)$.
Finally, the functions $G$, $h$, $\nu_{D,1}\circ f$ are uniformly $1/2$-H\"older continuous on $\mathbb{T}\cap B_n(\zeta_1,C_2)$, $|\nu_{D,1}\circ f|>1/2$ on $\mathbb{T}\cap B_n(\zeta_1,C_2)$, so the function $\rho=Ge^h/\overline{\nu_{D,1}\circ f}$ is uniformly $1/2$-H\"older continuous on $\mathbb{T}\cap B_n(\zeta_1,C_2)$.
\end{proof}
\begin{prop}\label{10b}
Let $f:(\mathbb{D},0)\longrightarrow (D,z)$ be an $E$-mapping.
Then $$|\widetilde{f}(\zeta_1)-\widetilde{f}(\zeta_2)|\leq C\sqrt{|\zeta_1-\zeta_2|},\ \zeta_1,\zeta_2\in\overline{\mathbb{D}},$$ where
$C$ is uniform if $(D,z)\in\mathcal{D}(c)$.
\end{prop}
\begin{proof}
By Propositions \ref{8} and \ref{10a} we have desired inequality for $\zeta_1,\zeta_2\in\mathbb{T}$. Theorem \ref{lit2} finishes the proof.
\end{proof}
\section{Openness of $E$-mappings' set}\label{27}
We shall show that perturbing a little a domain $D$ equipped with an $E$-mapping, we obtain a domain which also has an $E$-mapping, being close to a given one.
\subsection{Preliminary results}
\begin{propp}\label{11}
Let $f:\mathbb{D}\longrightarrow D$ be an $E$-mapping. Then there exist domains $G,\widetilde D,\widetilde G\subset\mathbb{C}^n$ and a biholomorphism $\Phi:\widetilde D\longrightarrow\widetilde G$ such that
\begin{enumerate}
\item $\widetilde D,\widetilde G$ are neighborhoods of $\overline D,\overline G$ respectively;
\item $\Phi(D)=G$;
\item $g(\zeta):=\Phi(f(\zeta))=(\zeta,0,\ldots,0),\ \zeta\in\overline{\DD}$;
\item $\nu_G(g(\zeta))=(\zeta,0,\ldots,0),\ \zeta\in\mathbb{T}$;
\item for any $\zeta\in\mathbb{T}$, a point $g(\zeta)$ is a point of the strong linear convexity of $G$.
\end{enumerate}
\end{propp}
\begin{proof}
Let $U,V$ be the sets from Proposition \ref{34}. We claim that after a linear change of coordinates one can assume that $\widetilde{f}_1,\widetilde{f}_2$ do not have common zeroes in $V$.
Since $ f'\bullet\widetilde{f}=1$, at least one of the functions $\widetilde f_1,\ldots,\widetilde f_n$, say $\widetilde f_1$, is not identically equal to $0$. Let $\lambda_1,\ldots,\lambda_m$ be all zeroes of $\widetilde f_1$ in $V$. We may find $\alpha\in\mathbb{C}^n$ such that $$(\alpha_1\widetilde f_1+\ldots+\alpha_n\widetilde f_n)(\lambda_j)\neq 0,\ j=1,\ldots,m.$$ Otherwise, for any $\alpha\in\mathbb{C}^n$ there would exist $j\in\{1,\ldots,m\}$ such that $\alpha\bullet\widetilde f(\lambda_j)=0$, hence $$\mathbb{C}^n=\bigcup_{j=1}^m\{\alpha\in\mathbb{C}^n:\ \alpha\bullet\widetilde f(\lambda_j)=0\}.$$ The sets $\{\alpha\in\mathbb{C}^n:\alpha \bullet \widetilde f(\lambda_j)=0\}$, $j=1,\ldots,m$, are the $(n-1)$-dimensional complex hyperplanes, so their finite union cannot be the space $\mathbb{C}^n$.
Of course, at least one of the numbers $\alpha_2,\ldots,\alpha_n$, say $\alpha_2$, is non-zero. Let
$$A:=\left[\begin{matrix}
1 & 0 & 0 & \cdots & 0\\
\alpha_1 & \alpha_2 & \alpha_3 &\cdots & \alpha_n\\
0 & 0 & 1 & \cdots & 0\\
\vdots & \vdots & \vdots &\ddots & \vdots \\
0 & 0 & 0 & \cdots & 1
\end{matrix}\right],\quad B:=(A^T)^{-1}.$$ We claim that $B$ is a change of coordinates we are looking for. If $r$ is a defining function of $D$ then $r\circ B^{-1}$ is a defining function of $B(D)$, so $B(D)$ is a bounded strongly linearly convex domain with real analytic boundary. Let us check that $Bf$ is an $E$-mapping of $B(D)$ with associated mappings \begin{equation}\label{56}A\widetilde f\in{\mathcal O}(\overline{\DD})\text{\ \ and\ \ }\rho\frac{|A\overline{\nabla r\circ f}|}{|\nabla r\circ f|}\in\mathcal{C}^{\omega}(\mathbb{T}).\end{equation} The conditions (1) and (2) of Definition~\ref{21} are clear. For $\zeta\in\mathbb{T}$ we have \begin{equation}\label{57}\overline{\nu_{B(D)}(Bf(\zeta))}=\frac{\overline{\nabla(r\circ B^{-1})(Bf(\zeta))}}{|\nabla(r\circ B^{-1})(Bf(\zeta))|}=\frac{(B^{-1})^T\overline{\nabla r(f(\zeta))}}{|(B^{-1})^T\overline{\nabla r(f(\zeta))}|}=\frac{A\overline{\nabla r(f(\zeta))}}{|A\overline{\nabla r(f(\zeta))}|},\end{equation} so
\begin{equation}\label{58}\zeta\rho(\zeta)\frac{|A\overline{\nabla r(f(\zeta))}|}{|\nabla r(f(\zeta))|}\overline{\nu_{B(D)}(Bf(\zeta))}=\zeta\rho(\zeta)A\overline{\nu_D(f(\zeta))}=A\widetilde f(\zeta).\end{equation} Moreover, for $\zeta\in\mathbb{T}$, $z\in D$ \begin{multline*}\langle Bz-Bf(\zeta), \nu_{B(D)}(Bf(\zeta))\rangle=\overline{\nu_{B(D)}(Bf(\zeta))}^T(Bz-Bf(\zeta))=\\=\frac{\overline{\nabla r(f(\zeta))}^TB^{-1}B(z-f(\zeta))}{|(B^{-1})^T\overline{\nabla r(f(\zeta))}|}=\frac{|\nabla r(f(\zeta))|}{|(B^{-1})^T\overline{\nabla r(f(\zeta))}|}\overline{\nu_D(f(\zeta))}^T(z-f(\zeta))=\\=\frac{|\nabla r(f(\zeta))|}{|(B^{-1})^T\overline{\nabla r(f(\zeta))}|}\langle z-f(\zeta), \nu_D(f(\zeta))\rangle.
\end{multline*}
Therefore, $B$ is a desired linear change of coordinates, as claimed.
If necessary, we shrink the sets $U,V$ associated with $f$ to sets associated with $Bf$. There exist holomorphic mappings $h_1,h_2:V\longrightarrow\mathbb{C}$ such that
$$h_1\widetilde{f}_1+h_2\widetilde{f}_2\equiv 1\text{ in }V.$$ Generally, it is a well-known fact for functions on pseudoconvex domains, however in this case it may be shown quite elementarily. Indeed, if $\widetilde{f}_1\equiv 0$ or $\widetilde{f}_2\equiv 0$ then it is obvious. In the opposite case, let $\widetilde{f}_j=F_jP_j$, $j=1,2$, where $F_j$ are holomorphic, non-zero in $V$ and $P_j$ are polynomials with all (finitely many) zeroes in $V$. Then $P_j$ are relatively prime, so there are polynomials $Q_j$, $j=1,2$, such that $$Q_1P_1+Q_2P_2\equiv 1.$$ Hence $$\frac{Q_1}{F_1}\widetilde{f}_1+\frac{Q_2}{F_2}\widetilde{f}_2\equiv 1\ \text{ in }V.$$
Consider the mapping $\Psi:V\times\mathbb{C}^{n-1}\longrightarrow\mathbb{C}^n$ given by
\begin{equation}\label{et2}
\Psi_1(Z):=f_1(Z_1)-Z_2\widetilde{f}_2(Z_1)-h_1(Z_1)
\sum_{j=3}^{n}Z_j\widetilde{f}_j(Z_1),
\end{equation}
\begin{equation}\label{et3}
\Psi_2(Z):=f_2(Z_1)+Z_2\widetilde{f}_1(Z_1)-h_2(Z_1)
\sum_{j=3}^{n}Z_j\widetilde{f}_j(Z_1),
\end{equation}
\begin{equation}\label{et4}
\Psi_j(Z):=f_j(Z_1)+Z_j,\ j=3,\ldots,n.
\end{equation}
We claim that $\Psi$ is biholomorphic in $\Psi^{-1}(U)$. First of all observe that $\Psi^{-1}(\{z\})\neq\emptyset$ for any $z\in U$. Indeed, by Proposition \ref{34} there exists (exactly one) $Z_1\in V$ such that $$(z-f(Z_1))\bullet\widetilde{f}(Z_1)=0.$$ The numbers $Z_j\in\mathbb{C}$, $j=3,\ldots,n$ are determined uniquely by the equations $$Z_j=z_j-f_j(Z_1).$$ At least one of the numbers $\widetilde f_1(Z_1),\widetilde f_2(Z_1)$, say $\widetilde f_1(Z_1)$, is non-zero. Let $$Z_2:=\frac{z_2-f_2(Z_1)+h_2(Z_1)\sum_{j=3}^{n}Z_j\widetilde{f}_j(Z_1)}{\widetilde f_1(Z_1)}.$$ Then we easily check that the equality $$z_1=f_1(Z_1)-Z_2\widetilde{f}_2(Z_1)-h_1(Z_1)
\sum_{j=3}^{n}Z_j\widetilde{f}_j(Z_1)$$ is equivalent to $(z-f(Z_1))\bullet\widetilde{f}(Z_1)=0$, which is true.
To finish the proof of biholomorphicity of $\Psi$ in $\Psi^{-1}(U)$ it suffices to check that $\Psi$ is injective in $\Psi^{-1}(U)$. Let us take $Z,W$ such that $\Psi(Z)=\Psi(W)=z\in U$. By a direct computation both $\zeta=Z_1\in V$ and $\zeta=W_1\in V$ solve the equation
$$(z-f(\zeta))\bullet\widetilde{f}(\zeta)=0.$$ From Proposition \ref{34} we infer that it has exactly one solution. Hence $Z_1=W_1$. By \eqref{et4} we have $Z_j=W_j$ for $j=3,\ldots,n$. Finally $Z_2=W_2$ follows from
one of the equations \eqref{et2}, \eqref{et3}. Let $G:=\Psi^{-1}(D)$, $\widetilde D:=U$, $\widetilde G:=\Psi^{-1}(U)$, $\Phi:=\Psi^{-1}$.
Now we are proving that $\Phi$ has the desired properties. We have $$\Psi_j(\zeta,0,\ldots,0)=f_j(\zeta),\ j=1,\ldots,n,$$ so $\Phi(f(\zeta))=(\zeta,0,\ldots,0)$, $\zeta\in\overline{\DD}$. Put $g(\zeta):=\Phi(f(\zeta))$, $\zeta\in\overline{\DD}$. Note that the entries of the matrix $\Psi'(g(\zeta))$ are $$\frac{\partial\Psi_1}{\partial Z_1}(g(\zeta))=f_1'(\zeta),\ \frac{\partial\Psi_1}{\partial Z_2}(g(\zeta))=-\widetilde{f}_2(\zeta),\ \frac{\partial\Psi_1}{\partial Z_j}(g(\zeta))=-h_1(\zeta)\widetilde{f}_j(\zeta),\ j\geq 3,$$$$\frac{\partial\Psi_2}{\partial Z_1}(g(\zeta))=f_2'(\zeta),\ \frac{\partial\Psi_2}{\partial Z_2}(g(\zeta))=\widetilde{f}_1(\zeta),\ \frac{\partial\Psi_2}{\partial Z_j}(g(\zeta))=-h_2(\zeta)\widetilde{f}_j(\zeta),\ j\geq 3,$$$$\frac{\partial\Psi_k}{\partial Z_1}(g(\zeta))=f_k'(\zeta),\ \frac{\partial\Psi_k}{\partial Z_2}(g(\zeta))=0,\ \frac{\partial\Psi_k}{\partial Z_j}(g(\zeta))=\delta^{k}_{j},\ j,k\geq 3.$$ Thus $\Psi '(g(\zeta))^T\widetilde f(\zeta)=(1,0,\ldots,0)$, $\zeta\in\overline{\DD}$ (since $f'\bullet\widetilde f=1$). Let us take a defining function $r$ of $D$. Then $r\circ\Psi$ is a defining function of $G$. Therefore, \begin{multline*}\nu_G(g(\zeta))=\frac{\nabla(r\circ\Psi)(g(\zeta))}{|\nabla(r\circ\Psi)(g(\zeta))|}=
\frac{\overline{\Psi'(g(\zeta))}^T\nabla r(f(\zeta))}{|\overline{\Psi'(g(\zeta))}^T\nabla r(f(\zeta))|}=\\=\frac{\overline{\Psi'(g(\zeta))}^T\overline{\frac{\widetilde f(\zeta)}{\zeta\rho(\zeta)}}|\nabla r(f(\zeta))|}{\left|\overline{\Psi'(g(\zeta))}^T\overline{\frac{\widetilde f(\zeta)}{\zeta\rho(\zeta)}}|\nabla r(f(\zeta))|\right|}=g(\zeta),\ \zeta\in\mathbb{T}.\end{multline*}
It remains to prove the fifth condition. By Definition \ref{29}(2) we have to show that \begin{equation}\label{sgf}\sum_{j,k=1}^n\frac{\partial^2(r\circ\Psi)}{\partial z_j\partial\overline{z}_k}(g(\zeta))X_{j}\overline{X}_{k}>\left|\sum_{j,k=1}^n\frac{\partial^2(r\circ\Psi)}{\partial z_j\partial z_k}(g(\zeta))X_{j}X_{k}\right|\end{equation} for $\zeta\in\mathbb{T}$ and $X\in(\mathbb{C}^{n})_*$ with
$$\sum_{j=1}^n\frac{\partial(r\circ\Psi)}{\partial z_j}(g(\zeta))X_{j}=0,$$ i.e. $X_1=0$. We have $$\sum_{j,k=1}^n\frac{\partial^2(r\circ\Psi)}{\partial z_j\partial\overline{z}_k}(g(\zeta))X_{j}\overline{X}_{k}=\sum_{j,k,s,t=1}^n\frac{\partial^2 r}{\partial z_s\partial\overline{z}_t}(f(\zeta))\frac{\partial\Psi_s}{\partial z_j}(g(\zeta))\overline{\frac{\partial\Psi_t}{\partial z_k}(g(\zeta))}X_{j}\overline{X}_{k}=$$$$=\sum_{s,t=1}^n\frac{\partial^2 r}{\partial z_s\partial\overline{z}_t}(f(\zeta))Y_{s}\overline{Y}_{t},$$ where $$Y:=\Psi'(g(\zeta))X.$$ Note that $Y\neq 0$. Additionally $$\sum_{s=1}^n\frac{\partial r}{\partial z_s}(f(\zeta))Y_{s}=\sum_{j,s=1}^n\frac{\partial r}{\partial z_s}(f(\zeta))\frac{\partial\Psi_s}{\partial z_j}(g(\zeta))X_j=\sum_{j=1}^n\frac{\partial(r\circ\Psi)}{\partial z_j}(g(\zeta))X_{j}=0.$$ Therefore, by the strong linear convexity of $D$ at $f(\zeta)$ $$\sum_{s,t=1}^n\frac{\partial^2 r}{\partial z_s\partial\overline{z}_t}(f(\zeta))Y_{s}\overline{Y}_{t}>\left|\sum_{s,t=1}^n\frac{\partial^2 r}{\partial z_s\partial z_t}(f(\zeta))Y_{s}Y_{t}\right|.$$ To finish the proof observe that $$\left|\sum_{j,k=1}^n\frac{\partial^2(r\circ\Psi)}{\partial z_j\partial z_k}(g(\zeta))X_{j}X_{k}\right|=\left|\sum_{j,k,s,t=1}^n\frac{\partial^2 r}{\partial z_s\partial z_t}(f(\zeta))\frac{\partial\Psi_s}{\partial z_j}(g(\zeta))\frac{\partial\Psi_t}{\partial z_k}(g(\zeta))X_{j}X_{k}+\right.$$$$\left.+\sum_{j,k,s=1}^n\frac{\partial r}{\partial z_s}(f(\zeta))\frac{\partial^2\Psi_s}{\partial z_j\partial z_k}(g(\zeta))X_{j}X_{k}\right|=$$$$=\left|\sum_{s,t=1}^n\frac{\partial^2 r}{\partial z_s\partial
z_t}(f(\zeta))Y_{s}Y_{t}+\sum_{j,k=2}^n\sum_{s=1}^n\frac{\partial r}{\partial z_s}(f(\zeta))\frac{\partial^2\Psi_s}{\partial z_j\partial z_k}(g(\zeta))X_{j}X_{k}\right|$$ and $$\frac{\partial^2\Psi_s}{\partial z_j\partial z_k}(g(\zeta))=0,\ j,k\geq 2,\ s\geq 1,$$ which gives \eqref{sgf}.
\end{proof}
\begin{remm}\label{rem:theta}
Let $D$ be a bounded domain in $\mathbb C^n$ and let $f:\mathbb{D}\longrightarrow D$ be a (weak) stationary mapping such that $\partial D$ is real analytic in a neighborhood of $f(\mathbb{T})$. Assume moreover that there are a neighborhood $U$ of $f(\overline{\DD})$ and a mapping $\Theta:U\longrightarrow\mathbb{C}^n$ biholomorphic onto its image and the set $D\cap U$ is connected. Then $\Theta\circ f$ is a (weak) stationary mapping of $G:=\Theta(D\cap U)$.
In particular, if $U_1$, $U_2$ are neighborhoods of the closures of domains $D_1$, $D_2$ with real analytic boundaries and $\Theta:U_1\longrightarrow U_2$ is a biholomorphism such that $\Theta(D_1)=D_2$, then $\Theta$ maps (weak) stationary mappings of $D_1$ onto (weak) stationary mappings of $D_2$.
\end{remm}
\begin{proof}
Actually, it is clear that the first two conditions of the definition of (weak) stationary mappings are preserved by $\Theta$. To show the third one we proceed similarly as in the equations \eqref{56}, \eqref{57}, \eqref{58}. Let $f:\mathbb{D}\longrightarrow D$ be a (weak) stationary mapping. The candidates for the mappings in condition (3) (resp. (3')) of Definition~\ref{21} for $\Theta\circ f$ in the domain $G$ are $$((\Theta'\circ f)^{-1})^T\widetilde f\text{\ \ and\ \ }\rho\frac{|((\Theta'\circ f)^{-1})^T\overline{\nabla r\circ f}|}{|\nabla r\circ f|}.$$ Indeed, for $\zeta\in\mathbb{T}$ \begin{multline*}\overline{\nu_{G}(\Theta(f(\zeta)))}=
\frac{\overline{\nabla(r\circ\Theta^{-1})(\Theta(f(\zeta)))}}{|\nabla(r\circ\Theta^{-1})(\Theta(f(\zeta)))|}=\frac{[(\Theta^{-1})'(\Theta(f(\zeta)))]^T\overline{\nabla r(f(\zeta))}}{|[(\Theta^{-1})'(\Theta(f(\zeta)))]^T\overline{\nabla r(f(\zeta))}|}=\\
=\frac{(\Theta'(f(\zeta))^{-1})^T\overline{\nabla r(f(\zeta))}}{|(\Theta'(f(\zeta))^{-1})^T\overline{\nabla r(f(\zeta))}|},
\end{multline*}
hence
\begin{multline*}\zeta\rho(\zeta)\frac{|(\Theta'(f(\zeta))^{-1})^T\overline{\nabla r(f(\zeta))}|}{|\nabla r(f(\zeta))|}\overline{\nu_{G}(\Theta(f(\zeta)))}=\\
=\zeta\rho(\zeta)(\Theta'(f(\zeta))^{-1})^T\overline{\nu_{D}(f(\zeta))}=
(\Theta'(f(\zeta))^{-1})^T\widetilde f(\zeta).
\end{multline*}
\end{proof}
\subsection{Situation (\dag)}\label{dag}
Consider the following situation, denoted by (\dag) (with data $D_0$ and $U_0$):
\begin{itemize}
\item $D_0$ is a bounded domain in $\mathbb{C}^n$, $n\geq 2$;
\item $f_0:\overline{\DD}\ni\zeta\longmapsto(\zeta,0,\ldots,0)\in\overline D_0$, $\zeta\in\overline{\DD}$;
\item $f_0(\mathbb{D})\subset D_0$;
\item $f_0(\mathbb{T})\subset\partial D_0$;
\item $\nu_{D_0}(f_0(\zeta))=(\zeta,0,\ldots,0)$, $\zeta\in\mathbb{T}$;
\item for any $\zeta\in\mathbb{T}$, the point $f_0(\zeta)$ is a point of strong linear convexity of $D_0$;
\item $\partial D_0$ is real analytic in a neighborhood $U_0$ of $f_0(\mathbb{T})$ with a defining function $r_0$;
\item $|\nabla r_0|=1$ on $f_0(\mathbb{T})$ (in particular, $r_{0z}(f_0(\zeta))=(\overline\zeta/2,0,\ldots,0)$, $\zeta\in\mathbb{T}$).
\end{itemize}
Since $r_0$ is real analytic on $U_0\subset\mathbb{R}^{2n}$, it extends in a natural way to a holomorphic function in a neighborhood $U_0^\mathbb{C}\subset\mathbb{C}^{2n}$ of $U_0$. Without loss of generality we may assume that $r_0$ is bounded on $U_0^\mathbb{C}$. Set $$X_0=X_0(U_0,U_0^{\mathbb C}):=\{r\in\mathcal{O}(U_0^\mathbb{C}):\text{$r(U_0)\subset\mathbb{R}$ and $r$ is bounded}\},$$ which equipped with the sup-norm is a (real) Banach space.
\begin{remm} Lempert considered the case when $U_0$ is a neighborhood of a boundary of a bounded domain $D_0$ with real analytic boundary. We shall need more general results to prove the `localization property'.
\end{remm}
\subsection{General lemmas}\label{General lemmas}
We keep the notation from Subsection \ref{dag} and assume Situation (\dag).
Let us introduce some additional objects we shall be dealing with and let us prove more general lemmas (their generality will be useful in the next section).
Consider the Sobolev space $W^{2,2}(\mathbb{T})=W^{2,2}(\mathbb{T},\mathbb{C}^m)$ of functions $f:\mathbb{T}\longrightarrow\mathbb{C}^m$ whose first two derivatives (in the sense of distributions) are in $L^2(\mathbb{T})$. The $W^{2,2}$-norm is denoted by $\|\cdot\|_W$. For the basic properties of $W^{2,2}(\mathbb{T})$ see the Appendix.
Put $$B:=\{f\in W^{2,2}(\mathbb{T},\mathbb{C}^n):f\text{ extends holomorphically on $\mathbb{D}$ and $f(0)=0$}\},$$$$B_0:=\{f\in B:f(\mathbb{T})\subset U_0\},\quad B^*:=\{\overline{f}:f\in B\},$$$$Q:=\{q\in W^{2,2}(\mathbb{T},\mathbb{C}):q(\mathbb{T})\subset\mathbb{R}\},\quad Q_0:=\{q\in Q:q(1)=0\}.$$
It is clear that $B$, $B^*$, $Q$ and $Q_0$ equipped with the norm $\|\cdot\|_W$ are (real) Banach spaces. Note that $B_0$ is an open neighborhood of $f_0$. In what follows, we identify $f\in B$ with its unique holomorphic extension on $\mathbb{D}$.
Let us define the projection $$\pi:W^{2,2}(\mathbb{T},\mathbb{C}^n)\ni f=\sum_{k=-\infty}^{\infty}a_k\zeta^{k}\longmapsto\sum_{k=-\infty}^{-1}a_k\zeta^{k}\in{B^*}.$$ Note that $f\in W^{2,2}(\mathbb{T},\mathbb{C}^n)$ extends holomorphically on $\mathbb{D}$ if and only if $\pi(f)=0$ (and the extension is $\mathcal C^{1/2}$ on $\mathbb{T}$). Actually, it suffices to observe that
$g(\zeta):=\sum_{k=-\infty}^{-1}a_k\zeta^{k}$, $\zeta\in\mathbb{T}$, extends holomorphically on $\mathbb{D}$ if and only if $a_k=0$ for $k<0$. This follows immediately from the fact that the mapping $\mathbb{T}\ni\zeta\longmapsto g(\overline\zeta)\in\mathbb{C}^n$ extends holomorphically on $\mathbb{D}$.
Consider the mapping $\Xi:X_0\times\mathbb{C}^n\times B_0\times
Q_0\times\mathbb{R}\longrightarrow Q\times{B^*}\times\mathbb{C}^n$ defined by
$$\Xi(r,v,f,q,\lambda):=(r\circ f,\pi(\zeta(1+q)(r_z\circ f)),f'(0)-\lambda v),$$ where $\zeta$ is treated as the identity function on $\mathbb{T}$.
We have the following
\begin{lemm}\label{cruciallemma} There exist a neighborhood $V_0$ of $(r_0,f_0'(0))$ in $X_0\times\mathbb{C}^n$ and a real analytic mapping $\Upsilon:V_0\longrightarrow B_0\times Q_0\times\mathbb{R}$ such that for any $(r,v)\in V_0$ we have $\Xi(r,v,\Upsilon(r,v))=0$.
\end{lemm}
Let $\widetilde\Xi:X_0\times\mathbb{C}^n\times B_0\times Q_0\times(0,1)\longrightarrow Q\times{B^*}\times\mathbb{C}^n$ be defined as $$\widetilde\Xi(r,w,f,q,\xi):=(r\circ f,\pi(\zeta(1+q)(r_z\circ f)),f(\xi)-w).$$
Analogously we have
\begin{lemm}\label{cruciallemma1} Let $\xi_0\in(0,1)$. Then there exist a neighborhood $W_0$ of $(r_0,f_0(\xi_0))$ in $X_0\times D_0$ and a real analytic mapping $\widetilde\Upsilon:W_0\longrightarrow B_0\times Q_0\times(0,1)$ such that for any $(r,w)\in W_0$ we have $\widetilde\Xi(r,w,\widetilde\Upsilon(r,w))=0$.
\end{lemm}
\begin{proof}[Proof of Lemmas \ref{cruciallemma} and \ref{cruciallemma1}]
We will prove the first lemma. Then we will see that a proof of the second one reduces to that proof.
We claim that $\Xi$ is real analytic. The only problem is to show that the mapping $$T: X_0\times B_0\ni(r,f)\longmapsto r\circ f\in Q$$ is real analytic (the real analyticity of the mapping $X_0\times B_0\ni(r,f)\longmapsto r_z\circ f\in W^{2,2}(\mathbb{T},\mathbb{C}^n)$ follows from this claim).
Fix $r\in X_0$, $f\in B_0$ and take $\varepsilon>0$ so that the $2n$-dimensional polydisc $P_{2n}(f(\zeta),\varepsilon)$ is contained in $U_0^\mathbb{C}$ for any $\zeta\in\mathbb{T}$. Then any function $\widetilde r\in X_0$ is holomorphic in $U_0^\mathbb{C}$, so it may be expanded as a holomorphic series convergent in $P_{2n}(f(\zeta),\varepsilon)$. Losing no generality we may assume that the $n$-dimensional polydiscs $P_{n}(f(\zeta),\varepsilon)$, $\zeta\in\mathbb{T}$, satisfy $P_{n}(f(\zeta),\varepsilon)\subset U_0$. This gives an expansion of the function $\widetilde r$ at any point $f(\zeta)$, $\zeta\in\mathbb{T}$, into a series $$\sum_{\alpha\in\mathbb{N}_0^{2n}}\frac{1}{\alpha!}\frac{\partial^{|\alpha|}\widetilde r}{\partial x^\alpha}(f(\zeta))x^\alpha$$ convergent to $\widetilde r(f(\zeta)+x)$, provided that $x=(x_1,\ldots,x_{2n})\in P_{2n}(0,\varepsilon)$ (where $\mathbb{N}_0:=\mathbb{N}\cup\{0\}$ and $|\alpha|:=\alpha_1+\ldots+\alpha_{2n}$). Hence \begin{equation}\label{69}T(r+\varrho,f+h)=\sum_{\alpha\in\mathbb{N}_0^{2n}}\frac{1}{\alpha!}\left(\frac{\partial^{|\alpha|}r}{\partial x^\alpha}\circ f\right)h^\alpha+\sum_{\alpha\in\mathbb{N}_0^{2n}}\frac{1}{\alpha!}\left(\frac{\partial^{|\alpha|}\varrho}{\partial x^\alpha}\circ f\right)h^\alpha\end{equation} pointwise for $\varrho\in X_0$ and $h\in W^{2,2}(\mathbb{T},\mathbb{C}^n)$ with $\|h\|_{\sup}<\varepsilon$.
Put $P:=\bigcup_{\zeta\in \mathbb{T}} P_{2n}(f(\zeta),\varepsilon)$ and for $\widetilde r\in X_0$ put $\|\widetilde r\|_P:=\sup_P|\widetilde r|$. Let $\widetilde r$ be equal to $r$ or to $\varrho$, where $\varrho$ lies in a neighborhood of $0$ in $X_0$. The Cauchy inequalities give
\begin{equation}\label{series}\left|\frac{\partial^{|\alpha|}\widetilde r}{\partial x^\alpha}(f(\zeta))\right|\leq\frac{\alpha!\|\widetilde r\|_{P}}{\varepsilon^{|\alpha|}},\quad\zeta\in\mathbb{T}.\end{equation}
Therefore, $$\left|\left|\frac{\partial^{|\alpha|}\widetilde r}{\partial x^\alpha}\circ f\right|\right|_W\leq C_1\frac{\alpha!\|\widetilde r\|_{P}}{\varepsilon^{|\alpha|}}$$ for some $C_1>0$.
There is $C_2>0$ such that $$\|gh^\alpha\|_W\leq C_2^{|\alpha|+1}\|g\|_W\|h_1\|^{\alpha_1}_W\cdotp\ldots\cdotp\|h_{2n}\|^{\alpha_{2n}}_W$$ for $g\in W^{2,2}(\mathbb{T},\mathbb{C})$, $h\in W^{2,2}(\mathbb{T},\mathbb{C}^n)$, $\alpha\in\mathbb{N}_0^{2n}$ (see Appendix for a proof of this fact). Using the above inequalities we infer that $$\sum_{\alpha\in\mathbb{N}_0^{2n}}\left|\left|\frac{1}{\alpha!}\left(\frac{\partial^{|\alpha|}\widetilde r}{\partial x^\alpha}\circ f\right)h^\alpha\right|\right|_W$$ is convergent if $h$ is small enough in the norm $\|\cdot\|_W$. Therefore, the series~\eqref{69} is absolutely convergent in the norm $\|\cdot\|_W$, whence $T$ is real analytic.
To show the existence of $V_0$ and $\Upsilon$ we will make use of the Implicit Function Theorem. More precisely, we shall show that the partial derivative $$\Xi_{(f,q,\lambda)}(r_0,f_0'(0),f_0,0,1):B\times Q_0\times\mathbb{R}\longrightarrow Q\times{B^*}\times\mathbb{C}^n$$ is an isomorphism.
Observe that for any $(\widetilde{f},\widetilde{q},\widetilde{\lambda})\in B\times Q_0\times\mathbb{R}$ the following equality holds
\begin{multline*}\Xi_{(f,q,\lambda)}(r_0,f_0'(0),f_0,0,1)(\widetilde{f},\widetilde{q},\widetilde{\lambda})=\left.\frac{d}{dt}
\Xi(r_0,f_0'(0),f_0+t\widetilde{f},t\widetilde{q},1+t\widetilde{\lambda})\right|_{t=0}=\\
=((r_{0z}\circ f_0)\widetilde{f}+(r_{0\overline{z}}\circ f_0)\overline{\widetilde{f}},\pi(\zeta\widetilde{q}r_{0z}\circ f_0+\zeta(r_{0zz} \circ
f_0)\widetilde{f}+\zeta(r_{0z\overline{z}}\circ f_0)\overline{\widetilde{f}}),\widetilde{f}'(0)-\widetilde{\lambda}f_0'(0)),
\end{multline*}
where we treat ${r_0}_z,{r_0}_{\overline{z}}$ as row vectors, $\widetilde{f},\overline{\widetilde{f}}$ as column vectors and $r_{0zz}=\left[\frac{\partial^2r_0}{\partial z_j\partial z_k}\right]_{j,k=1}^n$, $r_{0z\overline{z}}=\left[\frac{\partial^2r_0}{\partial z_j\partial\overline z_k}\right]_{j,k=1}^n$ as $n\times n$ matrices.
By the Bounded Inverse Theorem it suffices to show that $\Xi_{(f,q,\lambda)}(r_0,f_0'(0),f_0,0,1)$ is bijective, i.e. for $(\eta,\varphi,v)\in Q\times B^*\times\mathbb{C}^n$ there exists exactly one $(\widetilde{f},\widetilde{q},\widetilde{\lambda})\in B\times Q_0\times\mathbb{R}$ satisfying
\begin{equation}
(r_{0z}\circ f_0)\widetilde{f}+(r_{0\overline{z}}\circ f_0)\overline{\widetilde{f}}=\eta,
\label{al1}
\end{equation}
\begin{equation}
\pi(\zeta\widetilde{q}r_{0z}\circ f_0+\zeta (r_{0zz}\circ f_0)\widetilde{f}+\zeta(r_{0z\overline{z}}\circ f_0)\overline{\widetilde{f}})=\varphi,
\label{al2}
\end{equation}
\begin{equation}
\widetilde{f}'(0)-\widetilde{\lambda} f_0'(0)=v.
\label{al3}
\end{equation}
First we show that $\widetilde\lambda$ and $\widetilde f_1$ are uniquely determined. Observe that, in view of the assumptions, (\ref{al1}) is just $$\frac{1}{2}\overline{\zeta}\widetilde{f}_1+\frac{1}{2}\zeta\overline{\widetilde{f}_1}=\eta$$ or equivalently
\begin{equation}
\re(\widetilde{f}_1/\zeta)=\eta\text{ (on }\mathbb{T}).
\label{al4}
\end{equation}
Note that the equation (\ref{al4}) uniquely determines $\widetilde{f}_1/\zeta\in W^{2,2}(\mathbb{T},\mathbb{C})\cap{\mathcal O}(\mathbb{D})\cap{\mathcal C}(\overline{\DD})$ up to an imaginary additive constant, which may be computed using (\ref{al3}). Actually, $\eta=\re G$ on $\mathbb{T}$ for some function $G\in W^{2,2}(\mathbb{T},\mathbb{C})\cap{\mathcal O}(\mathbb{D})\cap{\mathcal C}(\overline{\DD})$. To see this, let us expand $\eta(\zeta)=\sum_{k=-\infty}^{\infty}a_k\zeta^{k}$, $\zeta\in\mathbb{T}$. From the equality $\eta(\zeta)=\overline{\eta(\zeta)}$, $\zeta\in\mathbb{T}$, we get \begin{equation}\label{65}\sum_{k=-\infty}^{\infty}a_k\zeta^{k}=\sum_{k=-\infty}^{\infty}\overline a_k\zeta^{-k}=\sum_{k=-\infty}^{\infty}\overline a_{-k}\zeta^{k},\ \zeta\in\mathbb{T},\end{equation} so $a_{-k}=\overline a_k$, $k\in\mathbb{Z}$. Hence $$\eta(\zeta)=a_0+\sum_{k=1}^\infty 2\re(a_k\zeta^k)=\re\left(a_0+2\sum_{k=1}^\infty a_k\zeta^k\right),\ \zeta\in\mathbb{T}.$$ Set $$G(\zeta):=a_0+2\sum_{k=1}^\infty a_k\zeta^k,\ \zeta\in\mathbb{D}.$$ This series is convergent for $\zeta\in\mathbb{D}$, so $G\in{\mathcal O}(\mathbb{D})$. Further, the function $G$ extends continuously on $\overline{\DD}$ (to the function denoted by the same letter) and the extension lies in $W^{2,2}(\mathbb{T},\mathbb{C})$. Clearly, $\eta=\re G$ on $\mathbb{T}$.
We are searching for $C\in\mathbb{R}$ such that the functions $\widetilde{f}_1:=\zeta(G+iC)$ and $\theta:=\im(\widetilde{f}_1/\zeta)$ satisfy $$\eta(0)+i\theta(0)=\widetilde{f}_1'(0)$$ and
$$\eta(0)+i\theta(0)-\widetilde{\lambda}\re{f_{01}'(0)}-i\widetilde{\lambda}\im{{f_{01}'(0)}}=\re{v_1}+i\im{v_1}.$$ But $$\eta(0)-\widetilde{\lambda}\re{f_{01}'(0)}=\re{v_1},$$ which yields $\widetilde{\lambda}$ and then $\theta(0)$, consequently the number $C$.
Having $\widetilde{\lambda}$ and once again using (\ref{al3}), we find uniquely determined $\widetilde{f}_2'(0),\ldots,\widetilde{f}_n'(0)$.
Therefore, the equations \eqref{al1} and \eqref{al3} are satisfied by uniquely determined $\widetilde f_1$, $\widetilde\lambda$ and $\widetilde{f}_2'(0),\ldots,\widetilde{f}_n'(0)$.
Consider (\ref{al2}), which is the system of $n$ equations with unknowns $\widetilde{q},\widetilde{f}_2,\ldots,\widetilde{f}_n$. Observe that $\widetilde{q}$ appears only in the first of the equations and the remaining $n-1$ equations mean exactly that the mapping
\begin{equation}
\zeta(r_{0\widehat{z}\widehat{z}}\circ f_0)
\widehat{\widetilde{f}}+\zeta(r_{0\widehat{z}\widehat{\overline{z}}}\circ f_0)\widehat{\overline{\widetilde{f}}}-\psi
\label{al5}
\end{equation}
extends holomorphically on $\mathbb{D}$, where $\widehat{a}:=(a_{2},\ldots,a_{n})$ and $\psi\in W^{2,2}(\mathbb{T},\mathbb{C}^{n-1})$ may be obtained from $\varphi$ and $\widetilde{f}_1$. Indeed, to see this, write (\ref{al2}) in the form $$\pi(F_{1}+\zeta F_{2}+\zeta F_{3})=(\varphi_1,\ldots,\varphi_n),$$ where $$F_1:=(\widetilde q,0,\ldots,0),$$$$F_2:=(A_{j})_{j=1}^n,\ A_{j}:=\sum\limits_{k=1}^n(r_{0z_jz_k}\circ f_0)\widetilde{f}_k,$$$$F_3=(B_{j})_{j=1}^n,\ B_{j}:=\sum\limits_{k=1}^n(r_{0z_j\overline z_k}\circ f_0)\overline{\widetilde{f}_k}.$$ It follows that $$\widetilde{q}+\zeta A_1+\zeta B_1-\varphi_1$$ and $$\zeta A_j+\zeta B_j-\varphi_j,\ j=2,\ldots,n,$$ extend holomorphically on $\mathbb{D}$ and $$\psi:=\left(\varphi_j-\zeta(r_{0z_jz_1}\circ f_0)\widetilde{f}_1-\zeta(r_{0z_j\overline z_1}\circ f_0)\overline{\widetilde{f}_1}\right)_{j=2}^n.$$
Put $$g(\zeta):=\widehat{\widetilde{f}}(\zeta)/\zeta,\quad\alpha(\zeta):=\zeta^2r_{0\widehat{z}\widehat{z}}(f_0(\zeta)),
\quad\beta(\zeta):=r_{0\widehat{z}\widehat{\overline{z}}}(f_0(\zeta)).$$
Observe that $\alpha(\zeta)$, $\beta(\zeta)$ are $(n-1)\times(n-1)$ matrices depending real analytically on $\zeta$ and $g(\zeta)$ is a column vector in $\mathbb{C}^{n-1}$. This allows us to reduce \eqref{al5} to the following problem: we have to find a unique $g\in W^{2,2}(\mathbb{T},\mathbb{C}^{n-1})\cap{\mathcal O}(\mathbb{D})\cap{\mathcal C}(\overline{\DD})$ such that \begin{equation}
\alpha g+\beta\overline{g}-\psi\text{ extends holomorphically on $\mathbb{D}$ and } g(0)={\widehat{\widetilde{f}'}}(0).
\label{al6}
\end{equation}
The fact that every $f_0(\zeta)$ is a point of strong linear convexity of the domain $D_0$ may be written as
\begin{equation}
|X^T\alpha(\zeta)X|<X^{T}\beta(\zeta)\overline{X},\ \zeta\in\mathbb{T},\ X\in(\mathbb{C}^{n-1})_*.
\label{al7}
\end{equation}
Note that $\beta(\zeta)$ is self-adjoint and strictly positive, hence using Proposition \ref{12} we get a mapping $H\in{\mathcal O}(\overline{\DD},\mathbb{C}^{(n-1)\times(n-1)})$ such that $\det H\neq 0$ on $\overline{\DD}$ and $HH^*=\beta$ on $\mathbb{T}$. Using this notation, (\ref{al6}) is
equivalent to
\begin{equation}
H^{-1}\alpha g+H^*\overline{g}-H^{-1}\psi\text{ extends holomorphically on $\mathbb{D}$}
\label{al8}
\end{equation}
or, if we denote $h:=H^Tg$, $\gamma:=H^{-1}\alpha (H^T)^{-1}$, to
\begin{equation}
\gamma h+\overline{h}-H^{-1}\psi\text{ extends holomorphically on $\mathbb{D}.$}
\label{al9}
\end{equation}
For any $\zeta\in\mathbb{T}$ the operator norm of the symmetric matrix $\gamma(\zeta)$ is uniformly less than 1. In fact, from (\ref{al7}) for any $X\in\mathbb{C}^{n-1}$ with $|X|=1$ \begin{multline*}|X^{T}\gamma(\zeta)X|=|X^{T}H(\zeta)^{-1}\alpha(\zeta)(H(\zeta)^T)^{-1}X|<X^TH(\zeta)^{-1}\beta(\zeta)
\overline{(H(\zeta)^T)^{-1}X}=\\=X^TH(\zeta)^{-1}H(\zeta)H(\zeta)^*\overline{(H(\zeta)^T)^{-1}}
\overline{X}=|X|^2=1,\end{multline*} so, by a compactness argument, $|X^{T}\gamma(\zeta)X|\leq 1-\widetilde\varepsilon$ for some $\widetilde\varepsilon>0$ independent of $\zeta$ and $X$. Thus $\|\gamma(\zeta)\|\leq 1-\widetilde\varepsilon$ by Proposition \ref{59}.
We have to prove that there is a unique solution $h\in W^{2,2}(\mathbb{T},\mathbb{C}^{n-1})\cap{\mathcal O}(\mathbb{D})\cap{\mathcal C}(\overline{\DD})$ of (\ref{al9}) such that $h(0)=a$ with a given $a\in\mathbb{C}^{n-1}$.
Define the operator $$P:W^{2,2}(\mathbb{T},\mathbb{C}^{n-1})\ni\sum_{k=-\infty}^{\infty}a_k\zeta^{k}\longmapsto\overline{\sum_{k=-\infty}^{-1}a_k\zeta^{k}}\in W^{2,2}(\mathbb{T},\mathbb{C}^{n-1}),$$ where $a_k\in\mathbb{C}^{n-1}$, $k\in\mathbb{Z}$.
We will show that a mapping $h\in
W^{2,2}(\mathbb{T},\mathbb{C}^{n-1})\cap{\mathcal O}(\mathbb{D})\cap{\mathcal C}(\overline{\DD})$ satisfies (\ref{al9}) and $h(0)=a$ if and only if it is a fixed point of the mapping $$K:W^{2,2}(\mathbb{T},\mathbb{C}^{n-1})\ni h\longmapsto P(H^{-1}\psi-\gamma h)+a\in W^{2,2}(\mathbb{T},\mathbb{C}^{n-1}).$$
Indeed, take $h\in
W^{2,2}(\mathbb{T},\mathbb{C}^{n-1})\cap{\mathcal O}(\mathbb{D})\cap{\mathcal C}(\overline{\DD})$ and suppose that $h(0)=a$ and $\gamma h+\overline{h}-H^{-1}\psi$ extends holomorphically on $\mathbb{D}$. Then $$h=a+\sum_{k=1}^{\infty}a_k\zeta^{k},\quad\overline{h}=\overline{a}+\sum_{k=1}^{\infty}\overline a_k\zeta^{-k}=\sum_{k=-\infty}^{-1}\overline a_{-k}\zeta^{k}+\overline{a},$$ $$P(h)=0,\quad P(\overline{h})=\sum_{k=1}^{\infty}a_k\zeta^{k}=h-a$$ and $$P(\gamma h+\overline{h}-H^{-1}\psi)=0,$$ which implies $$P(H^{-1}\psi-\gamma h)=h-a$$ and finally $K(h)=h$. Conversely, suppose that $K(h)=h$. Then $$P(H^{-1}\psi-\gamma h)=h-a=\sum_{k=1}^{\infty}a_k\zeta^{k}+a_1-a,\quad P(h)=0$$ and
$$P(\overline{h})=\sum_{k=1}^{\infty}a_k\zeta^{k}=h-a_1,$$ from which it follows that $$P(\gamma h+\overline{h}-H^{-1}\psi)=P(\overline{h})-P(H^{-1}\psi-\gamma h)=a-a_1$$ and $$P(\gamma h+\overline{h}-H^{-1}\psi)=0\text{ iff }a=a_1.$$ Observe that $h(0)=K(h)(0)=P(H^{-1}\psi-\gamma h)(0)+a=a$.
We shall make use of the Banach Fixed Point Theorem. To do this, consider $W^{2,2}(\mathbb{T},\mathbb{C}^{n-1})$ equipped with the following norm $$\|h\|_{\varepsilon}:=\|h\|_L+\varepsilon\|h'\|_L+
\varepsilon^2\|h''\|_L,$$ where $\varepsilon>0$ and $\|\cdot\|_L$ is the $L^2$-norm (it is a Banach space). We will prove that $K$ is a contraction with respect to the norm $\|\cdot\|_{\varepsilon}$ for sufficiently small $\varepsilon$. Indeed, there is $\widetilde\varepsilon>0$ such that for any $h_1,h_2\in W^{2,2}(\mathbb{T},\mathbb{C}^{n-1})$
\begin{equation}
\|K(h_1)-K(h_2)\|_L=\|P(\gamma(h_2-h_1))\|_L\leq\|\gamma(h_2-h_1)\|_L\leq (1-\widetilde\varepsilon)\|h_2-h_1\|_L.
\label{al10}
\end{equation}
Moreover,
\begin{multline}
\|K(h_1)'-K(h_2)'\|_L= \|P(\gamma h_2)'-P(\gamma h_1)'\|_L\leq\\
\leq\|(\gamma h_2)'-(\gamma h_1)'\|_L= \|\gamma '(h_2-h_1)+\gamma(h_2'-h_1')\|_L.
\label{al11}
\end{multline} Furthermore,
\begin{equation}
\|K(h_1)''-K(h_2)''\|_L\leq\|\gamma ''(h_2-h_1)\|_L+2\|\gamma '(h_2'-h_1')\|_L+\|\gamma
(h_1''-h_2'')\|_L.\label{al12}
\end{equation}
Using the finiteness of $\|\gamma '\|$, $\|\gamma ''\|$ and putting (\ref{al10}), (\ref{al11}), (\ref{al12}) together we see that there exists $\varepsilon>0$ such that $K$ is a contraction w.r.t. the norm $\|\cdot\|_{\varepsilon}$.
We have found $\widetilde{f}$ and $\widetilde{\lambda}$ satisfying (\ref{al1}), (\ref{al3}), and the last $n-1$ equations from (\ref{al2}) are satisfied.
It remains to show that there exists a unique $\widetilde{q}\in Q_0$ such that $\widetilde{q}+\zeta A_1+\zeta B_1-\varphi_1$ extends holomorphically on $\mathbb{D}$.
Comparing the coefficients as in \eqref{65}, we see that if $$\pi(\zeta A_1+\zeta B_1-\varphi_1)=\sum_{k=-\infty}^{-1}a_k\zeta^{k}$$
then $\widetilde{q}$ has to be taken as $$-\sum_{k=-\infty}^{-1}a_k\zeta^{k}-\sum_{k=0}^{\infty}b_k\zeta^{k}$$
with $b_k:=\overline a_{-k}$ for $k\geq 1$ and $b_0\in\mathbb{R}$ uniquely determined by $\widetilde{q}(1)=0$.\\
Let us show that the proof of the second Lemma follows from the proof of the first one.
Since $\widetilde\Xi$ is real analytic it suffices to prove that the derivative $$\widetilde\Xi_{(f,q,\xi)}(r_0,f_0(\xi_0),f_0,0,\xi_0):B\times Q_0\times\mathbb{R}\longrightarrow Q\times{B^*}\times\mathbb{C}^n$$ is invertible.
For $(\widetilde{f},\widetilde{q},\widetilde{\xi})\in B\times Q_0\times\mathbb{R}$ we get
\begin{multline*}
\widetilde\Xi_{(f,q,\xi)}(r_0,f_0(\xi_0),f_0,0,\xi_0)(\widetilde{f},\widetilde{q},\widetilde{\xi})=\left.\frac{d}{dt}
\widetilde\Xi(r_0,f_0(\xi_0),f_0+t\widetilde{f},t\widetilde{q},\xi_0+t\widetilde{\xi})\right|_{t=0}=\\
=((r_{0z}\circ f_0)\widetilde{f}+(r_{0\overline{z}}\circ f_0)\overline{\widetilde{f}},
\pi(\zeta\widetilde{q}r_{0z}\circ f_0+\zeta(r_{0zz}\circ f_0)\widetilde{f}+\zeta(r_{0z\overline{z}}\circ f_0)\overline{\widetilde{f}}),\widetilde{f}(\xi_0)+\widetilde\xi f_0'(\xi_0)).
\end{multline*}
We have to show that for $(\eta,\varphi,w)\in Q\times B^*\times\mathbb{C}^n$ there exists exactly one $(\widetilde{f},\widetilde{q},\widetilde{\xi})\in B\times Q_0\times\mathbb{R}$ satisfying
\begin{equation}
(r_{0z}\circ f_0)\widetilde{f}+(r_{0\overline{z}}\circ f_0)\overline{\widetilde{f}}=\eta,
\label{1al1}
\end{equation}
\begin{equation}
\pi(\zeta\widetilde{q}r_{0z}\circ f_0+\zeta (r_{0zz}\circ f_0)\widetilde{f}+\zeta(r_{0z\overline{z}}\circ f_0)\overline{\widetilde{f}})=\varphi,
\label{1al2}
\end{equation}
\begin{equation}
\widetilde f(\xi_0)+\widetilde\xi f_0'(\xi_0)=w.
\label{1al3}
\end{equation}
The equation (\ref{1al1}) turns out to be
\begin{equation}
\re(\widetilde{f}_1/\zeta)=\eta\text{ (on }\mathbb{T}).
\label{1al4}
\end{equation}
The equation above uniquely determines $\widetilde{f}_1/\zeta\in W^{2,2}(\mathbb{T},\mathbb{C})\cap{\mathcal O}(\mathbb{D})\cap{\mathcal C}(\overline{\DD})$ up to an imaginary additive constant, which may be computed using (\ref{1al3}). Indeed, there exists $G\in W^{2,2}(\mathbb{T},\mathbb{C})\cap{\mathcal O}(\mathbb{D})\cap{\mathcal C}(\overline{\DD})$ such that $\eta=\re G$ on $\mathbb{T}$. We are searching for $C\in\mathbb{R}$ such that the functions $\widetilde{f}_1:=\zeta(G+iC)$ and $\theta:=\im(\widetilde{f}_1/\zeta)$ satisfy $$\xi_0\eta(\xi_0)+i\xi_0\theta(\xi_0)=\widetilde{f}_1(\xi_0)$$ and $$\xi_0(\eta(\xi_0)+i\theta(\xi_0))+\widetilde{\xi}\re{f_{01}'(\xi_0)}+i\widetilde{\xi}\im{{f_{01}'(\xi_0)}}=
\re{w_1}+i\im{w_1}.$$ But $$\xi_0\eta(\xi_0)+\widetilde{\xi}\re{f_{01}'(\xi_0)}=\re{w_1},$$ which yields $\widetilde{\xi}$ and then $\theta(\xi_0)$, consequently the number $C$. Having $\widetilde{\xi}$ and once again using (\ref{1al3}), we find uniquely determined $\widetilde{f}_2(\xi_0),\ldots,\widetilde{f}_n(\xi_0)$.
Therefore, the equations \eqref{1al1} and \eqref{1al3} are satisfied by uniquely determined $\widetilde f_1$, $\widetilde\xi$ and $\widetilde{f}_2(\xi_0),\ldots,\widetilde{f}_n(\xi_0)$.
In the remaining part of the proof we change the second condition of \eqref{al6} to $$g(\xi_0)={\widehat{\widetilde{f}}}(\xi_0)/\xi_0$$ and we have to prove that there is a unique solution $h\in W^{2,2}(\mathbb{T},\mathbb{C}^{n-1})\cap{\mathcal O}(\mathbb{D})\cap{\mathcal C}(\overline{\DD})$ of (\ref{al9}) such that $h(\xi_0)=a$ with a given $a\in\mathbb{C}^{n-1}$. Let $\tau$ be an automorphism of $\mathbb{D}$ (so it extends holomorphically near $\overline{\DD}$), which maps $0$ to $\xi_0$, i.e. $$\tau(\xi):=\frac{\xi_0-\xi}{1-\overline\xi_0\xi},\ \xi\in\mathbb{D}.$$ Let the maps $P,K$ be as before. Then $h\in W^{2,2}(\mathbb{T},\mathbb{C}^{n-1})\cap{\mathcal O}(\mathbb{D})\cap{\mathcal C}(\overline{\DD})$ satisfies
(\ref{al9}) and $h(\xi_0)=a$ if and only if $h\circ\tau\in W^{2,2}(\mathbb{T},\mathbb{C}^{n-1})\cap{\mathcal O}(\mathbb{D})\cap{\mathcal C}(\overline{\DD})$ satisfies (\ref{al9}) and $(h\circ\tau)(0)=a$. We already know that there is exactly one $\widetilde h\in W^{2,2}(\mathbb{T},\mathbb{C}^{n-1})\cap{\mathcal O}(\mathbb{D})\cap{\mathcal C}(\overline{\DD})$ satisfying (\ref{al9}) and $\widetilde h(0)=a$. Setting $h:=\widetilde h\circ\tau^{-1}$, we get the claim.
\end{proof}
\subsetbsection{Topology in the class of domains with real analytic boundaries}\label{topol}
We introduce a concept of a domain being close to some other domain. Let $D_0\subset\mathbb{C}^n$ be a bounded domain with real analytic boundary. Then there exist a neighborhood $U_0$ of $\partial D_0$ and a real analytic defining function $r_0:U_0\longrightarrow\mathbb{R}$ such that $\nabla r_0$ does not vanish in $U_0$ and $$D_0\cap U_0=\{z\in U_0:r_0(z)<0\}.$$
\begin{dff}
We say that domains $D$ \textit{tend to} $D_0$ $($or are \textit{close to} $D_0${$)$} if one can choose their defining functions $r\in X_0$ such that $r$ tend to $r_0$ in $X_0$.
\end{dff}
\begin{remm} If $r\in X_0$ is near to $r_0$ with respect to the topology in $X_0$, then $\{z\in U_0:r(z)=0\}$ is a compact real analytic hypersurface which bounds a bounded domain. We denote it by $D^{r}$.
Moreover, if $D^{r_0}$ is strongly linearly convex then a domain $D^r$ is also strongly linearly convex provided that $r$ is near $r_0$.
\end{remm}
\subsection{Statement of the main result of this section}
\begin{remm}\label{f} Assume that $D^r$ is a strongly linearly convex domain bounded by a real analytic hypersurface $\{z\in U_0:r(z)=0\}$. Let $\xi\in(0,1)$ and $w\in(\mathbb{C}^n)_*$.
Then a function $f\in B_0$ satisfies the conditions $$f\text{ is a weak stationary mapping of }D^r,\ f(0)=0,\ f(\xi)=w$$ if and only if there exists $q\in Q_0$ such that $q>-1$ and $\widetilde\Xi(r,w,f,q,\xi)=0$.
Actually, from $\widetilde\Xi(r,w,f,q,\xi)=0$ we deduce immediately that $r\circ f=0$ on $\mathbb{T}$, $f(\xi)=w$ and $\pi(\zeta(1+q)(r_z\circ f))=0$. From the first equality we get $f(\mathbb{T})\subset \partial D^{r}$. From the last one we deduce that the condition (3') of Definition~\ref{21} is satisfied (with $\rho:=(1+q)|r_z\circ f|$). Since $D^{r}$ is strongly linearly convex, $\overline{D^r}$ is polynomially convex (use the fact that projections of $\mathbb{C}$-convex domains are $\mathbb{C}$-convex, as well, and the fact that $D^r$ is smooth). In particular, $$f(\overline{\DD})=f(\widehat{\mathbb{T}})\subset\widehat{f(\mathbb{T})}\subset\widehat{\overline{D^r}}=\overline{D^r},$$ where $\widehat S:=\{z\in\mathbb{C}^m:|P(z)|\leq\sup_S|P|\text{ for any polynomial }P\in\mathbb{C}[z_1,\ldots,z_m]\}$ is the polynomial hull of a set $S\subset\mathbb{C}^m$.
Note that this implies $f(\mathbb{D})\subset D^r$ --- this follows from the fact that $\partial D^r$ does not contain non-constant analytic discs (as $D^r$ is strongly pseudoconvex).
The opposite implication is clear.
In a similar way we show that for any $v\in(\mathbb{C}^n)_*$ and $\lambda>0$, a function $f\in B_0$ satisfies the conditions $$f\text{ is a weak stationary mapping of }D^r,\ f(0)=0,\ f'(0)=\lambda v$$ if and only if there exists $q\in Q_0$ such that $q>-1$ and $\Xi(r,v,f,q,\lambda)=0$.
\end{remm}
\begin{propp}\label{13} Let $D_0\subset\mathbb{C}^n$, $n\geq 2$, be a strongly linearly convex domain with real analytic boundary and let $f_0:\mathbb{D}\longrightarrow D_0$ be an $E$-mapping.
$(1)$ Let $\xi_0\in(0,1)$. Then there exist a neighborhood $W_0$ of $(r_0,f_0(\xi_0))$ in $X_0\times D_0$ and real analytic mappings $$\Lambda:W_0\longrightarrow\mathcal{C}^{1/2}(\overline{\mathbb{D}}),\ \Omega:W_0\longrightarrow(0,1)$$ such that $$\Lambda(r_0,f_0(\xi_0))=f_0,\ \Omega(r_0,f_0(\xi_0))=\xi_0$$ and for any $(r,w)\in W_0$ the mapping
$f:=\Lambda(r,w)$ is an $E$-mapping of $D^{r}$ satisfying $$f(0)=f_0(0)\text{ and }f(\Omega(r,w))=w.$$
$(2)$ There exist a neighborhood $V_0$ of $(r_0,f_0'(0))$ in $X_0\times\mathbb{C}^n$
and a real analytic mapping $$\Gamma:V_0\longrightarrow\mathcal{C}^{1/2}(\overline{\mathbb{D}})$$ such that $$\Gamma(r_0,f_0'(0))=f_0$$ and for any $(r,v)\in V_0$ the mapping $f:=\Gamma(r,v)$ is an $E$-mapping of $D^{r}$ satisfying $$f(0)=f_0(0)\text{ and }f'(0)=\lambda v\text{ for some }\lambda>0.$$
\end{propp}
\begin{proof}
Observe that Proposition \ref{11} provides us with a mapping $g_0=\Phi\circ f_0$ and a domain $G_0:=\Phi(D_0)$ giving data for situation (\dag) (here $\partial D_0$ is contained in $U_0$). Clearly, $\rho_0:=r_0\circ\Phi^{-1}$ is a defining function of $G_0$.
Using Lemmas \ref{cruciallemma}, \ref{cruciallemma1} we get neighborhoods $V_0$, $W_0$ of $(\rho_0, g_0'(0))$, $(\rho_0,g_0(\xi_0))$ respectively and real analytic mappings $\Upsilon$, $\widetilde\Upsilon$ such that $ \Xi(\rho,v,\Upsilon(\rho,v))=0$ on $V_0$ and $ \widetilde\Xi(\rho,w,\widetilde\Upsilon(\rho,w))=0$ on $W_0$. Define $$\widehat\Lambda:=\pi_B\circ\widetilde\Upsilon,\quad\Omega:=\pi_\mathbb{R}\circ\widetilde\Upsilon,\quad\widehat\Gamma:=\pi_B\circ\Upsilon,$$ where $$\pi_B:B\times Q_0\times\mathbb{R}\longrightarrow B,\quad\pi_\mathbb{R}:B\times Q_0\times\mathbb{R}\longrightarrow\mathbb{R},\ $$ are the projections.
If $\rho$ is sufficiently close to $\rho_0$, then the hypersurface $\{\rho=0\}$ bounds a strongly linearly convex domain. Moreover, then $\widehat\Lambda(\rho,w)$ and $\widehat\Gamma(\rho,v)$ are extremal mappings in $G^{\rho}$ (see Remark~\ref{f}).
Composing $\widehat\Lambda(\rho,w)$ and $\widehat\Gamma(\rho,v)$ with $\Phi^{-1}$ and making use of Remark \ref{rem:theta} we get weak stationary mappings in $D^r$, where $r:=\rho\circ\Phi$. To show that they are $E$-mappings we proceed as follows. If $D^r$ is sufficiently close to $D_0$ (this depends on a distance between $\rho$ and $\rho_0$), the domain $D^r$ is strongly linearly convex, so by the results of Section \ref{55} $$\Lambda(r,w):=\Phi^{-1}\circ\widehat\Lambda(\rho,w)\text{\ and\ }\Gamma(r,v):=\Phi^{-1}\circ\widehat\Gamma(\rho,v)$$ are stationary mappings. Moreover, they are close to $f_0$ provided that $r$ is sufficiently close to $r_0$. Therefore, their winding numbers are equal. Thus $f$ satisfies condition (4) of Definition~\ref{21e}, i.e. $f$ is an $E$-mapping.
\end{proof}
\section{Localization property}
\begin{prop}\label{localization} Let $D\subset\mathbb C^n$, $n\geq 2$, be a domain. Assume that $a\in\partial D$ is such that $\partial D$ is real analytic and strongly convex in a neighborhood of $a$. Then for any sufficiently small neighborhood $V_0$ of $a$ there is a weak stationary mapping $f$ of $D\cap V_0$ such that $f(\mathbb T)\subset\partial D$.
In particular, $f$ is a weak stationary mapping of $D$.
\end{prop}
\begin{proof} Let $r$ be a real analytic defining function in a neighborhood of $a$. The problem we are dealing with has a local character, so replacing $r$ with $r\circ\Psi$, where $\Psi$ is a local biholomorphism near $a$, we may assume that $a=(0,\ldots,0,1)$ and a defining function of $D$ near $a$ is $r(z)=-1+|z|^2+h(z-a)$, where $h$ is real analytic in a neighborhood of $0$ and $h(z)=O(|z|^3)$ as $z\to 0$ (cf. \cite{Rud}, p. 321).
Following \cite{Lem2}, let us consider the mappings
$$A_t(z):=\left((1-t^2)^{1/2}\frac{z'}{1+tz_n},\frac{z_n+t}{1+tz_n}\right),\quad z=(z',z_n)\in\mathbb{C}^{n-1}\times\mathbb{D},\,\,t\in(0,1),$$ which restricted to $\mathbb{B}_n$ are automorphisms. Let $$r_t(z):=\begin{cases}\frac{|1+tz_n|^2}{1-t^2}r(A_t(z)),&t\in(0,1),\\-1+|z|^2,&t=1.\end{cases}$$ It is clear that $f_{(1)}(\zeta)=(\zeta,0,\ldots,0)$, $\zeta\in\mathbb{D}$ is a stationary mapping of $\mathbb B_n$. We want to have the situation (\dag) which will allow us to use Lemma \ref{cruciallemma} (or Lemma \ref{cruciallemma1}). Note that $r_t$ does not converge to $r_1$ as $t\to 1$. However, $r_t\to r_1$ in $X_0(U_0,U_0^{\mathbb C})$, where $U_0$ is a neighborhood of $f_{(1)}(\mathbb{T})$ contained in $\{z\in\mathbb C^n:\re z_n>-1/2\}$ and $U_0^{\mathbb C}$ is sufficiently small (remember that $h(z)=O(|z|^3)$).
Therefore, making use of Lemma \ref{cruciallemma} for $t$ sufficiently close to $1$ we obtain stationary mappings $f_{(t)}$ in $D_t:=\{z\in \mathbb C^n: r_t(z)<0,\ \re z_n>-1/2\}$ such that $f_{(t)}\to f_{(1)}$ in the $W^{2,2}$-norm (so also in the sup-norm). Actually, it follows from Lemma~\ref{cruciallemma} that one may take $f_{(t)}:=\pi_B\circ\Upsilon(r_t,f_{(1)}'(0))$ (keeping the notation from this lemma). The argument used in Remark~\ref{f} gives that $f_{(t)}$ satisfies conditions (1'), (2') and (3') of Definition~\ref{21}. Since the non-constant function $r\circ A_t\circ f_{(t)}$ is subharmonic on $\mathbb{D}$, continuous on $\overline{\DD}$ and $r\circ A_t\circ f_{(t)}=0$ on $\mathbb{T}$, we see from the maximum principle that $f_{(t)}$ maps $\mathbb{D}$ into $D_t$. Therefore, $f_{(t)}$ are weak stationary mappings for $t$ close to $1$.
In particular, $$f_{(t)}(\mathbb{D})\subset 2\mathbb B_n \cap \{z\in\mathbb C^n:\re z_n>-1/2\}$$ provided that $t$ is close to $1$. The mappings $A_t$ have the following important property: $$A_t(2\mathbb B_n\cap\{z\in\mathbb C^n:\re z_n>-1/2\})\to\{a\}$$ as $t\to 1$ in the sense of the Hausdorff distance.
Therefore, we find from Remark \ref{rem:theta} that $g_{(t)}:=A_t\circ f_{(t)}$ is a stationary mapping of $D$. Since $g_{(t)}$ maps $\mathbb{D}$ onto arbitrarily small neighborhood of $a$ provided that $t$ is sufficiently close to $1$, we immediately get the assertion.
\end{proof}
\section{Proofs of Theorems \ref{lem-car} and \ref{main}}
We start this section with the following
\begin{lem}\label{lemat} For any different $z,w\in D$ $($resp. for any $z\in D$, $v\in(\mathbb{C}^n)_*${$)$} there exists an $E$-mapping $f:\mathbb{D}\longrightarrow D$ such that $f(0)=z$, $f(\xi)=w$ for some $\xi\in(0,1)$ $($resp. $f(0)=z$, $f'(0)=\lambda v$ for some $\lambda>0${$)$}.
\end{lem}
\begin{proof}
Fix different $z,w\in D$ (resp. $z\in D$, $v\in(\mathbb{C}^{n})_*$).
First, consider the case when $D$ is bounded strongly convex with real analytic boundary. Without loss of generality one may assume that $0\in D\Subset\mathbb{B}_n$. We need some properties of the Minkowski functionals.
Let $\mu_G$ be a Minkowski functional of a domain $G\subset\mathbb{C}^n$ containing the origin, i.e. $$\mu_G(x):=\inf\left\{s>0:\frac{x}{s}\in G\right\},\ x\in\mathbb{C}^n.$$ Assume that $G$ is bounded strongly convex with real analytic boundary. We shall show that
\begin{itemize}
\item $\mu_G-1$ is a defining function of $G$, real analytic outside $0$;
\item $\mu^2_G-1$ is a defining function of $G$, real analytic and strongly convex outside $0$.
\end{itemize}
Clearly, $G=\{x\in\mathbb{R}^{2n}:\mu_G(x)<1\}$. Setting $$q(x,s):=r\left(\frac{x}{s}\right),\ (x,s)\in U_0\times U_1,$$ where $r$ is a real analytic defining function of $G$ (defined near $\partial G$) and $U_0\subset\mathbb{R}^{2n}$, $U_1\subset\mathbb{R}$ are neighborhoods of $\partial G$ and $1$ respectively, we have $$\frac{\partial q}{\partial s}(x,s)=-\frac{1}{s^2}\left\langle\nabla r\left(\frac{x}{s}\right),x\right\rangle_{\mathbb{R}}\neq 0$$ for $(x,s)$ such that $x\in\partial G$ and $s=\mu_G(x)=1$ (since $0\in G$, the vector $-x$ attached at the point $x$ points into $G$, so it is not orthogonal to the normal vector at $x$). By the Implicit Function Theorem for the equation $q=0$, the function $\mu_G$ is real analytic in a neighborhood $V_0$ of $\partial G$. To see that $\mu_G$ is real analytic outside $0$, fix $x_0\in(\mathbb{R}^{2n})_*$. Then the set $$W_0:=\left\{x\in\mathbb{R}^{2n}:\frac{x}{\mu_G(x_0)}\in V_0\right\}$$ is open and contains $x_0$. Since $$\mu_G(x)=\mu_G(x_0)\mu_G\left(\frac{x}{\mu_G(x_0)}\right),\ x\in W_0,$$ the function $\mu_G$ is real analytic in $W_0$. Therefore, we can take $d/ds$ on both sides of $\mu_G(sx)=s\mu_G(x),\ x\neq 0,\ s>0$ to obtain $$\langle\nabla\mu_G(x),x\rangle_{\mathbb{R}}=\mu_G(x),\ x\neq 0,$$ so $\nabla\mu_G\neq 0$ in $(\mathbb{R}^{2n})_*$.
Furthermore, $\nabla\mu^2_G=2\mu_G\nabla\mu_G$, so $\mu^2_G-1$ is also a defining function of $G$.
To show that $u:=\mu^2_G$ is strongly convex outside $0$ let us prove that $$X^T\mathcal{H}_aX>0,\quad a\in\partial G,\ X\in(\mathbb{R}^{2n})_*,$$ where $\mathcal{H}_x:=\mathcal{H}u(x)$ for $x\in(\mathbb{R}^{2n})_*$. Taking $\partial/\partial x_j$ on both sides of $$u(sx)=s^2u(x),\ x,s\neq 0,$$ we get \begin{equation}\label{62}\frac{\partial u}{\partial x_j}(sx)=s\frac{\partial u}{\partial x_j}(x)\end{equation} and further taking $d/ds$ $$\sum_{k=1}^{2n}\frac{\partial^2 u}{\partial x_j\partial x_k}(sx)x_k=\frac{\partial u}{\partial x_j}(x).$$ In particular, $$x^T\mathcal{H}_xy=\sum_{j,k=1}^{2n}\frac{\partial^2 u}{\partial x_k\partial x_j}(x)x_ky_j=\langle\nabla u(x),y\rangle_{\mathbb{R}},\ x\in(\mathbb{R}^{2n})_*,\ y\in\mathbb{R}^{2n}.$$ Let $a\in\partial G$. Since $\langle\nabla\mu_G(a),a\rangle_{\mathbb{R}}=\mu_G(a)=1$, we have $a\notin T^\mathbb{R}_G(a)$. Any $X\in(\mathbb{R}^{2n})_*$ can be represented as $\alpha a+\beta Y$, where $Y\in T^\mathbb{R}_G(a)$, $\alpha,\beta\in\mathbb{R}$, $(\alpha,\beta)\neq(0,0)$. Then \begin{eqnarray*}X^T\mathcal{H}_aX&=&\alpha^2a^T\mathcal{H}_aa+2\alpha\beta a^T\mathcal{H}_aY+\beta^2Y^T\mathcal{H}_aY\\&=&\alpha^2\langle\nabla u(a),a\rangle_{\mathbb{R}} +2\alpha\beta\langle\nabla u(a),Y\rangle_{\mathbb{R}} +\beta^2Y^T\mathcal{H}_aY\\&=&2\alpha^2\mu_G(a)\langle\nabla\mu_G(a),a\rangle_{\mathbb{R}} +\beta^2Y^T\mathcal{H}_aY=
2\alpha^2+\beta^2Y^T\mathcal{H}_aY.\end{eqnarray*} Since $G$ is strongly convex, the Hessian of any defining function is strictly positive on the tangent space, i.e. $Y^T\mathcal{H}_aY>0$ if $Y\in(T^\mathbb{R}_G(a))_*$. Hence $X^T\mathcal{H}_aX\geq 0$. Note that it cannot be $X^T\mathcal{H}_aX=0$, since then $\alpha=0$, consequently $\beta\neq 0$ and $Y^T\mathcal{H}_aY=0$. On the other hand $Y=X/\beta\neq 0$ --- a contradiction.
Taking $\partial/\partial x_k$ on both sides of \eqref{62} we obtain $$\frac{\partial^2 u}{\partial x_j\partial x_k}(sx)=\frac{\partial^2 u}{\partial x_j\partial x_k}(x),\ x,s\neq 0$$ and for $a,X\in(\mathbb{R}^{2n})_*$ $$X^T\mathcal{H}_aX=X^T\mathcal{H}_{a/\mu_G(a)}X>0.$$
Let us consider the sets $$D_t:=\{x\in\mathbb{C}^n:t\mu^2_D(x)+(1-t)\mu^2_{\mathbb{B}_n}(x)<1\},\ t\in[0,1].$$ The functions $t\mu^2_D+(1-t)\mu^2_{\mathbb{B}_n}$ are real analytic in $(\mathbb{C}^n)_*$ and strongly convex in $(\mathbb{C}^n)_*$, so $D_t$ are strongly convex domains with real analytic boundaries satisfying $$D=D_1\Subset D_{t_2}\Subset D_{t_1}\Subset D_0=\mathbb{B}_n\text{\ if \ }0<t_1<t_2<1.$$ It is clear that $\mu_{D_t}=\sqrt{t\mu^2_D+(1-t)\mu^2_{\mathbb{B}_n}}$. Further, if $t_{1}$ is close to $t_{2}$ then $D_{t_{1}}$ is close to $D_{t_{2}}$ w.r.t. the topology introduced in Section \ref{27}. We want to show that $D_t$ are in some family $\mathcal D(c)$. Only the interior and exterior ball conditions need to be verified.
There exists $\delta>0$ such that $\delta\mathbb{B}_n\Subset D$. Further, $\nabla\mu_{D_t}^2\neq 0$ in $(\mathbb{R}^{2n})_*$. Set $$M:=\sup\left\{\frac{\mathcal{H}\mu_{D_t}^2(x;X)}{|\nabla\mu_{D_t}^2(y)|}:
t\in[0,1],\ x,y\in 2\overline{\mathbb{B}}_n\setminus\delta\mathbb{B}_n,\ X\in\mathbb{R}^{2n},\ |X|=1\right\}.$$ It is a positive number since the functions $\mu_{D_t}^2$ are strongly convex in $(\mathbb{R}^{2n})_*$ and the `sup' of the continuous, positive function is taken over a compact set. Let $$r:=\min\left\{\frac{1}{2M},\frac{\dist(\partial D,\delta\mathbb{B}_n)}{2}\right\}.$$ For fixed $t\in[0,1]$ and $a\in\partial D_t$ put $a':=a-r\nu_{D_t}(a)$. In particular, $\overline{B_n(a',r)}\subset 2\overline{\mathbb{B}}_n\setminus\delta\mathbb{B}_n$. Let us define $$h(x):=\mu^2_{D_t}(x)-\frac{|\nabla\mu^2_{D_t}(a)|}{2|a-a'|}(|x-a'|^2-r^2),\ x\in 2\overline{\mathbb{B}}_n\setminus\delta\mathbb{B}_n.$$ We have $h(a)=1$ and $$\nabla h(x)=\nabla\mu^2_{D_t}(x)-\frac{|\nabla\mu^2_{D_t}(a)|}{|a-a'|}(x-a').$$ For $x=a$, dividing the right side by $|\nabla\mu^2_{D_t}(a)|$, we get a difference of the same normal vectors $\nu_{D_t}(a)$, so $\nabla h(a)=0$. Moreover, for $|X|=1$ $$\mathcal{H}h(x;X)=\mathcal{H}\mu^2_{D_t}(x;X)-\frac{|\nabla\mu^2_{D_t}(a)|}{r}\leq M|\nabla\mu^2_{D_t}(a)|-2M|\nabla\mu^2_{D_t}(a)|<0.$$ It follows that $h\leq 1$ in any convex set $S$ such that $a\in S\subset 2\overline{\mathbb{B}}_n\setminus\delta\mathbb{B}_n$. Indeed, assume the contrary. Then there is $y\in S$ such that $h(y)>1$. Let us join $a$ and $y$ with an interval $$g:[0,1]\ni t\longmapsto h(ta+(1-t)y)\in S.$$ Since $a$ is a strong local maximum of $h$, the function $g$ has a local minimum at some point $t_0\in(0,1)$. Hence $$0\leq g''(t_0)=\mathcal{H}h(t_0a+(1-t_0)y;a-y),$$ which is impossible.
Setting $S:=\overline{B_n(a',r)}$, we get $$\mu^2_{D_t}(x)\leq 1+\frac{|\nabla\mu^2_{D_t}(a)|}{2|a-a'|}(|x-a'|^2-r^2)<1$$ for $x\in B_n(a',r)$, i.e. $x\in D_t$.
The proof of the exterior ball condition is similar. Set $$m:=\inf\left\{\frac{\mathcal{H}\mu_{D_t}^2(x;X)}{|\nabla\mu_{D_t}^2(y)|}:
t\in[0,1],\ x,y\in(\overline{\mathbb{B}}_n)_*,\ X\in\mathbb{R}^{2n},\ |X|=1\right\}.$$ Note that $m>0$. Actually, the homogeneity of $\mu_{D_t}$ implies $\mathcal{H}\mu_{D_t}^2(sx;X)=\mathcal{H}\mu_{D_t}^2(x;X)$ and $\nabla\mu_{D_t}^2(sx)=s\nabla\mu_{D_t}^2(x)$ for $x\neq 0$, $X\in \mathbb{R}^{2n}$, $s>0$. Therefore, there are positive constants $C_1,C_2$ such that $C_1\leq\mathcal{H}\mu_{D_t}^2(x;X)$ for $x\neq 0$, $X\in \mathbb{R}^{2n}$, $|X|=1$ and $|\nabla\mu_{D_t}^2(y)|\leq C_2$ for $y\in\overline{\mathbb{B}}_n$. In particular, $m\geq C_1/C_2$.
Let $R:=2/m$. For fixed $t\in[0,1]$ and $a\in\partial D_t$ put $a'':=a-R\nu_{D_t}(a)$. Let us define $$\widetilde h(x):=\mu^2_{D_t}(x)-\frac{|\nabla\mu^2_{D_t}(a)|}{2|a-a''|}(|x-a''|^2-R^2),\ x\in\overline{\mathbb{B}}_n.$$ We have $\widetilde h(a)=1$ and $$\nabla\widetilde h(x)=\nabla\mu^2_{D_t}(x)-\frac{|\nabla\mu^2_{D_t}(a)|}{|a-a''|}(x-a''),$$ so $\nabla\widetilde h(a)=0$. Moreover, for $x\in(\overline{\mathbb{B}}_n)_*$ and $|X|=1$ $$\mathcal{H}\widetilde h(x;X)=\mathcal{H}\mu^2_{D_t}(x;X)-\frac{|\nabla\mu^2_{D_t}(a)|}{R}\geq m|\nabla\mu^2_{D_t}(a)|-m/2|\nabla\mu^2_{D_t}(a)|>0.$$ Therefore, $a$ is a strong local minimum of $\widetilde h$.
Now using the properties listed above we may deduce that $\widetilde h\geq 1$ in $\overline\mathbb{B}_n$. We proceed similarly as before: seeking a contradiction suppose that there is $y\in\overline\mathbb{B}_n$ such that $\widetilde h(y)<1$. Moving $y$ a little (if necessary) we may assume that $0$ does not lie on the interval joining $a$ and $y$. Then the mapping $\widetilde g(t):=\widetilde h(ta+ (1-t)y)$ attains its local maximum at some point $t_0\in(0,1)$. The second derivative of $\widetilde g$ at $t_0$ is non-positive, which gives a contradiction with a positivity of the Hessian of the function $\widetilde h$.
Hence, we get $$\frac{|\nabla\mu^2_{D_t}(a)|}{2|a-a''|}(|x-a''|^2-R^2)\leq\mu^2_{D_t}(x)-1<0,$$ for $x\in D_t$, so $D_t \subset B_n(a'',R)$.
Let $T$ be the set of all $t\in[0,1]$ such that there is an $E$-mapping $f_{t}:\mathbb{D}\longrightarrow D_{t}$ with $f_{t}(0)=z$, $f_{t}(\xi_{t})=w$ for some $\xi_{t}\in(0,1)$ (resp. $f_{t}(0)=z$, $f_{t}'(0)=\lambda_{t}v$ for some $\lambda_{t}>0$). We claim that $T=[0,1]$. To prove it we will use the open-close argument.
Clearly, $T\neq\emptyset$, as $0\in T$. Moreover, $T$ is open in $[0,1]$. Indeed, let $t_{0}\in T$. It follows from Proposition \ref{13} that there is a neighborhood $T_{0}$ of $t_{0}$ such that there are $E$-mappings $f_{t}:\mathbb{D}\longrightarrow D_{t}$ and $\xi_{t}\in(0,1)$ such that $f_{t}(0)=z$, $f_{t}(\xi_{t})=w$ for all $t\in T_{0}$ (resp. $\lambda_{t}>0$ such that $f_{t}(0)=z$, $f_{t}'(0)=\lambda_{t} v$ for all $t\in T_{0}$).
To prove that $T$ is closed, choose a sequence $\{t_{m}\}\subset T$ convergent to some $t\in[0,1]$. We want to show that $t\in T$. Since $f_{t_m}$ are $E$-mappings, they are complex geodesics. Therefore, making use of the inclusions $D\subset D_{t_m}\subset\mathbb B_n$ we find that there is a compact set $K\subset(0,1)$ (resp. a compact set $\widetilde K\subset(0,\infty)$) such that $\{\xi_{t_m}\}\subset K$ (resp. $\{\lambda_{t_m}\}\subset\widetilde K$). By Propositions \ref{8} and \ref{10b} the functions $f_{t_{m}}$ and $\widetilde f_{t_{m}}$ are equicontinuous in $\mathcal{C}^{1/2}(\overline{\mathbb{D}})$ and by Propositions \ref{9} and \ref{10a} the functions $\rho_{t_{m}}$ are uniformly bounded from both sides by positive numbers and equicontinuous in $\mathcal{C}^{1/2}(\mathbb{T})$. From the Arzela-Ascoli Theorem there are a subsequence $\{s_{m}\}\subset\{t_{m}\}$ and mappings $f,\widetilde f\in{\mathcal O}(\mathbb{D})\cap\mathcal C^{1/2}(\overline{\mathbb D})$, $\rho\in{\mathcal C}^{1/2}(\mathbb{T})$ such that $f_{s_{m}}\to f$, $\widetilde{f}_{s_{m}}\to\widetilde f$ uniformly on $\overline{\mathbb{D}}$, $\rho_{s_{m}}\to\rho$ uniformly on $\mathbb{T}$ and $\xi_{s_m}\to\xi\in (0,1)$ (resp. $\lambda_{s_m}\to\lambda>0$).
Clearly, $f(\overline{\DD})\subset\overline{D}_{t}$, $f(\mathbb{T})\subset\partial D_{t}$ and $\rho>0$. By the strong pseudoconvexity of $D_t$ we get $f(\mathbb{D})\subset D_t$.
The conditions (3') and (4) of Definitions~\ref{21} and \ref{21e} follow from the uniform convergence of suitable functions. Therefore, $f$ is a weak $E$-mapping of $D_{t}$, consequently an $E$-mapping of $D_t$, satisfying $f(0)=z$, $f(\xi)=w$ (resp. $f(0)=z$, $f'(0)=\lambda v$).
Let us go back to the general situation, that is, when a domain $D$ is bounded strongly linearly convex with real analytic boundary. Take a point $\eta\in\partial{D}$ such that $\max_{\zeta\in\partial{D}}|z-\zeta|=|z-\eta|$. Then $\eta$ is a point of the strong convexity of $D$. Indeed, by the Implicit Function Theorem one can assume that in a neighborhood of $\eta$ the defining functions of $D$ and $B:=B_n(z,|z-\eta|)$ are of the form $r(x):=\widetilde r(\widetilde x)-x_{2n}$ and $q(x):=\widetilde q(\widetilde x)-x_{2n}$ respectively, where $x=(\widetilde x,x_{2n})\in\mathbb{R}^{2n}$ is sufficiently close to $\eta$. From the inclusion $D\subset B$ it follows that $r-q\geq 0$ near $\eta$ and $(r-q)(\eta)=0$. Thus the Hessian $\mathcal{H}(r-q)(\eta)$ is weakly positive in $\mathbb{C}^n$. Since $\mathcal{H}q(\eta)$ is strictly positive on $T_B^\mathbb{R}(\eta)_*=T_D^\mathbb{R}(\eta)_*$, we find that $\mathcal{H}r(\eta)$ is strictly positive on $T_D^\mathbb{R}(\eta)_*$, as well.
By a continuity argument, there is a convex neighborhood $V_0$ of $\eta$ such that all points from $\partial D\cap V_0$ are points of the strong convexity of $D$. It follows from Proposition \ref{localization} (after shrinking $V_0$ if necessary) that there is a weak stationary mapping $g:\mathbb{D}\longrightarrow D\cap V_0$ such that $g(\mathbb{T})\subset\partial D$. In particular, $g$ is a weak stationary mapping of $D$. Since $D\cap V_0$ is convex, the condition with the winding number is satisfied on $D\cap V_0$ (and then on the whole $D$). Consequently, $g$ is an $E$-mapping of $D$.
If $z=g(0)$, $w=g(\xi)$ for some $\xi\in\mathbb{D}$ (resp. $z=g(0)$, $v=g'(0)$) then there is nothing to prove. In the other case let us take curves $\alpha:[0,1]\longrightarrow D$, $\beta:[0,1]\longrightarrow D$ joining $g(0)$ and $z$, $g(\xi)$ and $w$ (resp. $g(0)$ and $z$, $g'(0)$ and $v$). We may assume that the images of $\alpha$ and $\beta$ are disjoint. Let $T$ be the set of all $t\in[0,1]$ such that there is an $E$-mapping $g_{t}:\mathbb{D}\longrightarrow D$ such that $g_{t}(0)=\alpha(t)$, $g_{t}(\xi_{t})=\beta(t)$ for some $\xi_{t}\in(0,1)$ (resp. $g_{t}(0)=\alpha(t)$, $g_{t}'(0)=\lambda_{t}\beta(t)$ for some $\lambda_{t}>0$). Again $T\neq\emptyset$ since $0\in T$. Using the results of Section \ref{22} similarly as before (but for one domain), we see that $T$ is closed.
Since $\widetilde k_D$ is symmetric, it follows from Proposition \ref{13}(1) that the set $T$ is open in $[0,1]$ (first we move along $\alpha$, then by the symmetry we move along $\beta$). Therefore, $g_1$ is the $E$-mapping for $z,w$.
In the case of $\kappa_{D}$ we change a point and then we change a direction. To be more precise, consider the set $S$ of all $s\in[0,1]$ such that there is an $E$-mapping $h_{s}:\mathbb{D}\longrightarrow D$ such that $h_{s}(0)=\alpha(s)$. Then $0\in S$, by Proposition \ref{13}(1) the set $S$ is open in $[0,1]$ and by results of Section~\ref{22} again, it is closed. Hence $S=[0,1]$. Now we may join $h'_{1}(0)$ and $v$ with a curve $\gamma:[0,1]\longrightarrow \mathbb C^n$. Let us define $R$ as the set of all $r\in[0,1]$ such that there is an $E$-mapping $\widetilde h_{r}:\mathbb{D}\longrightarrow D$ such that $\widetilde h_{r}(0)=h_1(0)$, $\widetilde h'_{r}(0)=\sigma_{r}\gamma(1-r)$ for some $\sigma_r>0$. Then $1\in R$, by Proposition \ref{13}(2) the set $R$ is open in $[0,1]$ and, by Section \ref{22}, it is closed. Hence $R=[0,1]$, so $\widetilde h_{0}$ is the $E$-mapping for $z,v$.
\end{proof}
Now we are in a position to prove the main results of Lempert's paper.
\begin{proof}[Proof of Theorem \ref{lem-car} $($real analytic case$)$] It follows from Lemma \ref{lemat} that for any different points $z,w\in D$ (resp. $z\in D$, $v\in(\mathbb{C}^n)_*$) one may find an $E$-mapping passing through them (resp. $f(0)=z$, $f'(0)=v$). On the other hand, it follows from Proposition \ref{1} that $E$-mappings have left inverses, so they are complex geodesics.
\end{proof}
\begin{proof}[Proof of Theorem \ref{main} $($real analytic case$)$] This is a direct consequence of Lemma \ref{lemat} and Corollary \ref{28}.
\end{proof}
\begin{center}{\sc ${\mathcal C}^2$-smooth case}\end{center}
\begin{lem}\label{un} Let $D\subset\mathbb C^n$, $n\geq 2$, be a bounded strongly pseudoconvex domain with $\mathcal C^2$-smooth boundary. Take $z\in D$ and let $r$ be a defining function of $D$ such that
\begin{itemize}\item $r\in \mathcal C^2(\mathbb C^n);$
\item $D=\{x\in \mathbb C^n:r(x)<0\}$;
\item $\mathbb C^n\setminus D=\{x\in \mathbb C^n:r(x)>0\}$;
\item $|\nabla r|=1$ on $\partial D;$
\item $\sum_{j,k=1}^n\frac{\partial^2 r}{\partial z_j\partial\overline z_k}(a)X_{j}\overline{X}_{k}\geq C|X|^2$ for any $a\in \partial D$ and $X\in \mathbb C^n$ with some constant $C>0$.
\end{itemize}
Suppose that there is a sequence $\{r_m\}$ of $\mathcal C^2$-smooth real-valued functions such that $D^{\alpha}r_m$ converges to $D^{\alpha}r$ locally uniformly for any $\alpha\in \mathbb N_0^{2n}$ such that $|\alpha|:=\alpha_1+\ldots+\alpha_{2n}\leq 2$. Let $D_m$ be a connected component of the set $\{x\in\mathbb C^n:r_m(x)<0\}$, containing the point $z$.
Then there is $c>0$ such that $(D_m,z)$ and $(D,z)$ belong to $\mathcal D(c)$, $m>>1.$
\end{lem}
\begin{proof} Losing no generality assume that $D\Subset\mathbb B_n.$
Note that the conditions (1), (5), (6) of Definition \ref{30} are clearly satisfied. To find $c$ satisfying ($2$), we take $s>0$ such that $\mathcal H r (x;X)< s |X|^2$ for $x\in\overline{\mathbb{B}}_n$ and $X\in(\mathbb R^{2n})_*$. Then ${\mathcal H} r_m (x;X)<2s|X|^2$ for $x\in\overline{\mathbb{B}}_n$, $X\in(\mathbb R^{2n})_*$ and $m>>1$. Let $U_0\subset\mathbb B_n$ be an open neighborhood of $\partial D$ such that $|\nabla r|$ is on $U_0$ between $3/4$ and $5/4$. Note that $\partial D_m\subset U_0$ and $|\nabla r_m|\in (1/2, 3/2)$ on $U_0$ for $m>>1$.
Fix $m$ and $a\in \partial D_m$ and put $b:=a-R\nu_{D_m}(a)$, where a small number $R>0$ will be specified later. There is $t>0$ such that $\nabla r_m(a)=2t(a-b)$. Note that $t$ may be arbitrarily large provided that $R$ was small enough. We take $t:=2s$ and $R:=|\nabla r_m(a)|/t$. Then we have $\mathcal H r_m(x;X)<2t |X|^2$ for $x\in\overline{\mathbb{B}}_n$, $X\in(\mathbb R^{2n})_*$ and $m>>1$. Then a function $$h(x):=r_m(x)-t(|x-b|^2-R^2),\ x\in \mathbb C^n,$$ attains at $a$ its global maximum on $\overline{\mathbb{B}}_n$ ($a$ is a strong local maximum and the Hessian of $h$ is negative on the convex set $\overline{\mathbb{B}}_n$, cf. the proof of Lemma \ref{lemat}).
Thus $h\leq 0$ on $\mathbb B_n$. From this we immediately get (2).
Note that it follows from (2) that $D_m=\{x\in\mathbb C^n:r_m(x)<0\}$ for $m$ big enough (i.e. $\{x\in \mathbb C^n:\ r_m(x)<0\}$ is connected).
Moreover, the condition (2) implies the condition (3) as follows. We infer from Remark~\ref{D(c),4} that there is $c'>0$ such that $D$ satisfies (3) with $c'$. Let $m_0$ be such that the Hausdorff distance between $\partial D$ and $\partial D_m$ is smaller than $1/c'$ for $m\geq m_0$. There is $c''$ such that $D_{m_0}$ satisfies (3) with $c''$. Losing no generality we may assume that $c''<c'$. Take any $x,y\in D_m$. Since $D_m$ satisfies the interior ball condition with a radius $c$ we infer that there are balls of a radius $1/c$ contained in $D_m$ and containing $x$ and $y$ respectively. The centers of these balls lie in $D_{m_0}$. Using the fact that $(D_{m_0},z)$ lies in $\mathcal D(c'')$, we may join chosen centers with balls of a radius $1/(2c'')$ as in the condition (3), so we have found a chain consisting of balls of radii $c'$ and $c''$ joining $x$ and $y$.
Thus we may join $x$ and $y$ with balls contained entirely in the constructed chain whose radii depend only on $c'$ and $c''$.
Now we are proving $(4)$. We shall show that there is $c>c'$ such that every $D_m$ satisfies (4) with $c$ for $m$ big enough. To do it let us cover $\partial D$ with a finite number of balls $B_j$, $j=1,\ldots,N$, from condition (4) and let $B'_j$ be a ball contained relatively in $B_j$ such that $\{B'_j\}$ covers $\partial D$, as well. Let $\Phi_j$ be mappings corresponding to $B_j$. Let $\varepsilon$ be such that any ball of radius $\varepsilon$ intersecting $\partial D$ non-emptily is relatively contained in $B_j'$ for some $j$. Observe that any ball $B$ of radius $\varepsilon/2$ intersecting non-emptily $\partial D_m$ is contained in a ball of radius $\varepsilon$ intersecting non-emptily $\partial D$; hence it is contained in $B_j'$ for some $j$. Then the pair $B$, $\Phi_j$ satisfies the conditions (4) (b), (c) and (d). Therefore, it suffices to check that there is $c>2/\varepsilon$ such that each pair $B_j'$, $\Phi_j$ satisfies the condition (4) for $D_m$ with $c$ ($m>>1$). This is possible since $\Phi_j(D_m)\subset\Phi_j(D)$, $D^\alpha\Phi_j(\partial D_m\cap B_j)$ converges to $D^\alpha\Phi_j(\partial D\cap B_j)$ for $|\alpha|\leq 2$ and for any $w\in\Phi_j(\partial D\cap B_j)$ there is a ball of radius $2/\varepsilon$ containing $\Phi_j(D)$ and tangent to $\partial\Phi_j(D)$ at $w$. To be precise, we proceed as follows.
Let $a,b\in\mathbb{C}^n$ and let $x\in\partial B_n(a,\widetilde c)$, where $\widetilde c>c'$. Then a ball $B_n(2a-x,2\widetilde c)$ contains $B_n(a,\widetilde c)$ and is tangent to $B_n(a,\widetilde c)$ at $x$. There is a number $\eta=\eta(\delta,\widetilde c)>0$, independent of $a,b,x$, such that the diameter of the set $B_n(b,\widetilde c)\setminus B_n(2a-x,2\widetilde c)$ is smaller than $\delta>0$, whenever $|a-b|<\eta$ (this is a simple consequence of the triangle inequality).
Let $\widetilde s>0$ be such that $\mathcal H(r\circ\Phi_j^{-1})(x;X)\geq 2\widetilde s|X|^2$ for $x\in U_j$, $j=1,\ldots,N$, where $U_j$ is an open neighborhood of $\Phi_j(\partial D\cap B_j)$. Then, for $m$ big enough, $\mathcal H(r_m\circ \Phi_j^{-1})(x;X)\geq\widetilde s|X|^2$ for $x\in U_j$ and $\Phi_j(\partial D_m\cap B_j')\subset U_j$, $j=1,\ldots,N$. Repeating for the function $$x\longmapsto(r_m\circ\Phi_j^{-1})(x)-\widetilde t(|x-\widetilde b|^2-\widetilde R^2)$$ the argument used in the interior ball condition with suitably chosen $\widetilde t$ and uniform $\widetilde R>c$, we find that there is uniform $\widetilde\varepsilon>0$ such that for any $j,m$ and $w\in\Phi_j(\partial D_m\cap B_j')$ there is a ball $B$ of radius $\widetilde R$, tangent to $\Phi_j(\partial D_m\cap B_j')$ at $w$, such that $\Phi_j(\partial D_m\cap B_j')\cap B_n(w,\widetilde\varepsilon)\subset B$. Let $a_{j,m}(w)$ denote its center.
On the other hand for any $w\in \Phi_j(\partial D_m\cap B_j')$ there is $t>0$ such that $w'=w+t\nu (w)\in \Phi_j(\partial D\cap B_j)$, where $\nu(w)$ is a normal vector to $\Phi_j(\partial D_m\cap B_j')$ at $w$. Let $a_j(w')$ be a center of a ball of radius $\widetilde R$ tangent to $\Phi_j(\partial D\cap B_j)$ at $w'$. It follows that $|a_{j,m}(w)-a_j(w')|<\eta(\widetilde\varepsilon/2,\widetilde R)$ provided that $m$ is big enough.
Joining the facts presented above, we finish the proof of the exterior ball condition (with a radius dependent only on $\widetilde\varepsilon$ and $\widetilde R$).
\end{proof}
\begin{proof}[Proof of Theorems \ref{lem-car} and \ref{main} \emph{(}$\mathcal C^2$-smooth case$)$]
Losing no generality assume that $0\in D\Subset\mathbb{B}_n$.
It follows from the Weierstrass Theorem that there is sequence $\{P_k\}$ of real polynomials on $\mathbb{C}^n\simeq\mathbb R^{2n}$ such that $$D^{\alpha}P_{k}\to D^{\alpha}r \text{ uniformly on }\overline\mathbb{B}_n,$$ where $\alpha=(\alpha_1,\ldots, \alpha_{2n})\in \mathbb N_0^{2n}$ is such that $|\alpha|=\alpha_1+\ldots +\alpha_{2n}\leq 2$. Consider the open set $$\widetilde D_{k,\varepsilon}:=\{x\in \mathbb C^n:P_{k}(x)+\varepsilon<0\}.$$ Let $\varepsilon_{m}$ be a sequence of positive numbers converging to $0$ such that $3\varepsilon_{m+1}<\varepsilon_m.$
For any $m\in \mathbb N$ there is $k_{m}\in\mathbb{N}$ such that $\sup_{\overline{\mathbb{B}}_n}|P_{k_{m}}-r|<\varepsilon_{m}$. Putting $r_{m}:=P_{k_{m}}+2\varepsilon_{m}$, we get $r+\varepsilon_{m}<r_{m}<r+3\varepsilon_{m}$. In particular, $r_{m+1}<r_m.$
Let $D_m$ be a connected component of $\widetilde D_{k_m,2\varepsilon_m}$ containing $0$. It is a bounded strongly linearly convex domain with real analytic boundary and $r_m$ is its defining function provided that $m$ is big enough. Moreover, $D_{m}\subset D_{m+1}$ and $\bigcup_m D_{m}=D$. Using properties of holomorphically invariant functions and metrics we get Theorem~\ref{lem-car}.
We are left with showing the claim that for any different $z,w\in D$ (resp. $z\in D$, $v\in(\mathbb C^n)_*$) there is a weak $E$-mapping for $z,w$ (resp. for $z,v$). Fix $z\in D$ and $w\in D$ (resp. $v\in(\mathbb C^n)_*$). Then $z,w\in D_m$ (resp. $z\in D_m$), $m>>1$. Therefore, for any $m>>1$ one may find an $E$-mapping $f_m$ of $D_m$ for $z,w$ (resp. for $z,v$). Since $(D_m,z)\in \mathcal D(c)$ for some uniform $c>0$ ($m>>1$) (Lemma~\ref{un}), we find that $f_m$, $\widetilde f_m$ and $\rho_m$ satisfy the uniform estimates from Section~\ref{22}. Thus, passing to a subsequence we may assume that $\{f_m\}$ converges uniformly on $\overline{\DD}$ to a mapping $f\in{\mathcal O}(\mathbb{D})\cap{\mathcal C}^{1/2}(\overline{\DD})$ passing through $z,w$ (resp. such that $f(0)=z$, $f'(0)=\lambda v$, $\lambda>0$), $\{\widetilde f_m\}$ converges uniformly on $\overline{\DD}$ to a mapping $\widetilde f\in{\mathcal O}(\mathbb{D})\cap\mathcal C^{1/2}(\overline{\mathbb D})$ and $\{\rho_m\}$ is convergent uniformly on $\mathbb{T}$ to a positive function $\rho\in{\mathcal C}^{1/2}(\mathbb{T})$ (in particular, $f'\bullet\widetilde f=1$ on $\mathbb{D}$, so $\widetilde f$ has no zeroes in $\overline{\DD}$). We already know that this implies that $f$ is a weak $E$-mapping of $D$.
To get ${\mathcal C}^{k-1-\varepsilon}$-smoothness of the extremal $f$ and its associated mappings for $k\geq 3$, it suffices to repeat the proof of Proposition~5 of \cite{Lem2}. This is just the Webster Lemma (we have proved it in the real analytic case --- see Proposition~\ref{6}). Namely, let $$\psi:\partial D\ni z\longmapsto(z,T_{D}^\mathbb{C}(z))\in \mathbb C^n\times(\mathbb P^{n-1})_*,$$ where $\mathbb P^{n-1}$ is the $(n-1)$-dimensional complex projective space. Let $\pi:(\mathbb{C}^n)_*\longrightarrow\mathbb P^{n-1}$ be the canonical projection.
By \cite{Web}, $\psi(\partial D)$ is a totally real manifold of $\mathcal C^{k-1}$ class. Observe that the mapping $(f,\pi\circ \widetilde f):\overline{\DD}\longrightarrow\mathbb{C}^n\times\mathbb P^{n-1}$ is $1/2$-H\"older continuous, is holomorphic on $\mathbb D$ and maps $\mathbb T$ into $\psi(\partial D)$. Therefore, it is $\mathcal C^{k-1-\varepsilon}$-smooth for any $\varepsilon>0$, whence $f$ is $\mathcal C^{k-1-\varepsilon}$-smooth. Since $\nu_D\circ f$ is of class $\mathcal C^{k-1-\varepsilon}$, it suffices to proceed as in the proof of Proposition~\ref{6}.
\end{proof}
\section{Appendix}\label{Appendix}
\subsection{Totally real submanifolds}
Let $M\subset\mathbb{C}^m$ be a totally real local $\mathcal{C}^{\omega}$ submanifold of the real dimension $m$. Fix a point $z\in M$. There are neighborhoods $U_0\subset\mathbb{R}^m$, $V_0\subset\mathbb{C}^m$ of $0$ and $z$ and a $\mathcal{C}^{\omega}$ diffeomorphism $\widetilde{\Phi}:U_0\longrightarrow M\cap V_0$ such that $\widetilde{\Phi}(0)=z$. The mapping $\widetilde{\Phi}$ can be extended in a natural way to a mapping $\Phi$ holomorphic in a neighborhood of $0$ in $\mathbb{C}^m$. Note that this extension will be biholomorphic in a neighborhood of $0$. Actually, we have $$\frac{\partial\Phi_j}{\partial z_k}(0)=\frac{\partial\Phi_j}{\partial
x_k}(0)=\frac{\partial\widetilde{\Phi}_j}{\partial x_k}(0),\ j,k=1,\ldots,m,$$ where $x_k=\re z_k$. Suppose that the complex derivative $\Phi'(0)$ is not an isomorphism. Then there is $X\in(\mathbb{C}^m)_*$ such that $\Phi'(0)X=0$, so \begin{multline*}0=\sum_{k=1}^m\frac{\partial\Phi}{\partial z_k}(0)X_k=\sum_{k=1}^m\frac{\partial\widetilde\Phi}{\partial x_k}(0)(\re X_k+i\im X_k)=\\=\underbrace{\sum_{k=1}^m\frac{\partial\widetilde\Phi}{\partial x_k}(0)\re X_k}_{=:A}+i\underbrace{\sum_{k=1}^m\frac{\partial\widetilde\Phi}{\partial x_k}(0)\im X_k}_{=:B}.\end{multline*}
The vectors $$\frac{\partial\widetilde\Phi}{\partial x_k}(0),\ k=1,\ldots,m$$ form a basis of $T^{\mathbb{R}}_M(z)$, so $A,B\in T^{\mathbb{R}}_M(z)$, consequently $A,B\in iT^{\mathbb{R}}_M(z)$. Since $M$ is totally real, i.e. $T^{\mathbb{R}}_M(z)\cap iT^{\mathbb{R}}_M(z)=\{0\}$, we have $A=B=0$. By a property of the basis we get $\re X_k=\im X_k=0$, $k=1,\ldots,m$ --- a contradiction.
Therefore, $\Phi$ in a neighborhood of $0$ is a biholomorphism of two open subsets of $\mathbb{C}^m$, which maps a neighborhood of $0$ in $\mathbb{R}^m$ to a neighborhood of $z$ in $M$.
\begin{lemm}[Reflection Principle]\label{reflection}
Let $M\subset\mathbb{C}^m$ be a totally real local $\mathcal{C}^{\omega}$ submanifold of the real
dimension $m$. Let $V_0\subset\mathbb{C}$ be a neighborhood of $\zeta_0\in\mathbb{T}$ and let $g:\overline{\mathbb{D}}\cap V_0\longrightarrow\mathbb{C}^m$ be a continuous mapping. Suppose that $g\in{\mathcal O}(\mathbb{D}\cap V_0)$ and $g(\mathbb{T}\cap V_0)\subset M$. Then $g$ can be extended holomorphically past $\mathbb{T}\cap V_0$.
\end{lemm}
\begin{proof}
In virtue of the identity principle it is sufficient to extend $g$ locally
past an arbitrary point $\zeta_0\in\mathbb{T}\cap V_0$. For a point $g(\zeta_0)\in M$ take $\Phi$ as above. Let $V_1\subset V_0$ be a neighborhood of $\zeta_0$ such that $g(\overline{\DD}\cap V_1)$ is contained in the image
of $\Phi$. The mapping $\Phi^{-1}\circ g$ is holomorphic in $\mathbb{D}\cap V_1$ and has
real values on $\mathbb{T}\cap V_1$. By the ordinary Reflection Principle we can
extend this mapping holomorphically past $\mathbb{T}\cap V_1$. Denote this extension by
$h$. Then $\Phi\circ h$ is an extension of $g$ in a neighborhood of $\zeta_0$.
\end{proof}
\subsection{Schwarz Lemma for the unit ball}
\begin{lemm}[Schwarz Lemma]\label{schw}
Let $f\in{\mathcal O}(\mathbb{D},B_n(a,R))$ and $r:=|f(0)-a|$. Then $$|f'(0)|\leq \sqrt{R^2-r^2}.$$
\end{lemm}
\subsection{Some estimates of holomorphic functions of ${\mathcal C}^{\alpha}$-class}
Let us recall some theorems about functions holomorphic in $\mathbb{D}$ and continuous in $\overline{\DD}$. Concrete values of the constants $M,K$ can be calculated by inspecting the proofs. In fact, it is only important that they do not depend on the functions.
\begin{tww}[Hardy, Littlewood, \cite{Gol}, Theorem 3, p. 411]\label{lit1}
Let $f\in{\mathcal O}(\mathbb{D})\cap{\mathcal C}(\overline{\DD})$. Then for $\alpha\in(0,1]$ the following conditions are equivalent
\begin{eqnarray}\label{47}\exists M>0:\ |f(e^{i\theta})-f(e^{i\theta'})|\leq M|\theta-\theta'|^{\alpha},\ \theta,\theta'\in\mathbb{R};\\
\label{45}\exists K>0:\ |f'(\zeta)|\leq K(1-|\zeta|)^{\alpha-1},\ \zeta\in\mathbb{D}.
\end{eqnarray}
Moreover, if there is given $M$ satisfying \eqref{47} then $K$ can be chosen as $$2^{\frac{1-3\alpha}{2}}\pi^\alpha M\int_0^\infty\frac{t^\alpha}{1+t^2}dt$$ and if there is given $K$ satisfying \eqref{45} then $M$ can be chosen as $(2/\alpha+1)K$.
\end{tww}
\begin{tww}[Hardy, Littlewood, \cite{Gol}, Theorem 4, p. 413]\label{lit2}
Let $f\in{\mathcal O}(\mathbb{D})\cap{\mathcal C}(\overline{\DD})$ be such that $$|f(e^{i\theta})-f(e^{i\theta'})|\leq M|\theta-\theta'|^{\alpha},\ \theta,\theta'\in\mathbb{R},$$ for some $\alpha\in(0,1]$ and $M>0$. Then $$|f(\zeta)-f(\zeta')|\leq K|\zeta-\zeta'|^{\alpha},\
\zeta,\zeta'\in\overline{\DD},$$ where $$K:=\max\left\{2^{1-2\alpha}\pi^\alpha M,2^{\frac{3-5\alpha}{2}}\pi^\alpha\alpha^{-1} M\int_0^\infty\frac{t^\alpha}{1+t^2}dt\right\}.$$
\end{tww}
\begin{tww}[Privalov, \cite{Gol}, Theorem 5, p. 414]\label{priv}
Let $f\in{\mathcal O}(\mathbb{D})$ be such that $\re f$ extends continuously on $\overline{\DD}$ and $$|\re f(e^{i\theta})-\re f(e^{i\theta'})|\leq M|\theta-\theta'|^\alpha,\ \theta,\theta'\in\mathbb{R},$$ for some $\alpha\in(0,1)$ and $M>0$. Then $f$ extends continuously on $\overline{\DD}$ and $$|f(\zeta)-f(\zeta')|\leq K|\zeta-\zeta'|^\alpha,\ \zeta,\zeta'\in\overline{\DD},$$ where $$K:=\max\left\{2^{1-2\alpha}\pi^\alpha,2^{\frac{3-5\alpha}{2}}\pi^\alpha\alpha^{-1}\int_0^\infty\frac{t^\alpha}{1+t^2}dt\right\}\left(\frac{2}{\alpha}+1\right)2^{\frac{3-3\alpha}{2}}\pi^{\alpha}M\int_0^\infty\frac{t^\alpha}{1+t^2}dt.$$
\end{tww}
\subsection{Sobolev space}
The Sobolev space $W^{2,2}(\mathbb{T})=W^{2,2}(\mathbb{T},\mathbb{C}^m)$ is a space of functions $f:\mathbb{T}\longrightarrow\mathbb{C}^m$, whose first two derivatives (in the sense of distributions) are in $L^2(\mathbb{T})$ (here we use a standard identification of functions on the unit circle and functions on the interval $[0,2\pi]$). Then $f$ is $\mathcal C^1$-smooth.
It is a complex Hilbert space with the following scalar product
$$\langle f,g\rangle_W:=\langle f,g\rangle_{L}+\langle f',g'\rangle_{L}+\langle f'',g''\rangle_{L},$$
where $$\langle\widetilde f,\widetilde g\rangle_{L}:=\frac{1}{2\pi}\int_0^{2\pi}\langle\widetilde f(e^{it}),\widetilde g(e^{it})\rangle dt.$$ Let $\|\cdot\|_L$, $\|\cdot\|_W$ denote the norms induced by $\langle\cdot,\cdot\rangle_L$ and $\langle\cdot,\cdot\rangle_W$. The following characterization simply follows from Parseval's identity $$W^{2,2}(\mathbb{T})=\left\{f\in L^2(\mathbb{T}):\sum_{k=-\infty}^{\infty}(1+k^2+k^4)|a_k|^2<\infty\right\},$$ where $a_k\in\mathbb{C}^m$ are the $m$-dimensional Fourier coefficients of $f$, i.e. $$f(\zeta)=\sum_{k=-\infty}^{\infty}a_k\zeta^k,\ \zeta\in\mathbb{T}.$$ More precisely, Parseval's identity gives $$\|f\|_W=\sqrt{\sum_{k=-\infty}^{\infty}(1+k^2+k^4)|a_k|^2},\ f\in W^{2,2}(\mathbb{T}).$$ Note that $W^{2,2}(\mathbb{T})\subset\mathcal{C}^{1/2}(\mathbb{T})\subset\mathcal{C}(\mathbb{T})$ and both inclusions are continuous (in particular, both inclusions are real analytic). Note also that
\begin{equation}\label{67}\|f\|_{\sup}\leq\sum_{k=-\infty}^{\infty}|a_k|\leq\sqrt{\sum_{k=-\infty}^{\infty}\frac{1}{1+k^2}\sum_{k=-\infty}^{\infty}(1+k^2)|a_k|^2}\leq\frac{\pi}{\sqrt 3}\|f\|_W.\end{equation}
Now we want to show that there exists $C>0$ such that $$\|h^\alpha\|_W\leq C^{|\alpha|}\|h_1\|^{\alpha_1}_W\cdotp\ldots\cdotp\|h_{2n}\|^{\alpha_{2n}}_W,\quad h\in W^{2,2}(\mathbb{T},\mathbb{C}^n),\,\alpha\in\mathbb{N}_0^{2n}.$$ Thanks to the induction it suffices to prove that there is $\widetilde C>0$ satisfying $$\|h_1h_2\|_W\leq\widetilde C\|h_1\|_W\|h_2\|_W,\quad h_1,h_2\in W^{2,2}(\mathbb{T},\mathbb{C}).$$ Using \eqref{67}, we estimate $$\|h_1h_2\|^2_W=\|h_1h_2\|^2_L+\|h_1'h_2+h_1h_2'\|^2_L+\|h_1''h_2+2h_1'h_2'+h_1h_2''\|^2_L\leq$$$$\leq C_1\|h_1h_2\|_{\sup}^2+(\|h_1'h_2\|_L+\|h_1h_2'\|_L)^2+(\|h_1''h_2\|_L+\|2h_1'h_2'\|_L+\|h_1h_2''\|_L)^2\leq$$\begin{multline*}\leq C_1\|h_1\|_{\sup}^2\|h_2\|_{\sup}^2+(C_2\|h_1'\|_L\|h_2\|_{\sup}+C_2\|h_1\|_{\sup}\|h_2'\|_L)^2+\\+(C_2\|h_1''\|_L\|h_2\|_{\sup}+C_2\|2h_1'h_2'\|_{\sup}+C_2\|h_1\|_{\sup}\|h_2''\|_L)^2\leq\end{multline*}\begin{multline*}\leq C_3\|h_1\|_W^2\|h_2\|_W^2+(C_4\|h_1\|_W\|h_2\|_W+C_4\|h_1\|_W\|h_2\|_W)^2+\\+(C_4\|h_1\|_W\|h_2\|_W+2C_2\|h_1'\|_{\sup}\|h_2'\|_{\sup}+C_4\|h_1\|_W\|h_2\|_W)^2\leq\end{multline*}$$\leq C_5\|h_1\|_W^2\|h_2\|_W^2+(2C_4\|h_1\|_W\|h_2\|_W+2C_2\|h_1'\|_{\sup}\|h_2'\|_{\sup})^2$$ with constants $C_1,\ldots,C_5$. Expanding $h_j(\zeta)=\sum_{k=-\infty}^{\infty}a^{(j)}_k\zeta^{k}$, $\zeta\in\mathbb{T}$, $j=1,2$, we obtain $$\|h_j'\|_{\sup}\leq\sum_{k=-\infty}^{\infty}|k||a^{(j)}_k|\leq\sqrt{\sum_{k\in\mathbb{Z}_*}\frac{1}{k^2}\sum_{k\in\mathbb{Z}_*}k^4|a^{(j)}_k|^2}\leq\frac{\pi}{\sqrt 3}\|h_j\|_W$$ and finally $\|h_1h_2\|^2_W\leq C_6\|h_1\|_W^2\|h_2\|_W^2$ for some constant $C_6$.
\subsection{Matrices}
\begin{propp}[Lempert, \cite{Lem2}, Th\'eor\`eme $B$]\label{12}
Let $A:\mathbb{T}\longrightarrow\mathbb{C}^{n\times n}$ be a matrix-valued real analytic mapping such
that $A(\zeta)$ is self-adjoint and strictly positive for any $\zeta\in\mathbb{T}$. Then there exists $H\in{\mathcal O}(\overline{\DD},\mathbb{C}^{n\times n})$ such that $\det H\neq 0$ on $\overline{\DD}$ and $HH^*=A$ on $\mathbb{T}$.
\end{propp}
In \cite{Lem2}, the mapping $H$ was claimed to be real analytic in a neighborhood of $\overline{\DD}$ and holomorphic in $\mathbb{D}$, but it is equivalent to $H\in{\mathcal O}(\overline{\DD})$. Indeed, since $\overline\partial H$ is real analytic near $\overline{\DD}$ and $\overline\partial H=0$ in $\mathbb{D}$, the identity principle for real analytic functions implies $\overline\partial H=0$ in a neighborhood of $\overline{\DD}$.
\begin{propp}[\cite{Tad}, Lemma $2.1$]\label{59}
Let $A$ be a complex symmetric $n\times n$ matrix. Then $$\|A\|=\sup\{|z^TAz|:z\in\mathbb{C}^n,\,|z|=1\}.$$
\end{propp}
\textsc{Acknowledgements.} We would like to thank Sylwester Zaj\k ac for helpful discussions. We are also grateful to our friends for the participation in preparing some parts of the work.
\end{document} |
\begin{document}
\title[SURE-tuned Bridge Regression]{SURE-tuned Bridge Regression}
\author*[1]{\fnm{Jorge} \sur{Lor\'ia}}\email{[email protected]}
\author[2]{\fnm{Anindya} \sur{Bhadra}}
\affil[1]{\orgdiv{Department of Statistics}, \orgname{Purdue University}, \orgaddress{ \city{West Lafayette}, \postcode{47907}, \state{Indiana}, \country{United States}}}
\abstract{Consider the \unboldmath{$\ell_{\alpha}$} regularized linear regression, also termed Bridge regression. For $\alpha\in (0,1)$, Bridge regression enjoys several statistical properties of interest such as sparsity and near-unbiasedness of the estimates \citep{FanLiSCADJASA}. However, the main difficulty lies in the non-convex nature of the penalty for these values of $\alpha$, which makes an optimization procedure challenging and usually it is only possible to find a local optimum. To address this issue, \citet{PolsonScottBayesBridge} took a sampling based fully Bayesian approach to this problem, using the correspondence between the Bridge penalty and a power exponential prior on the regression coefficients. However, their sampling procedure relies on Markov chain Monte Carlo (MCMC) techniques, which are inherently sequential and not scalable to large problem dimensions. Cross validation approaches are similarly computation-intensive. To this end, our contribution is a novel \emph{non-iterative} method to fit a Bridge regression model. The main contribution lies in an explicit formula for Stein's unbiased risk estimate for the out of sample prediction risk of Bridge regression, which can then be optimized to select the desired tuning parameters, allowing us to completely bypass MCMC as well as computation-intensive cross validation approaches. Our procedure yields results in about 1/8th to 1/10th of the computational time compared to iterative schemes, without any appreciable loss in statistical performance. An \texttt{R} implementation is publicly available online at: \href{https://github.com/loriaJ/Sure-tuned_BridgeRegression}{https://github.com/loriaJ/Sure-tuned_BridgeRegression}.
}
\keywords{Bridge Regression, Cross validation, Monte Carlo estimation, Stein's unbiased risk estimate}
\maketitle
\section{Introduction}\label{sec1}
For regression coefficients $\beta\in \mathbb{R}^p$, the Bridge regression estimate is usually stated as the solution to the following optimization problem:
\begin{align}
\hat\beta = \arg\min_{\beta}\left\{\frac{1}{2}\lVert y - X\beta\rVert_2^2 + \nu\sum_{i}\lvert\beta_i\rvert^\alpha\right\},\label{eq:bridge}
\end{align}
where $y \in \mathbb{R}^n$ is the response variable, $X \in \mathbb{R}^{n\times p}$ is the design matrix, $\nu>0$ is a penalty parameter, and $\alpha\in(0,2]$ is the coefficient exponent. Particular cases of interest are: the degenerate $\alpha=0$, which corresponds to the $\ell_0$ penalized regression; when $\alpha=1$, it corresponds to the lasso procedure; and $\alpha=2$ is ridge regression.
When $\alpha\in(0,1)$, this optimization problem is non-convex, and for $\alpha \in[1,2]$, it is a convex problem. Furthermore, the Bridge model has the desirable properties of sparsity and near-unbiasedness \citep{FanLiSCADJASA}, when $\alpha \in (0,1).$
However, for the same setting of $\alpha\in (0,1)$, \citet{PolsonScottBayesBridge}
argue that a purely optimization-based approach is inappropriate, as the penalized likelihood surface is multi-modal, which in turn leads to several possible solutions or local optimality conditions instead of global optimality, which is hard to establish \citep{mazumder2011sparsenet}. In this context, three iterative strategies are currently available for solving this problem, to the best of our knowledge. These are:
\begin{enumerate}
\item A sampling based fully Bayesian Markov chain Monte Carlo (MCMC) procedure by \citet{PolsonScottBayesBridge}. The key to their approach is the observation that a solution to the optimization problem in Equation~\eqref{eq:bridge} could be obtained as the posterior mode under the model:
\begin{eqnarray*}
y\mid X,\beta &\sim& \mathcal{N}(X\beta,\sigma^2),\\
p(\beta_i) &\propto& \exp(-\nu \lvert \beta_i \rvert^\alpha),
\end{eqnarray*}
where the density on the last line was termed the exponential power density by \citet[][p.~157]{BoxTiao1973}. Consequently, \citet{PolsonScottBayesBridge} recommend a fully Bayesian approach using this hierarchical model and outline two possible sampling schemes. Also of interest are the approaches of \citet{GomezSanchezBridge}, and \citet{MallickYiBridge}.
\item Expectation maximization (EM) point estimation routines for finding the maximum a posteriori estimate under a power exponential prior, which coincides to the solution of the optimization problem in Equation~\eqref{eq:bridge}, proposed by \citet{PolsonScottBayesBridge}, and \citet{MallickYiBridge}. Moreover, pathwise coordinate descent approaches are also available for solving the penalized optimization problem \citep{mazumder2011sparsenet,griffin2022improved}.
\item Finally, a variational Bayes approach, for a tractable approximation to the target posterior under a fully Bayesian model \citep{ArmaganBridge}.
\end{enumerate}
While the approaches above could be termed respectively as fully Bayesian, frequentist and approximately Bayesian; and hence together they cover the full spectrum of statistical inference reasonably well, a common and recurring theme is their reliance on iterative routines for the purpose of model fitting, inherently limiting their scalability. The main contribution of the current paper is to present a \emph{non-iterative} approach. Our approach exploits a closed form expression for the desired posterior moments in a penalized likelihood formulation of the Bridge problem, given a latent Gaussian representation, using the celebrated result of \citet{West1987} that an exponential power density, $p_X(x)\propto \exp(-\lvert x\rvert^\alpha)$, for $\alpha\in (0,1)$, is a normal scale mixture with respect to a positive $\alpha/2$ stable density for the latent scale variable. This result leads to a closed form expression for Stein's unbiased risk estimate or SURE \citep{stein_inadmissibility_1956, stein1981estimation} for Bridge regression, allowing a selection of $\nu$ by minimizing SURE via a simple one-dimensional grid search. Upon selecting $\nu$, the penalized likelihood estimate of $\beta$ that coincides with the Bridge optimization problem is also available analytically, once again by exploiting the latent Gaussian representation. The connection between SURE and cross validation has been explored by \citet{Efron2004}, who showed the latter to be a Monte Carlo estimate of SURE. Thus, choosing the desired tuning parameters via minimizing SURE allows us to completely bypass computationally demanding cross validation procedures. Further, the explicit formula resulting from our latent Gaussian representation preempts iterative Markov chain Monte Carlo simulation as well, since the desired posterior moments are available in closed form.
Our results still require numerical evaluation by means of vanilla Monte Carlo of some functions of the latent stable scale variable, but these evaluations are not inherently sequential in the same sense an MCMC or an iterative optimization routine is.
We compare our procedure to the fully Bayesian approach of \citet{PolsonScottBayesBridge}, which is implemented in the \texttt{R} package \texttt{BayesBridge} as well as with cross validation. Our results indicate similar statistical performance, but with a run time that is typically between 1/8th and 1/10th of a fully Bayesian MCMC routine or cross validation. Predictive comparison on a set of spectral reflectances for photosynthetic prediction in plants \citep{MeachamHensoldMontesWuetal} is also presented, once again yielding similar out-of-sample prediction errors, but in a fraction of the time.
To summarize, our main contributions are:
\begin{enumerate}
\item An explicit formula for the desired posterior moments in a latent Gaussian representation of the Bridge regression model, yielding a closed form expression for SURE.
\item A demonstration that the estimates have finite Monte Carlo variance, while avoiding iterative MCMC and optimization routines altogether.
\item A numerical demonstration on simulated and a photosynthetic data set, indicating similar statistical performance with a large saving in computational time.
\end{enumerate}
The rest of the manuscript is organized as follows. In Section \ref{sec:method} we present explicit formulas for the Bridge regression model. Next, in Section~\ref{sec:SURE} we include a closed form expression for $\mathrm{SURE}$, for a general prior on the coefficients of a linear regression. We also provide details of the implementation that makes clear how we are able to avoid an iterative routine, with vanilla Monte Carlo being sufficient for our purposes. This is followed by Section~\ref{sec:Results} where we consider simulation experiments to validate our method, and verify the bounds on the variances. Further, in Section~\ref{sec:real_data} we implement our method in a prediction problem with spectroscopic measurements. We conclude in Section~\ref{sec:Conclusion} by pointing out some future directions.
\section{A Penalized Likelihood Framework for Bridge Regression}\label{sec:method}
In what follows, consider $\alpha\in(0,2)$, a fixed constant. Denote a random variable $B$ following the exponential power distribution as $B\sim EP(\alpha,\nu)$, with probability density function:
\begin{align*}
p_\nu(\beta) = &\frac{\alpha\nu
}{\Gamma(1/\alpha)}\exp\left(-2^\alpha\nu^{\alpha}\lvert\beta\rvert^\alpha\right).
\end{align*}
There is a direct relationship between the density of the exponential power distribution, and the penalty we are considering, since $-\log(p_\nu(\beta)) = 2^\alpha\nu^\alpha\lvert\beta\rvert^\alpha$, up to an additive constant. That is, the penalty is the negative logarithm of the density function, for a fixed parameter $\nu$. With that in mind, and motivated by the observations of \citet{West1987} and \citet{PSLocalShrinkage}, we consider a positive $\alpha/2-$stable random variable $L\sim S^+(\alpha/2,\nu)$, usually defined by its Laplace transform, given by: $\psi_L(\zeta)=\mathbb{E}[\exp(-\zeta L)]=\exp(-\nu^{\alpha/2} \lvert \zeta\rvert^{\alpha/2})$, \citep[see Equation 4.5,][]{ContTankov}. This is because explicit analytical expressions for the density of a positive $\alpha$ stable variable exist only for certain special cases. Making use of the Laplace exponent, \citet{West1987} and \citet{PSLocalShrinkage} expressed the density of an exponential power as a normal mixture of positive stable densities. Namely:
\begin{align}
p_\nu(\beta) \propto& \psi_L(\beta)
=\int_0^\infty\exp\Big(-\frac{\beta^2x}{2\nu}\Big)p_L(x)dx, \label{eq:StableNormalMixture}
\end{align}
where we denote by $p_L(x)$ the density of a positive $\alpha/2-$stable random variable with scale parameter $1$, denoted as $S^+(\alpha/2,1)$.
Noting that the exponential term is proportional to a mean zero normal density in $\beta$ with variance $\nu x^{-1}$, a multiplicative factor of $\nu^{-1/2} x^{1/2}$ is needed to complete the normal density. This motivates using the polynomially-tilted positive stable density: $T \sim PS^+(\alpha/2,\delta),\; \delta\ge 0$, with density $p_T$, given by $p_T(x)\propto x^{-\delta}p_L(x)$, where $p_L$ is the density of $L\sim S^+(\alpha/2,1)$, defined by \citet{devroye2009polynomiallytiltedStable}. We obtain:
\begin{align*}
p_\nu(\beta)\propto & \int_{0}^{\infty} p_\nu(\beta\mid x)x^{-1/2}p_L(x)dx\\
\propto & \int_{0}^{\infty} p_\nu(\beta\mid x)p_T(x)dx,
\end{align*}
where $p_\nu(\beta\mid x)= \mathcal{N} (\beta\mid 0, \nu x^{-1})$ and $T\sim PS^+(\alpha/2,1/2)$. The previous expression shows that the density of an exponential power acts as the marginal of a normal mixture model. To make use of this for the Bridge model, we apply it to the prior and obtain the posterior moments of $\beta$ conditional on $T$ and $y$. The following hierarchy corresponds to the $n-$means model:
\begin{align}
y_i\mid\beta_i \stackrel{ind}\sim & \mathcal{N}(\beta_i,\sigma^2), \label{eq:likelihood_n_means}\\
\beta_i \overset{iid}{\sim}& EP(\alpha,\nu),\label{eq:prior_density_n_means}
\end{align}
\sloppy for $i=1,\dots,n$, and
$\sigma^2\in\mathbb{R}^+$. To obtain the posterior estimates in this $n-$means model, we make use of the following lemma.
\begin{lemma}
\label{le:equivalence_between_models_n_means}
The Bridge $n-$means model, given by Equations \eqref{eq:likelihood_n_means} and \eqref{eq:prior_density_n_means}, is equivalent to the hierarchical model:
\begin{align*}
y_i\mid \beta_i,T_i\stackrel{ind}\sim\,& \mathcal{N}(\beta_i,\sigma^2),\\
\beta_i\mid T_i\stackrel{ind}\sim& \mathcal{N}(0,\nu T_i^{-1}),\\
T_i \overset{iid}{\sim}\,& PS^+(\alpha/2,1/2),
\end{align*}
for $i=1,\dots,n$.
\end{lemma}
Proof of Lemma~\ref{le:equivalence_between_models_n_means} can be found in Appendix~\ref{pf:equivalence_between_models_n_means}. On its own, the previous lemma is not very surprising and is an immediate consequence of the result by \citet{West1987}. Its usefulness becomes clear in the following theorem.
\begin{theorem}\label{th:n_means_moments}
Under the model defined by Equations~\eqref{eq:likelihood_n_means} and~\eqref{eq:prior_density_n_means}, the marginal density for $y_i$ is:
\begin{align*}
m_\nu(y_i) =& \int_{\mathbb{R}^+}p_\nu(y_i\mid t)p_T(t)dt < \infty,
\end{align*}
and the posterior expectation of the first two moments of $\beta_i$ is given by:
\begin{align*}
\tilde{\beta_i} :=&
\mathbb{E}[\beta_i \mid y_i]\\
=& \frac{1}{m_\nu(y_i)}\int_{0}^\infty y_i\nu(\sigma^2t+\nu)^{-1} p_\nu(y_i\mid t)p_T(t)dt ,\\%\label{eq:post_exp_n_means} \\
\tilde{\beta}_i^{(2)}
= & \mathbb{E}[\beta_i^2 \mid y_i]\\
= & \frac{1}{m_\nu(y_i)}\int_{0}^\infty \left(\frac{\sigma^2\nu}{\sigma^2t+\nu} +\frac{y_i^2\nu^2}{(\sigma^2t+\nu)^{2}}\right)\\ &\hspace{1.3cm} \times p_\nu(y_i\mid t)p_T(t)dt,
\end{align*}
where the densities in the integrals correspond to ${T\sim PS^+(\alpha/2,1/2)}$, and $y_i\mid t\sim \mathcal{N}(0,\sigma^2 + \nu t^{-1})$ .
\end{theorem}
Proof of Theorem~\ref{th:n_means_moments} can be found in Appendix~\ref{pf:n_means_moments}. Theorem~\ref{th:n_means_moments} implies that we can estimate the marginal density, $m_\nu(y_i)$, through a Monte Carlo (and not MCMC) averaging, by sampling from the distribution of $T$. This sampling is easily done by following the method proposed in \citet{devroye2009polynomiallytiltedStable}. We are not specifically interested in estimating the marginal density. More importantly, we can estimate the posterior expectation and variance of $\beta_i$ through a Monte Carlo simulation.
Specifically, let $T_1,\dots,T_J\overset{iid}{\sim}PS^+(\alpha/2,1/2)$, and denoting $\mathcal{T}=(T_1,\dots,T_J)$, define the estimates as:
\begin{align*}
m_\nu(y_i)_\mathcal{T} & =\frac{1}{J}\sum_{j=1}^J p_\nu(y_i\mid T_j), \\
\mathbb{E}[\beta_i\mid y_i]_\mathcal{T} & = \frac{1}{J m_\nu(y_i)_\mathcal{T}}\sum_{j=1}^Jp_\nu(y_i\mid T_j)\mathbb{E}[\beta_i\mid y,T_j],\\
\mathbb{E}[\beta_i^2\mid y_i]_{\mathcal{T}} & = \frac{1}{J m_\nu(y_i)_\mathcal{T}}\sum_{j=1}^Jp_\nu(y_i\mid T_j)\left(\Var[\beta_i\mid y_i,T_j]\right. \\
& \left.\hspace{3cm}+ \mathbb{E}[\beta_i\mid y_i,T_j]^2\right).
\end{align*}
In a Monte Carlo estimation, it is natural to wonder: (1) whether the estimates are unbiased and (2) whether they have bounded variance. Now we present a small technical lemma which gives a bound for the marginal. This helps us address the above concerns.
\begin{lemma}\label{le:marginal_bounded}
The marginal under the $n$-means model admits the following lower bound:
\begin{align*}
m_\nu(y_i) > \exp\left(-\frac{y_i^2}{2\sigma^2}\right)C_{(\sigma^2,\nu)},
\end{align*}
where $C_{(\sigma^2,\nu)}$ is a strictly positive constant independent of $y$.
\end{lemma}
Proof of Lemma~\ref{le:marginal_bounded} can be found in Appendix~\ref{pf:bounded_marginal}. The two concerns above arise since (1)~we are using a ratio of two Monte Carlo estimates that come from the same simulations and (2) we employ simulations of random variables which are related to
$\alpha$-stable densities. The following theorem settles those concerns.
\begin{theorem}\label{th:finite_var}
Our estimators have the following properties:
\begin{enumerate}
\item The estimator $m_\nu(y_i)_\mathcal{T}$ unbiasedly estimates $m_\nu(y_i)$. Next, $\mathbb{E}[\beta_i\mid y_i]_\mathcal{T}$ is an asymptotically unbiased estimator of $\tilde\beta_i$, as well as $\mathbb{E}[\beta_i^2\mid y_i]_\mathcal{T}$ of $\tilde\beta_i^{(2)}$, when $J\to\infty$.
\item The variances of our Monte Carlo estimates are finite and are bounded by:
\begin{align*}
\Var[m_\nu(y_i)_\mathcal{T}\mid y_i] &< J^{-1}(2\pi\sigma^2)^{-1}, \\
\Var[\mathbb{E}(\beta_i\mid y)_{\mathcal{T}}\mid y_i] &< J^{-1} (2\pi\sigma^2)^{-1}\exp(y_i^2/\sigma^2)C_{(\sigma^2,\nu)}^{-2}\\
& \times (\lvert y_i\rvert+ \tilde{\beta_i})^2,\text{ and}\\
\Var[\mathbb{E}(\beta_i^2\mid y_i)_{\mathcal{T}}\mid y_i] &<
J^{-1}(2\pi\sigma^2)^{-1}\exp(y_i^2/\sigma^2)C_{(\sigma^2,\nu)}^{-2}\\
& \times (\sigma^2 + y_i^2-\tilde{\beta}_i^{(2)})^2.
\end{align*}
\end{enumerate}
\end{theorem}
Proof of Theorem~\ref{th:finite_var} can be found in Appendix~\ref{pf:th_finite_var}. The first part of the theorem ensures our estimates are asymptotically unbiased. The second part of this theorem is also non-trivial, for two reasons: (1) the inverse of the marginal has been shown to have infinite variance in some circumstances \citep{newton1994approximate}, and (2) positive $\alpha/2$-stable random variables do not even have a finite first moment \citep[Property 1.2.16]{SamorodnitskyTaqqu}. This means some care must be exercised when working with variables related to them. Having a finite variance ensures convergence to a normal distribution for our estimator at the usual parametric rate $J^{-1/2}$ by the central limit theorem. It also bears mention that our approximation of the desired posterior moments for $\beta_i$ consists of approximating the numerator and the denominator (the marginal $m(y_i)$) separately via vanilla Monte Carlo by drawing from the prior of $T$ that can be naively vectorized. It is certainly possible to recast the desired moments with respect to $(T\mid y)$. However, i.i.d. draws from this posterior are not available directly, and would necessitate an iterative MCMC technique, something we strive to avoid in the current work.
Now, we continue to explore the general case, where we have a vector of $n$ observations $y$, with design matrix $X$ with $p$ covariates. We do not impose any conditions on $n$ or $p$, other than these being positive integers. That is, we contemplate both $n\geq p$ and $p>n$ cases. The likelihood and priors are given by Equations \eqref{eq:multivariate_likelihood} and \eqref{eq:multivariate_prior}. Namely:
\begin{align}
y\mid X,\beta\sim& \mathcal{N}(X\beta,\Sigma),\label{eq:multivariate_likelihood}\\
\beta_i\overset{iid}{\sim}&EP(\alpha,\nu),\text{ for } i = 1,\dots,p.\label{eq:multivariate_prior}
\end{align}
Similar to Lemma~\ref{le:equivalence_between_models_n_means}, we present Lemma~\ref{le:equivalence_between_models}, where we use the mixture representation of the exponential power to derive an equivalence between the Bridge model and a normal hierarchical model.
This equivalence is in the sense that the $T_i$ act as lurking variables, and by integrating them we recover the marginal prior, the negative of the logarithm of which is the desired Bridge penalty.
\begin{lemma}\label{le:equivalence_between_models}
The Bridge regression model, given by Equations \eqref{eq:multivariate_likelihood} and \eqref{eq:multivariate_prior}, is equivalent to the hierarchical model:
\begin{align*}
y\mid X,\beta,T\sim\,& \mathcal{N}(X\beta,\Sigma),\\% \label{eq:multivariate_likelihood_T}\\
\beta_i\mid T_i\overset{ind}{\sim}& \mathcal{N}(0,\nu T_i^{-1}),\\
T_i \overset{iid}{\sim}\,& PS^+(\alpha/2,1/2),
\end{align*}
for $i=1,\dots,p$.
\end{lemma}
The proof of Lemma~\ref{le:equivalence_between_models} can be found in Appendix~\ref{pf:equivalence_between_models}.
Lemma~\ref{le:equivalence_between_models} links the Bridge model to a normal hierarchical model, where the prior on the variance of the coefficients is given by a polynomially-tilted stable distribution. In what follows, we prove results for the Bridge model, going through the hierarchical model stated in Lemma~\ref{le:equivalence_between_models}. These results give a simple way to compute the marginal of $y$ and the posterior mean and variance of the coefficients. Although we make use of the marginal, we only need its value up to a normalizing constant. This avoids the complications that arise from estimation of marginal likelihood.
\begin{theorem}\label{th:multivariate_case}
Under the model given by Equations~\eqref{eq:multivariate_likelihood} and \eqref{eq:multivariate_prior}, the marginal density of $y$ is given by:
\begin{align*}
m_\nu(y) = & \int_{(\mathbb{R}^{+})^p} p_\nu(y\mid \mathbf{t})\prod_{i=1}^p p_T(t_i)dt_i < \infty,
\end{align*}
where $\mathbf{t}=(t_1,\dots,t_p)$.
We can compute the posterior first and second moments of $\beta$, and these are respectively given by:
\begin{align*}
\tilde{\beta}=&\mathbb{E}_\nu[\beta\mid y]\\
= & \frac{1}{m_\nu(y)}\int_{(\mathbb{R}^{+})^p}p_\nu(y\mid \mathbf{t}) \mathbb{E}_\nu[\beta\mid y,\mathbf{t}]\\ &\hspace{1cm}\times\prod_{i=1}^p p_T(t_i)dt_i,\\% T_i^{-1/2} \label{eq:posterior_mean_general} \\
\tilde{\beta}^{(2)}=&\mathbb{E}_\nu(\beta \beta^T\mid y)\\
= &\frac{1}{m_\nu(y)}\int_{(\mathbb{R}^{+})^p}p_\nu(y\mid \mathbf{t}) \mathbb{E}_\nu(\beta\beta^T\mid y,\mathbf{t})\\ & \hspace{1cm}\times \prod_{i=1}^p p_T(t_i)dt_i,
\end{align*}
where $p_\nu(y\mid \mathbf{t})$ is the density: $y\mid \mathbf{t}\sim\mathcal{N}_n(0,\nu V_{\mathbf{t},\nu})$, and the expectations inside the integrals come from: $\beta\mid y,\mathbf{t}\sim \mathcal{N}_p(\Lambda_\mathbf{t}X^TV_{\mathbf{t},\nu}^{-1}y,\nu\Lambda_\mathbf{t} - \nu\Lambda_\mathbf{t}X^TV_{\mathbf{t},\nu}^{-1}X\Lambda_\mathbf{t})$. Here $\Lambda_\mathbf{t} = \mathrm{diag}(t_i^{-1})$, $i=1,\dots,p$, and $V_{\mathbf{t},\nu}=X\Lambda_\mathbf{t} X^T + \nu^{-1}\Sigma$.
\end{theorem}
The proof can be found in Appendix~\ref{pf:multivariate_lemma}.
We omit the normalizing constants in Theorem~\ref{th:multivariate_case} as those appear in the numerator and denominator. As previously mentioned, this result holds for both $n\geq p$ and $n< p$, since we do not need the inverse of $X^TX$. It is possible to include a covariance structure on the observations if one is known beforehand. Also, the constant $\nu$ acts as a balance between the sample covariance matrix of the covariates: $\nu X\Lambda_\mathbf{t} X^T$ and the error variance given by $\Sigma$, with $\nu$ implicitly weighing between these two variances. Furthermore, in Corollary~\ref{cor:simpler_formulas} we give an explicit expression for the first two moments of the fitted values.
\begin{corollary}\label{cor:simpler_formulas}
The posterior expectation and variance of $X\beta$ given $y,\mathbf{t}$ can be expressed as:
\begin{align*}
\mathbb{E}[X\beta \mid y,\mathbf{t} ]= A_\mathbf{t}(A_\mathbf{t}+\nu^{-1}\Sigma)^{-1}y,\\
\Var(X\beta\mid y,\mathbf{t}) = \nu\Sigma(A_\mathbf{t}+\nu^{-1}\Sigma)^{-1}A_\mathbf{t},
\end{align*}
where we define $A_\mathbf{t} = X\Lambda_\mathbf{t}X^T$.
\end{corollary}
Proof of Corollary~\ref{cor:simpler_formulas} can be found in Appendix~\ref{pf:simpler_formulas}. This corollary is relevant since we can compute $A_\mathbf{t}$ after simulating $\Lambda_\mathbf{t}$. Once that is done, we just have to perform inversion and addition to compute the posterior mean and variance of the observations, for a fixed $\nu$. This is once again plain Monte Carlo that can be vectorized and not MCMC, allowing us to bypass an iterative routine resulting in faster computation.
Using now $\mathcal{T}=\{\mathbf{T}_{j}: j=1,\dots J\}$, where for each $j$, $\mathbf{T}_{j}=(T_{1,j},\dots,T_{p,j})$, and ${T_{i,j}\overset{iid}{\sim}PS^+(\alpha/2,1/2)}$, we define the Monte Carlo estimates:
\begin{align*}
m_\nu(y)_\mathcal{T} & =\frac{1}{J}\sum_{j=1}^J p_\nu(y\mid \mathbf{T}_j), \\
\mathbb{E}[\beta\mid y]_\mathcal{T} & = \frac{1}{J m_\nu(y)_\mathcal{T}}\sum_{j=1}^Jp_\nu(y\mid \mathbf{T}_j)\mathbb{E}[\beta\mid y,\mathbf{T}_j],\\
\mathbb{E}[\beta\beta^T\mid y]_{\mathcal{T}} & = \frac{1}{Jm_\nu(y)_\mathcal{T}}\sum_{j=1}^Jp_\nu(y\mid \mathbf{T}_j)\left\{\Var[\beta\mid y,\mathbf{T}_j]\right. \\
& \left.\hspace{2cm}+ \mathbb{E}[\beta\mid y,\mathbf{T}_j]\mathbb{E}[\beta\mid y,\mathbf{T}_j]^T\right\}.
\end{align*}
As before, we want to address the unbiasedness and finite variance of these estimators. However, first we need a small technical lemma.
\begin{lemma}\label{le:bounded_marginal_vector}
The marginal $m_\nu(y)$ in the linear regression setting admits the following lower bound:
\begin{align*}
m_\nu(y) > \exp(-y^T\Sigma^{-1}y/2)C_{(\Sigma,\nu,X)},
\end{align*}
where $C_{(\Sigma,\nu,X)}$ is a strictly positive constant independent of $y$.
\end{lemma}
Proof of Lemma~\ref{le:bounded_marginal_vector} can be found in Appendix~\ref{pf:bounded_marginal_vector}. This lemma is a building block for the bounds of the variances that we give in the following theorem.
\begin{theorem}\label{th:finite_var_vector}
Our estimators have the following properties:
\begin{enumerate}
\item The estimator $m_\nu(y)_\mathcal{T}$ unbiasedly estimates the marginal $m_\nu(y)$. Next, $\mathbb{E}[\beta\mid y]_\mathcal{T}$ and ${\mathbb{E}[\beta\beta^T\mid y]_\mathcal{T}}$ are asymptotically unbiased estimators of $\tilde{\beta}$, and $\tilde\beta^{(2)}$.
\item The variances of these Monte Carlo estimates are finite and have the following explicit bounds:
\begin{align*}
\mathrm{Var}[m_\nu(y)_\mathcal{T}\mid y] & < J^{-1}(2\pi)^{-p}\det(\Sigma^{-1}),\\
\Var(\lVert \mathbb{E}[\beta\mid y]_{\mathcal{T}}\rVert_2\mid y) &< J^{-1}(2\pi)^{-p}\det(\Sigma^{-1})\\
& \times \exp(y^T\Sigma^{-1}y)C_{(\Sigma,\nu,X)}^{-2}\\
&\times (\nu^2 pK_2 M\\
& + \lVert\tilde\beta\rVert^2_2 +2p \nu K_1\sqrt{M}\lVert\tilde{\beta}\rVert_2),\\
\Var\left(\lVert \mathbb{E}[\beta\beta^T\mid y]_{\mathcal{T}}\rVert_2 \mid y\right) < & J^{-1}(2\pi)^{-p}\det(\Sigma^{-1})\\
& \times \exp(y^T\Sigma^{-1}y)C_{(\Sigma,\nu,X)}^{-2}\\
& \times (C +2M_2\lVert\tilde{\beta}^{(2)}\rVert_2\\
&+\lVert\tilde{\beta}^{(2)}\rVert^2_2)
\end{align*}
where $C=p K_2 + 2Mp K_3 + M\frac{p(p-1)}{2}K_2K_1
+M^2K_4+M^2p(p-1)K_2^2$, $M_2 = \nu p K_1+MpK_2$, $K_i=\int_{\mathbb{R}^+}t^{-i}p_T(t)dt$, and $M=\lVert\Sigma^{-1}\rVert^2_2\lVert X\rVert^2_2 \lVert y\rVert^2_2$.
\end{enumerate}
\end{theorem}
The proof of the previous theorem can be found in Appendix~\ref{pf:finite_var_vector}.
Although Theorems~\ref{th:multivariate_case} and~\ref{th:finite_var_vector} extend the results for the $n-$means models to a regression setting, it still is of interest to understand how the $n-$means case works in practice, as has been done before for other non-convex regression approaches, for example, the horseshoe regression \citep{BhadraJMLR}. Denote $r=\min(n,p)$, and consider the singular value decomposition of $X=UDV^T$, where $U,D,V$ are matrices with dimensions: $n\times r$,\;$r\times r$, and $p\times r$, respectively, $D$ is a diagonal matrix with positive entries, $U$ and $V$ satisfy: $U^TU=V^TV=I_r$, with $I_r$ the $r\times r$ identity matrix. Based on these definitions, we further define: $Z=UD$, and place the prior on a linear transformation of $\beta$ given by $V\gamma$, instead of on $\beta$. Explicitly, the model we use is:
\begin{align}
y\mid X,\beta &\sim \mathcal{N}(X\beta,\sigma^2I_n),\label{eq:multivariate_normal}\\
\beta &= V\gamma,\label{eq:beta_gamma_relationship}\\
\gamma_i &\overset{iid}{\sim} EP(\alpha,\nu)\text{, for } i = 1,\dots,p.\label{eq:gamma_priors}
\end{align}
As an immediate consequence, the least squares estimate of $\gamma$ is given by: $\hat \gamma = (Z^TZ)^{-1}Z^Ty$. Then, $\hat \gamma\mid \gamma \sim \mathcal{N}(\gamma,\sigma^2D^{-2})$, which has diagonal variance matrix with positive entries, by definition of $D$. That is, we have reduced this case to Theorem~\ref{th:n_means_moments}. We formalize this in Corollary~\ref{cor:svd_decomp}.
\begin{corollary}\label{cor:svd_decomp}
Under Equations~\eqref{eq:multivariate_normal}, \eqref{eq:beta_gamma_relationship} and \eqref{eq:gamma_priors},
\begin{align*}
\mathbb{E}[\beta\mid \hat\gamma,\nu,\sigma^2]=V^T\mathbb{E}[\gamma\mid \hat\gamma,\nu,\sigma^2D^{-2}],
\end{align*}
where the marginal of $\hat\gamma_i$ is given in Theorem \ref{th:n_means_moments}, with variance $\sigma^2d_i^{-2}$. The posterior variance and posterior expectation of $\gamma$ can be computed using Theorem \ref{th:n_means_moments}.
\end{corollary}
The proof of Corollary \ref{cor:svd_decomp} can be found in Appendix~\ref{pf:svd_decomp}. We can now estimate the posterior mean and variance for a fixed penalty $\nu$, in both the general model and the $n-$means model, using the SVD.
\section{SURE for Bridge Regression}\label{sec:SURE}
A central question in Bridge regression, or for that matter, in any penalized regression, is how to choose the penalty parameter $\nu$ in Equation~\eqref{eq:bridge}. While the closed form expression and Monte Carlo estimates of the posterior moments of $\beta$ have been worked out in the previous section, the motivation has not been quite clear yet. In this section, we demonstrate that we now have all the necessary ingredients for computing Stein's unbiased risk estimate or SURE \citep{stein_inadmissibility_1956,stein1981estimation} for Bridge regression. As the name suggests, SURE is an unbiased estimate of the out of sample prediction risk under an assumption of Gaussian errors. A tractable expression is not always available, but when it is available, SURE is known to be a Rao--Blackwellized version of the cross validation loss, a connection pointed out by \citet{Efron2004}. Consequently, minimizing SURE provides a natural approach for selecting $\nu$, if prediction is the main modeling goal. We note here that similar formulas for SURE for the lasso regression have been worked out by \citet{zou2007degrees} and \citet{tibshirani2012degrees} and for the horseshoe regression by \citet{BhadraJMLR}.
Starting from the formula for SURE defined by \citet[Equation 2.11]{Efron2004}:
\begin{align*}
\mathrm{SURE} &= \lVert y - \tilde y \rVert_2^2 + 2\sigma^2\sum_{i=1}^{r} \frac{\partial \tilde y_i}{\partial y_i},
\end{align*}
where the first term is an estimate for the squared bias for prediction and the second term is the so called \emph{degrees of freedom,} providing an estimate of the variance. In this way, SURE also makes the bias--variance tradeoff explicit.
Based on the definition of $\mathrm{SURE}$, we give an expression for it in each of the two cases we consider: the $n-$means model and the linear regression model.
\begin{theorem}\label{th:sure_orthog}
Denote with $\Var(\gamma\mid \hat\gamma,\nu)$ the posterior variance-covariance matrix, from the normal likelihood model with a prior specification on $\gamma$ that depends on a parameter $\nu$, where $y=Z\gamma + \varepsilon; Z=UD$ from the SVD decomposition as previously described. Then:
\begin{align*}
\mathrm{SURE}(\nu) = & \lVert y-\tilde y\rVert_2^2 + 2\sum_{i=1}^r\sigma^2d_i^2\Var(\gamma\mid \hat\gamma,\nu)_{i,i}
\end{align*}
\end{theorem}
The proof can be found in Appendix~\ref{pf:sure_orthog}. Theorem \ref{th:sure_orthog} means that in the case of an orthogonal design matrix $X$, we can work directly with the principal components, and reduce our $p-$dimensional integrals to $p$ individual $1-$dimension integrals. In the multivariate case without an orthogonal design matrix, we get a similar result for $\mathrm{SURE}$, and state it in Theorem~\ref{th:sure}.
\begin{theorem}\label{th:sure}
Consider a general prior for $\beta$, which we denote by $\pi(\beta)$, and the normal model as in Equation \eqref{eq:multivariate_likelihood}, with $\Sigma = \sigma^2I$, then:
\begin{align*}
\mathrm{SURE} = & \lVert y-\tilde y\rVert_2^2 + 2\mathrm{tr}(\Var(X\beta\mid y)),
\end{align*}
where we denote with $\tilde y$ the prediction of $y$, with an estimated $\tilde \beta=\mathbb{E}[\beta\mid y]$.
\end{theorem}
The proof of Theorem \ref{th:sure} can be found in Appendix~\ref{pf:sure_multivariate}.
We emphasize the notation change for the prior of $\beta$, now expressed as $\pi(\beta)$ to indicate that this applies to any proper prior, and not only to the exponential power prior. Theorem~\ref{th:sure} is a generalization of Theorem~\ref{th:sure_orthog}, and does not restrict the dimensions of $X$. When applying Theorem~\ref{th:sure} to our setting, we use $\Var_\nu(X\beta\mid y)$, and minimize $\mathrm{SURE}$ as a function of $\nu$; note that $\tilde y$ is also a function of $\nu$, through the estimated $\tilde\beta = \mathbb{E}_\nu[\beta\mid y]$. As noted, SURE is an explicit numeric measure of the bias-variance trade-off: the first term is a measure of the squared bias and the second of the variance.
With this in mind, it can be seen from the bias and variance terms that $\nu$ acts as an explicit parameter to control the bias-variance trade-off. When $\nu\to 0$, we have least squares regression and the bias is small, but the variance is large. When $\nu\to \infty$, the estimate becomes intercept only, which has zero variance but a potentially large bias. While these properties of $\nu$ are well known, what is not always available is a formulation of SURE as a function of $\nu$, which can then be passed on to a one-dimensional numerical optimizer to proceed via grid search. We have closed this gap through the expressions of desired fitted quantities.
\subsection{Tuning \texorpdfstring{$\nu$}{nu} by Minimizing SURE}
To estimate $\mathbb{E}_{\nu^*}[\beta\mid y]$ for $\nu^*$ the value that minimizes $\mathrm{SURE}$, we make use of Corollary~\ref{cor:simpler_formulas} and Theorem~\ref{th:sure}.
First, simulate $T_{j,i}\overset{iid}{\sim} PS^+(\alpha/2,1/2)$ a polynomially-tilted stable random variable, for $j=1,\dots,M,i=1,\dots,p$, using the method described in \citet{devroye2009polynomiallytiltedStable}. Let $\mathbf{T}_j=(T_{j,1},\dots,T_{j,p})$ be a $p$-dimensional vector. Define: $\Lambda_{\mathbf{T}_j}=\text{diag}(T_{j,1}^{-1},\dots,T_{j,p}^{-1})$, and compute $A_{\mathbf{T}_j}=X\Lambda_{\mathbf{T}_j}X^T$. Compute: $\log(p_\nu(y\mid \mathbf{T}_j)), {\mathbb{E}[X\beta\mid y,\mathbf{T}_j]}, \Var(X\beta\mid y, \mathbf{T}_j)$, using respectively the formula from Theorem~\ref{th:multivariate_case}, and from Corollary~\ref{cor:simpler_formulas}. For these we only need to compute the inverse of $(A_{\mathbf{T}_j} + \nu^{-1}\Sigma)$ once per $\nu$ and per $j$, which dramatically speeds up our performance for large $p$, since this is an $n\times n$ matrix.
Next, since we are not concerned with the actual value of the marginal, we use the \emph{log-sum-exp} trick. Specifically, define:
\begin{align*}
\log(w^*_{j}) &= \log(p_\nu(y\mid \mathbf{T}_{j})) -\max_{j=1,\dots,M}(\log(p_\nu(y\mid \mathbf{T}_{j}))),\\
w_{j} & = w^{*}_j\left(\sum_{j=1}^M w_{j}^*\right)^{-1}.
\end{align*}
Using these weights, we estimate the posterior mean as: $\tilde y=\sum_{j=1}^M w_j\mathbb{E}[X\beta\mid y,\mathbf{T}_j]$. Similarly, the posterior variance is estimated by:
\begin{align*}
\widehat{\Var}_\nu(X\beta\mid y) =& \sum_{j=1}^M w_{j} \left(\Var(X\beta\mid y,\mathbf{T}_j)\right. \\
& \left.+ \mathbb{E}[X\beta\mid y,\mathbf{T}_j]\mathbb{E}[X\beta\mid y,\mathbf{T}_j]^T\right)\\
& - \mathbb{E}[X\beta\mid y]\mathbb{E}[X\beta\mid y]^{T}.
\end{align*}
To ensure that these are the correct estimates, we present the following corollary of Theorem~\ref{th:multivariate_case}.
\begin{corollary}\label{cor:hats_are_right}
The estimators for the posterior mean and variance satisfy:
\begin{align*}
\tilde y & = X\mathbb{E}[\beta\mid y]_\mathcal{T},\\
\widehat{\Var}_\nu(X\beta\mid y) & = X(\mathbb{E}[\beta\beta^T\mid y]_\mathcal{T} - \mathbb{E}[\beta\mid y]_\mathcal{T}\mathbb{E}[\beta\mid y]^T_\mathcal{T})X^T
\end{align*}
\end{corollary}
The proof of this corollary can be found in Appendix~\ref{pf:hats_are_right}. Now that we have all the ingredients, we proceed to optimize over $\nu$ in the expression $\mathrm{SURE}=\lVert y-\tilde y\rVert^2_2 +2\,\mathrm{tr}(\widehat{\Var}_\nu(X\beta\mid y))$.
To ensure we are optimizing on a smooth surface of $\nu$, and to minimize performance time, we simulate the polynomially-tilted stable random variables only $M\times p$ times, instead of repeating it several times. Then, we perform a one-dimensional minimization of $\mathrm{SURE}(\nu)$ for this fixed set of simulated $T$s. The optimization surface is smooth, as all the functions are infinitely differentiable as functions of $\nu$, for positive values of $\nu$.
The implementation is available online at \url{https://github.com/loriaJ/Sure-tuned_BridgeRegression}, and we include a small example of how it can be executed.
\section{Numerical Experiments}\label{sec:Results}
To measure the performance of our method, we compare it to the fully Bayesian method proposed by \citet{PolsonScottBayesBridge} (labeled ``BayesBridge'') as well as with a penalized approach that chooses $\nu$ via cross validation. We compare the following: (1) prediction error as measured by out-of-sample sum of squared errors (SSE), and (2) running time. For this, we simulate data sets $X,y$, taking $X$ as a multivariate normal with mean vector equal to zero, standard deviations equal to 0.01, and correlation matrix with diagonal equal to one and off-diagonal elements equal to $\rho=0,0.1,\dots,0.9$. We display some of our results here, and leave others in the Supplemental Material~\ref{sup:material}. We simulate $y$ using Equation~\eqref{eq:multivariate_likelihood}, setting $\Sigma$ as the identity matrix. We do this exercise for $p=2000;n=100$, and using $\beta$ with twenty signals which equal ten, and the rest equal to zero, plus a normal noise with standard deviation of $0.5$.
As a note, \citet{PolsonScottBayesBridge} propose two methods, which they call ``stable'' and ``triangular''. They both sample from a fully Bayesian model with the same marginal, but differ in the latent representation used. In our initial simulations the latter performed a lot worse in terms of SSE than the former, so we do not include the comparison with it.
Figure \ref{fig:line_plot_n_100_p_2000} shows the comparison of average running time in seconds for the case of $p=2000,\rho=0.9,n=100$, for the first method. Remarkably, our proposed method (termed SURE-Bridge) runs in about one fourth of the time of BayesBridge, and is over ten times faster than cross validation. Furthermore, the error bars indicate less variability around the mean running time compared to iterative approaches. We remark here that cross validation was parallelized across different folds, which means that hardware specification then enters the picture through the number of available processors, and complicates the comparison of the raw running times. However, for the SURE-Bridge method we did not parallelize it. Potentials for further speed up also exists for the proposed SURE-Bridge approach, for example, by farming out the vectorized vanilla Monte Carlo calculations to a graphics processing unit or GPU. We have refrained from these engineering experiments in current work, and our figures paint an accurate picture of the raw running times for a single processor (possibly multi-threaded) machine without an explicit attempt at parallelization for the proposed method.
\begin{figure*}
\caption{Comparison of average running time (s) $\pm$ SD by method, when changing the $\alpha$ parameter. Using $n=100$, $p=2000$, in matrices generated using $\rho = 0.9$}
\label{fig:line_plot_n_100_p_2000}
\end{figure*}
\begin{table*}[!htb]
\centering
\begin{tabular}{rllll}
\hline
$\alpha$ & SURE & SURE-Bridge & BayesBridge & Cross Validation \\
\hline
0.30 & 101.81 (11.69) & 103.74 (13.77) & 103.63 (13.60) & 105.57 (14.72) \\
0.50 & 101.82 (11.85) & 104.59 (16.38) & 104.46 (16.42) & 105.67 (16.67) \\
0.70 & 101.95 (11.73) & 104.57 (13.58) & 104.28 (13.61) & 104.88 (13.83) \\
0.90 & 101.40 (11.92) & 100.85 (12.58) & 100.78 (12.57) & 101.77 (12.88) \\
1.10 & 101.69 (11.91) & 103.02 (14.20) & 102.90 (14.21) & 103.49 (14.23) \\
1.30 & 101.75 (11.95) & 102.01 (14.88) & 101.84 (14.93) & 102.58 (15.23) \\
1.50 & 101.56 (11.95) & 102.87 (15.79) & 102.66 (15.82) & 103.52 (16.09) \\
1.70 & 101.84 (11.74) & 103.63 (13.83) & 103.42 (13.74) & 104.61 (13.50) \\
1.90 & 101.70 (11.88) & 103.50 (12.56) & 103.36 (12.56) & 104.30 (12.74) \\
\hline
\end{tabular}
\caption{Average SSE (SD) by method in one hundred out of sample simulated datasets, by $\alpha$. Using $n=100$, $p=2000$, in a matrix generated with $\rho=0.9$}
\label{tab:table_n_100_p_2000_cor_09}
\end{table*}
The improved computational speed for SURE-Bridge is only meaningful if there is not a considerable price to be paid so far as the statistical performance is concerned. In Table~\ref{tab:table_n_100_p_2000_cor_09}, we show the SSE and their standard deviation (SD) for the methods under consideration, and the estimated SURE at the optimum $\nu=\nu^*$. Using $n=100$ and $p=2000$, with a design matrix produced using $\rho=0.9$, the methods have similar statistical performances overall. Furthermore, SURE mostly falls within one standard deviation of the SSE for all methods.
To summarize the numerical results, then, our claimed achievement in this paper is not a better statistical estimator per se. Instead, the innovation lies in achieving competitive statistical performance, at a fraction of the running time for other methods, thereby facilitating the deployment of Bridge models at far larger problem dimensions. This is possible because we are able to bypass the iterative routines needed for the other methods through our closed form calculations and vanilla Monte Carlo approaches.
\section{Prediction of Photosynthetic Capacity with Spectroscopic Measurements}\label{sec:real_data}
To predict photosynthesis in plants \citet{MeachamHensoldMontesWuetal} use spectral measurements. They consider the photosynthetic capacity for their measured leaves by the maximum electron transport rate ($\mathcal{J}_{\max}$). Measuring the electron transport rate is expensive and time intensive. This prompted the research by \citet{MeachamHensoldMontesWuetal} to use spectral measurements of the leaves to be able to predict it, as they report that this is much less expensive. For this task, they collected $n=94$ observations and $p=2156$ covariates.
We consider this variable as the response ($y$), using the leaf reflectances as the predictors ($X$). We use ten random equal-sized splits of the data. For each split we fit both methods in the training split and measure the prediction error as the SSE for the testing split. For each split we obtain an SSE per-method and an estimate of $\mathrm{SURE}$ from our method.
\begin{figure*}
\caption{Comparison of running time (s) by method, for photosynthetic capacity data with $p=2156$ and $n=94$, based on ten splits}
\label{fig:time_plot_Jmax}
\end{figure*}
\begin{table*}[ht]
\centering
\begin{tabular}{rllll}
\hline
$\alpha$ & SURE & SURE-Bridge & BayesBridge & Cross Validation \\
\hline
0.30 & 31.61 (4.05) & 34.64 (6.07) & 34.05 (5.28) & 35.44 (6.53) \\
0.50 & 31.57 (4.08) & 34.72 (6.11) & 33.92 (5.12) & 34.53 (6.21) \\
0.70 & 31.56 (4.09) & 34.72 (6.12) & 33.83 (5.32) & 35.98 (6.65) \\
0.90 & 31.56 (4.10) & 34.73 (6.12) & 33.77 (5.37) & 35.15 (6.65) \\
1.10 & 31.56 (4.10) & 34.73 (6.13) & 33.70 (5.31) & 35.92 (5.46) \\
1.30 & 31.56 (4.10) & 34.73 (6.12) & 33.75 (5.54) & 35.34 (6.31) \\
1.50 & 31.56 (4.10) & 34.73 (6.13) & 33.71 (5.43) & 35.30 (7.14) \\
1.70 & 31.56 (4.10) & 34.73 (6.13) & 33.71 (5.60) & 36.11 (6.31) \\
1.90 & 31.56 (4.10) & 34.73 (6.13) & 33.71 (5.63) & 36.00 (6.79) \\
\hline
\end{tabular}
\caption{Average prediction SSE (SD) by method in ten partitions of photosynthetic capacity data with $p=2156$ and $n=94$}
\label{tab:table_Jmax_sse}
\end{table*}
In Figure~\ref{fig:time_plot_Jmax} we display the time comparison between our method and the BayesBridge method \citep{PolsonScottBayesBridge} for the variable $\mathcal{J}_{\max}$. This only refers to the training portion. In this figure, it is clear how our method runs in about an eighth of the time of the competing method.
Furthermore, our results show that statistical accuracy is similar in both methods, see Table~\ref{tab:table_Jmax_sse}. In this table, we also show the estimated SURE, and how it is mostly within one standard deviation of the SSE.
\section{Conclusion}\label{sec:Conclusion}
We have proposed a new \emph{non-iterative} approach for fitting the Bridge regression model, by selecting the tuning parameter through a one-dimensional numerical minimization of SURE. Once the tuning parameter is selected, using the latent Gaussian representation of the exponential power distribution yields the desired posterior moments in a tractable form that may be evaluated using vectorized vanilla Monte Carlo routines with well-behaved variances for the desired quantities. The construction of the Bridge regression estimate then follows from the equivalence between the Bayesian maximum a posteriori estimate and the penalized optimization problem stated at the very outset of this paper. Consequently, our approach is non-iterative, yielding substantial computational gains over both fully Bayesian MCMC approaches as well as EM or coordinate descent algorithms for finding the maximum a posteriori estimate.
Throughout our calculations in this paper, we have assumed i.i.d. Gaussian errors with known variance $\sigma^2$. If this condition is violated, SURE is not necessarily an unbiased estimate of the out of sample prediction risk \citep{stein1981estimation}. For our simulations, we worked with the true known $\sigma^2$ and for the spectroscopic data analysis we standardized both the predictors and responses and assumed i.i.d. standard normal error terms. The original paper by \citet{stein1981estimation} outlines a strategy for dealing with the unknown error variance case and shows it is still possible to construct an unbiased estimate of the prediction risk in the $n$-means case. However, in linear regression models under heteroskedastic normal or possibly non-normal errors this problem is still open, with some recent progress by \citet{xie2012sure}. Empirical and theoretical extensions of the techniques developed in this paper to potential violations of the modeling assumptions should be a promising direction for future works.
\section*{Supplementary Material}
The Supplementary Material contains additional simulation results. Computer code is publicly available from GitHub at: \url{https://github.com/loriaJ/Sure-tuned_BridgeRegression}
\section{Proofs}
\subsection{Proof of Lemma \ref{le:equivalence_between_models_n_means}}\label{pf:equivalence_between_models_n_means}
Using Equation~\eqref{eq:StableNormalMixture}, we have that the exponential power prior satisfies:
\begin{align*}
p_\nu(\beta_i)\propto & \exp(-2^\alpha\nu^{\alpha}\lvert\beta_i\rvert^\alpha)\\
= & \int_0^\infty p_\nu(\beta_i\mid t_i)t_i^{-1/2}p_L(t_i)dt_i,\\
\propto & \int_0^\infty p_\nu(\beta_i\mid t_i)p_T(t_i)dt_i,
\end{align*}
where $p_\nu(\beta_i\mid t_i) = \exp(-\beta_i^2t_i/(2\nu)) (2\pi t_i/\nu)^{-1/2}$, and $p_T$ corresponds to the density of $T_i\overset{iid}{\sim}PS^+(\alpha/2,1/2)$, which by definition is proportional to $t_i^{-1/2}p_L(t_i)$. This completes the proof.
\subsection{Proof of Theorem \ref{th:n_means_moments}}\label{pf:n_means_moments}
For this proof we omit the sub-index $i$.
From Lemma~\ref{le:equivalence_between_models_n_means}, we integrate out $\beta$, and obtain $y\mid T \sim \mathcal{N}(0,\sigma^2 + \nu T^{-1})$, a property of the normal hierarchical model. This means that the marginal is given by:
\begin{align*}
m_\nu(y)=\int_0^\infty p(y\mid t)p_T(t)dt,
\end{align*}
where $p_T(t)$ is the density of $T\sim PS^+(\alpha/2,1/2)$. Now, we show that it is bounded, as follows:
\begin{align*}
m_\nu(y) & = \int_{\mathbb{R}^+}\frac{\exp(-(y^2/2)(\sigma^2+\nu t^{-1})^{-1})}{(2\pi)^{1/2}(\sigma^2+\nu t^{-1})^{1/2}}p_T(t)dt,\\
&< \frac{1}{(2\pi)^{1/2}}\int_{\mathbb{R}^+}\left(\frac{t}{\sigma^2t+\nu}\right)^{1/2}p_T(t)dt,\\
& < (2\pi\sigma^2)^{-1/2},
\end{align*}
where the first inequality follows by upper bounding the exponential term by one, and the second inequality follows from: $\sigma^2t(\sigma^2t+\nu)^{-1}<1$, and $\int_{0}^\infty p_T(t)dt=1$.
Now, for the posterior moment, we use iterated expectations as follows:
\begin{align*}
\mathbb{E}[\beta\mid y]
= & \mathbb{E}[ \mathbb{E}[\beta\mid y,t]\mid y] \\
= & \mathbb{E}[\nu y(\sigma^2T+\nu)^{-1} \mid y], \\
= & \frac{1}{m_\nu(y)}\int_{0}^\infty \frac{\nu y}{\sigma^2t+\nu}p(y\mid t)p_T(t)dt,
\end{align*}
where on the second line we use that: $\beta\mid y,T\sim \mathcal{N}(\nu y(\sigma^2T+\nu)^{-1},\sigma^2\nu(\sigma^2T+\nu)^{-1})$, by Lemma~\ref{le:equivalence_between_models_n_means} and the normal hierarchical model, and the third line follows by Bayes rule. Similarly, for the second posterior moment:
\begin{align*}
\mathbb{E}[\beta^2\mid y]
= & \mathbb{E}[ \mathbb{E}[\beta^2\mid y,T]\mid y] \\
= & \mathbb{E}[ \sigma^2\nu(\sigma^2 T + \nu)^{-1} + (\nu y(\sigma^2T+\nu)^{-1})^2 \mid y] \\
= & \frac{1}{m_\nu(y)}\int_{0}^\infty \left(\frac{\sigma^2\nu}{\sigma^2 t + \nu} + \left(\frac{\nu y}{\sigma^2t+\nu}\right)^2\right)p(y\mid t)p_T(t)dt.
\end{align*}
\subsection{Proof of Lemma~\ref{le:marginal_bounded}}\label{pf:bounded_marginal}
Define the positive constant:
\begin{align*}
C_{(\sigma^2,\nu)} = (2\pi\sigma^2)^{-1/2}\int_{0}^{\infty}\left(\frac{t}{ t+\nu(\sigma^2)^{-1}}\right)^{1/2}p_T(t)dt.
\end{align*}
Since the density $p_T$ is strictly positive the constant must be positive. It is finite since $t/(t+a)<1$ for all $a>0$. Now, by definition of the marginal:
\begin{align*}
m_\nu(y) & = (2\pi)^{-1/2}\int_{0}^\infty \exp\left(-y^2\frac{t}{2(\sigma^2t+\nu)}\right)\\
&\times\left(\sigma^2+\nu t^{-1}\right)^{-1/2}p_T(t)dt,\\
& = \exp\left(\frac{-y^2}{2\sigma^2}\right)\int_{0}^\infty\exp\left(+y^2\frac{\nu}{2\sigma^2(\sigma^2t+\nu)}\right)(2\pi)^{-1/2}\left(\sigma^2+\nu t^{-1}\right)^{-1/2}p_T(t)dt,\\
& > (2\pi)^{-1/2}\exp\left(\frac{-y^2}{2\sigma^2}\right)\int_0^\infty\left(\sigma^2+\nu t^{-1}\right)^{-1/2}p_T(t)dt\\
& = \exp\left(\frac{-y^2}{2\sigma^2}\right) C_{(\sigma^2,\nu)},
\end{align*}
where the second line follows by using inside the exponential that $t/(t+b) = 1 - b/(t+b)$ and appropriately multiplying by $-y^2/(2\sigma^2)$
and the last line follows by using the definition of $C_{(\sigma^2,\nu)}$ and that an exponential of a positive value is greater than 1.
\subsection{Proof of Theorem~\ref{th:finite_var}}\label{pf:th_finite_var}
Again, we omit the $i$. For simplicity call: $m_j=p(y\mid T_j)$, and $\Bar{m}= (m_1+\dots+m_J)/J$. Note that $\mathbb{E}[\Bar{m}\mid y] = \mathbb{E}[m_j\mid y]=m_\nu(y)$, which proves that $m_\nu(y)_\mathcal{T}$ is an unbiased estimate. Next, let $s_j = \nu y(\sigma^2T_j+\nu)^{-1}p(y\mid T_j)$ and $\Bar{s} = (s_1+\dots+s_J)/J$.
By definition, we have that:
\begin{align*}
\mathbb{E}[\Bar{s}\mid y] & = \mathbb{E}[s_j\mid y]\\
&= m_\nu(y)\mathbb{E}[\mathbb{E}[\beta\mid y,T]\mid y]\\
&= m_\nu(y)\mathbb{E}[\beta\mid y]
\end{align*}
where we used Bayes' rule and iterated expectations. For what follows, we define $\mu_s = m_\nu(y)\mathbb{E}[\beta\mid y]$. Next, using independence of the $T_j$, we have that:
\begin{align*}
\Var[m_\nu(y)_\mathcal{T}\mid y] & = \frac{1}{J^2}\sum_{j=1}^J \Var(m_j\mid y)\\
&= J^{-1}\Var(m_j\mid y).
\end{align*}
Then, we have:
\begin{align*}
\Var(m_j\mid y) &< \mathbb{E}[m_j^2\mid y]\\
&=\int_{0}^\infty p(y\mid t)^2p_T(t)dt\\
&= (2\pi)^{-1}\int_{0}^\infty \exp[-y^2(\sigma^2 + \nu t^{-1})^{-1}] (\sigma^2 + \nu t^{-1})^{-1}p_T(t)dt\\
& < (2\pi\sigma^2)^{-1}.
\end{align*}
The first inequality follows from the usual formula for the variance, the second equality from the definition of a normal random variable, and the second inequality follows since $\exp(-y^2(\sigma^2+\nu t^{-1})^{-1}) <1$, and since $(\sigma^2+\nu t^{-1})^{-1}=t/(\sigma^2t+\nu)<1/\sigma^2$. This finishes the first part of the proof.
Now, for the second part:
\begin{align*}
\Var(s_j\mid y) &\leq \mathbb{E}[s_j^2\mid y],\\
& = \int_{0}^\infty \nu^2 y^2p(y\mid t)^2(\sigma^2 t + \nu)^{-2}p_T(t)dt,\\
& < y^2 \int_{0}^\infty p(y\mid t)^2 p_T(t)dt,\\
& < (2\pi\sigma^2)^{-1} y^2,
\end{align*}
where the first inequality follows by the variance formula: $\Var(X)=\mathbb{E}(X^2)-\mathbb{E}(X)^2$, the second inequality follows from the fact that $\nu/(\nu + a)< 1$, for $a>0$, and the last inequality follows from the proof of finite variance of the marginal. This means that for large enough $J$, $\sqrt{J}(\Bar{s}-\mu_s)\sim \mathcal{N}(0,\sigma_s^2)$, and $\sqrt{J}(\Bar{m}-\mu_m)\sim \mathcal{N}(0,\sigma_m^2)$.
Further, define $\sigma_{sm} = \mathrm{Cov}(s_j,m_j\mid y)$, and we have: $\lvert \sigma_{sm}\rvert < \lvert y\rvert (2\pi\sigma^2)^{-1}$. Using the delta method we have that:
\begin{equation*}
\sqrt{J}\left(\frac{\Bar{s}}{\Bar{m}} - \frac{\mu_s}{\mu_m}\right)\sim\mathcal{N}(0, \nabla h(\mu_s,\mu_m)^T \Sigma \nabla h(\mu_s,\mu_m)),
\end{equation*}
where $h(a,b) = a/b$, and
\begin{equation*}
\Sigma = \begin{bmatrix}
\sigma_s^2 & \sigma_{sm}\\
\sigma_{sm} & \sigma_m^2
\end{bmatrix}.
\end{equation*}
First note that this means that our estimate is asymptotically unbiased, as $\mu_s/\mu_m=\mathbb{E}[\beta\mid y]$. Next, since $\nabla h(a,b) = (1/b,-a/b^2)$, the variance term above becomes:
\begin{align*}
\nabla h(\mu_s,\mu_m)^T \Sigma \nabla h(\mu_s,\mu_m) &= \sigma^2_s\mu_{m}^{-2} - 2\sigma_{sm}\mu_s\mu_m^{-3} + \sigma^2_m\mu_s^2\mu_{m}^{-4}.
\end{align*}
Plugging in the values for $\mu_s,\mu_m$ and using the inequalities we derived before, this becomes:
\begin{align*}
\Var(\mathbb{E}[\beta\mid y]_\mathcal{T}\mid y) &= m_\nu(y)^{-2}(\sigma_s^2 - 2 \sigma_{sm}\tilde{\beta}+\tilde{\beta}^2)\\
&<(2\pi\sigma^2)^{-1}m_\nu(y)^{-2}(\lvert y\rvert+ \tilde{\beta})^2,
\end{align*}
which gives the second bound we stated.
Next, denote $v_j = p(y\mid t_j)(\sigma^2\nu(\sigma^2t_j+\nu)^{-1} + y^2\nu^2(\sigma^2t_j+\nu)^{-2})$, and $\Bar{v}=(v_1+\dots + v_J)/J$. Then:
\begin{align*}
\Var(v_j\mid y)& \leq \mathbb{E}[v_j^2\mid y]\\
&= \int_0^\infty p(y\mid t)^2\left(\sigma^2\nu(\sigma^2t+\nu)^{-1} + y^2\nu^2(\sigma^2t+\nu)^{-2}\right)^2p_T(t)dt,\\
&< \int_0^\infty \left( \sigma^2 + y^2\right)^2p(y\mid t)^2 p_T(t)dt\\
& < (2\pi\sigma^2)^{-1}(\sigma^2 + y^2)^2 \int_0^\infty p_T(t)dt,
\end{align*}
where the second inequality follows since for any positive $a$: $\nu/(a+\nu) < 1,\nu^2/(\nu + a)^2<1$, and since $g(x)=x^2$ is a monotone function for $x>0$. The third inequality follows by using the first part of the proof. Similarly, $\mathrm{Cov}(v_j,m_j\mid y) < (2\pi\sigma^2)^{-1}(\sigma^2 +y^2)$. The rest of the proof follows as the second part, by replacing the $s$ sub-indices with $v$ sub-indices and we obtain:
\begin{align*}
\Var(\mathbb{E}[\beta^2\mid y]_\mathcal{T} \mid y)&= (2\pi\sigma^2)^{-1}m_\nu(y)^{-2}((\sigma^2 + y^2)^2 -2(\sigma^2+y^2)\tilde{\beta}^{(2)} + (\tilde{\beta}^{(2)})^2)\\
& = (2\pi\sigma^2)^{-1}m_\nu(y)^{-2}(\sigma^2 + y^2-\tilde{\beta}^{(2)})^2.
\end{align*}
\subsection{Proof of Lemma \ref{le:equivalence_between_models}} \label{pf:equivalence_between_models}
Using Equation~\eqref{eq:StableNormalMixture}, we have that the exponential power prior satisfies:
\begin{align*}
p_\nu(\beta_i)\propto & \exp(-\nu\lvert\beta_i\rvert^\alpha)\\
= & \int_0^\infty p_\nu(\beta_i\mid t_i)t_i^{-1/2}p_L(t_i)dt_i,\\
= & \int_0^\infty p_\nu(\beta_i\mid t_i)p_T(t_i)dt_i,
\end{align*}
where $\beta_i\mid t_i \overset{indep}{\sim}\mathcal{N}(0,\nu t_i^{-1})$, and the density $p_T$ corresponds to $T\sim PS^+(\alpha/2,1/2)$, which completes the proof as the likelihood doesn't change by adding the information of $T$.
\subsection{Proof of Theorem \ref{th:multivariate_case}} \label{pf:multivariate_lemma}
Using Lemma~\ref{le:equivalence_between_models}, we get:
\begin{align*}
m_\nu(y) = & \int_{\mathbb{R}^p}\int_{(\mathbb{R}^{+})^p} p(y\mid \beta,\mathbf{t})\prod_{i=1}^pp(\beta_i\mid t_i)p_T(t_i)d\beta_i dt_i\\
= &\int_{(\mathbb{R}^+)^p} \int_{\mathbb{R}^p}p(y\mid \beta,\mathbf{t})\prod_{i=1}^p p(\beta_i, t_i) d\beta dt_i, \\
= & \int_{(\mathbb{R}^+)^p}p(y\mid \mathbf{t}) \int_{\mathbb{R}^p}p(\beta\mid y,\mathbf{t}) d\beta\prod_{i=1}^p p_T(t_i) dt_i,\\
= & \int_{(\mathbb{R}^+)^p}p(y\mid \mathbf{t})\prod_{i=1}^p p_T(t_i) dt_i,
\end{align*}
where $y\mid \mathbf{t}\sim\mathcal{N}(0,\nu V_{\mathbf{t},\nu}), V_{\mathbf{t},\nu} = X\Lambda_\mathbf{t}X^T+\nu^{-1} \Sigma$, by using the fact that:
\begin{align*}
(\beta,y)^T\mid \mathbf{t} \sim \mathcal{N}\left((0_n,0_p)^T,
\begin{bmatrix}
\Lambda_\mathbf{t} & \Lambda_\mathbf{t}X^T\\
X\Lambda_\mathbf{t} & \nu V_{\mathbf{t},\nu}
\end{bmatrix} \right).
\end{align*}
We also need to prove that $m_\nu(y)$ is finite. For this we do:
\begin{align*}
m_\nu(y) & < (2\pi)^{-p/2}\int_{(\mathbb{R}^+)^p}\det(\nu V_{\mathbf{t},\nu})^{-1/2}\prod_{i=1}^pp_T(t_i)dt_i,\\
& <(2\pi)^{-p/2}\det(\Sigma)^{-1/2}\int_{(\mathbb{R}^+)^p}\prod_{i=1}^pp_T(t_i)dt_i,
\end{align*}
where the first inequality follows since the exponential of a negative value is bounded by one, the second inequality since $\Sigma \preceq \nu V_{t,\nu}$, which means that $\det(\Sigma)< \det(\nu V_{t,\nu})$. Similarly,
\begin{align*}
\mathbb{E}[\beta\mid y] = & \mathbb{E}[\mathbb{E}[\beta\mid y,T]\mid y] \\
= & \frac{1}{m_{\nu}(y)}\int_{(\mathbb{R}^+)^p}\mathbb{E}[\beta\mid y,\mathbf{t}]p_\nu(y\mid \mathbf{t})\prod_{i=1}^p p_T(t_i)dt_i,
\end{align*}
by applying Lemma \ref{le:equivalence_between_models}. The expectation inside the integral can be computed using $\beta\mid y,\mathbf{t} \sim \mathcal{N}(\Lambda_\mathbf{t}X^TV_{\mathbf{t},\nu}^{-1}y,(\Lambda_{\mathbf{t}}^{-1} + X^T\Sigma^{-1} X )^{-1})$, using the posterior normal formula. Now, for the second moment:
\begin{align*}
\mathbb{E}[\beta\beta^T\mid y] & = \mathbb{E}[\mathbb{E}[\beta\beta^T\mid y,T]\mid y] \\
& = \frac{1}{m_{\nu}(y)}\int_{(\mathbb{R}^+)^p}\mathbb{E}[\beta\beta^T\mid y,\mathbf{t}]p(y\mid \mathbf{t}) \prod_{i=1}^p p_T(t_i)dt_i.
\end{align*}
We comment on the invertibility of $V_{\mathbf{t},\nu}$, which was assumed throughout the proof. Consider $z\in\mathbb{R}^{n}\setminus\{\vec 0\}$, we name $w=X^Tz$, and $z^TV_{\mathbf{t},\nu}z = z^TX \Lambda_\mathbf{t} X^Tz + \nu^{-1}z^T\Sigma z=w^T\Lambda_\mathbf{t}w + \nu^{-1}z^T\Sigma z$; since $\Lambda_\mathbf{t}$ is a diagonal matrix with positive entries the first term is non-negative, and the second term is positive because $\Sigma$ is positive definite. This implies that $V_{\mathbf{t},\nu}$ is positive definite, as is required for a proper variance matrix; which concludes the proof.
\subsection{Proof of Corollary \ref{cor:simpler_formulas}}\label{pf:simpler_formulas}
The first equality follows from ${\mathbb{E}[X\beta\mid y,\mathbf{t}] = X\mathbb{E}[\beta\mid y,\mathbf{t}]}$, and using Theorem~\ref{th:multivariate_case}. The second equality similarly follows from Theorem~\ref{th:multivariate_case} and using the Woodbury matrix identity.
\subsection{Proof of Lemma \ref{le:bounded_marginal_vector}}\label{pf:bounded_marginal_vector}
Define:
\begin{align*}
C_{(\Sigma,\nu,X)}& = \int_{(\mathbb{R}^+)^p}\det(\nu X\Lambda_\mathbf{t}X^T +\Sigma)^{-1/2}(2\pi)^{-p/2}\prod_{i=1}^pp_T(t_i)dt_i.
\end{align*}
Since the matrix inside the determinant is positive definite, this determinant will be positive, which implies that $C_{(\Sigma,\nu,X)}>0$. This integral will be finite since $\nu V_{\mathbf{t},\nu}\succeq \Sigma$, which implies that $\det(\nu V_{\mathbf{t},\nu}) > \det(\Sigma)$. That is: $\det(\nu V_{\mathbf{t},\nu})^{-1/2}<\det(\Sigma)^{-1/2}$. By definition of the marginal:
\begin{align*}
m_\nu(y) & = \int_{(\mathbb{R}^+)^p}p(y\mid \mathbf{t})\prod_{i=1}^pp_T(t_i)dt_i,\\
& = \exp(-y^T\Sigma^{-1}y/2)\int_{(\mathbb{R}^+)^p}\exp(B_y^T(\Lambda_{\mathbf{t}}^{-1} + X^T\Sigma^{-1}X)^{-1}B_y/2)\det(\nu V_{\mathbf{t},\nu})^{-1/2}\prod_{i=1}^pp_T(t_i)dt_i,\\
& > \exp(-y^T\Sigma^{-1}y/2)\int_{(\mathbb{R}^+)^p}
\det(\nu V_{\mathbf{t},\nu})^{-1/2}
\prod_{i=1}^pp_T(t_i)dt_i,\\
& = \exp(-y^T\Sigma^{-1}y/2)C_{(\Sigma,\nu,X)},
\end{align*}
where the second line follows by using Woodbury's formula and defining the $p-$dimensional real vector $B_y= X^T\Sigma^{-1}y$, the first inequality follows since the inverse of a positive definite matrix is positive definite and the exponential of a positive value will be greater than one.
\subsection{Proof of Theorem \ref{th:finite_var_vector}}\label{pf:finite_var_vector}
Define $m_j=p(y\mid \mathbf{t}^j)$, and $\Bar{m}=(m_1+\dots+m_J)/J$. As a direct consequence of this, $\mathbb{E}[\Bar{m}]=\mathbb{E}[m_j]=m_\nu(y)$, which means $\Bar{m}$ is an unbiased estimator of $m_\nu(y)$. Next, let $s_j= p(y\mid \mathbf{t}^j)\Lambda_{\mathbf{t}^j}X^TV^{-1}_{\mathbf{t}^{j},\nu}y$ and $\Bar{s}=(s_1+\dots+s_J)/J$.
It follows that:
\begin{align*}
\mathbb{E}[\Bar{s}] & = \mathbb{E}[s_j] \\
& =\int_{(\mathbb{R}^+)^p}p(y\mid \mathbf{t})\Lambda_\mathbf{t}X^TV_{\mathbf{t},\nu}^{-1}y\prod_{i=1}^pp_T(t_i)dt_i \\
& = \tilde{\beta}m_\nu(y).
\end{align*}
For clarity of what follows, we denote the mean of $\Bar{s}$ by $\mu_s=\tilde{\beta}m_\nu(y)$. We proceed by proving the variance bounds for $m_j$ and $s_j$. The bound for $m_j$ will imply the first bound we state in the theorem, and the second bound we state will follow by using the delta method. For this we will need to know that: $\lVert (\nu V_{\mathbf{t},\nu})^{-1}\rVert_2 \leq \lVert \Sigma^{-1}\rVert_2$, and $\det(\nu V_{\mathbf{t},\nu})>\det(\Sigma)$, both of which follow from the fact that $\Sigma \preceq \nu V_{\mathbf{t},\nu}$.
Now, the first bound:
\begin{align*}
\Var(m_\nu(y)_\mathcal{T}\mid y) &= J^{-1}\Var(m_j)\\
&< J^{-1}\mathbb{E}[m_j^2\mid y]\\
& = J^{-1}\int_{(\mathbb{R}^+)^p}p_\nu(y\mid \mathbf{t})^2 \prod_{i=1}^p p_T(t_i)dt_i\\
& < J^{-1}\int_{(\mathbb{R}^+)^p}(2\pi)^{-p} \det(V_{\mathbf{t},\nu}^{-1})\prod_{i=1}^p p_T(t_i)dt_i\\
& \leq J^{-1}(2\pi)^{-p}\int_{(\mathbb{R}^+)^p} \det(\Sigma^{-1}) \prod_{i=1}^p p_T(t_i)dt_i\\
&= J^{-1}(2\pi)^{-p}\det(\Sigma^{-1}),
\end{align*}
where the first inequality is a well-known property of the variance, the next equality is the definition of expectation, the following inequality uses the bound mentioned in the previous paragraph, and the final equality holds since the $p_T(t_i)$ are probability density functions which integrate to $1$.
Now, we repeat this for the norm of $s_j$:
\begin{align*}
\Var(\lVert s_j \rVert_2\mid y) &< \mathbb{E}[\lVert s_j\rVert_2^2 \mid y],\\
&= \int_{(\mathbb{R}^+)^p}p(y\mid \mathbf{t})^2 \lVert \Lambda_\mathbf{t}X^TV_{\mathbf{t},\nu}^{-1}y\rVert_2^2\prod_{i=1}^p p_T(t_i)dt_i\\
& < (2\pi)^{-p}\lVert y\rVert^2_2 \int_{(\mathbb{R}^+)^p}\det(\nu V_{\mathbf{t},\nu}^{-1})\lVert\Lambda_\mathbf{t}\rVert_2^2\lVert X^T\rVert_2^2\lVert V_{\mathbf{t},\nu}^{-1}\rVert_2^2 \prod_{i=1}^p p_T(t_i)dt_i\\
& < (2\pi)^{-p}\nu^2\det(\Sigma^{-1})\lVert\Sigma^{-1}\rVert_2^{2}\lVert y \rVert_2^2\lVert X^T\rVert_2^2 \int_{(\mathbb{R}^+)^p}\sum_{i=1}^p t_{i}^{-2}\prod_{i=1}^pp_T(t_i)dt_i\\
& < \det(\Sigma^{-1})(2\pi)^{-p}\nu^2 pK_2\lVert\Sigma^{-1}\rVert_2^{2}\lVert y \rVert^2_2\lVert X^T\rVert^2_2.
\end{align*}
Similar to before, the first inequality follows by a known property of the variance, next is using the definition of expectation. The second inequality follows since the product of the norms is greater than the norm of the product. The third inequality is by using the inequalities of the first paragraph. Next is by using the definition of the Euclidean-norm. Finally, we have $K_2=\int_{\mathbb{R}^+}t^{-2}p_T(t)dt<\infty$, since the negative moments of $\alpha/2-$stable random variables are finite.
This means that the variance of both $\Bar{s}$ and $\Bar{m}$ are finite. By central limit theorem, for big enough $J$ we have:
\begin{align*}
\sqrt{J}(\Bar{m}-m_\nu(y))&\sim\mathcal{N}(0,\sigma_m^2), \\
\sqrt{J}(\Bar{s} - \mu_s)&\sim\mathcal{N}(0,\Sigma_s).
\end{align*}
We need a bound on the norm of the covariance between $m_j$ and $s_j$:
\begin{align*}
\lVert \mathrm{Cov}(m_j,s_j\mid y)\rVert_2 &= \left\lVert \int_{(\mathbb{R}^+)^p}p(y\mid \mathbf{t})^2 \Lambda_\mathbf{t}X^TV_{\mathbf{t},\nu}^{-1}y\prod_{i=1}^pp_T(t_i)dt_i \right\rVert_2\\
& < \int_{(\mathbb{R}^+)^p}p(y\mid \mathbf{t})^2 \lVert\Lambda_\mathbf{t}X^TV_{\mathbf{t},\nu}^{-1}y\rVert_2\prod_{i=1}^pp_T(t_i)dt_i\\
& < (2\pi)^{-p}\det(\Sigma^{-1}) \int_{(\mathbb{R}^+)^p}\lVert\Lambda_\mathbf{t}X^TV_{\mathbf{t},\nu}^{-1} y\rVert_2\prod_{i=1}^pp_T(t_i)dt_i\\
& < (2\pi)^{-p}\det(\Sigma^{-1})\nu\lVert X^T\rVert_2 \lVert \Sigma^{-1}\rVert_2 \lVert y \rVert_2\int_{(\mathbb{R}^+)^p} \sum_{i=1}^p \lVert t_i^{-1}\rVert_2\prod_{i=1}^pp_T(t_i)dt_i\\
& = (2\pi)^{-p}\det(\Sigma^{-1}) \nu\lVert X^T\rVert_2 \lVert y\rVert_2\lVert \Sigma^{-1}\rVert_2 p K_1,
\end{align*}
where the first line is definition of the $p-$ dimensional covariance, second line is using the fact that the norm of an integral is smaller than the integral of the norm, next is using the bounds established above for $p(y\mid \mathbf{t})$. The third inequality follows by the submultiplicative property of the norm and the triangle inequality on $\Lambda_\mathbf{t}$. The last line follows from independence of the $t_i$'s, and defining $K_1 = \int_{\mathbb{R}^+}t^{-1}p_T(t)dt$.
By definition of $\Bar{m},\Bar{s}$ we have the vector: $\mathrm{Cov}(\Bar{m},\Bar{s}\mid y) = J^{-1}\mathrm{Cov}(m_j,s_j\mid y)=J^{-1} \sigma_{s,m}$. As mentioned before, we use the delta method:
\begin{align*}
\sqrt{J}\left(\frac{\Bar{s}}{\Bar{m}} - \frac{\mu_s}{\mu_m}\right)\sim& \mathcal{N}\left(0,g^T \Sigma^* g\right),\\
\text{where: }\Sigma^* = & \begin{pmatrix}
\Sigma_s & \sigma_{s,m}^T\\
\sigma_{s,m} & \sigma_m^2
\end{pmatrix}\text{,}\\
h(a,b) = & (a_1/b,\dots,a_p/b)\text{, and}\\
g =& \nabla h(\mu_s,m_\nu(y)).
\end{align*}
This means that:
\begin{align*}
\nabla h(a,b)=\left(1/b,\dots,1/b,-\sum_{i=1}^p a_i/b^2\right),
\end{align*}
which implies that:
\begin{align*}
\Var(\Bar{s}/\Bar{m}) = & J^{-1}\left(m_\nu(y)^{-2}\Sigma_s + \sigma^2_m \left(\sum_{i=1}^p \mu_{s,i}\right)^2m_\nu(y)^{-4}-2\sum_{i=1}^p\sigma_{s,m,i}\mu_{s,i}m_\nu(y)^{-3}\right).
\end{align*}
The rest of this bound follows by using Cauchy-Schwarz inequality and the bounds proved before:
\begin{align*}
\Var(\lVert \mathbb{E}[\beta\mid y]_\mathcal{T}\rVert_2) & < J^{-1}m_\nu(y)^{-2}(\nu^2 pK_2\lVert\Sigma^{-1}\rVert_2^{2}\lVert y \rVert^2_2\lVert X^T\rVert^2_2 + \lVert\tilde\beta\rVert^2_2 + 2\nu\lVert X^T\rVert_2 \lVert y\rVert_2\lVert \Sigma^{-1}\rVert_2 p K_1 \lVert\tilde{\beta}\rVert_2).
\end{align*}
Now, we need to verify that $\mathbb{E}[\beta\beta^T\mid y]_\mathcal{T}$ will be asymptotically unbiased, and we also want to bound its variance. With this in mind, define:
\begin{align*}
v_j = p(y\mid \mathbf{t}^j)\left(\nu\Lambda_{\mathbf{t}^j} - \nu\Lambda_{\mathbf{t}^j}X^TV_{\mathbf{t}^j,1}^{-1}X\Lambda_{\mathbf{t}^j}+\Lambda_{\mathbf{t}^j}X^T V_{\mathbf{t}^j,\nu}^{-1}yy^TV_{\mathbf{t}^j,\nu}^{-1} X\Lambda_{\mathbf{t}^j}\right),
\end{align*}
and $\Bar{v}=(v_1+\dots+v_J)/J$. Then we have,
\begin{align*}
\mathbb{E}[\Bar{v}] &= \mathbb{E}[v_j]\\
&= \int_{(\mathbb{R}^+)^p}p(y\mid \mathbf{t})\left(\nu\Lambda_{\mathbf{t}} - \nu\Lambda_{\mathbf{t}}X^TV_{\mathbf{t},1}^{-1}X\Lambda_{\mathbf{t}}+\Lambda_{\mathbf{t}}X^T V_{\mathbf{t},1}^{-1}yy^TV_{\mathbf{t},1}^{-1} X\Lambda_{\mathbf{t}}\right) \prod_{i=1}^p p_T(t_i)dt_i\\
& = \int_{(\mathbb{R}^+)^p} (\Var(\beta\mid y,T)+ \mathbb{E}(\beta\mid y,\mathbf{t})\mathbb{E}(\beta^T\mid y,\mathbf{t}))p(y\mid \mathbf{t})\prod_{i=1}^p p_T(t_i)dt_i\\
& = \int_{(\mathbb{R}^+)^p} \mathbb{E}[\beta\beta^T\mid y,\mathbf{t}]p(y\mid \mathbf{t})\prod_{i=1}^p p_T(t_i)dt_i\\
& = m_\nu(y)\mathbb{E}[\mathbb{E}[\beta\beta^T\mid y,\mathbf{t}]\mid y]\\
& = m_\nu(y)\mathbb{E}[\beta\beta^T\mid y],
\end{align*}
where the first equality follows from definition of $\Bar{v}$, the second one from definition of $v_j$. The third one, using the formulas derived in Theorem~\ref{th:multivariate_case}. The next equality by using the formula $\mathbb{E}[\beta\beta^T]= \Var(\beta) + \mathbb{E}[\beta]\mathbb{E}[\beta]^T$, finally by using Bayes' Theorem and the iterated expectations property. Similarly, we define ${\mu_v = m_\nu(y)\mathbb{E}[\beta\beta^T\mid y]}$.
Now, we proceed to prove the bound, as follows.
\begin{align*}
\Var(\lVert v_j\rVert\mid y) &< \mathbb{E}[\lVert v_j\rVert^2 \mid y ]\\
& = \int_{(\mathbb{R}^+)^p}p(y\mid \mathbf{t})^2 \left\lVert\nu\Lambda_{\mathbf{t}} - \nu\Lambda_{\mathbf{t}}X^TV_{\mathbf{t},\nu}^{-1}X\Lambda_{\mathbf{t}}+ \Lambda_{\mathbf{t}}X^T V_{\mathbf{t},\nu}^{-1}yy^TV_{\mathbf{t},\nu}^{-1} X\Lambda_{\mathbf{t}}\right\rVert_2^2\\
& \hspace{2cm}\times\prod_{i=1}^p p_T(t_i)dt_i,\\
& < (2\pi)^{-p}\det(\Sigma^{-1}) \int_{(\mathbb{R}^+)^p}\left(\left\lVert\nu\Lambda_{\mathbf{t}} - \nu\Lambda_{\mathbf{t}}X^TV_{\mathbf{t},\nu}^{-1}X\Lambda_{\mathbf{t}}\right\rVert_2 + \left\lVert\Lambda_{\mathbf{t}}X^T V_{\mathbf{t},\nu}^{-1}yy^TV_{\mathbf{t},\nu}^{-1} X\Lambda_{\mathbf{t}}\right\rVert_2\right)^2\\
& \hspace{3cm}\times \prod_{i=1}^pp_T(t_i)dt_i\\
&< (2\pi)^{-p}\det(\Sigma^{-1}) \int_{(\mathbb{R}^+)^p}\left(\left\lVert\nu\Lambda_{\mathbf{t}}\right\rVert_2+ \lVert \Lambda_\mathbf{t}\rVert_2^2\lVert V_{\mathbf{t},\nu}^{-1}\rVert_2^2\lVert y\rVert_2^2\lVert X\rVert^2_2 \right)^2\\
& \hspace{3cm}\times\prod_{i=1}^p p_T(t_i)dt_i,\\
& < (2\pi)^{-p}\det(\Sigma^{-1})\int_{(\mathbb{R}^+)^p}\left(\nu\left\lVert\Lambda_{\mathbf{t}}\right\rVert_2 +\nu^2 \lVert\Lambda_\mathbf{t}\rVert^2_2 \lVert\Sigma^{-1}\rVert^2_2\lVert X\rVert^2_2 \lVert y\rVert^2_2 \right)^2 \\
& \hspace{3cm}\times\prod_{i=1}^p p_T(t_i)dt_i\\
& <(2\pi)^{-p}\det(\Sigma^{-1})\int_{(\mathbb{R}^+)^p} \left\{\nu\left(\sum_{i=1}^p t_{i}^{-2}\right)^{1/2} +M\sum_{i=1}^pt_i^{-2} \right\}^2\\
&\hspace{3cm}\times\prod_{i=1}^pp_T(t_i)dt_i\\
& < (2\pi)^{-p}\det(\Sigma^{-1}) \int_{(\mathbb{R}^+)^p} \left(\sum_{i=1}^p\nu^2t_i^{-2} +2M\nu\sum_{i=1}^pt_i^{-2}\sum_{i=1}^pt_i^{-1} +M^2 \bigg(\sum_{i=1}^pt_i^{-4}+\sum_{i\neq j}t_i^{-2}t_j^{-2}\bigg)\right)\\
& \hspace{3cm}\times \prod_{i=1}^pp_T(t_i)dt_i\\
& = (2\pi)^{-p}\det(\Sigma^{-1})\left(p\nu^2K_2 + 2M\nu p K_3+ M\nu\frac{p(p-1)}{2}K_2K_1+M^2K_4+M^2p(p-1)K_2^2\right).
\end{align*}
The first line follows from the variance computation formula, the second line follows by definition of expectation, the second inequality uses triangle inequality and the inequality proved above for $p(y\mid \mathbf{t})$, the third inequality by the property that the norm of a product is smaller than the product of the norms and $\Lambda_\mathbf{t} -\Lambda_\mathbf{t}X^TV_{\mathbf{t},1}^{-1}X\Lambda_\mathbf{t} \preceq \Lambda_\mathbf{t}$, the fourth inequality since $V_{\mathbf{t},\nu}^{-1}\preceq \nu\Sigma^{-1}$, next one by using the definition of the norm and defining $M=\nu^2\lVert\Sigma^{-1}\rVert^2_2\lVert X\rVert^2_2 \lVert y\rVert^2_2$, and the last line follows since all the negative moments of $\alpha/2$-stable random variables are finite. This means that the variance of $v_j$ is finite.
Now, we need to compute a bound on the norm of the covariance between $v_j$ and $m_j$, we use the fact that:
\begin{align*}
\lVert v_j\rVert_2 &< (2\pi)^{-p/2}\det(\Sigma^{-1})^{1/2} \left(\nu \left(\sum_{i=1}^pt_i^{-2}\right)^{1/2} + M\sum_{i=1}^pt_i^{-2}\right),
\end{align*}
which follows from the proof on the bound of the variance of $v_j$. Using this inequality we have that
\begin{align*}
\lVert \mathrm{Cov}(v_j,m_j\mid y)\rVert_2 &< \int_{(\mathbb{R}^+)^p} \lVert v_j\rVert_2 m_j\prod_{i=1}^pp_T(t_i)dt_i\\
&< (2\pi)^{-p}\det(\Sigma^{-1})\int_{(\mathbb{R}^+)^p}\left\{\nu \left(\sum_{i=1}^pt_i^{-2}\right)^{1/2}+ M\sum_{i=1}^pt_i^{-2}\right\}\\
&\hspace{2cm} \times \prod_{i=1}^pp_T(t_i)dt_i\\
& < (2\pi)^{-p}\det(\Sigma^{-1})\int_{(\mathbb{R}^+)^p} \left(\nu \sum_{i=1}^pt_i^{-1}+M\sum_{i=1}^{p}t_i^{-2}\right)\\
& \hspace{2cm}\times \prod_{i=1}^pp_T(t_i)dt_i\\
& < (2\pi)^{-p}\det(\Sigma^{-1}) (\nu p K_1+MpK_2),
\end{align*}
where the first two lines are using the inequalities derived above for $v_j$ and $m_j$, next using the fact that the square root is a sub-additive function (i.e., $\sqrt{a+b}<\sqrt{a}+\sqrt{b}$). The last line follows by using the definition of $K_1$ and $K_2$ we have previously established.
For a large enough $J$, applying central limit theorem, $\sqrt{J}(\Bar{v} - \mu_v)\sim \mathcal{N}(0,\Sigma_v)$. The rest of the proof follows by applying the same technique as the bound for the variance of $s_j/m_j$, replacing all the $s$ sub-indices with $v$ sub-indices. Finally,
\begin{align*}
\Var(\lVert \mathbb{E}[\beta\beta^T\mid y]_{\mathcal{T}}\rVert_2 \mid y) &< J^{-1}(2\pi)^{-p}\det(\Sigma^{-1})m_\nu(y)^{-2} (C + 2M_2\lVert\tilde{\beta}^{(2)}\rVert_2
+\lVert\tilde{\beta}^{(2)}\rVert^2_2),
\end{align*}
where $C=p K_2 + 2Mp K_3 + M\frac{p(p-1)}{2}K_2K_1
+M^2K_4+M^2p(p-1)K_2^2$, and $M_2 = \nu p K_1+MpK_2$.
\subsection{Proof of Corollary \ref{cor:svd_decomp}} \label{pf:svd_decomp}
Follows from applying Theorem \ref{th:n_means_moments} with variance $\sigma^2d_i^{-2}$ to each of the $r$ components.
\subsection{Proof of Theorem \ref{th:sure_orthog}} \label{pf:sure_orthog}
We know by definition that $\mathrm{SURE} = \lVert y-\tilde y \rVert^2_2 +2\sigma^2\sum_{i=1}^n \frac{\partial \tilde y_i}{\partial y_i}$. For the first term, the bias, we can use directly the prediction $\tilde y = Z \tilde \gamma = Z \mathbb{E}[\gamma\mid \hat \gamma,\sigma^2D^{-2}]$. For the ``degrees of freedom'', denote $\eta = \frac{d}{d\hat\gamma}\frac{d}{d\hat\gamma^T}\log(m_\nu(\hat\gamma))$; then, by Proposition 1 of \citet{GriffinBrown}:
\begin{align*}
\Var(\gamma\mid \hat \gamma) = & \sigma^2 (Z^TZ)^{-1} + \sigma^4 (Z^TZ)^{-1}\eta (Z^TZ)^{-1}\\
= & \sigma^2 D^{-2}(I + \sigma^2 \eta D^{-2}),\\
\mathrm{tr}(\Var(\gamma\mid \hat \gamma)) = & \sum_i \sigma^2 d_i^{-2}(1+ \sigma^2d_i^{-2}\frac{\partial^2}{\partial\hat\gamma_i^2}\log(m(\hat\gamma)))\\
= & \sum_i \Var(\gamma\mid \hat \gamma)_{ii}.
\end{align*}
Since we only look into the diagonal terms, we can use the trace to summarize it nicely. We have:
\begin{align*}
2\sigma^2\sum_{i=1}^n \frac{d \tilde y_i}{d y_i} = & 2\sigma^2 \mathrm{tr}\Big(\frac{\partial \tilde y}{\partial y}\Big)\\
= & 2\sigma^2\mathrm{tr}\left(Z D^{-1}U^T + \frac{\partial\,\sigma^2 UD^{-1} \nabla_{\hat\gamma}\log(m(\hat\gamma))}{\partial \hat\gamma} \frac{\partial \hat\gamma}{\partial y}\right)\\
= & 2\sigma^2r +2\sigma^4 \mathrm{tr}(D^{-2}\eta)\\
= & 2\sigma^2r +2\sigma^4 \sum_{i=1}^r d_i^{-2}\frac{\partial^2}{\partial\hat\gamma^2_i}\log(m(\hat\gamma))\\
= & 2\sigma^2\sum_{i=1}^r \Var(\gamma\mid \hat\gamma)_{ii}d^2_{i},
\end{align*}
which concludes the proof.
\subsection{Proof of Theorem~\ref{th:sure}:} \label{pf:sure_multivariate}
We know by definition that: $\mathrm{SURE}={\lVert y -\tilde y \rVert_2^2} + {2 \sigma^2\sum_{i=1}^n (\partial \tilde y_i)/(\partial y_i)}$. As in Section~\ref{pf:sure_orthog}, we use the prediction of $\tilde y = X\tilde \beta$. Using $\sum_{i=1}^n(\partial \tilde y_i)/(\partial y_i) = \mathrm{tr}((\partial \tilde y)/(\partial y^T))$, it suffices to find an expression for $(\partial \tilde y)/(\partial y^T)$. By definition of $\tilde y$ we have that:
\begin{align}
\frac{\partial \tilde y}{\partial y^T} = & X \frac{\partial \tilde \beta}{\partial y^T},\nonumber\\
= & X\frac{1}{m(y)} \frac{\partial }{\partial y^T}\int_{\mathbb{R}^p}\beta p(y\mid X,\beta)\pi(\beta)d\beta -\tilde \beta \frac{1}{m(y)}\frac{\partial }{\partial y^T}m(y)\nonumber\\
= & X \frac{1}{m(y)}\int_{\mathbb{R}^p}-\beta (y-X\beta)^T p(y\mid X, \beta) \pi(\beta)d\beta\Sigma^{-1}\nonumber \\
& - X\tilde\beta \frac{1}{m(y)}\int_{\mathbb{R}^p} -(y-X\beta)^Tp(y\mid X,\beta)\pi(\beta)d\beta\Sigma^{-1} \nonumber\\
= & X\mathbb{E}[\beta\beta^T\mid y]X^T\Sigma^{-1}- X\tilde \beta y^T\Sigma^{-1} + X\tilde\beta y^T\Sigma^{-1}-X\tilde\beta\tilde\beta^TX^T\Sigma^{-1} \nonumber\\
= & X\mathbb{E}[\beta\beta^T\mid y]X^T\Sigma^{-1} - X\tilde\beta\tilde\beta^TX^T\Sigma^{-1}.\label{eq:sure_last_line1}
\end{align}
Using the variance formulas: $\Var(X\beta\mid y) = X\Var(\beta\mid y)X^T= X\mathbb{E}[\beta\beta^{T}\mid y]X^T-X\tilde\beta\tilde\beta^TX^T$, Equation \eqref{eq:sure_last_line1} becomes:
\begin{align*}
\frac{\partial \tilde y }{\partial y^T} = & \Var(X\beta\mid y)\Sigma^{-1},
\end{align*}
as required.
\subsection{Proof of Corollary \ref{cor:hats_are_right}}\label{pf:hats_are_right}
Let $p_j = p_\nu(y\mid \mathbf{T}_j)$, and denote with ${p_* = \max_j p_j}$. We want to prove that ${w_j=p_j/(\sum_j p_j)}$. By definition:
\begin{align*}
w_j &= \frac{w_j^*}{\sum_{j}w_j^*}\\
&= \frac{p_j p_{*}^{-1}}{\sum_j p_jp_{*}^{-1}}\\
&= \frac{p_j}{\sum_j p_j},
\end{align*}
where the second line follows by definition of $w_j^*$, and the third line by cancelling $p_*^{-1}$ in the numerator and denominator. This means that $m_\nu(y)_\mathcal{T}=\sum_{j}p_j$. The first equality we wanted to prove follows by multiplying $\mathbb{E}[\beta\mid y]_\mathcal{T}$ by $X$.
Now, the second equality follows by taking the $X$ out of the variance, and applying the variance formula:
\begin{align*}
\widehat{\Var}(X\beta\mid y) & = X\widehat{\Var}(\beta\mid y)X^T \\
&=X(\mathbb{E}[\beta\beta^T\mid y]_\mathcal{T} - \mathbb{E}[\beta\mid y]_\mathcal{T}\mathbb{E}[\beta\mid y]_\mathcal{T}^T)X^T,
\end{align*}
concluding the proof.
\onecolumn
\pagestyle{plain}
\setcounter{page}{0}
\renewcommand\thepage{S.\arabic{page}}
\setcounter{page}{1}
\setcounter{table}{0}
\renewcommand{\thetable}{S\arabic{table}}
\setcounter{figure}{0}
\renewcommand{\thefigure}{S\arabic{figure}}
\setcounter{section}{0}
\renewcommand{\thesection}{S.\arabic{section}}
\begin{center}
{\Large \textbf{Supplementary Material for} \\\emph{SURE-tuned Bridge Regression} \\\textbf{by}\\ \emph{Jorge Lor\'ia and Anindya Bhadra}}\\
\end{center}
\section{Additional Simulation Results}\label{sup:material}
We provide additional simulation results for $n=100,\; p=2000,\;\rho=0.1,0.5$. Figures~\ref{fig:rho01} and~\ref{fig:rho05} make the computational advantages of SURE-Bridge explicit in terms of running time. Next, Tables~\ref{tab:rho01} and~\ref{tab:rho05} display that this computational advantage does not come by sacrificing statistical performance.
\begin{figure}
\caption{Comparison of average running time (s) $\pm$ SD by method, when changing the $\alpha$ parameter. Using $p=2000$, $n=100$, in matrices generated using $\rho = 0.1$}
\label{fig:rho01}
\end{figure}
\begin{table}[!htb]
\centering
\begin{tabular}{rllll}
\hline
$\alpha$ & SURE & SURE-Bridge & BayesBridge & Cross Validation \\
\hline
0.30 & 120.45 (9.45) & 127.99 (20.40) & 126.38 (20.23) & 131.22 (22.96) \\
0.50 & 119.01 (9.44) & 124.51 (19.09) & 123.44 (18.52) & 124.27 (18.00) \\
0.70 & 119.12 (9.07) & 128.72 (18.77) & 127.90 (18.44) & 127.97 (18.24) \\
0.90 & 118.59 (9.68) & 128.34 (18.78) & 127.72 (18.57) & 128.16 (18.76) \\
1.10 & 118.93 (9.41) & 125.02 (18.91) & 124.63 (18.77) & 124.50 (18.36) \\
1.30 & 118.58 (9.68) & 124.90 (16.66) & 124.44 (16.49) & 123.95 (16.37) \\
1.50 & 118.51 (9.75) & 124.80 (16.10) & 124.62 (16.16) & 123.92 (15.87) \\
1.70 & 118.93 (9.51) & 128.89 (20.60) & 128.73 (20.71) & 127.90 (20.25) \\
1.90 & 118.82 (9.68) & 125.18 (18.41) & 125.31 (18.40) & 124.27 (17.78) \\
\hline
\end{tabular}
\caption{Average SSE (SD) by method in one hundred out of sample simulated datasets, by $\alpha$. Using $p=2000$, $n=100$, in a matrix generated with $\rho=0.1$}\label{tab:rho01}
\end{table}
\begin{figure}
\caption{Comparison of average running time (s) $\pm$ SD by method, when changing the $\alpha$ parameter. Using $p=2000$, $n=100$, in matrices generated using $\rho = 0.5$}
\label{fig:rho05}
\end{figure}
\begin{table}[!htb]
\centering
\begin{tabular}{rllll}
\hline
$\alpha$ & SURE & SURE-Bridge & BayesBridge & Cross Validation \\
\hline
0.30 & 111.72 (10.68) & 115.80 (16.10) & 114.81 (15.82) & 118.12 (16.92) \\
0.50 & 110.93 (10.59) & 115.13 (15.03) & 114.54 (15.00) & 116.73 (16.03) \\
0.70 & 110.69 (10.65) & 114.65 (16.32) & 114.04 (15.98) & 115.09 (16.12) \\
0.90 & 110.60 (10.89) & 112.97 (17.22) & 112.69 (17.35) & 113.49 (17.47) \\
1.10 & 110.71 (10.64) & 115.88 (17.02) & 115.28 (16.83) & 116.34 (17.08) \\
1.30 & 110.77 (10.68) & 113.99 (16.99) & 113.46 (16.92) & 114.57 (17.20) \\
1.50 & 110.62 (10.90) & 113.59 (16.35) & 113.14 (16.32) & 113.95 (16.40) \\
1.70 & 110.54 (10.85) & 114.10 (16.63) & 113.72 (16.61) & 114.56 (17.04) \\
1.90 & 110.44 (10.93) & 114.34 (17.42) & 113.98 (17.19) & 115.32 (18.15) \\
\hline
\end{tabular}
\caption{Average SSE (SD) by method in one hundred out of sample simulated datasets, by $\alpha$. Using $p=2000$, $n=100$, in a matrix generated with $\rho=0.5$}
\label{tab:rho05}
\end{table}
\section{Numerical Verifications of Variance Bounds}
To avoid computing the constants that bound the variances in Theorem \ref{th:finite_var}, we compare the ratios of the variances when changing the number of Monte Carlo samples $J$. Specifically, we compute the quantities of interest in $100$ independent simulations, using $n=20,p=50,\rho = 0.3$, with 5 signal coefficients equal to 10 and the rest drawn from centered standard normals. We do this with four different numbers of samples: $J=10,10^2,10^3,10^4$. Next we compute the variance of the quantities, and take the ratios. We display the results in Table~\ref{tab:variance_comparison}.
The first column denotes the ratio between which variances according to their number of simulations, the second column denotes the ratio of $\lVert\mathbb{E}[\beta\mid y]_\mathcal{T}\rVert_2$, and the third one this ratio for $\lVert\Var[\beta\mid y]_\mathcal{T}\rVert_2$.
\begin{table}[!htb]
\centering
\begin{tabular}{rrr}
\hline
Ratio & $\lVert\mathbb{E}[\beta\mid y]_\mathcal{T}\rVert_2$ & $\lVert\Var[\beta\mid y]_\mathcal{T}\rVert_2$\\
\hline
$v_{10^2}/v_{10}$ & 0.40 & 0.26 \\
$v_{10^3}/v_{10^2}$ & 0.08 & 0.12 \\
$v_{10^4}/v_{10^3}$ & 0.13 & 0.23 \\
\hline
\end{tabular}
\caption{Ratio of variances of the norms\label{tab:variance_comparison}}
\end{table}
Theorem~\ref{th:finite_var_vector} indicates that we would expect these ratios to be around $1/10$ for one order of magnitude increase in $J$, which the results confirm.
\end{document} |
\begin{document}
\title{Biderivations, commuting mappings and (2-)local derivations of $\mathbb{N}$-graded Lie algebras of maximal class}
\begin{quotation}
\small\noindent \textbf{Abstract}:
In Fialowski's classification for algebras of maximal class, there are three Lie algebras of maximal class with 1-dimensional homogeneous components: $\mathfrak{m}_0$, $L_1$ and $\mathfrak{m}_2$.
In this paper, we studied their biderivations by considering the embedded mapping to derivation algebras.
Then we determined commuting mappings on these algebras as an application of biderivations. Finally,
local and 2-local derivations for these three algebras were characterized as the given gradings.
\noindent{\textbf{Keywords}}: $\mathbb{N}$-graded Lie algebras of maximal class; biderivations; commuting mappings; (2-)local derivations
\noindent{\textbf{Mathematics Subject Classification 2020}}: 17B40, 17B65, 17B70
\end{quotation}
\setcounter{section}{-1}
\section{Introduction}
In the past decades, infinite-dimensional Lie algebras play important roles in the study of Lie theory because of their applications in mathematical physics.
Among infinite-dimensional Lie algebras, $\mathbb{N}$-graded ones have attached much attention.
The theory of $\mathbb{N}$-graded Lie algebras are closely related to nilpotent Lie algebras.
For example, it is obvious that any finite-dimensional $\mathbb{N}$-graded Lie algebra is nilpotent.
Infinite-dimensional ones are called residual nilpotent Lie algebras.
Shalev and Zelmanov \cite{cc} introduced the definition of coclass of a
finitely generated and residually nilpotent Lie algebra $\mathfrak{g}$, in analogy with the
case of (pro-)$p$-groups, as $cc(\mathfrak{g})=\sum_{i\geq1}(\mathrm{dim}(\mathfrak{g}^{i}/\mathfrak{g}^{i+1})-1)$
(possibly infinity), where $\mathfrak{g}^{i}$ is the lower central series of $\mathfrak{g}$.
Lie algebras of coclass 1 are called Lie algebras of maximal
class (also called narrow or thin Lie algebras).
Fialowski \cite{fc} classified all infinite-dimensional $\mathbb{N}$-graded two-generated Lie
algebras $\mathfrak{g}=\bigoplus_{i=1}^{\infty}\mathfrak{g}_i$
with 1-dimensional homogeneous components $\mathfrak{g}_i$. According to her classification, we obtain that up to isomorphism, there are only three $\mathbb{N}$-graded Lie algebras of maximal class with 1-dimensional homogeneous components: $\mathfrak{m}_0$, $L_1$ and $\mathfrak{m}_2$, where $L_1$ is the positive part of the Witt algebra $\mathrm{Der}(\mathbb{F}[x])$. This result was also rediscovered in \cite{cc}. Furthermore, the cohomology of these algebras with coefficients in the trivial modules was studied in \cite{ft}. The adjoint cohomology and deformations were studied in \cite{m0,L1,m2,M1,M2}.
The theory of biderivations and commuting mappings was introduced in \cite{b} for associative algebras, developed for Lie algebras in \cite{c2016,ccz2019,dt2019,e2021,jt2020} and Lie superalgebras in \cite{ccc2021,tmc2020,ycc2021}. It happens quite often that all biderivations are inner, for example, see \cite{ex1,ex2,ex4,ccc2021,ccz2019,ycc2021,zcz2020}. In particular, it was proved that all biderivations of a finite-dimensional complex simple Lie algebra, without the restriction of being skew-symmetric, are inner biderivations in \cite{ex1}. However, compared with the finite-dimensional case, less work has been done for infinite-dimensional Lie algebras. In this paper,
biderivations and commuting mappings of $\mathfrak{m}_0$, $L_1$ and $\mathfrak{m}_2$ were studied.
Firstly, we determined the biderivations by considering the embedding mappings from these algebras to their derivation algebras. Moreover, we characterized commuting mappings by the biderivations.
Other generalized derivations that we are also interested in are local and 2-local derivations.
The concepts of local and 2-local derivations were introduced in \cite{K} and \cite{P}.
In recent years, local and 2-local derivations have aroused the interest of many authors, see
\cite{AKR,l1,M0}. In particular, it is proved that a finite-dimensional nilpotent Lie algebra $L$ with $\mathrm{dim}\ L\geq2$ always admits a 2-local derivation which is not a derivation in \cite{AKR}. However,
it does not hold for infinite-dimensional $\mathbb{N}$-graded Lie algebras. In fact, every 2-local derivation of $L_1$ is a derivation \cite{l1}. In this paper, we proved that every local and 2-local derivation is a derivation for $\mathfrak{m}_0$, $L_1$ and $\mathfrak{m}_2$.
\section{Preliminaries}
Throughout this paper, the ground field $\mathbb{F}$ is an algebraically closed field of characteristic
zero and all vector spaces, algebras are over $\mathbb{F}$. A Lie algebra over $\mathbb{F}$ is a skew-symmetric algebra whose multiplication satisfies the Jacobi identity.
Lie algebras of coclass 1 are called \emph{Lie algebras of maximal
class}.
Up to isomorphism, there are only three
$\mathbb{N}$-graded Lie algebras of maximal class with 1-dimensional homogeneous
components (see \cite{fc}):
$\bullet$ $\mathfrak{m}_0$: the Lie algebra $\mathfrak{m}_0$ is an $\mathbb{N}$-graded Lie algebra $\mathfrak{m}_{0}=\bigoplus_{i=1}^{\infty} (\mathfrak{m}_0)_i$ with 1-dimensional graded components $(\mathfrak{m}_0)_i$ and generated by the components of degree 1 and 2. For a basis $e_i$ of $(\mathfrak{m}_0)_i$, the non-trivial brackets are $[e_1,e_i]=e_{i+1}$ for all $i\geq 2$.
$\bullet$ $L_1$: the Lie algebra $L_{1}$ is an $\mathbb{N}$-graded Lie algebra $L_{1}=\bigoplus_{i=1}^{\infty}L_{1}^{(i)}$
with 1-dimensional graded components $L_{1}^{(i)}$ and generated by the components of degree 1 and 2. For a basis $e_i$ of
$L_{1}^{(i)}$, the non-trivial brackets are $[e_i,e_j]=(j-i)e_{i+j}$ for all $i,j\geq1$.
$\bullet$ $\mathfrak{m}_2$: the Lie algebra $\mathfrak{m}_2$ is an $\mathbb{N}$-graded Lie algebra $\mathfrak{m}_2=\bigoplus_{i=1}^{\infty}(\mathfrak{m}_2)_i$ with 1-dimensional graded components
$(\mathfrak{m}_2)_i$ and generated by the components of degree 1 and 2. For a basis $e_i$ of
$(\mathfrak{m}_2)_i$, the non-trivial brackets are $[e_1,e_i]=e_{i+1}$ for all $i\geq2$, $[e_2,e_j]=e_{j+2}$ for all $j\geq3$.
\begin{defn}
A linear mapping of a Lie algebra $\mathfrak{g}$ is called a \emph{derivation} if it satisfies
$$D([x,y])=[D(x),y]+[x,D(y)],$$
for all $x,y$ in $\mathfrak{g}$. Denote by $\mathrm{Der}(\mathfrak{g})$ the set of derivations of $\mathfrak{g}$.
\end{defn}
For $x\in \mathfrak{g}$, the mapping $\mathrm{ad}\ x: y\mapsto [x,y]$ is a derivation of $\mathfrak{g}$. Call it \emph{inner derivation}. A standard fact is that $\mathrm{Der}(\mathfrak{g})$ and $\mathrm{ad}\ \mathfrak{g}$ are both Lie subalgebra of $\mathrm{Hom}(\mathfrak{g},\mathfrak{g})$ and $\mathrm{ad}\ \mathfrak{g}$ is an ideal of $\mathrm{Der}(\mathfrak{g})$.
Moreover, the first cohomology with coefficients in the adjoint module is defined by
$\mathrm{H}^{1}(\mathfrak{g},\mathfrak{g})=\mathrm{Der}(\mathfrak{g})/\mathrm{ad}\ \mathfrak{g}$ (see \cite{Fuks}).
A bilinear mapping $f$ of $\mathfrak{g}$ is called \emph{skew-symmetric} if
$$f(x,y)=-f(y,x),$$
for all $x,y$ in $\mathfrak{g}$. For a bilinear mapping $f$ of $\mathfrak{g}$ and an element $x$ in $\mathfrak{g}$, we define $L_{f,x}$ and $R_{f,x}$ by two linear mappings of $\mathfrak{g}$ satisfying
$L_{f,x}(y)=f(x,y)$ and $R_{f,x}(y)=f(y,x)$ for $y\in\mathfrak{g}$.
\begin{defn}\label{b}
A skew-symmetric bilinear mapping $f$ of a Lie algebra $\mathfrak{g}$ is called a \emph{biderivation} of $\mathfrak{g}$ if
$L_{f,x}$ and $R_{f,x}$ both are derivations of $\mathfrak{g}$ for any $x$ in $\mathfrak{g}$.
Denote by $\mathrm{BDer}(\mathfrak{g})$ the set of biderivations of $\mathfrak{g}$.
\end{defn}
Suppose that the mapping $f_{\lambda}: \mathfrak{g}\times \mathfrak{g}\longrightarrow \mathfrak{g}$ is defined by $f_{\lambda}(x,y)=\lambda [x,y]$
for all $x,y\in\mathfrak{g}$, where $\lambda\in\mathbb{F}$. Then it is easy to check that $f_{\lambda}$ is a biderivation of $\mathfrak{g}$. This class of biderivations is called \emph{inner biderivations}.
Denote by $\mathrm{IBDer}(\mathfrak{g})$ the set of inner biderivations of $\mathfrak{g}$.
Suppose that $\mathfrak{g}=\bigoplus_{i=1}^{\infty} \mathfrak{g}_i$ is an $\mathbb{N}$-graded Lie algebra.
For $x\in\mathfrak{g}$, write $\|x\|$ for the degree of $x$.
If $\mathfrak{g}=\bigoplus_{i=1}^{\infty} \mathfrak{g}_i$ has a homogenous basis $\{e_i\mid i\in\mathbb{N}\}$, we define $e_{i}^{j}\in\mathrm{Hom}(\mathfrak{g},\mathfrak{g})_{j-i}$ and $e^{i,j}_{k}\in\mathrm{Hom}(\mathfrak{g}\wedge\mathfrak{g},\mathfrak{g})_{k-i-j}$, $i<j$ by
$e_{i}^{j}:e_i\rightarrow e_j,$ and $e^{i,j}_{k}:(e_i,e_j)\rightarrow e_k$.
It is easy to see that $\mathrm{Der}(\mathfrak{g})$ and $\mathrm{BDer}(\mathfrak{g})$ are $\mathbb{N}$-graded subspaces of $\mathrm{Hom}(\mathfrak{g},\mathfrak{g})$ and $\mathrm{Hom}(\mathfrak{g}\wedge\mathfrak{g},\mathfrak{g})$ respectively, where the
homogeneous components of weight $k$ are given by
$$\mathrm{Der}_{k}(\mathfrak{g})=\{\phi\in\mathrm{Der(\mathfrak{g})}\mid\phi(\mathfrak{g}_i)\subseteq \mathfrak{g}_{k+i},i\in\mathbb{N}\}$$
and
$$\mathrm{BDer}_{k}(\mathfrak{g})=\{f\in\mathrm{BDer(\mathfrak{g})}\mid f(\mathfrak{g}_i,\mathfrak{g}_j)\subseteq \mathfrak{g}_{k+i+j},i,j\in\mathbb{N}\}.$$
\begin{defn}
For a Lie algebra $\mathfrak{g}$, we denote the \emph{center} of
$\mathfrak{g}$ by
$$\mathrm{C}(\mathfrak{g})=\{x\in\mathfrak{g}\mid [x,\mathfrak{g}]=0\}.$$
The Lie algebra $\mathfrak{g}$ is called \emph{centerless} if $\mathrm{C}(\mathfrak{g})=0$.
\end{defn}
\begin{rem}
It is easy to see that $\mathfrak{m}_0$, $L_1$ and $\mathfrak{m}_2$ are all centerless.
\end{rem}
\begin{lem}\label{C}
Suppose that $\mathfrak{g}$ is a centerless $\mathbb{N}$-graded Lie algebra. Then
$\mathfrak{g}$ can be embedded into
a $\mathbb{Z}$-graded Lie algebra $\mathfrak{\widetilde{g}}$ as an ideal such that
$$\mathrm{Der}(\mathfrak{g})\cong \mathrm{ad}_\mathfrak{g}\ \mathfrak{\widetilde{g}}, \quad
\mathrm{Ann}_{\mathfrak{\widetilde{g}}}\ \mathfrak{g}=
\{x\in\mathfrak{\widetilde{g}}\mid [x,\mathfrak{g}]=0\}=0.$$
Moreover, for any $f\in \mathrm{BDer}_{k}(\mathfrak{g})$, $k\in\mathbb{Z}$, there exists a unique linear mapping $\varphi_f:\ \mathfrak{g}\rightarrow \mathfrak{\widetilde{g}}$ such that, for $x,y\in\mathfrak{g}$,
$$f(x,y)=[\varphi_f(x),y]=-[\varphi_f(y),x],$$
where $\|f\|=\|\varphi_f\|$.
\end{lem}
\begin{proof}
Since $\mathfrak{g}$ is centerless, we have $\mathfrak{g}\cong \mathrm{ad}\ \mathfrak{g}\triangleleft\mathrm{Der}(\mathfrak{g})$.
Let $D\in \mathrm{Der}(\mathfrak{g})$. If $[D,\mathrm{ad}\ x]=\mathrm{ad}\ D(x)=0$ for all $x\in \mathfrak{g}$, then $D(x)\in \mathrm{C}(\mathfrak{g})=0$. So $D=0$.
By identifying
$\mathfrak{g}$ with $\mathrm{ad}\ \mathfrak{g}$, we get the isomorphism $\mathrm{Der}(\mathfrak{g})\cong \mathrm{ad}_\mathfrak{g}(\mathrm{Der}(\mathfrak{g}))$.
Set $\mathfrak{\widetilde{g}}=\mathrm{Der}(\mathfrak{g})$. We get $\mathrm{Der}(\mathfrak{g})\cong \mathrm{ad}_\mathfrak{g}\ \mathfrak{\widetilde{g}}$ and $\mathrm{Ann}_{\mathfrak{\widetilde{g}}}\ \mathfrak{g}=0$.
For $f\in \mathrm{BDer}(\mathfrak{g})$ and $x\in\mathfrak{g}$, the mapping $L_{f,x}\in\mathrm{Der}(\mathfrak{g})$ by Definition \ref{b}. Then there exists $y_x\in
\mathfrak{\widetilde{g}}$ such that $L_{f,x}=\mathrm{ad}_\mathfrak{g}\ y_x$. This $y_x$ is unique since $\mathrm{Ann}_{\mathfrak{\widetilde{g}}}\ \mathfrak{g}=0$. So we get a unique linear mapping $\varphi_f:\
\mathfrak{g}\rightarrow\mathfrak{\widetilde{g}}$ denoted by $\varphi_f(x)=y_x$. Then,
for any $x,y\in\mathfrak{g}$, we have
$f(x,y)=-f(y,x)=L_{f,x}(y)=[\varphi_{f}(x),y]=-[\varphi_{f}(y),x].$
\end{proof}
\begin{defn}
A linear mapping $\phi$ of a Lie algebra $\mathfrak{g}$ is called \emph{commuting} if
$[\phi(x),x]=0$ for any $x$ in $\mathfrak{g}$.
\end{defn}
An important application of commuting mappings is to construct biderivations (for example, see \cite{ex1,ex2,ex4}), as shown in the following lemma.
\begin{lem}\label{linear}\cite{ex4}
Suppose that $\mathfrak{g}$ is a Lie algebra and $\phi$ is a commuting mapping of $\mathfrak{g}$. Then the bilinear form $\phi_f$, satisfying $\phi_{f}(x,y)=[x,\phi(y)]$ for $x,y\in \mathfrak{g}$, is a biderivation of $\mathfrak{g}$.
\end{lem}
We here recall and introduce theories of local and 2-local derivations of Lie algebras.
\begin{defn}
A linear mapping $\Delta$ of a Lie algebra $\mathfrak{g}$ is called a \emph{local derivation} of $\mathfrak{g}$ if
for
every $x\in\mathfrak{g}$, there exists a derivation $D_{x}$ of $\mathfrak{g}$ (depending on $x$) such that
$\Delta(x)=D_{x}(x)$.
Denote by $\mathrm{LDer}(\mathfrak{g})$ the set of local derivations of $\mathfrak{g}$.
\end{defn}
\begin{defn}\label{bl}
A linear mapping $\Delta$ of a Lie algebra $\mathfrak{g}$ is called a \emph{ 2-local derivation} of $\mathfrak{g}$ if
for
every $x,y\in\mathfrak{g}$, there exists a derivation $D_{x,y}$ of $\mathfrak{g}$ (depending on $x,y$) such that
$\Delta(x)=D_{x,y}(x)$ and $\Delta(y)=D_{x,y}(y)$.
Denote by $\mathrm{BLDer}(\mathfrak{g})$ the set of 2-local derivations of $\mathfrak{g}$.
\end{defn}
\begin{rem}
Obviously, a derivation is a 2-local derivation.
By taking $x=y$ in Definition \ref{bl}, we get that a 2-local derivation is a local derivation automatically.
Therefore,
for a Lie algebra $\mathfrak{g}$, we have
$$\mathrm{LDer}(\mathfrak{g})\supseteq\mathrm{BLDer}(\mathfrak{g})\supseteq\mathrm{Der}(\mathfrak{g}).$$
\end{rem}
\begin{lem}\label{ld}
Suppose that $\mathfrak{g}=\bigoplus_{i=1}^{\infty} \mathfrak{g}_i$ is an $\mathbb{N}$-graded Lie algebra. For $k\in\mathbb{Z}$, set
\begin{eqnarray*}
\mathrm{LDer}_k(\mathfrak{g})&=& \{\Delta\in\mathrm{Hom}(\mathfrak{g},\mathfrak{g})|\ \mathrm{for\ any}\ x\in\mathfrak{g}, \mathrm{there\ exists}\ D_{x;k}\in\mathrm{Der}_{k}(\mathfrak{g}), \mathrm{such\ that}\ \\
&& \Delta(x)=D_{x;k}(x)\}.
\end{eqnarray*}
Then $\mathrm{LDer}(\mathfrak{g})=\sum_{k\in\mathbb{Z}}\mathrm{LDer}_k(\mathfrak{g})$.
\end{lem}
\begin{proof}
For a local derivation $\Delta$ of $\mathfrak{g}$, it is sufficient to prove that $\Delta\in\sum_{k\in\mathbb{Z}}\mathrm{LDer}_k(\mathfrak{g})$. For any $x\in\mathfrak{g}$, by definition, there exists a derivation $D_x\in\mathrm{Der}(\mathfrak{g})$ and $D_{x;k}\in\mathrm{Der}_k(\mathfrak{g})$ such that
$$\Delta(x)=D_x(x)=\sum_{k\in\mathbb{Z}}D_{x;k}(x).$$
Define a mapping $\Delta_k$ by $\Delta_k(x)=D_{x;k}(x)$ for any $x\in\mathfrak{g}$. Then $\Delta_k\in
\mathrm{LDer}_k(\mathfrak{g})$ and
$\Delta(x)=\sum_{k\in\mathbb{Z}}\Delta_k(x)$. Thus, $\Delta=\sum_{k\in\mathbb{Z}}\Delta_k\in\sum_{k\in\mathbb{Z}}\mathrm{LDer}_k(\mathfrak{g})$.
\end{proof}
Next, we characterize all biderivations, linear commuting mappings and (2-)local derivations of
$\mathfrak{m}_{0}$, $L_1$ and $\mathfrak{m}_{2}$ one by one.
\section{$\mathbb{N}$-graded Lie algebra $\mathfrak{m}_{0}$}
From the result of the first cohomology of $\mathfrak{m}_0$ with coefficients in the adjoint module \cite[Theorem 2]{m0}, we get the derivations of $\mathfrak{m}_0$ by considering the inner derivations in the following lemma.
\begin{lem}\label{m0}
$\mathrm{dim}\ \mathrm{Der}_{k}(\mathfrak{m}_0)=\left\{
\begin{array}{ll}
2, & \hbox{$k\geq0$;} \\
0, & \hbox{$k\leq-1$.}
\end{array}
\right.
$
In particular,
(1) in $\mathrm{Der}_{k}(\mathfrak{m}_0)$ for $k\geq1$, a basis is $e^{1+k}_1$, $\sum_{i\geq2}e^{i+k}_i$;
(2) in $\mathrm{Der}_{0}(\mathfrak{m}_0)$, a basis is $\sum_{i\geq2}e^{i}_i$, $e^{1}_{1}+\sum_{i\geq3}(i-2)e^{i}_i$.
\end{lem}
In order to describe the derivation algebra of $\mathfrak{m}_0$, we denote by $$\mathfrak{\widetilde{m}}_0=\mathfrak{m}_0\oplus\mathrm{span}\{x_{01},x_{02},x_i,i\geq1\}$$
the Lie algebra with brackets:
$$[e_1,e_i]=e_{i+1},\ [x_{01},x_1]=-x_1,\ [x_{01},x_k]=kx_k,\ [x_{02},x_1]=x_1,\ [x_1,x_k]=-e_{k+1},$$
$$[x_{01},e_1]=e_1,\ [x_{01},e_i]=(i-2)e_i,\
[x_{02},e_i]=e_i,\ [x_1,e_1]=-e_2,\ [x_k,e_i]=e_{i+k},$$
where $k,i\geq2$.
Obviously, $\mathfrak{\widetilde{m}}_0=\bigoplus^{\infty}_{i=0}(\mathfrak{\widetilde{m}}_0)_i$ is a $\mathbb{Z}$-graded Lie algebra with the graded component
$(\mathfrak{\widetilde{m}}_0)_i=\left\{
\begin{array}{ll}
\mathrm{span}\{x_{01},x_{02}\}, & \hbox{$i=0$;} \\
\mathrm{span}\{e_{i},x_{i}\}, & \hbox{$i\geq 1$.}
\end{array}
\right.$
By Lemma \ref{m0}, $\mathrm{Der}(\mathfrak{m}_0)\cong\mathfrak{\widetilde{m}}_0$.
\begin{thm}\label{M01}
$\mathrm{dim}\ \mathrm{BDer}_{k}(\mathfrak{m}_0)=\left\{
\begin{array}{ll}
1, & \hbox{$k\geq-1$;} \\
0, & \hbox{$k\leq-2$.}
\end{array}
\right.
$
In particular, for $k\geq-1$, $\mathrm{BDer}_{k}(\mathfrak{m}_0)$ is spanned by $\sum_{i\geq2}e^{1,i}_{1+i+k}$.
\end{thm}
\begin{proof}
By Lemma \ref{C},
for $f\in \mathrm{BDer}_{k}(\mathfrak{m}_0)$, $k\in\mathbb{Z}$, there exists a linear mapping $\varphi_f:\ \mathfrak{m}_0\rightarrow \mathfrak{\widetilde{m}}_0$ such that, for $i,j\geq1$,
\begin{equation}\label{mb}
f(e_i,e_j)=[\varphi_f(e_i),e_j]=-[\varphi_f(e_j),e_i],
\end{equation}
where $\|f\|=\|\varphi_f\|=k$. Now we determine $\varphi_f$ in different weight $k$.
\begin{flushleft}
$\mathbf{Case\ 1.}\ k\leq-2.$
\end{flushleft}
In this case, $\varphi_f(e_1)=\cdots=\varphi_f(e_{-k-1})=0$. Set $\varphi_f(e_{-k})=\alpha x_{01}+\beta x_{02}$ and $\varphi_f(e_{-k+i})=\lambda_ie_i+\mu_ix_i$ for $i\geq 1$.
Taking $j=1$ in Eq. (\ref{mb}), we get
$[\varphi_f(e_{i}),e_1]=0$.
Thus, we have
\begin{eqnarray*}
&&[\varphi_f(e_{-k}),e_1]=[\alpha x_{01}+\beta x_{02},e_1]=\alpha e_1=0, \\
&&[\varphi_f(e_{-k+1}),e_1]=[\lambda_1e_1+\mu_1x_1,e_1]=-\mu_1e_2=0,\\
&&[\varphi_f(e_{-k+i}),e_1]=[\lambda_ie_i+\mu_ix_i,e_1]=-\lambda_ie_{i+1}=0,
\end{eqnarray*}
where $i\geq 2$. So $\alpha=\mu_1=\lambda_i=0$ for $i\geq 2$. Taking $i=j$ in Eq. (\ref{mb}), we get
$[\varphi_f(e_{i}),e_i]=0$. Thus we have
\begin{eqnarray*}
&&[\varphi_f(e_{-k}),e_{-k}]=[\beta x_{02},e_{-k}]=\beta e_{-k}=0, \\
&&[\varphi_f(e_{-k+1}),e_{-k+1}]=[\lambda_1e_1+\mu_1x_1,e_{-k+1}]=\lambda_1e_{-k+2}=0,\\
&&[\varphi_f(e_{-k+i}),e_{-k+i}]=[\lambda_ie_i+\mu_ix_i,e_{-k+i}]=\mu_ie_{-k+2i}=0,
\end{eqnarray*}
where $i\geq2$. So $\beta=\lambda_1=\mu_i=0$ for $i\geq 2$. Thus $f=\varphi_f=0$.
\begin{flushleft}
$\mathbf{Case\ 2.}\ k=-1.$
\end{flushleft}
In this case, we set $\varphi_f(e_1)=\alpha x_{01}+\beta x_{02}$ and $\varphi_f(e_i)=\lambda_{i-1}e_{i-1}+\mu_{i-1}x_{i-1}$ for $i\geq 2$.
Taking $i=j$ in Eq. (\ref{mb}), we get
$[\varphi_f(e_{i}),e_i]=0$. Thus we have
\begin{eqnarray*}
&&[\varphi_f(e_{1}),e_{1}]=[\alpha x_{01}+\beta x_{02},e_{1}]=\alpha e_{1}=0, \\
&&[\varphi_f(e_{2}),e_{2}]=[\lambda_{1}e_{1}+\mu_{1}x_{1},e_{2}]=\lambda_1e_3=0,\\
&&[\varphi_f(e_{i}),e_{i}]=[\lambda_{i-1}e_{i-1}+\mu_{i-1}x_{i-1},e_{i}]=\mu_{i-1}e_{2i-1}=0,
\end{eqnarray*}
where $i\geq3$. So $\alpha=\lambda_1=\mu_{i-1}=0$ for $i\geq 3$.
Taking $i=1$ in Eq. (\ref{mb}), we get
$[\varphi_f(e_{1}),e_j]=-[\varphi_f(e_{j}),e_1]$.
Thus, we have
\begin{eqnarray*}
&&[\varphi_f(e_{1}),e_2]=-[\varphi_f(e_{2}),e_1]=\beta e_2=\mu_1e_2, \\
&&[\varphi_f(e_{1}),e_j]=-[\varphi_f(e_{j}),e_1]=\beta e_j=\lambda_{j-1}e_j,
\end{eqnarray*}
where $j\geq 3$. So $\beta=\mu_1=\lambda_{j-1}$ for $j\geq 3$.
Thus,
\begin{eqnarray*}
&&f(e_1,e_i)=[\varphi_f(e_1),e_i]=[\beta x_{02},e_i]=\beta e_i,\ i\geq2, \\
&&f(e_2,e_i)=[\varphi_f(e_2),e_i]=[\beta x_1,e_i]=0 ,\ i\geq 3,\\
&&f(e_i,e_j)=[\varphi_f(e_i),e_j]=[\beta e_{i-1},e_j]=0 ,\ j>i\geq 3.
\end{eqnarray*}
That is,
$f=\beta\sum_{i\geq2}e^{1,i}_{i}$.
\begin{flushleft}
$\mathbf{Case\ 3.}\ k=0.$
\end{flushleft}
In this case, we set $\varphi_f(e_i)=\lambda_{i}e_{i}+\mu_{i}x_{i}$ for $i\geq 1$.
Taking $i=j$ in Eq. (\ref{mb}), we get
$[\varphi_f(e_{i}),e_i]=0$. Thus we have
\begin{eqnarray*}
&&[\varphi_f(e_{1}),e_1]=[\lambda_{1}e_{1}+\mu_{1}x_{1},e_1]=-\mu_1 e_2=0, \\
&&[\varphi_f(e_{i}),e_i]=[\lambda_{i}e_{i}+\mu_{i}x_{i},e_i]=\mu_{i}e_{2i}=0,
\end{eqnarray*}
where $i\geq 2$. So $\mu_i=0$ for $i\geq 1$. Taking $i=1$ in Eq. (\ref{mb}), we get
$[\varphi_f(e_1),e_j]=-[\varphi_f(e_j),e_1]$. Thus, we have
\begin{equation*}
[\varphi_f(e_1),e_j]=-[\varphi_f(e_j),e_1]=\lambda_1e_{j+1}=\lambda_je_{j+1},
\end{equation*}
where $j\geq 2$. So $\lambda_1=\lambda_j$ for $j\geq 1$. Thus,
\begin{eqnarray*}
&&f(e_1,e_i)=[\varphi_f(e_1),e_i]=[\lambda_1e_1,e_i]=\lambda_1e_{i+1},\ i\geq2,\\
&&f(e_i,e_j)=[\varphi_f(e_i),e_j]=[\lambda_1e_{i},e_j]=0,\ j>i\geq2.
\end{eqnarray*}
That is, $f=\lambda_1\sum_{i\geq 2}e^{1,i}_{1+i}$.
\begin{flushleft}
$\mathbf{Case\ 4.}\ k\geq1.$
\end{flushleft}
In this case, we set $\varphi_f(e_i)=\lambda_{i}e_{i+k}+\mu_{i}x_{i+k}$ for $i\geq 1$.
Taking $i=j$ in Eq. (\ref{mb}), we get
$[\varphi_f(e_{i}),e_i]=0$. Thus we have
\begin{eqnarray*}
&&[\varphi_f(e_{1}),e_1]=[\lambda_{1}e_{k+1}+\mu_{1}x_{k+1},e_1]=-\lambda_1 e_{k+2}=0, \\
&&[\varphi_f(e_{i}),e_i]=[\lambda_{i}e_{k+i}+\mu_{i}x_{k+i},e_i]=\mu_{i}e_{k+2i}=0,
\end{eqnarray*}
where $i\geq 2$. So $\lambda_1=\mu_i=0$ for $i\geq 2$. Taking $i=1$ in Eq.
(\ref{mb}), we get
$[\varphi_f(e_1),e_j]=-[\varphi_f(e_j),e_1]$. Thus, we have
\begin{equation*}
[\varphi_f(e_1),e_j]=-[\varphi_f(e_j),e_1]=\mu_1e_{k+j+1}=\lambda_je_{k+j+1},
\end{equation*}
where $j\geq 2$. So $\mu_1=\lambda_j$ for $j\geq 2$. Thus,
\begin{eqnarray*}
&&f(e_1,e_i)=[\varphi_f(e_1),e_i]=[\mu_1x_{k+1},e_i]=\mu_1e_{1+i+k},\ i\geq2,\\
&&f(e_i,e_j)=[\varphi_f(e_i),e_j]=[\mu_1e_{i+k},e_j]=0,\ j>i\geq2.
\end{eqnarray*}
That is, $f=\mu_1\sum_{i\geq 2}e^{1,i}_{1+i+k}$.
In conclusion,
$$f=\left\{
\begin{array}{ll}
\lambda_k\sum_{i\geq 2}e^{1,i}_{1+i+k}, & \hbox{$\lambda_k\in\mathbb{F},\ k\geq-1$;} \\
0, & \hbox{$k\leq-2$.}
\end{array}
\right.
$$
The proof is complete.
\end{proof}
\begin{cor}
$\mathrm{BDer}_{0}(\mathfrak{m}_0)=\mathrm{IBDer}(\mathfrak{m}_0)$.
\end{cor}
\begin{thm}
A linear mapping of $\mathfrak{m}_0$ is commuting if and only if it is a scalar multiple of $\sum_{i\geq1}e^{i}_{i}$.
\end{thm}
\begin{proof}
The `if' direction is easy to verify. We now prove the `only if' direction.
Suppose that $\phi$ is a linear commuting mapping of weight $k$. That is to say that $\phi(e_i)=a_ie_{i+k}$ for $i\geq1$. By Lemma \ref{linear}, $\phi$ defines a biderivation $\phi_{f}\in\mathrm{BDer}_{k}(\mathfrak{m}_0)$.
If $k\leq-2$, then $\phi_{f}=0$ by Theorem \ref{M01}. So $[x,\phi(y)]=0$ for all $x,y\in\mathfrak{m}_0$. From $\phi(y)\in\mathrm{C}(\mathfrak{m}_0)=0$, we have $\phi=0$. If $k\geq-1$, then
$\phi_{f}=\lambda\sum_{i\geq2}e^{1,i}_{1+i+k}$ for some $\lambda\in\mathbb{F}$.
\begin{flushleft}
$\mathbf{Case\ 1.}\ k\geq-1, k\neq 0.$
\end{flushleft}
From $\phi_{f}(e_j,e_1)=\lambda\sum_{i\geq2}e^{1,i}_{1+i+k}(e_j,e_1)$ for $j\geq2$, we have
$[e_j,a_1e_{k+1}]=-\lambda e_{1+j+k}$.
Since $k\neq 0$, we have $[e_j,e_{k+1}]=0$. So $\lambda=0$. We have $\phi_{f}(x,y)=[x,\phi(y)]=0$ for all $x,y\in\mathfrak{m}_0$. So $\phi(y)\in \mathrm{C}(\mathfrak{m}_0)=0$. Then $\phi=0$.
\begin{flushleft}
$\mathbf{Case\ 2.}\ k=0.$
\end{flushleft}
From $\phi_{f}(e_1,e_j)=\lambda\sum_{i\geq2}e^{1,i}_{1+i}(e_1,e_j)$ for $j\geq2$,
we have $[e_1,a_je_j]=\lambda e_{1+j}$. Thus $a_j=\lambda$, $j\geq2$.
Moreover, for $j\geq2$, from $\phi_{f}(e_j,e_1)=-\phi_{f}(e_1,e_j)=-\lambda e_{1+j}$, we have
$[e_j,a_1e_1]=-\lambda e_{1+j}$. Thus $a_1=\lambda$. So $\phi=\lambda\sum_{i\geq1}e^{i}_{i}$.
\end{proof}
Here we characterize local and 2-local derivations of $\mathfrak{m}_0$ by the result of derivations.
\begin{thm}
$\mathrm{LDer}(\mathfrak{m}_0)=\mathrm{BLDer}(\mathfrak{m}_0)=\mathrm{Der}(\mathfrak{m}_0).$
\end{thm}
\begin{proof}
By Lemmas \ref{ld} and \ref{m0}, it is sufficient to prove
$\mathrm{LDer}_{k}(\mathfrak{m}_0)\subseteq\mathrm{Der}(\mathfrak{m}_0)$ for $k\geq0$.
By Lemma \ref{m0}, we prove that in the following cases.
\begin{flushleft}
$\mathbf{Case\ 1.}\ k=0.$
\end{flushleft}
Suppose that $\Delta\in\mathrm{LDer}_{0}(\mathfrak{m}_0)$ is a local derivation. For $\Delta(e_i)$, $i\geq1$, by definition of local derivations, there exist $a_i$, $b_i\in \mathbb{F}$, such that
\begin{eqnarray}\label{44}
\Delta(e_1)&=&b_1e_{1}, \\
\Delta(e_2)&=&a_2e_2, \\
\Delta(e_i)&=&(a_i+(i-2)b_i)e_i,\ i\geq 3.
\end{eqnarray}
By linearity of $\Delta$, for $i\geq 3$,
\begin{equation}\label{11}
\Delta(e_1+e_2+e_i)=\Delta(e_1)+\Delta(e_2)+\Delta(e_i)=b_1e_{1}+a_2e_2+(a_i+(i-2)b_i)e_i.
\end{equation}
For $\Delta(e_1+e_2+e_i)$, by definition, there exist $a_{12i}$, $b_{12i}$, such that
\begin{equation}\label{22}
\Delta(e_1+e_2+e_i)=a_{12i}(e_2+e_i)+b_{12i}e_{1}+b_{12i}(i-2)e_i.
\end{equation}
Comparing Eqs. (\ref{11}) with (\ref{22}), we get $a_{2}+b_{1}(i-2)=a_i+(i-2)b_i$. From Eq. (2.1.3),
\begin{equation}\label{313}
\Delta(e_i)=(a_{2}+b_{1}(i-2))e_i,\ i\geq 3.
\end{equation}
From Eqs. (2.1.1), (2.1.2) and (\ref{313}) and Lemma \ref{m0}, we have
$$\Delta=a_2\sum_{i\geq2}e^{i}_i+b_1\bigg(e^{1}_{1}+\sum_{i\geq3}(i-2)e^{i}_i\bigg)\in\mathrm{Der}_0(\mathfrak{m}_0).$$
\begin{flushleft}
$\mathbf{Case\ 2.}\ k\geq1.$
\end{flushleft}
Suppose that $\Delta_k\in\mathrm{LDer}_{k}(\mathfrak{m}_0)$ is a local derivation. For $\Delta_k(e_i)$, $i\geq1$, by definition of local derivations, there exist $a_{k;i}$, $b_{k;i}\in \mathbb{F}$, such that
\begin{equation*}
\Delta_k(e_1)=a_{k;1}e_{1+k},\quad \mathrm{and}\quad
\Delta_k(e_i)=b_{k;i}e_{i+k},\ i\geq 2.
\end{equation*}
For $\Delta_k(e_2+e_i)$, $i\geq 3$, there exist $b_{k;2,i}\in\mathbb{F}$, such that
\begin{equation}\label{111}
\Delta_k(e_2+e_i)=b_{k;2,i}\sum_{j\geq2}e^{j+k}_j(e_2+e_i)=b_{k;2,i}(e_{2+k}+e_{i+k}).
\end{equation}
On the other hand,
\begin{equation}\label{1111}
\Delta_k(e_2+e_i)=\Delta_k(e_2)+\Delta_k(e_i)=b_{k;2}e_{2+k}+b_{k;i}e_{i+k}.
\end{equation}
Comparing Eqs. (\ref{111}) with (\ref{1111}), we have $b_{k;2}=b_{k;i}$ for $i\geq 3$.
So $\Delta_k=a_{k;1}e_{1}^{1+k}+b_{k;2}\sum_{i\geq2}e^{i+k}_i\in\mathrm{Der}_k(\mathfrak{m}_0)$.
\end{proof}
\begin{rem}
The 2-local derivations of $\mathfrak{m}_0$ were also studied in \cite{M0,l1}. In fact, there are a few non-linear 2-local derivations which are not derivations (see \cite{M0,l1}, for example).
For $m,q\in \mathbb{N}$, $\lambda\in \mathbb{C}$ and $\theta=(\theta_2,\ldots,\theta_m)\in \mathbb{C}^{m-1}$, define the mapping $\Omega^{(q,m)}_{\theta,\lambda}$ of $\mathfrak{m}_0$ by
$$\Omega^{(q,m)}_{\theta,\lambda}\left(\sum_{i=1}^{p}k_ie_i\right)=
\left\{
\begin{array}{ll}
\sum\limits_{i=2}^{p}\sum\limits_{j=2}^{m}k_i\theta_je_{i+j-2}, & \hbox{if $k_1\neq 0$;} \\
\lambda k_qe_q, & \hbox{if $x=k_q e_q$ for some $q$ with $2<q\leq p$;} \\
0, & \hbox{others.}
\end{array}
\right.$$
In \cite{M0}, the author proves that every 2-local derivation (not necessarily linear) $\Delta$ of $\mathfrak{m}_0$ is of the form $\Delta=D+\Omega^{(q,m)}_{\theta,\lambda}$ for some $D\in\mathrm{Der}(\mathfrak{m}_0)$ \cite[Theorem 4.2]{M0}.
Using this theorem, we can give another proof that if a 2-local derivation of $\mathfrak{m}_0$
is linear, then it is a derivation.
Suppose that $\Delta=D+\Omega^{(q,m)}_{\theta,\lambda}$ is a 2-local derivation (not necessarily linear), where $D\in\mathrm{Der}(\mathfrak{m}_0)$ and $\Omega^{(q,m)}_{\theta,\lambda}$ is as above.
Then
$$\Omega^{(q,m)}_{\theta,\lambda}(e_1)=\Omega^{(q,m)}_{\theta,\lambda}(e_2)=0, \ \Omega^{(q,m)}_{\theta,\lambda}(e_q)=\lambda e_q.$$
If $\Delta$ is linear, then $\Omega^{(q,m)}_{\theta,\lambda}$ is additive. Thus, we have
\begin{eqnarray*}
&&\Omega^{(q,m)}_{\theta,\lambda}(e_1+e_2)=\sum_{j=2}^{m}\theta_j e_j=0, \\
&& \Omega^{(q,m)}_{\theta,\lambda}(e_2+e_q)=0=\lambda e_q.
\end{eqnarray*}
Then $\theta_2=\cdots=\theta_m=\lambda=0$. That is, $\Omega^{(q,m)}_{\theta,\lambda}=0$ and $\Delta=D\in\mathrm{Der}(\mathfrak{m}_0)$.
\end{rem}
\section{$\mathbb{N}$-graded Lie algebra $L_{1}$}
Different from $\mathfrak{m}_0$ and $\mathfrak{m}_2$, the derivation algebra of $L_1$ has been described as the
inner derivation algebra of $\mathbb{F} e_0\ltimes L_1$.
\begin{lem}\cite{l1}\label{L'}
$\mathrm{Der}(L_1)=\mathrm{ad}_{L_1}(\mathbb{F} e_0\ltimes L_1)$, where
$\mathbb{F} e_0\ltimes L_1$ has the brackets: $[e_i,e_j]=(j-i)e_{i+j}$ for all $i,j\geq0$.
\end{lem}
\begin{thm}\label{l}
$\mathrm{IBDer}(L_1)=\mathrm{BDer}(L_1)$.
\end{thm}
\begin{proof}
Suppose that $f$ is a biderivation of $L_1$. By Lemmas \ref{C} and \ref{L'}, there exists a mapping $\varphi_f:\ L_1\rightarrow L_1\oplus\mathbb{F} e_0$ such that, for $i,j\geq 1$,
\begin{equation}\label{L}
f(e_i,e_j)=[\varphi_f(e_i),e_j]=-[\varphi_f(e_j),e_i].
\end{equation}
Suppose that $\|f\|=\|\varphi_f\|=k$.
If $k\leq-1$, we set $\varphi_f(e_{-k+i})=\lambda_ie_{i}$, $i\geq 0$.
If $k\geq0$, we set $\varphi_f(e_i)=\lambda_ie_{k+i}$, $i\geq 1$.
Then, if $k\neq 0$, we have $\lambda_i=0$ by taking $i=j$ in Eq. (\ref{L}).
So $f=\varphi_f=0$.
Now we suppose that $k=0$. Then, from Eq. (\ref{L}), we get $\lambda_i=\lambda_j$ for $i,j\geq 1$. Set $\lambda_i=\lambda$ for $i\geq 1$.
Then $f=f_\lambda$ is an inner biderivation.
\end{proof}
\begin{thm}
A linear mapping of $L_1$ is commuting if and only if it is a scalar multiplication mapping of $L_1$.
\end{thm}
\begin{proof}
The `if' direction is easy to verify. We now prove the `only if' direction.
Suppose that $\phi$ is a linear commuting mapping of $L_1$. By Lemma \ref{linear}, $\phi$ defines a biderivation $\phi_{f}\in\mathrm{BDer}(L_1)$.
By Theorem \ref{l}, for all $x,y\in L_1$, $\phi_{f}(x,y)=[x,\phi(y)]=\lambda[x,y]$ for some $\lambda\in\mathbb{F}$.
Then $[x,\phi(y)-\lambda y]=0$. That means $\phi(y)-\lambda y\in \mathrm{C}(L_1)=0$. The proof is complete.
\end{proof}
From the result of the first cohomology of $L_1$ with coefficients in the adjoint module \cite{L1}, we get the derivations of $L_1$ by considering the inner derivations in the following lemma.
\begin{lem}\label{LL1}
$\mathrm{dim}\ \mathrm{Der}_{k}(L_1)=\left\{
\begin{array}{ll}
0, & \hbox{$k\leq-1$;} \\
1, & \hbox{$k\geq0$.}
\end{array}
\right.
$
In particular, in $\mathrm{Der}_{k}(L_1)$ for $k\geq0$, a basis is $\sum_{i\geq1,i\neq k}(i-k)e^{k+i}_i$.
\end{lem}
Here we characterize local and 2-local derivations of $L_1$ by the result of derivations.
\begin{thm}
$\mathrm{LDer}(L_1)=\mathrm{BLDer}(L_1)=\mathrm{Der}(L_1).$
\end{thm}
\begin{proof}
By Lemmas \ref{ld} and \ref{LL1}, it is sufficient to prove
$\mathrm{LDer}_{k}(L_1)\subseteq\mathrm{Der}(L_1)$ for $k\geq0$.
Suppose that $\Delta_k\in\mathrm{LDer}_k(L_1)$ is a local derivation. Fix an $i_0$ with $i_{0}\geq 1$ and $i_{0}\neq k$. For $\Delta_k(e_{i_{0}})$, by the definition of local derivations, there exists $a_{k;i_0}\in\mathbb{F}$, such that
$$\Delta_k(e_{i_{0}})=a_{k;i_0}\sum_{i\geq1,i\neq k}((i-k)e^{k+i}_i)(e_{i_{0}})=a_{k;i_0}(i_0-k)e_{k+i_0}.$$
Similarly, for any $j: j\geq 1$ and $j\neq k, i_0$, there exist $a_{k;j}\in\mathbb{F}$ such that
$$\Delta_k(e_j)=a_{k;j}\sum_{i\geq1,i\neq k}((i-k)e^{k+i}_i)(e_j)=a_{k;j}(j-k)e_{k+j}.$$
Moreover,
\begin{equation}\label{x}
\Delta_k(e_{i_{0}}+e_j)=\Delta_k(e_{i_{0}})+\Delta_k(e_j)=a_{k;i_0}(i_0-k)e_{k+i_0}+a_{k;j}(j-k)e_{k+j}.
\end{equation}
For $\Delta_k(e_{i_{0}}+e_j)$, by definition, there exist $a_{k;i_0,j}\in\mathbb{F}$ such that
\begin{equation}\label{y}
\Delta_k(e_{i_{0}}+e_j)=a_{k;i_0,j}((i_0-k)e_{k+i_0}+(j-k)e_{k+j}).
\end{equation}
Comparing Eqs. (\ref{x}) with (\ref{y}), we have $a_{k;j}=a_{k;i_0}$ for any $j\geq1$ and $j\neq k$.
So $\Delta_k=a_{k;i_0}\sum_{i\geq1,i\neq k}(i-k)e^{k+i}_i\in\mathrm{Der}_k(L_1).$
\end{proof}
\begin{rem}
The 2-local derivations of $L_1$ were also studied in \cite{l1}. In particular,
the authors showed that every 2-local derivation (not necessarily linear) is a derivation.
\end{rem}
\section{$\mathbb{N}$-graded Lie algebra $\mathfrak{m}_2$}
From the result of the first cohomology of $\mathfrak{m}_2$ with coefficients in the adjoint module \cite[Theorem 2]{m2}, we get the derivations of $\mathfrak{m}_2$ by considering the inner derivations in the following lemma.
\begin{lem}\label{M21}
$\mathrm{dim}\ \mathrm{Der}_{k}(\mathfrak{m}_2)=\left\{
\begin{array}{ll}
0, & \hbox{$k\leq-1$;} \\
1, & \hbox{$k=0,1$;} \\
2, & \hbox{$k\geq2$.}
\end{array}
\right.
$
In particular,
(1) in $\mathrm{Der}_{0}(\mathfrak{m}_2)$, a basis is $\sum_{i\geq1}ie^{i}_i$;
(2) in $\mathrm{Der}_{1}(\mathfrak{m}_2)$, a basis is $\sum_{i\geq2}e^{i+1}_i$;
(3) in $\mathrm{Der}_{2}(\mathfrak{m}_2)$, a basis is $\sum_{i\geq2}e^{i+2}_i$, $e^{3}_{1}-\sum_{i\geq3}e^{i+2}_{i}$;
(4) in $\mathrm{Der}_{k}(\mathfrak{m}_2)$ for $k\geq3$, a basis is $e^{k+1}_1+e^{k+2}_2$,
$-\frac{1}{2}e^{k+1}_1+\frac{1}{2}e^{k+2}_2+\sum_{i\geq3}e^{i+k}_i$.
\end{lem}
In order to describe the derivation algebra of $\mathfrak{m}_2$, we denote by $$\mathfrak{\widetilde{m}}_2=\mathfrak{m}_2\oplus\mathrm{span}\{x_{0},x_i,i\geq2\}$$
the Lie algebra with brackets:
$$[e_1,e_i]=e_{i+1},\ [e_2,e_j]=e_{j+2},\ [x_0,x_i]=ix_i,\ [x_2,x_j]=\frac{1}{2}e_{j+2},\
[x_0,e_{i-1}]=(i-1)e_{i-1},$$
$$[x_2,e_i]=e_{i+2},\ [x_j,e_1]=-\frac{1}{2}e_{j+1},\ [x_j,e_2]=\frac{1}{2}e_{j+2},\ [x_i,e_j]=e_{i+j},$$
where $i\geq2$, $j\geq3$.
Obviously, $\mathfrak{\widetilde{m}}_2=\bigoplus^{\infty}_{i=0}(\mathfrak{\widetilde{m}}_2)_i$ is a $\mathbb{Z}$-graded Lie algebra with the graded component
$(\mathfrak{\widetilde{m}}_2)_i=\left\{
\begin{array}{ll}
\mathbb{F} x_{0}, & \hbox{$i=0$;} \\
\mathbb{F} e_{1}, & \hbox{$i=1$;} \\
\mathrm{span}\{e_{i},x_{i}\}, & \hbox{$i\geq 2$.}
\end{array}
\right.$
By Lemma \ref{M21}, $\mathrm{Der}(\mathfrak{m}_2)\cong\mathfrak{\widetilde{m}}_2$.
\begin{thm}\label{m2}
$\mathrm{dim}\ \mathrm{BDer}_{k}(\mathfrak{m}_2)=\left\{
\begin{array}{ll}
1, & \hbox{$k\geq0$;} \\
0, & \hbox{$k\leq-1$.}
\end{array}
\right.
$
In particular, for $k\geq0$, $\mathrm{BDer}_{k}(\mathfrak{m}_2)$ is spanned by $\sum_{i\geq2}e^{1,i}_{k+i+1}+\sum_{i\geq3}e^{2,i}_{k+i+2}$.
\end{thm}
\begin{proof}
By Lemma \ref{C},
for $f\in \mathrm{BDer}_{k}(\mathfrak{m}_2)$, $k\in\mathbb{Z}$, there exists a linear mapping $\varphi_f:\ \mathfrak{m}_2\rightarrow \mathfrak{\widetilde{m}}_2$ such that, for $i,j\geq1$,
\begin{equation}\label{mb2}
f(e_i,e_j)=[\varphi_f(e_i),e_j]=-[\varphi_f(e_j),e_i],
\end{equation}
where $\|f\|=\|\varphi_f\|=k$. Now we determine $\varphi_f$ in different weight $k$.
\begin{flushleft}
$\mathbf{Case\ 1.}\ k\leq-2.$
\end{flushleft}
In this case, $\varphi_f(e_1)=\cdots=\varphi_f(e_{-k-1})=0$. Set $\varphi_f(e_{-k})=\alpha x_{0}$, $\varphi_f(e_{-k+1})=\beta e_1$ and $\varphi_f(e_{-k+i})=\lambda_ie_i+\mu_ix_i$ for $i\geq 2$.
Taking $j=1$ in Eq. (\ref{mb2}), we get
$[\varphi_f(e_{i}),e_1]=0$.
Thus, we have
\begin{eqnarray*}
&&[\varphi_f(e_{-k}),e_1]=[\alpha x_{0},e_1]=\alpha e_1=0, \\
&&[\varphi_f(e_{-k+2}),e_1]=[\lambda_2e_2+\mu_2x_2,e_1]=-\lambda_2e_3=0,\\
&&[\varphi_f(e_{-k+i}),e_1]=[\lambda_ie_i+\mu_ix_i,e_1]=-(\lambda_i+\frac{1}{2}\mu_i)e_{i+1}=0,
\end{eqnarray*}
where $i\geq 3$. So $\alpha=\lambda_2=\lambda_i+\frac{1}{2}\mu_i=0$ for $i\geq 3$. Taking $i=j$ in Eq. (\ref{mb2}), we get
$[\varphi_f(e_{i}),e_i]=0$. Thus, we have
\begin{eqnarray*}
&&[\varphi_f(e_{-k+1}),e_{-k+1}]=[\beta e_1,e_{-k+1}]=\beta e_{-k+2}=0, \\
&&[\varphi_f(e_{-k+i}),e_{-k+i}]=[\lambda_ie_i+\mu_ix_i,e_{-k+i}]=\mu_ie_{-k+2i}=0,
\end{eqnarray*}
where $i\geq 2$. So $\beta=\mu_i=0$ for $i\geq 2$. Thus $f=\varphi_f=0$.
\begin{flushleft}
$\mathbf{Case\ 2.}\ k=-1.$
\end{flushleft}
In this case, we set $\varphi_f(e_1)=\alpha x_{0}$, $\varphi_f(e_2)=\beta e_1$ and $\varphi_f(e_i)=\lambda_{i-1}e_{i-1}+\mu_{i-1}x_{i-1}$ for $i\geq 3$.
Taking $i=j$ in Eq. (\ref{mb2}), we get
$[\varphi_f(e_{i}),e_i]=0$. Thus, we have
\begin{eqnarray*}
&&[\varphi_f(e_{1}),e_{1}]=[\alpha x_{0},e_{1}]=\alpha e_{1}=0, \\
&&[\varphi_f(e_{2}),e_{2}]=[\beta e_{1},e_{2}]=\beta e_3=0,\\
&&[\varphi_f(e_{3}),e_{3}]=[\lambda_{2}e_{2}+\mu_{2}x_{2},e_{3}]=(\lambda_2+\mu_{2})e_{5}=0,\\
&&[\varphi_f(e_{i}),e_{i}]=[\lambdaambda_{i-1}e_{i-1}+\mu_{i-1}x_{i-1},e_{i}]=\mu_{i-1}e_{2i-1}=0,
\end{eqnarray*}
where $i\geq4$. So $\alpha=\beta=\lambda_2+\mu_{2}=\mu_{i}=0$ for $i\geq 3$.
Taking $j=1$ in Eq. (\ref{mb2}), we get
$[\varphi_f(e_{i}),e_1]=-[\varphi_f(e_{1}),e_i]=0$.
Thus, we have
\begin{eqnarray*}
&&[\varphi_f(e_3),e_1]=[\lambda_2e_2+\mu_2x_2,e_1]=-\lambda_2e_3=0, \\
&&[\varphi_f(e_i),e_1]=[\lambda_{i-1}e_{i-1},e_1]=-\lambda_{i-1}e_i=0,
\end{eqnarray*}
where $i\geq 4$. So $\lambda_i=0$ for $i\geq 2$. Thus $f=\varphi_f=0$.
\begin{flushleft}
$\mathbf{Case\ 3.}\ k=0.$
\end{flushleft}
In this case, we set $\varphi_f(e_1)=\alpha e_{1}$ and $\varphi_f(e_i)=\lambda_{i}e_{i}+\mu_{i}x_{i}$ for $i\geq 2$.
Taking $i=j$ in Eq. (\ref{mb2}), we get
$[\varphi_f(e_{i}),e_i]=0$. Thus, we have
\begin{equation*}
[\varphi_f(e_{i}),e_{i}]=[\lambda_ie_i+\mu_ix_{i},e_{i}]=\mu_i e_{2i}=0,
\end{equation*}
where $i\geq2$. So $\mu_{i}=0$ for $i\geq 2$.
Taking $i=1$ in Eq. (\ref{mb2}), we get
$[\varphi_f(e_{1}),e_j]=-[\varphi_f(e_{j}),e_1]$.
Thus, we have
\begin{equation*}
[\varphi_f(e_{1}),e_j]=-[\varphi_f(e_{j}),e_1]=\alpha e_{j+1}=\lambda_je_{j+1},
\end{equation*}
where $j\geq 2$. So $\lambda_i=\alpha$ for $i\geq 2$. Thus,
\begin{eqnarray*}
&&f(e_1,e_i)=[\varphi_f(e_1),e_i]=[\alpha e_{1},e_i]=\alpha e_{i+1},\ i\geq2, \\
&&f(e_2,e_i)=[\varphi_f(e_2),e_i]=[\alpha e_2,e_i]=\alpha e_{i+2} ,\ i\geq 3,\\
&&f(e_i,e_j)=[\varphi_f(e_i),e_j]=[\alpha e_{i},e_j]=0 ,\ j>i\geq 3.
\end{eqnarray*}
That is,
$f=\alpha\left(\sum_{i\geq2}e^{1,i}_{i+1}+\sum_{i\geq3}e^{2,i}_{i+2}\right)$.
\begin{flushleft}
$\mathbf{Case\ 4.}\ k=1.$
\end{flushleft}
In this case, we set $\varphi_f(e_i)=\lambda_{i}e_{i+1}+\mu_{i}x_{i+1}$ for $i\geq 1$.
Taking $i=j$ in Eq. (\ref{mb2}), we get
$[\varphi_f(e_{i}),e_i]=0$. Thus, we have
\begin{eqnarray*}
&&[\varphi_f(e_{1}),e_{1}]=[\lambda_{1}e_{2}+\mu_{1}x_{2},e_{1}]=-\lambda_1 e_{3}=0, \\
&&[\varphi_f(e_{2}),e_{2}]=[\lambda_{2}e_{3}+\mu_{2}x_{3},e_{2}]=(\frac{1}{2}\mu_2-\lambda_2) e_5=0,\\
&&[\varphi_f(e_{i}),e_{i}]=[\lambda_{i}e_{i+1}+\mu_{i}x_{i+1},e_{i}]=\mu_{i}e_{2i+1}=0,
\end{eqnarray*}
where $i\geq 3$. So $\lambda_2=\frac{1}{2}\mu_2$, $\lambda_1=\mu_i=0$ for $i\geq 3$. Taking $j=1$ in
Eq. (\ref{mb2}), we get $[\varphi_f(e_i),e_1]=-[\varphi_f(e_1),e_i]$. Thus, we have
\begin{equation*}
[\varphi_f(e_i),e_1]=-[\varphi_f(e_1),e_i]=-(\lambda_i+\frac{1}{2}\mu_i)e_{i+2}=-\mu_1e_{i+2},
\end{equation*}
where $i\geq 2$. So $\lambda_i=\mu_1-\frac{1}{2}\mu_i$ for $i\geq2$.
Thus,
\begin{eqnarray*}
&&f(e_1,e_i)=[\varphi_f(e_1),e_i]=[\mu_1 x_{2},e_i]=\mu_1 e_{i+2},\ i\geq2, \\
&&f(e_2,e_i)=[\varphi_f(e_2),e_i]=[\frac{1}{2}\mu_1 e_3+\mu_1x_3,e_i]=\mu_1 e_{i+3} ,\ i\geq 3,\\
&&f(e_i,e_j)=[\varphi_f(e_i),e_j]=[\mu_1 e_{i+1},e_j]=0 ,\ j>i\geq 3.
\end{eqnarray*}
That is,
$f=\mu_1\left(\sum_{i\geq2}e^{1,i}_{i+2}+\sum_{i\geq3}e^{2,i}_{i+3}\right)$.
\begin{flushleft}
$\mathbf{Case\ 5.}\ k\geq2.$
\end{flushleft}
In this case, we set $\varphi_f(e_i)=\lambda_{i}e_{i+k}+\mu_{i}x_{i+k}$ for $i\geq 1$.
Taking $i=j$ in Eq. (\ref{mb2}), we get
$[\varphi_f(e_{i}),e_i]=0$. Thus we have
\begin{eqnarray*}
&&[\varphi_f(e_{1}),e_{1}]=[\lambda_{1}e_{k+1}+\mu_{1}x_{k+1},e_{1}]=-(\lambda_1+\frac{1}{2}\mu_1) e_{k+2}=0, \\
&&[\varphi_f(e_{2}),e_{2}]=[\lambda_{2}e_{k+2}+\mu_{2}x_{k+2},e_{2}]=(\frac{1}{2}\mu_2-\lambda_2) e_{k+4}=0,\\
&&[\varphi_f(e_{i}),e_{i}]=[\lambda_{i}e_{k+i}+\mu_{i}x_{k+i},e_{i}]=\mu_{i}e_{2i+k}=0,
\end{eqnarray*}
where $i\geq 3$. So $\lambda_1=-\frac{1}{2}\mu_1$, $\lambda_2=\frac{1}{2}\mu_2$, $\mu_i=0$ for $i\geq 3$. Taking $j=1$ in
Eq. (\ref{mb2}),
we get $[\varphi_f(e_i),e_1]=-[\varphi_f(e_1),e_i]$. Thus, we have
\begin{eqnarray*}
&& [\varphi_f(e_2),e_1]=-[\varphi_f(e_1),e_2]=-\mu_2e_{k+3}=-\mu_1e_{k+3},\\
&& [\varphi_f(e_i),e_1]=-[\varphi_f(e_1),e_i]=-\lambda_ie_{k+i+1}=-\mu_1e_{k+i+1},
\end{eqnarray*}
where $i\geq 3$. So $\mu_1=\mu_2=\lambda_i$ for $i\geq 3$.
Thus,
\begin{eqnarray*}
&&f(e_1,e_i)=[\varphi_f(e_1),e_i]=[-\frac{1}{2}\mu_1e_{k+1}+\mu_1 x_{k+1},e_i]=\mu_1 e_{k+i+1},\ i\geq2, \\
&&f(e_2,e_i)=[\varphi_f(e_2),e_i]=[\frac{1}{2}\mu_1 e_{k+2}+\mu_1x_{k+2},e_i]=\mu_1 e_{k+i+2} ,\ i\geq 3,\\
&&f(e_i,e_j)=[\varphi_f(e_i),e_j]=[\mu_1 e_{k+i},e_j]=0 ,\ j>i\geq 3.
\end{eqnarray*}
That is,
$f=\mu_1\left(\sum_{i\geq2}e^{1,i}_{k+i+1}+\sum_{i\geq3}e^{2,i}_{k+i+2}\right)$.
In conclusion,
$$f=\left\{
\begin{array}{ll}
\lambda_k\left(\sum_{i\geq2}e^{1,i}_{k+i+1}+\sum_{i\geq3}e^{2,i}_{k+i+2}\right), & \hbox{$\lambda_k\in\mathbb{F},\ k\geq0$;} \\
0, & \hbox{$k\leq-1$.}
\end{array}
\right.
$$
The proof is complete.
\end{proof}
\begin{cor}
$\mathrm{BDer}_{0}(\mathfrak{m}_2)=\mathrm{IBDer}(\mathfrak{m}_2)$.
\end{cor}
\begin{thm}
A linear mapping of $\mathfrak{m}_2$ is commuting if and only if it is a scalar of $\sum_{i\geq1}e^{i}_{i}$.
\end{thm}
\begin{proof}
The `if' direction is easy to verify. We now prove the `only if' direction.
Suppose that $\phi$ is a linear commuting mapping of weight $k$. That is to say that $\phi(e_i)=a_ie_{i+k}$ for $i\geq1$. By Lemma \ref{linear}, $\phi$ defines a biderivation $\phi_{f}\in\mathrm{BDer}_{k}(\mathfrak{m}_2)$.
If $k\leq-1$, then $\phi_{f}=0$ by Theorem \ref{m2}. So $[x,\phi(y)]=0$ for all $x,y\in\mathfrak{m}_2$. From $\phi(y)\in\mathrm{C}(\mathfrak{m}_2)=0$, we have $\phi=0$. If $k\geq0$, then
$\phi_{f}=\lambda(\sum_{i\geq2}e^{1,i}_{k+i+1}+\sum_{i\geq3}e^{2,i}_{k+i+2})$ for some $\lambda\in\mathbb{F}$.
\begin{flushleft}
$\mathbf{Case\ 1.}\ k\geq2.$
\end{flushleft}
From $\phi_{f}(e_j,e_1)=\lambda(\sum_{i\geq2}e^{1,i}_{k+i+1}+\sum_{i\geq3}e^{2,i}_{k+i+2})(e_j,e_1)$ for $j\geq3$, we have
$[e_j,a_1e_{k+1}]=-\lambda e_{1+j+k}$.
So $\lambda=0$. We have $\phi_{f}(x,y)=[x,\phi(y)]=0$ for all $x,y\in\mathfrak{m}_2$. So $\phi(y)\in \mathrm{C}(\mathfrak{m}_2)=0$. Then $\phi=0$.
\begin{flushleft}
$\mathbf{Case\ 2.}\ k=1.$
\end{flushleft}
From $\phi_{f}(e_2,e_1)=\lambda(\sum_{i\geq2}e^{1,i}_{k+i+1}+\sum_{i\geq3}e^{2,i}_{k+i+2})(e_2,e_1)$, we have
$[e_2,a_1e_{2}]=-\lambda e_{4}=0$.
So $\lambda=0$. We have $\phi_{f}(x,y)=[x,\phi(y)]=0$ for all $x,y\in\mathfrak{m}_2$. So $\phi(y)\in \mathrm{C}(\mathfrak{m}_2)=0$. Then $\phi=0$.
\begin{flushleft}
$\mathbf{Case\ 3.}\ k=0.$
\end{flushleft}
From $\phi_{f}(e_1,e_j)=\lambda(\sum_{i\geq2}e^{1,i}_{k+i+1}+\sum_{i\geq3}e^{2,i}_{k+i+2})(e_1,e_j)$ for $j\geq2$,
we have $[e_1,a_je_j]=\lambda e_{1+j}$. Thus $a_j=\lambda$, $j\geq2$.
Moreover, for $j\geq2$, from $\phi_{f}(e_j,e_1)=-\phi_{f}(e_1,e_j)=-\lambda e_{1+j}$, we have
$[e_j,a_1e_1]=-\lambda e_{1+j}$. Thus $a_1=\lambda$. So $\phi=\lambda\sum_{i\geq1}e^{i}_{i}$.
\end{proof}
Here we characterize local and 2-local derivations of $\mathfrak{m}_2$ by the result of derivations.
\begin{thm}
$\mathrm{LDer}(\mathfrak{m}_2)=\mathrm{BLDer}(\mathfrak{m}_2)=\mathrm{Der}(\mathfrak{m}_2).$
\end{thm}
\begin{proof}
By Lemmas \ref{ld} and \ref{M21}, it is sufficient to prove
$\mathrm{LDer}_{k}(\mathfrak{m}_2)\subseteq\mathrm{Der}(\mathfrak{m}_2)$ for $k\geq0$.
By Lemma \ref{M21}, we prove this in the following cases.
\begin{flushleft}
$\mathbf{Case\ 1.}\ k=0.$
\end{flushleft}
Suppose that $\Delta\in\mathrm{LDer}_{0}(\mathfrak{m}_2)$ is a local derivation. For
$\Delta(e_i)$, $i\geq1$, by definition, there exists $a_i\in\mathbb{F}$ such that
$$\Delta(e_i)=a_i\sum_{j\geq1}je^{j}_j(e_i)=ia_ie_i.$$
Then, for $i\geq 2$,
\begin{equation}\label{23}
\Delta(e_1+e_i)=\Delta(e_1)+\Delta(e_i)=a_1e_1+ia_ie_i.
\end{equation}
For $\Delta(e_1+e_i)$, $i\geq2$, by definition, there exists $a_{1,i}\in\mathbb{F}$ such that
\begin{equation}\label{24}
\Delta(e_1+e_i)=a_{1,i}\sum_{j\geq1}je^{j}_j(e_1+e_i)=a_{1,i}(e_1+ie_i).
\end{equation}
Comparing Eqs. (\ref{23}) with (\ref{24}), we have $a_i=a_1$ for $i\geq 1$. So $\Delta=a_1\sum_{i\geq1}ie^{i}_i\in
\mathrm{Der}_0(\mathfrak{m}_2)$.
\begin{flushleft}
$\mathbf{Case\ 2.}\ k=1.$
\end{flushleft}
Suppose that $\Delta\in\mathrm{LDer}_{1}(\mathfrak{m}_2)$ is a local derivation. For
$\Delta(e_i)$, $i\geq2$, by definition, there exists $a_i\in\mathbb{F}$ such that
$$\Delta(e_i)=a_i\sum_{j\geq2}e^{j+1}_{j}(e_i)=a_ie_{i+1}.$$
Then, for $i\geq 3$,
\begin{equation}\label{33}
\Delta(e_2+e_i)=\Delta(e_2)+\Delta(e_i)=a_2e_3+a_ie_{i+1}.
\end{equation}
For $\Delta(e_2+e_i)$, $i\geq3$, by definition, there exists $a_{2,i}\in\mathbb{F}$ such that
\begin{equation}\label{34}
\Delta(e_2+e_i)=a_{2,i}\sum_{j\geq2}e^{j+1}_j(e_2+e_i)=a_{2,i}(e_3+e_{i+1}).
\end{equation}
Comparing Eqs. (\ref{33}) with (\ref{34}), we have $a_i=a_2$ for $i\geq 2$. So $\Delta=a_2\sum_{i\geq2}e^{i+1}_i\in
\mathrm{Der}_1(\mathfrak{m}_2)$.
\begin{flushleft}
$\mathbf{Case\ 3.}\ k=2.$
\end{flushleft}
Suppose that $\Delta\in\mathrm{LDer}_{2}(\mathfrak{m}_2)$ is a local derivation. For
$\Delta(e_i)$, $i\geq1$, by definition, there exist $a_i,b_i\in\mathbb{F}$ such that
\begin{eqnarray*}
\Delta(e_1) &=& b_1e_3, \\
\Delta(e_2) &=&a_2e_4, \\
\Delta(e_i) &=&(a_i-b_i)e_{i+2},\ i\geq 3.
\end{eqnarray*}
Then, for $i\geq 3$,
\begin{equation}\label{43}
\Delta(e_1+e_2+e_i)=\Delta(e_1)+\Delta(e_2)+\Delta(e_i)=b_1e_3+a_2e_4+(a_i-b_i)e_{i+2}.
\end{equation}
For $\Delta(e_1+e_2+e_i)$, $i\geq3$, by definition, there exist $a_{1,2,i},b_{1,2,i}\in\mathbb{F}$ such that
\begin{equation}\label{44}
\Delta(e_1+e_2+e_i)=b_{1,2,i}e_3+a_{1,2,i}e_4+(a_{1,2,i}-b_{1,2,i})e_{i+2}.
\end{equation}
Comparing Eqs. (\ref{43}) with (\ref{44}), we have $a_i-b_i=a_2-b_1$ for $i\geq 3$. So $\Delta=a_2\sum_{i\geq2}e^{i+2}_i+b_1(e^{3}_{1}-\sum_{i\geq3}e^{i+2}_{i})\in
\mathrm{Der}_2(\mathfrak{m}_2)$.
\begin{flushleft}
$\mathbf{Case\ 4.}\ k\geq 3.$
\end{flushleft}
Suppose that $\Delta_{k}\in\mathrm{LDer}_{k}(\mathfrak{m}_2)$ is a local derivation.
For
$\Delta(e_i)$, $i\geq1$, by definition, there exist $a_{k;i},b_{k;i}\in\mathbb{F}$ such that
\begin{eqnarray*}
\Delta(e_1) &=& (a_{k;1}-\frac{1}{2}b_{k;1})e_{1+k}, \\
\Delta(e_2) &=&(a_{k;2}+\frac{1}{2}b_{k;2})e_{2+k}, \\
\Delta(e_i) &=&b_{k;i}e_{i+k},\ i\geq 3.
\end{eqnarray*}
Then, for $i\geq 4$,
\begin{equation}\label{53}
\Delta(e_3+e_i)=\Delta(e_3)+\Delta(e_i)=b_{k;3}e_{3+k}+b_{k;i}e_{i+k}.
\end{equation}
For $\Delta(e_3+e_i)$, $i\geq4$, by definition, there exists $b_{k;3,i}\in\mathbb{F}$ such that
\begin{equation}\label{54}
\Delta(e_3+e_i)=b_{k;3,i}(e_{3+k}+e_{i+k}).
\end{equation}
Comparing Eqs. (\ref{53}) with (\ref{54}), we have $b_{k;i}=b_{k;3}$ for $i\geq 3$.
Similarly, for
$\Delta(e_1+e_2+e_3)$ there exist $a_{k;1,2,3},b_{k;1,2,3}\in\mathbb{F}$ such that
\begin{eqnarray*}
\Delta(e_1+e_2+e_3) &=&(a_{k;1,2,3}-\frac{1}{2}b_{k;1,2,3})e_{1+k}+(a_{k;1,2,3}+\frac{1}{2}b_{k;1,2,3})e_{2+k}+b_{k;1,2,3}e_{3+k}\\
&=& \Delta(e_1)+\Delta(e_2)+\Delta(e_3)\\
&=& (a_{k;1}-\frac{1}{2}b_{k;1})e_{1+k}+(a_{k;2}+\frac{1}{2}b_{k;2})e_{2+k}+
b_{k;3}e_{3+k}.
\end{eqnarray*}
So $(a_{k;2}+\frac{1}{2}b_{k;2})-(a_{k;1}-\frac{1}{2}b_{k;1})=b_{k;3}$.
Moreover, $\Delta_k=(a_{k;1}-\frac{1}{2}b_{k;1}+\frac{1}{2}b_{k;3})(e^{k+1}_1+e^{k+2}_2)+b_{k;3}(-\frac{1}{2}e^{k+1}_1+\frac{1}{2}e^{k+2}_2+
\sum_{i\geq3}e^{i+k}_i)\in\mathrm{Der}_{k}(\mathfrak{m}_2)$.
\end{proof}
\small\noindent \textbf{Acknowledgment}\\
The authors are supported by NSF of Jilin Province (No. YDZJ202201ZYTS589), NNSF of China (Nos. 12271085, 12071405, 12001141) and the Fundamental Research Funds for the Central Universities.
\end{document} |
\begin{document}
\title{Variants of a theorem of Helson on general Dirichlet series}
\author[Defant]{Andreas Defant}
\address[]{Andreas Defant\newline Institut f\"{u}r Mathematik,\newline Carl von Ossietzky Universit\"at,\newline
26111 Oldenburg, Germany.
}
\email{[email protected]}
\author[Schoolmann]{Ingo Schoolmann}
\address[]{Ingo Schoolmann\newline Institut f\"{u}r Mathematik,\newline Carl von Ossietzky Universit\"at,\newline
26111 Oldenburg, Germany.
}
\email{[email protected]}
\maketitle
\begin{abstract}
\noindent A result of Helson on general Dirichlet series $\sum a_{n} e^{-\lambda_{n}s}$ states that, whenever $(a_{n})$ is $2$-summable and $\lambda=(\lambda_{n})$ satisfies a certain condition introduced by Bohr, then for almost all homomorphisms $\omega \colon (\mathbb{ R},+) \to \mathbb{T}$ the Dirichlet series $\sum a_{n} \omega(\lambda_{n})e^{-\lambda_{n}s}$
converges on the open right half plane $[Re>0]$.
For ordinary Dirichlet series $\sum a_n n^{-s}$ Hedenmalm and Saksman related this result with the famous Carleson-Hunt theorem on pointwise convergence of Fourier series, and Bayart extended
it within his theory of Hardy spaces $\mathcal{H}_p$ of such series.
The aim here is to prove variants of Helson's theorem within our recent theory of Hardy spaces $\mathcal{H}_{p}(\lambda),\,1\le p \le \infty,$
of general Dirichlet series.
To be more precise, in the reflexive case $1 < p < \infty$ we extend Helson's result to Dirichlet series
in $\mathcal{H}_{p}(\lambda)$ without any further condition on the frequency $\lambda$, and in the non-reflexive case $p=1$ to
the wider class of frequencies satisfying the so-called Landau condition (more general than Bohr's condition). In both cases we add relevant maximal inequalities.
Finally, we give several applications to the structure theory of
Hardy spaces of general Dirichlet series.
\end{abstract}
\noindent
\renewcommand{\fnsymbol{footnote}}{\fnsymbol{footnote}}
\footnotetext{2010 \emph{Mathematics Subject Classification}: Primary 43A17, Secondary 30B50, 43A50} \footnotetext{\emph{Key words and phrases}: general Dirichlet series, Hardy spaces, almost everywhere convergence, maximal inequalities, completeness.
} \footnotetext{}
\section{\bf Introduction}
A general Dirichlet series is a (formal) series of the form $\sum a_n e^{-\lambda_n s}$, where $s
$ is a complex variable, $(a_n)$ a sequence of complex coefficients (called Dirichlet coefficients), and $\lambda=(\lambda_n)$ a frequency
(a strictly increasing non-negative real sequence which tends to $+\infty$). Fixing a frequency $\lambda$, we call $D=\sum a_{n}e^{-\lambda_{n}s}$ a $\lambda$-Dirichlet series, and $\mathcal{D}(\lambda)$ denotes the space of all these series. All basic information on general Dirichlet series can be found in \cite{HardyRiesz} or \cite{Helson}. In particular, recall that convergence
of $D=\sum a_{n}e^{-\lambda_{n}s}$ in $s_0 \in \mathbb{C}$ implies convergence in all $s\in \mathbb{C}$
with $Re s > Re s_0$, and that the limit function $f(s) = \sum_{n=1}^{\infty} a_{n}e^{-\lambda_{n}s}$ of $D$
is holomorphic on the half plane $[Re > \sigma_c(D)]$, where
\[
\sigma_{c}(D)=\inf\left \{ \sigma \in \mathbb{ R} \mid D \text{ converges on } [Re>\sigma] \right\}
\]
determines the so-called abscissa of convergence.
\subsection{Helson's theorem} Let us start with some details on the state of art of Helson's result mentioned in the abstract. We first consider the frequency $\lambda=(\log n)$, which is of special interest, since it generates so-called ordinary Dirichlet series $\sum a_{n}n^{-s}$. As usual (see e.g. \cite{Defant}, \cite{HLS}, or \cite{QQ}), we denote by $\mathcal{H}_2$ the Hilbert space of all Dirichlet series $\sum a_{n}n^{-s}$ with 2-summable coefficients, that is $(a_{n}) \in \ell_{2}$.
Recall that the infinite dimensional polytorus $\mathbb{T}^{\infty}:=\prod_{n=1}^{\infty} \mathbb{T}$
forms a compact abelian group (with its natural group structure), with the normalized Lebesgue measure $dz$ as its Haar measure.
Denote by $\Xi$ the set of all completely multiplicative characters $\chi\colon \mathbb{ N} \to \mathbb{T}$
(that is $\chi(nm)=\chi(n)\chi(m)$ for all $m$,$n$),
which with the pointwise multiplication forms an abelian group.
Denote by $\mathfrak{p}=(p_{n})$ the sequence of prime numbers.
Looking at the group isomorphism
\begin{align*}
\iota\colon \Xi \to \mathbb{T}^{\infty}, ~~\chi \mapsto (\chi(p_{n})),
\end{align*}
we see that $\Xi$ also forms a compact abelian group, and its Haar measure $d \chi$ is the push forward measure of $dz$ through $\iota^{-1}$.
The following result of
Helson from \cite{Helson3} (see also
\cite[Theorem 4.4]{HLS}) is our starting point.
\begin{Theo} \label{HelsonintroHelson}
Given $D= \sum a_{n}n^{-s}\in \mathcal{H}_2$, for almost all $\chi \in \Xi$ the Dirichlet series
$D^{\chi} =\sum a_{n} \chi(n) n^{-s}$ converges on the open right half plane $[\text{Re}>0]$.
\end{Theo}
Helson actually proves an extended version of Theorem \ref{HelsonintroHelson} for general Dirichlet series. Therefore, given a frequency $\lambda$, let us define the space $\mathcal{H}_{2}(\lambda)$ of all (formal) $D=\sum a_{n}e^{-\lambda_{n}s}$ with $2$-summable Dirichlet coefficients. The substitute for $\Xi$ from Theorem \ref{HelsonintroHelson} is given by the so-called Bohr compactification $\overline{\mathbb{ R}}$ of $(\mathbb{ R},+)$. Recall that $\overline{\mathbb{ R}}$ is a compact abelian group, which may be defined to be the set of all homomorphisms $\omega \colon (\mathbb{ R},+) \to \mathbb{T}$ together with the topology of pointwise convergence (i.e. $\overline{\mathbb{ R}}$ is the dual group of $(\mathbb{R},+)$ endowed with the discrete topology $d$). Additionally, Helson assumes Bohr's condition $(BC)$ on $\lambda$, that is
\begin{equation} \label{BCHelson} \exists ~l = l (\lambda) >0 ~ \forall ~\delta >0 ~\exists ~C>0~\forall~ n \in \mathbb{ N}: ~~\lambda_{n+1}-\lambda_{n}\ge Ce^{-(l+\delta)\lambda_{n}}.
\end{equation}
This condition was isolated by Bohr in \cite{Bohr}, and, roughly speaking it prevents the $\lambda_n$'s from getting too close too fast. Note that $\lambda=(\log n)$ satisfies $(BC)$ with $l=1$.
Then the extended version of Helson's Theorem~\ref{HelsonintroHelson} reads as follows.
\begin{Theo} \label{HelsonstheoremHelson}
Let
$D=\sum a_{n} e^{-\lambda_{n}s} \in \mathcal{H}_{2}(\lambda)$ and $\lambda$ with $(BC)$. Then the Dirichlet series
$D^{\omega}=\sum a_{n} \omega(\lambda_{n}) e^{-\lambda_{n}s}$ converges on $[Re >0]$
for almost all $\omega \in \overline{\mathbb{ R}}$.
\end{Theo}
One of our aims is to extend Helson's result
to the Hardy space $\mathcal{H}_{1}(\lambda)$ (a class of Dirichlet series much larger than $\mathcal{H}_{2}(\lambda)$, see the definition below) under a milder assumption on the frequency $\lambda$. We say that $\lambda$ satisfies
Landau's condition $(LC)$ (introduced in \cite{Landau}) provided
\begin{equation} \label{LCHelson}
\forall~ \delta>0~ \exists ~C>0 ~\forall~ n \in \mathbb{ N} \colon~ \lambda_{n+1}-\lambda_{n}\ge C e^{-e^{\delta \lambda_{n}}}.
\end{equation}
Observe that $(BC)$ implies $(LC)$, and that e.g. $\lambda=(\sqrt{\log n})$ satisfies $(LC)$, but fails for $(BC)$. To see an example which fails for $(LC)$, take e.g. $\lambda=(\log \log n)$.
\subsection{Dirichlet groups}
From \cite{DefantSchoolmann2} we recall the definition and some basic facts of so-called Dirichlet groups.
Let $G$ be a compact abelian group and $\beta\colon (\mathbb{ R},+) \to G$ a homomorphism of groups. Then the pair $(G,\beta)$ is called Dirichlet group, if $\beta$ is continuous and has dense range. In this case the dual map $\widehat{\beta}\colon \widehat{G} \hookrightarrow \mathbb{ R}$ is injective, where we identify $\mathbb{R}=\widehat{(\mathbb{R},+)}$ (note that we do not assume $\beta$ to be injective). Consequently, the characters $e^{-ix\pmb{\cdot}} \colon \mathbb{ R} \to \mathbb{T}$, $x\in \widehat{\beta}(\widehat{G})$, are precisely those which define a unique $h_{x} \in \widehat{G}$ such that $h_{x} \circ \beta=e^{-ix\pmb{\cdot}}$. In particular, we have that
\begin{equation*}
\widehat{G}=\{h_{x} \mid x \in \widehat{\beta}(\widehat{G}) \}.
\end{equation*}
From \cite[Section 3.1]{DefantSchoolmann2} we know that every $L_{1}(\mathbb{ R})$-function may be interpreted as a bounded regular Borel measure on $G$. In particular, for every $u>0$ the Poisson kernel
$$P_{u}(t):=\frac{1}{\pi}\frac{u}{u^{2}+t^{2}}\,,\,\,\, t \in \mathbb{ R},$$
defines a measure $p_{u}$ on $G$, which we call the Poisson measure on $G$. We have $\|p_{u}\|=\|P_{u}\|_{L_{1}(\mathbb{ R})}=1$ and
\begin{equation}\label{Fourier1Helson}
\text{$\widehat{p_{u}}(h_{x})=\widehat{P_{u}}(x)=e^{-u|x|}$ for all $u >0$ and
$x\in \widehat{\beta}(\widehat{G})$.}
\end{equation}
Finally, recall from \cite[Lemma 3.11]{DefantSchoolmann2} that, given a measurable function $f:G \to \mathbb{C}$, then for almost all $\omega \in G$ there are measurable functions $f_{\omega} \colon \mathbb{ R} \to \mathbb{C}$
such that
\[
\text{$f_{\omega}(t)=f(\omega \beta(t))$ almost everywhere on $\mathbb{ R}$,}
\]
and if $f\in L_{1}(G)$, then all these $f_\omega$ are locally integrable.
Moreover, as shown in \cite[Corollary 2.11]{DefantSchoolmann3}, for almost all $\omega \in G$
\begin{equation} \label{besicoHelson}
\widehat{f}(0)=\lim_{T\to \infty} \frac{1}{2T} \int_{-T}^{T} f_{\omega}(t) dt .
\end{equation}
We will later see that this way to 'restrict' functions on the group $G$ to $\mathbb{ R}$, in fact establishes a sort of bridge
between Fourier analysis on Dirichlet groups $(G,\beta)$ and Fourier analysis on $\mathbb{ R}$.
\subsection{$\lambda$-Dirichlet groups}
Now, given a frequency $\lambda$, we call a Dirichlet group $(G,\beta)$ a $\lambda$-Dirichlet group whenever $\lambda \subset \widehat{\beta}(\widehat{G})$, or equivalently whenever for every
$e^{-i\lambda_{n} \pmb{\cdot}} \in \widehat{(\mathbb{R},+)}$ there is (a unique) $h_{\lambda_{n}}\in \widehat{G}$ with $h_{\lambda_{n}}\circ \beta=e^{-i\lambda_{n} \pmb{\cdot}}$.
Note that for every $\lambda$ there exists a $\lambda$-Dirichlet group $(G,\beta)$ (which is not unique).
To see a very first example, take the Bohr compactification $\overline{\mathbb{ R}}$ together with the mapping
$$\beta_{\overline{\mathbb{ R}}} \colon \mathbb{ R} \to \overline{\mathbb{ R}}, ~~ t \mapsto \left[ x \mapsto e^{-itx} \right].$$
Then $\beta_{\overline{\mathbb{ R}}}$ is continuous and has dense range (see e.g. \cite[Theorem 1.5.4, p. 24]{QQ} or \cite[Example 3.6]{DefantSchoolmann2}), and so the pair $(\overline{\mathbb{ R}},\beta_{\overline{\mathbb{ R}}})$ forms a $\lambda$-Dirichlet group for all $\lambda$'s. We refer to \cite{DefantSchoolmann2} for more 'universal' examples of Dirichlet groups. Looking at the frequency $\lambda=(n)=(0,1,2,\ldots)$, the group $G=\mathbb{T}$ together with \[\beta_\mathbb{T}: \mathbb{ R} \to \mathbb{T}, \,\,\beta_{\mathbb{T}}(t)=e^{-it},\]
forms a $\lambda$-Dirichlet group, and the so-called
Kronecker flow
\begin{equation*}
\label{oscarHelson}
\beta_{\mathbb{T}^{\infty}}\colon \mathbb{ R} \to \mathbb{T}^{\infty}, ~~ t \mapsto \mathfrak{p}^{-it}=(2^{-it},3^{-it}, 5^{-it}, \ldots),
\end{equation*}
turns the infinite dimensional torus $\mathbb{T}^{\infty}$ into a $\lambda$-Dirichlet group
for $\lambda = (\log n)$.
We note that, identifying $\widehat{\mathbb{T}} = \mathbb{Z}$ and $\widehat{\mathbb{T}^\infty} = \mathbb{Z}^{(\mathbb{ N})}$ (all finite sequences of integers), in the first case $h_n(z) = z^n$ for $z \in \mathbb{T}, n \in \mathbb{Z}$,
and in the second case $h_{\sum \alpha_j \log p_j}(z) = z^\alpha$ for $z \in \mathbb{T}^\infty, \alpha \in \mathbb{Z}^{(\mathbb{ N})}$.
\subsection{Hardy spaces of general Dirichlet series}
Fix some $\lambda$-Dirichlet group $(G,\beta)$ and $1\le p \le \infty$. By
$$H_{p}^{\lambda}(G)$$
we denote the Hardy space of all functions
$f\in L_{p}(G)$ (recall that being a compact abelian group, $G$ allows a unique normalized Haar measure) having a Fourier transform supported on $\{h_{\lambda_n} \colon n \in \mathbb{N}\} \subset \widehat{G}$. Being a closed subspace of $L_p(G)$, this clearly defines a Banach space.
These spaces $H_{p}^{\lambda}(G)$ naturally define $\lambda$-Dirichlet series. Let
$$\mathcal{H}_{p}(\lambda)$$
be the class of all $\lambda$-Dirichlet series $D=\sum a_n e^{-\lambda_n s}$ for which there is some
$f \in H_p^\lambda(G)$ such that $a_n = \widehat{f}(h_{\lambda_{n}})$ for all $n$. In this case the function $f$ is unique, and together with
the norm $\|D\|_{p}:=\|f\|_{p}$ the linear space $\mathcal{H}_{p}(\lambda)$ obviously forms a Banach space. So (by definition) the so-called Bohr map
\begin{equation} \label{BohrmapHelson}
\mathcal{B}\colon H_{p}^{\lambda}(G)\to \mathcal{H}_{p}(\lambda),~~ f \mapsto \sum \widehat{f}(h_{\lambda_{n}}) e^{-\lambda_{n}s}
\end{equation}
defines an onto isometry. A fundamental fact from \cite[Theorem 3.24.]{DefantSchoolmann2} is that the definition of $\mathcal{H}_{p}(\lambda)$ is independent of the chosen $\lambda$-Dirichlet group $(G,\beta)$.
Now we have given two definitions of the Hilbert space $\mathcal{H}_{2}(\lambda)$, but by Parseval's theorem both of these definitions actually coincide.
Our two basic examples of frequencies, $\lambda = (n)$ and $\lambda = (\log n)$, lead to well-known examples:
\begin{equation} \label{hardyTHelson}
H_{p}(\mathbb{T}):=H_{p}^{(n)}(\mathbb{T}) \,\,\, \,\text{and} \,\,\,\, H_p(\mathbb{T}^\infty) := H_p^{(\log n)}(\mathbb{T}^\infty) \,.
\end{equation}
In particular,
$f \in H_p^{(n)}(\mathbb{T})$ if and only if $f \in L_p(\mathbb{T})$ and $\hat{f}(n) = 0$ for any $n \in \mathbb{Z}$ with $n < 0$, and
$f \in H_p^{(\log n)}(\mathbb{T}^\infty)$ if and only if $f \in L_p(\mathbb{T}^\infty)$ and $\hat{f}(\alpha) = 0$ for any finite sequence
$\alpha = (\alpha_k)$ of integers with $\alpha_k < 0$ for some $k$
(where as usual $\widehat{f}(\alpha) := \widehat{f}(h_{\log \mathfrak{p}^\alpha}))$.
Consequently, if we turn to Dirichlet series, then the Banach spaces
$$\mathcal{H}_p= \mathcal{H}_p((\log n))$$
are precisely Bayart's Hardy spaces of ordinary Dirichlet series from \cite{Bayart}
(see also \cite{Defant} and \cite{QQ}).
\subsection{Vertical limits}
Given a $\lambda$-Dirichlet series $D = \sum a_n e^{-\lambda_n s}$ and $z \in \mathbb{C}$, we say that
$$D_{z}:=\sum a_n e^{-\lambda_n z} e^{-\lambda_n s} $$ is the translation of $D$ about $z$, and we distinguish between horizontal translations $D_u, u \in \mathbb{ R}$, and vertical translations
$D_{i\tau}, \tau \in \mathbb{ R}$.
If $(G, \beta)$ is a $\lambda$-Dirichlet group and $D \in \mathcal{H}_{p}(\lambda)$ is associated to
$f\in H_p^\lambda(G)$, then for each $u>0$
the horizontal translation $D_u$ corresponds to the convolution of $f$ with the Poisson measure $p_{u}$, i.e.
$\mathcal{B}(f*p_{u})=D_{u}$ (compare coefficients),
and we refer to $f*p_{u}$ as the translation of $f$ about $u$. In particular, we have that $D_{u}\in \mathcal{H}_{p}(\lambda)$ for every $u>0$.
Moreover, each Dirichlet series of the form $$D^{\omega}:=\sum a_{n} h_{\lambda_{n}}(\omega)e^{-\lambda_n s}\,,\,\, \omega \in G,$$
is said to be a vertical limit of $D$. Examples are vertical translations
$D_{i\tau}$ with $\tau \in \mathbb{R}$,
and the terminology is explained by the fact that each vertical limit may be approximated by vertical translates. More precisely, given $D = \sum a_n e^{-\lambda_n s}$ which converges absolutely on the right half-plane, for every $\omega \in G$ there is a sequence $(\tau_{k})_{k} \subset \mathbb{ R}$ such that $(D_{i\tau_{k}})$ converges to $D^{\omega}$ uniformly on $[Re>\varepsilon]$ for all $\varepsilon>0$.
Assume conversely that for $(\tau_{k})_{k} \subset \mathbb{ R}$ the vertical translations $D_{i\tau_k}$ converge
uniformly on $[Re>\varepsilon]$ for every $\varepsilon>0$
to a holomorphic function $f$ on $[Re>0]$. Then there is $\omega \in G$ such that
$f(s)= \sum_{n=1}^\infty a_n h_{\lambda_n}(\omega) e^{-\lambda_n s}$
for all $s \in [Re>0]$\,. For all this see \cite[Proposition 4.6]{DefantSchoolmann2}.
\subsection{R\'esum\'e of our results on Helson's theorem}
With all these preliminaries we give a brief r\'esum\'e of our extensions of Helson's theorem \ref{HelsonstheoremHelson}, where we carefully have to distinguish between the cases $1<p<\infty$
and $p=1$.
\noindent
{\bf Synopsis I} \label{SIHelson}\\
Let $(G,\beta)$ be a $\lambda$-Dirichlet group, $1 \leq p < \infty$, and $D\in \mathcal{H}_p(\lambda)$
with associated function $f \in H_p^\lambda(G)$. Then the following statements hold true:
\begin{itemize}
\item[(i)]
If $1 < p < \infty$, then almost all vertical limits $D^\omega$ converge almost everywhere on $[Re = 0]$,
and consequently almost all of them converge on $[Re >0]$.
\item[(ii)]
If $\lambda$ satisfies $(LC)$ and $p=1$, then almost all vertical limits $D^\omega$ converge on $[Re >0]$.
\end{itemize}
Moreover,
there is a null set $N \subset G$ such that for every $\omega \notin N$ in the first case
\[
D^\omega(it) = f_\omega (t) \,\,\, \text{for almost all $t \in \mathbb{ R}$},
\]
and in both cases
\[
D^\omega(u+it) = (f_\omega \ast P_u) (t) \,\,\,
\text{for every $u >0$ and almost all $t \in \mathbb{ R}$}.
\]
Let us indicate carefully which of these results are already known and which are new. We first discuss the ordinary case $\lambda = (\log n)$ with $(\log n)$-Dirichlet group $(\mathbb{T}^{\infty}, \beta_{\mathbb{T}^\infty})$. Then for $p=2$ statement (i) was proved by Hedenmalm and Saksman in \cite{HedenmalmSaksman}, whereas Bayart in \cite[Theorem 6]{Bayart} for every $D \in \mathcal{H}_1$ proves the convergence of almost all vertical limits $D^\omega$ on $[Re >0]$.
For Dirichlet series in $\mathcal{H}_2$ Bayart deduces his theorem from the Menchoff-Rademacher theorem on almost everywhere convergence of orthonormal series (see also \cite{DefantSchoolmann5}), and extends it then to Dirichlet series
$\mathcal{H}_1$ by so-called hypercontractivity.
In the general case statement (ii) for $p=2$ is Helson's theorem~\ref{HelsonstheoremHelson} and under the more restrictive condition $(BC)$ instead of $(LC)$ and $p=1$.
\subsection{Helson's theorem and its maximal inequalities}
Our strategy is to deduce the preceding results
\begin{itemize}
\item
from relevant maximal inequalities for functions in
$H_{1}^{\lambda}(G)$,
\item
to obtain as a consequence results on pointwise convergence of the Fourier series of these functions,
\item
and to use in a final step the Bohr transform (\ref{BohrmapHelson}) to transfer these results to
Helson-type theorems for Dirichlet series.
\end{itemize}
In the reflexive case $1 < p < \infty$ we follow closely the ideas of Duy \cite{Duy} and Hedenmalm-Saksman \cite{HedenmalmSaksman} extending the Carleson-Hunt theorem on pointwise convergence of Fourier series to functions
in $H_p^\lambda(G)$, and in the non-reflexive case $p=1$ we use among others boundedness properties of a Hardy-Littlewood maximal type operator for integrable functions on Dirichlet groups which we invent in \cite{DefantSchoolmann3}.
In order to give a r\'esum\'e of the results we have on the first of the above steps recall that given a measure space $(\Omega, \mu)$ the weak $L_{1}$-space
$L_{1, \infty}(\mu)$ is the linear space of all measurable functions $f\colon \Omega\to \mathbb{C}$ for which there is a constant $C>0$ such that for all $\alpha>0$ we have
$
\mu \big(\left\{ \omega \in \Omega \mid |f(\omega)|>\alpha \right\} \big)\le
C/\alpha.
$
Together with $\|f\|_{1,\infty}:= \inf C$ the space $L_{1,\infty}(\mu)$ becomes a quasi Banach space (see e.g. \cite[\S 1.1.1 and \S 1.4]{Grafakos1}), where the triangle inequality holds with constant $2$.\\
\noindent
{\bf Synopsis II} \label{SIIHelson}\\
Let $(G,\beta)$ be a $\lambda$-Dirichlet group. Then the following statements hold true:
\begin{itemize}
\item[(i)] For every $1 < p < \infty$ there is a constant $C = C(p) >0$
such that for every $f \in H_p^\lambda(G)$
\begin{equation*}
\Big\| \sup_{N} \big| \sum_{n=1}^{N} \widehat{f}(h_{\lambda_{n}}) h_{\lambda_{n}} \big|
\Big\| _{L_p(G)} \le C \,\|f\|_{p}.
\end{equation*}
\item[(ii)]
If $\lambda$ satisfies $(LC)$, then for every $u >0$ there is a constant $C = C(u) >0$ such that for every $f \in H_1^\lambda(G)$
\begin{equation*}
\Big\| \sup_{N} \big| \sum_{n=1}^{N} \widehat{f}(h_{\lambda_{n}}) e^{-u \lambda_n} h_{\lambda_{n}} \big|
\Big\| _{L_{1, \infty}(G)} \le C\,\|f\|_{1}.
\end{equation*}
\item[(iii)]
If $\lambda$ satisfies $(BC)$,
then to every $u>0$ there is a constant $C = C(u) >0$ such that for all $1\le p \le \infty$ and $f \in H_{p}^{\lambda}(G)$
\begin{equation*}
\Big\|\sup_{N} \big| \sum_{n=1}^{N}\widehat{f}(h_{\lambda_{n}})e^{-\lambda_{n}u} h_{\lambda_{n}} \big|
\Big\|_p \le C\|f\|_{p}.
\end{equation*}
\end{itemize}
In particular, for all $f \in H_p^\lambda(G), 1 <p < \infty$
\[
f = \sum_{n=1}^{\infty} \widehat{f}(h_{\lambda_{n}}) h_{\lambda_{n}}
\,\,\, \text{almost everywhere on $G$},
\]
and under $(LC)$ for all $f \in H_1^\lambda(G)$ and $u >0$
\[
f\ast p_u =\sum_{n=1}^{\infty} \widehat{f}(h_{\lambda_{n}}) e^{-u \lambda_n} h_{\lambda_{n}}
\,\,\, \text{almost everywhere on $G$.}
\]
A standard argument shows how to deduce from such maximal inequalities pointwise convergence theorems of Fourier series, e.g. using
Egoroff's theorem (see \cite[Lemma 3.6]{DefantSchoolmann3} for a more general situation). The following remark indicates
how pointwise convergence theorems of Fourier series then transfer
to Dirichlet series (see \cite[Lemma 1.4]{DefantSchoolmann3}).
\begin{Rema} \label{tranferHelson}
Let $(G,\beta)$ be a Dirichlet group, and $f_n, f$ measurable functions on $G$. Then the following are equivalent:
\begin{itemize}
\item[(i)]
$\lim_{n\to \infty} f_n(\omega) = f(\omega)$ \,\,\, \text{for almost all $\omega \in G$.}
\item[(ii)]
$\lim_{n\to \infty} (f_n)_\omega(t)= f_\omega(t)$ \,\,\,
\text{for almost all $\omega \in G$ and for almost all $t\in \mathbb{ R}$.}
\end{itemize}
In particular, if $(G,\beta)$ is a $\lambda$-Dirichlet group and $D=\sum a_n e^{-\lambda_n s}$ is associated to $f \in H_1^\lambda(G)$, then
\begin{equation*} \label{FseriesHelson}
f=\sum_{n=1}^{\infty} \widehat{f}(h_{\lambda_{n}})h_{\lambda_{n}}
\end{equation*}
almost everywhere on $G$ if and only if for almost all $\omega \in G$ the Dirichlet series
\begin{equation*} \label{DseriesHelson}
D^{\omega}=\sum a_{n} h_{\lambda_{n}}(\omega) e^{-\lambda_{n}s}
\end{equation*}
converges almost everywhere on the imaginary line $[Re=0]$, and its limit coincides with $f_\omega$
almost everywhere on $\mathbb{ R}$.
\end{Rema}
\subsection{Organization}
The reflexive case from Synopsis I and II we handle in Theorem~\ref{DirichletintegerHelson} and Theorem~\ref{CorointegerHelson}, and under a different point of view also in Theorem~\ref{maximalineqBCHelson}.
The Theorems~\ref{Helson(LC)Helson} and \ref{HelsonstheoHelson} are going to cover the non-reflexive parts. In the final Section~\ref{bohrstheoremsectionHelson} we extend and improve parts of the
structure theory of general Dirichlet series started in \cite{DefantSchoolmann2}. Among others we show in Theorem~\ref{equivalenceHelson} that $\mathcal{D}_{\infty}(\lambda)$, the normed space of all $\lambda$-Dirichlet series which
converge to a bounded and then holomorphic function on the right half plane, is complete if and only if $\mathcal{D}_{\infty}(\lambda)=\mathcal{H}_{\infty}(\lambda)$ holds isometrically if and only if
$\lambda$ satisfies (what we call) 'Bohr's theorem'.
\section{\bf Helson's theorem versus the Carleson-Hunt theorem} \label{CarlesonsectionHelson}
In this section we provide the proofs of the reflexive statements from the Synopses I and II in the introduction.
To this end, by $CH_{p} >0$ we denote the best constant in the maximal inequality from the Carleson-Hunt theorem -- that is, given $1<p<\infty$, the best $C>0$ such that for all $f\in L_{p}(\mathbb{T})$
$$\bigg(\int_{\mathbb{T}} \sup_{N} \big|\sum_{|k|\le N} \widehat{f}(k)z^{k}\big|^{p} dz\bigg)^{\frac{1}{p}}\le C\|f\|_{p}.$$
\begin{Theo} \label{DirichletintegerHelson} Let $1<p<\infty$ and $\lambda=(\lambda_n)$ an arbitrary frequency. Then for all $\lambda$-Dirichlet groups $(G,\beta)$ and $D=\sum a_{n}e^{-\lambda_{n}s}\in \mathcal{H}_{p}(\lambda)$ we for almost all $\omega \in G$ have
\begin{equation}
\label{max1Helson}
\lim_{T\to \infty} \bigg(\frac{1}{2T} \int_{-T}^{T} \sup_{N} \big| \sum_{n=1}^{N} a_{n} h_{\lambda_{n}}(\omega) e^{-it\lambda_{n}} \big|^{p} dt \bigg)^{\frac{1}{p}} \le \text{CH}_{p}\|D\|_{p}.
\end{equation}
Moreover, for almost all $\omega \in G$ almost everywhere on $\mathbb{ R}$
\begin{equation}\label{point1Helson}
D^{\omega}(it)=\sum_{n=1}^{\infty} a_n h_{\lambda_{n}}(\omega) e^{-it\lambda_{n}}=f_{\omega}(t),
\end{equation}
and in particular
\begin{equation}\label{point2Helson}
\text{$D^{\omega}=\sum a_{n} h_{\lambda_{n}}(\omega)e^{-\lambda_{n}s}$ converges on $[Re>0]$.}
\end{equation}
\end{Theo}
As described above
we deduce this from a Carleson-Hunt type maximal inequality for functions in $H_p^\lambda(G)$.
\begin{Theo} \label{CorointegerHelson} Let $\lambda$ be a frequency and $1<p<\infty$. Then for all $\lambda$-Dirichlet groups $(G,\beta)$ and $f \in H_{p}^{\lambda}(G)$ we have
\begin{equation} \label{iiiiiHelson}
\bigg( \int_{G} \sup_{N} \big| \sum_{n=1}^{N} \widehat{f}(h_{\lambda_{n}}) h_{\lambda_{n}}(\omega) \big|^{p} d\omega \bigg)^{\frac{1}{p}} \le CH_{p}\|f\|_{p}.
\end{equation}
In particular, almost everywhere on $G$
\begin{equation} \label{sakssHelson}
f=\sum_{n=1}^{\infty} \widehat{f}(h_{\lambda_{n}}) h_{\lambda_{n}}.
\end{equation}
\end{Theo}
Before we begin with the proofs let us apply Theorem~\ref{CorointegerHelson} to the frequency $\lambda =(\log n)$, which, as remarked above, together with the
group $(\mathbb{T}^\infty, \beta_{\mathbb{T}^\infty})$ forms a $(\log n)$-Dirichlet group.
\begin{Coro} \label{OrdiAHelson}
Let $1 < p < \infty$ and $f \in H_p(\mathbb{T}^\infty)$. Then
\[
\lim_{N\to \infty} \sum_{\mathfrak{p}^\alpha \leq N} \widehat{f}(\alpha) z^\alpha=f(z) \,\,\,\,\, \text{almost everywhere on $\mathbb{T}^\infty$}\,,
\]
and moreover
\[
\bigg( \int_{\mathbb{T}^\infty} \sup_N \big| \sum_{\mathfrak{p}^\alpha \leq N}
\widehat{f}(\alpha) z^\alpha
\big|^p d z \bigg)^{1/p}
\leq CH_{p} \|f\|_p\,.
\]
\end{Coro}
We start with the proof of Theorem \ref{CorointegerHelson}, and show at the end of this section that this result
in fact also proves Theorem~\ref{DirichletintegerHelson}.
Actually for a certain choice of $\lambda$-Dirichlet groups, Theorem \ref{CorointegerHelson} is due to Duy in his article \cite{Duy}, where convergence of Fourier series of so-called Besicovitch almost periodic functions is investigated.
In our language, fixing a frequency $\lambda$, Duy considers the $\lambda$-Dirichlet group
$G_{D}:=\widehat{(U,d)}$, where $U$ is the smallest subgroup of $\mathbb{ R}$ containing $\lambda$ and $d$ denotes the discrete topology. This compact abelian group together with the mapping
$$\beta_{D}\colon \mathbb{ R} \to G_{D}, ~~ t \mapsto \left[u \mapsto e^{-itu}\right]$$
forms a $\lambda$-Dirichlet group (see also \cite[Example 3.5]{DefantSchoolmann2}). Then by \cite[Theorem 13, p. 274]{Duy} (in our notation) the maximal operator
\begin{equation*} \label{DuymaximalopHelson}
\mathbb{M}(f)(\omega):=\sup_{N>0} \big| \sum_{n=1}^{N} \widehat{f}(h_{\lambda_{n}}) h_{\lambda_{n}}(\omega) \big|
\end{equation*}
defines a bounded operator from $H_{p}^{\lambda}(G_{D})$ to $L_{p}(G_{D})$, whenever $1<p<\infty$, and this in fact proves Theorem \ref{CorointegerHelson} for $(G_{D},\beta_{D})$.
Moreover, the case $p=2$ and $\lambda=(\log n)$ with Dirichlet group $(\mathbb{T}^{\infty},\beta_{\mathbb{T}^{\infty}})$ of Theorem \ref{CorointegerHelson} is proven by Hedenmalm and Saksman in \cite[Theorem 1.5]{HedenmalmSaksman}, without stating (\ref{iiiiiHelson}). Their proof and the proof of Duy are based on Carleson's maximal inequality
on almost everywhere convergence of Fourier series of square integrable functions on $\mathbb{T}$, and
a technique due to Fefferman from \cite{Fefferman}.
Following closely their ideas, we for the sake of completeness provide a self-contained proof of Theorem \ref{CorointegerHelson} within our framework of Hardy spaces $H_{p}^{\lambda}(G)$, which shows that the special choice of the $\lambda$-Dirichlet group $(G, \beta)$ in fact is irrelevant.
A crucial argument of \cite{Duy} is, that for every finite set $\{a_{1}, \ldots, a_{N}\}$ of positive numbers, there are $\mathbb{Q}$-linearly independent numbers $b_{1}, \ldots, b_{P}$ such that
$\{a_{1}, \ldots, a_{N}\} \subset \operatorname{span}_{\mathbb{ N}_{0}} (b_{1},\ldots, b_{P}).$ We demand less and only require integer coefficients.
\begin{Lemm} \label{trickDuyHelson} Let $a_{1}, \ldots, a_{N}$ be positive numbers. Then there are $\mathbb{Q}$-linearly independent real numbers $b_{1}, \ldots, b_{P}$ such that
$\{a_{1}, \ldots, a_{N}\} \subset \operatorname{span}_{\mathbb{Z}} (b_{1}, \ldots, b_{P}).$
\end{Lemm}
\begin{proof}
We prove the claim by induction. If $N=1$, then choose $b_{1}:=a_{1}$. Assume that for $a_{1}, \ldots, a_{N}$ there are $\mathbb{Q}$-linearly independent $b_{1}, \ldots, b_{P}$ such that $\{a_{1}, \ldots, a_{N}\} \subset \operatorname{span}_{\mathbb{Z}} (b_{1}, \ldots, b_{P})$ and let $a_{N+1}$ be arbitrary. If $(a_{N+1}, b_{1}, \ldots, b_{P})$ is $\mathbb{Q}$-linearly independent, then
choose $b_{P+1}:=a_{N+1}$. Else, there are rationals $q_{j}$ such that $a_{N+1}=\sum_{j=1}^{P} q_{j}b_{j}$ and so for every $K\in \mathbb{ N}$
$$a_{N+1}=\sum_{j=1}^{P} (Kq_{j}) \frac{b_{j}}{K}.$$
Choose $K$ large enough such that $K q_{j}\in \mathbb{Z}$ for all $j$, and define $\widetilde{b_{j}}:=K^{-1}b_{j}$. Then $\{a_{1}, \ldots, a_{N}, a_{N+1}\} \subset \operatorname{span}_{\mathbb{Z}} (\widetilde{b_{1}}, \ldots, \widetilde{b_{P}})$, which finishes the proof.
\end{proof}
\begin{proof}[Proof of Theorem \ref{CorointegerHelson}]
We first consider polynomials from $L_{p}(\mathbb{T}^{\infty})$ and then show that the choice of the Dirichlet group is irrelevant. So let $f \in L_{p}(\mathbb{T}^{\infty})$ be a polynomial and define for $x \in \mathbb{R}^N$ the maximal function
\[
M_xf(z) = \sup_{S >0}
\big| \sum_{\substack{\alpha \in \mathbb{Z}^N\\ <\alpha,x> \leq S}} \hat{f}(\alpha) z^\alpha \big|\,,\,\,\, z \in \mathbb{T}^N\,,
\]
where $<\alpha,x>:=\sum \alpha_{j}x_{j}$. We intend to show that
\begin{align} \label{maxinequalityHelson}
\| M_xf\|_p \leq CH_{p}\|f\|_p\,.
\end{align}
Note that then, taking $x=B$, the proof finishes.
We will use, that given
an $N \times N$ matrix $M=(m_{i,j})$ with integer entries and such that $\det M =1$, the
transformation formula
for every integrable function
$g: \mathbb{T}^N \to \mathbb{R}$ gives
\begin{align} \label{integralHelson}
\int_{\mathbb{T}^N} g(z)dz = \int_{\mathbb{T}^N} g(\Phi_M(z)) dz\,,
\end{align}
where
\[
\Phi_M : \mathbb{T}^N \to \mathbb{T}^N\,, (e^{it_j})_j \mapsto (e^{i \sum_k m_{jk}t_k})_j\,,
\]
and moreover for all $\alpha \in \mathbb{Z}^N$ and $z \in \mathbb{T}^N$
\begin{align} \label{monoHelson}
\Phi_M(z)^\alpha = z^{M^{t} \alpha}\,,
\end{align}
where $M^{t}$ denotes the transposed matrix of $M$.
By approximation we only have to prove \eqref{maxinequalityHelson} for a dense collection of $x$ in $\mathbb{R}_{>0}^N$, and, following the argument from the proof of \cite[Theorem 1.4]{HedenmalmSaksman}, we take
\[
x= \bigg(\frac{q_1}{Q}, \ldots,\frac{q_N}{Q} \bigg)\,,
\]
where $q_1, \ldots, q_N, Q \in \mathbb{Z}$ and $\text{gcd}(q_1,q_2) =1$. Choose $r_1, r_2 \in \mathbb{Z}$ such that $q_1r_2 - q_2r_1 = 1$, and define the $N \times N$ matrix
\[
A=
\begin{bmatrix}
q_1 & q_2 & q_3 & . & . &. & . & q_N \\
r_1 & r_2 & 0 & .& . &.&. & 0 \\
0 & 0 & 1 & 0 & . &.&.& 0 \\
0 & 0 & 0 & 1 & 0 & . &.& 0 \\
. & . &. &. & . & . & .&. &\\
. & . &. &. & . & . & .&. &\\
. & . &. &. & . & . & .&. &\\
0 & 0 & 0 & 0 & 0 & . & 0& 1 \\
\end{bmatrix}
\]
which has determinant one.
Then we deduce from \eqref{integralHelson} and \eqref{monoHelson}
(applied to $M = (A^{-1})^{t}$) that
\begin{align*}
\| M_xf\|_p^p
&
= \int_{\mathbb{T}^N}
\sup_{S >0} \big| \sum_{\substack{\alpha \in \mathbb{Z}^N\\ <q,\alpha> \leq QS}} \hat{f}(\alpha) z^{A^{-1}A\alpha}\big|^p dz
\\&
= \int_{\mathbb{T}^N}
\sup_{S >0} \big| \sum_{\beta \in \{A\alpha \colon <q,\alpha> \leq QS\}} \hat{f}(A^{-1}\beta) z^\beta\big|^p dz\,.
\end{align*}
Now we observe that
for every $S >0$
\begin{align*}\label{indecesHelson}
\{ A\alpha \colon & \text{$\alpha \in \mathbb{Z}^N$ and $<q,\alpha> \leq QS $} \}
=
\{ (\beta_1,\gamma) \in \mathbb{Z} \times\mathbb{Z}^{N-1} \colon \text{$\beta_1 \leq QS $ }\}\,,
\end{align*}
hence
\begin{align*}
\| M_xf\|_p^p
&
= \int_{\mathbb{T}^{N-1}}
\bigg(
\int_{\mathbb{T}}
\sup_{S >0} \big| \sum_{\substack{\beta_1 \in \mathbb{Z}\\ \beta_1 \leq QS}}
\Big[ \sum_{\gamma \in \mathbb{Z}^{N-1} }
\hat{f}(A^{-1}(\beta_1, \gamma)) z^\gamma
\Big] z_1^{\beta_1}\big|^p dz_1\bigg) dz\,.
\end{align*}
Finally, we deduce from the Carleson-Hunt maximal inequality in $L_p(\mathbb{T})$, and another application of \eqref{monoHelson} and \eqref{integralHelson} that
\begin{align*}
\| M_xf\|_p^p
\le \int_{\mathbb{T}^{N-1}}
CH_{p}^{p}
\bigg(
\int_{\mathbb{T}}
\big| \sum_{\beta \in \mathbb{Z}^N}
&
\hat{f}(A^{-1}\beta) z^\beta
\big|^p dz_1\bigg) dz
\\&
=
CH_{p}^{p}
\int_{\mathbb{T}^{N}}
\big| \sum_{\alpha \in \mathbb{Z}^N}
\hat{f}(\alpha) z^\alpha
\big|^p dz\,,
\end{align*}
which is what we aimed for. Now let $\lambda$ be a frequency and $(G,\beta)$ be a $\lambda$-Dirichlet group. Fix $N$ and let $E_{N}:=\{\lambda_{1},\ldots \lambda_{N}\}$. Then by Lemma \ref{trickDuyHelson} there are $\mathbb{Q}$-linearly independent $B_{N}:=(b_{1},\ldots, b_{P_{N}})$ such that $E_{N}\subset \operatorname{span}_{\mathbb{Z}} (b_{1},\ldots, b_{P_{N}})$. Let $f=\sum_{n=1}^{N} a_{n}h_{\lambda_{n}}$ and define $g:=\sum c_{\alpha} z^{\alpha} \in L_{p}(\mathbb{T}^{\infty})$, where $c_{\alpha}:=a_{n}$, whenever $\lambda_{n}=\sum \alpha_{j}b_{j}$. Observe that $\mathbb{T}^{P_{N}}$ with mapping
$$\beta_{B_{N}}\colon \mathbb{ R} \to \mathbb{T}^{P_{N}}, ~~ t \mapsto (e^{-itb_{1}}, \ldots,e^{-itb_{P_{N}}})$$
forms a Dirichlet group. Then by \cite[Proposition 3.17]{DefantSchoolmann2} we have $\|f\|_{p}=\|g\|_{p}$. Moreover, for every Dirichlet group $(H,\beta_{H})$ we for all $f\in C(H)$ have
\begin{equation}\label{QQQHelson}
\int_{H} f~dm= \lim_{T\to \infty} \frac{1}{2T} \int_{-T}^{T} (f\circ\beta_{H})(t) dt,
\end{equation}
which is straightforwardly checked on polynomials and follows then by density.
Since $\omega \mapsto \sup_{N\le M} \left| \sum_{n=1}^{N} \widehat{f}(h_{\lambda_{n}}) h_{\lambda_{n}}(\omega) \right|$
is continuous, we obtain using (\ref{QQQHelson}) for $(G,\beta)$ and $(\mathbb{T}^{P_N},\beta_{B_{N}})$ and two times the monotone convergence theorem
\begin{align*}
&\bigg( \int_{G} \sup_{N} \big| \sum_{n=1}^{N} \widehat{f}(h_{\lambda_{n}}) h_{\lambda_{n}}(\omega) \big|^{p} d\omega \bigg)^{\frac{1}{p}}=\lim_{M\to \infty} \bigg( \int_{G} \sup_{N\le M} \big| \sum_{n=1}^{N} \widehat{f}(h_{\lambda_{n}}) h_{\lambda_{n}}(\omega) \big|^{p} d\omega \bigg)^{\frac{1}{p}} \\ &= \lim_{M\to \infty} \bigg(\lim_{T\to \infty} \frac{1}{2T} \int_{-T}^{T} \sup_{N\le M} \big| \sum_{n=1}^{N} \widehat{f}(h_{\lambda_{n}})e^{-\lambda_{n}it} \big|^{p} dt \bigg)^{\frac{1}{p}} \\ & =\lim_{M\to \infty} \bigg( \int_{\mathbb{T}^{\infty}} \sup_{N\le M} \big| \sum_{\alpha B \le N} \widehat{g}(\alpha) z^{\alpha} \big|^{p} dz\bigg)^{\frac{1}{p}}= \bigg( \int_{\mathbb{T}^{\infty}} \sup_{N} \big| \sum_{\alpha B \le N} \widehat{g}(\alpha) z^{\alpha} \big|^{p} dz \bigg)^{\frac{1}{p}} \\ &\le CH_{p}\|g\|_{p}=CH_{p}\|f\|_{p}. \qedhere
\end{align*}
\end{proof}
\begin{proof}[Proof of Theorem \ref{DirichletintegerHelson}]
Let $D\in \mathcal{H}_{p}(\lambda)$ and $f\in H_{p}^{\lambda}(G)$ with $\mathcal{B}(f)=D$. By Theorem \ref{CorointegerHelson} we know that
\begin{equation*} \label{fHelson}
\omega \mapsto \sup_{N} \big| \sum_{n=1}^{N} a_{n} h_{\lambda_{n}}(\omega) \big|^p \in L_{1}(G).
\end{equation*}
Then \eqref{besicoHelson} shows that the maximal inequality from \eqref{iiiiiHelson} implies the maximal inequality from \eqref{max1Helson}.
Finally, \eqref{point1Helson} is a consequence of \eqref{sakssHelson} and Remark~\ref{tranferHelson}.
\end{proof}
\section{\bf Helson's theorem under Landau's condition} \label{maximalineqsectionLCHelson}
It is almost obvious that Theorem~\ref{DirichletintegerHelson}, \eqref{max1Helson} and \eqref{point1Helson}
as well as their equivalent formulations Theorem~\ref{CorointegerHelson}, \eqref{iiiiiHelson} and \eqref{sakssHelson} of the preceding section fail in the non-reflexive case $p=1$.
Indeed, as described in \eqref{hardyTHelson} we have that $H_{1}(\mathbb{T}) = H_{1}^{(n)}(\mathbb{T})$, and it is well-known that the Carleson-Hunt theorem fails in $H_{1}(\mathbb{T})$.
But as we are going to show now, under Landau's condition $(LC)$ on the frequency $\lambda$ the Helson-type statement from
Theorem~\ref{DirichletintegerHelson}, \eqref{point2Helson} can be saved.
\begin{Theo} \label{Helson(LC)Helson}
Let $(G,\beta)$ be a $\lambda$-Dirichlet group for a frequency $\lambda$ with $(LC)$, and $D = \sum a_n e^{-\lambda_n s}\in \mathcal{H}_1(\lambda)$.
\begin{itemize}
\item[(i)]
Then for almost all $\omega \in G$ the vertical limits $D^\omega$ converge on $[Re >0]$.
\item[(ii)]
More precisely, there is a null set $N \subset G$ such that for every $\omega \notin N$
\[
D^\omega(u+it) = (f_\omega \ast P_u) (t) \,\,\,
\text{for every $u >0$ and almost all $t \in \mathbb{ R}$}\,,
\]
where $f \in H_1^\lambda(G)$ is the function associated to $D$ through Bohr's transform.
\end{itemize}
\end{Theo}
As in the preceding section our general setting combined with some of our preliminaries show that this result on general Dirichlet series in fact is equivalent to a
result on pointwise convergence of Fourier series in Hardy spaces on $\lambda$-Dirichlet groups.
\begin{Theo}\label{HelsonstheoHelson}
Let $(G,\beta)$ be a $\lambda$-Dirichlet group for a frequency $\lambda$ with $(LC)$.
\begin{itemize}
\item[(i)]
Then for every $u>0$ the sublinear operator
\begin{equation*} \label{operatorHHelson}
S_{max}^{u}(f)(\omega):=\sup_{N} \big|\sum_{n=1}^{N} \widehat{f}(h_{\lambda_{n}})e^{-u\lambda_{n}} h_{\lambda_{n}}(\omega)\big|
\end{equation*}
is bounded from $H_{1}^{\lambda}(G)$ to $L_{1,\infty}(G)$.
\item[(ii)]
Moreover, if $f \in H_{1}^\lambda(G)$, then there is a null set $N\subset G$ such that for every $\omega \notin N$ and every $u>0$ we have
\begin{equation*} \label{guertelHelson}
(f \ast p_{u})(\omega) = \sum_{n=1}^{\infty} \widehat{f}(h_{\lambda_{n}})e^{-u\lambda_{n}} h_{\lambda_{n}}(\omega).
\end{equation*}
\end{itemize}
\end{Theo}
Note that $S_{max}^{u}$ by Theorem~\ref{CorointegerHelson} without any restriction on $\lambda$ is bounded from $H_{p}^{\lambda}(G)$ to $L_{p}(G)$, whenever $1<p\le \infty$
(apply Theorem~\ref{CorointegerHelson} for $f \in H_{p}^{\lambda}(G)$ to $f \ast p_u$).
The proof of Theorem~\ref{HelsonstheoHelson} needs two lemmas, the first one of which in fact is crucial.
\begin{Lemm} \label{jojHelson} Let $\lambda$ be an arbitrary frequency. Then for any sequence $(k_{N})\subset ]0,1]$ the sublinear operator
$$T_{max}(f)(\omega):=\sup_{N} \Big(\big| \sum_{n=1}^{N} \widehat{f}(h_{\lambda_{n}}) h_{\lambda_{n}}(\omega)\big| k_{N} \Big(\frac{\lambda_{N+1}-\lambda_{N}}{\lambda_{N+1}}\Big)^{k_{N}}\Big)$$
is bounded from $H_{1}^{\lambda}(G)$ to $L_{1,\infty}(G)$ and from $H_{p}^{\lambda}(G)$ to $L_{p}(G)$, where $1<p\le \infty$.
\end{Lemm}
The proof reduces to boundedness properties of the following Hardy-Littlewood maximal type operator $\overline{M}$ introduced in \cite[Section 2.3]{DefantSchoolmann3}:
For $f\in L_{1}(G)$ and almost all $\omega \in G$ we define
\begin{equation*}
\overline{M}(f)(\omega):=\sup_{I\subset \mathbb{ R}} \frac{1}{|I|} \int_{I} |f_{\omega}(t)| dt,
\end{equation*}
where the supremum is taken over all intervals $I\subset \mathbb{ R}$.
Then, as shown in \cite[Theorem 2.10]{DefantSchoolmann3}, $\overline{M}$ is a sublinear bounded operator from $L_{1}(G)$ to $L_{1,\infty}(G)$, and from $L_{p}(G)$ to $L_{p}(G)$, whenever $1<p\le \infty$.
\begin{proof}[Proof of Lemma \ref{jojHelson}]
We recall from \cite[Section 1.3]{DefantSchoolmann3} the notion of Riesz means of some function $f\in H_{1}^{\lambda}(G)$. For $k>0$ and $x>0$ the polynomial
$$R_{x}^{\lambda,k}(f):=\sum_{\lambda_{n}<x} \widehat{f}(h_{\lambda_{n}})\bigg(1-\frac{\lambda_{n}}{x}\bigg)^{k} h_{\lambda_{n}}$$
is called the first $(\lambda,k)$-Riesz mean of $f$. Then, choosing $(k_{N})\subset ]0,1]$, from \cite[Lemma 3.5]{Schoolmann} we know that
\[
\big| \sum_{n=1}^{N} \widehat{f}(h_{\lambda_{n}}) h_{\lambda_{n}}(\omega)\big|\le 3 \bigg(\frac{\lambda_{N+1}}{\lambda_{N+1}-\lambda_{N}} \bigg)^{k_{N}} \sup_{0<x<\lambda_{N+1}} |R^{\lambda,k_{N}}_{x}(f)(\omega)|\,,
\]
and additionally from \cite[Proposition 3.2]{DefantSchoolmann3} that
\begin{equation*}
\sup_{x>0}|R^{\lambda,k_{N}}_{x}(f)(\omega)|\le Ck_{N}^{-1}\overline{M}(f)(\omega),
\end{equation*}
where $C$ is an absolute constant. So together
\begin{equation} \label{bjHelson}
|T_{max}(f)(\omega)|\le 3C \overline{M}(f)(\omega),
\end{equation}
and, since $\overline{M}$ has the stated boundedness properties, the claim follows.
\end{proof}
The second lemma is a standard consequence of Abel summation.
\begin{Lemm} \label{abeleHelson}
For every $u>0$ there is a constant $C=C(u)$ such that for every choice of complex numbers $a_1, \ldots, a_N$
for all frequencies $\lambda=(\lambda_{n})$ and $\varepsilon>0$
\[
\big| \sum_{n=1}^{N} a_n e^{-(u+\varepsilon) \lambda_n} \big|
\leq
C(u)\sup_{n\le N}\big|e^{-\varepsilon \lambda_n}\sum_{k=1}^{n} a_k \big|\,.
\]
\end{Lemm}
\begin{proof}
Indeed, by Abel summation
\begin{align*}
&\big| \sum_{n=1}^{N} a_n e^{-(u+\varepsilon) \lambda_{n}} \big|\\ &=\big| e^{-(u+\varepsilon)\lambda_{N}}\sum_{n=1}^{N}a_n + \sum_{n=1}^{N-1} \bigg( \sum_{k=1}^{n} a_k \bigg)(e^{-(u+\varepsilon) \lambda_{n}}-e^{-(u+\varepsilon) \lambda_{n+1}})\big|\\ &\le \sup_{n\le N}\big|e^{-\varepsilon \lambda_{n}}\sum_{k=1}^{n} a_k \big| \bigg(e^{-u\lambda_{N}}+\sum_{n=1}^{N-1} e^{-u\lambda_{n}}-e^{-u\lambda_{n+1}}e^{-\varepsilon(\lambda_{n+1}-\lambda_{n})} \bigg) \\ &\le
\sup_{n\le N}\big|e^{-\varepsilon \lambda_{n}}\sum_{k=1}^{n} a_k \big| \bigg(e^{-u\lambda_{N}}+\sum_{n=1}^{N-1} e^{-\varepsilon \lambda_{n}}-e^{-u\lambda_{n+1}} \bigg)\\ &\le \sup_{n\le N}\big|e^{-\varepsilon \lambda_{n}}\sum_{k=1}^{n} a_k \big| \bigg(1+\frac{1}{u}\int_{0}^{\infty}e^{-ux}dx\bigg)\,\qedhere.
\end{align*}
\end{proof}
\begin{proof}[Proof of Theorem \ref{HelsonstheoHelson}]
For the proof of (i) note first that by $(LC)$ for every $u>0$ there is a constant $C(u,\lambda) >0$, such that for all $N$
$$\lambda_{N+1}-\lambda_{N} \ge C(u,\lambda) e^{-e^{u\lambda_{N}}}.$$
Hence with the choice $k_{N}:=e^{-u\lambda_{N}}$ we for all $N$ have
\begin{equation} \label{(A)Helson}
e^{-u\lambda_{N}}\le C_1(u,\lambda) k_{N} \bigg(\frac{\lambda_{N+1}-\lambda_{N}}{\lambda_{N}}\bigg)^{k_{N}} \,,
\end{equation}
and conclude from Lemma~\ref{abeleHelson} that
\begin{equation} \label{mittHelson}
S_{\text{max}}^u(f) (\omega)
\leq
C_{2}(u,\lambda) \sup_{N} \big|e^{-u\lambda_{N}} \sum_{n=1}^{N} \widehat{f}(h_{\lambda_{n}})h_{\lambda_{n}}(\omega)\big|\le C_{3}(u,\lambda) T_{max}(f)(\omega).
\end{equation}
Finally, the boundedness of $S_{\text{max}}^u: H_1^\lambda(G) \to L_{1,\infty}(G)$ is an immediate consequence of Lemma~\ref{jojHelson}.
To understand the second statement (ii) take $f \in H_1^\lambda(G)$ and $u >0$.
Then $p_u \ast f \in H_1^\lambda(G)$, and recall from \eqref{Fourier1Helson} that all non-zero Fourier coefficients
of this function have the form $\widehat{f}(h_{\lambda_{n}}) e^{-u \lambda_n}$.
Using a standard argument (see again \cite[Lemma 3.6]{DefantSchoolmann3} for a more general situation) gives that there is a null set $N\subset G$ such that on $G\setminus N$ we have
\[f*p_{u}=\sum_{n=1}^{\infty} \widehat{f}(h_{\lambda_{n}})h_{\lambda_{n}}.\]
To finish the proof of (ii)
we need to show that the dependence of $N$ on $u>0$ may be avoided: Recall first from \eqref{mittHelson} and \eqref{bjHelson}
that for every $u>0$ there is a constant $C(u,\lambda)>0$ which
for every $f\in H_{1}^{\lambda}(G)$ satisfies
\[S_{max}^{u}(f)(\omega)\le C(u,\lambda) \overline{M}(f)(\omega) \,.\]
So fixing $u>0$ and $f\in H_{1}^{\lambda}(G)$, we for all $v>0$ obtain that for almost all $\omega$
$$S_{max}^{u+v}(f)(\omega)=S_{max}^{u}(f*p_{v})(\omega)\le C(u,\lambda)\overline{M}(f*p_{v})(\omega)\le C(u,\lambda) \overline{M}(f)(\omega)\,,$$
where the last estimate is taken from \cite[Proof of Proposition 3.7]{DefantSchoolmann3}.
So for all $u>0$ there is a constant $C_1(u,\lambda)>0$ such that
$$\big\| \sup_{\alpha\ge u} S_{max}^{\alpha}(f)(\pmb{\cdot}) \big\|_{1,\infty}\le C_1(u,\lambda) \|f\|_{1}
\,\,\,
\text{ and }
\,\,\,
\big\| \sup_{\alpha\ge u} |f \ast p_\alpha| \big\|_{1,\infty}\le \|f\|_{1}\,,
$$
where the first estimate is a consequence of the $L_{1}$-$L_{1,\infty}$-boundedness of $\overline{M}$ (see again \cite[Theorem 2.10]{DefantSchoolmann3}) and
the second inequality can be found in the proof of \cite[Proposition 2.4]{DefantSchoolmann3}. We conclude from \cite[Lemma 3.6]{DefantSchoolmann3} that for every $u$ there is a null set $N_{u}\subset G$ such that for all $\omega \notin N_{u}$
\begin{equation} \label{hansimglueckHelson}
\lim_{N \to \infty} \sup_{\alpha \ge u} \big| \sum_{n=1}^N \widehat{f}(h_{\lambda_n}) e^{-\alpha \lambda_n} h_{\lambda_n}(\omega) - (f\ast p_\alpha)(\omega) \big| =0.
\end{equation}
Now collecting all null sets $N_{1/n}, n \in \mathbb{ N},$ gives the conclusion.
\end{proof}
Now we check that the Helson-type Theorem~\ref{Helson(LC)Helson} is indeed a consequence of the above maximal inequality from Theorem~\ref{HelsonstheoHelson}.
\begin{proof}[Proof of Theorem~\ref{Helson(LC)Helson}]
Both statements (i) and (ii) follow immediately from (\ref{hansimglueckHelson}) and Remark~\ref{tranferHelson}.
Indeed, applying Remark \ref{tranferHelson} to (\ref{hansimglueckHelson}) we get that for every $u>0$ there is a null set $N_u\subset G$ such that, if $\omega \notin N_u$, then for almost every $t \in \mathbb{ R}$
\begin{equation*}
\lim_{N \to \infty} \sup_{\alpha \ge u} \big| \sum_{n=1}^N \widehat{f}(h_{\lambda_n})e^{-\alpha \lambda_n} h_{\lambda_n}(\omega)e^{-it\lambda_{n}} - (f\ast p_\alpha)(\omega\beta(t)) \big| =0.
\end{equation*}
Hence, again collecting all null sets $N_{1/n}, n \in \mathbb{ N},$ we obtain a null set $N$, such that for every $u>0$ and almost every $t\in \mathbb{ R}$
\[D^{\omega}(u+it)=(f*p_{u})(\omega\beta(t))=\int_{\mathbb{ R}} f_{\omega}(t-x) P_{u}(x) dx=f_{\omega}*P_{u}(t),\]
whenever $\omega \notin N$, and so the proof is finished.
\end{proof}
\begin{Rema}
Obviously, the preceding proof of Theorem~\ref{HelsonstheoHelson} works, if we instead of the condition $(LC)$ for $\lambda$ assume that for every $u>0$ there is a constant $C=C(u)\ge 1$ and sequence $(k_{N})\subset ]0,1]$ such that the estimate from \eqref{(A)Helson} holds for all $N$.
Taking the $k_{N}$th root, condition \eqref{(A)Helson} is equivalent to: For every $u>0$ there is a constant $C=C(u)\ge 1$ and a sequence $(k_{N})\subset ]0,1]$ such that for all $N$
\begin{equation*}
\lambda_{N} e^{-u\lambda_{N}k_{N}^{-1}}\bigg(\frac{1}{Ck_{N}}\bigg)^{k_{N}^{-1}}\le \lambda_{N+1}-\lambda_{N}.
\end{equation*}
But then an elementary calculation shows that this condition in fact implies $(LC)$.
\end{Rema}
\section{\bf Helson's theorem under Bohr's condition} \label{maximalineqsectionBCHelson}
We now study the results of the preceding section under the more restrictive condition $(BC)$ instead of $(LC)$ for the frequency $\lambda$.
We are going to show that under Bohr's condition $(BC)$ the operator $S_{max}^{u}$ from Theorem~\ref{HelsonstheoHelson} improves considerably in the sense that it maps $H_{1}^{\lambda}(G)$ to $L_{1}(G)$ and that its norm is uniformly bounded in $1 \leq p \leq \infty$.
\begin{Theo} \label{HelsonBCinternalHelson}
Let $(BC)$ hold for $\lambda$. Then to every $u>0$ there is a constant $C=C(\lambda,u)$ such that for all $1\le p < \infty$, all $\lambda$-Dirichlet groups $(G,\beta)$ and $D\in \mathcal{H}_{p}(\lambda)$ we for almost all $\omega \in G$ have
\begin{equation*}
\lim_{T\to \infty} \bigg(\frac{1}{2T} \int_{-T}^{T} \sup_{N} \big| \sum_{n=1}^{N} a_{n} h_{\lambda_{n}}(\omega) e^{-(u+it)\lambda_{n}} \big|^{p} dt \bigg)^{\frac{1}{p}} \le C\|D\|_{p}.
\end{equation*}
\end{Theo}
As before we deduce this from an appropriate maximal inequality of 'translated' Fourier series of functions in $H_{p}^{\lambda}(G)$.
\begin{Theo}\label{maximalineqBCHelson} Let $\lambda$ satisfy $(BC)$ and $(G,\beta)$ be a $\lambda$-Dirichlet group. Then for every $u>0$ there is $C=C(u, \lambda)>0$ such that for all $1\le p \le \infty$ and $f \in H_{p}^{\lambda}(G)$
\begin{equation*}
\Big\|\sup_{N} \big| \sum_{n=1}^{N}\widehat{f}(h_{\lambda_{n}})e^{-\lambda_{n}u} h_{\lambda_{n}} \big|
\Big\|_p \le C\|f\|_{p}.
\end{equation*}
\end{Theo}
Obviously, Theorem~\ref{maximalineqBCHelson} transfers to Theorem~\ref{HelsonBCinternalHelson} precisely as in the proof of Theorem~\ref{DirichletintegerHelson} (given at the end of Section~\ref{CarlesonsectionHelson}).
Let us, as in Corollary~\ref{OrdiAHelson}, apply Theorem \ref{maximalineqBCHelson} to $\lambda=(\log n)$ and the $\lambda$-Dirichlet group $(\mathbb{T}^{\infty},\beta_{\mathbb{T}^{\infty}})$.
\begin{Coro}\label{OrdiBHelson}
Let $f \in H_1(\mathbb{T}^\infty)$. Then for all $u >0$
\[
\lim_{N\to \infty} \sum_{\mathfrak{p}^\alpha \leq N} \widehat{f}(\alpha)\,\Big(\frac{z}{\mathfrak{p}^{u}}\Big)^\alpha=f*p_{u}(z) \,\,\,\,\, \text{almost everywhere on $\mathbb{T}^\infty$}\,,
\]
and moreover
\[
\int_{\mathbb{T}^\infty} \sup_N \big| \sum_{\mathfrak{p}^\alpha \leq N} \widehat{f}(\alpha)
\Big(\frac{z}{\mathfrak{p}^{u}}\Big)^\alpha \big| d z
\leq C \|f\|_1\,,
\]
where $C= C(u)$ only depends on $u$.
\end{Coro}
Our proof of Theorem \ref{maximalineqBCHelson}, which is inspired by Helson's proof of Theorem~\ref{HelsonstheoremHelson} from \cite{Helson}, seems to rely strongly on $(BC)$,
and it requires the following two main ingredients.
\begin{Prop} \label{continuityHelson} Let $1\le p <\infty$, $\varepsilon>0$ and $u>0$. Then the operator
\begin{equation*}
\Psi=\Psi(p,u,\varepsilon) \colon L_{p}(G)\hookrightarrow L_{p}(G,L_{1+\varepsilon}(\mathbb{ R})), ~~ f \mapsto \left[ \omega \mapsto \frac{f_{\omega}*P_{u}}{u+i\pmb{\cdot}} \right]
\end{equation*}
defines a bounded linear embedding with
\begin{equation} \label{normpsiHelson}
\|\Psi\|\le \int_{\mathbb{ R}} \bigg(\int_{\mathbb{ R}} \bigg( \frac{P_{u}(t-y)}{|u+it|} \bigg)^{1+\varepsilon} dt \bigg)^{\frac{1}{1+\varepsilon}} dy<\infty.
\end{equation}
In particular, if $f \in L_{1}(G)$, then $\frac{f_{\omega}*P_{u}}{u+i\pmb{\cdot}} \in L_{1+\varepsilon}(\mathbb{ R})$ for almost every $\omega \in G$.
\end{Prop}
So, provided $0<\varepsilon\le 1$, we may apply the Fourier transform $\mathcal{F}_{L_{1+\varepsilon}(\mathbb{ R})}$.
\begin{Prop} \label{perronHelson} Let $0<\varepsilon\le 1$ and $f \in H^{\lambda}_{1}(G)$. Then for almost all $\omega \in G$ and for almost all $x \in \mathbb{ R}$ we have
\begin{equation*}
\mathcal{F}_{L_{1+\varepsilon}(\mathbb{ R})}\bigg(\frac{f_{\omega}*P_{u}}{u+i\pmb{\cdot}}\bigg)(-x)=e^{-u|x|}\sum_{\lambda_{n}<x}\widehat{f}(h_{\lambda_{n}}) h_{\lambda_{n}}(\omega).
\end{equation*}
\end{Prop}
Let us first show how to obtain Theorem \ref{maximalineqBCHelson} from the Propositions \ref{continuityHelson} and \ref{perronHelson}. As already mentioned our strategy is inspired by Helson's proof of Theorem \ref{HelsonstheoremHelson} from \cite{Helson}, which roughly speaking relies on Plancherel's theorem in $L_{2}(\mathbb{ R})$. Instead
we follow Helson's ideas but use the Hausdorff-Young inequality in $L_{1+\varepsilon}(\mathbb{ R})$.
\begin{proof}[Proof of Theorem \ref{maximalineqBCHelson}]Adding more entries to the frequency $\lambda$ we may assume that $\lambda_{n+1}-\lambda_{n}\le 1$ for all $n$ (as in the proof \cite[Theorem 4.2]{Schoolmann}). Since $\lambda$ satisfies $(BC)$, there is $l>0$ and $C=C(\lambda)$ such that $\lambda_{n+1}-\lambda_{n}\ge Ce^{-l\lambda_{n}}$ for all $n$. Let $f \in H_{p}^{\lambda}(G)$. Fix $0<\varepsilon \le 1$ and we choose $q$ such that $\frac{1}{1+\varepsilon}+\frac{1}{q}=1$. By Proposition \ref{continuityHelson} we know that $\frac{P_{u}*f_{\omega}}{u+i\pmb{\cdot}} \in L_{1+\varepsilon}(\mathbb{ R})$ for almost all $\omega \in G$. For notational convenience let us define
$$S(f_{\omega})(x)=\sum_{\lambda_{n}<x} \widehat{f}(h_{\lambda_{n}}) h_{\lambda_{n}}(\omega).$$
Then, Proposition \ref{perronHelson} and the Hausdorff-Young inequality imply
\begin{align*}
\infty &>\left\|\frac{P_{u}*f_{\omega}}{u+i\pmb{\cdot}}\right\|_{1+\varepsilon}^{q} \ge \int_{0}^{\infty} |e^{-ut} S(f_{\omega})(t)|^{q} dt=\sum_{n=1}^{\infty} |S(f_{\omega})(\lambda_{n+1})|^{q} \int_{\lambda_{n}}^{\lambda_{n+1}} e^{-uqt} dt \\ &\ge \sum_{n=1}^{\infty} |S(f_{\omega})(\lambda_{n+1})|^{q}(\lambda_{n+1}-\lambda_{n})e^{-uq\lambda_{n+1}} \ge\sum_{n=1}^{\infty} |S(f_{\omega})(\lambda_{n+1})|^{q} C e^{-l\lambda_{n}}e^{-uq(\lambda_{n}+1)} \\&=Ce^{-uq} \sum_{n=1}^{\infty} |S(f_{\omega})(\lambda_{n+1})|^{q}e^{-\lambda_{n}(uq+l)} \ge Ce^{-uq} \sup_{N} |S(f_{\omega})(\lambda_{N+1})|^{q}e^{-\lambda_{N}(uq+l)}\\ &= Ce^{-uq}\sup_{N} \big(|S(f_{\omega})(\lambda_{N+1})| e^{-\lambda_{N}\big(u+\frac{l}{q}\big)} \big)^{q}.
\end{align*}
Hence
$$C^{\frac{1}{q}} e^{-u} \sup_{N} |S(f_{\omega})(\lambda_{N+1})| e^{-\lambda_{N}\big(u+\frac{l}{q}\big)}\le \left\|\frac{P_{u}*f_{\omega}}{u+i\pmb{\cdot}}\right\|_{1+\varepsilon}$$
and therefore with the mapping $\Psi$ from Proposition \ref{continuityHelson}
\begin{align*}
\bigg( \int_{G} \sup_{N } \left| \frac{S(f_{\omega})(\lambda_{N+1})}{e^{\lambda_{N}\big(u+\frac{l}{q}\big)}} \right|^{p} dm(\omega) \bigg)^{\frac{1}{p}} &\le C^{-\frac{1}{q}}e^{u}\bigg( \int_{G} \left\|\frac{P_{u}*f_{\omega}}{u+i\pmb{\cdot}}\right\|_{1+\varepsilon}^{p} dm(\omega) \bigg)^{\frac{1}{p}}\\ &\le C_{1}(u, \lambda)\|f\|_{p} \|\Psi(p,u,\varepsilon)\|.
\end{align*}
Now choosing $\varepsilon$ small enough, such that $l\le q u$, we obtain
with (\ref{normpsiHelson}) from Proposition \ref{continuityHelson}
\begin{equation}\label{rasenHelson}
\bigg( \int_{G} \sup_{N} \left| \frac{S(f_{\omega})(\lambda_{N+1})}{e^{2u\lambda_{N}}} \right|^{p} dm(\omega) \bigg)^{\frac{1}{p}} \le C_{2}(u,\lambda) \|f\|_{p},
\end{equation}
which together with Lemma \ref{abeleHelson} proves the claim in the range $1\le p <\infty$. Now tending $p$ to $+\infty$ gives the full claim.
\end{proof}
\subsection{Proof of Proposition \ref{continuityHelson}}
The technical part of the proof of Proposition \ref{continuityHelson} is to show that for every $\varepsilon,u>0$
\begin{equation} \label{monsterHelson}
\int_{\mathbb{ R}} \bigg(\int_{\mathbb{ R}} \bigg( \frac{P_{u}(t-y)}{|u+it|} \bigg)^{1+\varepsilon} dt \bigg)^{\frac{1}{1+\varepsilon}} dy<\infty.
\end{equation}
Observe that, if $\varepsilon=0$, then by Fubini's theorem for every $u>0$ this integral is infinity. Since $\|P_{u}\|_{1}=1$ and $\|P_{u}\|_{\infty}=\frac{1}{u}$ by Lyapunov's inequality (see e.g. \cite[Lemma II.4.1, p. 72]{Werner}) we obtain $\|P_{u}\|_{1+\varepsilon} \le \big(\frac{1}{u}\big)^{\frac{\varepsilon}{1+\varepsilon}}$ and so for all $y \in \mathbb{ R}$
\begin{equation} \label{trivialboundHelson}
\bigg(\int_{\mathbb{ R}} \bigg( \frac{P_{u}(t-y)}{|u+it|} \bigg)^{1+\varepsilon} dt \bigg)^{\frac{1}{1+\varepsilon}} \le \frac{1}{u} \|P_{u}\|_{1+\varepsilon}\le \frac{1}{u} \bigg(\frac{1}{u}\bigg)^{\frac{\varepsilon}{1+\varepsilon}}=\bigg(\frac{1}{u}\bigg)^{1+\frac{\varepsilon}{1+\varepsilon}}.
\end{equation}
Hence the interior integral of (\ref{monsterHelson}) is defined and in order to verify finiteness of (\ref{monsterHelson}) we claim that the interior integral is sufficiently decreasing considered as a function in $y$.
\begin{Lemm} \label{hardHelson} Let $\varepsilon, u>0$. Then for all $|y|>4u$ we have
\begin{equation} \label{uglycalculationHelson}
\bigg(\int_{\mathbb{ R}} \bigg( \frac{P_{u}(t-y)}{|u+it|} \bigg)^{1+\varepsilon} dt\bigg)^{\frac{1}{1+\varepsilon}} \le 4|y|^{-\big(1+\frac{\varepsilon}{1+\varepsilon}\big)}.
\end{equation}
In particular,
\begin{equation}
\int_{\mathbb{ R}} \bigg(\int_{\mathbb{ R}} \bigg( \frac{P_{u}(t-y)}{|u+it|} \bigg)^{1+\varepsilon} dt \bigg)^{\frac{1}{1+\varepsilon}} dy\le 8 \bigg(\frac{1+\varepsilon}{\varepsilon}\bigg) \bigg(\frac{1}{u}\bigg)^{\frac{\varepsilon}{1+\varepsilon}}.
\end{equation}
\end{Lemm}
\begin{proof} Since $|u|+|t|\le 2 |u+it|$, we have
\begin{equation} \label{buchisdaHelson}
\frac{P_{u}(t-y)}{|u+it|}\le 2\frac{P_{u}(t-y)}{u+|t|}.
\end{equation}
Then fixing $y$ we now estimate separately the integrals
$$ (a): ~~\bigg(\int_{0}^{\infty} \bigg( \frac{P_{u}(t-y)}{u+t} \bigg)^{1+\varepsilon} dt \bigg)^{\frac{1}{1+\varepsilon}} ~ \text{and }~ (b):~~\bigg(\int_{-\infty}^{0} \bigg( \frac{P_{u}(t-y)}{u-t} \bigg)^{1+\varepsilon} dt \bigg)^{\frac{1}{1+\varepsilon}}.$$
Since
$$\int_{-\infty}^{0} \bigg(\frac{P_{u}(t-y)}{u-t}\bigg)^{1+\varepsilon} dt=\int_{0}^{\infty}\bigg(\frac{P_{u}(t+y)}{u+t}\bigg)^{1+\varepsilon} dt=\int_{0}^{\infty}\bigg(\frac{P_{u}(t-(-y))}{u+t}\bigg)^{1+\varepsilon} dt,$$
we see that it suffices to control integral $(a)$ for $y>0$ and $y<0$. Part I deals with positive $y$ and Part II with negative $y$ in $(a)$.
\textbf{Part I:} Let $y>4u$. Applying the substitution $x(t)=-y+\frac{1}{t}$ we obtain
\begin{align*}
&\int_{0}^{\infty} \bigg( \frac{u}{(u^{2}+(x-y)^{2})(u+x)} \bigg)^{1+\varepsilon}~ dx\\ &=\int_{0}^{\frac{1}{y}} \bigg( \frac{u}{(u^{2}+(2y-\frac{1}{t})^{2})(u+\frac{1}{t}-y)} \bigg)^{1+\varepsilon} \frac{dt}{t^{2}}\\ &=
\int_{0}^{\frac{1}{y}} |t|^{2\varepsilon} \bigg( \frac{u}{((tu)^{2}+(2yt-1)^{2})(u+\frac{1}{t}-y)} \bigg)^{1+\varepsilon} dt\\ &\le \frac{1}{|y|^{2\varepsilon}}\int_{0}^{\frac{1}{y}}\bigg( \frac{u}{((tu)^{2}+(2yt-1)^{2})(u+\frac{1}{t}-y)} \bigg)^{1+\varepsilon} dt.
\end{align*}
Now we consider the function
$$g(t):=\frac{u}{((tu)^{2}+(2yt-1)^{2})(u+\frac{1}{t}-y)}\,,$$
and we claim that $g$ is strictly increasing
on $[0,\frac{1}{y}]$ provided $y>4u$. So then
$$\sup_{t \in [0,\frac{1}{y}]}g(t)=g(y^{-1})=\frac{1}{(\frac{u}{y})^{2}+1}\le 1\,,$$
and hence
\begin{equation} \label{beast1Helson}
\bigg(\int_{0}^{\infty} \bigg( \frac{P_{u}(t-y)}{u+t} \bigg)^{1+\varepsilon} dt \bigg)^{\frac{1}{1+\varepsilon}}\le y^{-\big(\frac{1+2\varepsilon}{1+\varepsilon}\big)}.
\end{equation}
Note that $g$ is not differentiable at $t=\frac{1}{y-u}$. But $g$ is differentiable on $[0, \frac{1}{y}]$, since $\frac{1}{y-u}>\frac{1}{y}$ for $y>u$. We calculate
$$g^{\prime}(t)=\frac{u(-2t^{3}(u-y)(u^{2}+4y^{2})-t^{2}(u^{2}-4uy+8y^{2})+1)}{(t(u-y)+1)^{2}(t^{2}(u^{2}+4y^{2})-4ty+1)^{2}}\,,$$
and show that $g^{\prime}$ is positive. Therefore we only have to focus on the polynomial
$$p(t):=-2t^{3}(u-y)(u^{2}+4y^{2})-t^{2}(u^{2}-4uy+8y^{2})+1.$$
with derivative
\begin{align*}
p^{\prime}(t)&=-6t^{2}(u-y)(u^{2}+4y^{2})-2t(u^{2}-4uy+8y^{2})\\ &=2t(-3t(u-y)(u^{2}+4y^{2})-2(u^{2}-4uy+8y^{2})),
\end{align*}
which vanishes in $t=0$ and (assuming $y>u$) in
$$t_{0}:=\frac{2(u^{2}-4uy+8y^{2})}{3(y-u)(u^{2}+4y^{2})}.$$
We have $p(0)=1$ and, since $y>4u$, $$p\bigg(\frac{1}{y}\bigg)=\bigg(\frac{u}{y}\bigg)^{2}-2\bigg(\frac{u}{y}\bigg)^{3}-4\bigg(\frac{u}{y}\bigg)+1>0.$$
Moreover $t_{0}>\frac{1}{y}$, and assuming $y>4u$ we have
\begin{align*}
yt_{0}=\frac{2}{3} \frac{8y^{3}-yu(4y-u)}{(y-u)(u^{2}+4y^{2})} \ge \frac{2}{3} \frac{8y^{3}-(y \frac{y}{4}(4y))}{y\big( \big(\frac{y}{4}\big)^{2}+4y^{2} \big)}= \frac{2}{3} \frac{7}{4+\frac{1}{16}}>1.
\end{align*}
Let us summarize that $p$ is positive on the boundary and has no extremal point in the interior, which implies that $p$ is positive on $[0,\frac{1}{y}]$. Hence $g$ is strictly increasing.
\textbf{Part II}:
Now let $y<-4u$. Applying the substitution $x(t)=y+\frac{1}{t}$ we obtain
\begin{align*}
&\int_{0}^{\infty} \bigg( \frac{u}{(u^{2}+(x-y)^{2})(u+x)} \bigg)^{1+\varepsilon} ~dx =\int_{0}^{-\frac{1}{y}} \bigg( \frac{u}{(u^{2}+(\frac{1}{t})^{2})(u+\frac{1}{t}+y)} \bigg)^{1+\varepsilon} \frac{dt}{t^{2}}\\ &=
\int_{0}^{\frac{1}{|y|}} t^{2\varepsilon} \bigg( \frac{u}{((tu)^{2}+1)(u+\frac{1}{t}+y)} \bigg)^{1+\varepsilon} dt\\ &\le \frac{1}{|y|^{2\varepsilon}}\int_{0}^{\frac{1}{|y|}}\bigg( \frac{u}{((tu)^{2}+1)(u+\frac{1}{t}+y)} \bigg)^{1+\varepsilon} dt.
\end{align*}
We follow the same strategy as before and consider
$$h(t):=\frac{u}{((tu)^{2}+1)(u+y+\frac{1}{t})}.$$
Note that $h$ is differentiable on $[0,\frac{1}{|y|}]$. We calculate
$$h^{\prime}(t)=\frac{-u(t^{3}2u^{2}(u+y)+t^{2}u^{2}-1)}{((tu)^{2}+1)^{2}(t(u+y)+1)^{2}}\,,$$
and claim that $h$ is increasing on $[0,\frac{1}{|y|}]$. Therefore consider $$p(t)=t^{3}2u^{2}(u+y)+t^{2}u^{2}-1$$
with derivative
$$p^{\prime}(t)=6t^{2}u^{2}(u+y)+2tu^{2}=t2u^{2}(3(u+y)t+1),$$
which vanishes in $t=0$ and in $t_{0}=\frac{-1}{3(u+y)}.$ Note that $t_{0} \in [0,\frac{1}{|y|}]$, whenever $y<-4u$. We have $p(0)=-1$ and $p(\frac{-1}{y})<0$, since
$$p\bigg(\frac{-1}{y}\bigg)=\bigg(\frac{u}{y}\bigg)^{2} \bigg(1-\frac{2(u+y)}{y}\bigg)-1<0,$$
provided $-y>2u$.
Moreover,
$$p(t_{0})=\bigg(\frac{u}{u+y}\bigg)^{2}\bigg(\frac{-2}{27}+\frac{1}{9}\bigg)-1=\frac{1}{27}\bigg(\frac{u}{u+y}\bigg)^{2}-1<0\,,$$
whenever $\bigg(\frac{u}{u+y}\bigg)^{2}\le 27$. But this holds true assuming $y<-4u$, since
$$\bigg(\frac{u}{u+y}\bigg)^{2}\le \bigg(\frac{y}{4}\bigg)^{2} \frac{1}{(-y+\frac{y}{4})^{2}}= \frac{1}{9}.$$
Let us summarize that $p$ is negative on the boundary of $[0,\frac{1}{|y|}]$ and has a maximum in $t_{0}$ with $p(t_{0})<0$. Hence $p$ is negative on $[0,\frac{1}{|y|}]$, and consequently $h$ is strictly increasing on $[0,\frac{1}{|y|}]$. So for $y<-4u$ we have
\begin{equation} \label{beast2Helson}
\int_{0}^{\infty} \bigg( \frac{u}{(u^{2}+(x-y)^{2})(u+x)} \bigg)^{1+\varepsilon} ~dx \le |y|^{-2\varepsilon} \int_{0}^{\frac{1}{|y|}}\frac{1}{\big(\frac{u}{|y|}\big)^{2}+1} dt\le |y|^{-(1+2\varepsilon)}.
\end{equation}
Hence (\ref{buchisdaHelson}), (\ref{beast1Helson}) and (\ref{beast2Helson}) imply (\ref{uglycalculationHelson}). Moreover with (\ref{uglycalculationHelson}) and (\ref{trivialboundHelson}) we conclude
\begin{align*}
&\int_{\mathbb{ R}} \bigg(\int_{\mathbb{ R}} \bigg( \frac{P_{u}(t-y)}{|u+it|} \bigg)^{1+\varepsilon} dt \bigg)^{\frac{1}{1+\varepsilon}} dy\\ &=\int_{|y|\le 4u}\bigg(\int_{\mathbb{ R}} \bigg( \frac{P_{u}(t-y)}{|u+it|} \bigg)^{1+\varepsilon} dt\bigg)^{\frac{1}{1+\varepsilon}}dy + \int_{|y|>4u} \bigg(\int_{\mathbb{ R}} \bigg( \frac{P_{u}(t-y)}{|u+it|} \bigg)^{1+\varepsilon} dt\bigg)^{\frac{1}{1+\varepsilon}} dy \\ &\le
4u\bigg(\frac{1}{u}\bigg)^{1+\frac{\varepsilon}{1+\varepsilon}}+ 4\int_{|y|>4u} |y|^{-\frac{1+2\varepsilon}{1+\varepsilon}} dy=4 \bigg(\frac{1}{u}\bigg)^{\frac{\varepsilon}{1+\varepsilon}}+8\frac{1+\varepsilon}{\varepsilon} \bigg(\frac{1}{4u}\bigg)^{\frac{\varepsilon}{1+\varepsilon}},
\end{align*}
which completes the proof.
\end{proof}
\begin{proof}[Proof of Proposition \ref{continuityHelson}]
Let us for simplicity write
\begin{equation*}
h(y):=\bigg(\int_{\mathbb{ R}} \bigg( \frac{P_{u}(t-y)}{|u+it|} \bigg)^{1+\varepsilon} dt \bigg)^{\frac{1}{1+\varepsilon}}.
\end{equation*}
Then applying two times Minkowski's inequality we obtain
\begin{align*}
&\bigg(\int_{G} \left\|\frac{f_{\omega}*P_{u}}{u+i\pmb{\cdot}} \right\|_{1+\varepsilon}^{p} d\omega \bigg)^{\frac{1}{p}}=\bigg( \int_{G}\bigg( \int_{\mathbb{ R}} \left| \frac{(f_{\omega}*P_{u})(t)}{u+it} \right|^{1+\varepsilon} dt \bigg)^{\frac{p}{1+\varepsilon}} d\omega \bigg)^{\frac{1}{p}}\\ &= \bigg( \int_{G} \bigg( \int_{\mathbb{ R}} \big| \int_{\mathbb{ R}} f_{\omega}(y)\frac{P_{u}(t-y)}{u+it} dy \big|^{1+\varepsilon} dt \bigg)^{\frac{p}{1+\varepsilon}} d\omega \bigg)^{\frac{1}{p}} \\ &\le \bigg(\int_{G} \bigg( \int_{\mathbb{ R}} \bigg( \int_{\mathbb{ R}} |f_{\omega}(y)|^{1+\varepsilon} \left| \frac{P_{u}(t-y)}{u+it} \right|^{1+\varepsilon} dt \bigg)^{\frac{1}{1+\varepsilon}} dy \bigg)^{p} d\omega \bigg)^{\frac{1}{p}}\\ &=\bigg( \int_{G} \bigg( \int_{\mathbb{ R}} |f_{\omega}(y)| h(y) dy \bigg)^{p} d\omega \bigg)^{\frac{1}{p}} \le \int_{\mathbb{ R}} \bigg( \int_{G} |f_{\omega}(y)|^{p} h(y)^{p} d\omega \bigg)^{\frac{1}{p}} dy \\ &\le \|f\|_{p} \int_{\mathbb{ R}} h(y) dy= \|f\|_{p}\int_{\mathbb{ R}}\bigg(\int_{\mathbb{ R}} \bigg( \frac{P_{u}(t-y)}{|u+it|} \bigg)^{1+\varepsilon} dt \bigg)^{\frac{1}{1+\varepsilon}}dy,
\end{align*}
where the latter integral is finite by Lemma \ref{hardHelson}. Hence $\Psi$ is well defined and bounded. To prove injectivity we calculate the Fourier coefficients of $\Psi(f)$. First let $f=\sum_{n=1}^{N}a_{n} h_{x_{n}}$. Then for all $x\in \mathbb{ R}$ and all $t \in \mathbb{ R}$
\begin{align*}
\widehat{\Psi(f)}(h_{x})(t)&=\bigg(\int_{G} \Psi(f)(\omega) \overline{h_{x}(\omega)} d\omega\bigg)(t)=\int_{G} \Psi(f)(\omega)(t) \overline{h_{x}(\omega)} d\omega\\ &=\int_{G} \frac{f_{\omega}*P_{u}(t)}{u+it} \overline{h_{x}(\omega)} d\omega= \frac{1}{u+it}\int_{\mathbb{ R}} P_{u}(y) \int_{G} f(\omega\beta(t-y)) \overline{h_{x}(\omega)} d\omega dy \\ &= \frac{1}{u+it}e^{-ixt} \int_{\mathbb{ R}} P_{u}(y)e^{iyx} dy \int_{G} f(\eta) \overline{h_{x}(\eta)} d\eta=\frac{1}{u+it}e^{-u|x|}e^{-ixt} \widehat{f}(h_{x}).
\end{align*}
Now by density of polynomials and continuity of $\Psi$ we for all $f\in L_{1}(G)$ obtain
\begin{equation*}
\widehat{\Psi(f)}(h_{x})(t)=\frac{1}{u+it}e^{-u|x|}e^{-ixt} \widehat{f}(h_{x}).
\end{equation*}
Hence, assuming $\Psi(f)=0$, we have $\widehat{\Psi(f)}(h_{x})=0$ and so $\widehat{f}(h_{x})=0$ for all $x$, which implies $f=0$.
\end{proof}
\subsection{Proof of Proposition \ref{perronHelson}}
To finish the proof of Theorem \ref{maximalineqBCHelson} it remains to calculate the Fourier transform $\mathcal{F}_{L_{1+\varepsilon}(\mathbb{ R})}$ of $\frac{f_{\omega}*P_{u}}{u+i\pmb{\cdot}}$. Observe that this function may fail to be in $L_{1}(\mathbb{ R})$. For instance, if $f=h_{0}$, then $\|\frac{f_{\omega}*P_{u}}{u+i\pmb{\cdot}}\|_{1}=\int_{\mathbb{ R}} \frac{1}{|u+it|} dt=\infty$. Our strategy is to calculate for $k>0$ the Fourier transform of $\frac{f_{\omega}*P_{u}}{(u+i\pmb{\cdot})^{1+k}}$ (which belongs to $L_{1}(\mathbb{ R})$) and then we tend $k$ to zero to obtain Proposition \ref{perronHelson}. First we consider polynomials.
\begin{Lemm}\label{kpositiveHelson} Let $g=\sum_{n=1}^{N}a_{n}e^{-i\lambda_{n} \pmb{\cdot}}$ and $k>0$. Then for all $x\in \mathbb{ R}$
\begin{equation}
\frac{\Gamma(k+1)}{2\pi} \mathcal{F}_{L_{1}(\mathbb{ R})}\bigg(\frac{g*P_{u}}{(u+i\pmb{\cdot})^{1+k}}\bigg)(-x)=e^{-u|x|} \sum_{\lambda_{n}<x} a_{n}(x-\lambda_{n})^{k},
\end{equation}
where $\Gamma$ denotes the Gamma function.
\end{Lemm}
\begin{proof} From \cite[Lemma 10, p. 50]{HardyRiesz} we have that
for all $\alpha>0$ and $k>0$
\begin{equation}\label{geniusHelson}
\frac{\Gamma(k+1)}{2\pi i}\int_{\alpha-i\infty}^{\alpha+i\infty} \frac{e^{ys}}{s^{1+k}} ds = \begin{cases} y^{k}, &\text{if } y\ge 0,\\ 0, &\text{if } y<0. \end{cases}
\end{equation}
By linearity it suffices to prove the claim for $g(t)=e^{-\lambda_{n}it}$ for some $n$. Then $g*P_{u}(t)=e^{-(u+it)\lambda_{n}}$ and we obtain
\begin{align*}
&\frac{\Gamma(k+1)}{2\pi} \mathcal{F}_{L_{1}(\mathbb{ R})}\bigg(\frac{g*P_{u}}{(u+i\pmb{\cdot})^{1+k}}\bigg)(-x)=\frac{\Gamma(k+1)}{2\pi} \int_{\mathbb{ R}} \frac{e^{-(u+it)\lambda_{n}}}{(u+it)^{1+k}} e^{xit} dt \\ &=\frac{\Gamma(k+1)}{2\pi} e^{-xu} \int_{\mathbb{ R}} \frac{e^{(x-\lambda_{n})(u+it)}}{(u+it)^{1+k}} dt= \frac{\Gamma(k+1)}{2\pi i} \int_{u-i\infty}^{u+i\infty}\frac{e^{(x-\lambda_{n})s}}{s^{1+k}} ds,
\end{align*}
which by (\ref{geniusHelson}) with $\alpha=u$ equals $(x-\lambda_{n})^{k}$, whenever
$x>\lambda_{n}$, and else vanishes.
\end{proof}
\begin{Lemm} \label{transformpolyHelson} Let $g=\sum_{n=1}^{N}a_{n}e^{-i\lambda_{n} \pmb{\cdot}}$ and $0<\varepsilon\le 1$. Then for almost every $x\in \mathbb{ R}$
$$\mathcal{F}_{L_{1+\varepsilon}(\mathbb{ R})}\bigg(\frac{g*P_{u}}{u+i\pmb{\cdot}}\bigg)(-x)=e^{-u|x|} \sum_{\lambda_{n}<x} a_{n}.$$
\end{Lemm}
\begin{proof}
Observe that $\frac{g*P_{u}}{u+i\pmb{\cdot}} \in L_{1+\varepsilon}(\mathbb{ R})$ and $\frac{g*P_{u}}{(u+i\pmb{\cdot})^{1+k}} \in L_{p}(\mathbb{ R})$ for all $k>0$ and $p\ge 1$. The dominated convergence theorem implies $\lim_{k\to 0}\frac{g*P_{u}}{(u+i\pmb{\cdot})^{1+k}}=\frac{g*P_{u}}{u+i\pmb{\cdot}}$ in $L_{1+\varepsilon}(\mathbb{ R})$.
Now by continuity of the Fourier transform and Lemma \ref{kpositiveHelson}
\begin{align*}
&\mathcal{F}_{L_{1+\varepsilon}(\mathbb{ R})}\bigg(\frac{g*P_{u}}{u+i\pmb{\cdot}}\bigg)=\lim_{k\to 0}\mathcal{F}_{L_{1}(\mathbb{ R})}\bigg(\frac{g*P_{u}}{(u+i\pmb{\cdot})^{1+k}}\bigg)\\ &= \lim_{k\to 0} C(k)\, e^{-u|\pmb{\cdot}|} \sum_{\lambda_{n}<\pmb{\cdot}} a_{n}(\pmb{\cdot}-\lambda_{n})^{k}= e^{-u|\pmb{\cdot}|} \sum_{\lambda_{n}<\pmb{\cdot}} a_{n},
\end{align*}
with $C(k)=\frac{2\pi}{\Gamma(k+1)}$ and convergence in $L_{q}(\mathbb{ R})$, where $\frac{1}{1+\varepsilon}+\frac{1}{q}=1$.
\end{proof}
\begin{proof}[Proof of Proposition \ref{perronHelson}]
Let $(P^{n})$ be a sequence of polynomials from $H_{1}^{\lambda}(G)$ converging to $f$ (see \cite[Proposition 3.14]{DefantSchoolmann2}). Then $\lim_{n\to \infty} \Psi(P^{n})=\Psi(f)$ by Proposition \ref{continuityHelson} and so there is a subsequence $(n_{k})$ such that $\lim_{k\to \infty} \frac{P^{n_{k}}_{\omega}*P_{u}}{u+i\pmb{\cdot}}=\frac{f_{\omega}*P_{u}}{u+i\pmb{\cdot}}$ in $L_{1+\varepsilon}(\mathbb{ R})$ for almost all $\omega \in G$. Hence by continuity of the Fourier transform and Lemma \ref{transformpolyHelson}
\begin{align*}
&\mathcal{F}_{L_{1+\varepsilon}(\mathbb{ R})}\bigg(\frac{f_{\omega}*P_{u}}{u+i\pmb{\cdot}}\bigg)=\lim_{k\to \infty} \mathcal{F}_{L_{1+\varepsilon}(\mathbb{ R})}\bigg(\frac{P^{n_{k}}_{\omega}*P_{u}}{u+i\pmb{\cdot}}\bigg)\\ & =\lim_{k\to \infty} e^{-u|\pmb{\cdot}|} \sum_{\lambda_{n}<\pmb{\cdot}} \widehat{P^{n_{k}}}(h_{\lambda_{n}}) h_{\lambda_{n}}(\omega)=e^{-u|\pmb{\cdot}|} \sum_{\lambda_{n}<\pmb{\cdot}} \widehat{f}(h_{\lambda_{n}}) h_{\lambda_{n}}(\omega). \qedhere
\end{align*}
\end{proof}
\section{\bf Applications} \label{bohrstheoremsectionHelson}
In this final section we give several applications of the results of the preceding sections.
\subsection{Bohr's theorem and its equivalent formulations}
Suppose that $D=\sum a_{n}e^{-\lambda_{n}s}$ converges somewhere and that its limit function extends to a bounded and holomorphic function $f$ on $[Re>0]$. Then a prominent problem from the beginning of the 20th century was to determine the class of $\lambda$'s for which under this assumption all $\lambda$-Dirichlet series converge uniformly on $[Re>\varepsilon]$ for every $\varepsilon>0$.
{\noindent \bf Bohr's theorem.}
We say that $\lambda$ satisfies 'Bohr's theorem' if the answer to the preceding problem is affirmative, and Bohr indeed proves in \cite{Bohr} that all frequencies with his property $(BC)$ belong to this class.
We denote by $\mathcal{D}^{ext}_{\infty}(\lambda)$ the space of all somewhere convergent $D\in \mathcal{D}(\lambda)$ which have a limit function extending to a bounded and holomorphic function $f$ on $[Re>0]$.
It is then immediate that $\lambda$ satisfies Bohr's theorem if and only if every $D \in \mathcal{D}^{ext}_{\infty}(\lambda)$ converges uniformly on $[Re>\varepsilon]$ for every $\varepsilon>0$.
As proven in \cite[Corollary 3.9]{Schoolmann}, the linear space $\mathcal{D}^{ext}_{\infty}(\lambda)$
together with $\|D\|_{\infty}=\sup_{[Re>0]}|f(s)|$ forms a normed space. The isometric subspace of all $D\in \mathcal{D}^{ext}_{\infty}(\lambda)$, which converge on $[Re>0]$, is denoted by $\mathcal{D}_{\infty}(\lambda)$.
Note that $\mathcal{D}_{\infty}(\lambda)=\mathcal{D}_{\infty}^{ext}(\lambda)$, whenever Bohr's theorem holds for $\lambda$.
Later in \cite{Landau} Landau improves Bohr's result showing that the weaker condition $(LC)$ is sufficient for Bohr's theorem.
More generally, we know from \cite[Remark 4.8.]{Schoolmann} that Bohr's theorem holds for $\lambda$ in each of the following 'testable' cases:
\begin{itemize}
\item
$\lambda$ is $\mathbb{Q}$-linearly independent,
\item
$L(\lambda):=\limsup_{n\to \infty} \frac{\log n}{\lambda_{n}}=0$,
\item
$\lambda$ fulfills (LC) (and in particular, if it fulfills (BC)).
\end{itemize}
In particular, the frequency $\lambda =(\log n)$ satisfies Bohr's theorem which constitutes one of the fundamental tools within the theory of ordinary Dirichlet series $\sum a_{n}n^{-s}$ (see e.g. \cite[Theorem 1.13, p. 21]{Defant} or \cite[Theorem 6.2.2., p. 143]{QQ}).
{\noindent \bf Completeness.}
In general, $\mathcal{D}_{\infty}(\lambda)$ as well as $\mathcal{D}^{ext}_{\infty}(\lambda)$ may fail to be complete. See \cite[Theorem 5.2]{Schoolmann} for a generic example of such $\lambda$'s.
Let us recall \cite[Theorem 5.1]{Schoolmann}, where we prove that $\mathcal{D}_{\infty}(\lambda)$ (and consequently also $\mathcal{D}^{ext}_{\infty}(\lambda)$, see Theorem~\ref{equivalenceHelson}) is complete under each of the following concrete conditions:
\begin{itemize}
\item
$\lambda$ is $\mathbb{Q}$-linearly independent,
\item
$L(\lambda)=0$,
\item
$\lambda$ fulfills (LC) and $L(\lambda)<\infty$ (and in particular, if it fulfills (BC)).
\end{itemize}
{\noindent \bf Coincidence.}
From \cite[Section 2.5]{DefantSchoolmann3} we know that for any $\lambda$ there is an isometric linear map
\begin{equation} \label{isometricembeddingAHelson}
\mathcal{A} \colon \mathcal{D}^{ext}_{\infty}(\lambda) \hookrightarrow H_{\infty}^{\lambda}(G),~~ D\mapsto f
\end{equation}
such that $a_{n}(D)=\widehat{f}(h_{\lambda_{n}})$ for all $n$.
Hence $\mathcal{D}_{\infty}^{ext}(\lambda)$, and so also $\mathcal{D}_{\infty}(\lambda)$, actually are isometric subspaces of $\mathcal{H}_{\infty}(\lambda)$.
Clearly, if $\mathcal{D}_{\infty}(\lambda)$ or $\mathcal{D}^{ext}_{\infty}(\lambda)$ are not complete,
then $\mathcal{D}_{\infty}(\lambda) \varsubsetneq \mathcal{H}_{\infty}(\lambda)$ or $\mathcal{D}_{\infty}^{ext}(\lambda) \varsubsetneq \mathcal{H}_{\infty}(\lambda)$, respectively. On the other hand, in the case of the two most prominent examples $\lambda = (n)$ and $\lambda = (\log n)$ we have 'coincidence':
\[
\text{$\mathcal{D}_{\infty}((n))=\mathcal{H}_{\infty}((n))$ and $\mathcal{D}_{\infty}((\log n))=\mathcal{H}_{\infty}((\log n))$;}
\]
the first result is straightforward, the second one a fundamental result from \cite{HLS} (see also \cite[Corollary 5.3]{Defant}). More generally,
\cite[Theorem 4.12]{DefantSchoolmann2} shows that the isometric 'coincidence' $\mathcal{D}_{\infty}(\lambda) = \mathcal{H}_{\infty}(\lambda)$
holds, whenever
\begin{itemize}
\item \text{$L(\lambda) < \infty$ and $\mathcal{D}^{ext}_{\infty}(\lambda)= \mathcal{D}_{\infty}(\lambda)$ (so if e.g. $\lambda$ satisfies Bohr's theorem).}
\end{itemize}
We come to the main point of this subsection -- Bohr's theorem, completeness, and coincidence generate the same class of frequencies.
\begin{Theo} \label{equivalenceHelson} Let $\lambda$ be an arbitrary frequency.
Then the following are equivalent:
\begin{itemize}
\item[(a)]
$\lambda$ satisfies Bohr's theorem,
\item[(b)]
$\mathcal{D}_{\infty}(\lambda)$ is complete,
\item[(c)]
$\mathcal{D}_{\infty}(\lambda) = \mathcal{H}_{\infty}(\lambda)$ isometrically.
\end{itemize}
\end{Theo}
Note that each of the equivalent statements (a), (b), and (c) of Theorem \ref{equivalenceHelson} trivially implies that $\mathcal{D}_{\infty}(\lambda)=\mathcal{D}^{ext}_{\infty}(\lambda) = \mathcal{H}_{\infty}(\lambda)$
(look at (c) and \eqref{isometricembeddingAHelson}), and hence
in this case $\mathcal{D}^{ext}_{\infty}(\lambda)$ is complete. But we do not know whether in general completeness of $\mathcal{D}^{ext}_{\infty}(\lambda)$
implies completeness of $\mathcal{D}_{\infty}(\lambda)$, which would allow us to replace $\mathcal{D}_{\infty}(\lambda)$ in Theorem \ref{equivalenceHelson} by $\mathcal{D}^{ext}_{\infty}(\lambda)$. In this context we would like to mention that an example of Neder from \cite{Neder} shows that in general $\mathcal{D}_{\infty}(\lambda)$ is not a closed subspace of $\mathcal{D}_{\infty}^{ext}(\lambda)$.
For the proof of Theorem~\ref{equivalenceHelson} we need some preparation, and start with the following simple consequence of the principle of uniform boundedness.
\begin{Lemm} \label{previousHelson}Assume that $\mathcal{D}_{\infty}(\lambda)$ is complete, and $\varepsilon>0$. Then there is a constant $C=C(\varepsilon)$ such that for all $D \in \mathcal{D}_{\infty}(\lambda)$
\begin{equation*}
\sup_{N}\big\| \sum_{n=1}^{N} a_{n}(D)e^{-\varepsilon\lambda_{n}}e^{-\lambda_{n}s}\big\|_{\infty} \le C\|D\|_{\infty}.
\end{equation*}
\end{Lemm}
\begin{proof}
Define for every $N$
\begin{equation*}
T_{N}(D)=\sum_{n=1}^{N}a_{n}(D)e^{-\varepsilon\lambda_{n}}\colon \mathcal{D}_{\infty}(\lambda)\to \mathbb{C}.
\end{equation*}
Then $T_{N}$ is continuous and $\lim_{N}T_{N}(D)=D(\varepsilon)$ exists. Hence by the principle of uniform boundedness (here completeness of $\mathcal{D}_{\infty}(\lambda)$ is essential) there is a constant $C>0$ such that
\begin{equation*}
\sup_{N} \|T_{N}\|\le C<\infty,
\end{equation*}
that is for all $D\in \mathcal{D}_{\infty}(\lambda)$ we have
\begin{equation} \label{bananeHelson}
\sup_{N} \big| \sum_{n=1}^{N} a_{n}(D)e^{-\lambda_{n}\varepsilon} \big|\le C\|D\|_{\infty}.
\end{equation}
Now let $D\in \mathcal{D}_{\infty}(\lambda)$. Applying (\ref{bananeHelson}) to $D_{z}$, which belongs to $\mathcal{D}_{\infty}(\lambda)$ for all $z\in [Re>0]$, we obtain
\begin{equation*}
\sup_{z\in [Re>0]} \sup_{N} \big| \sum_{n=1}^{N} a_{n}e^{-\lambda_{n}z}e^{-\lambda_{n}\varepsilon} \big|\le C \sup_{z\in [Re>0]}\|D_{z}\|_{\infty}\le C\|D\|_{\infty},
\end{equation*}
which proves the claim.
\end{proof}
The second lemma is crucial, and in fact a consequence of the Helson-type Theorem \ref{DirichletintegerHelson} (compare this with \cite[Propositions 4.3 and 4.5]{DefantSchoolmann2}).
\begin{Lemm} \label{complHelson} Let $\lambda$ be an arbitrary frequency and $D\in \mathcal{H}_{\infty}(\lambda)$. Then for every $\lambda$-Dirichlet group $(G,\beta)$
almost all vertical limits $D^\omega \in \mathcal{D}_{\infty}(\lambda)$ and $\|D^{\omega}\|_{\mathcal{D}_{\infty}(\lambda)}=\|D\|_{\mathcal{H}_{\infty}(\lambda)}$.
\end{Lemm}
\begin{proof} Let $f \in H_{\infty}^{\lambda}(G)$ be the function associated to $D$, i.e. $\mathcal{B}(f)=D$. Since $\mathcal{H}_{\infty}(\lambda)\subset \mathcal{H}_{2}(\lambda)$ and the function $f_{\omega}*P_{u}$ is continuous, Theorem \ref{DirichletintegerHelson} implies that $D^{\omega}$ converges on $[Re>0]$ and $D^{\omega}(u+it)=f_{\omega}*P_{u}(t)$ for all $t\in \mathbb{ R}$ and $u>0$.
Hence
\[\sup_{[Re>u]} |D^{\omega}(s)|=\sup_{[Re=u]} |D^{\omega}(s)|\le \|f_{\omega}*P_{u}\|_{\infty}\le \|f_{\omega}\|_{\infty} \leq \|f\|_{\infty},\]
and so $D^{\omega}\in \mathcal{D}_{\infty}(\lambda)$ with $\|D^{\omega}\|_{\mathcal{D}_{\infty}(\lambda)}\le \|f\|_{\infty} = \|D\|_{\mathcal{H}_{\infty}(\lambda)}$.
Moreover, by \cite[Proposition 4.3]{DefantSchoolmann2} and \eqref{isometricembeddingAHelson} we have that $\|D\|_{\mathcal{H}_{\infty}(\lambda)}= \|D^{\omega}\|_{\mathcal{H}_{\infty}(\lambda)}
= \|D^\omega\|_{\mathcal{D}_{\infty}(\lambda)}$.
\end{proof}
The third and final ingredient we need for the proof of Theorem~\ref{equivalenceHelson} is a 'Bohr-Cahen formula' for the abscissa of uniform convergence
for general Dirichlet series. Given a Dirichlet series $D=\sum a_{n}e^{-\lambda_{n}s}$, the abscissa $\sigma_{u}(D)$ of uniform convergence
is defined to be the infimum over all $\sigma \in \mathbb{ R}$ such that $D$ converges uniformly on $[Re > \sigma]$. The following convenient estimate for $\sigma_{u}(D)$ is proved in \cite[Corollary 2.5]{Schoolmann}:
\begin{equation}\label{alwaysBohrcahenHelson}
\sigma_{u}(D)\le \limsup_{N\to \infty} \frac{\log\bigg( \sup_{t\in \mathbb{ R}} \big|\sum_{n=1}^{N} a_{n}e^{-it\lambda_{n}}\big|\bigg)}{\lambda_{N}}.
\end{equation}
\begin{proof}[Proof of Theorem~\ref{equivalenceHelson}]
In a first step we prove the equivalence $(b) \Leftrightarrow (c)$: Obviously, $(c)$ implies $(b)$. So assume that $(b)$ holds, and let $D\in \mathcal{H}_{\infty}(\lambda)$. Then $D^{\omega}\in \mathcal{D}_{\infty}(\lambda)$ for some $\omega \in G$ by Lemma \ref{complHelson}. Applying \cite[Proposition 3.4, $k=1$]{Schoolmann} for every $\varepsilon>0$ the Dirichlet polynomials
\begin{equation*}
R_{x}(D^{\omega}_{\varepsilon})=\sum_{\lambda_{n}<x} a_{n}(D)e^{-\varepsilon\lambda_{n}} h_{\lambda_{n}}(\omega) \bigg(1-\frac{\lambda_{n}}{x}\bigg) e^{-\lambda_{n}s}
\end{equation*}
converge uniformly to $D^{\omega}$ on $[Re>0]$. Hence, by \cite[Corollary 4.4]{DefantSchoolmann2} (Dirichlet polynomials
in $\mathcal{D}_\infty(\lambda)$ and their vertical limits have the same norm)
\begin{equation*}
R_{x}(D_{\varepsilon})=\sum_{\lambda_{n}<x} a_{n}(D)e^{- \varepsilon\lambda_{n}} \bigg(1-\frac{\lambda_{n}}{x}\bigg) e^{-\lambda_{n}s}, ~x>0,
\end{equation*}
define a Cauchy net in $\mathcal{D}_{\infty}(\lambda)$. Then $(R_{x}(D_{\varepsilon}))$ by $(b)$ has a limit in $\mathcal{D}_{\infty}(\lambda)$, which is $D_{\varepsilon}$ with $a_{n}(D_{\varepsilon})=a_{n}(D)e^{-\varepsilon\lambda_{n}}$ for all $n$ and $\|D_{\varepsilon}\|_{\mathcal{D}_{\infty}(\lambda)}\le \|D\|_{\mathcal{H}_{\infty}(\lambda)}$ for all $\varepsilon>0$.
Hence, as desired, $D \in \mathcal{D}_{\infty}(\lambda)$.
In a second step, we check that $(a) \Leftrightarrow (c)$, and start with the implication $(a) \Rightarrow (c)$.
So let again $D \in \mathcal{H}_{\infty}(\lambda)$. We have to show that $D \in \mathcal{D}_{\infty}(\lambda)$.
By Lemma \ref{complHelson} there is some $\lambda$-Dirichlet group $(G, \beta)$ and some $\omega \in G$ such that $D^{\omega}\in \mathcal{D}_{\infty}(\lambda)$
and $\|D\|_{\mathcal{H}_\infty(\lambda)} = \|D^\omega\|_{\mathcal{D}_\infty(\lambda)} $. We denote by $D_N$ the $N$th partial sum of $D$, and by $D_{N,\varepsilon}$
its horizontal translation by $\varepsilon >0$. Then, for every $\varepsilon>0$, assuming Bohr's theorem for $\lambda$, the sequence $(D^{\omega}_{N,\varepsilon})$ converges to $D^{\omega}_\varepsilon$ in $\mathcal{D}_\infty(\lambda)$. By \cite[Corollary 4.4]{DefantSchoolmann2} we know that
\begin{equation*}
\sup_{N}\sup_{t\in \mathbb{R}} | D_{N,\varepsilon}(it)|= \sup_{N}\sup_{t\in \mathbb{R}} | D^\omega_{N,\varepsilon}(it)|<\infty,
\end{equation*}
which by \eqref{alwaysBohrcahenHelson} implies that $\sigma_{u}(D)\le 0$. So $D$ converges on the right half-plane, and it remains to show that the limit function of $D$ is bounded on all of $[Re >0]$.
Indeed, if $\varepsilon>0$, then for large $N$ (again by \cite[Corollary 4.4]{DefantSchoolmann2})
\begin{align*}
\|D_{N,\varepsilon}\|_{\mathcal{D}_\infty(\lambda)}
&
= \|D^\omega_{N,\varepsilon}\|_{\mathcal{D}_\infty(\lambda)}
\\&
\leq 1 + \|D^\omega_{\varepsilon}\|_{\mathcal{D}_\infty(\lambda)}
\leq 1+ \|D^\omega\|_{\mathcal{D}_\infty(\lambda)} = 1+ \|D\|_{\mathcal{H}_\infty(\lambda)}\,.
\end{align*}
Hence $\|D\|_{\mathcal{D}_\infty(\lambda)} \leq 1 + \|D \|_{\mathcal{H}_\infty(\lambda)} < \infty$, the conclusion.
Assume conversely that $(c)$ holds, that is, $\mathcal{D}_\infty(\lambda) = \mathcal{H}_\infty(\lambda)$. Then
$\mathcal{D}_\infty(\lambda)$ is complete and by \eqref{isometricembeddingAHelson} we have $\mathcal{D}_{\infty}^{ext}(\lambda)=\mathcal{D}_{\infty}(\lambda)$.
In order to check $(a)$ take some $D\in \mathcal{D}_{\infty}^{ext}(\lambda)$; we have to show that $\sigma_{u}(D)\le 0$.
Indeed, by Lemma~\ref{previousHelson} and another application of the Bohr-Cahen formula~\eqref{alwaysBohrcahenHelson} we know that $\sigma_{u}(D_\varepsilon)\le 0 $ for all $\varepsilon>0$, which implies $\sigma_{u}(D)\le 0$.
\end{proof}
\begin{Rema} \label{extHelson} A simple analysis of the previous proof shows that the equivalence (b) and (c) of Theorem \ref{equivalenceHelson} holds true, if we replace $\mathcal{D}_{\infty}(\lambda)$ by $\mathcal{D}_{\infty}^{ext}(\lambda)$, that is for any frequency $\lambda$ we have that
$\mathcal{D}^{ext}_{\infty}(\lambda)$ is complete if and only if $\mathcal{D}_{\infty}^{ext}(\lambda)=\mathcal{H}_{\infty}(\lambda)$. Indeed, if we assume that $\mathcal{D}^{ext}_{\infty}
(\lambda)$ is complete, then in particular for $\varepsilon=1$ the sequence $(R_{x}(D_{1}))$ has a limit $D_{1}\in \mathcal{D}_{\infty}^{ext}(\lambda)$. Hence $\sigma_{c}(D_{1})<\infty$, which implies $\sigma_{c}(D)<\infty$ and so $D\in \mathcal{D}_{\infty}^{ext}(\lambda)$. Again, we do not know whether completeness of $\mathcal{D}^{ext}_{\infty}(\lambda)$ implies, that $\lambda$ satisfies Bohr's theorem.
\end{Rema}
Let us apply Theorem \ref{equivalenceHelson} to the concrete frequency $\lambda = (\sqrt{\log n})$ which obviously satisfies $(LC)$, so fulfills Bohr's theorem. Then, although in this case
$L((\sqrt{\log n}))=+\infty$ (!), we may conclude the following (apparently non-trivial) application.
\begin{Coro}
$\mathcal{D}_{\infty}((\sqrt{\log n})) = \mathcal{H}_{\infty}((\sqrt{\log n}))$, and $\mathcal{D}_{\infty}((\sqrt{\log n}))$ is complete.
\end{Coro}
\subsection{Norm of the partial sum operator in $\pmb{\mathcal{H}_{\infty}(\lambda)}$}
Recall from above that Bohr's theorem holds for $\lambda=(\log n)$, and that a quantitative variant of this (see again \cite[Theorem 6.2.2., p. 143]{QQ} or \cite[Theorem 1.13, p. 21]{Defant}) reads as follows: There is a constant $C>0$ such that for every $D\in \mathcal{D}_{\infty}((\log n))$ and $N$
\begin{equation} \label{ordinaryBohrquantiHelson}
\big\| \sum_{n=1}^{N} a_{n} n^{-s} \big\|_{\infty} \le C\log(N) \|D\|_{\infty}.
\end{equation}
Given an arbitrary frequency $\lambda$, we are interested
in establishing quantitative variants of Bohr's theorem in the sense of (\ref{ordinaryBohrquantiHelson}), and this means to control the norm of the partial sum operator
\begin{equation*}
S_{N}\colon \mathcal{D}^{ext}_{\infty}(\lambda) \to \mathcal{D}_{\infty}(\lambda), ~~ D \mapsto \sum_{n=1}^{N}a_{n}(D) e^{-\lambda_{n}s}.
\end{equation*}
The main result of \cite[Theorem 3.2]{Schoolmann} is then, that for all $0<k\le 1$, $D=\sum a_{n}e^{-\lambda_{n}s}\in \mathcal{D}^{ext}_{\infty}(\lambda)$ and $N$ we have
\begin{equation} \label{normofSNHelson}
\big\|\sum_{n=1}^{N} a_{n}e^{-\lambda_{n}s}\big\|_{\infty}\le C \frac{\Gamma(k+1)}{k} \bigg(\frac{\lambda_{N}}{\lambda_{N+1}-\lambda_{N}}\bigg)^{k} \|D\|_{\infty},
\end{equation}
where $C$ is an absolute constant and $\Gamma$ denotes the Gamma function. The case $p=\infty$ of Lemma \ref{jojHelson} extends (\ref{normofSNHelson}) from $\mathcal{D}^{ext}_{\infty}(\lambda)$ to $\mathcal{H}_{\infty}(\lambda)$.
\begin{Theo} \label{coro22Helson} Let $\lambda$ be an arbitrary frequency. Then for all $D\in \mathcal{H}_{\infty}(\lambda)$, all $0<k\le 1$ and all $N$ we have
\begin{equation*}
\big\|\sum_{n=1}^{N} a_{n}(D) e^{-\lambda_{n}s} \big\|_{\infty} \le \frac{C}{k} \bigg(\frac{\lambda_{N+1}}{\lambda_{N+1}-\lambda_{N}}\bigg)^{k} \|D\|_{\infty},
\end{equation*}
where $C>0$ is a universal constant.
\end{Theo}
In particular, assuming $(LC)$ (respectively, (BC)) for $\lambda$ and choosing $k_{N}=e^{-\delta\lambda_{N}}$ (respectively, $k_{N}=\lambda_{N}^{-1}$) we deduce from Theorem \ref{coro22Helson}
(see also again \eqref{(A)Helson}) the following quantitative variants of Bohr's theorem in $\mathcal{H}_{\infty}(\lambda)$. See \cite[Section 4]{Schoolmann} for the corresponding results for $\mathcal{D}^{ext}_{\infty}(\lambda)$.
\begin{Coro} \label{coro1Helson} Let $(LC)$ hold for $\lambda$. Then to every $\delta>0$ there is a constant $C=C(\delta)$ such that for all $D\in \mathcal{H}_{\infty}(\lambda)$ and $N$
\begin{equation*}
\big\|\sum_{n=1}^{N} a_{n}(D) e^{-\lambda_{n}s} \big\|_{\infty} \le Ce^{\delta\lambda_{N}} \|D\|_{\infty}.
\end{equation*}
If $\lambda$ satisfies (BC), then for every $D\in \mathcal{H}_{\infty}(\lambda)$ and $N$
\begin{equation*}
\big\|\sum_{n=1}^{N} a_{n}(D)e^{-\lambda_{n}s}\big\|_{\infty} \le C_{1}\lambda_{N} \|D\|_{\infty}
\end{equation*}
with an absolute constant $C_{1}>0$.
\end{Coro}
\begin{proof}[Proof of Theorem \ref{coro22Helson}] Let us for simplicity write $C=C(k,N):=\frac{1}{k}\bigg(\frac{\lambda_{N+1}}{\lambda_{N+1}-\lambda_{N}}\bigg)^{k}.$
Then for all $\omega \in G$ with $T_{\max}$ from Lemma \ref{jojHelson} we have
\begin{align*}
\big|\sum_{n=1}^{N} \widehat{f}(h_{\lambda_{n}}) h_{\lambda_{n}}(\omega)\big|=C C^{-1}\big|\sum_{n=1}^{N} \widehat{f}(h_{\lambda_{n}})h_{\lambda_{n}}(\omega)\big|\le C T_{\max}(f)(\omega),
\end{align*}
and so the claim follows, since $T_{\max}\colon H_{\infty}^{\lambda}(G) \to L_{\infty}(G)$ is bounded.
\end{proof}
\subsection{Montel theorem}
In the case of ordinary Dirichlet series, so series with frequency $\lambda=(\log n)$, Bayart in \cite{Bayart} (see also \cite[Theorem 3.11]{Defant} or \cite[Theorem 6.3.1]{QQ}) proves an important Montel-type theorem in $\mathcal{H}_{\infty}=\mathcal{D}_{\infty}((\log n))$: For every bounded sequence $(D^{j})$ in $\mathcal{D}_{\infty}((\log n))$ there are a subsequence $(D^{j_{k}})$ and $D\in \mathcal{D}_{\infty}((\log n))$ such that $(D^{j_{k}})$ converges uniformly to $D$ on $[Re>\varepsilon]$ for every $\varepsilon>0$.
Bayart's Montel theorem extends to $\lambda$-Dirichlet series whenever $\lambda$ satisfies (LC) or $L(\lambda)=0$, or is $\mathbb{Q}$-linearly independent (see \cite[Theorem 4.10]{Schoolmann}). Moreover, as proven in \cite[Theorem 4.19]{DefantSchoolmann2}, under one of the assumptions [(LC) and $L(\lambda)<\infty$, or $L(\lambda)=0$, or $\mathbb{Q}$-linear independence] it extends from $\mathcal{D}_{\infty}(\lambda)$ to $\mathcal{H}_{p}(\lambda)$.
We prove a considerable extension of all this.
A consequence of Theorem \ref{equivalenceHelson} shows that Bayart's Montel theorem holds for every frequency $\lambda$ which satisfies Bohr's theorem (or equivalently (b) or (c) from Theorem \ref{equivalenceHelson}).
\begin{Theo} \label{MontelHelson} Assume that Bohr's theorem holds for $\lambda$ and $1\le p \le \infty$. Then for every bounded sequence $(D^{j})$ in $\mathcal{H}_{p}(\lambda)$ there is a subsequence $(D^{j_{k}})_{k}$ and $D\in \mathcal{H}_{p}(\lambda)$ such that $(D^{j_{k}}_{\varepsilon})_{k}$ converges to $D_{\varepsilon}$ in $\mathcal{H}_{p}(\lambda)$ for every $\varepsilon>0$. The same result holds true, if we replace $\mathcal{H}_{p}(\lambda)$ by $\mathcal{D}_{\infty}(\lambda)$.
\end{Theo}
We follow the same strategy as in the proof of \cite[Theorem 4.19]{DefantSchoolmann2}. We first prove Theorem \ref{MontelHelson} for $\mathcal{D}_{\infty}(\lambda)$, and then, using some vector valued arguments, we extend this result to $\mathcal{H}_{p}(\lambda)$.
Therefore, let us recall, that, given a frequency $\lambda$ and a Banach space $X$, we denote by $\mathcal{D}_{\infty}(\lambda,X)$ the linear space of all Dirichlet series $D=\sum a_{n}e^{-\lambda_{n}s}$ which have coefficients $(a_{n})\subset X$ and which converge and define a bounded function on $[Re>0]$ (then being holomorphic and with values in $X$).
A result from \cite{vectorvalued} states that for any non-trivial Banach space $X$, the space $\mathcal{D}_{\infty}(\lambda)$ is complete if and only if $\mathcal{D}_{\infty}(\lambda,X)$ is complete (again endowed with sup norm on $[Re >0]$).
Moreover, a standard Hahn-Banach argument shows that Lemma~\ref{previousHelson} extends from the scalar-valued case to the vector-valued case:
Given $\varepsilon >0$, there is a constant $C=C(\varepsilon) >0$ such that for
every Banach space $X$ and every $D=\sum a_{n}e^{-\lambda_{n}s} \in \mathcal{D}_{\infty}(\lambda,X)$
\begin{equation} \label{translationinftyHelson}
\sup_{N}\big\| \sum_{n=1}^{N} a_{n}e^{-\varepsilon\lambda_{n}}e^{-\lambda_{n}s}\big\|_{\infty} \le C\|D\|_{\infty}\,,
\end{equation}
provided that $\mathcal{D}_{\infty}(\lambda)$ is complete, or equivalently $\lambda$ satisfies
Bohr's theorem (Theorem \ref{equivalenceHelson}). Indeed, apply Lemma~\ref{previousHelson} to the Dirichlet series
$x^\ast \circ D=\sum x^{\ast}(a_{n})e^{-\lambda_{n}s} \in \mathcal{D}_{\infty}(\lambda),\, x^\ast \in X^\ast$, and use a standard Hahn-Banach argument.
\begin{proof}[Proof of Theorem \ref{MontelHelson}] We first assume that $p=\infty$, so that by assumption and Theorem \ref{equivalenceHelson} we have that $\mathcal{D}_{\infty}(\lambda)=\mathcal{H}_{\infty}(\lambda)$. Moreover, we at first look at a bounded sequence $(D^{j})$ in $\mathcal{D}_{\infty}(\lambda)$, and denote the coefficients of $D^{j}$ by $(a_{n}^{j})_{n}$. So, by \cite[Corollary 3.9]{Schoolmann} there is a constant $C>0$ such that for all $n$, $j$
\begin{equation}
|a_{n}^{j}|\le \|D^{j}\|_{\infty}\le \sup_{j} \|D^{j}\|_{\infty}\le C <\infty.
\end{equation}
Hence by a diagonal process we find a subsequence $(j_{k})_{k}$ such that $\lim_{k\to \infty} a^{j_{k}}_{n}=:a_{n}$ exists for all $n$. Moreover, applying \eqref{translationinftyHelson} we obtain for every $\varepsilon>0$
a constant $C_1 = C_1(\varepsilon)>0$ such that for all $N$
\begin{equation*}
\sup_{k} \big\| \sum_{n=1}^{N} a_{n}^{j_{k}} e^{-\varepsilon\lambda_{n}}e^{-\lambda_{n}s}\big\|_{\infty}\le C_{1}\sup_{k}\|D^{j_{k}}\|_{\infty}<C_{1}C<\infty\,.
\end{equation*}
Hence with $D=\sum a_{n}e^{-\lambda_{n}s}$, by \cite[Proposition 2.4]{Schoolmann} we obtain that $(D^{j_{k}}_{\varepsilon})$ converges uniformly to $D_{\varepsilon}$ on $[Re>\delta]$ for every $\delta>0$, which proves the claim for $\mathcal{D}_{\infty}(\lambda)$. Now let $1\le p < \infty$ and $(D^{j})$ a bounded sequence in $\mathcal{H}_{p}(\lambda)$. Since $\mathcal{D}_{\infty}(\lambda)$ is complete under Bohr's theorem (Theorem \ref{equivalenceHelson}), by \cite[Lemma 4.9]{DefantSchoolmann2} the map
\begin{equation*} \label{embeddingptoDirichletHelson}
\Phi \colon \mathcal{H}_{p}(\lambda) \hookrightarrow \mathcal{D}_{\infty}(\lambda,\mathcal{H}_{p}(\lambda)), ~~ \sum a_{n}e^{-\lambda_{n}s} \mapsto \sum (a_{n}e^{-\lambda_{n}z}) e^{-\lambda_{n}s}
\end{equation*}
defines an into isometry. Hence $(\Phi(D^{j}))$ is a bounded sequence in $\mathcal{D}_{\infty}(\lambda,\mathcal{H}_{p}(\lambda))$ and again for all $n$, $j$
\begin{equation*}
|a_{n}^{j}|=\|a_{n}^{j}e^{-\lambda_{n}z}\|_{p}\le \|\Phi(D^{j})\|=\|D^{j}\|_{p}\le \sup_{j} \|D^{j}\|_{p} \le C<\infty\,,
\end{equation*}
for some absolute constant $C>0$. By another diagonal process we obtain a subsequence $(j_{k})_{k}$ such that $\lim_{k\to \infty} a_{n}^{j_{k}}=:a_{n}$ exists, and using \eqref{translationinftyHelson} together with the
vector-valued variant of \cite[Proposition 2.4]{Schoolmann} (its proof follows word by word from the scalar case) we conclude, that
$(\Phi(D^{j_{k}}_{\varepsilon}))$ converges in $\mathcal{D}_{\infty}(\lambda,\mathcal{H}_{p}(\lambda))$ for every $\varepsilon>0$ as $k\to \infty$. Hence, the sequence $(D_{\varepsilon}^{j_{k}})$ forms a Cauchy sequence in $\mathcal{H}_{p}(\lambda)$ with limit $D_{\varepsilon}$, and the proof is complete.
\end{proof}
\subsection{Nth abschnitte}
Let $H_{\infty}(B_{c_{0}})$ denote the Banach space of all holomorphic and bounded functions on the open unit ball $B_{c_{0}}$ (of the Banach space $c_{0}$ of all zero sequences). Then as proven in \cite{HLS}
(see also \cite[Theorem 5.1]{Defant}) there is an isometric bijection
\begin{equation} \label{HLSHelson}
H_{\infty}(B_{c_{0}}) \to H_{\infty}(\mathbb{T}^{\infty}), ~~ F\mapsto f,
\end{equation}
which preserves the Taylor and Fourier coefficients in the sense that $c_{\alpha}(F)=\widehat{f}(\alpha)$ for all multi indices $\alpha$.
Recall, that $F\colon B_{c_{0}} \to \mathbb{C}$ belongs to $H_{\infty}(B_{c_{0}})$ if and only if $F$ is continuous and all its restrictions $F_{N}\colon \mathbb{D}^{N} \to \mathbb{C}$ belong to $H_{\infty}(\mathbb{D}^{N})$ with $\sup_{N} \|F_{N}\|_{\infty}<\infty$ (see e.g. \cite[Corollary 2.22]{Defant}). By the Bohr map (\ref{BohrmapHelson}) and (\ref{HLSHelson}) this result transfers to ordinary Dirichlet series: A Dirichlet series $D=\sum a_{n}n^{-s}$ belongs to $\mathcal{D}_{\infty}((\log n))$ if and only if for every $N$ its so-called $N$th abschnitt, that is $D|_{N}=\sum a_{n}n^{-s}$, where the sum is taken over all natural numbers which only have the first $N$ prime numbers as divisors, belongs to $\mathcal{D}_{\infty}((\log n))$ with $\sup_{N} \|D|_{N}\|_{\infty} <\infty$ (see also \cite[Corollary 3.10]{Defant}).
This result extends to general Dirichlet series. To understand this let us recall, that for every frequency $\lambda$ there is another real sequence $B=(b_{n})$ such that for every $n$ there are finitely many rationals $q_{1}^{n}, \ldots, q_{k}^{n}$ such that
\begin{equation*}
\lambda_{n}=\sum_{j} q_{j}^{n} b_{j}.
\end{equation*}
In this case, we call $B$ basis, and $R=(q^{n}_{j})_{n,j}$ Bohr matrix of $\lambda$. Moreover, we write $\lambda=(R,B)$, whenever $\lambda$ decomposes with respect to a basis $B$ with Bohr matrix $R$, and note that every $\lambda$ allows a subsequence which is a basis $B$ for $\lambda$.
Suppose that $\lambda=(R,B)$ and let $D\in \mathcal{D}(\lambda)$. Then the Dirichlet series $D|_{N}=\sum a_{n}(D)e^{-\lambda_{n}s}$,
where $a_{n}(D)\ne 0$ implies that $\lambda_{n}\in \operatorname{span}_{\mathbb{Q}}(b_{1},\ldots, b_{N})$, is denoted as the $N$th abschnitt of $D$.
A consequence of Theorem \ref{MontelHelson} gives an improvement of \cite[Theorem 4.22]{DefantSchoolmann2}.
\begin{Theo} Assume that Bohr's theorem holds for $\lambda$, $1\le p \le \infty$ and $D=\sum a_{n}e^{-\lambda_{n}s}$. Then $D\in \mathcal{H}_{p}(\lambda)$ if and only if
its $N$th abschnitte $D|_{N} \in \mathcal{H}_{p}(\lambda)$ with $\sup_{N} \|D|_{N}\|_{p}<\infty$. Moreover, in this case
$\|D\|_{p}=\sup_{N} \|D|_{N}\|_{p}$, and the same result holds true, whenever we replace $\mathcal{H}_{p}(\lambda)$ by $\mathcal{D}_{\infty}(\lambda)$.
\end{Theo}
\begin{proof}
The 'if part' precisely is Remark 4.21 from \cite{DefantSchoolmann2}, and holds true without any assumption on $\lambda$. So, suppose $D|_{N} \in \mathcal{H}_{p}(\lambda)$ for all $N$ with $\sup_{N} \|D|_{N}\|_{p}<\infty$. Then by Theorem \ref{MontelHelson} there is a subsequence $(N_{k})$ and $E\in \mathcal{H}_{p}(\lambda)$ such that $(D_{1}|_{N_{k}})$ converges to $E_{1}$ as $k \to \infty$. Comparing Dirichlet coefficients we see, that $a_{n}(E)e^{-\lambda_{n}}=a_{n}(E_{1})=a_{n}e^{- \lambda_{n}}$ and so $E=D$.
\end{proof}
\end{document} |
\begin{document}
\renewcommand{\thefootnote}{\arabic{footnote}}
\title{Synthetic foundations of cevian geometry, III:\\The generalized orthocenter}
\author{\renewcommand{\thefootnote}{\arabic{footnote}}
Igor Minevich and Patrick Morton\footnotemark[1]}
\footnotetext[1]{The authors were partially supported by an Honors Research Fellowship from the IUPUI Honors Program.}
\maketitle
\begin{section}{Introduction.}
In Part II of this series of papers \cite{mm2} we studied the conic $\mathcal{C}p=ABCPQ$ and its center $Z$, where $P$ is a point not on the extended sides of triangle $ABC$ or its anticomplementary triangle, and $Q=K(\iota(P))=K(P')$ is the complement of the isotomic conjugate of $P$ ({\it isotomcomplement}\footnote[2]{This is Grinberg's terminology.}, for short) with respect to $ABC$. When $P=Ge$ is the Gergonne point of $ABC$, $\mathcal{C}p$ is the Feuerbach hyperbola. To prove this, we introduce in Section 2 a generalization of the orthocenter $H$ which varies with the point $P$.
The generalized orthocenter $H$ of $P$ is defined to be the intersection of the lines through the vertices $A, B, C$, which are parallel, respectively, to the lines $QD, QE, QF$, where $DEF$ is the cevian triangle of $P$ with respect to $ABC$. We prove that the point $H$ always lies on the conic $\mathcal{C}p$, as does the corresponding generalized orthocenter $H'$ for the point $P'$ (Theorem \ref{thm:HonCp}). Thus, the cevian conic $\mathcal{C}p$ lies on the nine points
\[A, B, C, P, Q, P', Q', H, H',\]
where $Q'=K(\iota(P'))=K(P)$.
In the first two parts \cite{mm1} and \cite{mm2} we used the affine maps $T_P, T_{P'}, \mathcal{S}_1=T_P \circ T_{P'}$, and $\lambda = T_{P'} \circ T_P^{-1}$, where $T_P$ is the unique affine map which takes $ABC$ to $DEF$ and $T_{P'}$ is defined similarly for the point $P'$. (See Theorems 2.1 and 3.4 of \cite{mm2}.) In Section 2 of this paper we prove the affine formula
\begin{equation}
\label{eqn:H}
H = K^{-1}T_{P'}^{-1}K(Q)
\end{equation}
for the point $H$ that we defined above and deduce that $H$ and $H'$ are related by $\eta(H) = H'$, where $\eta$ is the affine reflection we made use of in Part II \cite{mm2}. (See Theorem \ref{thm:K(Q)}.) The point $H$ is the anti-complement of the point
\begin{equation}
\label{eqn:O}
O = T_{P'}^{-1}K(Q),
\end{equation}
which is a generalization of the circumcenter. Several facts from Part I \cite{mm1}, including the Quadrilateral Half-turn Theorem, allow us to give a completely synthetic proof of the affine formulas (\ref{eqn:H}) and (\ref{eqn:O}). We show that the circumscribed conic $\tilde{\mathcal{C}}_O$ of $ABC$ whose center is $O$ is the {\it nine-point conic} (with respect to the line at infinity $l_\infty$) for the quadrangle formed by the point $Q$ and the vertices of the anticevian triangle of $Q$ (for $ABC$). Furthermore, the complement $K(\tilde{\mathcal{C}}_O)$ is the nine-point conic $\mathcal{N}h$ of the quadrangle $ABCH$. When $P=Ge$ is the Gergonne point, $Q=I$ is the incenter, $P'$ is the Nagel point, and (\ref{eqn:H}) and (\ref{eqn:O}) yield affine formulas for the orthocenter and circumcenter as functions of $I$.
In Section 3 we study the relationship between the nine-point conic $\mathcal{N}h$, the circumconic $\tilde{\mathcal{C}}_O$, and the inconic $\mathcal{I}$, which is the conic tangent to the sides of $ABC$ at the traces $D, E, F$ of the point $P$. Its center is $Q$. (See [mm1, Theorem 3.9].) We also study the maps
\[\textsf{M}=T_P \circ K^{-1} \circ T_{P'} \ \ \textrm{and} \ \ \Phi_P=T_P \circ K^{-1} \circ T_{P'} \circ K^{-1}.\]
We show that these maps are homotheties or translations whose centers are the respective points
\[S=OQ \cdot GV = OQ \cdot O'Q' \ \ \textrm{and} \ \ Z = GV \cdot T_P(GV),\]
and use these facts to prove the Generalized Feuerbach Theorem, that $\mathcal{N}h$ and $\mathcal{I}$ are tangent to each other at the point $Z$. The proof boils down to the verification that $\Phi_P$ takes $\mathcal{N}h$ to $\mathcal{I}$, leaving $Z$ and its tangent line to $\mathcal{N}h$ invariant. Thus, this proof continues the theme, begun in Part I \cite{mm1}, of characterizing important points as fixed points of explicitly given affine mappings.
When $\mathcal{N}h$ is an ellipse, the fact that $\mathcal{N}h$ and $\mathcal{I}$ are tangent could be proved by mapping $\tilde{\mathcal{C}}_O$ to the circumcircle and $ABC$ to a triangle $A'B'C'$ inscribed in the same circumcircle; and then using the original Feuerbach theorem for $A'B'C'$. The proof we give does not assume Feuerbach's original theorem, and displays explicitly the intricate affine relationships holding between the various points, lines, and conics that arise in the proof. (See Figure 2 in Section 3.) It also applies when $\mathcal{N}h$ is a parabola or a hyperbola, and when $Z$ is infinite. (See Figures 3 and 4 in Section 3. Also see [mo], where a similar proof is used to prove Feuerbach's Theorem in general Hilbert planes.)
In Section 4 we determine synthetically the locus of points $P$ for which the generalized orthocenter is a vertex of $ABC$. This turns out to be the union of three conics minus six points. (See Figure 5 in Section 4.) We also consider a special case in which the map $\textsf{M}$ is a translation, so that the circumconic $\tilde{\mathcal{C}}_O$ and the inconic are congruent. (See Figures 6 and 7.)
The results of this paper, as well as those in Part IV, could be viewed as results relating to a generalized notion of perpendicularity. The inconic replaces the incircle, and the lines $QD, QE, QF$ replace the perpendicular segments connecting the incenter to the points of contact of the incircle with the sides of $ABC$. In this way we obtain the generalized incenter $Q$, generalized orthocenter $H$, generalized circumcenter $O$, generalized nine-point center $N$, etc., all of which vary with the point $P$. Using the polarity induced by the inconic, we have an involution of conjugate points on the line at infinity, unless $P$ lies on the Steiner circumellipse. Now Coxeter's well-known development (see \cite{co1}, Chapter 9 and \cite{bach}) of Euclidean geometry from affine geometry makes use of an elliptic involution on the line at infinity (i.e., a projectivity $\psi$ from $l_\infty$ to itself without fixed points, such that $\psi(\psi(X)) = X$ for all $X$). This involution is just the involution of perpendicularity: the direction perpendicular to the direction represented by the point $X$ at infinity is represented by $\psi(X)$.
Our development is, however, not equivalent to Coxeter's. If $P$ lies inside the Steiner circumellipse, then the inconic is an ellipse and the
involution is elliptic, but if $P$ lies outside the Steiner circumellipse, the inconic is a hyperbola and the involution is hyperbolic. Furthermore, if
$P$ is on the Steiner circumellipse, then $P' = Q = H = O$ is at infinity, the inconic is a parabola, and there is no corresponding involution on the line at infinity. However, interesting theorems of Euclidean geometry can be proved even in the latter two settings, which cannot be derived by applying an affine map to the standard results, since an affine map will take an elliptic involution on $l_\infty$ to another elliptic involution.
\end{section}
\begin{section}{Affine relationships between $Q, O$, and $H$.}
We continue to consider the affine situation in which $Q$ is the isotomcomplement of $P$ with respect to triangle $ABC$, and $DEF$ is the cevian triangle for $P$ with respect to $ABC$, so that $D=AP \cdot BC$, $E=BP \cdot AC$, and $F=CP \cdot AB$. As in Parts I and II, $D_0E_0F_0=K(ABC)$ is the medial triangle of $ABC$.
\begin{defn}
The point $O$ for which $OD_0 \parallel QD, OE_0 \parallel QE$, and $OF_0 \parallel QF$ is called the {\bf generalized circumcenter} of the point $P$ with respect to $ABC$. The point $H$ for which $HA \parallel QD, HB \parallel QE$, and $HC \parallel QF$ is called the {\bf generalized orthocenter} of $P$ with respect to $ABC$.
\end{defn}
We first prove the following affine relationships between $Q, O$, and $H$.
\begin{thm}
\label{thm:HO}
The generalized circumcenter $O$ and generalized orthocenter $H$ exist for any point $P$ not on the extended sides or the anticomplementary triangle of $ABC$, and are given by
$$O=T_{P'}^{-1}K(Q), \ \ H = K^{-1}T_{P'}^{-1}K(Q).$$
\end{thm}
\noindent \textbf{Remark.} The formula for the point $H$ can also be written as $H=T_L^{-1}(Q)$, where $L=K^{-1}(P')$ and $T_L$ is the map $T_P$ defined for $P=L$ and the anticomplementary triangle of $ABC$. \smallskip
\begin{proof}
We will show that the point $\tilde O=T_{P'}^{-1}K(Q)$ satisfies the definition of $O$, namely, that
$$\tilde OD_0 \parallel QD, \ \ \tilde OE_0 \parallel QE, \ \ \tilde OF_0 \parallel QF.$$
It suffices to prove the first relation $\tilde OD_0 \parallel QD$. We have that
$$T_{P'}(\tilde OD_0)=K(Q)T_{P'}(D_0)=K(Q)A_0'$$
and
$$T_{P'}(QD)=P'A_3',$$
by I, Theorem 3.7. Thus, we just need to prove that $K(Q)A_0' \parallel P'A_3'$. We use the map $\mathcal{S}_2=T_{P'}T_P$ from I, Theorem 3.8, which takes $ABC$ to $A_3'B_3'C_3'$. We have $\mathcal{S}_2(Q)=T_{P'}T_P(Q)=T_{P'}(Q)=P'$. Since $\mathcal{S}_2$ is a homothety or translation, this gives that $AQ \parallel \mathcal{S}_2(AQ)=A_3'P'$. Now note that $M'=K(Q)$ in I, Corollary 2.6, so
$$K(Q)A_0'=M'A_0'=D_0A_0',$$
by that corollary. Now the Quadrilateral Half-turn Theorem (I, Theorem 2.5) implies that $AQ \parallel D_0A_0'$, and therefore $P'A_3' \parallel K(Q)A_0'$. This proves the formula for $O$. To get the formula for $H$, just note that $K^{-1}(OD_0)=K^{-1}(O)A, K^{-1}(OE_0)=K^{-1}(O)B, K^{-1}(OF_0)=K^{-1}(O)C$ are parallel, respectively, to $QD,QE,QF$, since $K$ is a dilatation. This shows that $K^{-1}(O)$ satisfies the definition of the point $H$.
\end{proof}
\begin{cor} The points $O$ and $H$ are ordinary whenever $P$ is ordinary and does not lie on the Steiner circumellipse $\iota(l_\infty)$. If $P$ does lie on $\iota(l_\infty)$, then $O=H=Q$.
\end{cor}
\begin{proof}
If $P$ lies on $\iota(l_\infty)$, then $P'=Q$ is infinite, and since $K$ is a dilatation, we have that $O=H=T_{P'}^{-1}(Q)=T_{P'}^{-1} T_P^{-1}(Q)=\mathcal{S}_1^{-1}(Q)=Q$ by I, Theorems 3.2 and 3.8.
\end{proof}
To better understand the connection between the point $P$ and the points $O$ and $H$ of Theorem \ref{thm:HO} we consider the circumconic $\tilde{\mathcal{C}}_O$ on $ABC$ with center $O$. We will show that this circumconic is related to the nine-point conic $\mathcal{N}pp$ (with respect to $l_\infty$) on the quadrangle $ABCP'$. Recall that this is the conic through the diagonal points and the midpoints of the sides of the quadrangle [co1, p. 84]; alternatively, it is the locus of the centers of conics which lie on the vertices of the quadrangle $ABCP'$. Three of these centers are the points
$$D_3 = AP' \cdot BC, \ \ E_3 = BP' \cdot AC, \ \ F_3 = CP' \cdot AB,$$
which are centers of the degenerate conics
$$AP' \cup BC, \ \ BP' \cup AC, \ \ CP' \cup AB.$$
Since the quadrangle is inscribed in $\mathcal{C}p$, its center $Z$ lies on $\mathcal{N}pp$.
\begin{figure}
\caption{Circumconic $\tilde{\mathcal{C}}_O$}
\end{figure}
\begin{thm}
\label{thm:K(Q)}
\begin{enumerate}[a)]
\item The point $K(Q)$ is the center of the nine-point conic $\mathcal{N}pp$ (with respect to $l_\infty$) for the quadrangle $ABCP'$.
\item The circumconic $\tilde{\mathcal{C}}_O = T_{P'}^{-1}(\mathcal{N}pp)$ is the nine-point conic for the quadrangle formed by the point $Q$ and the vertices of the anticevian triangle of $Q$. Its center is $O = T_{P'}^{-1}K(Q)$.
\item If $P$ does not lie on a median or on $\iota(l_\infty)$ and $O', H'$ are the points of Theorem 2.2 corresponding to the point $P'$, then $O' = \eta(O)$ and $H' = \eta(H)$. Thus $OO'$ and $HH'$ are parallel to $PP'$.
\end{enumerate}
\end{thm}
\begin{proof}
a) If $P'$ is ordinary, the conic $\mathcal{N}pp$ lies on the midpoints $D_0, E_0, F_0$, the vertices $D_3, E_3, F_3$, and the midpoints $R'_a, R'_b, R'_c$ of $AP', BP'$, and $CP'$. By I, Corollary 2.6, the point $K(Q)$ is the midpoint of segments $D_0R'_a, E_0R'_b, F_0R'_c$, which are chords for the conic $\mathcal{N}pp$. As the midpoint of two intersecting chords, $K(Q)$ is conjugate to two different points at infinity (with respect to the polarity associated with $\mathcal{N}pp$), and so must be the pole of the line at infinity. If $P'$ is infinite, the nine-point conic $\mathcal{N}pp$ lies on the midpoints $D_0, E_0, F_0$, the vertices $D_3, E_3, F_3$, and the harmonic conjugate of $AQ \cdot l_\infty=Q$ with respect to $A$ and $Q$, which is $Q$ itself. (See [co1, 6.83, p. 84].) In this case we claim that $Q=K(Q)$ is the pole of $l_\infty$. The line $l_\infty$ intersects $\mathcal{N}pp$ at least once at $Q$; it is enough to show that $l_\infty$ intersects $\mathcal{N}pp$ \emph{only} at $Q$, because that will imply it is the tangent at $Q$, hence its pole is $Q$. Suppose $l_\infty$ also intersects $\mathcal{N}pp$ at a point $X$. The nine-point conic $\mathcal{N}pp$ is the locus of centers (that is, poles of $l_\infty$) of conics that lie on $A, B, C$, and $P'=Q$, which means $X$ is the pole of $l_\infty$ with respect to one of these conics ${\mathcal C}$. Now $X$ cannot be $D_3, E_3$, or $F_3$, which are the centers of the degenerate conics through $ABCQ$, because none of these points lies on $l_\infty$. Thus ${\mathcal C}$ is nondegenerate. By assumption, $X$ lies on its polar $l_\infty$ with respect to ${\mathcal C}$, so $X$ lies on ${\mathcal C}$ and $l_\infty$ is the tangent line at $X$. But we assumed that $Q$ and $X$ are distinct points on $l_\infty$, so $l_\infty$ is also a secant of ${\mathcal C}$, a contradiction. \smallskip
b) By I, Corollary 3.11, the anticevian triangle $A'B'C'$ of $Q$ is $T_{P'}^{-1}(ABC)$. By I, Theorem 3.7, $Q = T_{P'}^{-1}(P')$, so the quadrangle $ABCP'$ is mapped to quadrangle $A'B'C'Q$ by the map $T_{P'}^{-1}$. The diagonal points $D_3, E_3, F_3$ of quadrangle $ABCP'$ map to $A, B, C$ so $T_{P'}^{-1}(\mathcal{N}pp)$ is certainly a circumconic for triangle $ABC$ with center $T_{P'}^{-1}K(Q)=O$, by Theorem \ref{thm:HO}. \smallskip
c) By Theorem \ref{thm:HO}, II, Theorem 2.4, and the fact that the map $\eta$ (see the discussion following Prop. 2.3 in Part II) commutes with the complement map, we have that
\[\eta(O) = \eta T_{P'}^{-1}K(Q) = T_{P}^{-1}K(\eta(Q)) = T_{P}^{-1}K(Q') = O',\]
and similarly $\eta(H) = H'$.
\end{proof}
We show now that there are 4 points $P$ which give rise to the same generalized circumcenter $O$ and generalized orthocenter $H$. These points arise in the following way. Let the vertices of the anticevian triangle for $Q$ with respect to $ABC$ be denoted by
\[Q_a = T_{P'}^{-1}(A), Q_b = T_{P'}^{-1}(B), Q_c = T_{P'}^{-1}(C).\]
Then we have
\[A = Q_bQ_c \cdot QQ_a, B = Q_aQ_c\cdot QQ_b, C = Q_aQ_b \cdot QQ_c.\]
This clearly implies that $QQ_aQ_b$ is the anticevian triangle of $Q_c$ with respect to $ABC$. Similarly, the anticevian triangle of any one of these four points is the triangle formed by the other three (analogous to the corresponding property of the excentral triangle). We let $P_a, P_b, P_c$ be the anti-isotomcomplements of the points $Q_a, Q_b, Q_c$ with respect to $ABC$, so that
\[P_a = \iota^{-1}K^{-1}(Q_a), \ P_b = \iota^{-1}K^{-1}(Q_b), \ P_c = \iota^{-1}K^{-1}(Q_c).\]
\begin{thm}
\label{thm:Pabc}
The points $P, P_a, P_b, P_c$ all give rise to the same generalized circumcenter $O$ and generalized orthocenter $H$.
\end{thm}
\begin{proof}
We use the characterization of the circumconic $\tilde{\mathcal{C}}_O$ from Theorem \ref{thm:K(Q)}(b). It is the nine-point conic $\mathcal{N}_Q$ for the quadrangle $Q_aQ_bQ_cQ$. But this quadrangle is the same for all of the points $P, P_a, P_b, P_c$, by the above discussion, so each of these points gives rise to the same circumconic $\tilde{\mathcal{C}}_O$. This implies the theorem, since $O$ is the center of $\tilde{\mathcal{C}}_O$ and $H$ is the anti-complement of $O$.
\end{proof}
\begin{cor}
The point-set $\{A, B, C, H\}$ is the common intersection of the four conics ${\mathcal C}_Y$, for $Y = P, P_a, P_b, P_c$.
\end{cor}
\begin{proof}
If $P$ does not lie on a median of $ABC$, the points $Q, Q_a, Q_b, Q_c$ are all distinct, since $Q_aQ_bQ_c$ is the anticevian triangle of $Q$ with respect to $ABC$. It follows that the points $P, P_a, P_b, P_c$ are distinct, as well. If two of the conics $\mathcal{C}_Y$ were equal, say $\mathcal{C}_{P_a}=\mathcal{C}_{P_b}$, then this conic lies on the points $Q_a, Q_b$ and $QQ_c \cdot Q_aQ_b =C$, which is impossible. This shows that the conics $\mathcal{C}_Y$ are distinct.
\end{proof}
The points $Q_a, Q_b, Q_c$ are the analogues of the excenters of a triangle, and the points $P_a, P_b, P_c$ are the analogues of the external Gergonne points. The traces of the points $P_a, P_b, P_c$ can be determined directly from the definition of $H$; for example, $Q_cD_c, Q_cE_c, Q_cF_c$ are parallel to $AH, BH, CH$, which are in turn parallel to $QD, QE, QF$.
The next theorem shows that the points $H$ and $H'$ are a natural part of the family of points that includes $P, Q, P', Q'$.
\begin{thm}
\label{thm:lambda}
If the ordinary point $P$ does not lie on a median of $ABC$ or on $\iota(l_\infty)$, we have:
\begin{enumerate}[a)]
\item $\lambda(P) = Q'$ and $\lambda^{-1}(P') = Q$.
\item $\lambda(H) = Q$ and $\lambda^{-1}(H') = Q'$.
\end{enumerate}
\end{thm}
\noindent {\bf Remark.} This gives an alternate formula for the point $H = \lambda^{-1}(Q) = \eta \lambda^2(P)$. Part b) gives an alternate proof that $\eta(H) = H'$.
\begin{proof}
Part a) was already proved as part of the proof of II, Theorem 3.2. Consider part b), which we will prove by showing that
\begin{equation}
\label{eqn:4.2}
T_P^{-1}(H) = T_{P'}^{-1}(Q).
\end{equation}
The point $T_P^{-1}(H)$ is the generalized orthocenter for the point $T_P^{-1}(P) = Q'$ and the triangle $T_P^{-1}(ABC) = \tilde A \tilde B \tilde C$, which is the anticevian triangle for $Q'$ with respect to $ABC$ (I, Corollary 3.11(a)). It follows that the lines $T_P^{-1}(H)\tilde A$ and $QA$ are parallel, since $Q$ is the isotomcomplement of $Q'$ with respect to $\tilde A \tilde B \tilde C$ (I, Theorem 3.13), and $A$ is the trace of the cevian $\tilde A Q'$ on $\tilde B \tilde C$. We will show that the line $T_{P'}^{-1}(Q)\tilde A$ is also parallel to $QA$. For this, first recall from I, Theorem 2.5 that $QD_0$ and $AP'$ are parallel. Part II, Theorem 3.4(a) implies that $Q, D_0$, and $\lambda(A)$ are collinear, so it follows that $Q\lambda(A)$ and $D_3P' = AP'$ are parallel. Now apply the map $T_{P'}^{-1}$. This shows that $T_{P'}^{-1}(Q\lambda(A)) = T_{P'}^{-1}(Q)T_{P'}^{-1}\lambda(A) = T_{P'}^{-1}(Q)\tilde A$ is parallel to $T_{P'}^{-1}(D_3P') = AQ$, as claimed. Applying the same argument to the other vertices of $T_P^{-1}(ABC) = \tilde A \tilde B \tilde C$ gives (\ref{eqn:4.2}), which is equivalent to $\lambda(H) = Q$. Now apply the map $\eta$ to give $\lambda^{-1}(H') = Q'$.
\end{proof}
\begin{thm}
\label{thm:HonCp}
If $P$ does not lie on a median of triangle $ABC$ or on $\iota(l_\infty)$, the generalized orthocenter $H$ of $P$ and the generalized orthocenter $H'$ of $P'$ both lie on the cevian conic $\mathcal{C}_P=ABCPQ$.
\end{thm}
\begin{proof}
Theorem \ref{thm:lambda}(b) and the fact that $\lambda$ maps the conic $\mathcal{C}_P$ to itself (II, Theorem 3.2) imply that $H$ lies on $\mathcal{C}_P$, and since $Q'$ lies on $\mathcal{C}_P$, so does $H'$.
\end{proof}
\begin{cor}
When $P$ is the Gergonne point of $ABC$, the conic $\mathcal{C}p = ABCPQ$ is the Feuerbach hyperbola $ABCHI$ on the orthocenter $H$ and the incenter $Q=I$. The Feuerbach hyperbola also passes through the Nagel point ($P'$), the Mittenpunkt ($Q'$), and the generalized orthocenter $H'$ of the Nagel point, which is the point of intersection of the lines through $A, B, C$ which are parallel to the respective lines joining the Mittenpunkt to the opposite points of external tangency of $ABC$ with its excircles.
\end{cor}
Part II, Theorem 3.4 gives six other points lying on the Feuerbach hyperbola. Among these is the point $A_0P \cdot D_0Q'$, where $D_0Q'$ is the line joining the Mittenpunkt to the midpoint of side $BC$, and $A_0P$ is the line joining the Gergonne point to the opposite midpoint of its cevian triangle. Using I, Theorem 2.4 and the fact that the isotomcomplement of the incenter $Q=I$ with respect to the excentral triangle $I_aI_bI_c$ is the Mittenpunkt $Q'$ (see I, Theorem 3.13), it can be shown that the line $D_0Q'$ also passes through the excenter $I_a$ of triangle $ABC$ which lies on the angle bisector of the angle at $A$. Thus, $A_0P \cdot D_0Q' = A_0P \cdot I_aQ'$. By II, Theorem 3.4(b), the lines $DQ$ and $A'_3P'$ also lie on this point.
From (\ref{eqn:4.2}) we deduce
\begin{thm}
\label{thm:PerspectivitesWithH}
Assume that the ordinary point $P$ does not lie on a median of $ABC$ or on $\iota(l_\infty)$.
\begin{enumerate}[a)]
\item The point $Q$ is the perspector of triangles $D_0E_0F_0$ and $\lambda(ABC)$.
\item The point $\tilde H = T_P^{-1}(H) = T_{P'}^{-1}(Q)$ is the perspector of the anticevian triangle for $Q'$ (with respect to $ABC$) and the medial triangle of the anticevian triangle of $Q$. Thus, the points $\tilde A = T_P^{-1}(A), \tilde H$, and $T_{P'}^{-1}(D_0)$ are collinear, with similar statements for the other vertices.
\item The point $H$ is the perspector of triangle $ABC$ and the medial triangle of $\lambda^{-1}(ABC)$. Thus, $A, H$, and $\lambda^{-1}(D_0)$ are collinear, with similar statements for the other vertices.
\item The point $\tilde H$ is also the perspector of the anticevian triangle of $Q$ and the cevian triangle of $P'$. Thus, $\tilde H$ is the $P'$-ceva conjugate of $Q$, with respect to triangle $ABC$.
\item The point $H$ is also the perspector of $\lambda^{-1}(ABC)$ and the triangle $A_3B_3C_3$.
\end{enumerate}
\end{thm}
\begin{proof}
a) As in the previous proof, the points $Q, D_0$, and $\lambda(A)$ are collinear, with similar statements for $Q, E_0, \lambda(B)$ and for $Q, F_0, \lambda(C)$. Therefore, $Q$ is the perspector of triangles $D_0E_0F_0$ and $\lambda(ABC)$. \smallskip
b) Now apply the map $T_{P'}^{-1}$, giving that $T_{P'}^{-1}(Q)$ is the perspector of $T_{P'}^{-1}(D_0E_0F_0)$, which is the medial triangle of $T_{P'}^{-1}(ABC)$, and the triangle $T_{P'}^{-1}\lambda(ABC) = T_P^{-1}(ABC)$. The result follows from (\ref{eqn:4.2}) and I, Corollary 3.11.
c) This part follows immediately by applying the map $T_P$ to part (b) or $\lambda^{-1}$ to part (a). \smallskip
d) I, Theorem 3.5 gives that $A, A'_1$ (on $E_3F_3$), and $Q$ are collinear, from which we get that $T_{P'}^{-1}(A) = A', T_{P'}^{-1}(A'_1) = D'_1 = D_3$, and $T_{P'}^{-1}(Q) = \tilde H$ are collinear. This and the corresponding statements for the other vertices imply (d). \smallskip
e) Applying the map $T_P$ to (d) shows that $\lambda^{-1}(A), A_3$, and $H$ are collinear, with similar statements for the other vertices.
\end{proof}
\end{section}
\begin{section}{The generalized Feuerbach Theorem.}
Recall that the $9$-point conic $\mathcal{N}h$, with respect to the line $l_\infty$ is the conic which passes through the following nine points:
the midpoints $D_0, E_0, F_0$ of the sides $BC, AC, AB$;
the midpoints $R_1, R_2, R_3$ of the segments $AH, BH, CH$;
the traces (diagonal points) $H_1,H_2,H_3$ of $H$ on the sides $BC,AC,AB$.
\noindent In this section we give a synthetic proof, based on the results of this paper, that $\mathcal{N}h$ is tangent to the inconic of $ABC$ whose center is $Q$, at the generalized Feuerbach point $Z$. We start by showing that the conics $\mathcal{N}h$ and $\tilde{\mathcal{C}}_O$ are in the same relationship to each other as the classical $9$-point circle and circumcircle.
\begin{thm}
\label{thm:NH}
Assume that $P$ is a point for which the generalized circumcenter $O$ is not the midpoint of any side of $ABC$, so that $H$ does not coincide with any of the vertices $A, B$, or $C$. As in Theorem 2.4, $\tilde{\mathcal{C}}_O$ is the unique circumconic of $ABC$ with center $O$. Then the $9$-point conic $\mathcal{N}h$ is the complement of $\tilde{\mathcal{C}}_O$ with respect to $ABC$. It is also the image of $\tilde{\mathcal{C}}_O$ under a homothety $\textsf{H}$ centered at $H$ with factor $1/2$. Its center is the midpoint $N$ of segment $HO$.
\end{thm}
\begin{proof}
Let $T_1,T_2,T_3$ denote the reflections of $A,B,C$ through the point $O$, and denote by $S_1,S_2,S_3$ the intersections of $AH,BH,CH$ with the conic $\mathcal{C}=\tilde{\mathcal{C}}_O$. Let $\textsf{R}_O$ the mapping which is reflection in $O$. By Theorem \ref{thm:HO}, $K(H)=O$, so $K(O)=N$. Hence $K(\mathcal{C})$ is a conic through the midpoints of the sides of $ABC$ with center $N$. Reflecting the line $AH$ in the point $O$, we obtain the parallel line $T_1S_1'$ with $S_1'=\textsf{R}_O(S_1)$ on $\mathcal{C}$. This line contains the point $\bar{H}=\textsf{R}_O(H)=K^{-1}(H)$, since the centroid $G$ lies on $OH$. If we define $R_1=T_1G \cdotot AH$, then triangle $HGR_1$ is similar to $\bar{H}GT_1$. Hence, $R_1=K(T_1)$ lies on $K(\mathcal{C})$ and $R_1H=K\textsf{R}_O(AH)$, so $R_1$ is the midpoint of $AH$. In the same way, $R_2=K(T_2)$ and $R_3=K(T_3)$ lie on the conic $K(\mathcal{C})$. This shows that $K(\mathcal{C})$ lies on the midpoints of the sides of the quadrangle $ABCH$, and so is identical with the conic $\mathcal{N}h$. Since the affine map $\textsf{H}=K\textsf{R}_O$ takes $A,B,$ and $C$ to the respective midpoints of $AH, BH, CH$ and fixes $H$, it is clear that $\textsf{H}$ is the homothety centered at $H$ with factor $1/2$, and $\textsf{H}(\mathcal{C})=K\textsf{R}_O(\mathcal{C})=K(\mathcal{C})=\mathcal{N}h$.
\end{proof}
\begin{figure}
\caption{$\mathcal{N}h$}
\end{figure}
\begin{prop}
\label{thm:STparBC}
Assume the hypothesis of Theorem \ref{thm:NH}. If $O$ is not on the line $AH$, and the intersections of the cevians $AH$ and $AO$ with the circumconic $\tilde{\mathcal{C}}_O$ are $S_1$ and $T_1$, respectively, then $S_1T_1$ is parallel to $BC$.
\end{prop}
\begin{proof}
(See Figure 2.) First note that $P$ does not lie on the median $AG$, since $H$ and $O$ lie on this median whenever $P$ does, by Theorem \ref{thm:K(Q)} and the arguments in the proof of II, Theorem 2.7, according to which $T_P$ and $T_{P'}$ fix the line $AG$ when it lies on $P$. Thus, $H$ cannot lie on any of the secant lines $AB, AC$, or $BC$ of the conic $\mathcal{C}p=ABCPQ$, because $H$ is also on this conic and does not coincide with a vertex. It is also easy to see that $H$ does not lie on a side of the anticomplementary triangle $K^{-1}(ABC)$, since $Q$ does not lie on a side of $ABC$. By the proof of I, Theorem 2.5 (Quadrilateral Half-turn Theorem), with $H$ as the point $P$ and $O=K(H)$ as the point $Q'$, we know that $R_1$ is the reflection of $O$ in the midpoint $N_1$ of $E_0F_0$. Since $R_1$ is the midpoint of $AH$, the line $R_1O$ is a midline of triangle $AHT_1$; and since $N_1$ is the midpoint of segment $R_1O$, we know that $AN_1$ intersects the opposite side $HT_1$ in its midpoint $M$, with $N_1$ halfway between $A$ and $M$.
However, $AN_1 \cong N_1D_0$, because $N_1$ is the midpoint of the midline $E_0F_0$ of triangle $ABC$. Therefore, $D_0=M$ is the midpoint of $HT_1$. Now, considering triangle $HS_1T_1$, with $OD_0 \parallel HS_1$ (on $AH$), we know that $OD_0$ intersects $S_1T_1$ in its midpoint. But $D_0$ is the midpoint of the chord $BC$ of the conic $\tilde{\mathcal{C}}_O$, and $S_1T_1$ is also a chord of $\tilde{\mathcal{C}}_O$. Since $O$ is the center of $\tilde{\mathcal{C}}_O$, it follows that $OD_0$ is the polar of the points at infinity of both $BC$ and $S_1T_1$ (with respect to $\tilde{\mathcal{C}}_O$), whence these lines must be parallel.
\end{proof}
When the point $H$ does coincide with a vertex, we define $\mathcal{N}h$ by $\mathcal{N}h=K(\tilde{\mathcal{C}}_O)$.
\begin{prop}
If the generalized orthocenter $H=A$, then $\mathcal{N}h=K(\tilde{\mathcal{C}}_O)$ is the unique conic on the vertices of the quadrangle $AD_0E_0F_0$ which is tangent to the conic $\tilde{\mathcal{C}}_O=T_{P'}^{-1}(\mathcal{N}_{P'})$ at $A$.
\end{prop}
\begin{proof}
If $H = A$, then $O = D_0$ is the center of $\tilde C_O$, so the reflection of $A$ through $D_0$ lies on $\tilde C_O$, and this reflection is precisely the anticomplement $A'=K^{-1}(A)=K^{-2}(D_0)$ of $A$. Then clearly $A=K(A')$ lies on $\mathcal{N}h=K(\tilde{\mathcal{C}}_O)$. Thus $\mathcal{N}h$ is a conic on the vertices of $AD_0E_0F_0$. The tangent line $t'$ to $\tilde{\mathcal{C}}_O$ at $A'$ is taken by the complement map to a parallel tangent line $t_1$ to $\mathcal{N}h$ at $A$. But the tangent line $t_2$ to $\tilde{\mathcal{C}}_O$ at $A$ is also parallel to $t'$ because the line $AA'$ lies on the center $O$ (dually, $a \cdotot a'$ lies on $l_\infty$). It follows that $t_1=t_2$, hence $\mathcal{N}h$ is tangent to $\tilde{\mathcal{C}}_O$ at $A$.
\end{proof}
In this case, we take the point $S_1$ in the above proposition to be the intersection of the conic $\tilde{\mathcal{C}}_O$ with the line $t_A$ through $A$ which is parallel to $QD$, and we claim that we still have $S_1T_1 \parallel BC$. To prove this we first prove the following theorem.
\begin{thm}
\label{thm: CtoI}
The map $\textsf{M}=T_PK^{-1}T_{P'}$ is a homothety or translation taking the conic $\tilde{\mathcal{C}}_O$ to the inconic $\mathcal{I}$, which is the conic with center $Q$ tangent to the sides of $ABC$ at the points $D,E,F$.
\end{thm}
\noindent {\bf Remark.} Below we will show that the fixed point (center) of the map $\textsf{M}$ is the point $S=OQ \cdot GV$, which coincides with the point $\gamma_P(P)=Q \cdot Q'$, where $\gamma_P$ is the generalized isogonal map, to be defined in Part IV of this paper, and $Q \cdot Q'$ is the point whose barycentric coordinates are the products of corresponding barycentric coordinates of $Q$ and $Q'$.
\begin{proof}
The proof of I, Theorem 3.8 shows that $\textsf{M}$ is a homothety or translation, since $K$ fixes all points at infinity. This map takes the triangle $T_{P'}^{-1}(D_0E_0F_0)$, which is inscribed in $\tilde{\mathcal{C}}_O$ by Theorem \ref{thm:K(Q)}, to the triangle $DEF$, which is inscribed in the inconic. It also takes the center $O=T_{P'}^{-1}K(Q)$ of $\tilde{\mathcal{C}}_O$ to the center $Q$ of $\mathcal{I}$. Now $Q$ is never the midpoint of a side of triangle $DEF$, because, for instance, $Q=A_0$ would imply $Q=T_P^{-1}(A_0)=D_0$ and then $P'=K^{-1}(Q)=K^{-1}(D_0)=A$, contradicting the fact that $P$ and $P'$ do not lie on the sides of $ABC$. It follows that $O$ is never the midpoint of a side of $T_{P'}^{-1}(D_0E_0F_0)$, and this implies that $\tilde{\mathcal{C}}_O$ is mapped to $\mathcal{I}$. This result holds even if the point $O=Q$ is infinite, since then $\textsf{M}$ fixes the tangent at $Q$, i.e., the line at infinity.
\end{proof}
\begin{cor}
\label{cor:tangent}
The tangent to the conic $\tilde{\mathcal{C}}_O$ at the point $T_{P'}^{-1}(D_0)$ is parallel to $BC$, with similar statements for the other vertices of the medial triangle of the anticevian triangle of $Q$.
\end{cor}
\begin{proof}
The tangent to $\tilde{\mathcal{C}}_O$ at $T_{P'}^{-1}(D_0)$ is mapped by $\textsf{M}$ to and therefore parallel to the tangent to $\mathcal{I}$ at $D$, which is $BC$.
\end{proof}
We also need the following proposition in order to complete the proof of the above remark, when $O=D_0$.
\begin{prop}
\label{thm:psi}
Let $\psi_1, \psi_2, \psi_3$ be the mappings of conjugate points on $l_\infty$ which are induced by the polarities corresponding to the inconic $\mathcal{I}$ (center $Q$), the circumconic $\tilde{\mathcal{C}}_O$ (center $O$), and the $9$-point conic $\mathcal{N}h$ (center $N$). If $O$ and $Q$ are finite, then $\psi_1=\psi_2=\psi_3$.
\end{prop}
\begin{proof}
If $T$ is any projective collineation, and $\pi$ is the polarity determining a conic $\mathcal{C}$, then the polarity determining $T(\mathcal{C})$ is $T\pi T^{-1}$. If $q$ is any non-self-conjugate line for $\pi$, then $T(q)$ is not self-conjugate, since its pole $T(Q)$ does not lie on $T(q)$. If $\psi$ is the mapping of conjugate points on $q$, then the polar of a point $A$ on $q$ is $\pi(A)=a=Q\psi(A)$. Hence, the polar of $T(A)$ on $T(q)$ is
\[T\pi T^{-1}(T(A))=T\pi(A)=T(Q\psi(A))=T(Q)T(\psi(A))=T(Q) T\psi T^{-1}(T(A)).\]
This shows that the mapping of conjugate points on $T(q)$ is $T \psi T^{-1}$. Now apply this to $\tilde{\mathcal{C}}_O$ and $\mathcal{N}h=K(\tilde{\mathcal{C}}_O)$, with $T=K$ and $q=l_\infty$. The complement mapping fixes the points on $l_\infty$, so $\psi_3=K\psi_2 K^{-1}=\psi_2$. Theorem \ref{thm: CtoI} and a similar argument shows that $\psi_1=\psi_2$.
\end{proof}
\begin{cor} The conclusion of Proposition \ref{thm:STparBC} holds if $A=H$ and $O=D_0$, where $S_1$ is defined to be the intersection of the conic $\tilde{\mathcal{C}}_O$ with the line through $A$ parallel to $QD$.
\label{cor:S1}
\end{cor}
\begin{proof}
The line $AO=AT_1$ is a diameter of $\tilde{\mathcal{C}}_O$, so the direction of $AS_1$, which equals the direction of $QD$, is conjugate to the direction of $S_1T_1$. But the direction of $QD$ is conjugate to the direction of $BC$, since $BC$ is tangent to $\mathcal{I}$ at $D$, and since $\psi_1=\psi_2$.
Therefore, $S_1T_1$ and $BC$ lie on the same point at infinity.
\end{proof}
Proposition \ref{thm:STparBC} and Corollary \ref{cor:S1} will find application in Part IV. To determine the fixed point of the mapping $\textsf{M}$ in Theorem \ref{thm: CtoI}, we need a lemma.
\begin{lem}
If $P$ does not lie on $\iota(l_\infty)$, the point $\tilde{H}=T_P^{-1}(H)$ is the midpoint of the segment joining $P'$ and $K^{-1}(H)$, and the reflection of the point $Q$ through $O$.
\end{lem}
\begin{proof}
Let $H_1$ be the midpoint of $P'K^{-1}(H)$. The Euclidean quadrilateral $H_1HQK^{-1}(H)$ is a parallelogram, because $K^{-1}(QH)=P'K^{-1}(H)$ and the segment $H_1K^{-1}(H)$ is therefore congruent and parallel to $QH$. The intersection of the diagonals is the point $O$, the midpoint of $HK^{-1}(H)$, so that $O$ is also the midpoint of $H_1Q$. On the other hand, $K(Q)$ is the midpoint of segment $P'Q$, so Theorem \ref{thm:K(Q)} gives that $O=T_{P'}^{-1}K(Q)$ is the midpoint of $T_{P'}^{-1}(P'Q)=Q \tilde{H}$, by (3). This implies that $H_1=\tilde{H}$.
\end{proof}
\begin{thm}
\label{thm: FixM}
If $P$ is ordinary and does not lie on a median of $ABC$ or on $\iota(l_\infty)$, the fixed point (center) of the map $\textsf{M}=T_P K^{-1} T_{P'}$ is $S=OQ \cdot GV=OQ \cdot O'Q'$.
\end{thm}
\noindent {\bf Remark.} The point $S$ is a generalization of the insimilicenter, since it is the fixed point of the map taking the circumconic to the inconic. See \cite{ki2}. \smallskip
\begin{proof}
Assume first that $\textsf{M}$ is a homothety. The fixed point $S$ of $\textsf{M}$ lies on $OQ$, since the proof of Theorem \ref{thm: CtoI} gives that $\textsf{M}(O)=Q$. Note that $O \neq Q$, since $T_{P'}(Q)=P' \neq K(Q)$, by I, Theorem 3.7. We claim that $\textsf{M}(O')=Q'$ also. We shall prove the symmetric statement $\textsf{M}'(O)=Q$, where $\textsf{M}'=T_{P'}K^{-1}T_P$. We have that
\[\textsf{M}'(O)=T_{P'} K^{-1} T_P(T_{P'}^{-1}K(Q))=T_{P'}K^{-1} \lambda^{-1}(K(Q)).\]
Now $K(Q)$ is the midpoint of $P'Q$, so $K^{-1} \lambda^{-1}(K(Q))$ is the midpoint of $K^{-1} \lambda^{-1}(P'Q)=K^{-1}(QH) = P'K^{-1}(H)$ (Theorem \ref{thm:lambda}), and therefore coincides with the point $\tilde{H}$, by the lemma. Therefore, by (3), we have
\[\textsf{M}'(O)=T_{P'}(\tilde{H})=Q.\]
Switching $P$ and $P'$ gives that $\textsf{M}(O')=Q'$, as claimed. Therefore, $S=OQ \cdot O'Q'$. Note that the lines $OQ$ and $O'Q'$ are distinct. If they were not distinct, then $O, O', Q, Q'$ would be collinear, and applying $K^{-1}$ would give that $H, H', P', P$ are collinear, which is impossible since these points all lie on the cevian conic $\mathcal{C}p$. (Certainly, $H \neq P$, since otherwise $O=T_{P'}^{-1}K(Q)=K(P)=Q'$, forcing $K(Q)=T_{P'}(Q')=Q'=K(P)$ and $P=Q$. Similarly, $H \neq P'$, so these are four distinct points.) This shows that $\eta(S)=S$, so $S$ lies on $GV$ and $S=OQ \cdot GV$.
If $\textsf{M}$ is a translation, then it has no ordinary fixed points, and the same arguments as before give that $OQ \parallel O'Q' \parallel GV$ and these lines are fixed by $\textsf{M}$. But then $\textsf{M}$ is a translation along $GV$, so its center is again $S=OQ \cdot GV=OQ \cdot O'Q'$.
\end{proof}
\begin{prop}
\label{thm: Zfixed}
If $P$ does not lie on a median of $ABC$, then $T_PK^{-1}(Z)=Z$.
\end{prop}
\begin{proof}
We first use II, Theorem 4.1, when $P$ and $P'$ are ordinary. The point $Z$ is defined symmetrically with respect to $P$ and $P'$, since it is the center of the conic $\mathcal{C}p=\mathcal{C}_{P'}$. Therefore II, Theorem 4.1 yields $Z=GV \cdot T_{P'}(GV)$, so the last theorem implies that
\[T_PK^{-1}(Z)=T_PK^{-1}(GV) \cdot T_PK^{-1}T_{P'}(GV)=T_P(GV) \cdot GV = Z,\]
since the point $S$ lies on $GV$. If $P$ lies on $\iota(l_\infty)$, then $T_PK^{-1}(Z)=Z$ follows immediately from II, Theorem 4.3 and the proof of II, Theorem 2.7 (in the case that $P'$ is infinite), since $T_P$ is a translation along $GG_1$ taking $G$ to $G_1=T_P(G)$. If $P$ is infinite, then $P'$ lies on $\iota(l_\infty)$, in which case $T_PK^{-1}=T_{P'}^{-1}K^{-2}$ by I, Theorem 3.14. This mapping fixes $Z$ by II, Theorem 4.3.
\end{proof}
\begin{prop}
\label{thm: ZonNh}
If $P$ does not lie on a median of $ABC$, then $Z$ lies on the $9$-point conic $\mathcal{N}h$ of the quadrangle $ABCH$, and $K^{-1}(Z)$ lies on $\tilde{\mathcal{C}}_O$.
\end{prop}
\begin{proof}
As we remarked in the paragraph just before Theorem \ref{thm:K(Q)}, the point $Z$ lies on $\mathcal{N}pp$. Theorem \ref{thm:K(Q)} implies that $T_{P'}^{-1}(Z)$ lies on $T_{P'}^{-1}(\mathcal{N}pp)=\tilde{\mathcal{C}}_O$. By Proposition \ref{thm: Zfixed}, with $P'$ in place of $P$, $T_{P'}^{-1}(Z)=K^{-1}(Z)$. Since $K^{-1}(Z)$ lies on $\tilde{\mathcal{C}}_O$, the point $Z$ lies on $K(\tilde{\mathcal{C}}_O)=\mathcal{N}h$.
\end{proof}
\begin{prop}
\label{prop:PhiP}
\begin{enumerate}[a)]
\item The map $\Phi_P=\textsf{M} \ \circ K^{-1}$ satisfies
\[\Phi_P(K(S))=S, \ \Phi_P(N)=Q, \ \textrm{and} \ \ \Phi_P(K(Q'))=T_P(P).\]
The center of the homothety or translation $\Phi_P$ is the common intersection of the lines $GV, NQ,$ and $K(Q')T_P(P)$.
\item Also, $\Phi_P = \Phi_{P'}$, and the maps $T_PK^{-1}$ and $T_{P'}K^{-1}$ commute with each other.
\item $T_{P'}(P')$ lies on the line $OQ$, and $T_P(P)$ lies on $O'Q'$.
\end{enumerate}
\end{prop}
\begin{proof}
a) The map $\Phi_P$ is a homothety or translation by the same argument as in Theorem \ref{thm: CtoI}. We have $\Phi_P(K(S))=\textsf{M}(S)=S$, while
\begin{align*}
\Phi_P(N)&=T_P \circ K^{-1} \circ T_{P'} \circ K^{-1}(K(O)) = T_P K^{-1} T_{P'}(O)\\
&=T_PK^{-1}(K(Q))=T_P(Q)=Q,
\end{align*}
and
\begin{align*}
\Phi_P(K(Q'))&=T_P \circ K^{-1} \circ T_{P'} \circ K^{-1}(K(Q')) = T_P K^{-1} T_{P'}(Q')\\
&=T_PK^{-1}(Q')=T_P(P).
\end{align*}
It follows that $\Phi_P$ fixes the three lines $GV, NQ$, and $K(Q')T_P(P)$. \smallskip
b) Note that
\begin{align*}
\eta \Phi_P &= \eta \circ T_P \circ K^{-1} \circ T_{P'} \circ K^{-1}\\
&=T_{P'} \circ \eta \circ K^{-1} \circ T_{P'} \circ K^{-1} = T_{P'} \circ K^{-1} \circ T_{P} \circ K^{-1} \circ \eta\\
&=\Phi_{P'} \eta,
\end{align*}
since $\eta$ and $K^{-1}$ commute. On the other hand, the center of $\Phi_P$ lies on the line $GV$, which is the line of fixed points for the affine reflection $\eta$. It follows that $\eta \Phi_P \eta=\Phi_{P'}$ has $l_\infty$ as its axis and $\eta(F)=F$ as its center, if $F$ is the center of $\Phi_P$ (a homology or an elation). But both maps $\Phi_P$ and $\Phi_{P'}$ share the pair of corresponding points $\{ K(S), S \}$, also lying on $GV$. Therefore, the two maps must be the same. (See [co2, pp. 53-54].) \smallskip
c) From a) and b) we have $\Phi_P(K(Q))=\Phi_{P'}(K(Q))=T_{P'}(P')$. On the other hand, $S$ on $OQ$ implies that $K(S)$ lies on $NK(Q)$, so $K(Q)$ lies on the line $K(S)N$. Mapping by $\Phi_P$ and using a) shows that $T_{P'}(P')$ lies on $\Phi_P(K(S)N) = SQ=OQ$.
\end{proof}
\noindent {\bf The Generalized Feuerbach Theorem.} \smallskip
{\it If $P$ does not lie on a median of triangle $ABC$, the map
\[\Phi_P=\textsf{M} \circ K^{-1}=T_P \circ K^{-1} \circ T_{P'} \circ K^{-1}\]
takes the $9$-point conic $\mathcal{N}h$ to the inconic $\mathcal{I}$ and fixes the point $Z$, the center of $\mathcal{C}p$. Thus, $Z$ lies on $\mathcal{I}$, and the conics $\mathcal{N}h$ and $\mathcal{I}$ are tangent to each other at $Z$. The same map $\Phi_P$ also takes the $9$-point conic $\mathcal{N}_{H'}$ to the inconic $\mathcal{I}'$ which is tangent to the sides of $ABC$ at $D_3, E_3, F_3$. The point $Z$ is the center of the map $\Phi_P$ (a homology or elation).}
\begin{proof}
The mapping $\Phi_P$ takes $\mathcal{N}h$ to $\mathcal{I}$ by Theorems \ref{thm:NH} and \ref{thm: CtoI}. Applying Proposition \ref{thm: Zfixed} to the points $P'$ and $P$, we see that $\Phi_P$ fixes $Z$, so $Z$ lies on $\mathcal{I}$ by Proposition \ref{thm: ZonNh}. First assume $Z$ is an ordinary point. As a homothety with center $Z$, $\Phi_P$ fixes any line through $Z$, and therefore fixes the tangent $t$ to $\mathcal{N}h$ at $Z$. Since tangents map to tangents, $t$ is also the tangent to $\mathcal{I}$ at $Z$, which proves the theorem in this case. If $Z \in l_\infty$ and $\mathcal{N}h$ is a parabola, the same argument applies, since the tangent to $\mathcal{N}h$ at $Z$ is just $l_\infty=\Phi_P(l_\infty)$. Assume now that $\mathcal{N}h$ is a hyperbola. Then $Z$ must be a point on one of the asymptotes $t$ for $\mathcal{N}h$, which is also the tangent at $Z$. But $Z$ is on the line $GV$, and by Proposition \ref{prop:PhiP} the center of $\Phi_P$ lies on $GV$. It follows that if $\Phi_P$ is a translation, it is a translation along $GV$, and therefore fixes the parallel line $t$. This will prove the assertion if we show that $\Phi_P$ cannot be a homothety when $Z$ is infinite, i.e., it has no ordinary fixed point. Let $X$ be a fixed point of $\Phi_P$ on the line $GV$. Writing $\Phi_P=\textsf{M}_1\textsf{M}_2$, with $\textsf{M}_1=T_P \circ K^{-1}$ and $\textsf{M}_2=T_{P'} \circ K^{-1}$, we have by part b) of Proposition \ref{prop:PhiP} that
\[\Phi_P(\textsf{M}_1(X))=\textsf{M}_1(\textsf{M}_2\textsf{M}_1(X))=\textsf{M}_1(X),\]
so $\textsf{M}_1(X)$ is a fixed point of $\Phi_P$ on the line $\textsf{M}_1(GV)=T_P(GV)$. Assuming $X$ is ordinary, this shows that $\textsf{M}_1(X)=X$, since a nontrivial homothety has a unique ordinary fixed point. Hence, $X \in GV \cdot T_P(GV)$. But $Z$ is infinite and $Z=GV \cdot T_P(GV)$, so this is impossible. Thus, $\Phi_P$ has no ordinary fixed point in this case and its center is $Z$.
\end{proof}
\begin{cor}
\label{thm:genFeuer}
\begin{enumerate}[a)]
\item If $N=K(O)$ is the midpoint of segment $OH$, the center $Z$ of $\mathcal{C}p$ lies on the line $QN$, and $Z=GV \cdot QN$.
\item The point $K^{-1}(Z)$ lies on the line $OP'$, so that $K^{-1}(Z)=GV \cdot OP'$ lies on $\tilde{\mathcal{C}}_O$. This point is the center of the anticevian conic $T_P^{-1}(\mathcal{C}p)$. (See II, Theorem 3.3.)
\item If $Z$ is infinite and the conics $\mathcal{N}h$ and $\mathcal{I}$ are hyperbolas, the line $QN$ is a common asymptote of $\mathcal{N}h$ and $\mathcal{I}$.
\end{enumerate}
\end{cor}
\begin{proof}
For part a), Proposition \ref{prop:PhiP} shows that the center $Z$ of $\Phi_P$ lies on the line $NQ$. For part b), just note that $K^{-1}(NQ)=OP'$ and $K^{-1}(\mathcal{N}h)=\tilde{\mathcal{C}}_O$. The last assertion follows from Proposition \ref{thm: Zfixed}. For part c), the asymptote of $\mathcal{N}h$ through $Z$ must lie on the center of $\mathcal{N}h$, which is $N$, and the asymptote of $\mathcal{I}$ through $Z$ must lie on the center of $\mathcal{I}$, which is $Q$. Therefore, the common asymptote is $QN$.
\end{proof}
\begin{figure}
\caption{$\mathcal{N}h$}\label{fig:3.2}
\end{figure}
The Generalized Feuerbach Theorem applies to all four points of Theorem \ref{thm:Pabc}, and therefore generalizes the full statement of Feuerbach's theorem in the case that $P$ is the Gergonne point. Thus, $\mathcal{N}h$ is tangent to four distinct conics, each of which is tangent to the sides of $ABC$, namely, the inconics corresponding to each of the points $P, P_a, P_b, P_c$. Figure \ref{fig:3.2} shows the configuration in case $P$ is outside the Steiner circumellipse, in which case $\mathcal{N}h, \tilde{\mathcal{C}}_O$, and $\mathcal{I}$ are hyperbolas. The point marked $1$ is a general point on the conic $\mathcal{N}pp$, and the points marked $2$ and $3$ are the images of $1$ on $T_{P'}^{-1}(\mathcal{N}pp)=\tilde{\mathcal{C}}_O$ and on $K(\tilde{\mathcal{C}}_O)=\mathcal{N}h$, respectively. As $P$ varies on a line perpendicular to $BC$ in this picture, the locus of the point $Z$ is pictured in teal. This locus consists of three branches which are each tangent to a side of $ABC$ at its midpoint. Figure 4 pictures a situation in which $Z$ is infinite. The point $P$ in this figure was found using the ratios $BD/BC=\frac{15}{16}$ and $BF/AF=\frac{6}{5}$.
\begin{thm}
\label{thm:tildeZ}
The point $\tilde{Z}=\textsf{R}_OK^{-1}(Z)$ is the fourth point of intersection of the conics $\mathcal{C}p$ and $\tilde{\mathcal{C}}_O$, the other three points being the vertices $A,B,C$.
\end{thm}
\begin{proof}
Theorem \ref{thm:NH} and Proposition \ref{thm: ZonNh} show that $\tilde{Z}=\textsf{R}_OK^{-1}(Z)=\textsf{H}^{-1}(Z)$ lies on $\tilde{\mathcal{C}}_O$. Since $T_{P'}$ maps $\tilde{\mathcal{C}}_O$ to $\mathcal{N}pp$, we know that the half-turns through the points $O$ and $K(Q)=T_{P'}(O)$ are conjugate by $T_{P'}$, namely:
\[T_{P'} \circ \textsf{R}_O \circ T_{P'}^{-1} = \textsf{R}_{K(Q)}.\]
Therefore, $T_{P'}(\tilde{Z})=T_{P'}\textsf{R}_OK^{-1}(Z)=T_{P'} \textsf{R}_O T_{P'}^{-1}(Z)=\textsf{R}_{K(Q)}(Z)$, the second equality following from Proposition \ref{thm: Zfixed}. In other words, $Z$ and $T_{P'}(\tilde{Z})$ are opposite points on the conic $\mathcal{N}pp$. Furthermore, $Z$ lies on $QN$, so $T_{P'}(\tilde{Z})$ lies on the parallel line $l=\textsf{R}_{K(Q)}(QN)$, and since $K(Q)$ is the midpoint of $QP'$, $l$ is the line through $P'$ parallel to $QN$, i.e. $l=OP'=K^{-1}(QN)$. Hence $T_{P'}(\tilde{Z})$ lies on $OP'$, while Corollary \ref{thm:genFeuer}b) implies that $\tilde{Z}=\textsf{R}_OK^{-1}(Z)$ also lies on $OP'$. Therefore, $\tilde{Z}, P'$, and $T_{P'}(\tilde{Z})$ are collinear. Now II, Corollary 2.2b) implies that $\tilde{Z}$ lies on $\mathcal{C}_{P'}=\mathcal{C}p$. This shows that $\tilde{Z} \in \mathcal{C}p \cap \tilde{\mathcal{C}}_O$.
\end{proof}
\begin{figure}
\caption{$Z$ infinite, $\mathcal{N}h$}
\end{figure}
\end{section}
\begin{section}{The special case $H=A, O=D_0$.}
We now consider the set of all points $P$ such that $H=A$ and $O=D_0$. We start with a lemma.
\begin{lem}
Provided the generalized orthocenter $H$ of $P$ is defined, the following are equivalent:
\begin{enumerate}[(a)]
\item $H = A$.
\item $QE = AF$ and $QF = AE$.
\item $F_3$ is collinear with $Q$, $E_0$, and $K(E_3)$.
\item $E_3$ is collinear with $Q$, $F_0$, and $K(F_3)$.
\label{lem:EquivH=A}
\end{enumerate}
\end{lem}
\begin{proof}
We use the fact that $K(E_3)$ is the midpoint of segment $BE$ and $K(F_3)$ is the midpoint of segment $CF$ from I, Corollary 2.2. Statement (a) holds iff $QE \parallel AB$ and $QF \parallel AC$, i.e. iff $AFQE$ is a parallelogram, which is equivalent to (b). Suppose (b) holds. Let $X = BE \cdot QF_3$. Then triangles $BXF_3$ and $EXQ$ are congruent since $QE \parallel BF_3 = AB$ and $QE = AF = BF_3$. Therefore, $BX = EX$, i.e. $X$ is the midpoint $K(E_3)$ of $BE$, so $Q, F_3$, and $X = K(E_3)$ are collinear. Similarly, $Q, E_3$, and $K(F_3)$ are collinear. This shows (b) $\Rightarrow$ (c), (d).
Next, we show (c) and (d) are equivalent. Suppose (c) holds. Since $P', E_3, B$ are collinear, $Q, K(E_3), E_0$ are collinear and the line $F_3E_0$ is the complement of the line $BE_3$, hence the two lines are parallel and
\begin{equation}
\frac{AF_3}{F_3B} = \frac{AE_0}{E_0E_3}.
\label{eqn:Ratios1}
\end{equation}
Conversely, if the equality holds, then the lines are parallel and $F_3$ lies on the line through $K(E_3)$ parallel to $P'E_3$, i.e. the line $K(P'E_3) = QK(E_3)$, so (c) holds. Similarly, (d) holds if and only if
\begin{equation}
\frac{AE_3}{E_3C} = \frac{AF_0}{F_0F_3}.
\label{eqn:Ratios2}
\end{equation}
A little algebra shows that (\ref{eqn:Ratios1}) holds if and only if (\ref{eqn:Ratios2}) holds. Using signed distances, and setting $AE_0/E_0E_3 = x$, we have $AE_3/E_3C = (x + 1)/(x - 1)$. Similarly, if $AF_0/F_0F_3 = y$, then $AF_3/F_3B = (y + 1)/(y - 1)$. Now (\ref{eqn:Ratios1}) is equivalent to $x = (y+1)/(y-1)$, which is equivalent to $y = (x+1)/(x-1)$, hence also to (\ref{eqn:Ratios2}). Thus, (c) is equivalent to (d). Note that this part of the lemma does not use that $H$ is defined. \smallskip
Now if (c) or (d) holds, then they both hold. We will show (b) holds in this case. By the reasoning in the previous paragraph, we have $F_3Q \parallel E_3P'$ and $E_3Q \parallel F_3P'$, so $F_3P'E_3Q$ is a parallelogram. Therefore, $F_3Q = P'E_3 = 2\cdot QK(E_3)$, so $F_3K(E_3) = K(E_3)Q$. This implies the triangles $F_3K(E_3)B$ and $QK(E_3)E$ are congruent (SAS), so $AF = BF_3 = QE$. Similarly, $AE = CE_3 = QF$, so (b) holds.
\end{proof}
\begin{thm}
\label{thm:locus}
The locus ${\mathscr L}_A$ of points $P$ such that $H = A$ is a subset of the conic $\overline{\mathcal{C}}_A$ through $B, C, E_0$, and $F_0$, whose tangent at $B$ is $K^{-1}(AC)$ and whose tangent at $C$ is $K^{-1}(AB)$. Namely, $\mathscr{L}_A = \overline{\mathcal{C}}_A \setminus \{B, C, E_0, F_0\}$.
\end{thm}
\begin{proof}
Given $E$ on $AC$ we define $F_3$ as $F_3 = E_0K(E_3) \cdot AB$, and $F$ to be the reflection of $F_3$ in $F_0$. Then we have the following chain of projectivities:
\[BE \ \barwedge \ E \ \barwedge \ E_3 \ \stackrel{G}{\doublebarwedge} \ K(E_3) \ \stackrel{E_0}{\doublebarwedge} \ F_3 \ \barwedge \ F \ \barwedge \ CF.\]
Then $P = BE \cdot CF$ varies on a line or a conic. We want to show: (a) for a point $P$ thus defined, $H = A$; and (b) if $H = A$ for some $P$, then $P$ arises in this way, i.e. $F_3$ is on $E_0K(E_3)$. Both of these facts follow from the above lemma. \smallskip
Now we list four cases in which $H$ is undefined, namely when $P = B, C, E_0, F_0$. Let $A_\infty, B_\infty, C_\infty$ represent the points at infinity on the respective lines $BC, AC, AB$. \smallskip
\begin{enumerate}[1.]
\item For $E = B_\infty = E_3 = K(E_3)$, we have $E_0K(E_3) = AC$ so $F_3 = A, F = B$, and $P = BE \cdot CF = B$.
\item For $E = C$, we have $E_3 = A, K(E_3) = D_0, E_0K(E_3) = D_0E_0 \parallel AB, F = F_3 = C_\infty$, so $P = BE\cdot CF = C$.
\item For $E = E_0$, we have $E_3 = E_0$ and $K(E_0)$ is the midpoint of $BE_0$ by I, Corollary 2.2, so $F_3 = B, F = A$, and $P = BE\cdot CF = E_0$.
\item For $E = A$, we have $E_3 = C, K(E_3) = F_0, F_3 = F = F_0$, and $P = BE\cdot CF = F_0$.
\end{enumerate}
\begin{figure}
\caption{The conics $\overline{\mathcal{C}}_A$, $\overline{\mathcal{C}}_B$, and $\overline{\mathcal{C}}_C$.}
\end{figure}
Since the four points $B, C, E_0, F_0$ are not collinear, this shows that the locus of points $P=BE \cdot CF$ is a conic $\overline{\mathcal{C}}_A$ through $B, C, E_0, F_0$. Moreover, the locus $\mathscr{L}_A$ of points $P$ such that $H = A$ is a subset of $\overline{\mathcal{C}}_A \setminus \{B, C, E_0, F_0\}$. \smallskip
We claim that if $E$ is any point on line $AC$ other than $A, C, E_0$, or $B_\infty$, then $P$ is a point for which $H$ is well-defined. First, $E_3$ is an ordinary point because $E \ne B_\infty$. Second, because $E \ne B_\infty$, the line $E_0K(E_3)$ is not a sideline of $ABC$. The line $E_0K(E_3)$ intersects $AB$ in $A$ if and only if $K(E_3)$ lies on $AC$, which is true only if $E_3 = B_\infty$. The line $E_0K(E_3)$ intersects $AB$ in $B$ iff $K(E_3)$ is on $BE_0$, which holds iff $E_3$ is on $K^{-1}(B)B = BE_0$, and this is the case exactly when $E = E_3 = E_0$. Furthermore, the line $E_0K(E_3)$ is parallel to $AB$ iff $K(E_3)=D_0$ and $E_3=A$, or $K(E_3) = E_3 = E = C_\infty$, which is not on $AC$. Thus, the line $E_0K(E_3)$ intersects $AB$ in an ordinary point which is not a vertex, so $F_3$ and $F$ are not vertices and $P=BE\cdot CF$ is a point not on the sides of $ABC$. \smallskip
It remains to show that $P$ does not lie on the sides of the anticomplementary triangle of $ABC$. If $P$ is on $K^{-1}(AB)$ then $F=F_3 = C_\infty$, which only happens in the excluded case $E=C$ (see Case 2 above). If $P$ is on $K^{-1}(AC)$ then $E= B_\infty$, which is also excluded. If $P$ is on $K^{-1}(BC)$ then $P'$ is also on $K^{-1}(BC)$ so $Q=K(P')$ is on $BC$. \smallskip
Suppose $Q$ is on the same side of $D_0$ as $C$. Then $P'$ is on the opposite side of line $AD_0$ from $C$, so it is clear that $CP'$ intersects $AB$ in the point $F_3$ between $A$ and $B$. If $Q$ is between $D_0$ and $C$, then $F_3$ is between $A$ and $F_0$ (since $F_0, C$ and $K^{-1}(C)$ are collinear), and it is clear that $F_3E_0$ can only intersect $BC$ in a point outside of the segment $D_0C$, on the opposite side of $C$ from $Q$. But this is a contradiction, since by construction $F_3, E_0$, and $K(E_3)$ are collinear, and $Q=K(P')$ lies on $K(BE_3)=E_0K(E_3)$. On the other hand, if the betweenness relation $D_0 * C * Q$ holds, then $F_3$ is between $B$ and $F_0$, and it is clear that $F_3E_0$ can only intersect $BC$ on the opposite side of $B$ from $C$. This contradiction also holds when $P'=Q$ is a point on the line at infinity, since then $F_3=B$, and $B, E_0$ and $Q=A_\infty$ (the point at infinity on $BC$) are not collinear. A symmetric argument applies if $Q$ is on the same side of $D_0$ as $B$, using the fact that parts (c) and (d) of the lemma are equivalent. Thus, no point $P$ in $\overline{\mathcal{C}}_A \setminus \{B, C, E_0, F_0\}$ lies on a side of $ABC$ or its anticomplementary triangle, and the point $H$ is well-defined; further, $H=A$ for all of these points. \smallskip
Finally, by the above argument, there is only one point $P$ on $\overline{\mathcal{C}}_A$ that is on the line $K^{-1}(AB)$, namely $C$, and there is only one point $P$ on $\overline{\mathcal{C}}_A$ that is on the line $K^{-1}(AC)$, namely $B$, so these two lines are tangents to $\overline{\mathcal{C}}_A$.
\end{proof}
This theorem shows that the locus of points $P$, for which the generalized orthocenter $H$ is a vertex of $ABC$, is the union of the conics $\overline{\mathcal{C}}_A \cup \overline{\mathcal{C}}_B \cup \overline{\mathcal{C}}_C$ minus the vertices and midpoints of the sides.
In the next proposition and its corollary, we consider the special case in which $H=A$ and $D_3$ is the midpoint of $AP'$. We will show that, in this case, the map $\textsf{M}$ is a translation. (See Figure \ref{fig:4.2}.) We first show that this situation occurs.
\begin{lem}
\label{lem:equilateral}
If the equilateral triangle $ABC$ has sides of length $2$, then there is a point $P$ with $AP \cdot BC=D$ and $d(D_0,D)=\sqrt{2}$, such that $D_3$ is the midpoint of the segment $AP'$ and $H=A$.
\end{lem}
\begin{figure}
\caption{Proof of Lemma 4.3.}
\end{figure}
\begin{proof}
(See Figure \ref{fig:4.1}.) We will construct $P'$ such that $D_3$ is the midpoint of $AP'$ and $H=A$, and then show that $P$ satisfies the hypothesis of the lemma. The midpoint $D_0$ of $BC$ satisfies $D_0B = D_0C = 1$ and $AD_0 = \sqrt3$. Let the triangle be positioned as in Figure 3. Let $\tilde A$ be the reflection of $A$ in $D_0$, and let $D$ be a point on $BC$ to the right of $C$ such that $D_0D = \sqrt{2}$. In order to ensure that the reflection $D_3$ of $D$ in $D_0$ is the midpoint of $AP'$, take $P'$ on $l=K^{-2}(BC)$ with $P'\tilde A = 2\sqrt2$ and $P'$ to the left of $\tilde A$. Then $Q = K(P')$ is on $K^{-1}(BC)$, to the right of $A$, and $AQ = \sqrt{2}$. Let $E_3$ and $F_3$ be the traces of $P'$ on $AC$ and $AB$, respectively. \smallskip
We claim $BF_3 = \sqrt{2}$. Let $M$ be the intersection of $BC$ and the line through $F_3$ parallel to $AD_0$. Then triangles $BMF_3$ and $BD_0A$ are similar, so $F_3M = \sqrt{3} \cdot MB$. Let $N_1$ be the intersection of $BC$ and the line through $P'$ parallel to $AD_0$. Triangles $P'N_1C$ and $F_3MC$ are similar, so
\[\frac{F_3M}{MC} = \frac{P'N_1}{N_1C} = \frac{AD_0}{P'\tilde A + 1} = \frac{\sqrt{3}}{2\sqrt{2} + 1}.\]
Therefore,
\[\frac{\sqrt{3}}{2\sqrt{2} + 1} = \frac{F_3M}{MC} = \frac{\sqrt{3} \cdot MB}{MB + 2}\]
which yields that $MB = 1/\sqrt{2}$. Then $BF_3=\sqrt{2}$ is clear from similar triangles. \smallskip
Now, let $F$ be the reflection of $F_3$ in $F_0$ (the midpoint of $AB$). Then $AQF$ is an equilateral triangle because $m(\angle FAQ)=60^\circ$ and $AQ \cong BF_3 \cong AF$, so $\angle AQF \cong \angle AFQ$. Therefore, $QF \parallel AC$. It follows that the line through $F_0$ parallel to $QF$ is parallel to $AC$, hence is a midline of triangle $ABC$ and goes through $D_0$. (A similar proof shows that $QE \parallel AB$ so the line through $E_0$ parallel to $QE$ goes through $D_0$.) This implies $O = D_0$. Clearly, $P=AD \cdot CF$ is a point outside the triangle $ABC$, not lying on an extended side of $ABC$ or its anticomplementary triangle, which satisfies the conditions of the lemma.
\end{proof}
The next proposition deals with the general case, and shows that the point $P$ we constructed in the lemma lies on a line through the centroid $G$ parallel to $BC$.
\begin{prop}
\label{prop:HA}
Assume that $H=A, O=D_0$, and $D_3$ is the midpoint of $AP'$. Then the conic $\tilde{\mathcal{C}}_O = \iota(l)$, where $l=K^{-1}(AQ)=K^{-2}(BC)$ is the line through the reflection $\tilde{A}$ of $A$ in $O$ parallel to the side $BC$. The points $O, O', P, P'$ are collinear, with $d(O,P')=3d(O,P)$, and the map $\textsf{M}$ taking $\tilde{\mathcal{C}}_O$ to the inconic $\mathcal{I}$ is a translation. In this situation, the point $P$ is one of the two points in the intersection $l_G \cap \tilde{\mathcal{C}}_O$, where $l_G$ is the line through the centroid $G$ which is parallel to $BC$.
\end{prop}
\begin{proof}
(See Figure \ref{fig:4.2}.) Since the midpoint $R_1'$ of segment $AP'$ is $D_3$, lying on $BC$, $P'$ lies on the line $l$ which is the reflection of $K^{-1}(BC)$ (lying on $A$) in the line $BC$. It is easy to see that this line is $l=K^{-2}(BC)$, and hence $Q=K(P')$ lies on $K^{-1}(BC)$. From I, Corollary 2.6 we know that the points $D_0, R_1'=D_3$, and $K(Q)$ are collinear. Since $K(Q)$ is the center of the conic $\mathcal{N}pp$, lying on $D_0$ and $D_3$, $K(Q)$ is the midpoint of segment $D_0D_3$ on $BC$. Applying the map $T_{P'}^{-1}$ gives that $O=T_{P'}^{-1}(K(Q))$ is the midpoint of $T_{P'}^{-1}(D_3D_0)=AT_{P'}^{-1}(D_0)$. It follows that $T_{P'}^{-1}(D_0)=\tilde{A}$ is the reflection of $A$ in $O$, so that $\tilde{A} \in \tilde{\mathcal{C}}_O$. Moreover, $K(A)=O$, so $\tilde{A}=K^{-1}(A)$ lies on $l=K^{-1}(AQ) \parallel BC$. \smallskip
Next we show that $\tilde{\mathcal{C}}_O = \iota(l)$, where the image $\iota(l)$ of $l$ under the isotomic map is a circumconic of $ABC$ (see Lemma 3.4 in Part IV). It is easy to see that $\iota(\tilde{A})= \tilde{A}$, since $\tilde{A} \in AG$ and $AB\tilde{A}C$ is a parallelogram. Therefore, both conics $\tilde{\mathcal{C}}_O$ and $\iota(l)$ lie on the $4$ points $A,B,C, \tilde{A}$. To show they are the same conic, we show they are both tangent to the line $l$ at the point $\tilde{A}$. From Corollary \ref{cor:tangent} the tangent to $\tilde{\mathcal{C}}_O$ at $\tilde{A}=T_{P'}^{-1}(D_0)$ is parallel to $BC$, and must therefore be the line $l$. To show that $l$ is tangent to $\iota(l)$, let $L$ be a point on $l \cap \iota(l)$. Then $\iota(L) \in l \cap \iota(l)$. If $\iota(L) \neq L$, this would give three distinct points, $L, \iota(L)$, and $\tilde{A}$, lying on the intersection $l \cap \iota(l)$, which is impossible. Hence, $\iota(L)=L$, giving that $L$ lies on $AG$ and therefore $L=\tilde{A}$. Hence, $\tilde{A}$ is the only point on $l \cap \iota(l)$, and $l$ is the tangent line. This shows that $\tilde{\mathcal{C}}_O$ and $\iota(l)$ share $4$ points and the tangent line at $\tilde{A}$, proving that they are indeed the same conic. \smallskip
From this we conclude that $P=\iota(P')$ lies on $\tilde{\mathcal{C}}_O$. Hence, $P$ is the fourth point of intersection of the conics $\tilde{\mathcal{C}}_O$ and $\mathcal{C}p=ABCPQ$. From Theorem \ref{thm:tildeZ} we deduce that $P= \tilde{Z}=R_OK^{-1}(Z)$, which we showed in the proof of that theorem to be a point on the line $OP'$. Hence, $P, O, P'$ are collinear, and applying the affine reflection $\eta$ gives that $O'$ lies on the line $PP'$, as well. Now, $Z$ is the midpoint of $HP=AP$, since $\textsf{H}=K \circ \textsf{R}_O$ is a homothety with center $H=A$ and similarity factor $1/2$. Since $Z$ lies on $GV$, where $V=PQ \cdot P'Q'$, it is clear that $P$ and $Q$ are on the opposite side of the line $GV$ from $P', Q'$, and $A$. The relation $K(\tilde{A})=A$ means that $\tilde{A}$ and also $O$ are on the opposite side of $GV$ from $A$ and $O'$. Also, $J=K^{-1}(Z)=\textsf{R}_O(\tilde{Z})=\textsf{R}_O(P)$ lies on the line $GV$ and on the conic $\tilde{\mathcal{C}}_O$. This implies that $O$ lies between $J$ and $P$, and applying $\eta$ shows that $O'$ lies between $J$ and $P'$. Hence, $OO'$ is a subsegment of $PP'$, whose midpoint is exactly $J=K^{-1}(Z)$, since this is the point on $GV$ collinear with $O$ and $O'$. Now the map $\eta$ preserves distances along lines parallel to $PP'$ (see Part II), so $JO' \cong JO \cong OP \cong O'P'$, implying that $OO'$ is half the length of $PP'$. Furthermore, segment $QQ'=K(PP')$ is parallel to $PP'$ and half as long. Hence, $OO' \cong QQ'$, which implies that $OQQ'O'$ is a parallelogram. Consequently, $OQ \parallel O'Q'$, and Theorem \ref{thm: FixM} shows that $\textsf{M}$ is a translation. Thus, the circumconic $\tilde{\mathcal{C}}_O$ and the inconic $\mathcal{I}$ are congruent in this situation. \smallskip
This argument implies the distance relation $d(O,P') =3d(O,P)$. \smallskip
The relation $O'Q' \parallel OQ$ implies, finally, that $T_P(O'Q') \parallel T_P(OQ)$, or $K(Q')P \parallel A_0Q = AQ$, since $O'=T_P^{-1}K(Q')$ from Theorem \ref{thm:HO} and $A_0$ is collinear with $A$ and the fixed point $Q$ of $T_P$ by I, Theorem 2.4. Hence $PG=PQ'=PK(Q')$ is parallel to $AQ$ and $BC$. \smallskip
\end{proof}
\begin{figure}
\caption{The case $H=A, O=D_0$.}
\end{figure}
There are many interesting relationships in the diagram of Figure \ref{fig:4.2}. We point out several of these relationships in the following corollary.
\begin{cor}
\label{cor:HArel} Assume the hypotheses of Proposition \ref{prop:HA}.
\begin{enumerate}[a)]
\item If $Q_a$ is the vertex of the anticevian triangle of $Q$ (with respect to $ABC$) opposite the point $A$, then the corresponding point $P_a$ is the second point of intersection of the line $PG$ with $\tilde{\mathcal{C}}_O$.
\item The point $A_3=T_P(D_3)$ is the midpoint of segment $OD$ and $P$ is the centroid of triangle $ODQ$.
\item The ratio $\frac{OD}{OC}=\sqrt{2}$.
\end{enumerate}
\end{cor}
\begin{proof}
The anticevian triangle of $Q$ with respect to $ABC$ is the triangle $T_{P'}^{-1}(ABC)=Q_aQ_bQ_c$. (See I, Cor. 3.11 and Section 2 above.) Since $D_3$ is the midpoint of $AP'$, this gives that $T_{P'}^{-1}(D_3)=A$ is the midpoint of $T_{P'}^{-1}(AP')=Q_aQ$. Therefore, $Q_a$ lies on the line $AQ=K^{-1}(BC)$, so $P_a'=K^{-1}(Q_a)$ lies on the line $l$ and is the reflection of $P'$ in the point $\tilde{A}$. Thus, the picture for the point $P_a$ is obtained from the picture for $P$ by performing an affine reflection about the line $AG=A\tilde{A}$ in the direction of the line $BC$. This shows that $P_a$ also lies on the line $PG \parallel BC$. The conic $\tilde{\mathcal{C}}_O$ only depends on $O$, so this reflection takes $\tilde{\mathcal{C}}_O$ to itself. This proves a). \smallskip
To prove b) we first show that $P$ lies on the line $Q\tilde{A}$. Note that the segment $K(P'\tilde{A})=AQ$ is half the length of $P'\tilde{A}$, so $P'\tilde{A} \cong Q_aQ$. Hence, $Q_aQ\tilde{A}P'$ is a parallelogram, so $Q\tilde{A} \cong Q_aP'$. Suppose that $Q\tilde{A}$ intersects line $PP'$ in a point $X$. From the fact that $K(Q)$ is the midpoint of $D_3D_0$ we know that $Q$ is the midpoint of $K^{-1}(D_3)A$. Also, $D_3Q'$ lies on the point $\lambda(A)=\lambda(H)=Q$, by II, Theorem 3.4(b) and Theorem \ref{thm:lambda} of this paper. It follows that $K^{-1}(D_3), P, P'$ are collinear and $K^{-1}(D_3)QX \sim P'\tilde{A}X$, with similarity ratio $1/2$, since $K^{-1}(D_3)Q$ has half the length of $P'\tilde{A}$. Hence $d(X, K^{-1}(D_3)) = \frac{1}{2} d(X, P')$. On the other hand, $d(O,P) = \frac{1}{3} d(O,P')$, whence it follows, since $O$ is halfway between $P'$ and $K^{-1}(D_3)$ on line $BC$, that $d(P, K^{-1}(D_3)) =\frac{1}{2} d(P, P')$. Therefore, $X=P$ and $P$ lies on $Q\tilde{A}$. \smallskip
Now, $\textsf{P}=AD_3OQ$ is a parallelogram, since $K(AP')=OQ$, so opposite sides in $AD_3OQ$ are parallel. Hence, $T_P(\textsf{P})=DA_3A_0Q$ is a parallelogram, whose side $A_3A_0$ lies on the line $EF$. Applying the dilatation $\textsf{H}=K\textsf{R}_O$ (with center $H=A$) to the collinear points $Q, P, \tilde{A}$ shows that $\textsf{H}(Q), Z$, and $O$ are collinear. On the other hand, $O=D_0, Z$, and $A_0$ are collinear by I, Corollary 2.6 (since $Z=R$ is the midpoint of $AP$), and $A_0$ lies on $AQ$ by I, Theorem 2.4. This implies that $A_0=\textsf{H}(Q)=AQ \cdot OZ$ is the midpoint of segment $AQ$, and therefore $A_3$ is the midpoint of segment $OD$. Since $P$ lies on the line $PG$, $2/3$ of the way from the vertex $Q$ of $ODQ$ to the opposite side $OD$, and lies on the median $QA_3$, it must be the centroid of $ODQ$. This proves b). \smallskip
To prove c), we apply an affine map taking $ABC$ to an equilateral triangle. It is clear that such a map preserves all the relationships in Figure 4. Thus we may assume $ABC$ is an equilateral triangle whose sidelengths are $2$. By Lemma \ref{lem:equilateral} there is a point $P$ for which $AP \cdot BC=D$ with $D_0D=\sqrt{2}, O=D_0$, and $D_3$ the midpoint of $AP'$. Now Proposition \ref{prop:HA} implies the result, since the equilateral diagram has to map back to one of the two possible diagrams (Figure 4) for the original triangle.
\end{proof}
By Proposition \ref{prop:HA} and Theorem \ref{thm:Pabc} we know that the conic $\overline{\mathcal{C}}_A$ lies on the points $P_1, P_2, P_3, P_4$, where $P_1$ and $P_2=(P_1)_a$ are the points in the intersection $\tilde{\mathcal{C}}_O \cap l_G$ described in Proposition \ref{prop:HA} and Corollary \ref{cor:HArel}, and $P_3=(P_1)_b, P_4=(P_1)_c$. (See Figure \ref{fig:locus}.) It can be shown that the equation of the conic $\overline{\mathcal{C}}_A$ in terms of the barycentric coordinates of the point $P=(x,y,z)$ is $xy+xz+yz=x^2$. Furthermore, the center of $\overline{\mathcal{C}}_A$ lies on the median $AG$, $6/7$-ths of the way from $A$ to $D_0$.
\noindent {\bf Remarks.}
\noindent 1. The polar of $A$ with respect to the conic $\overline{\mathcal{C}}_A$ is the line $l_G$ through $G$ parallel to $BC$. This is because the quadrangle $BCE_0F_0$ is inscribed in $\overline{\mathcal{C}}_A$, so its diagonal triangle, whose vertices are $A, G$, and $BC \cdot l_\infty$, is self-polar. Thus, the polar of $A$ is the line $l_G$. \smallskip
\noindent 2. The two points $P$ in the intersection $\overline{\mathcal{C}}_A \cap l_G$ have tangents which go through $A$. This follows from the first remark, since these points lie on the polar $a=l_G$ of $A$ with respect to $\overline{\mathcal{C}}_A$. As a result, the points $D$ on $BC$, for which there is a point $P$ on $AD$ satisfying $H = A$, have the property that the ratio of unsigned lengths $DD_0/D_0C \le \sqrt 2$. This follows from the fact that $\overline{\mathcal{C}}_A$ is an ellipse: since it is an ellipse for the equilateral triangle, it must be an ellipse for any triangle. Then the maximal ratio $DD_0/D_0C$ occurs at the tangents to $\overline{\mathcal{C}}_A$ from $A$; and we showed above that for these two points $P$, $D = AP \cdot BC$ satisfies $DD_0/D_0C = \sqrt 2$.
\end{section}
\noindent Dept. of Mathematics, Carney Hall\\
Boston College\\
140 Commonwealth Ave., Chestnut Hill, Massachusetts, 02467-3806\\
{\it e-mail}: [email protected]
\noindent Dept. of Mathematical Sciences\\
Indiana University - Purdue University at Indianapolis (IUPUI)\\
402 N. Blackford St., Indianapolis, Indiana, 46202\\
{\it e-mail}: [email protected]
\end{document} |
\begin{document}
\markboth{HyperKahler Contact Distributions}{H. Attarchi and F. Babaei}
\catchline{}{}{}{}{}
\title{HyperKahler Contact Distributions}
\author{Hassan Attarchi}
\address{Department of Mathematics, University of California, Riverside,\\
Riverside, CA 92521, USA\\
\email{[email protected]}
\http{profiles.ucr.edu/app/home/profile/hassana}}
\author{Fatemeh Babaei}
\address{Department of Mathematics and Computer Science,\\
Amirkabir University of Technology, IR\\
\email{E-mail\,$:$ [email protected]}}
\maketitle
\begin{abstract}
Let $(\varphi_\alpha,\xi_\alpha,g)$ for $\alpha=1,2$, and $3$ be a contact metric $3$-structure on the manifold $M^{4n+3}$. We show that the $3$-contact distribution of this structure admits a HyperKahler structure whenever $(M^{4n+3},\varphi_\alpha,\xi_\alpha,g)$ is a $3$-Sasakian manifold. In this case, we call it HyperKahler contact distribution. To analyze the curvature properties of this distribution, we define a special metric connection that is completely determined by the HyperKahler contact distribution. We prove that the $3$-Sasakian manifold is of constant $\varphi_{\alpha}$-sectional curvatures if and only if its HyperKahler contact distribution has constant holomorphic sectional curvatures.
\end{abstract}
\keywords{Sasakian $3$-structure; HyperKahler Contact Distribution; Holomorphic Sectional Curvature.}
\section{Introduction}
In 1960, Sasaki \cite{sas1} introduced a geometric structure related to an almost contact structure. This structure has become known as the Sasakian structure, and it has been studied extensively ever since as an odd-dimensional analogue of Kahler manifolds \cite{yano}.\par
Analogously to the concept of Quaternionic Kahler and HyperKahler manifolds in Quaternion spaces \cite{at,boyer1,boyer2,cap1,hi,ishi}, contact $3$-structures were introduced on $(4n+3)$-dimensional manifolds. The Sasakian $3$-structure, which has a close relation to both HyperKahler and Quaternionic Kahler manifolds, first appeared in a paper by Kuo \cite{kuo} and, independently, in one by Udriste \cite{udri}. In 1970, more papers were published in the Japanese literature discussing Sasakian $3$-structures, see \cite{KT70,ty,ta}. Later, in 1973, Ishihara \cite{ishi1} showed that if the distribution formed by the three Killing vector fields which define the Sasakian $3$-structure is regular, then the space of leaves is a quaternionic Kahler manifold.\par
We assume that $(M,\varphi_{\alpha},\xi_{\alpha},g)$ for $\alpha=1,2$, and $3$ is a $3$-Sasakian manifold, and $\mathbf{H}$ denotes the transverse distribution to the Riemannian foliation generated by $\{\xi_1,\xi_2,\xi_3\}$ with respect to the metric $g$ in the tangent bundle $TM$. The purpose of this paper is to study the geometric properties of the distribution $\mathbf{H}$ such as curvature tensor, sectional curvature and Ricci tensor. We investigate the close relation of $\varphi_{\alpha}$-sectional curvatures of $M$ and holomorphic sectional curvatures of $\mathbf{H}$. We can refer to \cite{bejancu2} as an analogue work on Sasakian manifolds.\par
With this purpose in mind, we organize this paper as follows. In section 2, we present some basic notations and definitions which are needed in the following sections. In section 3, the new linear connection $\bar{\nabla}$ is introduced in terms of the Levi-Civita connection. We show that $\bar{\nabla}$ is a metric connection and is completely determined by $\mathbf{H}$. Moreover, we prove that the $\varphi_{\alpha}$ structures are parallel with respect to $\bar{\nabla}$ on $\mathbf{H}$. In section 4, we present the HyperKahler properties of the distribution $\mathbf{H}$ and call it the HyperKahler contact distribution. In this section, the curvature and Ricci tensors of the HyperKahler contact distribution $\mathbf{H}$ are defined with respect to the metric connection $\bar{\nabla}$. Finally, in section 5, we prove some theorems that show the curvature properties of this distribution and its close geometric relation with the $3$-Sasakian manifold.
\section{Preliminaries and Notations}
Let $M$ be a $(2n+1)$-dimensional smooth manifold. Then, the structure $(\varphi, \eta, \xi)$ on $M$, consisting of $(1,1)$-tensor $\varphi$, non-vanishing vector field $\xi$ and 1-form $\eta$, is called an \emph{almost contact structure} if
$$\varphi^2=-I+\eta\otimes\xi, \quad \mbox{and} \quad \eta(\xi)=1.$$
This structure will be called a \emph{contact structure} if
$$\eta\wedge(d\eta)^n\neq0.$$
The manifold $M^{2n+1}$ with the (almost) contact structure $(\varphi, \eta, \xi)$ is called (almost) contact manifold, and it is denoted by $(M, \varphi, \eta, \xi)$ \cite{Blair}. It was proved that all almost contact manifolds admit a compatible Riemannian metric in the following sense
\begin{equation}\label{compat}
\eta(X)=g(\xi,X), \ \ g(\varphi X,\varphi Y)=g(X,Y)-\eta(X)\eta(Y),
\end{equation}
for all $X,Y\in\Gamma(TM)$. In case of contact metric manifolds, the fundamental 2-form $\Omega$ defined by
$$\Omega(X,Y)=g(X,\varphi Y),\ \ \ \ \forall X,Y\in\Gamma(TM)$$
coincides with $d\eta$.\par
Let $\nabla$ be the Levi-Civita connection with respect to the metric $g$ on the contact manifold $(M,\varphi,\eta,\xi,g)$. Then, the (almost) contact metric manifold $(M,\varphi,\eta,\xi,g)$ is a \emph{Sasakian manifold} if
$$(\nabla_X\varphi)Y=g(X,Y)\xi-\eta(Y)X.$$
If $(M,\varphi,\eta,\xi,g)$ is a Sasakian manifold then,
\begin{equation}\label{sas pro}
\begin{split}
\nabla_X\xi= & -\varphi X,\ \ \ \ \ R(X,Y)\xi=\eta(Y)X-\eta(X)Y,\\
S(X,\xi)= & 2n\eta(X),\ \ \ R(\xi,X)Y=g(X,Y)\xi-\eta(Y)X,
\end{split}
\end{equation}
where $R$ and $S$ are curvature and Ricci tensor, respectively, given by following formulas
\begin{equation}\label{cur1}
R(X,Y)Z:=\nabla_X\nabla_YZ-\nabla_Y\nabla_XZ-\nabla_{[X,Y]}Z,
\end{equation}
\begin{equation}\label{ric1}
S(X,Y):=\sum_{i=1}^{2n+1}R(E_i,X,E_i,Y)=\sum_{i=1}^{2n+1}g(R(E_i,X)Y,E_i),
\end{equation}
where $E_i$ are orthonormal local vector fields on $(M,g)$ \cite{oku,tan}.\par
Let $(M,g)$ be a smooth Riemannian manifold of dimension $4n+3$. The manifold $(M,g)$ is called a $3$-\emph{Sasakian manifold} when it is endowed with three Sasakian structures $(M,\varphi_{\alpha},\eta^{\alpha},\xi_{\alpha},g)$ for $\alpha=1,2,3$, satisfying the following relations
$$\begin{array}{l}
\hspace{.5cm}\varphi_{\theta}=\varphi_{\beta}\varphi_{\gamma}-\eta^{\gamma}\otimes\xi_{\beta}=-\varphi_{\gamma}\varphi_{\beta}
+\eta^{\beta}\otimes\xi_{\gamma},\cr
\xi_{\theta}=\varphi_{\beta}\xi_{\gamma}=-\varphi_{\gamma}\xi_{\beta}, \ \ \ \eta^{\theta}=\eta^{\beta}\circ\varphi_{\gamma}=-\eta^{\gamma}\circ\varphi_{\beta},
\end{array}$$
for all even permutations $(\beta,\gamma,\theta)$ of $(1,2,3)$ \cite{Blair}.\par
Let $\mathbf{\xi}$ be the distribution spanned by the three global (Reeb) vector fields $\{\xi_1,\xi_2,\xi_3\}$. By means of $\nabla_X\xi_{\alpha}=-\varphi_{\alpha}X$ for $\alpha=1,2,3$, one can prove the integrability of $\mathbf{\xi}$ as follows
\begin{equation}\label{bracket xi}
[\xi_{\alpha},\xi_{\beta}]=\nabla_{\xi_{\alpha}}\xi_{\beta}-\nabla_{\xi_{\beta}}\xi_{\alpha}=2\xi_{\gamma},
\end{equation}
for all even permutation $(\alpha,\beta,\gamma)$ of $(1,2,3)$. Therefore, $\mathbf{\xi}$ defines a $3$-dimensional foliation on $M$. Moreover, the equations $\nabla_{\xi_{\alpha}}g=0$ for $\alpha=1,2,3$ show that the foliation $\mathbf{\xi}$ is a Riemannian foliation. The transverse distribution of $\xi$ with respect to the metric $g$ is denoted by $\mathbf{H}$, where $\mathbf{H}=\cap_{\alpha=1}^3ker(\eta^{\alpha})$. The distribution $\mathbf{H}$ is a $4n$-dimensional distribution on $M$. Thus, we obtain the following decomposition of the tangent bundle $TM$:
$$TM=\mathbf{H}\oplus\mathbf{\xi}.$$
In a $3$-Sasakian manifold, the distribution $\mathbf{H}$ is never integrable and in the sequel we call it $3$-\emph{contact distribution}. Note that throughout this paper, Latin and Greek indices are used for the ranges $1,2,...,4n$ and $1,2,3$, respectively.
\section{$\mathbf{H}$-Connection of $3$-Contact Distribution}
Considering the foliation $\mathbf{\xi}$ on a $3$-Sasakian manifold $M$, we choose the following local coordinate system,
$$\forall \mathbf{x}\in M \ \ \ \ \mathbf{x}=(z^1,z^2,z^3,x^1,...,x^{4n}),$$
where $\xi_\alpha=\frac{\partial}{\partial z^\alpha}$. Then, one can construct the local basis $\{\frac{\delta}{\delta x^1},...,\frac{\delta}{\delta x^{4n}}\}$ of $\mathbf{H}$ orthogonal to $\mathbf{\xi}$ with respect to the metric $g$ where
$$\frac{\delta}{\delta x^i}=\frac{\partial}{\partial x^i}-\eta_i^{\alpha}\xi_{\alpha}, \ \ i=1,...,4n\ \ \alpha=1,2,3$$
and $\eta_i^{\alpha}=\eta^{\alpha}(\frac{\partial}{\partial x^i})$. Considering the local basis
\begin{equation}\label{basis1}
\{\xi_1,\xi_2,\xi_3,\frac{\delta}{\delta x^1},...,\frac{\delta}{\delta x^{4n}}\},
\end{equation}
the Riemannian metric $g$ will have the following presentation:
\begin{equation}\label{met.mat.}
g:=\left(
\begin{array}{cccc}
1 & 0 & 0 & 0\\
0 & 1 & 0 & 0\\
0 & 0 & 1 & 0\\
0 & 0 & 0 & [g_{ij}]\\
\end{array}
\right)
\end{equation}
where $g_{ij}=g(\frac{\delta}{\delta x^i},\frac{\delta}{\delta x^j})$. Moreover, the adapted local frame (\ref{basis1}) will satisfy the following properties:
\begin{equation}\label{bracket1}
\left\{\begin{array}{l}
dx^k([\frac{\delta}{\delta x^i},\xi_{\alpha}])=-2d^2x^k(\frac{\delta}{\delta x^i},\xi_{\alpha})+\frac{\delta}{\delta x^i}(dx^k(\xi_{\alpha}))-\xi_{\alpha}(dx^k(\frac{\delta}{\delta x^i}))=0\cr
\eta^{\beta}([\frac{\delta}{\delta x^i},\xi_{\alpha}])=-2d\eta^{\beta}(\frac{\delta}{\delta x^i},\xi_{\alpha})+\frac{\delta}{\delta x^i}(\eta^{\beta}(\xi_{\alpha}))-\xi_{\alpha}(\eta^{\beta}(\frac{\delta}{\delta x^i}))=0
\end{array}\right.
\end{equation}
\begin{equation}\label{bracket4}
\left\{\begin{array}{l}
dx^k([\frac{\delta}{\delta x^i},\frac{\delta}{\delta x^j}])=\cr
\hspace{1.7cm}-2d^2x^k(\frac{\delta}{\delta x^i},\frac{\delta}{\delta x^j})+\frac{\delta}{\delta x^i}(dx^k(\frac{\delta}{\delta x^j}))-\frac{\delta}{\delta x^j}(dx^k(\frac{\delta}{\delta x^i}))=0\cr
\eta^{\beta}([\frac{\delta}{\delta x^i},\frac{\delta}{\delta x^j}])=\cr
\hspace{1.7cm}-2d\eta^{\beta}(\frac{\delta}{\delta x^i},\frac{\delta}{\delta x^j})+\frac{\delta}{\delta x^i}(\eta^{\beta}(\frac{\delta}{\delta x^j}))-\frac{\delta}{\delta x^j}(\eta^{\beta}(\frac{\delta}{\delta x^i}))=-2\Omega_{ij}^{\beta}
\end{array}\right.
\end{equation}
Using (\ref{bracket1}) and (\ref{bracket4}), we obtain
\begin{equation}\label{bracket2}
[\frac{\delta}{\delta x^i},\xi_{\alpha}]=0,\ \ \ [\frac{\delta}{\delta x^i},\frac{\delta}{\delta x^j}]=-2\Omega_{ij}^{\alpha}\xi_{\alpha}.
\end{equation}
Let,
$$h_{\alpha\beta}(X)=\frac{1}{2}\left(\mathcal{L}_{\xi_{\alpha}}\varphi_{\beta}\right)(X),$$
for $\alpha, \beta=1,2,3$. Then
$$\begin{array}{l}
h_{11}(X)=h_{22}(X)=h_{33}(X)=0,\cr
h_{12}(X)=-h_{21}(X)=\varphi_3(X),\cr
h_{31}(X)=-h_{13}(X)=\varphi_2(X),\cr
h_{23}(X)=-h_{32}(X)=\varphi_1(X),
\end{array}$$
for all $X\in\Gamma TM$.
\begin{Theorem}\label{levi1}
In the adapted basis (\ref{basis1}), the Levi-Civita connection $\nabla$ with respect to the Riemannian metric $g$ of the $3$-Sasakian manifold $(M,\varphi_{\alpha},\eta^{\alpha},\xi_{\alpha},g)$ has the following components:
$$\left\{\begin{array}{l}
\nabla_{\frac{\delta}{\delta x^i}}\frac{\delta}{\delta x^j}=F_{ij}^k\frac{\delta}{\delta x^k}-\Omega_{ij}^{\alpha}\xi_{\alpha},\cr
\nabla_{\xi_{\alpha}}\frac{\delta}{\delta x^i}=\nabla_{\frac{\delta}{\delta x^i}}\xi_{\alpha}=\Omega_{ij}^{\alpha}g^{jk}\frac{\delta}{\delta x^k},\cr
\nabla_{\xi_1}\xi_2=-\nabla_{\xi_2}\xi_1=\xi_3,\cr
\nabla_{\xi_3}\xi_1=-\nabla_{\xi_1}\xi_3=\xi_2,\cr
\nabla_{\xi_2}\xi_3=-\nabla_{\xi_3}\xi_2=\xi_1,\cr
\nabla_{\xi_1}\xi_1=-\nabla_{\xi_2}\xi_2=\nabla_{\xi_3}\xi_3=0,
\end{array}\right.$$
where
\begin{equation}\label{gamma}
F_{ij}^k=\frac{g^{kh}}{2}\left\{\frac{\delta g_{ih}}{\delta x^j}+\frac{\delta g_{jh}}{\delta x^i}-\frac{\delta g_{ij}}{\delta x^h}\right\}.
\end{equation}
\end{Theorem}
\begin{cor}\label{bundle like}
\cite{boyer1,boyer2} The foliation $\mathbf{\xi}$ is totally geodesic, and the Riemannian metric $g$ is bundle-like with respect to this foliation.
\end{cor}
Consider the Levi-Civita connection $\nabla$ on the $3$-Sasakian manifold $(M,g)$. Then, we define the linear connection $\bar{\nabla}$ by
\begin{equation}\label{new conn}
\bar{\nabla}_XY=\nabla_XY-\eta^{\alpha}(X)\nabla_Y\xi_{\alpha}-\eta^{\alpha}(Y)\nabla_X\xi_{\alpha}+
\Omega^{\alpha}(X,Y)\xi_{\alpha}.
\end{equation}
Using (\ref{basis1}) and (\ref{new conn}), we obtain the following theorem:
\begin{Theorem}\label{new comp.}
The linear connection $\bar{\nabla}$ is completely determined by
$$\left\{\begin{array}{l}
\bar{\nabla}_{\frac{\delta}{\delta x^i}}\frac{\delta}{\delta x^j}=F_{ij}^k\frac{\delta}{\delta x^k},\cr
\bar{\nabla}_{\xi_{\alpha}}\frac{\delta}{\delta x^i}=\bar{\nabla}_{\frac{\delta}{\delta x^i}}\xi_{\alpha}=0,\cr
\bar{\nabla}_{\xi_{\alpha}}\xi_{\beta}=0.
\end{array}\right.$$
\end{Theorem}
From Theorem \ref{new comp.} and Eq. (\ref{gamma}), we obtain that $\bar{\nabla}$ is completely determined by Riemannian metric induced by the $g$ on the $3$-contact distribution $\mathbf{H}$. Moreover, it is easy to see that the $3$-contact distribution $\mathbf{H}$ and foliation $\mathbf{\xi}$ are parallel with respect to the linear connection $\bar{\nabla}$. For these reasons we call $\bar{\nabla}$ the $\mathbf{H}$-\emph{connection} on the $3$-Sasakian manifold $M$.\par
It is surprising and interesting that the connection $\bar{\nabla}$ presented in (\ref{new conn}) coincides with the connection $\tilde{\nabla}$ defined in \cite{cap}. In the following lemma, we prove this equality.
\begin{Lemma}
Let $(M,g)$ be a Riemannian manifold with a Sasakian $3$-structure. Then, the connection $\bar{\nabla}$ presented in (\ref{new conn}) coincides with the connection $\tilde{\nabla}$ defined in \cite{cap}.
\end{Lemma}
\begin{proof}
By the definition of $\tilde{\nabla}$ in \cite{cap} and using the local frame (\ref{basis1}), we obtain
\begin{equation*}
\begin{split}
\tilde{\nabla}_{\frac{\delta}{\delta x^i}}\frac{\delta}{\delta x^j}= & (\nabla_{\frac{\delta}{\delta x^i}}\frac{\delta}{\delta x^j})^h=F_{ij}^k\frac{\delta}{\delta x^k}=\bar{\nabla}_{\frac{\delta}{\delta x^i}}\frac{\delta}{\delta x^j},\\
\tilde{\nabla}_{\xi_{\alpha}}\frac{\delta}{\delta x^i}= & [\xi_{\alpha},\frac{\delta}{\delta x^i}]=0=\bar{\nabla}_{\xi_{\alpha}}\frac{\delta}{\delta x^i},\\
\tilde{\nabla}_{\frac{\delta}{\delta x^i}}\xi_{\alpha}=& \tilde{\nabla}_{\xi_{\beta}}\xi_{\alpha}=0=\bar{\nabla}_{\frac{\delta}{\delta x^i}}\xi_{\alpha}=\bar{\nabla}_{\xi_{\beta}}\xi_{\alpha},
\end{split}
\end{equation*}
where $(\nabla_{\frac{\delta}{\delta x^i}}\frac{\delta}{\delta x^j})^h$ denotes the restricted component of the Levi-Civita connection $\nabla$ on $3$-contact distribution $\mathbf{H}$.
\end{proof}
From this, one can find the metrizability of $\bar{\nabla}$ and some more information about its torsion and curvature in \cite{cap}. In the following, we present the local expressions of the torsion and curvature of $\bar{\nabla}$ with respect to the frame (\ref{basis1}):
\begin{equation}\label{torsion}
\left\{\begin{array}{l}
T_{\bar{\nabla}}(\frac{\delta}{\delta x^i},\frac{\delta}{\delta x^j})=2\Omega_{ij}^{\alpha}\xi_{\alpha},\cr
T_{\bar{\nabla}}(\frac{\delta}{\delta x^i},\xi_{\alpha})=0,\cr
T_{\bar{\nabla}}(\xi_{\alpha},\xi_{\beta})=-T_{\bar{\nabla}}(\xi_{\beta},\xi_{\alpha})=-2\xi_{\gamma},
\end{array}\right.
\end{equation}
for all even permutations $(\alpha,\beta,\gamma)$ of $(1,2,3)$.
\begin{equation}\label{curvature}
\left\{\begin{array}{l}
\bar{R}(\frac{\delta}{\delta x^i},\frac{\delta}{\delta x^j})\frac{\delta}{\delta x^k}=\bar{R}_{\ ijk}^h\frac{\delta}{\delta x^h},\cr
\bar{R}(\frac{\delta}{\delta x^i},\xi_{\alpha})\frac{\delta}{\delta x^j}=-\xi_{\alpha}(F_{ij}^k)\frac{\delta}{\delta x^k},\cr
\bar{R}(\xi_{\alpha},\xi_{\beta})\frac{\delta}{\delta x^i}=0,\cr
\bar{R}(X,Y)\xi_{\alpha}=0,
\end{array}\right.
\end{equation}
where $X,Y\in\Gamma(TM)$ and
\begin{equation}\label{cur. com.}
\bar{R}_{\ ijk}^h=\frac{\delta F_{ij}^h}{\delta x^k}-\frac{\delta F_{ik}^h}{\delta x^j}+F_{ij}^tF_{tk}^h-F_{ik}^tF_{tj}^h.
\end{equation}
Moreover, the Lie brackets of vector fields on $M$ in terms of the $\mathbf{H}$-connection have the following expression
\begin{equation}\label{bracket3}
[X,Y]=\bar{\nabla}_XY-\bar{\nabla}_YX-2\Omega^{\alpha}(X,Y)\xi_{\alpha},
\end{equation}
for any $X,Y\in\Gamma TM$.
\begin{Theorem}\label{varphi}
Consider the linear connection $\bar{\nabla}$ given by (\ref{new conn}) on $3$-Sasakian manifold $(M,\varphi_{\alpha},\eta^{\alpha},\xi_{\alpha},g)$ for $\alpha=1,2,3$. Then, the following equation is satisfied
$$(\bar{\nabla}_X\varphi_{\alpha})Y=0,\ \ \ \ \ \ \ \forall\alpha=1,2,3$$
where $X,Y\in\Gamma\mathbf{H}$.
\end{Theorem}
\begin{proof} To complete the proof, we need to evaluate $(\bar{\nabla}_{\frac{\delta}{\delta x^i}}\varphi_{\alpha})\frac{\delta}{\delta x^j}$. Using (\ref{new conn}), we obtain
\begin{equation*}
\begin{split}
(\bar{\nabla}_{\frac{\delta}{\delta x^i}}\varphi_{\alpha})\frac{\delta}{\delta x^j}= & \bar{\nabla}_{\frac{\delta}{\delta x^i}}\varphi_{\alpha}(\frac{\delta}{\delta x^j})-\varphi_{\alpha}(\bar{\nabla}_{\frac{\delta}{\delta x^i}}\frac{\delta}{\delta x^j})\\
= & \nabla_{\frac{\delta}{\delta x^i}}\varphi_{\alpha}(\frac{\delta}{\delta x^j})+\Omega^{\beta}(\frac{\delta}{\delta x^i},\varphi_{\alpha}(\frac{\delta}{\delta x^j}))\xi_{\beta}\\
& -\varphi_{\alpha}(\nabla_{\frac{\delta}{\delta x^i}}\frac{\delta}{\delta x^j}+\Omega^{\beta}(\frac{\delta}{\delta x^i},\frac{\delta}{\delta x^j})\xi_{\beta})\\
= & (\nabla_{\frac{\delta}{\delta x^i}}\varphi_{\alpha})\frac{\delta}{\delta x^j}+\Omega^{\beta}(\frac{\delta}{\delta x^i},\varphi_{\alpha}(\frac{\delta}{\delta x^j}))\xi_{\beta}-\Omega^{\beta}(\frac{\delta}{\delta x^i},\frac{\delta}{\delta x^j})\varphi_{\alpha}(\xi_{\beta})\\
= & g(\frac{\delta}{\delta x^i},\frac{\delta}{\delta x^j})\xi_{\alpha}+g(\frac{\delta}{\delta x^i},\varphi_1\varphi_{\alpha}(\frac{\delta}{\delta x^j}))\xi_1-g(\frac{\delta}{\delta x^i},\varphi_1(\frac{\delta}{\delta x^j}))\varphi_{\alpha}(\xi_1)\\
& +g(\frac{\delta}{\delta x^i},\varphi_2\varphi_{\alpha}(\frac{\delta}{\delta x^j}))\xi_2-g(\frac{\delta}{\delta x^i},\varphi_2(\frac{\delta}{\delta x^j}))\varphi_{\alpha}(\xi_2)\\
& +g(\frac{\delta}{\delta x^i},\varphi_3\varphi_{\alpha}(\frac{\delta}{\delta x^j}))\xi_3-g(\frac{\delta}{\delta x^i},\varphi_3(\frac{\delta}{\delta x^j}))\varphi_{\alpha}(\xi_3)
\end{split}
\end{equation*}
It is easy to check that the last expression vanishes for all $\alpha=1,2,3$. \end{proof}
\section{HyperKahler Contact Distribution and its Holomorphic Sectional Curvatures}
If we restrict the metric $g$ and $\varphi_{\alpha}$ for $\alpha=1,2,3$ to the $3$-contact distribution $\mathbf{H}$, then $\mathbf{H}$ can be considered as an almost Hyper-Hermitian vector bundle. Moreover, $\varphi_{\alpha}$ for $\alpha=1,2,3$ are parallel with respect to the metric connection $\bar{\nabla}$ on $\mathbf{H}$. Therefore, $\mathbf{H}$ carries an analogous HyperKahler structure, and we call it a \emph{HyperKahler contact distribution}. The close relation of HyperKahler and $3$-Sasakian manifolds suggests this name as well.\par
By Corollary \ref{bundle like}, we know that $g_{ij}$ are functions of $(x^i)$ for $i=1,...,4n$ (i.e. $\xi_{\alpha}(g_{ij})=0$ for all $\alpha=1,2,3$). Using this fact and $[\frac{\delta}{\delta x^i},\xi_{\alpha}]=0$, one can check that $\xi_{\alpha}(F_{ij}^k)=0$. Therefore, the equations (\ref{curvature}) and (\ref{cur. com.}) imply the following
\begin{equation}\label{curvature1}
\left\{\begin{array}{l}
\bar{R}(\frac{\delta}{\delta x^i},\frac{\delta}{\delta x^j})\frac{\delta}{\delta x^k}=\bar{R}_{\ ijk}^h\frac{\delta}{\delta x^h},\cr
\bar{R}(\frac{\delta}{\delta x^i},\xi_{\alpha})\frac{\delta}{\delta x^j}=\bar{R}(\xi_{\alpha},\xi_{\beta})\frac{\delta}{\delta x^i}=\bar{R}(X,Y)\xi_{\alpha}=0,
\end{array}\right.
\end{equation}
where $X,Y\in\Gamma(TM)$ and
\begin{equation}\label{cur. com.1}
\bar{R}_{\ ijk}^h=\frac{\partial F_{ij}^h}{\partial x^k}-\frac{\partial F_{ik}^h}{\partial x^j}+F_{ij}^tF_{tk}^h-F_{ik}^tF_{tj}^h.
\end{equation}
The equations (\ref{curvature1}) and (\ref{cur. com.1}) show that $\bar{R}$ only depends on $\mathbf{H}$. Therefore, we define
\begin{equation}\label{cur H}
\bar{R}(X,Y,Z,W):=g(\bar{R}(X,Y)W,Z), \ \ \ \forall X,Y,Z,W\in\Gamma\mathbf{H},
\end{equation}
and we call it the \emph{curvature tensor field} of $(\mathbf{H},g|_{\mathbf{H}})$.
\begin{Lemma}\label{cur pro}
The curvature tensor field $\bar{R}$ of the HyperKahler contact distribution $\mathbf{H}$ satisfies the identities:
\begin{equation}\label{curvature2}
\left\{\begin{array}{l}
\bar{R}(X,Y,Z,U)=-\bar{R}(Y,X,Z,U)=-\bar{R}(X,Y,U,Z),\cr
\bar{R}(X,Y,U,Z)+\bar{R}(Y,Z,U,X)+\bar{R}(Z,X,U,Y)=0,\cr
\bar{R}(X,Y,Z,U)=\bar{R}(Z,U,X,Y).
\end{array}\right.
\end{equation}
for any $X,Y,Z,U\in\Gamma\mathbf{H}$.
\end{Lemma}
\begin{proof} The first equality is a general property of curvature tensor of any linear connection. The next equality of the first equation is a consequence of the fact that $\bar{\nabla}$ is a metric connection. To prove the second equality, we use (\ref{bracket3}) in a straightforward calculation as follows
$$\bar{R}(X,Y)Z+\bar{R}(Y,Z)X+\bar{R}(Z,X)Y=\bar{\nabla}_X\bar{\nabla}_YZ-\bar{\nabla}_Y\bar{\nabla}_XZ
-\bar{\nabla}_{[X,Y]}Z$$
$$+\bar{\nabla}_Y\bar{\nabla}_ZX-\bar{\nabla}_Z\bar{\nabla}_YX
-\bar{\nabla}_{[Y,Z]}X+\bar{\nabla}_Z\bar{\nabla}_XY-\bar{\nabla}_X\bar{\nabla}_ZY
-\bar{\nabla}_{[Z,X]}Y$$
$$=[X,[Y,Z]]+2(\Omega^{\alpha}(X,[Y,Z])+\Omega^{\alpha}(\bar{\nabla}_XY,Z)+\Omega^{\alpha}(Y,\bar{\nabla}_XZ))
\xi_{\alpha}$$
$$+[Y,[Z,X]]+2(\Omega^{\alpha}(Y,[Z,X])+\Omega^{\alpha}(\bar{\nabla}_YZ,X)+\Omega^{\alpha}(Z,\bar{\nabla}_YX))
\xi_{\alpha}$$
$$+[Z,[X,Y]]+2(\Omega^{\alpha}(Z,[X,Y])+\Omega^{\alpha}(\bar{\nabla}_ZX,Y)+\Omega^{\alpha}(X,\bar{\nabla}_ZY))
\xi_{\alpha}=0.$$
Then, by Lemma 3.1 in \cite{yano} on page 32 the last equality is obtained.
\end{proof}
To present $\bar{R}(X,Y)Z$ in terms of $R(X,Y)Z$, for all $X,Y,Z\in\Gamma(TM)$, we compute the following
\begin{equation}\label{cur com1}
\begin{split}
\bar{\nabla}_X\bar{\nabla}_YZ= & \nabla_X\nabla_YZ\\
& +\eta^{\alpha}(X)\varphi_{\alpha}(\nabla_YZ)+\eta^{\alpha}(Y)\varphi_{\alpha}(\nabla_XZ)+\eta^{\alpha}(Z)\varphi_{\alpha}(\nabla_XY)\\
& +\eta^{\alpha}(\nabla_YZ)\varphi_{\alpha}(X)+\eta^{\alpha}(\nabla_XZ)\varphi_{\alpha}(Y)+\eta^{\alpha}(\nabla_XY)\varphi_{\alpha}(Z)\\
& +\Omega^{\alpha}(X,\nabla_YZ)\xi_{\alpha}+\Omega^{\alpha}(Y,\nabla_XZ)\xi_{\alpha}+\Omega^{\alpha}(\nabla_XY,Z)\xi_{\alpha}\\
& -\left(\Omega^{\alpha}(Y,X)\varphi_{\alpha}(Z)
+\Omega^{\alpha}(Z,X)\varphi_{\alpha}(Y)\right)\\
& +\eta^{\alpha}(Y)\eta^{\beta}(X)\varphi_{\beta}\varphi_{\alpha}(Z)+\eta^{\alpha}(Z)\eta^{\beta}(X)\varphi_{\beta}\varphi_{\alpha}(Y)\\
& -2\sum_{\alpha=1}^3\eta^{\alpha}(Y)\eta^{\alpha}(Z)X+2\eta^{\alpha}(Y)g(X,Z)\xi_{\alpha}\\
& +\eta^{\alpha}(Y)\Omega^{\beta}(X,\varphi_{\alpha}(Z))\xi_{\beta}+\eta^{\alpha}(Z)\Omega^{\beta}(X,\varphi_{\alpha}(Y))\xi_{\beta}\\
& +\eta^{\alpha}(Y)\eta^{\beta}(\varphi_{\alpha}(Z))\varphi_{\beta}(X)+\eta^{\alpha}(Z)\eta^{\beta}(\varphi_{\alpha}(Y))\varphi_{\beta}(X),
\end{split}
\end{equation}
and
\begin{equation}\label{cur com2}
\begin{split}
\bar{\nabla}_{[X,Y]}Z= & \nabla_{[X,Y]}Z+\eta^{\alpha}([X,Y])\varphi_{\alpha}(Z)\\
& +\eta^{\alpha}(Z)\varphi_{\alpha}([X,Y])+\Omega^{\alpha}([X,Y],Z)
\xi_{\alpha}.
\end{split}
\end{equation}
Therefore,
\begin{equation}\label{cur1 2}
\begin{split}
\bar{R}(X,Y)Z= & R(X,Y)Z\\
& -2\Omega^{\alpha}(Y,X)\varphi_{\alpha}(Z)-\Omega^{\alpha}(Z,X)\varphi_{\alpha}(Y)
+\Omega^{\alpha}(Z,Y)\varphi_{\alpha}(X)\\
& +\sum_{\alpha=1}^3(\eta^{\alpha}(X)\eta^{\alpha}(Z)Y-\eta^{\alpha}(Y)\eta^{\alpha}(Z)X)\\
& -\eta^{\alpha}(Z)\eta^{\beta}(Y)\varphi_{\beta}\varphi_{\alpha}(X)+\eta^{\alpha}(Z)\eta^{\beta}(X)
\varphi_{\beta}\varphi_{\alpha}(Y)\\
& +2\eta^{\alpha}(Y)\eta^{\beta}(X)\varphi_{\beta}\varphi_{\alpha}(Z)+2\eta^{\alpha}(Z)\Omega^{\beta}
(X,\varphi_{\alpha}(Y))\xi_{\beta}\\
& -\eta^{\alpha}(X)\Omega^{\beta}(Y,\varphi_{\alpha}(Z))\xi_{\beta}+\eta^{\alpha}(Y)\Omega^{\beta}
(X,\varphi_{\alpha}(Z))\xi_{\beta}\\
& +\eta^{\alpha}(Y)\eta^{\beta}(\varphi_{\alpha}(Z))\varphi_{\beta}(X)+\eta^{\alpha}(Z)\eta^{\beta}
(\varphi_{\alpha}(Y))\varphi_{\beta}(X)\\
& -\eta^{\alpha}(X)\eta^{\beta}(\varphi_{\alpha}(Z))\varphi_{\beta}(Y)-\eta^{\alpha}(Z)\eta^{\beta}(\varphi_{\alpha}(X))\varphi_{\beta}(Y)\\
& +2\eta^{\alpha}(Y)g(X,Z)\xi_{\alpha}-2\eta^{\alpha}(X)g(Y,Z)\xi_{\alpha},
\end{split}
\end{equation}
where the Einstein summation convention is used for the repeated indices $\alpha$ and $\beta$ over their range in the case $\alpha\neq\beta$.
\begin{cor}\label{X X1 X2 X3}
Let $M$ be a $3$-Sasakian manifold and $(\mathbf{H},g|_{\mathbf{H}})$ the HyperKahler contact distribution on $M$. Then the following holds for each vector field $X\in\Gamma\mathbf{H}$
$$\bar{R}(X,\varphi_1X,\varphi_2X,\varphi_3X)=R(X,\varphi_1X,\varphi_2X,\varphi_3X).$$
\end{cor}
The \emph{Ricci tensor} $\bar{S}$ of the HyperKahler contact distribution $\mathbf{H}$ is defined by
\begin{equation}\label{Ricci}
\bar{S}(X,Y)=\sum_{i=1}^{4n}\bar{R}(E_i,X,E_i,Y)+\sum_{\alpha=1}^3\bar{R}(\xi_{\alpha},X,\xi_{\alpha},Y)\ \ \ \forall X,Y\in\Gamma\mathbf{H},
\end{equation}
where $\{E_1,...,E_{4n}\}$ is an orthonormal local basis of vectors in $\Gamma\mathbf{H}$.
\begin{Lemma}\label{ric}
Let $M$ be a connected $3$-Sasakian manifold of dimension $4n+3$. Then, the Ricci tensor $\bar{S}$ of the HyperKahler contact distribution $\mathbf{H}$ satisfies
$$\bar{S}(X,Y)=(4n+5)g(X,Y) \ \ \ \forall X,Y\in\Gamma\mathbf{H}.$$
\end{Lemma}
\begin{proof}
In \cite{kashi}, it was proved that $3$-Sasakian manifolds are Einstein spaces and their Ricci tensor fields with respect to the Levi-Civita connection are given by
$$S(X,Y)=(4n+2)g(X,Y)\ \ \ \forall X,Y\in\Gamma TM,$$
where $\dim(M)=4n+3$. If $\{E_1,...,E_{4n}\}$ is an orthonormal local basis of $\mathbf{H}$ then $\{E_1,...,E_{4n},E_{4n+1}:=\xi_1,E_{4n+2}:=\xi_2,E_{4n+3}:=\xi_3\}$ will be an orthonormal local basis of $TM$, and vice versa.
Using (\ref{sas pro}), (\ref{cur1 2}) and (\ref{Ricci}), for all $X,Y\in\Gamma\mathbf{H}$, we obtain
\begin{equation*}
\begin{split}
\bar{S}(X,Y)= & \sum_{i=1}^{4n+3}g(\bar{R}(E_i,X)Y,E_i)\\
= & \sum_{i=1}^{4n+3}g(R(E_i,X)Y,E_i)-3\sum_{\alpha=1}^3\sum_{i=1}^{4n}g(X,\varphi_{\alpha}E_i)g(\varphi_{\alpha}Y,E_i)\\
& -2\sum_{\beta=1}^3\eta^{\alpha}(\xi_{\beta})g(X,Y)g(\xi_{\alpha},\xi_{\beta})\\
= & S(X,Y)+9g(X,Y)-6g(X,Y)=(4n+5)g(X,Y).
\end{split}
\end{equation*}
\end{proof}
The sectional curvature $K$ of Levi-Civita connection $\nabla$ for the plane $\Pi$ spanned by $\{X,Y\}$ at a point is given by
$$K(\Pi)=K(X,Y)=-\frac{R(X,Y,X,Y)}{g(X,X)g(Y,Y)-g^2(X,Y)}.$$
It is easy to check that $K(\Pi)$ is independent of the choice of the vector fields $X$ and $Y$ spanning $\Pi$.\par
The \emph{holomorphic sectional} curvatures of the HyperKahler contact distribution $\mathbf{H}$ are defined by
$$\bar{H}_{\alpha}(X)=\bar{R}(X,\varphi_{\alpha}X,X,\varphi_{\alpha}X) \ \ \ \forall \alpha=1,2,3$$
where $X\in\Gamma\mathbf{H}$ has unit length at any point with respect to the metric $g$.
\begin{Definition}
Let $M$ be a $3$-Sasakian manifold. The plane $\Pi$ is called a $\varphi_{\alpha}$-plane, whenever for any $X\in\Pi$, the sections $X$ and $\varphi_{\alpha}X$ span $\Pi$.
\end{Definition}
\begin{Theorem}\label{sec rela}
Let $M$ be a $3$-Sasakian manifold with HyperKahler contact distribution $\mathbf{H}$ and let $\alpha$ be a number in $\{1,2,3\}$. Then the $\varphi_{\alpha}$-holomorphic sectional curvature of $\mathbf{H}$ is equal to $k$ (i.e. $\bar{H}_{\alpha}(X)=k$) if and only if the corresponding $\varphi_{\alpha}$-sectional curvature of $M$ for the $\varphi_{\alpha}$-plane $\{X,\varphi_{\alpha}X\}$, where $X\in\Gamma\mathbf{H}$, is equal to $k-3$.
\end{Theorem}
\begin{proof} By using (\ref{cur1 2}) and a straightforward calculation, the result is obtained.\end{proof}
\begin{cor}\label{bianchi}
Let $M$ be a $3$-Sasakian manifold. Then the holomorphic sectional curvatures $\bar{H}_{\alpha}$ satisfy the following equality
\begin{equation}\label{sum H}
\bar{H}_1(X)+\bar{H}_2(X)+\bar{H}_3(X)=12,
\end{equation}
for every unit vector field $X\in\Gamma\mathbf{H}$.
\end{cor}
\begin{proof} In \cite{tan3}, it was proved that
\begin{equation}\label{tanno}
\sum_{\alpha=1}^3H_{\alpha}(X)=3,
\end{equation}
where $X$ is a unit vector field tangent to $\mathbf{H}$ and $H_{\alpha}$ are the holomorphic sectional curvatures of the Levi-Civita connection on $M$ with respect to $\varphi_{\alpha}$ for $\alpha=1,2,3$. Therefore, equation (\ref{sum H}) is a direct consequence of Theorem \ref{sec rela} and (\ref{tanno}).\end{proof}
\begin{cor}\label{cons2}
Let $M$ be a $3$-Sasakian manifold. If two holomorphic sectional curvatures of $\mathbf{H}$ are constant then the third one will be constant.
\end{cor}
\begin{Theorem}\label{sec}
Let $\Pi$ be the $\varphi_{\alpha}$-plane at a point of a $3$-Sasakian manifold $M$. Then, the sectional curvatures of $\Pi$ with respect to the $\nabla$ and $\bar{\nabla}$ are related as follows
\begin{equation*}
\begin{split}
\bar{K}(\Pi)= & K(\Pi)+3+4\left(\eta^{\beta}(X)\eta^{\gamma}(X)\right)^2\\
& +6\left((\eta^{\beta}(X))^4+(\eta^{\gamma}(X))^4\right)
-8\left((\eta^{\beta}(X))^2+(\eta^{\gamma}(X))^2\right),
\end{split}
\end{equation*}
where $X$ is a unit vector in the $\varphi_{\alpha}$-plane $\Pi$ and $(\alpha, \beta, \gamma)$ is an arbitrary permutation of $(1,2,3)$.
\end{Theorem}
\begin{proof} Without loss of generality, we prove the theorem for $\alpha=1$. Consider the $\varphi_1$-plane $\Pi$ and a unit vector $X\in\Pi$; then, by using (\ref{cur1 2}), we obtain
\begin{equation*}
\begin{split}
\bar{K}(\Pi)= & \bar{R}(X,\varphi_1X,X,\varphi_1X)\\
= & R(X,\varphi_1X,X,\varphi_1X)-3\Omega^{\beta}(\varphi_1X,X)g(\varphi_{\beta}\varphi_1X,X)\\
& +\Omega^{\beta}(\varphi_1X,\varphi_1X)
g(\varphi_{\beta}X,X)\\
& +\sum_{\beta=1}^3\left(\eta^{\beta}(X)\eta^{\beta}(\varphi_1X)g(\varphi_1X,X)-\eta^{\beta}(\varphi_1X)
\eta^{\beta}(\varphi_1X)g(X,X)\right)\\
& -\eta^{\beta}(\varphi_1X)\eta^{\gamma}(\varphi_1X)g(\varphi_{\gamma}\varphi_{\beta}X,X)+3\eta^{\beta}(\varphi_1X)
\eta^{\gamma}(X)g(\varphi_{\gamma}\varphi_{\beta}\varphi_1X,X)\\
& +2\eta^{\beta}(\varphi_1X)\eta^{\gamma}(\varphi_{\beta}\varphi_1X)g(\varphi_{\gamma}X,X)-\eta^{\beta}(X)
\eta^{\gamma}(\varphi_{\beta}\varphi_1X)g(\varphi_{\gamma}\varphi_1X,X)\\
& -\eta^{\beta}(\varphi_1X)\eta^{\gamma}(\varphi_{\beta}X)g(\varphi_{\gamma}\varphi_1X,X)+\eta^{\beta}(\varphi_1X)
g(X,\varphi_1X)g(\xi_{\beta},X)\\
& -\eta^{\beta}(X)g(\varphi_1X,\varphi_1X)g(\xi_{\beta},X)+3\eta^{\beta}(\varphi_1X)\Omega^{\gamma}
(X,\varphi_{\beta}\varphi_1X)g(\xi_{\gamma},X)\\
& -\eta^{\beta}(X)\Omega^{\gamma}(\varphi_1X,\varphi_{\beta}\varphi_1X)g(\xi_{\gamma},X)\\
= & K(\Pi)+3+4\left(\eta^{\beta}(X)\eta^{\gamma}(X)\right)^2\\
& +6\left((\eta^{\beta}(X))^4+(\eta^{\gamma}(X))^4\right)-8\left((\eta^{\beta}(X))^2+(\eta^{\gamma}(X))^2\right).
\end{split}
\end{equation*}
\end{proof}
\end{document} |
\begin{document}
\title[Combinatorial properties]{Combinatorial properties of ultrametrics and generalized ultrametrics}
\author{Oleksiy Dovgoshey}
\newcommand{\newline\indent}{\newline\indent}
\address{\textbf{O. Dovgoshey}\newline\indent
Function theory department\newline\indent
Institute of Applied Mathematics and Mechanics of NASU\newline\indent
Dobrovolskogo str. 1, Slovyansk 84100, Ukraine}
\email{[email protected]}
\subjclass[2010]{Primary 54E35, Secondary 06A05, 06A06}
\keywords{ultrametric, generalized ultrametric, equivalence relation, poset, totally ordered set, isotone mapping.}
\begin{abstract}
Let \(X\), \(Y\) be sets and let \(\Phi\), \(\Psi\) be mappings with domains \(X^{2}\) and \(Y^{2}\) respectively. We say that \(\Phi\) and \(\Psi\) are \emph{combinatorially similar} if there are bijections \(f \colon \Phi(X^2) \to \Psi(Y^{2})\) and \(g \colon Y \to X\) such that \(\Psi(x, y) = f(\Phi(g(x), g(y)))\) for all \(x\), \(y \in Y\). Conditions under which a given mapping is combinatorially similar to an ultrametric or a pseudoultrametric are found. Combinatorial characterizations are also obtained for poset-valued ultrametric distances recently defined by Priess-Crampe and Ribenboim.
\end{abstract}
\maketitle
\section{Introduction}
Recall some definitions from the theory of metric spaces. Let \(X\) be a set, let \(X^{2}\) be the Cartesian square of \(X\),
\[
X^{2} = X \times X = \{\<x, y> \colon x, y \in X\},
\]
and let \(\mathbb{R}^{+} = [0, \infty)\).
\begin{definition}\label{d1.1}
A \textit{metric} on \(X\) is a function \(d\colon X^{2} \to \mathbb{R}^{+}\) such that for all \(x\), \(y\), \(z \in X\):
\begin{enumerate}
\item \(d(x,y) = 0\) if and only if \(x=y\), the \emph{positive property};
\item \(d(x,y)=d(y,x)\), the \emph{symmetric property};
\item \(d(x, y)\leq d(x, z) + d(z, y)\), the \emph{triangle inequality}.
\end{enumerate}
A metric \(d\colon X^{2} \to \mathbb{R}^{+}\) is an \emph{ultrametric} on \(X\) if
\begin{enumerate}
\item [\((iv)\)] \(d(x,y) \leq \max \{d(x,z),d(z,y)\}\)
\end{enumerate}
holds for all \(x\), \(y\), \(z \in X\).
\end{definition}
Inequality \((iv)\) is often called the {\it strong triangle inequality}.
The theory of ultrametric spaces is closely connected with various investigations in mathematics, physics, linguistics, psychology and computer science. Different properties of ultrametrics have been studied in~\cite{DM2009, DD2010, DP2013SM, Groot1956, Lemin1984FAA, Lemin1984RMS39:5, Lemin1984RMS39:1, Lemin1985SMD32:3, Lemin1988, Lemin2003, QD2009, QD2014, BS2017, DM2008, DLPS2008, KS2012, Vaughan1999, Vestfrid1994, Ibragimov2012, GomoryHu(1961), Carlsson2010, DLW, Fie, GurVyal(2012), GV, Hol, H04, BH2, Lemin2003, Bestvina2002, DDP(P-adic), DP2019, DPT(Howrigid),PD(UMB), P2018(p-Adic),DP2018, DPT2015, CO2017TaAoC, Wei2017TaAoC, Ber2019SMJ}.
A useful generalization of the concept of an ultrametric is the concept of a pseudoultrametric, and this is one of the main objects of our research below.
\begin{definition}\label{ch2:d2}
Let \(X\) be a set and let \(d \colon X^{2} \to \mathbb{R}^{+}\) be a symmetric function such that \(d(x, x) = 0\) holds for every \(x \in X\). The function \(d\) is a \emph{pseudoultrametric} (\emph{pseudometric}) on \(X\) if it satisfies the strong triangle inequality (triangle inequality).
\end{definition}
The strong triangle inequality also admits a natural generalization for poset-valued mappings.
Let \((\Gamma, \leqslant)\) be a partially ordered set with the smallest element \(\gamma_0\) and let \(X\) be a nonempty set.
\begin{definition}\label{d1.3}
A mapping \(d \colon X^{2} \to \Gamma\) is an \emph{ultrametric distance}, if the following conditions hold for all \(x\), \(y\), \(z \in X\) and \(\gamma \in \Gamma\).
\begin{enumerate}
\item [\((i)\)] \(d(x, y) = \gamma_0\) if and only if \(x = y\).
\item [\((ii)\)] \(d(x, y) = d(y, x)\).
\item [\((iii)\)] If \(d(x, y) \leqslant \gamma\) and \(d(y, z) \leqslant \gamma\), then \(d(x, z) \leqslant \gamma\).
\end{enumerate}
\end{definition}
The ultrametric distances were introduced by Priess-Crampe and Ribenboim \cite{PR1993AMSUH} and studied in~\cite{PR1996AMSUH, PR1997AMSUH, Rib2009JoA, Rib1996PMH}. This generalization of ultrametrics has some interesting applications to logic programming, computational logic and domain theory \cite{Kro2006TCS, PR2000JLP, SH1998IMSB}.
Let us recall now the definition of combinatorial similarity. In what follows we will denote by \(F(A)\) the range of a mapping \(F \colon A \to B\), \(F(A) = \{F(x) \colon x \in A\}\).
\begin{definition}[{\cite{Dov2019a}}]\label{d2.17}
Let \(X\), \(Y\) be nonempty sets and let \(\Phi\), \(\Psi\) be mappings with the domains \(X^{2}\) and \(Y^{2}\), respectively. The mapping \(\Phi\) is \emph{combinatorially similar} to \(\Psi\) if there are bijections \(f \colon \Phi(X^2) \to \Psi(Y^{2})\) and \(g \colon Y \to X\) such that
\begin{equation}\label{d2.17:e1}
\Psi(x, y) = f(\Phi(g(x), g(y)))
\end{equation}
holds for all \(x\), \(y \in Y\). In this case, we say that \(g \colon Y \to X\) is a \emph{combinatorial similarity} for the mappings \(\Psi\) and \(\Phi\).
\end{definition}
Equality~\eqref{d2.17:e1} means that the diagram
\begin{equation*}
\ctdiagram{
\ctv 0,50:{X^{2}}
\ctv 100,50:{Y^{2}}
\ctv 0,0:{\Phi(X^{2})}
\ctv 100,0:{\Psi(Y^{2})}
\ctet 100,50,0,50:{g\otimes g}
\ctet 0,0,100,0:{f}
\ctel 0,50,0,0:{\Phi}
\cter 100,50,100,0:{\Psi}
}
\end{equation*}
is commutative, where we understand the mapping \(g\otimes g\) as
\[
(g\otimes g)(\<y_1, y_2>) := \<g(y_1), g(y_2)>
\]
for \(\<y_1, y_2> \in Y^{2}\).
Some characterizations of mappings which are combinatorially similar to pseudometrics, strongly rigid pseudometrics and discrete pseudometrics were obtained in~\cite{Dov2019a}. The present paper deals with combinatorial properties of ultrametrics and generalized ultrametrics and this can be seen as a further development of research begun in~\cite{Dov2019a, Dov2019IEJA}.
The paper is organized as follows.
In Section~2 we introduce the notions of strongly consistent mappings and \(a_0\)-coherent mappings and show that these properties of mappings are invariant w.r.t. combinatorial similarities, Proposition~\ref{p2.4}. The main results of the section, Proposition~\ref{p2.7} and Theorem~\ref{p2.10}, describe \(a_0\)-coherent mappings in terms of binary relations defined on the domains of these mappings. An important special case of combinatorial similarities, the so-called weak similarities, are introduced in Definition~\ref{d2.9} at the end of the section.
In Section~3, starting from the characterization of mappings which are combinatorially similar to pseudometrics, we prove Theorem~\ref{t3.7}, a characterization of mappings which are combinatorially similar to pseudoultrametrics with at most countable range. The corresponding results for ultrametrics are given in Corollary~\ref{c3.8}. A subclass of the Priess-Crampe and Ribenboim ultrametric distances which is basic for our goals, the \({\preccurlyeq}_Q\)-ultra\-metrics and the related \({\preccurlyeq}_Q\)-pseudo\-ultra\-metrics, is introduced in Definition~\ref{d3.11}. In Proposition~\ref{p3.16} we show that \({\preccurlyeq}_Q\)-pseudo\-ultra\-metrics are \(a_0\)-coherent. The main result of the section is Theorem~\ref{t3.15} which gives us the necessary and sufficient condition under which a given mapping is combinatorially similar to some \({\preccurlyeq}_Q\)-pseudo\-ultra\-metric. Proposition~\ref{p3.23} and Corollary~\ref{c3.24} extend to \({\preccurlyeq}_Q\)-pseudo\-ultra\-metrics the characterization of ultrametric-preserving functions obtained recently by Pongsriiam and Termwuttipong.
Section~4 mainly describes the interrelations between combinatorial and weak similarities of \({\preccurlyeq}_Q\)-pseudo\-ultra\-metrics. First of all, in Definition~\ref{d3.13}, we extend the notion of weak similarity from usual pseudo\-ultra\-metrics to \({\preccurlyeq}_Q\)-pseudo\-ultra\-metrics. Proposition~\ref{p3.16} claims that, for all \({\preccurlyeq}_Q\)-pseudo\-ultra\-metrics, every weak similarity is a combinatorial similarity (but not conversely in general). The orders \({\preccurlyeq}_Q\), for which the weak similarities and the combinatorial similarities are the same (for the corresponding \({\preccurlyeq}_Q\)-pseudo\-ultra\-metrics) are described in Theorem~\ref{t4.3}. In Proposition~\ref{c4.4}, for every totally ordered set \((Q, {\preccurlyeq}_Q)\) (which contains a smallest element) we construct a \({\preccurlyeq}_Q\)-ultra\-metric satisfying the conditions of Theorem~\ref{t4.3}. Using this result, in Proposition~\ref{p4.8} we find a metric \(d^{*}\), defined on a set \(X\) with \(|X| = 2^{\aleph_{0}}\), such that \(d^{*}\) is not combinatorially similar to any ultrametric but, for every countable \(X_1 \subseteq X\), the restriction of \(d^{*}\) to \(X_1\) is combinatorially similar to an ultrametric.
The mappings which are combinatorially similar to \({\preccurlyeq}_Q\)-pseudo\-ultra\-metrics are described in Theorems~\ref{t4.11}, \ref{t4.15} and \ref{t4.19} for the case of totally ordered \((Q, {\preccurlyeq}_Q)\) satisfying the distinct universal and topological restrictions.
The final results of the paper, Theorem~\ref{t4.20} and Corollary~\ref{c4.22}, give a kind of necessary and sufficient conditions under which a given mapping is combinatorially similar to a pseudoultrametric or, respectively, to an ultrametric.
\section{Consistency with equivalence relations}
Let \(X\) be a set. A \emph{binary relation} on \(X\) is a subset of the Cartesian square \(X^{2}\). A relation \(R \subseteq X^{2}\) is an \emph{equivalence relation} on \(X\) if the following conditions hold for all \(x\), \(y\), \(z \in X\):
\begin{enumerate}
\item \(\<x, x> \in R\), the \emph{reflexive} property;
\item \((\<x, y> \in R) \Leftrightarrow (\<y, x> \in R)\), the \emph{symmetric} property;
\item \(((\<x, y> \in R) \text{ and } (\<y, z> \in R)) \Rightarrow (\<x, z> \in R)\), the \emph{transitive} property.
\end{enumerate}
Let \(R\) be an equivalence relation on \(X\). A mapping \(F \colon X^{2} \to X\) is \emph{consistent} with \(R\) if the implication
\begin{equation*}
\bigl(\<x_1, x_2> \in R \text{ and } \<x_3, x_4> \in R\bigr) \Rightarrow \bigl(\<F(x_1, x_3), F(x_2, x_4)> \in R\bigr)
\end{equation*}
is valid for all \(x_1\), \(x_2\), \(x_3\), \(x_4 \in X\) (see~\cite[p.~78]{KurMost}). Similarly, we will say that a mapping \(\Phi \colon X^{2} \to Y\) is \emph{strongly consistent} with \(R\) if the implication
\begin{equation}\label{e2.5}
\bigl(\<x_1, x_2> \in R \text{ and } \<x_3, x_4> \in R\bigr) \Rightarrow \bigl(\Phi(x_1, x_3) = \Phi(x_2, x_4)\bigr)
\end{equation}
is valid for all \(x_1\), \(x_2\), \(x_3\), \(x_4 \in X\).
\begin{remark}\label{r2.1}
Let \(R\) be an equivalence relation on a set \(X\). Then every mapping \(\Phi \colon X^{2} \to X\) that is strongly consistent with \(R\) is also consistent with \(R\). The converse statement holds if and only if \(R\) is the diagonal of \(X\),
\[
R = \Delta_{X} = \{\<x, x> \colon x \in X\}.
\]
\end{remark}
\begin{definition}\label{d2.5}
Let \(X\) be a nonempty set, let \(\Phi\) be a mapping with \(\dom \Phi = X^{2}\) and let \(a_0 \in \Phi(X^{2})\). The mapping \(\Phi\) is \(a_0\)-\emph{coherent} if \(\Phi\) is strongly consistent with the fiber
\[
\Phi^{-1}(a_0) := \{\<x, y> \colon \Phi(x, y) = a_0\}.
\]
\end{definition}
\begin{remark}\label{r2.3}
In particular, if \(\Phi\) is \(a_0\)-coherent, then \(\Phi^{-1}(a_0)\) is an equivalence relation on \(X\).
\end{remark}
The following proposition claims that the properties of being strongly consistent and of being coherent are invariant with respect to combinatorial similarities.
\begin{proposition}\label{p2.4}
Let \(X\), \(Y\) be nonempty sets, let \(\Phi\), \(\Psi\) be combinatorially similar mappings with \(\dom \Phi = X^{2}\) and \(\dom \Psi = Y^{2}\) and the commutative diagram
\begin{equation*}
\ctdiagram{
\def25{25}
\ctv 0,25:{X^{2}}
\ctv 100,25:{Y^{2}}
\ctv 0,-25:{\Phi(X^{2})}
\ctv 100,-25:{\Psi(Y^{2})}
\ctet 100,25,0,25:{g\otimes g}
\ctet 0,-25,100,-25:{f}
\ctel 0,25,0,-25:{\Phi}
\cter 100,25,100,-25:{\Psi}
}.
\end{equation*}
If \(\Phi\) is strongly consistent with an equivalence relation \(R_X\) on \(X\), then \(\Psi\) is strongly consistent with an equivalence relation \(R_Y\) on \(Y\) satisfying
\[
(\<x,y> \in R_Y) \Leftrightarrow (\<g(x), g(y)> \in R_X)
\]
for every \(\<x, y> \in Y^{2}\). In addition, if \(\Phi\) is \(a_0\)-coherent for \(a_0 \in \Phi(X^{2})\), then \(\Psi\) is \(f(a_0)\)-coherent.
\end{proposition}
The proof is straightforward and we omit it here.
Let \(X\) be a set and let \(R_1\) and \(R_2\) be binary relations on \(X\). Recall that a composition of binary relations \(R_1\) and \(R_2\) is a binary relation \(R_1 \circ R_2 \subseteq X^{2}\) for which \(\<x, y> \in R_1 \circ R_2\) holds if and only if there is \(z \in X\) such that \(\<x, z> \in R_1\) and \(\<z, y> \in R_2\).
Using the notion of binary relations composition we can reformulate Definition~\ref{d2.5} as follows.
\begin{proposition}\label{p2.7}
Let \(X\) be a nonempty set, let \(\Phi\) be a mapping with \(\dom \Phi = X^{2}\) and let \(a_0 \in \Phi(X^{2})\). Then \(\Phi\) is \(a_0\)-coherent if and only if the fiber \(R = \Phi^{-1}(a_0)\) is an equivalence relation on \(X\) and the equality
\begin{equation}\label{p2.7:e1}
\Phi^{-1}(b) = R \circ \Phi^{-1}(b) \circ R
\end{equation}
holds for every \(b \in \Phi(X^{2})\).
\end{proposition}
\begin{proof}
It suffices to show that \(\Phi\) is strongly consistent with \(R\) if and only if equality \eqref{p2.7:e1} holds for every \(b \in \Phi(X^{2})\). Let \(b \in \Phi(X^{2})\) and let \eqref{p2.7:e1} hold. Suppose \(\<x_1, x_3> \in X^{2}\) is such that
\[
\Phi(x_1, x_3) = b.
\]
If \(\<x_2, x_1> \in R\), \(\<x_1, x_3> \in \Phi^{-1}(b)\) and \(\<x_3, x_4> \in R\), then from the definition of the composition \(\circ\) we obtain
\[
\<x_2, x_4> \in R \circ \Phi^{-1}(b) \circ R
\]
that implies \(\<x_2, x_4> \in \Phi^{-1}(b)\) by equality~\eqref{p2.7:e1}. Thus, the implication~\eqref{e2.5} is valid.
Conversely, suppose that \(\Phi\) is strongly consistent with \(R\). Then~\eqref{e2.5} implies the inclusion
\begin{equation}\label{p2.7:e3}
R \circ \Phi^{-1}(b) \circ R \subseteq \Phi^{-1}(b)
\end{equation}
for every \(b \in \Phi(X^{2})\). Since \(R\) is reflexive, the converse inclusion is also valid. Equality~\eqref{p2.7:e1} follows.
\end{proof}
\begin{corollary}\label{c2.5}
Let \(X\) be a nonempty set, let \(\Phi\) be a symmetric mapping with \(\dom \Phi = X^{2}\) and let \(a_0 \in \Phi(X^{2})\). Suppose \(R := \Phi^{-1}(a_0)\) is an equivalence relation on \(X\). Then the following conditions are equivalent.
\begin{enumerate}
\item[\((i)\)] \(\Phi\) is \(a_0\)-coherent.
\item[\((ii)\)] \(\Phi^{-1}(b) = R \circ \Phi^{-1}(b) \circ R\) holds for every \(b \in \Phi(X^{2})\).
\item[\((iii)\)] \(\Phi^{-1}(b) = R \circ \Phi^{-1}(b)\) holds for every \(b \in \Phi(X^{2})\).
\item[\((iv)\)] \(\Phi^{-1}(b) = \Phi^{-1}(b) \circ R\) holds for every \(b \in \Phi(X^{2})\).
\item[\((v)\)] For every \(b \in \Phi(X^{2})\), at least one of the equalities
\[
\Phi^{-1}(b) = R \circ \Phi^{-1}(b), \quad \Phi^{-1}(b) = \Phi^{-1}(b) \circ R
\]
holds.
\end{enumerate}
\end{corollary}
\begin{proof}
In what follows, for every \(b \in \Phi(X^{2})\), we write \(R_b = \Phi^{-1}(b)\) and, for every \(A \subseteq X^{2}\), define the inverse binary relation \(A^{T}\) by the rule:
\begin{itemize}
\item the membership \(\<x, y> \in A^{T}\) holds if and only if \(\<y, x> \in A\).
\end{itemize}
Suppose \((v)\) is valid and we have
\begin{equation}\label{c2.5:e1}
R_b = R_b \circ R.
\end{equation}
It is trivial that a binary relation \(A\) is symmetric if and only if we have \(A^{T} = A\). Furthermore, the equality
\[
(C \circ B)^{T} = B^{T} \circ C^{T}
\]
holds for all binary relations \(B\) and \(C\) defined on the one and the same set (see, for example, \cite[p.~15]{How1976AP}). Consequently, from \eqref{c2.5:e1} it follows that
\begin{align*}
R_b &= (R_b)^{T} = (R_b \circ R)^{T} = R^{T} \circ R_b^{T} \\
& = R \circ R_b = R \circ (R_b \circ R) = R \circ R_b \circ R.
\end{align*}
Similarly, from \(R_b = R \circ R_b\) follows \(R_b = R \circ R_b \circ R\). Thus, the implication \((v) \Rightarrow (ii)\) is valid.
If \((ii)\) holds, then we have
\[
R_b = R \circ R_b \circ R
\]
for every \(b \in \Phi(X^{2})\). Since \(R\) is an equivalence relation, the equality \(R \circ R = R\) holds. Consequently,
\begin{align*}
R_b & = (R \circ R) \circ R_b \circ R = R \circ (R \circ R_b \circ R) = R \circ R_b.
\end{align*}
Thus, \((ii)\) implies \((iii)\). Analogously, \((ii)\) implies \((iv)\). The implications \((iii) \Rightarrow (v)\) and \((iv) \Rightarrow (v)\) are evidently valid. To complete the proof we recall that \((i)\) and \((ii)\) are equivalent by Proposition~\ref{p2.7}.
\end{proof}
Let \(X\) be a nonempty set and \(P = \{X_j \colon j \in J\}\) be a set of nonempty subsets of \(X\). Then \(P\) is a \emph{partition} of \(X\) with the blocks \(X_j\) if
\[
\bigcup_{j \in J} X_j = X
\]
and \(X_{j_1} \cap X_{j_2} = \varnothing\) holds for all distinct \(j_1\), \(j_2 \in J\).
There exists a well-known one-to-one correspondence between equivalence relations and partitions.
If \(R\) is an equivalence relation on \(X\), then an \emph{equivalence class} is a subset \([a]_R\) of \(X\) having the form
\begin{equation}\label{e1.1}
[a]_R = \{x \in X \colon \<x, a> \in R\}, \quad a \in X.
\end{equation}
The \emph{quotient set} of \(X\) w.r.t. \(R\) is the set of all equivalence classes \([a]_R\), \(a \in X\).
\begin{proposition}\label{p2.5}
Let \(X\) be a nonempty set. If \(P = \{X_j \colon j \in J\}\) is a partition of \(X\) and \(R_P\) is a binary relation on \(X\) defined as
\begin{itemize}
\item[] \(\<x, y> \in R_P\) if and only if \(\exists j \in J\) such that \(x \in X_j\) and \(y \in X_j\),
\end{itemize}
then \(R_P\) is an equivalence relation on \(X\) with the equivalence classes \(X_j\). Conversely, if \(R\) is an equivalence relation on \(X\), then the set \(P_R\) of all distinct equivalence classes \([a]_R\) is a partition of \(X\) with the blocks \([a]_R\).
\end{proposition}
For the proof, see, for example, \cite[Chapter~II, \S{}~5]{KurMost}.
\begin{lemma}[{\cite[p.~9]{Kel1975S}}]\label{l2.6}
Let \(X\) be a nonempty set. If \(R\) is an equivalence relation on \(X\) and \(P_R = \{X_j \colon j \in J\}\) is the corresponding partition of \(X\), then the equality
\begin{equation*}
R = \bigcup_{j \in J} X_j^2
\end{equation*}
holds.
\end{lemma}
For every partition \(P = \{X_j \colon j \in J\}\) of a nonempty set \(X\) we define a partition \(P \otimes P^1\) of \(X^{2}\) by the rule:
\begin{itemize}
\item A subset \(B\) of \(X^{2}\) is a block of \(P \otimes P^1\) if and only if either
\[
B = \bigcup_{j \in J} X_{j}^{2}
\]
or there are \emph{distinct} \(j_1\), \(j_2 \in J\) such that
\[
B = X_{j_1} \times X_{j_2}.
\]
\end{itemize}
\begin{definition}\label{d2.8}
Let \(X\) be a nonempty set and let \(P_1\) and \(P_2\) be partitions of \(X\). The partition \(P_{1}\) is \emph{finer} than the partition \(P_{2}\) if the inclusion
\[
[x]_{R_{P_1}} \subseteq [x]_{R_{P_2}}
\]
holds for every \(x \in X\), where \(R_{P_1}\) and \(R_{P_2}\) are equivalence relations corresponding to \(P_1\) and \(P_2\) respectively.
\end{definition}
If \(P_1\) is finer than \(P_2\), then we say that \(P_{1}\) is a \emph{refinement} of \(P_{2}\).
The following proposition gives us a new characterization of \(a_0\)-coherent mappings.
\begin{theorem}\label{p2.10}
Let \(X\) be a nonempty set, \(\Phi\) be a mapping with \(\dom \Phi= X^{2}\) and let \(a_0 \in \Phi(X^{2})\). Then \(\Phi\) is \(a_0\)-coherent if and only if the fiber
\[
R := \Phi^{-1}(a_0)
\]
is an equivalence relation on \(X\) and the partition \(P_R \otimes P_R^1\) of \(X^{2}\) is a refinement of the partition \(P_{\Phi^{-1}} := \{\Phi^{-1}(b) \colon b \in \Phi(X^{2})\}\), where \(P_R\) is a partition of \(X\) whose blocks are the equivalence classes of \(R\).
\end{theorem}
\begin{proof}
Let \(\Phi\) be \(a_0\)-coherent. Then, by Definition~\ref{d2.5}, \(R\) is an equivalence relation on \(X\). We claim that \(P_R \otimes P_R^{1}\) is a refinement of \(P_{\Phi^{-1}}\). It suffices to show that for every block \(B_0\) of \(P_R \otimes P_R^{1}\) there is \(b_0 \in \Phi(X^{2})\) such that
\begin{equation}\label{p2.10:e1}
B_0 \subseteq \Phi^{-1}(b_0).
\end{equation}
Suppose that
\begin{equation}\label{p2.10:e2}
B_0 = \bigcup_{j \in J} X_j^{2},
\end{equation}
where \(X_j\), \(j \in J\), are the blocks of the partition corresponding to the equivalence relation \(\Phi^{-1}(a_0)\) on \(X\). By Lemma~\ref{l2.6}, we have the equality
\[
\bigcup_{j \in J} X_j^{2} = \Phi^{-1}(a_0).
\]
The last equality and \eqref{p2.10:e2} imply \eqref{p2.10:e1} with \(b_0 = a_0\). If \(B_0\) is a block of \(P_R \otimes P_R^{1}\) but \eqref{p2.10:e2} does not hold, then there are two distinct \(j_1\), \(j_2 \in J\) such that
\begin{equation}\label{p2.10:e3}
B_0 = X_{j_1} \times X_{j_2}.
\end{equation}
Let \(x_1 \in X_{j_1}\) and \(x_2 \in X_{j_2}\) and let \(b_0 \in \Phi(X^{2})\) such that
\begin{equation}\label{p2.10:e4}
\<x_1, x_2> \in \Phi^{-1}(b_0).
\end{equation}
We must show that
\begin{equation}\label{p2.10:e5}
X_{j_1} \times X_{j_2} \subseteq \Phi^{-1}(b_0).
\end{equation}
It follows from Proposition~\ref{p2.7} and Lemma~\ref{l2.6} that
\begin{equation}\label{p2.10:e6}
\Phi^{-1}(b_0) = \left(\bigcup_{j \in J} X_j^{2}\right) \circ \Phi^{-1}(b_0) \circ \left(\bigcup_{j \in J} X_j^{2}\right)
\end{equation}
holds. Inclusion~\eqref{p2.10:e5} holds if, for every \(x \in X_{j_1}\) and \(y \in X_{j_2}\), we have
\[
\<x, y> \in \Phi^{-1}(b_0).
\]
Using \eqref{p2.10:e6} we obtain
\begin{equation}\label{p2.10:e7}
\Phi^{-1}(b_0) \supseteq X_{j_1}^{2} \circ \Phi^{-1}(b_0) \circ X_{j_2}^{2}.
\end{equation}
Since \(\<x, x_1> \in X_{j_1}^{2}\) and \(\<x_1, x_2> \in \Phi^{-1}(b_0)\) and \(\<x_2, y> \in X_{j_2}^{2}\), the definition of composition \(\circ\) and \eqref{p2.10:e7} imply \(\<x, y> \in \Phi^{-1}(b_0)\). Thus, \(P_R \otimes P_R^{1}\) is a refinement of \(P_{\Phi^{-1}}\) if \(\Phi\) is \(a_0\)-coherent.
Conversely, suppose that \(R = \Phi^{-1}(a_0)\) is an equivalence relation on \(X\) and \(P_R \otimes P_R^{1}\) is finer than \(P_{\Phi^{-1}}\). By Proposition~\ref{p2.7}, the mapping \(\Phi\) is \(a_0\)-coherent if and only if the equality
\[
R \circ \Phi^{-1}(b) \circ R = \Phi^{-1}(b)
\]
holds for every \(b \in \Phi(X^{2})\). The reflexivity of \(R\) implies that
\[
R \circ \Phi^{-1}(b) \circ R \supseteq \Phi^{-1}(b).
\]
Consequently, to complete the proof it suffices to show that
\begin{equation}\label{p2.10:e8}
R \circ \Phi^{-1}(b) \circ R \subseteq \Phi^{-1}(b)
\end{equation}
holds for every \(b \in \Phi(X^{2})\). Inclusion~\eqref{p2.10:e8} holds if and only if
\begin{equation}\label{p2.10:e9}
R \circ \{\<x, y>\} \circ R \subseteq \Phi^{-1}(b)
\end{equation}
holds for every \(\<x, y> \in \Phi^{-1}(b)\), where \(\{\<x, y>\}\) is the one-point subset of \(X^{2}\) consisting of the point \(\<x, y>\) only. A simple calculation shows that
\begin{equation}\label{p2.10:e10}
B = R \circ B \circ R
\end{equation}
holds for every block \(B\) of the partition \(P_R \otimes P_R^{1}\). Since \(P_R \otimes P_R^{1}\) is a refinement of \(P_{\Phi^{-1}}\), equality \eqref{p2.10:e10} implies \eqref{p2.10:e9} for \(\<x, y> \in B\).
\end{proof}
Let us consider now some examples.
\begin{proposition}\label{c2.2}
Let \(X\) be a nonempty set and let \(d \colon X^{2} \to \mathbb{R}^{+}\) be a pseudoultrametric on \(X\). Then \(d^{-1}(0)\) is an equivalence relation on \(X\) and \(d\) is \(0\)-coherent.
\end{proposition}
This proposition is a corollary of the corresponding result for pseudometrics \cite[Ch.~4, Th.~15]{Kel1975S}.
\begin{definition}\label{d2.9}
Let \((X_1, d_1)\) and \((X_2, d_2)\) be pseudoultrametric spaces. A bijection \(\Phi \colon X_1 \to X_2\) is a weak similarity if there is a strictly increasing bijective function \(f \colon d_1(X_{1}^{2}) \to d_2(X_{2}^{2})\) such that the equality
\begin{equation}\label{d2.9:e1}
d_1(x, y) = f(d_2(\Phi(x), \Phi(y)))
\end{equation}
holds for all \(x\), \(y \in X_1\).
\end{definition}
\begin{remark}\label{r2.10}
The weak similarities of semimetric spaces and ultrametric ones were studied in~\cite{DP2013AMH} and \cite{P2018(p-Adic)}. See also~\cite{KvL2014} and references therein for some results related to weak similarities of subsets of Euclidean finite-dimensional spaces.
\end{remark}
\begin{proposition}\label{p2.11}
Let \((X_1, d_1)\) and \((X_2, d_2)\) be pseudoultrametric spaces and \(\Phi \colon X_1 \to X_2\) be a weak similarity. Then \(\Phi\) is a combinatorial similarity for the pseudoultrametrics \(d_1\) and \(d_2\).
\end{proposition}
\begin{proof}
It follows directly from Definition~\ref{d2.9} and Definition~\ref{d2.17}.
\end{proof}
\section{Combinatorial similarity for generalized ultrametrics}
First of all, we recall a combinatorial characterization of arbitrary pseudometric.
\begin{theorem}[{\cite{Dov2019a}}]\label{ch2:p7}
Let \(X\) be a nonempty set. The following conditions are equivalent for every mapping \(\Phi\) with \(\dom\Phi = X^{2}\).
\begin{enumerate}
\item\label{ch2:p7:s1} \(\Phi\) is combinatorially similar to a pseudometric.
\item\label{ch2:p7:s2} \(\Phi\) is symmetric, and \(|\Phi(X^{2})| \leqslant 2^{\aleph_{0}}\), and there is \(a_0 \in \Phi(X^{2})\) such that \(\Phi\) is \(a_0\)-coherent.
\end{enumerate}
\end{theorem}
\begin{corollary}[{\cite{Dov2019a}}]\label{c3.2}
Let \(X\) be a nonempty set and let \(\Phi\) be a mapping with \(\dom\Phi = X^{2}\). Then \(\Phi\) is combinatorially similar to a metric if and only if \(\Phi\) is symmetric, and \(|\Phi(X^{2})| \leqslant 2^{\aleph_{0}}\), and there is \(a_0 \in \Phi(X^{2})\) such that \(\Phi^{-1}(a_0) = \Delta_{X}\), where \(\Delta_{X}\) is the diagonal of \(X\).
\end{corollary}
Consequently, if a mapping \(\Phi\), with \(\dom \Phi = X^{2}\), is combinatorially similar to a pseudoultrametric, then it satisfies condition~\ref{ch2:p7:s2} of Theorem~\ref{ch2:p7}.
Another necessary condition for combinatorial similarity of \(\Phi\) to a pseudoultrametric follows from the fact that
\begin{itemize}
\item all triangles are isosceles in every pseudoultrametric space.
\end{itemize}
This fact can be written in the following form.
\begin{lemma}\label{l3.1}
Let \(X\) be a nonempty set and let \(\Phi\) be a mapping with \(\dom \Phi = X^{2}\). If \(\Phi\) is combinatorially similar to a pseudoultrametric, then,
\begin{enumerate}
\item[\((i)\)] for every triple \(\<x_1, x_2, x_3>\) of points of \(X\), there is a permutation
\[
\begin{pmatrix}
x_1 & x_2 & x_3\\
x_{i_1} & x_{i_2} & x_{i_3}
\end{pmatrix}
\]
such that \(\Phi(x_{i_1}, x_{i_2}) = \Phi(x_{i_2}, x_{i_3})\).
\end{enumerate}
\end{lemma}
The following example shows that condition~\((i)\) is not sufficient for the existence of a pseudoultrametric \(d\) which is combinatorially similar to \(\Phi\), even if \(\Phi\) is a metric.
\begin{example}\label{ex3.4}
Let \(X = \{x_1, x_2, x_3, x_4\}\) and let \(\rho \colon X^{2} \to \mathbb{R}^{+}\) be a symmetric mapping defined as
\begin{equation}\label{ex3.4:e1}
\rho(x, y) = \begin{cases}
0 & \text{if } x = y,\\
\frac{\pi}{2} & \text{if } \{x, y\} = \{x_1, x_2\} \text{ or } \{x, y\} = \{x_2, x_3\},\\
\pi & \text{otherwise}.
\end{cases}
\end{equation}
It is easy to see that \(\rho\) is a metric on \(X\) such that every triangle is isosceles in \((X, \rho)\) (see Figure~\ref{fig1}). Suppose \(\rho\) is combinatorially similar to some pseudoultrametric \(d \colon Y^{2} \to \mathbb{R}^{+}\). Then, by Definition~\ref{d2.17}, there are bijections \(f \colon \rho(X^{2}) \to d(Y^{2})\) and \(g \colon Y \to X\) such that
\[
d(x, y) = f(\rho(g(x), g(y)))
\]
for all \(x\), \(y \in Y\). The last equality and \eqref{ex3.4:e1} imply
\[
d(g^{-1}(x_1), g^{-1}(x_2)) = d(g^{-1}(x_2), g^{-1}(x_3)) = f\left(\frac{\pi}{2}\right)
\]
and
\begin{equation*}
d(g^{-1}(x_1), g^{-1}(x_4)) = d(g^{-1}(x_4), g^{-1}(x_2)) = f\left(\pi\right).
\end{equation*}
Using these equalities and the strong triangle inequality (for the triples \(\<g^{-1}(x_1), g^{-1}(x_2), g^{-1}(x_3)>\) and \(\<g^{-1}(x_1), g^{-1}(x_4), g^{-1}(x_2)>\)) we obtain
\[
f\left(\frac{\pi}{2}\right) \geqslant f\left(\pi\right) \text{ and } f\left(\frac{\pi}{2}\right) \leqslant f\left(\pi\right).
\]
Thus, \(f\left(\frac{\pi}{2}\right) = f\left(\pi\right)\) holds, contrary to the bijectivity of \(f\).
\end{example}
\begin{figure}
\caption{The metric space \((X, \rho)\) is (up to isometry) a subspace of the metric space \(L\) consisting of the three rays \(\protect\overrightarrow{x_4x_1}\), \(\protect\overrightarrow{x_4x_2}\) and \(\protect\overrightarrow{x_4x_3}\).}
\label{fig1}
\end{figure}
We want to describe the mappings which are combinatorially similar to pseudoultrametrics. For this goal we recall some definitions.
Let \(\gamma\) be a binary relation on a set \(X\). We will write \(\gamma^{1} = \gamma\) and \(\gamma^{n+1} = \gamma^{n}\circ \gamma\) for every integer \(n \geqslant 1\). The \emph{transitive closure} \(\gamma^{t}\) of \(\gamma\) is the relation
\begin{equation}\label{e3.2}
\gamma^{t} := \bigcup_{n=1}^{\infty} \gamma^{n}.
\end{equation}
For every \(\beta \subseteq X^{2}\), the transitive closure \(\beta^{t}\) is transitive and the inclusion \(\beta \subseteq \beta^{t}\) holds. Moreover, if \(\tau \subseteq X^{2}\) is an arbitrary transitive binary relation for which \(\beta \subseteq \tau\), then we also have \(\beta^{t} \subseteq \tau\), i.e., \(\beta^{t}\) is the smallest transitive binary relation containing \(\beta\).
Recall that a reflexive and transitive binary relation \(\preccurlyeq_Y\) on a set \(Y\) is a \emph{partial order} on \(Y\) if, for all \(x\), \(y \in Y\), we have the \emph{antisymmetric property},
\[
\bigl(\<x, y> \in \preccurlyeq_Y \text{ and } \<y, x> \in \preccurlyeq_Y \bigr) \Rightarrow (x = y).
\]
In what follows we use the formula \(x \preccurlyeq y\) instead of \(\<x, y> \in \preccurlyeq\) and write \(x\prec y\) instead of
\[
x \preccurlyeq y \quad \text{and} \quad x \neq y.
\]
Let \(\preccurlyeq_Y\) be a partial order on a set \(Y\). The pair \((Y, \preccurlyeq_Y)\) is said to be a \emph{poset} (a partially ordered set). A poset \((Y, \preccurlyeq_Y)\) is \emph{linear} (= \emph{totally ordered}) if, for all \(y_1\), \(y_2 \in Y\), we have
\[
y_1 \preccurlyeq_Y y_2 \quad \text{or} \quad y_2 \preccurlyeq_Y y_1.
\]
\begin{definition}\label{d3.5}
Let \((Q, {\preccurlyeq}_Q)\) and \((L, {\preccurlyeq}_L)\) be posets. A mapping \(f \colon Q \to L\) is \emph{isotone} if, for all \(q_1\), \(q_2 \in Q\), we have
\[
(q_1 \preccurlyeq_Q q_2) \Rightarrow (f(q_1) \preccurlyeq_L f(q_2)).
\]
Let \(\Phi \colon X \to Y\) be an isotone mapping of posets \((X, {\preccurlyeq}_X)\) and \((Y, {\preccurlyeq}_Y)\). If \(\Phi\) is bijective and the inverse mapping \(\Phi^{-1} \colon Y \to X\) is also isotone, then we say that \((X, {\preccurlyeq}_X)\) and \((Y, {\preccurlyeq}_Y)\) are \emph{isomorphic} and \(\Phi\) is an (\emph{order}) \emph{isomorphism}.
\end{definition}
If \((Y, \preccurlyeq_Y)\) is a poset, and \(Y_1 \subseteq Y\), and \(\preccurlyeq_{Y_1}\) is a partial order on \(Y_1\) such that, for all \(x\), \(y \in Y_1\),
\[
(x \preccurlyeq_{Y_1} y) \Leftrightarrow (x \preccurlyeq_Y y),
\]
then we say that \((Y_1, \preccurlyeq_{Y_1})\) is a \emph{subposet} of the poset \((Y, \preccurlyeq_Y)\).
Write \(\mathbb{Q}^{+}\) for the set of all nonnegative rational numbers,
\[
\mathbb{Q}^{+} = \mathbb{Q} \cap [0, +\infty),
\]
and let \(\leqslant\) be the usual ordering on \(\mathbb{Q}^{+}\).
\begin{lemma}[Cantor]\label{l3.2}
Let \((X, \preccurlyeq_X)\) be a totally ordered set and let \(|X| \leqslant \aleph_{0}\) hold. Then \((X, \preccurlyeq_X)\) is isomorphic to a subposet of \((\mathbb{Q}^{+}, \leqslant)\).
\end{lemma}
The proof can be obtained directly from the classical Cantor's results (see, for example, \cite{Ros1982}, Chapter~2, Theorem~2.6 and Theorem~2.8).
We will also use the following Szpilrajn Theorem.
\begin{lemma}[Szpilrajn]\label{l3.3}
Let \((X, \preccurlyeq_X)\) be a poset. Then there is a linear order \(\preccurlyeq\) on \(X\) such that
\[
{\preccurlyeq_X} \subseteq {\preccurlyeq}.
\]
\end{lemma}
Informally speaking, this means that each partial order on a set can be extended to a linear order on the same set.
\begin{remark}\label{r3.9}
This result was obtained by Edward Szpilrajn in~\cite{Szp1930FM}. Interesting reviews of Szpilrajn-type theorems can be found in~\cite{And2009} and \cite{BP1982}.
\end{remark}
Let \(X\) be a nonempty set and let \(\Phi\) be a symmetric mapping with \(\dom\Phi = X^{2}\) and let \(Y := \Phi(X^{2})\). Let us define a binary relation \(u_{\Phi}\) by the rule: \(\<y_1, y_2> \in u_{\Phi}\) if and only if \(\<y_1, y_2> \in Y^{2}\) and there are \(x_1\), \(x_2\), \(x_3 \in X\) such that
\begin{equation}\label{e2.16}
y_1 = \Phi(x_1, x_3) \text{ and } y_2 = \Phi(x_1, x_2) = \Phi(x_2, x_3).
\end{equation}
\begin{example}\label{ex3.20}
Let \((X, d)\) be a nonempty ultrametric space. Recall that a subset \(B\) of \(X\) is a (closed) ball if there are \(x^{*} \in X\) and \(r^{*} \in \mathbb{R}^{+}\) such that
\[
B = \{x \in X \colon d(x, x^{*}) \leqslant r^{*}\}.
\]
The diameter of \(B\), we denote it by \(\diam(B)\), is defined as
\[
\diam(B) := \sup\{d(x,y) \colon x, y \in B\}.
\]
The following statements are equivalent for every \(\<r_1, r_2> \in \mathbb{R}^{+} \times \mathbb{R}^{+}\).
\begin{itemize}
\item \(\<r_1, r_2> \in u_d\).
\item There are some balls \(B_1\) and \(B_2\) in \((X, d)\) such that \(B_1 \subseteq B_2\), and \(r_1 = \diam(B_1)\), and \(r_2 = \diam(B_2)\).
\item There are some balls \(B_1\) and \(B_2\) in \((X, d)\) such that \(B_1 \cap B_2 \neq \varnothing\), and \(r_1 = \diam(B_1)\), \(r_2 = \diam(B_2)\), and \(r_1 \leqslant r_2\).
\end{itemize}
The interchangeability of these conditions is easy to justify using the known properties of balls in ultrametric spaces (see, for example, Proposition~1.2 and Proposition~1.6 in~\cite{Dov2019PNUAA}).
\end{example}
\begin{theorem}\label{t3.7}
Let \(X\) be a nonempty set and let \(\Phi\) be a mapping with \(\dom \Phi = X^{2}\) and \(|\Phi(X^{2})| \leqslant \aleph_{0}\). Then the following conditions are equivalent.
\begin{enumerate}
\item[\((i)\)] \(\Phi\) is combinatorially similar to a pseudoultrametric \(d \colon X^{2} \to \mathbb{R}^{+}\) with \(d(X^{2}) \subseteq \mathbb{Q}^{+}\).
\item[\((ii)\)] \(\Phi\) is combinatorially similar to a pseudoultrametric.
\item[\((iii)\)] The mapping \(\Phi\) is symmetric, and the transitive closure \(u_{\Phi}^{t}\) of the binary relation \(u_{\Phi}\) is antisymmetric, and \(\Phi\) is \(a_0\)-coherent for a point \(a_0 \in \Phi(X^{2})\), and, for every triple \(\<x_1, x_2, x_3>\) of points of \(X\), there is a permutation
\[
\begin{pmatrix}
x_1 & x_2 & x_3\\
x_{i_1} & x_{i_2} & x_{i_3}
\end{pmatrix}
\]
such that \(\Phi(x_{i_1}, x_{i_2}) = \Phi(x_{i_2}, x_{i_3})\).
\end{enumerate}
\end{theorem}
\begin{proof}
\((i) \Rightarrow (ii)\). This is trivially valid.
\((ii) \Rightarrow (iii)\). Suppose \(\Phi\) is combinatorially similar to a pseudoultrametric. Then \(\Phi\) also is combinatorially similar to a pseudometric. Consequently, by Theorem~\ref{ch2:p7}, \(\Phi\) is symmetric and there is \(a_0 \in \Phi(X^{2})\) such that \(\Phi\) is \(a_0\)-coherent. If \(\<x_1, x_2, x_3>\) is an arbitrary triple of points of \(X\), then, by Lemma~\ref{l3.1}, there is a permutation
\[
\begin{pmatrix}
x_1 & x_2 & x_3\\
x_{i_1} & x_{i_2} & x_{i_3}
\end{pmatrix}
\]
such that \(\Phi(x_{i_1}, x_{i_2}) = \Phi(x_{i_2}, x_{i_3})\). To complete the proof of validity of \((ii) \Rightarrow (iii)\) it suffices to show that the transitive closure \(u_{\Phi}^{t}\) of the binary relation
\[
u_{\Phi} \subseteq Y^{2}, \quad Y = \Phi(X^{2}),
\]
is antisymmetric. Suppose, on the contrary, that there are distinct \(y_1\), \(y_2 \in Y\) such that \(\<y_1, y_2> \in u_{\Phi}^{t}\) and \(\<y_2, y_1> \in u_{\Phi}^{t}\). The definition of the transitive closure (see \eqref{e3.2}) and the definition of the composition of binary relations imply that there are a positive integer \(n_1\) and some points
\[
y_{1}^{*},\ y_{2}^{*},\ \ldots,\ y_{n_1+1}^{*} \in Y
\]
with
\begin{equation}\label{t3.7:e2}
y_{1}^{*} = y_1 \quad \text{and} \quad y_{n_1+1}^{*} = y_2 \quad \text{and} \quad \<y_{i}^{*}, y_{i+1}^{*}> \in u_{\Phi}
\end{equation}
for \(i = 1\), \(\ldots\), \(n_1\). Since \(\Phi\) is combinatorially similar to a pseudoultrametric \(d \colon Z^2 \to \mathbb{R}^{+}\), there are bijections
\[
g \colon Z \to Y \text{ and } f \colon \Phi(X^{2}) \to d(Z^{2})
\]
satisfying
\[
d(z_1, z_2) = f(\Phi(g(z_1), g(z_2)))
\]
for all \(z_1\), \(z_2 \in Z\). Consequently,
\[
d(g^{-1}(x_1), g^{-1}(x_2)) = f(\Phi(x_1, x_2))
\]
holds for all \(x_1\), \(x_2 \in X\). As in Example~\ref{ex3.4}, the last equality, \eqref{e2.16}, \eqref{t3.7:e2}, and the strong triangle inequality imply
\[
f(y_1) = f(y_{1}^{*}) \leqslant f(y_{2}^{*}) \leqslant \ldots \leqslant f(y_{n_1+1}^{*}) = f(y_{2}).
\]
Thus, the inequality \(f(y_1) \leqslant f(y_{2})\) holds. Similarly, we can obtain the inequality \(f(y_2) \leqslant f(y_{1})\).
Consequently, the equality \(f(y_1) = f(y_{2})\) holds, that contradicts the bijectivity of \(f\).
\((iii) \Rightarrow (i)\). Suppose \(\Phi\) satisfies condition \((iii)\). Let us define a binary relation \({\preccurlyeq}\) on \(Y = \Phi(X^{2})\) as
\begin{equation}\label{t3.7:e3}
{\preccurlyeq} := u_{\Phi}^{t} \cup \Delta_{Y},
\end{equation}
where \(\Delta_{Y} = \{\<y, y> \colon y \in Y\}\). We claim that \({\preccurlyeq}\) is a partial order on \(Y\). Indeed, \eqref{t3.7:e3} implies that \({\preccurlyeq}\) is reflexive. By condition~\((iii)\), the transitive closure \(u_{\Phi}^{t}\) is antisymmetric. From this and \eqref{t3.7:e3} it follows that \({\preccurlyeq}\) is also antisymmetric. Moreover, using the transitivity of \(u_{\Phi}^{t}\) we obtain
\begin{align*}
(u_{\Phi}^{t} \cup \Delta_{Y})^2 &= (u_{\Phi}^{t} \circ u_{\Phi}^{t}) \cup (u_{\Phi}^{t} \circ \Delta_{Y}) \cup (\Delta_{Y} \circ u_{\Phi}^{t}) \cup (\Delta_{Y} \circ \Delta_{Y}) \\
& \subseteq u_{\Phi}^{t} \cup \Delta_{Y}.
\end{align*}
Consequently, \({\preccurlyeq}\) is transitive. Thus, \({\preccurlyeq}\) is a partial order as required.
By condition~\((iii)\), \(\Phi\) is \(a_0\)-coherent. We will show that \(a_0\) is the smallest element of the poset \((Y, {\preccurlyeq})\).
Let \(y_1\) be an arbitrary point of \(Y\). Then there are \(x_1\), \(x_2 \in X\) such that \(y_1 = \Phi(x_1, x_2)\). The mapping \(\Phi\) is symmetric. Thus,
\begin{equation}\label{t3.7:e4}
\Phi(x_1, x_2) = \Phi(x_2, x_1)
\end{equation}
holds. Since \(\Phi\) is \(a_0\)-coherent, we have
\begin{equation}\label{t3.7:e5}
\Phi(x_1, x_1) = a_0.
\end{equation}
Using~\eqref{t3.7:e4}, \eqref{t3.7:e5} and the definition of \(u_{\Phi}\) we obtain \(\<a_0, y_1> \in u_{\Phi}\) for every \(y_1 \in Y\), as required.
Write \(\preccurlyeq_{0}\) for the intersection of \({\preccurlyeq}\) with the set \(Y_0^{2}\), where
\[
Y_0 = \{y \in Y \colon y \neq a_0\}.
\]
Then \(\preccurlyeq_{0}\) is a partial order on the set \(Y_0\). By Lemma~\ref{l3.3}, there is a linear order \(\preccurlyeq^{*}\) on \(Y_0\) such that
\[
{\preccurlyeq_{0}} \subseteq {\preccurlyeq^{*}}.
\]
The inequality \(|Y| \leqslant \aleph_{0}\) implies \(|Y_0| \leqslant \aleph_{0}\). Using Lemma~\ref{l3.2} we can find an injective mapping \(f^{*} \colon Y \to \mathbb{Q}^{+}\) such that \(f^{*}(a_0) = 0\) and
\[
(y_1 \preccurlyeq^{*} y_2) \Leftrightarrow (f^{*}(y_1) \leqslant f^{*}(y_2))
\]
for all \(y_1\), \(y_2 \in Y\). Then the function \(d \colon X^{2} \to \mathbb{R}^{+}\),
\[
d(x_1, x_2) = f^{*}(\Phi(x_1, x_2)), \quad x_1, x_2 \in X,
\]
is a pseudoultrametric on \(X\) and \(d(X^{2}) \subseteq \mathbb{Q}^{+}\) holds. Since the function \(f^{*}\) is injective, the identical mapping \(X \xrightarrow{\operatorname{id}} X\) is a combinatorial similarity.
\end{proof}
Using Theorem~\ref{t3.7} and Corollary~\ref{c3.2} we also obtain the following.
\begin{corollary}\label{c3.8}
Let \(X\) be a nonempty set. The following conditions are equivalent for every mapping \(\Phi\) with \(\dom \Phi = X^{2}\) and \(|\Phi(X^{2})| \leqslant \aleph_{0}\).
\begin{enumerate}
\item[\((i)\)] \(\Phi\) is combinatorially similar to an ultrametric \(d \colon X^{2} \to \mathbb{R}^{+}\) satisfying the inclusion \(d(X^{2}) \subseteq \mathbb{Q}^{+}\).
\item[\((ii)\)] \(\Phi\) is combinatorially similar to an ultrametric.
\item[\((iii)\)] \(\Phi\) is symmetric, and the transitive closure \(u_{\Phi}^{t}\) of the binary relation \(u_{\Phi}\) is antisymmetric, and the equality
\[
\Phi^{-1}(a_0) = \Delta_{X}
\]
holds for some \(a_0 \in \Phi(X^{2})\), and, for every triple \(\<x_1, x_2, x_3>\) of points of \(X\), there is a permutation
\[
\begin{pmatrix}
x_1 & x_2 & x_3\\
x_{i_1} & x_{i_2} & x_{i_3}
\end{pmatrix}
\]
such that \(\Phi(x_{i_1}, x_{i_2}) = \Phi(x_{i_2}, x_{i_3})\).
\end{enumerate}
\end{corollary}
\begin{example}\label{ex3.9}
A four-point metric space \((X, d)\) is called a \emph{pseudolinear quadruple} (see~\cite{Blu1953CP} for instance) if, for a suitable enumeration of points of \(X\), we have
\begin{multline}\label{ex3.9:e1}
d(x_1, x_2) = d(x_3, x_4) = s, \quad d(x_2, x_3) = d(x_4, x_1) = t, \\
d(x_2, x_4) = d(x_3, x_1) = s + t,
\end{multline}
with some positive reals \(s\) and \(t\). For a pseudolinear quadruple \((X, d)\), Corollary~\ref{c3.8} implies that the metric \(d \colon X^{2} \to \mathbb{R}^{+}\) is combinatorially similar to an ultrametric if and only if \((X, d)\) is ``equilateral'', i.e., \eqref{ex3.9:e1} holds with \(s= t\) (see Figure~\ref{fig2}).
\begin{figure}
\caption{Each equilateral, pseudolinear quadruple is (up to similarity) a subspace \(\{x_1, x_2, x_3, x_4\}\) of a circle endowed with the geodesic metric.}
\label{fig2}
\end{figure}
\end{example}
\begin{remark}\label{r3.10}
The pseudolinear quadruples appeared for the first time in the paper of Menger~\cite{Men1928MA}. According to Menger, the pseudolinear quadruples are characterized as the metric spaces which are not isometric to any subset of \(\mathbb{R}\), but such that every triple of whose points embeds isometrically into \(\mathbb{R}\). There is also an elementary proof of this fact \cite{DD2009UMZ}. It is interesting to note that the equilateral, pseudolinear quadruples are the ``most non-Ptolemaic'' metric spaces~\cite{DP2011SMJ}.
\end{remark}
For what follows we need a specification of the concept of ultrametric distances introduced above in Definition~\ref{d1.3}.
\begin{definition}\label{d3.11}
Let \((Q, \preccurlyeq_Q)\) be a poset with a smallest element \(q_0\) and let \(X\) be a nonempty set. A mapping \(d \colon X^2 \to Q\) is a \(\preccurlyeq_Q\)-\emph{pseudo\-ultra\-metric} if \(d\) is symmetric and \(d(x, x) = q_0\) holds for every \(x \in X\) and, in addition, for every triple \(\<x_1, x_2, x_3>\) of points of \(X\), there is a permutation
\[
\begin{pmatrix}
x_1 & x_2 & x_3\\
x_{i_1} & x_{i_2} & x_{i_3}
\end{pmatrix}
\]
such that
\begin{equation}\label{d3.11:e1}
d(x_{i_1}, x_{i_3}) \preccurlyeq_Q d(x_{i_1}, x_{i_2}) \quad \text{and} \quad d(x_{i_1}, x_{i_2}) = d(x_{i_2}, x_{i_3}).
\end{equation}
For \(\preccurlyeq_{Q}\)-pseudoultrametric \(d\), satisfying \(d(x, y) = q_0\) if and only if \(x = y\), we say that \(d\) is a \(\preccurlyeq_{Q}\)-\emph{ultrametric}.
\end{definition}
If there is no ambiguity in the choice of the order \(\preccurlyeq_{Q}\) we write ``\(d\) is a \(Q\)-pseudoultrametric'' instead of ``\(d\) is a \(\preccurlyeq_Q\)-pseudoultrametric''.
\begin{remark}\label{r3.13}
It is easy to prove that every ultrametric is a \(\leqslant\)-ultra\-metric for \((\mathbb{R}^{+}, \leqslant)\). Moreover, every \({\preccurlyeq}_{Q}\)-ultrametric is an ultrametric distance with the same \((Q, {\preccurlyeq}_Q)\) but not conversely (see, in particular, Example~\ref{ex3.26} at the end of the present section). For all totally ordered sets \(Q\), the ultrametric distances coincide with \(Q\)-ultrametrics, and with generalized ultrametrics defined by Priess-Crampe \cite{Pri1990RiM}.
\end{remark}
The following proposition is an extension of Proposition~\ref{c2.2} for the case of arbitrary \(Q\)-pseudoultrametric.
\begin{proposition}\label{p3.12}
Let \(X\) be a nonempty set and \((Q, \preccurlyeq_Q)\) be a poset with the smallest element \(q_0\) and let \(d \colon X^2 \to Q\) be a \(Q\)-pseudo\-ultra\-metric on \(X\). Then \(d^{-1}(q_0)\) is an equivalence relation on \(X\) and the mapping \(d\) is \(q_0\)-coherent.
\end{proposition}
\begin{proof}
It follows directly from Definition~\ref{d3.11} that \(d^{-1}(q_0)\) is reflexive. To prove that \(d^{-1}(q_0)\) is symmetric it suffices to note that the mapping \(d \colon X^{2} \to Q\) is symmetric, because, for each mapping \(\Phi\) with
\[
\dom \Phi = X^{2},
\]
\(\Phi\) is symmetric if and only if \(\Phi^{-1}(b)\) is a symmetric binary relation for every \(b \in \Phi(X^{2})\). Thus, \(d^{-1}(q_0)\) is an equivalence relation if and only if \(d^{-1}(q_0)\) is transitive.
Let \(\<x_1, x_2>\) and \(\<x_2, x_3>\) belong to \(X^{2}\) and let
\begin{equation}\label{p3.12:e1}
d(x_1, x_2) = d(x_2, x_3) = q_0.
\end{equation}
We claim that \(d(x_1, x_3) = q_0\) holds. Indeed, by Definition~\ref{d3.11}, there is a permutation
\[
\begin{pmatrix}
x_1 & x_2 & x_3\\
x_{i_1} & x_{i_2} & x_{i_3}
\end{pmatrix}
\]
such that~\eqref{d3.11:e1} holds. From~\eqref{p3.12:e1} and \eqref{d3.11:e1} it follows that
\begin{equation}\label{p3.12:e2}
d(x_{i_1}, x_{i_2}) = d(x_{i_2}, x_{i_3}) = q_0.
\end{equation}
Using \eqref{d3.11:e1} again we see that \eqref{p3.12:e2} implies
\begin{equation}\label{p3.12:e3}
d(x_{i_1}, x_{i_3}) \preccurlyeq_Q q_0.
\end{equation}
Since \(q_0\) is the smallest element of \((Q, \preccurlyeq_Q)\), inequality \eqref{p3.12:e3} implies
\begin{equation}\label{p3.12:e4}
d(x_{i_1}, x_{i_3}) = q_0.
\end{equation}
The equality \(d(x_1, x_3) = q_0\) follows from \eqref{p3.12:e4} and \eqref{p3.12:e2}. Thus, \(d^{-1}(q_0)\) is transitive.
Now we need to prove that \(d\) is \(q_0\)-coherent. The mapping \(d\) is symmetric. Hence, by Corollary~\ref{c2.5}, it suffices to show that
\begin{equation}\label{p3.12:e5}
d^{-1}(q_1) = d^{-1}(q_1) \circ d^{-1}(q_0)
\end{equation}
for every \(q_1 \in d(X^{2})\). Let \(q_1 \in d(X^{2})\). We have
\[
d^{-1}(q_1) \subseteq d^{-1}(q_1) \circ d^{-1}(q_0),
\]
because \(d^{-1}(q_0)\) is reflexive. The converse inclusion
\begin{equation}\label{p3.12:e6}
d^{-1}(q_1) \supseteq d^{-1}(q_1) \circ d^{-1}(q_0)
\end{equation}
holds if and only if, for all \(x_1\), \(x_2\), \(x_3 \in X\), we have
\begin{equation}\label{p3.12:e7}
\<x_1, x_3> \in d^{-1}(q_1)
\end{equation}
whenever \(\<x_1, x_2> \in d^{-1}(q_1)\) and \(\<x_2, x_3> \in d^{-1}(q_0)\). If \(q_1 = q_0\), then \eqref{p3.12:e6} holds, since \(d^{-1}(q_0)\) is an equivalence relation. Suppose
\[
q_1 \neq q_0.
\]
Write \(q_2 := d(x_1, x_3)\). If \(q_2 = q_1\), then \eqref{p3.12:e7} follows from \(\<x_1, x_3> \in d^{-1}(q_2)\). Consequently, if \eqref{p3.12:e7} is false, then we have
\begin{equation}\label{p3.12:e8}
q_2 \neq q_1 \neq q_0.
\end{equation}
The equality \(q_2 = q_0\) implies
\begin{equation}\label{p3.12:e9}
\<x_1, x_3> \in d^{-1}(q_0),
\end{equation}
because \(d^{-1}(q_0)\) is transitive. From \eqref{p3.12:e9} and \eqref{p3.12:e7} follows \(q_0 = q_1\), contrary to \eqref{p3.12:e8}. Thus, \(q_0\), \(q_1\) and \(q_2\) are pairwise distinct, that contradicts~\eqref{d3.11:e1}.
\end{proof}
\begin{corollary}\label{c3.17}
Let \(X\) be a nonempty set and \((Q, \preccurlyeq_Q)\) be a poset and let \(d \colon X^{2} \to Q\) be a \(Q\)-pseudoultrametric (\(Q\)-ultrametric) on \(X\). Then the following statements are valid.
\begin{enumerate}
\item [\((i)\)] If \(|d(X^2)| \leqslant 2^{\aleph_{0}}\) holds, then \(d\) is combinatorially similar to a usual pseudometric (metric).
\item [\((ii)\)] If \(|d(X^2)| \leqslant \aleph_{0}\) holds, then \(d\) is combinatorially similar to a usual pseudoultrametric (ultrametric).
\end{enumerate}
\end{corollary}
\begin{proof}
Suppose first that \(d\) is a \(Q\)-pseudoultrametric.
\((i)\). If \(|d(X^2)| \leqslant 2^{\aleph_{0}}\) holds, then Definition~\ref{d3.11} and Proposition~\ref{p3.12} imply condition \((ii)\) of Theorem~\ref{ch2:p7}. Thus, \((i)\) is valid by Theorem~\ref{ch2:p7}.
\((ii)\). Analogously, using Definition~\ref{d3.11} we can show that condition \((iii)\) of Theorem~\ref{t3.7} is valid for \(\Phi = d\). Thus, \((ii)\) follows from Theorem~\ref{t3.7}.
The case when \(d\) is a \(Q\)-ultrametric can be considered similarly.
\end{proof}
The next theorem is a partial generalization of Theorem~\ref{t3.7}.
\begin{theorem}\label{t3.15}
Let \(X\) be a nonempty set and let \(\Phi\) be a mapping with \(\dom \Phi = X^{2}\). Then the following conditions are equivalent.
\begin{enumerate}
\item[\((i)\)] There is a totally ordered set \(Q\) such that \(\Phi\) is combinatorially similar to a \(Q\)-pseudoultrametric.
\item[\((ii)\)] There is a poset \(Q\) such that \(\Phi\) is combinatorially similar to a \(Q\)-pseudoultrametric.
\item[\((iii)\)] The mapping \(\Phi\) is symmetric, and the transitive closure \(u_{\Phi}^{t}\) of the binary relation \(u_{\Phi}\) is antisymmetric, and there is \(a_0 \in \Phi(X^{2})\) for which \(\Phi\) is \(a_0\)-coherent, and, for every triple \(\<x_1, x_2, x_3>\) of points of \(X\), there is a permutation
\[
\begin{pmatrix}
x_1 & x_2 & x_3\\
x_{i_1} & x_{i_2} & x_{i_3}
\end{pmatrix}
\]
such that \(\Phi(x_{i_1}, x_{i_2}) = \Phi(x_{i_2}, x_{i_3})\).
\item[\((iv)\)] There is \(b_0 \in \Phi(X^{2})\) such that \(\Phi(x,x) = b_0\) holds for every \(x \in X\), and the binary relation
\begin{equation}\label{t3.15:e1}
{\preccurlyeq}_{\Phi} := u_{\Phi}^{t} \cup \Delta_{\Phi(X^{2})}
\end{equation}
is a partial order on \(\Phi(X^{2})\), and \(b_0\) is the smallest element of \((\Phi(X^{2}), {\preccurlyeq}_{\Phi})\), and \(\Phi\) is a \( {\preccurlyeq}_{\Phi}\)-pseudo\-ultra\-metric on \(X\).
\end{enumerate}
\end{theorem}
\begin{proof}
The implication \((i) \Rightarrow (ii)\) is trivially valid. The validity of \((ii) \Rightarrow (iii)\) can be verified by repetition of the first part of the proof of Theorem~\ref{t3.7} with the replacement of the word ``Theorem~\ref{ch2:p7}'' by word ``Proposition~\ref{p3.12}''. It should be noted that Lemma~\ref{l3.1} remains valid if \(\Phi\) is combinatorially similar to an arbitrary \(Q\)-pseudoultrametric.
\((iii) \Rightarrow (iv)\). Let \((iii)\) hold. Then \(u_{\Phi}^{t}\) is antisymmetric and transitive. Consequently, the relation \({\preccurlyeq}_{\Phi}\) is reflexive, antisymmetric and transitive, i.e., \({\preccurlyeq}_{\Phi}\) is a partial order on \(\Phi(X^{2})\). Since \(\Phi\) is \(a_0\)-coherent, the equality \(\Phi(x,x) = a_0\) holds for every \(x \in X\).
The point \(a_0\) is the smallest element of \((\Phi(X^{2}), {\preccurlyeq}_{\Phi})\) if and only if the inequality
\begin{equation}\label{t3.15:e2}
a_0 \preccurlyeq_{\Phi} \Phi(x, y)
\end{equation}
holds for all \(x\), \(y \in X\). To prove \eqref{t3.15:e2} we consider the triple \(\<y, x, y>\) and note that \(\Phi(x, y) = \Phi(y,x)\). Consequently, \(\<\Phi(x,x), \Phi(x,y)>\) belongs to \(u_{\Phi}\). Now \eqref{t3.15:e2} follows from \eqref{t3.15:e1}.
By condition \((iii)\), \(\Phi(x,x) = a_0\) holds for every \(x \in X\) and, for every triple \(\<x_1, x_2, x_3>\) of points of \(X\), there is a permutation
\[
\begin{pmatrix}
x_1 & x_2 & x_3\\
x_{i_1} & x_{i_2} & x_{i_3}
\end{pmatrix}
\]
such that \(\Phi(x_{i_1}, x_{i_2}) = \Phi(x_{i_2}, x_{i_3})\). The mapping \(\Phi\) is symmetric. Hence, \(\Phi\) is a \({\preccurlyeq}_{\Phi}\)-pseudoultrametric on \(X\) as required.
\((iv) \Rightarrow (i)\). Let \((iv)\) hold. Then \(\Phi\) is a \({\preccurlyeq}_{\Phi}\)-pseudoultrametric. By Lemma~\ref{l3.3} (Szpilrajn) the partial order \({\preccurlyeq}_{\Phi}\) can be extended to a linear order \({\preccurlyeq}\) on \(\Phi(X^{2})\). It is easy to see that the smallest element \(a_0\) of \((\Phi(X^{2}), {\preccurlyeq}_{\Phi})\) is also the smallest element of \((\Phi(X^{2}), {\preccurlyeq})\). Thus, \(\Phi\) is also a \({\preccurlyeq}\)-pseudoultrametric. Condition \((i)\) follows.
\end{proof}
\begin{corollary}\label{c3.19}
Let \(X\) be a nonempty set and let \(\Phi\) be a mapping with \(\dom \Phi = X^{2}\). Then the following conditions are equivalent.
\begin{enumerate}
\item[\((i)\)] There is a totally ordered set \(Q\) such that \(\Phi\) is combinatorially similar to a \(Q\)-ultra\-metric.
\item[\((ii)\)] There is a poset \(Q\) such that \(\Phi\) is combinatorially similar to a \(Q\)-ultra\-metric.
\item[\((iii)\)] The mapping \(\Phi\) is symmetric, and the transitive closure \(u_{\Phi}^{t}\) of the binary relation \(u_{\Phi}\) is antisymmetric, and there is \(a_0 \in \Phi(X^{2})\) for which \(\Phi^{-1}(a_0) = \Delta_{X}\) holds, and, for every triple \(\<x_1, x_2, x_3>\) of points of \(X\), there is a permutation
\[
\begin{pmatrix}
x_1 & x_2 & x_3\\
x_{i_1} & x_{i_2} & x_{i_3}
\end{pmatrix}
\]
such that \(\Phi(x_{i_1}, x_{i_2}) = \Phi(x_{i_2}, x_{i_3})\).
\item[\((iv)\)] There is \(b_0 \in \Phi(X^{2})\) such that \(\Phi^{-1}(b_0) = \Delta_{X}\) holds, and the binary relation
\[
{\preccurlyeq}_{\Phi} := u_{\Phi}^{t} \cup \Delta_{\Phi(X^{2})}
\]
is a partial order on \(\Phi(X^{2})\), and \(b_0\) is the smallest element of \((\Phi(X^{2}), {\preccurlyeq}_{\Phi})\), and \(\Phi\) is a \( {\preccurlyeq}_{\Phi}\)-ultra\-metric on \(X\).
\end{enumerate}
\end{corollary}
The next corollary follows from Corollary~\ref{c3.8} and Corollary~\ref{c3.19}.
\begin{corollary}\label{c3.20}
Let \((Q, {\preccurlyeq}_{Q})\) be a poset with a smallest element, let \(X\) be a nonempty set and let \(d \colon X^{2} \to Q\) be an ultrametric distance in the sense of Priess-Crampe and Ribenboim. If the inequality \(|Q| \leqslant \aleph_{0}\) holds, then the following conditions are equivalent.
\begin{enumerate}
\item [\((i)\)] The mapping \(d\) is a \(Q\)-ultra\-metric.
\item [\((ii)\)] There is a usual ultrametric \(\rho \colon X^{2} \to \mathbb{R}^{+}\) such that \(d\) and \(\rho\) are combinatorially similar.
\end{enumerate}
\end{corollary}
The following proposition guarantees, for a given \(Q\)-pseudo\-ultra\-metric \(d\), the presence of the weakest (on \(Q\)) partial order at which \(d\) remains \(Q\)-pseudo\-ultra\-metric.
\begin{proposition}\label{p3.17}
Let \(X\) be a nonempty set, \((Q, {\preccurlyeq}_{Q})\) be a poset and let \(d \colon X^{2} \to Q\) be a \({\preccurlyeq}_{Q}\)-pseudoultrametric. Then there is a unique partial order \({\preccurlyeq}_{Q}^{0}\) on \(Q\) such that \(d\) is a \({\preccurlyeq}_{Q}^{0}\)-pseudoultrametric and the inclusion
\[
{\preccurlyeq}_{Q}^{0} \subseteq {\preccurlyeq}
\]
holds whenever \({\preccurlyeq}\) is a partial order on \(Q\) for which \(d\) is a \({\preccurlyeq}\)-pseudo\-ultrametric.
\end{proposition}
\begin{proof}
The uniqueness of \({\preccurlyeq}_{Q}^{0}\) satisfying the desirable conditions is clear. For the proof of existence of \({\preccurlyeq}_{Q}^{0}\), let \(\mathcal{F} = \{{\preccurlyeq}_i \colon i \in I\}\) be the family of all partial orders \({\preccurlyeq}_i\) on \(Q\) for which \(d\) is a \({\preccurlyeq}_i\)-pseudoultrametric. The family \(\mathcal{F}\) is non-void because \({\preccurlyeq}_{Q} \in \mathcal{F}\). Let us define a binary relation \({\preccurlyeq}_{Q}^{0}\) as the intersection of all \({\preccurlyeq}_i\), i.e., for \(p\), \(q \in Q\),
\[
(\<p,q> \in {\preccurlyeq}_{Q}^{0}) \Leftrightarrow (p \preccurlyeq_i q \text{ holds for every } i \in I).
\]
Then \({\preccurlyeq}_{Q}^{0}\) is a partial order on \(Q\). Since \(d\) is a \({\preccurlyeq}_{Q}\)-pseudoultrametric, the poset \((Q, {\preccurlyeq}_{Q})\) has a smallest element \(q_0\) by definition. It is easy to prove that \(q_0\) is the common smallest element of all posets \((Q, {\preccurlyeq}_i)\), \(i \in I\).
Indeed, since \(d\) is a \({\preccurlyeq}_{Q}\)-pseudoultrametric, we have \(d(x, x) = q_0\). In addition, since, for arbitrary \(i^{*} \in I\), the mapping \(d\) is a \({\preccurlyeq}_{i^*}\)-pseudo\-ultrametric, we also have
\[
d(x, x) = q_0^{*},
\]
where \(q_0^{*}\) is the smallest element of \((Q, {\preccurlyeq}_{i^*})\). That implies \(q_0^{*} = q_0\).
Consequently, \(q_0\) is the smallest element of \((Q, {\preccurlyeq}_{Q}^{0})\).
Hence, to prove that \(d\) is a \({\preccurlyeq}_{Q}^{0}\)-pseudoultrametric it suffices to show that for every triple \(\<x_1, x_2, x_3>\) of points of \(X\) there is a permutation
\[
\begin{pmatrix}
x_1 & x_2 & x_3\\
x_{i_1} & x_{i_2} & x_{i_3}
\end{pmatrix}
\]
such that
\begin{equation}\label{p3.17:e0}
d(x_{i_1}, x_{i_3}) \preccurlyeq_{Q}^{0} d(x_{i_1}, x_{i_2}) \text{ and } d(x_{i_1}, x_{i_2}) = d(x_{i_2}, x_{i_3}).
\end{equation}
Condition~\eqref{p3.17:e0} evidently holds if
\begin{equation}\label{p3.17:e1}
d(x_1, x_2) = d(x_2, x_3) = d(x_3, x_1).
\end{equation}
If \eqref{p3.17:e1} does not hold, then we may set, for definiteness, that
\begin{equation}\label{p3.17:e2}
d(x_1, x_2) = d(x_2, x_3) \neq d(x_1, x_3).
\end{equation}
(The case when \(d(x_1, x_2)\), \(d(x_2, x_3)\) and \(d(x_1, x_3)\) are pairwise distinct is impossible because \(d\) is a \({\preccurlyeq}_Q\)-pseudoultrametric.) Using \eqref{p3.17:e2} and \eqref{d3.11:e1} we obtain
\begin{equation}\label{p3.17:e3}
d(x_1, x_3) \preccurlyeq_i d(x_1, x_2) \text{ and } d(x_1, x_2) = d(x_2, x_3)
\end{equation}
for every \(i \in I\), that, together with the equality
\[
{\preccurlyeq}_{Q}^{0} = \bigcap_{i \in I} {\preccurlyeq}_{i},
\]
implies
\[
d(x_1, x_3) \preccurlyeq_{Q}^{0} d(x_1, x_2) \text{ and } d(x_1, x_2) = d(x_2, x_3). \qedhere
\]
\end{proof}
\begin{lemma}\label{l3.18}
Let \(X\) be a nonempty set, \((Q, {\preccurlyeq}_{Q})\) be a poset and let \(d \colon X^{2} \to Q\) be a \({\preccurlyeq}_{Q}\)-pseudo\-ultrametric with \(d(X^{2}) = Q\). Then the equality
\begin{equation}\label{l3.18:e1}
{\preccurlyeq}_{Q}^{0} = (u_d^t \cup \Delta_{Q})
\end{equation}
holds, where \(\Delta_{Q} := \{\<q,q> \colon q \in Q\}\).
\end{lemma}
\begin{proof}
As in the second part of the proof of Theorem~\ref{t3.7} we see that \(u_{d}^{t} \cup \Delta_{Q}\) is reflexive and transitive. Using \({\preccurlyeq}_{Q}\) instead of \(\leqslant\) and arguing as in the first part of that proof we obtain the antisymmetry of \(u_{d}^{t} \cup \Delta_{Q}\). Consequently, \(u_{d}^{t} \cup \Delta_{Q}\) is a partial order on \(Q\).
Let \({\preccurlyeq}\) be an arbitrary partial order on \(Q\) for which \(d\) is a \({\preccurlyeq}\)-pseudo\-ultrametric. Then, using Definition~\ref{d3.11} and the definition of \(u_d\), we see that
\[
u_{d} \subseteq {\preccurlyeq}.
\]
The last inclusion implies
\[
(u_{d}^{t} \cup \Delta_{Q}) \subseteq ({\preccurlyeq}^{t} \cup \Delta_{Q}) = {\preccurlyeq}.
\]
Consequently, \({\preccurlyeq}_{Q}^{0} \supseteq (u_{d}^{t} \cup \Delta_{Q})\) holds.
From the definition of the relation \(u_{d}\), Definition~\ref{d3.11} and the fact that \(d\) is \({\preccurlyeq}_{Q}\)-pseudo\-ultrametric it follows that \(d\) is a \((u_{d}^{t} \cup \Delta_{Q})\)-pseudo\-ultra\-metric. Thus, equality~\eqref{l3.18:e1} holds.
\end{proof}
\begin{remark}\label{r3.18}
Equality~\eqref{l3.18:e1} does not hold if \(d(X^{2}) \neq Q\). Indeed, if \(q_1 \in Q \setminus d(X^{2})\), then no pair of points of \(Q\) containing \(q_1\) belongs to \(u_d^t\), that implies
\[
\<q, q_1> \notin (u_d^t \cup \Delta_{Q})
\]
for every \(q \in Q \setminus \{q_1\}\). Consequently, the poset \((Q, u_d^t \cup \Delta_{Q})\) does not have any smallest element. The last statement contradicts \eqref{l3.18:e1}, because the smallest element \(q_0 \in d(X^{2})\) of \((Q, {\preccurlyeq}_Q)\) is also the smallest element of \((Q, {\preccurlyeq}_Q^{0})\).
\end{remark}
Results of the present section are based on the fact that, for all posets \((Q, {\preccurlyeq}_Q)\) and \((L, {\preccurlyeq}_L)\) with the smallest elements \(q_0 \in Q\) and \(l_0 \in L\), for every isotone injection \(f \colon Q \to L\) satisfying the condition \(f(q_0) = l_0\), and for each \(Q\)-pseudoultrametric \(d\), the mappings \(d\) and \(f \circ d\) are combinatorially similar. Moreover, in this case the transformation \(d \mapsto f \circ d\) converts the \(Q\)-pseudoultrametrics into \(L\)-pseudoultrametrics.
\begin{proposition}\label{p3.23}
Let \((Q, {\preccurlyeq}_Q)\) and \((L, {\preccurlyeq}_L)\) be posets with the smallest elements \(q_0 \in Q\) and \(l_0 \in L\). The following conditions are equivalent for every mapping \(f \colon Q \to L\).
\begin{enumerate}
\item [\((i)\)] \(f \circ d\) is an \(L\)-pseudoultrametric whenever \(d\) is a \(Q\)-pseudo\-ultra\-metric.
\item [\((ii)\)] \(f \circ d\) is an \(L\)-pseudoultrametric whenever \(d\) is a \(Q\)-ultra\-metric.
\item [\((iii)\)] \(f\) is isotone and \(f(q_0) = l_0\) holds.
\end{enumerate}
\end{proposition}
\begin{proof}
\((i) \Rightarrow (ii)\). This is evidently valid.
\((ii) \Rightarrow (iii)\). Suppose statement \((ii)\) is valid. Then, for every \(Q\)-ultrametric space \((X, d)\) and for every \(x \in X\), the equalities
\[
f(q_0) = f(d(x, x)) = l_0
\]
hold. Let \(q_1\), \(q_2 \in Q\) be such that \(q_1 \preccurlyeq_Q q_2\). We must prove the inequality
\begin{equation}\label{p3.23:e1}
f(q_1) \preccurlyeq_L f(q_2).
\end{equation}
This is trivial if \(f(q_1) = f(q_2)\). Suppose \(f(q_1) \neq f(q_2)\) and let \(X = \{x_1, x_2, x_3\}\). Let us define \(d \colon X^{2} \to Q\) as
\begin{equation}\label{p3.23:e2}
d(x_1, x_2) = d(x_2, x_3) = q_2, \quad \text{and} \quad d(x_1, x_3) = q_1,
\end{equation}
and \(d(x_1, x_1) = d(x_2, x_2) = d(x_3, x_3) = q_0\). Then \(d\) is a \(Q\)-ultrametric and \(f \circ d\) is an \(L\)-pseudo\-ultra\-metric. Inequality \eqref{p3.23:e1} follows from \(f(q_1) \neq f(q_2)\), \eqref{p3.23:e2} and \eqref{d3.11:e1}.
\((iii) \Rightarrow (i)\). The validity of this implication follows directly from the definition of isotone mappings and the definition of poset-valued pseudoultrametrics.
\end{proof}
\begin{corollary}\label{c3.24}
Let \((Q, {\preccurlyeq}_Q)\) and \((L, {\preccurlyeq}_L)\) be posets with the smallest elements \(q_0 \in Q\) and \(l_0 \in L\). Then the following conditions are equivalent for every mapping \(f \colon Q \to L\).
\begin{enumerate}
\item [\((i)\)] \(f \circ d\) is a \(L\)-ultrametric whenever \(d\) is a \(Q\)-ultrametric.
\item [\((ii)\)] \(f\) is isotone and the equivalence
\begin{equation}\label{c3.24:e1}
(f(q) = l_0) \Leftrightarrow (q = q_0)
\end{equation}
is valid for every \(q \in Q\).
\end{enumerate}
\end{corollary}
\begin{proof}
\((i) \Rightarrow (ii)\). Let \((i)\) hold. Then, by Proposition~\ref{p3.23}, \(f\) is isotone and \(f(q_0) = l_0\) holds. Thus, to prove \((ii)\) it suffices to show that \(f(q) = l_0\) implies \(q = q_0\). Suppose contrary that there is \(q_1 \in Q\) such that \(q_1 \neq q_0\) and \(f(q_1) = l_0\).
Let \(X\) be an arbitrary set with \(|X| \geqslant 2\). The function \(d \colon X^{2} \to Q\), defined as
\begin{equation}\label{c3.24:e2}
d(x, y) = \begin{cases}
q_0 & \text{if } x = y,\\
q_1 & \text{if } x \neq y,
\end{cases}
\end{equation}
is a \(Q\)-ultrametric on \(X\). The equalities \(f(q_0) = l_0\), \(f(q_1) = l_0\) and \eqref{c3.24:e2} imply \(f(d(x, y)) = l_0\) for all \(x\), \(y \in X\). Hence, \(f \circ d\) is not an \(L\)-ultra\-metric on \(X\), which contradicts condition~\((i)\).
\((ii) \Rightarrow (i)\). Suppose \((ii)\) holds, but there are a set \(X\) and a \(Q\)-ultra\-metric \(d \colon X^{2} \to Q\) such that \(f \circ d\) is not an \(L\)-ultrametric. Then we evidently have \(|X| \geqslant 2\). Moreover, Proposition~\ref{p3.23} implies that \(f \circ d\) is an \(L\)-pseudoultrametric. Consequently, there are \(x_1\), \(x_2 \in X\) such that \(x_1 \neq x_2\) and
\begin{equation}\label{c3.24:e3}
f(d(x_1, x_2)) = l_0.
\end{equation}
Since \(d\) is a \(Q\)-ultrametric,
\begin{equation}\label{c3.24:e4}
d(x_1, x_2) \neq q_0
\end{equation}
holds. From \eqref{c3.24:e3} and \eqref{c3.24:e4} it follows that \eqref{c3.24:e1} is false with \(q = d(x_1, x_2)\), contrary to condition \((ii)\).
\end{proof}
The following example shows that we cannot replace statement \((i)\) of Corollary~\ref{c3.24} by the statement
\begin{itemize}
\item \(f \circ d\) is an ultrametric distance w.r.t \((L, {\preccurlyeq}_L)\) whenever \(d\) is an ultrametric distance w.r.t \((Q, {\preccurlyeq}_Q)\)
\end{itemize}
leaving statement \((ii)\) unchanged.
\begin{example}\label{ex3.26}
Let \(P\) and \(Q\) be sets with \(|P| = |Q| \geqslant 4\), let \(q_0\) be a fixed point of \(Q\) and let \({\preccurlyeq}_{P}\) be a linear order on \(P\) with a smallest element \(p_0\). Let us define a binary relation \({\preccurlyeq}_{Q}\) on \(Q\) by the rule:
\begin{equation}\label{ex3.26:e1}
(\<q_1, q_2> \in {\preccurlyeq}_{Q}) \Leftrightarrow (q_1 = q_2 \text{ or } q_1 = q_0).
\end{equation}
Then \({\preccurlyeq}_{Q}\) is a partial order on \(Q\) and, for a set \(X = \{x_1, x_2, x_3\}\), a mapping \(d \colon X^{2} \to Q\) is an ultrametric distance w.r.t. \((Q, {\preccurlyeq}_{Q})\) if and only if \(d\) is symmetric and
\[
(d(x, y) = q_0) \Leftrightarrow (x = y)
\]
holds for all \(x\), \(y \in X\). Since \(|Q| \geqslant 4\) holds, there is an ultrametric distance \(d^{*} \colon X^{2} \to Q\) such that \(d^{*}(x_1, x_2)\), \(d^{*}(x_2, x_3)\), \(d^{*}(x_3, x_1)\) are pairwise distinct. It follows directly from \eqref{ex3.26:e1} and Definition~\ref{d3.5} that a function \(f \colon Q \to P\) is isotone if and only if \(f(q_0) = p_0\). Now, using the equality \(|P| = |Q|\) we can find an isotone bijection \(f^{*} \colon Q \to P\) such that
\[
(f^{*}(q) = p_0) \Leftrightarrow (q = q_0)
\]
is valid for every \(q \in Q\). Since \((P, {\preccurlyeq}_{P})\) is totally ordered, and \(f^{*}\) is bijective, and \(d^{*}(x_1, x_2)\), \(d^{*}(x_2, x_3)\), \(d^{*}(x_3, x_1)\) are pairwise distinct, we can find a permutation
\[
\begin{pmatrix}
x_1 & x_2 & x_3\\
x_{i_1} & x_{i_2} & x_{i_3}
\end{pmatrix}
\]
for which
\[
f^{*}(d^{*}(x_{i_1}, x_{i_2})) \prec_{P} f^{*}(d^{*}(x_{i_2}, x_{i_3})) \prec_{P} f^{*}(d^{*}(x_{i_1}, x_{i_3})).
\]
From Definition~\ref{d1.3} it follows that the mapping
\[
X^{2} \xrightarrow{d^{*}} Q \xrightarrow{f^{*}} P
\]
is not an ultrametric distance w.r.t. \((P, {\preccurlyeq}_{P})\).
\end{example}
\begin{remark}\label{r3.25}
For the case of standard ultrametrics and pseudoultrametrics Proposition~\ref{p3.23} and Corollary~\ref{c3.24} are known. In particular, Proposition~\ref{p3.23} is a generalization of Proposition~2.4 \cite{Dov2019v2} and, respectively, Corollary~\ref{c3.24} is a generalization of Theorem~9 \cite{PTAbAppAn2014}.
\end{remark}
\section{From weak similarities to combinatorial similarities and back}
Let us expand the notion of weak similarity to the case of poset-valued pseudoultrametrics.
\begin{definition}\label{d3.13}
Let \((Q_i, {\preccurlyeq}_{Q_i})\) be a poset, and \((X_i, d_{i})\) be a \(Q_i\)-pseudo\-ultra\-metric space, and let \(Y_i := d_{i}(X_i^2)\), \(i = 1\), \(2\). A bijection \(\Phi \colon X_1 \to X_2\) is a \emph{weak similarity} for \(d_1\) and \(d_2\) if there is an isomorphism \(f \colon Y_1 \to Y_2\) of the subposet \((Y_1, {\preccurlyeq}_{Y_1})\) of the poset \((Q_1, {\preccurlyeq}_{Q_1})\) and the subposet \((Y_2, {\preccurlyeq}_{Y_2})\) of the poset \((Q_2, {\preccurlyeq}_{Q_2})\) such that
\begin{equation}\label{d3.13:e1}
f(d_1(x, y)) = d_2(\Phi(x), \Phi(y))
\end{equation}
for all \(x\), \(y \in X_1\).
\end{definition}
\begin{remark}\label{r4.2}
For every totally ordered set \((P_1, \preccurlyeq_{P_1})\) and arbitrary poset \((P_2, \preccurlyeq_{P_2})\), every isotone bijection \(f \colon P_1 \to P_2\) is an isomorphism of \((P_1, \preccurlyeq_{P_1})\) and \((P_2, \preccurlyeq_{P_2})\). Thus, Definition~\ref{d2.9} and Definition~\ref{d3.13} are equivalent for the case when \((Q_1, \preccurlyeq_{Q_1})\) and \((Q_2, \preccurlyeq_{Q_2})\) coincide with \((\mathbb{R}^{+}, \leqslant)\).
\end{remark}
The following is a generalization of Proposition~\ref{p2.11}.
\begin{proposition}\label{p3.16}
Let \((Q_i, {\preccurlyeq}_{Q_i})\) be a poset and \((X_i, d_{i})\) be a \(Q_i\)-pseudoultrametric space, \(i=1\), \(2\). Then every weak similarity for \(d_1\) and \(d_2\) is a combinatorial similarity for \(d_{1}\) and \(d_{2}\).
\end{proposition}
\begin{proof}
The proposition can be directly derived from definitions. We just notice that if \(Y_1 := d_{1}(X_1^2)\) and \(Y_2 := d_{2}(X_2^2)\), and \(f \colon Y_1 \to Y_2\) is an isomorphism of the subposet \((Y_1, {\preccurlyeq}_{Y_1})\) of \((Q_1, {\preccurlyeq}_{Q_1})\) and the subposet \((Y_2, {\preccurlyeq}_{Y_2})\) of \((Q_2, {\preccurlyeq}_{Q_2})\), and~\eqref{d3.13:e1} holds for all \(x\), \(y \in X_1\), then we have \(q_2 = f(q_1)\), where \(q_i \in d_{i}(X_i^2)\) is the smallest element of \((Q_i, {\preccurlyeq}_{Q_i})\), \(i = 1\), \(2\), that agrees with Proposition~\ref{p3.12} and the second statement of Proposition~\ref{p2.4}.
\end{proof}
\begin{theorem}\label{t4.3}
Let \(X_i\) be a nonempty set and let \(\Phi_i\) be a mapping with \(\dom \Phi_i = X_i^{2}\), \(i = 1\), \(2\). Suppose
\begin{equation}\label{t4.3:e1}
{\preccurlyeq}_{1} := u_{\Phi_1}^{t} \cup \Delta_{\Phi_1(X_1^{2})} \quad \text{and} \quad {\preccurlyeq}_{2} := u_{\Phi_2}^{t} \cup \Delta_{\Phi_2(X_2^{2})}
\end{equation}
are partial orders on \(\Phi_1(X_1^{2})\) and, respectively, on \(\Phi_2(X_2^{2})\). If \(\Phi_i\) is a \({\preccurlyeq}_{i}\)-pseudoultrametric, \(i = 1\), \(2\), then the following conditions are equivalent for every mapping \(g \colon X_1 \to X_2\).
\begin{enumerate}
\item [\((i)\)] \(g\) is a weak similarity for \(\Phi_1\) and \(\Phi_2\).
\item [\((ii)\)] \(g\) is a combinatorial similarity for \(\Phi_1\) and \(\Phi_2\).
\end{enumerate}
\end{theorem}
\begin{proof}
Suppose \(\Phi_i\) is a \({\preccurlyeq}_{i}\)-pseudoultrametric, \(i = 1\), \(2\).
\((i) \Rightarrow (ii)\). This is valid by Proposition~\ref{p3.16}.
\((ii) \Rightarrow (i)\). Let \(g \colon X_1 \to X_2\) be a combinatorial similarity. We must prove that \(g\) is a weak similarity for \(\Phi_1\) and \(\Phi_2\). Since \(g\) is a combinatorial similarity, there is a bijection \(f \colon \Phi_2(X_2^2) \to \Phi_1(X_1^2)\) such that
\begin{equation}\label{t4.3:e2}
\Phi_1(x, y) = f(\Phi_2(g(x), g(y)))
\end{equation}
holds for all \(x\), \(y \in X_1\). In the correspondence with Definition~\ref{d3.13}, it suffices to show that \(f\) is an isomorphism of the posets \((\Phi_1(X_1^2), {\preccurlyeq}_1)\) and \((\Phi_2(X_2^2), {\preccurlyeq}_2)\). Using \eqref{t4.3:e1} we see that if
\begin{equation}\label{t4.3:e3}
\bigl(\<a, b> \in u_{\Phi_2}\bigr) \Leftrightarrow \bigl(\<f(a), f(b)> \in u_{\Phi_1}\bigr)
\end{equation}
is valid for all \(a\), \(b \in \Phi_2(X_2^2)\), then \(f\) is an isomorphism of these posets. Condition~\eqref{t4.3:e3} follows directly from \eqref{t4.3:e2} and the definitions of \(u_{\Phi_1}\) and \(u_{\Phi_2}\).
\end{proof}
\begin{corollary}\label{c4.3}
Let \(X\) and \(Y\) be nonempty sets and let \((Q, {\preccurlyeq}_{Q})\) and \((L, {\preccurlyeq}_{L})\) be posets. Suppose \(d_Q \colon X^2 \to Q\) and \(d_L \colon Y^2 \to L\) are a \(Q\)-pseudo\-ultrametric and an \(L\)-pseudoultrametric, respectively. If \(d_{Q}(X^{2}) = Q\), and \(d_{L}(Y^{2}) = L\), and \({\preccurlyeq}_{Q} = {\preccurlyeq}_{Q}^{0}\), and \({\preccurlyeq}_{L} = {\preccurlyeq}_{L}^{0}\), then the following conditions are equivalent for every mapping \(\Phi \colon X \to Y\).
\begin{enumerate}
\item [\((i)\)] \(\Phi\) is a weak similarity for \(d_Q\) and \(d_L\).
\item [\((ii)\)] \(\Phi\) is a combinatorial similarity for \(d_Q\) and \(d_L\).
\end{enumerate}
\end{corollary}
In what follows we will use the next modification of Corollary~\ref{c4.3}.
\begin{lemma}\label{l4.7}
Let \((Q, {\preccurlyeq}_{Q})\) be a totally ordered set and let \(d \colon Q^{2} \to Q\) be a \(Q\)-pseudoultrametric such that \(d(Q^{2}) = Q\) and \({\preccurlyeq}_{Q}^{0} = {\preccurlyeq}_{Q}\). Then, for every poset \((L, {\preccurlyeq}_{L})\) having a smallest element and for each \(L\)-pseudoultrametric \(d_L \colon X^{2} \to L\) defined on a nonempty set \(X\) and satisfying \(d_L(X^2) = L\), the following statement holds. If \(d_L\) is combinatorially similar to \(d\), then the corresponding combinatorial similarity is a weak similarity for \(d\) and \(d_L\).
\end{lemma}
\begin{proof}
Let \((L, {\preccurlyeq}_{L})\) be a poset with a smallest element and let \(d_L\) be an \(L\)-pseudoultrametric on a nonempty set \(X\) with \(d_L(X^2) = L\). Suppose \(d\) and \(d_L\) are combinatorially similar. Then there are bijections
\[
g \colon X \to Q \quad \text{and} \quad f \colon Q \to L
\]
such that the diagram
\begin{equation}\label{l4.7:e1}
\ctdiagram{
\ctv 0,25:{Q^{2}}
\ctv 100,25:{X^{2}}
\ctv 0,-25:{Q}
\ctv 100,-25:{L}
\ctet 100,25,0,25:{g\otimes g}
\ctet 0,-25,100,-25:{f}
\ctel 0,25,0,-25:{d}
\cter 100,25,100,-25:{d_L}
}
\end{equation}
is commutative. If \(f\) is an isomorphism of \((Q, {\preccurlyeq}_Q)\) and \((L, {\preccurlyeq}_L)\), then \(g\) is a weak similarity. Since \((Q, {\preccurlyeq}_Q)\) is totally ordered and \(f\) is bijective, to prove that \(f\) is an isomorphism it suffices to show that the implication
\begin{equation}\label{l4.7:e2}
(q_1 \preccurlyeq_Q q_2) \Rightarrow (f(q_1) \preccurlyeq_L f(q_2))
\end{equation}
is valid for all \(q_1\), \(q_2 \in Q\). The inclusion \({\preccurlyeq}_L^0 \subseteq {\preccurlyeq}_L\) (see Proposition~\ref{p3.17}) implies that \eqref{l4.7:e2} is valid if
\begin{equation}\label{l4.7:e3}
(q_1 \preccurlyeq_Q q_2) \Rightarrow (f(q_1) \preccurlyeq_L^0 f(q_2)).
\end{equation}
By Lemma~\ref{l3.18}, the equalities \(d(Q^2) = Q\) and \(d_L(X^2) = L\) imply
\begin{equation}\label{l4.7:e4}
{\preccurlyeq}_Q^0 = u_d^t \cup \Delta_{Q} \quad \text{and} \quad {\preccurlyeq}_L^0 = u_{d_L}^t \cup \Delta_{L}.
\end{equation}
Using \eqref{l4.7:e4} we see that \eqref{l4.7:e3} is valid whenever
\[
(\<q_1, q_2> \in u_d) \Rightarrow (\<f(q_1), f(q_2)> \in u_{d_L}),
\]
which follows directly from the commutativity of \eqref{l4.7:e1} and the definition of \(u_d\) and \(u_{d_L}\).
\end{proof}
\begin{proposition}\label{c4.4}
Let \((Q, {\preccurlyeq}_{Q})\) be a totally ordered set with a smallest element \(q_0\). Then there is a \({\preccurlyeq}_{Q}\)-ultrametric \(d \colon Q^2 \to Q\) such that
\[
d(Q^2) = Q \quad \text{and} \quad {\preccurlyeq}_{Q}^{0} = {\preccurlyeq}_{Q}.
\]
\end{proposition}
\begin{proof}
Let us define a mapping \(d \colon Q^{2} \to Q\) by the rule:
\begin{equation}\label{c4.4:e1}
d(p, q) := \begin{cases}
q_0 & \text{if } p=q,\\
p & \text{if } q \prec_{Q} p,\\
q & \text{if } p \prec_{Q} q.
\end{cases}
\end{equation}
It is clear that \(d\) is symmetric and the equality \(d(p, q) = q_0\) holds if and only if \(p=q\).
Now let \(\<q_1, q_2, q_3>\) be a triple of points of \(Q\). Suppose these points are pairwise different. Since \((Q, {\preccurlyeq}_{Q})\) is totally ordered, there is a permutation
\[
\begin{pmatrix}
q_1 & q_2 & q_3\\
q_{i_1} & q_{i_2} & q_{i_3}
\end{pmatrix}
\]
such that
\begin{equation}\label{c4.4:e3}
q_{i_1} \prec_{Q} q_{i_3} \prec_{Q} q_{i_2}.
\end{equation}
From \eqref{c4.4:e1} and \eqref{c4.4:e3} it follows that
\[
d(q_{i_1}, q_{i_3}) = q_{i_3} \prec_{Q} q_{i_2} = d(q_{i_1}, q_{i_2}) = d(q_{i_2}, q_{i_3}).
\]
Thus,
\begin{equation}\label{c4.4:e9}
d(q_{i_1}, q_{i_3}) \preccurlyeq_{Q} d(q_{i_1}, q_{i_2}) = d(q_{i_2}, q_{i_3})
\end{equation}
holds. Analogously, if the number of different points in \(\<q_1, q_2, q_3>\) is two, we can find a permutation such that \(q_{i_1} = q_{i_3} \neq q_{i_2}\). Hence,
\[
d(q_{i_1}, q_{i_3}) = q_{0} \prec_{Q} d(q_{i_1}, q_{i_2}) = d(q_{i_2}, q_{i_3}),
\]
that implies \eqref{c4.4:e9}. For the case when \(q_1 = q_2 = q_3\) holds, \eqref{c4.4:e9} is trivially valid for every permutation
\[
\begin{pmatrix}
q_1 & q_2 & q_3\\
q_{i_1} & q_{i_2} & q_{i_3}
\end{pmatrix}.
\]
Hence, \(d\) is a \({\preccurlyeq}_{Q}\)-ultrametric on \(Q\).
It follows from \eqref{c4.4:e1} that \(d(q_0, q) = q\) holds for every \(q \in Q\). Thus, we have
\begin{equation}\label{c4.4:e4}
d(Q^{2}) = Q.
\end{equation}
To complete the proof it suffices to show that
\begin{equation}\label{c4.4:e5}
{\preccurlyeq}_{Q}^{0} = {\preccurlyeq}_{Q}.
\end{equation}
By definition of \({\preccurlyeq}_{Q}^{0}\), equality \eqref{c4.4:e5} holds if
\begin{equation}\label{c4.4:e6}
{\preccurlyeq}_{Q}^{0} \supseteq {\preccurlyeq}_{Q}.
\end{equation}
Lemma~\ref{l3.18} and \eqref{c4.4:e4} imply the equality \({\preccurlyeq}_{Q}^{0} = (u_d^t \cup \Delta_{Q})\). Consequently, \eqref{c4.4:e6} is valid if and only if
\begin{equation}\label{c4.4:e7}
(u_d^t \cup \Delta_{Q}) \supseteq {\preccurlyeq}_{Q}.
\end{equation}
Let \(q_1\) and \(q_2\) be some points of \(Q\) and let \(q_1 \preccurlyeq_{Q} q_2\). If there is \(q_3 \in Q\) such that
\begin{equation}\label{c4.4:e8}
q_1 = d(q_1, q_3) \quad \text{and} \quad q_2 = d(q_1, q_2) = d(q_2, q_3),
\end{equation}
then \(\<q_1, q_2> \in u_d\) holds. If we set \(q_3\) equals to \(q_0\), the smallest element of \((Q, {\preccurlyeq}_{Q})\), then \eqref{c4.4:e8} follows from \(q_1 \preccurlyeq_{Q} q_2\) and \eqref{c4.4:e1}. Thus, the inclusion \(u_d \supseteq {\preccurlyeq}_{Q}\) holds, that implies \eqref{c4.4:e7}.
\end{proof}
\begin{remark}\label{r4.8}
If \(Q\) is finite, \(Q = \{0, 1, \ldots, n\}\), and \({\preccurlyeq}_Q = {\leqslant}\) hold, then the mapping \(d\) defined by \eqref{c4.4:e1} is an ultrametric on \(Q\) for which the ultrametric space \((Q, d)\) is ``as rigid as possible''. Some extremal properties of such spaces and related graph-theoretical characterizations were found in \cite{DPT(Howrigid)}.
\end{remark}
\begin{example}\label{ex4.6}
Let us denote by \(\mathbb{R}_{0}\) the Cartesian product of \(\mathbb{R}^{+}\) and the two-points set \(\{0, 1\}\), \(\mathbb{R}_{0} := \mathbb{R}^{+} \times \{0, 1\}\), and let \({\preccurlyeq}_{\mathbb{R}_{0}}\) be the \emph{lexicographical} order on \(\mathbb{R}_{0}\),
\begin{equation}\label{ex4.6:e1}
\bigl(\<a,b> \preccurlyeq_{\mathbb{R}_{0}} \<c,d>\bigr) \Leftrightarrow \bigl((a < c) \text{ or } (a = c \text{ and } b = 0 \text{ and } d = 1)\bigr),
\end{equation}
where \(\leqslant\) is the standard order on \(\mathbb{R}^{+}\). The poset \((\mathbb{R}_{0}, {\preccurlyeq}_{\mathbb{R}_{0}})\) is totally ordered. By Proposition~\ref{c4.4}, the mapping \(d \colon \mathbb{R}_{0}^{2} \to \mathbb{R}_{0}\), defined by formula~\eqref{c4.4:e1}, is a \({\preccurlyeq}_{\mathbb{R}_{0}}\)-ultrametric and
\begin{equation}\label{ex4.6:e2}
d(\mathbb{R}_{0}^{2}) = \mathbb{R}_{0} \quad \text{and} \quad {\preccurlyeq}_{\mathbb{R}_{0}}^{0} = {\preccurlyeq}_{\mathbb{R}_{0}}
\end{equation}
hold.
Suppose that there is an ultrametric space \((X, \rho)\) such that \(d\) and \(\rho\) are combinatorially similar. From the definition of combinatorial similarity it follows that there are bijections \(f \colon \rho(X^{2}) \to d(\mathbb{R}_{0}^{2})\) and \(g \colon \mathbb{R}_{0} \to X\) such that \(d(x, y) = f(\rho(g(x), g(y)))\) holds for all \(x\), \(y \in \mathbb{R}_{0}\). Let us consider now the poset \((\rho(X^{2}), {\preccurlyeq}_{\rho})\), where
\begin{equation}\label{ex4.6:e3}
{\preccurlyeq}_{\rho} := u_{\rho}^{t} \cup \Delta_{\rho(X^{2})}.
\end{equation}
By Theorem~\ref{t3.15}, \(\rho\) is a \({\preccurlyeq}_{\rho}\)-ultrametric on \(X\). Moreover, using Lemma~\ref{l3.18} and Theorem~\ref{t4.3} we obtain that \(g \colon \mathbb{R}_{0} \to X\) is a weak similarity for \(d\) and \(\rho\). Hence, \(f \colon \rho(X^{2}) \to \mathbb{R}_{0}\) is an isomorphism of \((\mathbb{R}_{0}, {\preccurlyeq}_{\mathbb{R}_{0}})\) and \((\rho(X^{2}), {\preccurlyeq}_{\rho})\). Proposition~\ref{p3.17}, Lemma~\ref{l3.18} and \eqref{ex4.6:e3} imply
\begin{equation}\label{ex4.6:e4}
(q_1 \prec_{\mathbb{R}_{0}} q_2) \Leftrightarrow (f^{-1}(q_1) < f^{-1}(q_2))
\end{equation}
for all \(q_1\), \(q_2 \in \mathbb{R}_{0}\).
Let us consider now the points
\[
q_i^x := \<x, i> \quad \text{and} \quad q_i^y := \<y, i>, \quad i = 0, 1, \quad x, y \in \mathbb{R}^{+}.
\]
It follows directly from \eqref{ex4.6:e1} that if \(x < y\), then
\[
q_0^x \prec_{\mathbb{R}_{0}} q_1^x \prec_{\mathbb{R}_{0}} q_0^y \prec_{\mathbb{R}_{0}} q_1^y.
\]
Consequently,
\begin{equation}\label{ex4.6:e5}
f^{-1}(q_0^x) < f^{-1}(q_1^x) < f^{-1}(q_0^y) < f^{-1}(q_1^y).
\end{equation}
Since \(\mathbb{Q}^{+} = \mathbb{R}^{+} \cap \mathbb{Q}\) is a dense subset of \(\mathbb{R}^{+}\), for every \(x \in \mathbb{R}^{+}\) there is \(p^x \in \mathbb{Q}^{+}\) such that
\begin{equation}\label{ex4.6:e6}
f^{-1}(q_0^x) < p^x < f^{-1}(q_1^x).
\end{equation}
From \eqref{ex4.6:e5} and \eqref{ex4.6:e6} it follows that the mapping
\[
\mathbb{R}^{+} \ni x \mapsto p^x \in \mathbb{Q}^{+}
\]
is injective, contrary to the equalities \(|\mathbb{R}^{+}| = 2^{\aleph_{0}}\) and \(|\mathbb{Q}^{+}| = \aleph_{0}\). Thus, there are no ultrametrics which are combinatorially similar to \(d\).
\end{example}
\begin{remark}\label{r4.9}
An interesting topological property of the poset \((\mathbb{R}_{0}, {\preccurlyeq}_{\mathbb{R}_{0}})\) was found by F.~S.~Cater~\cite{Cat1999/2000RAE}. We will return to it later in Theorem~\ref{t4.19}.
\end{remark}
Example~\ref{ex4.6} shows that, after replacing \(\aleph_{0}\) by \(2^{\aleph_{0}}\) and \(\mathbb{Q}^{+}\) by \(\mathbb{R}^{+}\), Theorem~\ref{t3.7} becomes false. In particular, we have the following proposition.
\begin{proposition}\label{p4.8}
Let \(X\) be a set with \(|X| = 2^{\aleph_{0}}\). Then there is a metric \(d^{*} \colon X^{2} \to \mathbb{R}^{+}\) such that:
\begin{enumerate}
\item [\((i)\)] If \(\rho\) is an arbitrary ultrametric, then \(\rho\) and \(d^{*}\) are not combinatorially similar;
\item [\((ii)\)] For every \(X_1 \subseteq X\) with \(|X_1| \leqslant \aleph_{0}\), the restriction \(d^{*}|_{X_1^2}\) of \(d^{*}\) is combinatorially similar to an ultrametric.
\end{enumerate}
\end{proposition}
\begin{proof}
Let \(d \colon \mathbb{R}_{0}^{2} \to \mathbb{R}_{0}\) be the \({\preccurlyeq}_{\mathbb{R}_{0}}\)-ultrametric defined in Example~\ref{ex4.6}. The equalities
\begin{equation}\label{p4.8:e1}
|X| = 2^{\aleph_{0}} \quad \text{and} \quad 2^{\aleph_{0}} = |\mathbb{R}_{0}|
\end{equation}
imply the existence of a bijection \(g \colon X \to \mathbb{R}_{0}\). Let \(d_1 \colon X^{2} \to \mathbb{R}_{0}\) be a \({\preccurlyeq}_{\mathbb{R}_{0}}\)-ultrametric defined as
\[
d_1(x, y) = d(g(x), g(y)), \quad x, y \in X.
\]
From~\eqref{p4.8:e1} it follows that \(|d_1(X^{2})| \leqslant 2^{\aleph_{0}}\). Consequently, by statement \((i)\) of Corollary~\ref{c3.17}, there is an usual metric \(d_2\) such that \(d_1\) and \(d_2\) are combinatorially similar. It follows directly from the definition of combinatorial similarity that there is a metric \(d^{*} \colon X^{2} \to \mathbb{R}^{+}\) which is combinatorially similar to \(d_2\). Thus, \(d^{*}\) and \(d\) are combinatorially similar.
It is easy to prove that \(d^*\) satisfies conditions \((i)\) and \((ii)\). Indeed, condition \((ii)\) follows from statement \((ii)\) of Corollary~\ref{c3.17}. Furthermore, it was shown in Example~\ref{ex4.6} that there are no ultrametrics which are combinatorially similar to \(d \colon \mathbb{R}_{0}^{2} \to \mathbb{R}_{0}\). Consequently, \((i)\) also holds.
\end{proof}
Let \((Q, {\preccurlyeq}_Q)\) be a totally ordered set, and let \(A\), \(B\) be nonempty subsets of \(Q\). We write \(A \prec_{Q} B\) when \(a \prec_{Q} b\) holds for all \(a \in A\) and \(b \in B\).
The sets \(A\) and \(B\) are \emph{neighboring} if \(A \prec_{Q} B\) or, respectively, \(B \prec_{Q} A\) and there is no \(q \in Q\) such that
\[
A \prec_{Q} \{q\} \quad \text{and} \quad \{q\} \prec_{Q} B
\]
or, respectively,
\[
B \prec_{Q} \{q\} \quad \text{and} \quad \{q\} \prec_{Q} A.
\]
\begin{definition}\label{d4.9}
A totally ordered set \(Q\) is an \(\eta_1\)-set if it has no neighboring subsets which both have a cardinality strictly less than \(\aleph_1\).
\end{definition}
Let \((Q, {\preccurlyeq}_Q)\) and \((L, {\preccurlyeq}_L)\) be posets. An injection \(f \colon Q \to L\) is an \emph{embedding} of \((Q, {\preccurlyeq}_Q)\) in \((L, {\preccurlyeq}_L)\) if
\[
\bigl(q_1 \preccurlyeq_Q q_2\bigr) \Leftrightarrow \bigl(f(q_1) \preccurlyeq_L f(q_2)\bigr)
\]
is valid for all \(q_1\), \(q_2 \in Q\).
A totally ordered set \(L\) is \(\aleph_1\)-\emph{universal} if every totally ordered set \(Q\) with \(|Q| \leqslant \aleph_1\) can be embedded into \(L\).
\begin{lemma}\label{l4.10}
Every \(\eta_1\)-set is \(\aleph_1\)-universal.
\end{lemma}
For the detailed proof of the lemma see, for example, Theorem~20 in~\cite{Ada2018}.
\begin{remark}\label{r4.11}
The above definition of \(\aleph_1\)-universal sets can be naturally extended to arbitrary infinite cardinal number \(\aleph\). The construction of \(\aleph\)-universal posets was studied by many mathematicians (see, for example, \cite{Joh1956PA, Hed1969JoA} and the references therein).
\end{remark}
In the proof of the following theorem we will use the Continuum Hypothesis.
\begin{theorem}\label{t4.11}
Let \(X\) be a nonempty set, let \(\Phi\) be a mapping with \(\dom \Phi = X^{2}\) and \(|\Phi(X^{2})| \leqslant 2^{\aleph_{0}}\), and let \((Q, {\preccurlyeq}_Q)\) be an \(\eta_1\)-set with a smallest element \(q_0\). Then the following conditions are equivalent.
\begin{enumerate}
\item[\((i)\)] \(\Phi\) is combinatorially similar to a \({\preccurlyeq}_Q\)-pseudoultrametric.
\item[\((ii)\)] The mapping \(\Phi\) is symmetric, and the transitive closure \(u_{\Phi}^{t}\) of the binary relation \(u_{\Phi}\) is antisymmetric, and \(\Phi\) is \(a_0\)-coherent for a point \(a_0 \in \Phi(X^{2})\), and, for every triple \(\<x_1, x_2, x_3>\) of points of \(X\), there is a permutation
\[
\begin{pmatrix}
x_1 & x_2 & x_3\\
x_{i_1} & x_{i_2} & x_{i_3}
\end{pmatrix}
\]
such that \(\Phi(x_{i_1}, x_{i_2}) = \Phi(x_{i_2}, x_{i_3})\).
\end{enumerate}
\end{theorem}
\begin{proof}
The validity of \((i) \Rightarrow (ii)\) follows from Theorem~\ref{t3.15}.
Suppose that \((ii)\) holds. Using Theorem~\ref{t3.15} we obtain that \(\Phi\) is a \({\preccurlyeq}_{\Phi}\)-pseudoultrametric for the partial order
\[
{\preccurlyeq}_{\Phi} := u_{\Phi}^{t} \cup \Delta_{\Phi(X^{2})}
\]
defined on \(\Phi(X^{2})\).
By Lemma~\ref{l3.3} (Szpilrajn), there is a linear order \({\preccurlyeq}_{1}\) on \(\Phi(X^{2})\) such that \({\preccurlyeq}_{\Phi} \subseteq {\preccurlyeq}_{1}\). Consequently, \(\Phi\) is also a \({\preccurlyeq}_{1}\)-pseudoultrametric. The inequality \(|\Phi(X^{2})| \leqslant 2^{\aleph_{0}}\) holds. The Continuum Hypothesis, \(2^{\aleph_{0}} = \aleph_1\), and the last inequality imply the inequality \(|\Phi(X^{2})| \leqslant \aleph_1\). By Lemma~\ref{l4.10}, the \(\eta_1\)-set \((Q, {\preccurlyeq}_Q)\) is \(\aleph_1\)-universal. It is easy to prove that there is an embedding \(f \colon \Phi(X^{2}) \to Q\) of \((\Phi(X^{2}), {\preccurlyeq}_{1})\) in \((Q, {\preccurlyeq}_{Q})\) such that \(f(a_0) = q_0\). Then the mapping
\[
X^2 \xrightarrow{\Phi} \Phi(X^{2}) \xrightarrow{f} Q
\]
is a \({\preccurlyeq}_{Q}\)-pseudoultrametric and this mapping is combinatorially similar to \(\Phi\).
\end{proof}
The following definition can be found in \cite[pp.~57--58]{Kel1975S}.
\begin{definition}\label{d4.13}
Let \((Q, {\preccurlyeq}_{Q})\) be a totally ordered set with \(|Q| > 1\). A topology \(\tau\) with a subbase consisting of all sets of the form
\[
\{q \in Q \colon q \prec_Q a\} \quad \text{or} \quad \{q \in Q \colon a \prec_Q q\}
\]
for some \(a \in Q\) is the order topology on \(Q\). In this case we say that \(\tau\) is the \({\preccurlyeq}_{Q}\)-topology for short.
\end{definition}
Recall that a topological space is second countable if it has a countable or finite base.
\begin{lemma}\label{l4.14}
Let \((Q, {\preccurlyeq}_{Q})\) be a totally ordered set with \(|Q| > 1\). Then the following conditions are equivalent.
\begin{enumerate}
\item [\((i)\)] The \({\preccurlyeq}_{Q}\)-topology is second countable.
\item [\((ii)\)] The poset \((Q, {\preccurlyeq}_{Q})\) is isomorphic to a subposet of \((\mathbb{R}^{+}, \leqslant)\).
\end{enumerate}
\end{lemma}
This lemma is a simple modification of Theorem~II from paper~\cite{Cat1999/2000RAE} of F.~S.~Cater.
\begin{theorem}\label{t4.15}
Let \((Q, {\preccurlyeq}_{Q})\) be a totally ordered set satisfying \(|Q| > 1\) and having the smallest element \(q_0\). Then the following conditions are equivalent.
\begin{enumerate}
\item [\((i)\)] The \({\preccurlyeq}_{Q}\)-topology is second countable.
\item [\((ii)\)] For every \({\preccurlyeq}_{Q}\)-pseudoultrametric \(d\) there is a pseudoultrametric \(\rho\) such that \(d\) and \(\rho\) are weakly similar.
\item [\((iii)\)] For every \({\preccurlyeq}_{Q}\)-pseudoultrametric \(d\) there is a pseudoultrametric \(\rho\) such that \(d\) and \(\rho\) are combinatorially similar.
\end{enumerate}
\end{theorem}
\begin{proof}
It is easy to see that \((i)\), \((ii)\) and \((iii)\) are equivalent if \(|Q| = 2\). Suppose \(|Q| \geqslant 3\) holds.
\((i) \Rightarrow (ii)\). Let the \({\preccurlyeq}_{Q}\)-topology be second countable, let \(X\) be a nonempty set and let \(d \colon X^{2} \to Q\) be a \({\preccurlyeq}_{Q}\)-pseudoultrametric. Write \(Q_0 := Q \setminus \{q_0\}\) and \({\preccurlyeq}_{Q_0} := Q_0^2 \cap {\preccurlyeq}_{Q}\). The inequality \(|Q| \geqslant 3\) implies \(|Q_0| > 1\). The \({\preccurlyeq}_{Q_0}\)-topology coincides with the topology induced on \(Q_0\) by \({\preccurlyeq}_{Q}\)-topology. Consequently, the \({\preccurlyeq}_{Q_0}\)-topology is also second countable. Hence, by Lemma~\ref{l4.14}, there is an isomorphism \(f \colon Q_0 \to A_0\) of the posets \((Q_0, {\preccurlyeq}_{Q_0})\) and \((A_0, \leqslant)\), where \(A_0 \subseteq (0, \infty)\) and \(\leqslant\) is the standard order on \(\mathbb{R}\). Write \(A := A_0 \cup \{0\}\). The function \(f^{*} \colon Q \to A\),
\[
f^{*}(q) = \begin{cases}
0 & \text{if } q = q_0,\\
f(q) & \text{if } q\neq q_0,
\end{cases}
\]
is an isomorphism of \((Q, {\preccurlyeq}_{Q})\) and \((A, \leqslant)\). Let \(\rho \colon X^{2} \to \mathbb{R}^{+}\) be defined as
\[
\rho(x, y) = f^{*}(d(x, y)), \quad x, y \in X.
\]
Then \(\rho\) is a pseudoultrametric on \(X\) and the identical mapping \hbox{\(X \xrightarrow{\operatorname{id}} X\)} is a weak similarity for \(d\) and \(\rho\).
\((ii) \Rightarrow (iii)\). The validity of this implication follows from Proposition~\ref{p3.16}.
\((iii) \Rightarrow (i)\). Suppose condition \((iii)\) holds. By Proposition~\ref{c4.4}, there is a \({\preccurlyeq}_{Q}\)-ultrametric \(d \colon Q^2 \to Q\) satisfying the equalities \(d(Q^2) = Q\) and \({\preccurlyeq}_{Q}^{0} = {\preccurlyeq}_{Q}\).
Let \(\rho \colon X^{2} \to \mathbb{R}^{+}\) be a pseudoultrametric such that \(\rho\) and \(d\) are combinatorially similar. Write \(L := \rho(X^2)\) and \({\preccurlyeq}_{L} := {\leqslant} \cap L^2\). Then the \(L\)-pseudoultrametric \(\rho_L \colon X^{2} \to L\),
\[
\rho_L(x, y) = \rho(x, y), \quad x, y \in X,
\]
is also combinatorially similar to \(d\). By Lemma~\ref{l4.7}, \(d\) and \(\rho_L\) are weakly similar. Using Definition~\ref{d3.13} we obtain that \((Q, {\preccurlyeq}_{Q})\) is isomorphic to the subposet \((L, {\preccurlyeq}_{L})\) of \((\mathbb{R}^{+}, {\leqslant})\). Hence, by Lemma~\ref{l4.14} (Cater), the \({\preccurlyeq}_{Q}\)-topology is second countable.
\end{proof}
Recall that a topological space \((X, \tau)\) is said to be separable if there is a set \(A \subseteq X\) such that \(|A| \leqslant \aleph_{0}\) and \(A \cap U \neq \varnothing\) for every nonempty set \(U \in \tau\).
In what follows we denote by \((\mathbb{R}_{0}, {\preccurlyeq}_{\mathbb{R}_{0}})\) the totally ordered set constructed in Example~\ref{ex4.6}.
The next lemma is a part of Theorem~III \cite{Cat1999/2000RAE}.
\begin{lemma}[Cater]\label{l4.18}
Let \((Q, {\preccurlyeq}_{Q})\) be a totally ordered set with \(|Q| > 1\). Then the following conditions are equivalent.
\begin{enumerate}
\item [\((i)\)] The \({\preccurlyeq}_{Q}\)-topology is separable.
\item [\((ii)\)] The poset \((Q, {\preccurlyeq}_{Q})\) is isomorphic to a subposet of \((\mathbb{R}_{0}, {\preccurlyeq}_{\mathbb{R}_{0}})\).
\end{enumerate}
\end{lemma}
\begin{theorem}\label{t4.19}
Let \((Q, {\preccurlyeq}_{Q})\) be a totally ordered set having a smallest element and satisfying the inequality \(|Q| > 1\). Then the following conditions are equivalent.
\begin{enumerate}
\item [\((i)\)] The \({\preccurlyeq}_{Q}\)-topology is separable.
\item [\((ii)\)] For every \({\preccurlyeq}_{Q}\)-pseudoultrametric \(d\) there is a \({\preccurlyeq}_{\mathbb{R}_{0}}\)-pseudo\-ultra\-metric \(\rho\) such that \(d\) and \(\rho\) are weakly similar.
\item [\((iii)\)] For every \({\preccurlyeq}_{Q}\)-pseudoultrametric \(d\) there is a \({\preccurlyeq}_{\mathbb{R}_{0}}\)-pseudo\-ultra\-metric \(\rho\) such that \(d\) and \(\rho\) are combinatorially similar.
\end{enumerate}
\end{theorem}
Using Lemma \ref{l4.18} instead of Lemma \ref{l4.14} we can prove this theorem similarly to Theorem~\ref{t4.15}.
The following theorem gives us some necessary and sufficient conditions under which a mapping is combinatorially similar to a pseudoultrametric, and it can be considered as a main result of the section.
\begin{theorem}\label{t4.20}
Let \(X\) be a nonempty set and let \(\Phi\) be a mapping with \(\dom \Phi = X^{2}\). Then the following conditions are equivalent.
\begin{enumerate}
\item [\((i)\)] \(\Phi\) is combinatorially similar to a pseudoultrametric.
\item [\((ii)\)] There is \(b_0 \in \Phi(X^{2})\) such that \(\Phi(x,x) = b_0\) holds for every \(x \in X\), and the binary relation
\begin{equation}\label{t4.20:e1}
{\preccurlyeq}_{\Phi} := u_{\Phi}^{t} \cup \Delta_{\Phi(X^{2})}
\end{equation}
is a partial order on \(\Phi(X^{2})\), and \(b_0\) is the smallest element of \((\Phi(X^{2}), {\preccurlyeq}_{\Phi})\), and \(\Phi\) is a \( {\preccurlyeq}_{\Phi}\)-pseudo\-ultra\-metric on \(X\), and there is a linear order \({\preccurlyeq}\) on \(\Phi(X^{2})\) such that
\begin{equation}\label{t4.20:e2}
{\preccurlyeq}_{\Phi} \subseteq {\preccurlyeq}
\end{equation}
holds, and \((\Phi(X^{2}), {\preccurlyeq})\) is isomorphic to a subposet of \((\mathbb{R}^{+}, \leqslant)\).
\item [\((iii)\)] The mapping \(\Phi\) is symmetric, and there is \(a_0 \in \Phi(X^{2})\) for which \(\Phi\) is \(a_0\)-coherent, and, for every triple \(\<x_1, x_2, x_3>\) of points of \(X\), there is a permutation
\[
\begin{pmatrix}
x_1 & x_2 & x_3\\
x_{i_1} & x_{i_2} & x_{i_3}
\end{pmatrix}
\]
such that \(\Phi(x_{i_1}, x_{i_2}) = \Phi(x_{i_2}, x_{i_3})\), and there is a linear order \({\preccurlyeq}\) on \(\Phi(X^{2})\) such that \(a_0\) is the smallest element of \((\Phi(X^{2}), {\preccurlyeq})\) and \(u_{\Phi} \subseteq {\preccurlyeq}\) holds, and \((\Phi(X^{2}), {\preccurlyeq})\) is isomorphic to a subposet of \((\mathbb{R}^{+}, \leqslant)\).
\end{enumerate}
\end{theorem}
\begin{proof}
\((i) \Rightarrow (ii)\). Let \((i)\) hold. Then using Theorem \ref{t3.15} we see that condition \((ii)\) is valid whenever there is a linear order \(\preccurlyeq\) on \(\Phi(X^{2})\) such that \eqref{t4.20:e2} holds and \((\Phi(X^{2}), {\preccurlyeq})\) is isomorphic to a subposet of \((\mathbb{R}^{+}, \leqslant)\).
By condition~\((i)\), there are a set \(Y\) and a pseudoultrametric \(\rho \colon Y^{2} \to \mathbb{R}^{+}\) such that \(\Phi\) and \(\rho\) are combinatorially similar. Write
\begin{equation}\label{t4.20:e3}
{\preccurlyeq}_{\rho} := u_{\rho}^{t} \cup \Delta_{\rho(Y^{2})}.
\end{equation}
From Lemma~\ref{l3.18} it follows that \(\rho\) is a \({\preccurlyeq}_{\rho}\)-pseudo\-ultra\-metric. Since \(\Phi\) and \(\rho\) are combinatorially similar, there exists a bijection \(g \colon X \to Y\) such that \(g\) is combinatorial similarity for \(\Phi\) and \(\rho\). Now using Theorem~\ref{t4.3}, and \eqref{t4.20:e1}, and \eqref{t4.20:e3} we see that \(g\) is a weak similarity for \(\Phi\) and \(\rho\). Consequently, there is an order isomorphism
\[
f \colon \Phi(X^{2}) \to \rho(Y^{2})
\]
of posets \((\Phi(X^{2}), {\preccurlyeq}_{\Phi})\) and \((\rho(Y^{2}), {\preccurlyeq}_{\rho})\). By Proposition~\ref{p3.17} and Lemma~\ref{l3.18}, we obtain that
\[
(\gamma_1 \preccurlyeq_{\rho} \gamma_2) \Rightarrow (\gamma_1 \leqslant \gamma_2)
\]
is valid for all \(\gamma_1\), \(\gamma_2 \in \rho(Y^{2})\).
Let us define a binary relation \(\preccurlyeq\) by the rule:
\[
(\<g_1, g_2> \in {\preccurlyeq}) \Leftrightarrow (\<g_1, g_2> \in \Phi(X^{2}) \times \Phi(X^{2}) \text{ and } (f(g_1) \leqslant f(g_2))).
\]
Then \({\preccurlyeq}\) is a linear order satisfying all the desired conditions.
\((ii) \Rightarrow (i)\). Suppose \((ii)\) holds. Then \(\Phi\) is a \({\preccurlyeq}_{\Phi}\)-pseudo\-ultra\-metric on \(X\) and there is an injection \(f \colon \Phi(X^{2}) \to \mathbb{R}^{+}\) such that
\[
(b_1 \preccurlyeq_{\Phi} b_2) \Rightarrow (f(b_1) \leqslant f(b_2))
\]
holds for all \(b_1\), \(b_2 \in \Phi(X^{2})\). Since \(b_0\) is the smallest element of the poset \((\Phi(X^{2}), {\preccurlyeq}_{\Phi})\), the function \(f^{*} \colon \Phi(X^{2}) \to \mathbb{R}^{+}\) defined as
\[
f^{*}(b) = f(b) - f(b_0)
\]
is nonnegative and isotone, and satisfies the condition
\[
(f^{*}(b) = 0) \Leftrightarrow (b = b_0)
\]
for every \(b \in \Phi(X^{2})\). Proposition~\ref{p3.23} implies that \(f^{*} \circ \Phi\) is a pseudoultrametric on \(X\). From Definition~\ref{d2.17} it directly follows that \(\Phi\) and \(f^{*} \circ \Phi\) are combinatorially similar.
The validity of the equivalence \((ii) \Leftrightarrow (iii)\) follows from Theorem~\ref{t3.15}. We only note that \(u_{\Phi}^{t}\) is antisymmetric if and only if there is a partial order \({\preccurlyeq}'\) such that \({\preccurlyeq}' \supseteq u_{\Phi}\).
\end{proof}
The proof of the following corollary is similar to the proof of Theorem~\ref{t4.20}.
\begin{corollary}\label{c4.22}
Let \(X\) be a nonempty set and let \(\Phi\) be a mapping with \(\dom \Phi = X^{2}\). Then the following conditions are equivalent.
\begin{enumerate}
\item [\((i)\)] \(\Phi\) is combinatorially similar to an ultrametric.
\item [\((ii)\)] There is \(b_0 \in \Phi(X^{2})\) such that \(\Phi^{-1}(b_0) = \Delta_{X}\), and the binary relation
\[
{\preccurlyeq}_{\Phi} := u_{\Phi}^{t} \cup \Delta_{\Phi(X^{2})}
\]
is a partial order on \(\Phi(X^{2})\), and \(b_0\) is the smallest element of \((\Phi(X^{2}), {\preccurlyeq}_{\Phi})\), and \(\Phi\) is a \( {\preccurlyeq}_{\Phi}\)-ultra\-metric on \(X\), and there is a linear order \({\preccurlyeq}\) on \(\Phi(X^{2})\) such that
\[
{\preccurlyeq}_{\Phi} \subseteq {\preccurlyeq}
\]
holds, and \((\Phi(X^{2}), {\preccurlyeq})\) is isomorphic to a subposet of \((\mathbb{R}^{+}, \leqslant)\).
\item [\((iii)\)] The mapping \(\Phi\) is symmetric, and there is \(a_0 \in \Phi(X^{2})\) for which \(\Phi^{-1}(a_0) = \Delta_{X}\) holds, and, for every triple \(\<x_1, x_2, x_3>\) of points of \(X\), there is a permutation
\[
\begin{pmatrix}
x_1 & x_2 & x_3\\
x_{i_1} & x_{i_2} & x_{i_3}
\end{pmatrix}
\]
such that \(\Phi(x_{i_1}, x_{i_2}) = \Phi(x_{i_2}, x_{i_3})\), and there is a linear order \({\preccurlyeq}\) on \(\Phi(X^{2})\) such that \(a_0\) is the smallest element of \((\Phi(X^{2}), {\preccurlyeq})\) and \(u_{\Phi} \subseteq {\preccurlyeq}\) holds, and \((\Phi(X^{2}), {\preccurlyeq})\) is isomorphic to a subposet of \((\mathbb{R}^{+}, \leqslant)\).
\end{enumerate}
\end{corollary}
In connection with Theorem \ref{t4.20} and Corollary \ref{c4.22}, the following problem naturally arises.
\begin{problem}\label{pr4.21}
Describe (up to order-isomorphism) the partially ordered sets \((Q, {\preccurlyeq}_{Q})\) which admit extensions to totally ordered sets \((Q, {\preccurlyeq})\) such that \((Q, {\preccurlyeq})\) is order-isomorphic to a subposet of \((\mathbb{R}^{+}, \leqslant)\).
\end{problem}
We do not discuss this problem in details but formulate the following conjecture.
\begin{conjecture}\label{con4.24}
The following conditions are equivalent.
\begin{enumerate}
\item [\((i)\)] A poset \((Q, {\preccurlyeq}_{Q})\) admits an extension to totally ordered set \((Q, {\preccurlyeq})\) such that \((Q, {\preccurlyeq})\) is order-isomorphic to a subposet of \((\mathbb{R}^{+}, \leqslant)\).
\item [\((ii)\)] The inequality \(|Q| \leqslant 2^{\aleph_{0}}\) holds and every totally ordered subposet of \((Q, {\preccurlyeq}_{Q})\) can be embedded into \((\mathbb{R}^{+}, \leqslant)\).
\end{enumerate}
\end{conjecture}
\end{document} |
\begin{document}
\title{Number of unique Edge-magic total labelings on Path $P_n$ }
\begin{abstract}
Edge-magic total labeling was introduced by \cite{GS}. The number of edge-magic solutions for cycles has been explored in \cite{BS}. This sequence is mentioned in the On-Line Encyclopedia of Integer Sequences (OEIS)~\cite{oeis}. In this short note, we enumerate the number of unique edge-magic total labelings on the path $P_n$.
\end{abstract}
\section{Introduction}
Edge-magic labeling (EMTL) has been studied in the past with an application towards communication networks. Given a simple undirected graph $G=(V,E)$, let $\lambda$ be a mapping from the numbers $1,2,\ldots, |V|+|E|$ to the vertices and edges of the graph $G$, such that each element has a unique label.
The weight of an edge is obtained as the sum of the labels of that edge and its two end vertices. An edge-magic total labeling is a labeling in which the weight of every edge is the same. This common weight is said to be the magic constant. Figure~\ref{fig:fig1} illustrates an example for a path of length 5 with a magic constant of 16.
\begin{figure}
\centering
% TODO(review): the \includegraphics for this figure appears to be missing; restore it.
\caption{An edge-magic total labeling of a path of length 5 with magic constant 16.}
\label{fig:fig1}
\end{figure}
The paper by Wallis et al.~\cite{wallis} describes the existence of edge-magic total labelings of many types of graphs including $P_n$. The aim of this note is to enumerate unique edge-magic total labelings of the path $P_n$.
\section{Main Results}
\label{mainresults}
Our results are summarized in the following Table.
\begin{table}
\centering
\begin{tabular}{||l|r||}
\hline\hline Path Length & Number of Solutions \\\hline
0 & 1 \\
1 & 3 \\
2 & 12 \\
3 & 28 \\
4 & 48 \\
5 & 240 \\
6 & 944 \\
7 & 5344 \\
8 & 23408 \\
9 & 133808 \\
10 & 751008 \\
11 & 5222768 \\
12 & 37898776 \\
13 & 292271304 \\\hline
\end{tabular}
\caption{\label{tab:counts}Number of Edge-Magic Total Labels.}
\end{table}
As far as we have seen, the series $1, 3, 12, 28, 48, 240, 944, 5344, 23408, \ldots$ does not appear in the OEIS.
\subsection{Method and Program}
We started with a simple Python program to obtain all edge-magic solutions of paths of lengths 2 to 17. A path of length 2 has three vertices and 2 edges, a total of 5 graph elements.
\begin{verbatim}
import itertools


def count_labelings(j):
    """Count edge-magic total labelings, up to reversal, of the path with
    j graph elements (j = 2n + 1: n + 1 vertices and n edges for a path of
    length n).

    A permutation a of 1..j is read alternately as vertex and edge labels
    along the path, so positions (i, i+1, i+2) for even i form an edge
    together with its two end vertices.  The labeling is edge-magic when
    every such triple has the same sum.  The guard a[0] < a[j-1] keeps
    exactly one labeling from each mirror-image pair.
    """
    count = 0
    for a in itertools.permutations(range(1, j + 1)):
        magic = a[0] + a[1] + a[2]  # weight of the first edge
        # Check the remaining edges; all() short-circuits on the first mismatch.
        if all(a[i] + a[i + 1] + a[i + 2] == magic for i in range(2, j - 2, 2)):
            if a[0] < a[j - 1]:  # skip the reversed duplicate
                count += 1
    return count


if __name__ == "__main__":
    # Odd element counts 5, 7, ..., 25 correspond to path lengths 2..12.
    for j in range(5, 27, 2):
        print(j, "\t", count_labelings(j))
\end{verbatim}
This program generated one permutation at a time, and checked for whether the assignment leads to an edge-magic labeling.
However, it is too slow and we could not compute past the path length of 7. We further optimized our Python code and utilized the bounds on the magic sum $k$, similar to the ones used in the paper~\cite{BS}.
Let $f(r)=\frac{r(r+1)}{2}$. Then
\[
\frac{f(2n+1)+f(n-1)}{n} \le k \le \frac{2f(2n+1)-f(n+2)}{n}.
\]
With this improvement and a short-circuit optimization, we are able to get up to a path length of 13. All our code is located at \url{https://github.com/allenlavoie/path-counting}.
We would like to point out that the total number of edge-magic solutions for paths forms an upper bound (albeit a weak one) on the total number of edge-magic solutions for cycles of the same length.
\end{document} |
\begin{document}
\title[Small feedback vertex sets in planar digraphs]{Small feedback
vertex sets\\ in planar digraphs}
\author{Louis Esperet}
\author{Laetitia Lemoine}
\author{Fr\'ed\'eric Maffray}
\address{Laboratoire G-SCOP (CNRS,
Univ. Grenoble-Alpes), Grenoble, France}
\email{\{louis.esperet,laetitia.lemoine,frederic.maffray\}@grenoble-inp.fr}
\thanks{The authors are partially supported by ANR Project Stint
(\textsc{anr-13-bs02-0007}), and LabEx PERSYVAL-Lab
(\textsc{anr-11-labx-0025}).}
\date{}
\sloppy
\begin{abstract}
Let $G$ be a directed planar graph on $n$ vertices, with no
directed cycle of length less than $g\geqslant 4$. We prove that $G$
contains a set $X$ of vertices such that $G-X$ has no directed
cycle, and $|X|\leqslant \tfrac{5n-5}9$ if $g=4$, $|X|\leqslant \tfrac{2n-5}4$
if $g=5$, and $|X|\leqslant \tfrac{2n-6}{g}$ if $g\geqslant 6$. This improves
recent results of Golowich and Rolnick.
\end{abstract}
\maketitle
A directed graph $G$ (or digraph, in short) is said to be
acyclic if it does not contain any directed cycle. The \emph{digirth} of a digraph $G$ is
the minimum length of a directed cycle in $G$ (if $G$ is acyclic, we
set its digirth to $+\infty$).
A \emph{feedback
vertex set} in a digraph $G$ is a set $X$ of vertices such that
$G-X$ is acyclic, and the minimum size of such a set is
denoted by $\tau(G)$. In this short note, we study the maximum
$f_g(n)$ of $\tau(G)$ over all planar digraphs $G$ on $n$
vertices with digirth $g$. Harutyunyan~\cite{Har11,HM15} conjectured
that $f_3(n)\leqslant \tfrac{2n}5$ for all $n$. This conjecture was recently
refuted by Knauer, Valicov and Wenger~\cite{KVW16} who showed that
$f_g(n)\geqslant \tfrac{n-1}{g-1}$ for all $g\geqslant 3$ and infinitely many values of
$n$. On the other hand, Golowich and Rolnick~\cite{GR15} recently
proved that $f_4(n)\leqslant \tfrac{7n}{12}$, $f_5(n)\leqslant \tfrac{8n}{15}$, and $f_g(n)\leqslant \tfrac{3n-6}{g}$
for all $g\geqslant 6$ and $n$. Harutyunyan and Mohar~\cite{HM15} proved
that the vertex set of every planar digraph of digirth at least 5 can
be partitioned into two acyclic subgraphs. This result was very recently extended to planar digraphs
of digirth 4 by Li and Mohar~\cite{LM16}, and therefore $f_4(n)\leqslant
\tfrac{n}2$.
This
short note is devoted to the following result, which improves all
the previous upper bounds for $g \geqslant 5$ (although the improvement for
$g=5$ is rather minor). Due to the very recent result of Li and Mohar~\cite{LM16}, our
result for $g=4$ is not best possible (however its proof is of
independent interest and might lead to further improvements).
\begin{thm}\label{th:main}
For all $n\geqslant 3$ we have $f_4(n)\leqslant \tfrac{5n-5}9$, $f_5(n)\leqslant
\tfrac{2n-5}{4}$ and for all $g\geqslant 6$, $f_g(n)\leqslant \tfrac{2n-6}{g}$.
\end{thm}
In a planar graph, the degree of a face $F$, denoted by $d(F)$, is the sum of
the lengths (number of edges) of the boundary walks of $F$. In the
proof of Theorem~\ref{th:main}, we will need the following two
simple lemmas.
\begin{lem}\label{lem:1}
Let $H$ be a planar bipartite graph, with bipartition $(U,V)$, such
that all faces of $H$ have degree at least 4, and all vertices of $V$ have
degree at least 2. Then $H$ contains at most $2|U|-4$ faces of degree at least
6.
\end{lem}
\begin{proof}
Assume that $H$ has $n$ vertices, $m$ edges, $f$ faces, and $f_6$
faces of degree at least 6. Let $N$ be the sum of the degrees of the
faces of $H$, plus twice the sum of the degrees of the vertices of
$V$. Observe that $N=4m$, so, by Euler's formula, $N\leqslant 4n+4f-8$. The sum of degrees
of the faces of $H$ is at least $4(f-f_6)+6f_6=4f+2f_6$, and since each vertex
of $V$ has degree at least 2, the sum of the degrees of the vertices
of $V$ is at least $2|V|$. Therefore, $4f+2f_6+4|V|\leqslant 4n+4f-8$. It
follows that $f_6\leqslant 2 |U|-4$, as desired.
\end{proof}
\begin{lem}\label{lem:2}
Let $G$ be a connected planar graph, and let $S=\{F_1,\ldots,F_k\}$ be a set of
$k$ faces of $G$, such that each $F_i$ is bounded by a cycle, and
these cycles are pairwise vertex-disjoint. Then $\sum_{F \not\in S}
(3d(F)-6)\geqslant \sum_{i=1}^k (3d(F_i)+6)-12$, where the first sum ranges
over faces $F$ of $G$ not contained in $S$.
\end{lem}
\begin{proof}
Let $n$, $m$, and $f$ denote the number of vertices, edges, and faces
of $G$, respectively. It follows from Euler's formula that the sum of $3d(F)-6$ over all
faces of $G$ is equal to $6m-6f=6n-12\geqslant 6\sum_{i=1}^k
d(F_i)-12$. Therefore,
$\sum_{F\not\in S} (3d(F)-6)\geqslant 6\sum_{i=1}^k
d(F_i)-12 - \sum_{i=1}^k (3d(F_i)-6)=\sum_{i=1}^k (3d(F_i)+6)-12$, as desired.
\end{proof}
We are now able to prove Theorem~\ref{th:main}.
\noindent \emph{Proof of Theorem~\ref{th:main}.}
We prove the result by induction on $n\geqslant 3$.
Let $G$ be a planar digraph with $n$ vertices and digirth $g\geqslant 4$. We can assume
without loss of generality
that $G$ has no multiple arcs, since $g\geqslant 4$ and removing one arc from a
collection of multiple arcs with the same orientation does not change
the value of $\tau(G)$. We can also assume that $G$ is connected,
since otherwise we can consider each connected component of $G$ separately
and the result clearly follows from the induction (since $g\geqslant 4$,
connected components of at most 2 vertices are acyclic and can thus be left
aside). Finally, we can assume that $G$ contains a directed cycle,
since otherwise $\tau(G)=0\leqslant
\min\{\tfrac{5n-5}9,\tfrac{2n-5}{4},\tfrac{2n-6}{g}\}$ (since $n\geqslant
3$).
Let $\mathcal{C}$
be a maximum collection of arc-disjoint directed cycles in $G$. Note
that $\mathcal{C}$ is non-empty.
Fix a planar embedding of $G$. For a given directed cycle $C$ of $\mathcal{C}$, we
denote by $\overline{C}$ the closed region bounded by $C$, and by
$\mathring{C}$ the interior of $\overline{C}$. It follows from classical uncrossing techniques (see~\cite{GW97}
for instance), that we can assume without loss of generality that
the directed cycles of $\mathcal{C}$ are pairwise non-crossing, i.e. for any two
elements $C_1,C_2 \in \mathcal{C}$, either $\mathring{C_1}$ and $\mathring{C_2}$
are disjoint, or one is contained in the other. We define the partial
order $\preceq$ on $\mathcal{C}$ as follows: $C_1 \preceq C_2$ if and only if
$\mathring{C_1}\subseteq \mathring{C_2}$. Note that $\preceq$ naturally defines a
rooted forest $\mathcal{F}$ with vertex set $\mathcal{C}$: the roots of each of the
components of $\mathcal{F}$ are the maximal elements of $\preceq$, and the children of
any given node $C\in \mathcal{F}$ are the maximal elements $C' \preceq C$
distinct from $C$ (the
fact that $\mathcal{F}$ is indeed a forest follows from the non-crossing
property of the elements of $\mathcal{C}$).
Consider a node $C$ of $\mathcal{F}$, and the children $C_1,\ldots,C_k$ of $C$
in $\mathcal{F}$. We define the closed region $\mathcal{R}_C=\overline{C}-\bigcup_{1\leqslant
i \leqslant k} \mathring{C_i}$. Let $\phi_C$ be the sum of $3d(F)-6$, over all faces $F$ of $G$ lying in
$\mathcal{R}_C$.
\begin{claim}\label{cl:1} Let $C_0$ be a node of $\mathcal{F}$ with children
$C_1,\ldots,C_k$. Then $\phi_{C_0}\geqslant \tfrac{3}2 (g-2)k+\tfrac32g$. Moreover, if
$g\geqslant 6$, then $\phi_{C_0}\geqslant \tfrac{3}2 (g-2)k+\tfrac32g+3$.
\end{claim}
Assume first that the cycles $C_0,\ldots,C_k$ are pairwise vertex-disjoint.
Then, it follows from Lemma~\ref{lem:2} that $\phi_{C_0}\geqslant
(k+1)(3g+6)-12$. Note that since $g\geqslant 4$, we have $(k+1)(3g+6)-12\geqslant
\tfrac{3}2 (g-2)k+\tfrac32g$. Moreover, if $g\geqslant 6$, $(k+1)(3g+6)-12\geqslant
\tfrac{3}2 (g-2)k+\tfrac32g+3$, as desired. As a
consequence, we can assume that two of the cycles $C_0,\ldots,C_k$
intersect, and in particular, $k\geqslant 1$.
Consider the following planar bipartite graph $H$: the vertices of the
first partite set of $H$ are the directed cycles
$C_0,C_1,\ldots,C_k$, the vertices of the second partite set of $H$
are the vertices of $G$ lying in at least two cycles among
$C_0,C_1,\ldots,C_k$, and there is an edge in $H$ between some cycle
$C_i$ and some vertex $v$ if and only if $v\in C_i$ in $G$ (see
Figure~\ref{fig:ex}). Observe
that $H$ has a natural planar embedding in which all internal faces
have degree at least 4. Since $k\geqslant 1$ and at least two of the
cycles $C_0,\ldots,C_k$
intersect, the outerface also has degree at least 4.
Note that the faces $F_1,\ldots,F_t$ of $H$ are in
one-to-one correspondence with the maximal subsets
$\mathcal{D}_1,\ldots,\mathcal{D}_t$ of $\mathcal{R}_{C_0}$ whose interior is connected. Also note that each face of $G\cap \mathcal{R}_{C_0}$ is in precisely
one region $\mathcal{D}_i$ and each arc of $\bigcup_{i=0}^{k} C_i$
(i.e. each arc on the boundary of $\mathcal{R}_{C_0}$) is on the boundary of
precisely one region $\mathcal{D}_i$. For each region $\mathcal{D}_i$, let $\ell_i$ be
the number of arcs on the boundary of $\mathcal{D}_i$, and observe that
$\sum_{i=1}^{t}\ell_i=\sum_{j=0}^{k} |C_j|$. Let $\phi_{\mathcal{D}_i}$ be the sum of $3d(F)-6$, over all
faces $F$ of $G$ lying in $\mathcal{D}_i$. It follows from Lemma~\ref{lem:2} (applied with $k=1$) that $\phi_{\mathcal{D}_i}\geqslant 3\ell_i-6$, and therefore
$\phi_{C_0}=\sum_{i=1}^{t}\phi_{\mathcal{D}_i}\geqslant \sum_{i=1}^{t}(3\ell_i-6)$.
\begin{figure}
\caption{The region $\mathcal{R}_{C_0}$.}
\label{fig:ex}
\end{figure}
A region
$\mathcal{D}_i$ with $\ell_i\geqslant 4$ is said to be of \emph{type 1}, and we set
$T_1=\{1\leqslant i\leqslant t \,|\, \mathcal{D}_i \mbox{ is of type 1}\}$.
Since for any
$\ell \geqslant 4$ we have $3\ell-6\geqslant \tfrac{3\ell}2$, it follows from the
paragraph above that the regions $\mathcal{D}_i$ of type 1 satisfy $\phi_{\mathcal{D}_i}\geqslant \tfrac{3\ell_i}2$. Let $\mathcal{D}_i$ be a region that is not of type 1. Since $G$ is simple,
$\ell_i=3$. Assume first that $\mathcal{D}_i$ is bounded by (parts of) two
directed cycles of $\mathcal{C}$ (in other words, $\mathcal{D}_i$ corresponds to a face of
degree four in the graph $H$). In this case we say that $\mathcal{D}_i$
is of \emph{type 2} and we set
$T_2=\{1\leqslant i\leqslant t \,|\, \mathcal{D}_i \mbox{ is of type 2}\}$. Then the boundary of $\mathcal{D}_i$
consists of two consecutive arcs $e_1,e_2$ of some directed cycle
$C^+$ of $\mathcal{C}$, and one arc $e_3$ of some
directed cycle $C^-$ of $\mathcal{C}$. Since $g\geqslant 4$, these three arcs do not form a
directed cycle, and therefore their orientation is transitive. It follows that
$|C^+|\geqslant g+1$, since otherwise the directed cycle obtained from $C^+$
by replacing $e_1,e_2$ with $e_3$ would have length $g-1$,
contradicting that $G$ has digirth at least $g$. Consequently,
$\sum_{i=0}^k |C_i|\geqslant (k+1)g+|T_2|$. If a region $\mathcal{D}_i$ is not of
type 1 or 2, then $\ell_i=3$ and each of the 3 arcs on the boundary of
$\mathcal{D}_i$ belongs to a different directed cycle of $\mathcal{C}$. In other words, $\mathcal{D}_i$
corresponds to some face of degree 6 in the graph $H$. Such a
region $\mathcal{D}_i$ is said to be of \emph{type 3}, and we set
$T_3=\{1\leqslant i\leqslant t \,|\, \mathcal{D}_i \mbox{ is of type 3}\}$. It follows from Lemma~\ref{lem:1} that the
number of faces of degree at least 6 in $H$ is at most
$2(k+1)-4$. Hence, we have $|T_3|\leqslant 2k-2$.
Using these bounds on
$|T_2|$ and $|T_3|$, together with the fact that for any $i\in T_2\cup
T_3$ we have $\phi_{\mathcal{D}_i} \geqslant 3\ell_i-6=3=\tfrac{3\ell_i}2 -\tfrac{3}2$, we
obtain:
obtain:
\begin{eqnarray*}
\phi_{C_0} & = & \sum_{i\in T_1} \phi_{\mathcal{D}_i} + \sum_{i\in T_2}
\phi_{\mathcal{D}_i} + \sum_{i\in T_3} \phi_{\mathcal{D}_i} \\
& \geqslant & \sum_{i=1}^t \tfrac{3\ell_i}2 - \tfrac32 |T_2| - \tfrac32
|T_3|\\
& \geqslant & \tfrac32\, \sum_{i=0}^k |C_i| - \tfrac32 |T_2| - \tfrac32
(2k-2)\\
& \geqslant & \tfrac32 (k+1)g-3k+3 \, = \, \tfrac32 (g-2)k+\tfrac32g+3,
\end{eqnarray*}
as desired. This concludes the proof of Claim~\ref{cl:1}.
$\Box$
Let $C_1,\ldots,C_{k_\infty}$ be the $k_\infty$ maximal elements of
$\preceq$.
We denote by $\mathcal{R}_\infty$ the closed
region obtained from the plane by removing $\bigcup_{i=1}^{k_\infty} \mathring{C_i}$. Note that each face of
$G$ lies in precisely one of the regions $\mathcal{R}_C$ ($C \in \mathcal{C}$) or $\mathcal{R}_\infty$.
Let $\phi_\infty$ be the sum of $3d(F)-6$, over all faces $F$ of $G$ lying in
$\mathcal{R}_\infty$. A proof similar to that of Claim~\ref{cl:1} shows that
$\phi_\infty\geqslant \tfrac32 k_\infty (g-2)+3$, and if $g\geqslant 6$, then $\phi_\infty\geqslant \tfrac32 k_\infty (g-2)+6$.
We now compute the sum $\phi$ of $3d(F)-6$ over all faces $F$ of
$G$. By Claim~\ref{cl:1},
\begin{eqnarray*}
\phi & = & \phi_\infty + \sum_{C \in \mathcal{F}} \phi_C\\
& \geqslant &\tfrac32 k_\infty (g-2)+3 + (|\mathcal{C}|-k_\infty) \tfrac32(g-2)+ |\mathcal{C}|
\cdot \tfrac32g \\
& \geqslant & (3g-3) |\mathcal{C}|+3.
\end{eqnarray*}
If $g\geqslant 6$, a similar computation gives $\phi \geqslant 3g |\mathcal{C}|+6 $.
On the other hand, it easily follows from Euler's formula that
$\phi=6n-12$. Therefore, $|\mathcal{C}|\leqslant \tfrac{2n-5}{g-1}$, and if $g\geqslant
6$, then $|\mathcal{C}|\leqslant \tfrac{2n-6}{g}$.
Let $A$ be a set of arcs of $G$ of minimum size such that $G-A$ is acyclic.
It follows from the Lucchesi-Younger theorem~\cite{LY78} (see
also~\cite{GR15}) that $|A|=|\mathcal{C}|$. Let $X$ be a set of vertices
covering the arcs of $A$, such that $X$ has minimum size. Then $G-X$
is acyclic. If $g=5$ we have $|X|\leqslant
|A|=|\mathcal{C}|\leqslant \tfrac{2n-5}{4}$ and if $g\geqslant 6$, we have $|X|\leqslant
|A|=|\mathcal{C}|\leqslant \tfrac{2n-6}{g}$, as desired. Assume now that $g=4$. In
this case $|A|=|\mathcal{C}|\leqslant \tfrac{2n-5}{3}$. It
was observed by Golowich and Rolnick~\cite{GR15} that $|X|\leqslant
\tfrac13(n+|A|)$ (which easily follows from the fact that any graph on
$n$ vertices and $m$ edges contains an independent set of size at
least $\tfrac{2n}3-\tfrac{m}3$),
and thus, $|X|\leqslant \tfrac{5n-5}{9}$.
This concludes the proof of Theorem~\ref{th:main}.
$\Box$
\section*{Final remark}
A natural problem is to determine the
precise value of $f_g(n)$, or at least its asymptotic value as $g$
tends to infinity. We
believe that $f_g(n)$ should be closer to the lower bound of $\tfrac{n-1}g$
than to our upper bound of $\tfrac{2n-6}g$.
For a digraph $G$, let $\tau^*(G)$ denote the infimum real number $x$ for which
there are weights in $[0,1]$ on each vertex of $G$, summing up
to $x$, such that for each directed cycle $C$, the sum of the
weights of the vertices lying on $C$ is at least $1$. Goemans and Williamson~\cite{GW97} conjectured that for
any planar digraph $G$, $\tau(G)\leqslant \tfrac32 \tau^*(G)$. If a planar
digraph $G$ on $n$ vertices has digirth at least $g$, then clearly $\tau^*(G)\leqslant
\tfrac{n}{g}$ (this can be seen by assigning weight $1/g$ to
each vertex). Therefore, a direct consequence of the conjecture of
Goemans and Williamson would be that $f_g(n)\leqslant \tfrac{3n}{2g}$.
\end{document} |
\begin{document}
\title{New Coresets for Projective Clustering and
Applications}
\begin{abstract}
$(j,k)$-projective clustering is the natural generalization of the family of $k$-clustering and $j$-subspace clustering problems. Given a set of points $P$ in $\mathbb{R}^d$, the goal is to find $k$ flats of dimension $j$, i.e., affine subspaces, that best fit $P$ under a given distance measure. In this paper, we propose the first algorithm that returns an $L_\infty$ coreset of size polynomial in $d$. Moreover, we give the first strong coreset construction for general $M$-estimator regression. Specifically, we show that our construction provides efficient coreset constructions for Cauchy, Welsch, Huber, Geman-McClure, Tukey, $L_1-L_2$, and Fair regression, as well as general concave and power-bounded loss functions. Finally, we provide experimental results based on real-world datasets, showing the efficacy of our approach.
\end{abstract}
\section{INTRODUCTION}
Coresets are often used in machine learning, data sciences, and statistics as a pre-processing dimensionality reduction technique to represent a large dataset with a significantly smaller amount of memory, thereby improving the efficiency of downstream algorithms in both running time and working space.
Intuitively, a coreset $C$ of a set $P$ of $n$ points in $\mathbb{R}^d$ is a smaller number of weighted representatives of $P$ that can be used to approximate the cost of any query from a set of a given queries.
Hence rather than optimizing some predetermined objective on $P$, it suffices to optimize the objective on $C$, which has significantly smaller dimension than $P$.
In this paper, we present coresets for projective clustering.
Projective clustering is an important family of clustering problems for applications in unsupervised learning~\citep{Procopiuc10}, data mining~\citep{AggarwalPWYP99,AggarwalY00}, computational biology~\citep{Procopiuc10}, database management~\citep{ChakrabartiM00}, and computer vision~\citep{ProcopiucJAM02}.
Given a set $P$ of $n$ points in $\mathbb{R}^d$, a parameter $z$ for the exponent of the distance, and a parameter $k$ for the number of flats of dimension $j$, the $(j,k)$-projective clustering problem is to find a set $\calF$ of $k$ $j$-flats that minimizes the sum of the distances of $P$ from $\calF$, i.e., $\min_{\calF}\sum_{\p\in P}\dist(\p,\calF)^z$, where $\dist(\p,\calF)^z$ denotes the $z$-th power of the Euclidean distance from $\p$ to the closest point in any flat in $\calF$.
We abuse notation by defining the projective clustering problem to be $\min_{\calF}\max_{\p\in P}\dist(\p,\calF)$ for $z=\infty$.
Projective clustering includes many well-studied problems such as the $k$-median clustering problem for $z=1$, $j=0$, $k\in\mathbb{Z}^+$, the $k$-means clustering problem for $z=2$, $j=0$, $k\in\mathbb{Z}^+$, the $k$-line clustering problem for $z\ge 0$, $j=1$, $k\in\mathbb{Z}^+$, the subspace approximation problem for $z\ge 0$, $j\in\mathbb{Z}^+$, $k=1$, the minimum enclosing ball problem for $z=\infty$, $j=0$, $k=1$, the $k$-center clustering problem for $z=\infty$, $j=0$, $k\in\mathbb{Z}^+$, the minimum enclosing cylinder problem for $z=\infty$, $j=1$, $k=1$, and the $k$-cylinder problem for $z=\infty$, $j=1$, $k\in\mathbb{Z}^+$.
\subsection{Related Work}
Finding the optimal set $C$ for projective clustering is known to be NP-hard~\cite{AloiseDHP09} and even finding a set with objective value that is within a factor of $1.0013$ of the optimal value is NP-hard~\cite{LeeSW17}.
\cite{ProcopiucJAM02} implemented a heuristics-based Monte Carlo algorithm for projective clustering while~\cite{Har-PeledV02} introduced a dimensionality reduction technique to decrease the size of each input point, which distorts the cost of the optimal projective clustering.
Similarly, \cite{KerberR15} used random projections to embed the input points into a lower dimensional space.
However, none of these approaches reduces the overall number of input points, which often causes the main bottleneck for implementing approximation algorithms for projective clustering in big data applications.
\cite{BadoiuHI02} first introduced coresets for the $k$-center and $k$-median clustering problems in Euclidean space.
Their coresets constructions gave $(1+\varepsilon)$-approximations and sampled a number of points with exponential dependency in both $\frac{1}{\varepsilon}$ and $k$.
Their work also inspired a number of coresets for specific projective clustering problems; coresets have subsequently been extensively studied in $k$-median or $k$-means clustering~\citep{BadoiuHI02,Har-PeledM04,FrahlingS05,FrahlingS08,Chen09,FeldmanS12,BravermanLUZ19,HuangV20}, subspace approximation~\citep{DeshpandeRVW06,DeshpandeV07,FeldmanL11,FeldmanMSW10,ClarksonW15,SohlerW18,FeldmanSS20, tukan2021no}, and a number of other geometric problems and applications~\citep{AgarwalHY06,FeldmanFS06,Clarkson08,DasguptaDHKM08,
AckermannB09,PhillipsT18,HuangJLW18,AssadiBBMS19,MunteanuSSW18,
BravermanDMMUWZ20,MussayOBZF20,maalouf2020tight,tukan2020coresets,tukan2021coresets,jubran2020sets,maalouf2021coresets}.
However, these coreset constructions were catered toward specific problems rather than the general $(j,k)$-projective clustering problem.
\cite{FeldmanL11} introduced a framework for constructing coresets by sampling each input point with probability proportional to its \emph{sensitivity}, which informally quantifies the importance of the point with respect to the predetermined objective function.
\cite{FeldmanL11} also performed dimensionality reduction for $(j,k)$-projective clustering by taking the union of two sets $\mathcal{S}$ and $\textrm{proj}(P,B)$, where $P$ is the input data set of size $n$.
Although the set $\mathcal{S}$ can have size $\poly(j,k,d)$, the set $\textrm{proj}(P,B)$ still has size $n$, so their resulting output can actually have \emph{larger} size than the original input.
The main point is that $\textrm{proj}(P,B)$ lies in a low-dimensional space, so their approach should be viewed as a dimensionality reduction technique to decrease the ambient dimension $d$ whereas our coreset construction decreases the input size $n$. \citep{ClarksonW15} suggested approximation algorithms based on matrix sketches for $(1,j)$-projective clustering problems with respect to a family of $M$-estimator functions, and~\citep{clarkson2019dimensionality} provided a tighter result for the $(1,j)$-projective clustering problem with respect to the Tukey loss function.
\citep{VaradarajanX12} proved upper bounds for the total sensitivity of the input points for a number of shape fitting problems, including the $k$-median, $k$-means, and $k$-line clustering problems, as well as an $L_1$ coreset for the integer $(j,k)$-projective clustering problem.
On the other hand, \citep{Har-Peled04} showed that $L_\infty$ coresets for the projective clustering problem do not exist even for $j=k=2$ when the input set consists of points from $\mathbb{R}^d$.
When the input is restricted to integer coordinates, \citep{EdwardsV05} constructed an $L_\infty$ coreset that gives a $(1+\varepsilon)$-approximation for $(j,k)$-projective clustering.
However, their construction uses a subset of points with size exponential in both $k$ and $d$, which often prevents practical implementations.
Hence, a natural open question is whether there exist $L_\infty$ coreset constructions for integer $(j,k)$-projective clustering with size polynomial in $d$.
\subsection{Our Contributions}
We give the first $L_\infty$ coreset construction for the integer $(j,k)$-projective clustering problem with size polynomial in $d$, resolving the natural open question from \cite{EdwardsV05}.
Specifically, we give an $L_\infty$ $\xi$-coreset $C$, so that for any choice $\calF$ of $k$ flats with dimension $j$, the maximum connection cost of $C$ to $\calF$ is at most $\xi$ times the maximum connection cost of $P$.
Previously, even in the case of $k=1$ and constant $j$, the best known $L_\infty$ coreset construction had size $\exp(d)$~\citep{EdwardsV05}.
We first introduce an $L_\infty$ coreset construction for the $(j,1)$-projective clustering problem using Carath\'{e}odory's theorem; see Figure~\ref{fig:illustration}. We then use our $L_\infty$ coreset for $(j,1)$-projective clustering as a base case to recursively build a coreset $D_k$ for $(j,k)$-projective clustering from coresets for $(j,k-1)$-projective clustering on the partitions of the input points that have geometrically increasing distances from the affine subspace spanned by the points chosen in the previous steps.
We use properties from \cite{EdwardsV05,FeldmanSS20} to bound the number of partitions determined by the distances from the input points to each of the affine subspaces, which bounds our coreset size for an input with aspect ratio $\Delta$, i.e., the ratio of the largest and smallest coordinate magnitudes.
\begin{theorem}[Small $L_\infty$ coreset for $(j,k)$-projective clustering]
\label{thm:main:infty}
There exists an $L_\infty$ constant-factor approximation coreset for the $(j,k)$-projective clustering problem with size $(8j^3\log(d\Delta))^{\O{jk}}$.
\end{theorem}
Our main technical contribution is the novel $L_\infty$ coreset construction for the $(j,1)$-projective clustering problem that relies on Carath\'{e}odory's theorem, which we crucially use to form the base case in our recursive argument.
We then build upon our novel coreset construction by adding a polynomial number of points to the coreset over each step in the inductive argument.
By comparison, even the base case for the previous best coreset~\citep{EdwardsV05} uses exponential space by essentially constructing an epsilon net with $\left(\frac{1}{\varepsilon}\right)^{\O{d}}$ points.
We then give the first $L_\infty$ coresets for a number of $M$-estimator regression problems.
Although the framework of Theorem~\ref{thm:main:infty} immediately gives coreset constructions for Cauchy, Welsch, Huber, Geman-McClure, Tukey, $L_1-L_2$, and Fair regression, we instead apply sharper versions of the proof of Theorem~\ref{thm:main:infty} to the respective parameters induced by each of the loss functions to obtain even more efficient coreset constructions.
Our constructions give strong coresets so that with high probability, the data structure simultaneously succeeds for all queries.
We then apply the framework of Theorem~\ref{thm:main:infty} to give $L_\infty$ coresets for any non-decreasing concave loss function $\Psi$ with $\Psi(0)=0$.
We generalize this approach to give $L_\infty$ coresets for any non-decreasing concave loss function $\Psi$ with $\Psi(y)/\Psi(x)\le(y/x)^z$ for a fixed constant $z>0$, for all $0\le x\le y$.
Note that this property essentially states that the loss function $\Psi(x)$ is bounded by some power function $x^z$.
We summarize these results in Table~\ref{table:Mestimators}.
We also use Theorem~\ref{thm:main:infty} along with the well-known sensitivity sampling technique to obtain an $L_2$ coreset for integer $(j,k)$-projective clustering with approximation $(1+\varepsilon)$.
\begin{figure*}
\caption{\textbf{Overview of our approach (see Algorithm~\ref{alg:single:projective}).}}
\label{fig:step1}
\label{fig:step2}
\label{fig:step3}
\label{fig:step4}
\label{fig:illustration}
\end{figure*}
\begin{theorem}[Small $L_2$ coreset for $(j,k)$-projective clustering]
\label{thm:main:two}
There exists an $L_2$ coreset with approximation guarantee $(1+\varepsilon)$ for the $(j,k)$-projective clustering problem with size $\O{(8j^3\log(d\Delta))^{\O{jk}}\log n}$.
\end{theorem}
\begin{table*}[!htb]
\centering
\caption{$M$-estimator loss functions that can be captured by our coreset construction; $d$ here denotes the dimension of the input data $P$; all lemmata below can be found at Section~\ref{supplement:app} of the supplementary material.}
\begin{tabular}{l|T{0.35\textwidth}|T{0.2\textwidth}|r}
\hline
Loss Function $\Psi$ & Formulation & multiplicative error ($\ell_\infty$-coreset) & Reference \\
\hline
Cauchy & $\left(\lambda^2/2\right) \log{\left( 1 + (x/\lambda)^2\right)}$ & $8(d+1)^3$ & Lemma~\ref{lem:cauchy} \\ \hline
Welsch & $\frac{\lambda^2}{2}\left(1-e^{-\left(\frac{x}{\lambda}\right)^2}\right)$ & $8(d+1)^3$ & Lemma~\ref{lem:welsch} \\ \hline
Huber & $\begin{cases} x^2/2 & \text{If } \abs{x} \leq \lambda\\
\lambda\abs{x} - \lambda^2/2 & \text{otherwise} \end{cases}$ & $16(d+1)^3$ & Lemma~\ref{lem:huber} \\ \hline
Geman-McClure & $x^2/\left(2 + 2x^2\right)$ & $8(d+1)^3$ & Lemma~\ref{lem:gm} \\ \hline
Concave & $\frac{d^2\Psi}{dx^2}\le 0$ & $4(d+1)^{1.5}$& Lemma~\ref{lem:concave} \\ \hline
Tukey & $\begin{cases} \frac{\lambda^2}{6}\left(1-\left(1-\frac{x^2}{\lambda^2}\right)^3\right) & \text{if } \abs{x} \leq \lambda\\
\frac{\lambda^2}{6} & \text{otherwise} \end{cases}$ & $8(d+1)^3$ & Lemma~\ref{lem:tukey} \\ \hline
$L_1-L_2$ & $2\left(\sqrt{1+x^2/2}-1\right)$ & $8(d+1)^3$ & Lemma~\ref{lem:ll} \\ \hline
Fair & $\lambda|x|-\lambda^2\ln\left(1+|x|/\lambda\right)$ & $8(d+1)^3$ & Lemma~\ref{lem:fair} \\ \hline
Power Bounded & $\Psi_{Pow}(y)/\Psi_{Pow}(x)\le(y/x)^z$ for all $0\le x\le y$ & $4^z(d+1)^{1.5z}$ & Lemma~\ref{lem:power} \\
\hline
\end{tabular}
\label{table:Mestimators}
\end{table*}
\textbf{Experiments.}
Finally, we complement our theoretical results with empirical evaluations on synthetic and real world datasets for regression and clustering problems.
We first consider projective clustering on a bike sharing dataset and a 3D spatial network from the UCI machine learning repository~\citep{Dua:2019}.
We then generate a synthetic dataset in the two-dimensional Euclidean plane.
Since previous coreset constructions with theoretical guarantees are impractical for implementations, we compare our algorithms to a baseline produced by uniform sampling.
Our experiments demonstrate that our algorithms have superior performance both across various ranges of $j$ and $k$ for the $(j,k)$-projective clustering problem as well as across various regression problems, e.g., Cauchy, Huber loss functions.
\subsection{Preliminaries}
For a positive integer $n$, we write $[n]:=\{1,\ldots,n\}$.
We use bold font variables to denote vectors and matrices.
For a vector $\x\in\mathbb{R}^d$, we have the Euclidean norm $\|\x\|_2=\sqrt{\sum_{i=1}^d x_i^2}$.
We use $\log$ to denote the base two logarithm.
We use the notation $\circ$ to denote vertical concatenation, so that if $\u$ and $\v$ are row vectors with dimension $d$, then $\u\circ\v$ is the matrix with dimension $2\times d$ whose first row is $\u$ and second row is $\v$. Recall that for $\c\in\mathbb{R}^d$ and a symmetric positive definite matrix $\G\in\mathbb{R}^{d\times d}$, we define the ellipsoid $E(\G,\c)$ to be the set $E(\G,\c):=\br{\x\in\mathbb{R}^d\,|\,(\x-\c)^\top\G(\x-\c)\le1}.$
\begin{theorem}[John-L\"{o}wner ellipsoid]
\citep{John14}
\label{thm:loewner:ellipsoid}
For a set $L\subseteq\mathbb{R}^d$ of points with nonempty interior, there exists an ellipsoid $E(\G,\c)$, where $\G\in\mathbb{R}^{d\times d}$ is a positive definite matrix and $\c\in\mathbb{R}^d$, of minimal volume such that $\frac{1}{d}(E(\G,\c)-\c)+\c\subseteq\conv(L)\subseteq E(\G,\c).$
\end{theorem}
The following defines an approximated solution to problem of finding the L\"{o}wner ellipsoid.
\begin{definition}[$\alpha$-rounding]
\citep{todd2007khachiyan}
Let $L\subseteq\mathbb{R}^d$ be a finite set such that $\Span(L)=\mathbb{R}^d$ and let $\alpha\ge 1$.
Then an ellipsoid $E(\G,\c)$ is called an $\alpha$-rounding of $\conv(L)$ if $\frac{1}{\alpha}(E(\G,\c)-\c)+\c\subseteq\conv(L)\subseteq E(\G,\c).$
\end{definition}
Note that if $\alpha$ in the above definition is $d$ (or equiv. $\sqrt{d}$), the corresponding ellipsoid is the L\"{o}wner ellipsoid.
In order to define a distance to any affine subspace, we first need the following ingredients.
\begin{definition}[Orthogonal matrices]
Let $d>j\ge 1$ be integers.
We say $\X\in\mathbb{R}^{d\times j}$ is an orthogonal matrix if $\X^\top\X=\I_j$.
We use $\calV_j\subseteq\mathbb{R}^{d\times j}$ to denote the set of all $d\times j$ orthogonal matrices.
\end{definition}
\begin{definition}[$j$-dimensional subspace]
Let $d>j\ge 1$ be integers and let $\v\in\mathbb{R}^d$.
Let $\X\in\calV_j$ and $\Y\in\calV_{d-j}$ such that $\Y^\top\X=0^{(d-j)\times j}$ and $\X^\top\Y=0^{j\times(d-j)}$.
Let $H(\X,\v):=\{\X\X^\top\p+\v\,|\,\p\in\mathbb{R}^d\}$ denote the $j$-dimensional affine subspace $H$ that is spanned by the column space of $\X$ and offset by $\v$.
Let $\calH_j:=\{H(\X,\v)\,|\X\in\calV_j,\v\in\mathbb{R}^d\}$ denote the set of all $j$-affine subspaces in $\mathbb{R}^d$.
\end{definition}
We use $\dist(H(\X,\v),\p):=\|(\p-\v)^\top\Y\|_2$ to denote the distance between any point $\p\in\mathbb{R}^d$ and the $j$-dimensional affine subspace $H(\X,\v)$, where here $\Y \in \mathbb{R}^{d \times (d - j)}$ such that $\Y^\top\X = 0^{(d-j) \times j}$.
We now define the term \emph{query space} which will aid us in simplifying the proofs as well as the corresponding theorems.
\begin{definition}[query space]
Let $1\le j<d<n$ be positive integers and let $P\subseteq\mathbb{R}^d$ be a set of $n$ points such that $\Span(P)=\mathbb{R}^d$.
Then for the union of all $j$-affine subspaces $\calH_j$, the tuple $(P,\calH_j,\dist)$ is called a \emph{query space}.
\end{definition}
Following the previous definition, we now can define the notion of $L_\infty$ coreset and $L_2$ coreset.
\begin{definition}[$L_\infty$ coreset]
Let $j\in[d-1]$, $\varepsilon\in(0,1)$, and $(P,\calH_j,\dist)$ be a query space.
Then a set $C \subseteq P$ is called an $L_\infty$ $\varepsilon$-coreset with respect to the query space $(P,\calH_j,\dist)$ if for every $\X\in\calV_j$ and $\v\in\mathbb{R}^d$, $\max_{\p\in P}\dist(H(\X,\v),\p)\le(1+\varepsilon)\max_{\p\in C}\dist(H(\X,\v),\p).$
\end{definition}
\begin{definition}[$L_2$ coreset]
Let $j\in[d-1]$, $\varepsilon\in(0,1)$, and $(P,\calH_j,\dist)$ be a query space.
Then a set $C \subseteq P$ with a weight function $w:C\to\mathbb{R}$ is called an $L_2$ $\varepsilon$-coreset with respect to the query space $(P,\calH_j,\dist)$ if for every $\X\in\calV_j$ and $\v\in\mathbb{R}^d$,
$\sum_{\p\in P}\dist(H(\X,\v),\p)^2\le(1+\varepsilon)\sum_{\p\in C}w(\p)\dist(H(\X,\v),\p)^2$.
\end{definition}
Finally, we define a coreset for the $k$ $j$-cylinders problem, followed by Carath\'{e}odory's theorem, which will be used in our proofs and algorithms in computing the $L_\infty$ coreset for the $(j,k)$-projective clustering problem.
\begin{definition}
A closed $j$-cylinder of radius $r$ is a set of points in $\mathbb{R}^d$ whose distance to a certain $j$-flat is at most $r$.
A set $D$ is an $L_\infty$ $C$-coreset of $P\subseteq\mathbb{R}^d$ for the $(j,k)$-projective clustering problem if $D$ is a subset of $P$ such that there exists a union of $k$ $j$-cylinders of radius $Cr$ that covers $P$ for each union of $k$ $j$-cylinders of radius $r$ that covers $D$.
\end{definition}
\begin{theorem}[Carath\'{e}odory's theorem]
\citep{Caratheodory07,Steinitz13}
For any $A\subset\mathbb{R}^d$ and $\p\in\conv(A)$, there exists $m\le d+1$ points $\p_1,\ldots,\p_m\in A$ such that $\p\in\conv(\{\p_1,\ldots,\p_m\})$.
\end{theorem}
\section{$L_\infty$ CORESETS FOR PROJECTIVE CLUSTERING}
First, we note that~\cite{Har-Peled04} showed that $L_\infty$ coresets do not exist when the input set is $n$ points from $\mathbb{R}^d$. However in this paper, we consider the integer projective clustering problem, e.g.~\cite{EdwardsV05}, where the input points lie on a polynomial grid.
We first give an $L_\infty$ coreset for the $(j,1)$-projective clustering problem in Section~\ref{sec:linfty:j1}. We then use our $L_\infty$ coreset for the $(j,1)$-projective clustering to inductively build an $L_\infty$ coreset for the $(j,k)$-projective clustering problem.
\subsection{$L_\infty$ Coreset for $(j,1)$-Projective Clustering}
\label{sec:linfty:j1}
We first give an overview for our algorithm that produces a constant factor approximation coreset for the $(j,1)$-projective clustering problem.
We again emphasize that our coreset for the $(j,1)$-projective clustering problem serves as our main technical contribution because we use Carath\'{e}odory's theorem to explicitly find a polynomial number of points to add to our coreset.
We can then use a natural inductive argument to recursively add a polynomial number of points to create a coreset for the integer $(j,k)$-projective clustering problem.
By contrast, even the base case for the only existing coreset for the integer $(j,k)$-projective clustering problem already contains an exponential number of points~\citep{EdwardsV05}.
The algorithm takes as input a set $P\subseteq\mathbb{R}^d$ of $n$ points, which are promised to lie on a flat of dimension $j$, and computes a subset $C\subseteq P$, which satisfies Theorem~\ref{thm:single:projective}.
The algorithm appears in full detail in Algorithm~\ref{alg:single:projective} and first initializes $C$ to be an empty set.
Our algorithm computes $H(\W,\u)$ to be the $j$-dimensional flat that contains $P$ and sets $Q$ to be the set of points obtained by projecting $P$ onto the column space of $\W$.
The algorithm then defines $E(\G,\c)$ to be the John-L\"{o}wner ellipsoid containing the convex hull of $Q$ and $S$ to be the set of vertices defined by the axes of symmetry and the center of the scaled ellipsoid $\frac{1}{j}(E(\G,\c)-\c)+\c$, which can be explicitly and efficiently computed, and note that $|S|\le 2j$.
From Carath\'{e}odory's theorem, we can express each point in $S\cup\{\c\}$ as a linear combination of $j+1$ points from $Q$.
We thus define $K$ to be the $\O{j^2}$ points of $Q$ needed to represent all points in $S\cup\{\c\}$ and set $C=\mu(K)$, where $\mu$ is the inverse mapping from $Q$ to $P$.
\begin{algorithm}[!htb]
\caption{Coreset for $(j,1)$-Projective Clustering}
\label{alg:single:projective}
\DontPrintSemicolon
\KwIn{$P\subseteq\mathbb{R}^d$ of $n$ points that lie on a flat of dimension $j$}
\KwOut{Coreset of size $\O{j^2}$}
$C\gets\emptyset$\;
Let $H(\W,\u) := $ a $j$-dimensional flat containing $P$\;
$Q:= \br{\W^\top\p\,|\,\p\in P}$\;
Let $\mu$ be the function that maps each point $q\in Q$ to its original point in $P$\;
Let $E(\G,\c) := $ the John-L\"{o}wner ellipsoid of the convex hull of $Q$\;
$S := $ the vertices of the scaled ellipsoid $\frac{1}{j}\left(E(\G,\c)-\c \right)+\c$\;
\For{each $\s\in S\cup\{\c\}$}{
$K_{\s} := $ at most $j+1$ points from $Q$ whose convex hull contains $\s$ \;
$C := C\cup\mu\left(K_{\s}\right)$\;
}
\Return{$C$}\;
\end{algorithm}
We first prove the following structural property that follows from Carath\'{e}odory's theorem.
\begin{lemma}
\label{lem:conv:ineq}
Let $d,\ell,m\ge 1$ be integers.
Let $\p\in\mathbb{R}^d$ and $A\subseteq\mathbb{R}^d$ be a set of $m$ points with $\p\in\conv(A)$ so that there exists $\alpha:A\to[0,1]$ such that $\sum_{\q\in A}\alpha(\q)=1$ and $\sum_{\q\in A}\alpha(\q)\cdot\q=\p$.
Then for every $\Y\in\mathbb{R}^{d\times\ell}$ and $\v\in\mathbb{R}^{\ell}$, $\|\p^\top\Y-\v\|_2\le\max_{\q\in A}\|\q^\top\Y-\v\|_2.$
\end{lemma}
\begin{proof}
Since we can write $\p$ as the convex combination of points $\q\in A$ with weight $\alpha(\q)$, we have
\[\|\p^\top\Y-\v\|_2=\|\left(\sum_{\q\in A}\alpha(\q)\q^\top\Y\right)-\v\|_2.\]
Moreover, we have $\sum_{\q\in A}\alpha(\q)=1$, so we can decompose $\v$ into
\[\|\p^\top\Y-\v\|_2=\|\sum_{\q\in A}\alpha(\q)\left(\q^\top\Y-\v\right)\|_2.\]
By triangle inequality,
\[\|\p^\top\Y-\v\|_2\le\sum_{\q\in A}\alpha(\q)\|\q^\top\Y-\v\|_2\le\max_{\q\in A}\|\q^\top\Y-\v\|_2.\]
\end{proof}
We use Lemma~\ref{lem:conv:ineq} to show that Algorithm~\ref{alg:single:projective} gives a coreset for the $(j,1)$-projective clustering problem as summarized below. In addition, we show that our $\ell_\infty$-coreset is also applicable towards the $\left(j,z\right)$-clustering where $j$ denotes the dimensionality of the subspace, and $z$ denotes the power of the distance function. For instance, $z \in [1,2)$ is used for obtaining robust clustering, which is useful against outliers.
\begin{theorem}
\label{thm:single:projective}
Let $j\in[d-1]$, $z\ge 1$, and let $(P,\calH_j,\dist)$ be a query space, where $P$ lies in a $j$-dimensional flat.
Let $C\subseteq P$ be the output of Algorithm~\ref{alg:single:projective}.
Then $|C|=\O{j^2}$ and for every $H(\X,\v)\in\calH_j$, we have
$\max_{\p\in P}\dist(\p,H(\X,\v))^z\le 2^{z+1}j^{1.5z}\max_{\q\in C}\dist(\q,H(\X,\v))^z.$
\end{theorem}
\begin{proof}
To show the first part of the claim, note that since the ellipsoid $E(\G,\c)$ has at most $2j$ vertices and each vertex point of the ellipsoid can be represented as a convex combination of at most $j+1$ points from $Q$ by Carath\'{e}odory's theorem, then the number of points in $C$ is at most $2(j+1)^2$, so that $|C|=\O{j^2}$.
To show the second part of the claim, we first set $H(\W,\u)$ to be the $j$-flat containing $P$ and $\Y\in\calH_{d-j}$ so that $\Y^\top\X=0^{(d-j)\times j}$ and $\X^\top\Y=0^{j\times(d-j)}$.
Notice that each $\p\in P$ satisfies
\[\dist(p,H(\X,\v))^z=\|(\p-\v)^\top\Y\|_2^z=\|(\p-\u+\u-\v)^\top\Y\|_2^z.\]
Since $\p$ lies in the affine flat $H(\W,\u)$, then we have
\begin{align}
\label{eqn:bound}
\dist(p,H(\X,\v))^z=\|\left(\W\W^\top(\p-\u)+\u-\v\right)^\top\Y\|_2^z.
\end{align}
We now rely on properties of Carath\'{e}odory's Theorem and the John-L\"{o}wner ellipsoid to bound (\ref{eqn:bound}).
First note that
\[\|\left(\W\W^\top(\p-\u)+\u-\v\right)^\top\Y\|_2^z = \|\left(\W\W^\top\p-\W\W^\top\u+\u-\v\right)^\top\Y\|_2^z.\]
Recall that for each $\p\in P$, there exists $\q\in Q$ such that $\q=\W^\top\p$ and
\[\|\left(\W\W^\top(\p-\u)+\u-\v\right)^\top\Y\|_2^z=\|\left(\W\q-\W\W^\top\u+\u-\v\right)^\top\Y\|_2^z.\]
Since $S$ is the set of vertices of $E(\G,\c)$, we have by the definition of the John-L\"{o}wner ellipsoid that
\[\frac{1}{j}(E(\G,\c)-\c)+\c\subseteq\conv(Q)\subseteq E(\G,\c).\]
Thus $S\subseteq\conv(S)\subseteq\conv(Q)$ and by Carath\'{e}odory's theorem, for each $\s\in S$, there exists a set $K_\s$ of at most $j+1$ points such that $\s\in\conv(K_\s)$.
By Lemma~\ref{lem:conv:ineq},
\[\|\s^\top\W^\top\Y\|_2^z\le\max_{\q\in K_\s}\|\q^\top\W^\top\Y\|_2^z.\]
We also have
\[\frac{1}{\sqrt{j}}\cdot\frac{E(\G,\c)-\c}{j}+\c\subseteq\conv(S)\subseteq\frac{E(\G,\c)-\c}{j}+\c.\]
Therefore,
\begin{align}
\label{eqn:conv:contain}
\conv(S)\subseteq\conv(Q)\subseteq E(\G,\c)\subseteq j^{1.5}(\conv(S)-\c)+\c.
\end{align}
Thus for every $\q\in Q$, there exists $\s\in\conv(S)$ and $\gamma\in[0,1]$ such that
\[\q=\gamma\s+(1-\gamma)(j^{1.5}(\s-\c)+\c).\]
For $\a=\u^\top\W\W^\top\Y-\u^\top\Y-\v^\top\Y$, we then have
\[\|\q^\top\W^\top\Y+\a\|_2^z=\|\left(\gamma\s+(1-\gamma)(j^{1.5}(\s-\c)+\c)\right)^\top\W^\top\Y+\a\|_2^z.\]
Since $z\ge 1$, then $\|\cdot\|_2^z$ is a convex function.
Thus by Jensen's inequality,
\[\|\q^\top\W^\top\Y+\a\|_2^z \le \gamma\|\s^\top\W^\top\Y+\a\|_2^z+(1-\gamma)\|j^{1.5}\s^\top\W^\top\Y+(1-j^{1.5})\c^\top\W^\top\Y+\a\|_2^z.\]
Since $\a=j^{1.5}\a+(1-j^{1.5})\a$, then
\[\|j^{1.5}\s^\top\W^\top\Y+(1-j^{1.5})\c^\top\W^\top\Y+\a\|_2^z\le2^zj^{1.5z}\|\s^\top\W^\top\Y+\a\|_2^z+2^z(j^{1.5}-1)^z\|\c^\top\W^\top\Y+\a\|_2^z.\]
Since $\c\in\conv(S)$ by \eqref{eqn:conv:contain}, then
\[\|\c^\top\W^\top\Y+\a\|_2^z\le\max_{\s\in\conv(S)}\|\s^\top\W^\top\Y+\a\|_2^z.\]
Since $j^{1.5z}+(j^{1.5}-1)^z\le2j^{1.5z}$, then we have that for every $\q\in Q$,
\[\|\q^\top\W^\top\Y+\a\|_2^z\le2^{z+1}j^{1.5z}\max_{\s\in S}\|\s^\top\W^\top\Y+\a\|_2^z.\]
Thus we have for every $\s\in S$,
\begin{equation*}
\begin{split}
\|\s^\top\W^\top\Y+\a\|_2^z&\le\max_{\q\in K}\|\q^\top\W^\top\Y+\a\|_2^z\\
&\le\max_{\p\in C}\|\p^\top\W\W^\top\Y+\a\|_2^z
\end{split}
\end{equation*}
Because $\a=\u^\top\W\W^\top\Y-\u^\top\Y-\v^\top\Y$, we have
\begin{equation*}
\begin{split}
\dist(\p,H(\X,\v))^z&\le 2^{z+1}j^{1.5z}\max_{\p\in C}\|\p^\top\W\W^\top\Y+\a\|_2^z\\
&=2^{z+1}j^{1.5z}\max_{\p\in C}\|(\W\W^\top(\p-\u))^\top\Y+\u^\top\Y-\v^\top\Y\|_2^z.
\end{split}
\end{equation*}
Since $(\p-\u)\in P$ and $P$ lies within $H(\W,\u)$, then
\begin{align*}
\dist(\p,H(\X,\v))^z&\le2^{z+1}j^{1.5z}\max_{\p\in C}\|\p^\top\Y-\u^\top\Y+\u^\top\Y-\v^\top\Y\|_2^z\\
&=2^{z+1}j^{1.5z}\max_{\p\in C}\|\p^\top\Y-\u^\top\Y+\u^\top\Y-\v^\top\Y\|_2^z\\
&=2^{z+1}j^{1.5z}\max_{\p\in C}\dist(\p,H(\X,\v))^z.
\end{align*}
\end{proof}
\subsection{$L_\infty$ Coreset for $(j,k)$-Projective Clustering}
Our coreset construction is recursive. Generally speaking, we construct a coreset $D_k$ for $(j,k)$-projective clustering from a coreset $D_{k-1}$ for $(j,k-1)$-projective clustering. For the base case, we show how to construct a coreset $D_1$ for $(j,1)$-projective clustering in Theorem~\ref{thm:single:projective}.
Now for $k\ge 2$, given a coreset $D_{k-1}\subset P$ for $(j,k-1)$-projective clustering, the construction of $D_k$ has $j+1$ levels and the $i$-th level will specify $i+1$ points $\v_0,\ldots,\v_i$ and a corresponding point set $P[\v_0,\ldots,\v_i]\subset P$.
We first add $D_{k-1}$ into $D_k$ and separately initialize Level 0 with $\v_0$ being each point of $D_{k-1}$ and define $P[\v_0]=P$. Crucially, each of the $j+1$ levels only adds to the coreset a number of points that is polynomial in $j\le d-1$ at each level due to the base case using our new coreset for $(j,1)$-projective clustering based on Carath\'{e}odory's theorem.
Hence, the total number of points is polynomial in $d$ but exponential in $j$.
By contrast, existing coreset constructions of \cite{EdwardsV05} use partitions that must be analyzed over $d$ levels due to their lack of an efficient coreset for their base case; thus their size is exponential in $d$.
\textbf{Level 0:}
Given any choice of $\v_0$ from $D_{k-1}$, we define $P[\v_0]:=P\subset[\Delta]^d$, so that $\dist(\p,\v_0)\in[1,\Delta\sqrt{d}]$ for every $\p\in P[\v_0]$.
We can partition $P[\v_0]$ into $\ell=\O{\log(d\Delta)}$ sets $K_{0,0},K_{0,1},\ldots,K_{0,\ell}$ such that $K_{0,0}=\{\v_0\}$ and $K_{0,i}=\{\p\in P[\v_0]\,:\,2^{i-1}\le\dist(\p,\v_0)\le2^i\}$ for $i\ge 1$.
Intuitively, this can be seen as partitioning the points of $P$ into sets with exponentially increasing distance from $\v_0$.
For each $K_{0,i}$, we construct an $L_\infty$-coreset $D_{0,i}$ of $K_{0,i}$ for the $(j,k-1)$-projective clustering problem and add $D_{0,i}$ into $D_k$.
We then separately select $\v_1$ to be any point in $D_{0,i}$ across all $i\in[\ell]$ and set $P[\v_0,\v_1]=\cup_{x=0}^i K_{0,x}$.
\textbf{Level $t$, for $t\in[1,j]$:}
Given $\v_0,\ldots,\v_t$ and $P[\v_0,\ldots,\v_t]$, let $A_t$ denote the affine subspace spanned by $\v_0,\ldots,\v_t$.
We recall the following structural properties about the convex hull of affine subspaces.
\begin{lemma}[\citep{EdwardsV05}, Lemma 45 in~\citep{FeldmanSS20}]
\label{lem:disc:affine}
Let $\Delta\ge 2$, $k$ be a positive integer, and $j\le d-1$ be a positive integer.
Let $\calQ_{j,k}$ be the family of all sets of $k$ affine subspaces of $\mathbb{R}^d$ with dimension $j$.
Let $\A\in\{-\Delta,\ldots,\Delta\}^{n\times d}$.
Then for every $H\in\calH_j$, we have either $\dist(H,\A)=0$ or $\dist(H,\A)\ge\frac{1}{(d\Delta)^{cj}}$, for some universal constant $c>0$.
\end{lemma}
By Lemma~\ref{lem:disc:affine}, we have that for every $\p\in P[\v_0,\ldots,\v_t]$, that $\dist(\p,A_t)$ is either $0$ or in the range $[1/(d\Delta)^{cj},2\Delta\sqrt{d}]$.
Thus we can once again partition $P[\v_0,\ldots,\v_t]$ into $\O{j\log(d\Delta)}$ subsets $K_{t,0},\ldots,K_{t,\ell}$ such that $K_{t,0}=P[\v_0,\ldots,\v_t]\cap A_t$ and for each integer $i\in[\ell]$, $K_{t,i}:=\{\p\in P[\v_0,\ldots,\v_t]\,:\,2^{i-1}c_j/\Delta^j\le\dist(\p,A_t)<2^i c_j/\Delta^j\}.$
For each $K_{t,i}$, we construct an $L_\infty$-coreset $D_{t,i}$ of $K_{t,i}$ for $(j,k-1)$-projective clustering and add $D_{t,i}$ to $D_k$.
We then separately select $\v_{t+1}$ to be any point in $D_{t,i}$ across all $i\in[\ell]$ and set $P[\v_0,\ldots,\v_{t+1}]=\cup_{x=0}^i K_{t,x}$. We remark that we terminate at level $j+1$. Finally, in what follows, we give a bound on the size of our $L_\infty$ coreset.
\begin{restatable}[Coreset size]{lemma}{lemcoresetsize}
\label{lem:coresetsize}
Let $f(k)=|D_k|$ denote the size of the coreset $D_k$ formed at level $k$ for $(j,k)$-projective clustering.
Then $f(k)=\left(8j^3\log(d\Delta)\right)^{\O{jk}}$.
\end{restatable}
\begin{proof}
By Theorem~\ref{thm:single:projective}, we have that $f(1)\le2(j+1)^2\le8j^2$.
Our construction has $j+1$ levels and each level partitions the data set into $\O{j\log(d\Delta)}$ sets.
For each of the sets, we construct an $L_\infty$-coreset for $(j,k-1)$-projective clustering, and each of the points in the union of the coresets can be used in the point set $P[\v_0,\ldots,\v_{k+1}]$ for the next level.
Thus we have
\[f(k)\le(\O{j\log(d\Delta)}\cdot f(k-1))^{j+1},\]
so that by induction, $f(k)\le(8j^3\log(d\Delta))^{\O{jk}}$.
\end{proof}
\noindent
To prove that our construction yields an $L_\infty$ constant-factor approximation coreset for the integer $(j,k)$-projective clustering problem, we use a structural property about the convex hull of affine subspaces.
Informally, the property says that if $\v_0,\ldots,\v_d\in\mathbb{R}^d$ are $d+1$ affinely independent vectors that induce a sequence of affine subspaces $\A_0,\ldots,\A_d$, then under certain assumptions, the convex hull formed by $\v_0,\ldots,\v_d$ contains a translation of a scaled hyperrectangle formed by a sequence $\u_0,\ldots,\u_d$ of vectors formed by the orthogonal projection away from $\A_0,\ldots,\A_d$.
\begin{lemma}[Lemma 1 in~\citep{EdwardsV05}]
\label{lem:affsub:convrect}
Let $\v_0,\ldots,\v_d\in\mathbb{R}^d$ be $d+1$ affinely independent vectors and for each $0\le i\le d$, let $A_i$ be the affine subspace spanned by $\v_0,\ldots,\v_i$.
Let $\w_i$ be the projection of $\v_i$ onto $A_i$ and let $\u_i=\v_i-\w_i$.
Suppose we have $\dist(\v_j,A_i)\le2\|\u_i\|_2$ for every $0\le i\le d$ and $j\ge i$.
Then there exists an absolute constant $c_d$ that only depends on $d$, so that the simplex $\conv(\v_0,\ldots,\v_d)$ contains a translation of the hyperrectangle
$\{c_d(\alpha_1\u_1+\ldots+\alpha_d\u_d)\,:\,\alpha_i\in[0,1]\}.$
\end{lemma}
Using this structural property, we achieve an $L_\infty$ constant-factor approximation coreset for the integer $(j,k)$-projective clustering problem with size $(8j^3\log(d\Delta))^{\O{jk}}$:
\begin{restatable}{lemma}{lemcoresetapprox}
\label{lem:coreset:approx}
There exists a universal constant $\xi>0$ such that $D_k$ is a $\xi$-coreset for the $(j,k)$-projective clustering problem.
\end{restatable}
\begin{proof}
Suppose $D_k$ is covered by the $k$ cylinders $S_1,\ldots,S_k$.
Then we would like to show that $P$ is covered by a constant-factor $C$-expansion of $S_1,\ldots,S_k$. Here an $x$-expansion of a cylinder $S$ is the set $\left\lbrace x p \middle| p \in S \right\rbrace$.
We first induct on $k$ and then $j$, noting that the base case $k=1$ is already handled by Theorem~\ref{thm:single:projective}.
We then fix $k\ge 2$ and induct on $j$, first considering stage $0$, where we have some $\v_0$ and we define $K_{0,i}=\{\p\in P[\v_0]\,:\,2^{i-1}\le\dist(\p,\v_0)\le 2^i\}$ for $i\in[\ell]$, where $\ell=\O{\log(d\Delta)}$.
We then set $D_{0,i}$ to be the corresponding coreset for $K_{0,i}$ for the $(j,k-1)$-projective clustering problem.
Let $a$ denote the largest positive integer such that $S_k\cap D_{0,a}\neq\emptyset$, so that by the definition of $a$, we have that $\cup_{x=a+1}^{\ell}D_{0,x}$ is covered by $S_1,\ldots,S_{k-1}$.
Since $D_{0,x}$ is a coreset for the $(j,k-1)$-projective clustering problem, then $\cup_{x=a+1}^\ell K_{0,x}$ is covered by a $C$-expansion of $S_1,\ldots,S_{k-1}$.
For any point $\v_1$ in $S_k\cap D_{0,a}$, we enter stage $1$ with $\v_0,\v_1$ and so it remains to prove that a $C$-expansion of $S_1,\ldots,S_k$ covers $P[\v_0,\v_1]=\cup_{x=0}^aK_{0,x}$.
For the inductive step, suppose we have fixed $\v_0,\ldots,\v_t$ and for each $i\in[0,t]$, let $A_i$ denote the affine subspace spanned by $\v_0,\ldots,\v_i$, that is $A_i = \left\lbrace\sum\limits_{l = 0}^i \alpha_l \v_l \middle| \forall l \in [i] \, \alpha_l \in \mathbb{R}, \sum\limits_{l=0}^i \, \alpha_l = 1 \right\rbrace$.
Let $\w_i$ denote the projection of $\v_i$ on $A_i$ and set $\u_i=\v_i-\w_i$.
Then for every $\p\in P[\v_0,\ldots,\v_i]\cap A_i$, we have
\[\dist(\p,A_i)\le2\dist(\v_i,A_i).\]
Thus for $\p\in P[\v_0,\ldots,\v_t]\cap A_t$, we have that $\p$ is contained in the hyperrectangle
\[\calM:=\v_0+\{\alpha_1\u_1+\ldots+\alpha_t\u_t\,:\,\alpha_i\in[-2,2]\}.\]
By Lemma~\ref{lem:affsub:convrect}, there exists a constant $c_t$ such that $\conv(\v_0,\ldots,\v_t)$ contains a translation of the hyperrectangle
\[\calM_1:=\{c_t(\alpha_1\u_1+\ldots+\alpha_t\u_t)\,:\,\alpha_i\in[0,1]\}.\]
Since $S_k$ covers $\v_0,\ldots,\v_t$, then $\calM_1\subset S_k$.
Moreover, we have that for an absolute constant $\xi$, $\calM\subset\xi\cdot\calM_1$.
Thus, a $\xi$-expansion of $S_k$ covers $P[\v_0,\ldots,\v_t]\cap A_t$.
Let $b$ denote the largest positive integer such that $S_k\cap D_{t,b}\neq\emptyset$, so that by the definition of $b$, we have that $\cup_{x=b+1}^{\ell}D_{t,x}$ is covered by $S_1,\ldots,S_{k-1}$.
Since $D_{t,x}$ is a coreset for the $(j,k-1)$-projective clustering problem, then $\cup_{x=b+1}^\ell K_{t,x}$ is covered by a $\xi$-expansion of $S_1,\ldots,S_{k-1}$.
For any point $\v_{t+1}$ in $S_k\cap D_{t,b}$, we enter stage $t+1$ with $\v_0,\ldots,\v_{t+1}$ and so then by induction, it holds that a $\xi$-expansion of $S_1,\ldots,S_k$ covers $P[\v_0,\ldots,\v_{t+1}]=\cup_{x=0}^b K_{t,x}$.
\end{proof}
Theorem~\ref{thm:main:infty} then follows from Lemma~\ref{lem:coreset:approx} and Lemma~\ref{lem:coresetsize} and the observation that $j\le d-1$.
Thus our coresets have size polynomial in $d$, resolving the natural open question from \cite{EdwardsV05}.
\begin{algorithm}[!htb]
\caption{Coreset for $(j,k)$-Projective Clustering}
\label{alg:full:projective}
\DontPrintSemicolon
\KwIn{$P\subseteq\mathbb{R}^d$ of $n$ points, an integer $j\in[d-1]$, an integer $k\ge 1$, an accuracy parameter $\varepsilon\in(0,1)$ and a failure probability $\delta\in(0,1)$.}
\KwOut{A weighted set $(C,u)$}
$P_1 := P$, $i := 1$, $C := \emptyset$ \;
\While{$\left|P_i\right| \ge 1$}{
$S_i := $ an $L_\infty$-coreset for $(j,k)$-projective clustering\;
\For{every $\p\in S_i$}{
$s(\p) := \frac{1}{i}\cdot\left|S_i\right|$ \tcp{$|S_i|=\O{j^{1.5}(j\log(d\Delta))^{\O{jk}}}$}
}
$P_{i+1} := P_i\setminus S_i$, $i := i+1$\;
}
$t := \sum_{\p\in P}s(\p)$ \tcp{$t=\O{j^{1.5}(j\log(d\Delta))^{\O{jk}}\log n}$}
$m := \frac{ct}{\varepsilon^2}\left(djk\log\frac{t}{\delta}\right)$\;
\For{$m$ iterations}{
Sample a point $\p \in P$ with probability $\frac{s(\p)}{t}$\;
$C := C\cup\{\p\}$, $u(\p):= \frac{t}{m\cdot s(\p)}$\;
}
\Return{$(C,u)$}\;
\end{algorithm}
\section{APPLICATIONS}
In this section, we show that our framework gives an $L_\infty$ coreset for subspace clustering, as well as a large class of $M$-estimators.
To the best of our knowledge, our constructions are the first coresets with size polynomial in $d$ for these $M$-estimators.
Namely, our algorithm achieves approximate regression for the Cauchy, Welsch, Huber, Geman-McClure, Tukey, $L_1-L_2$, Fair loss functions, as well as general loss functions that are concave or power-bounded; see Table~\ref{table:Mestimators}.
\textbf{Beyond traditional projective clustering.} First, we show that our $L_\infty$-coreset algorithm is applicable to a family of non-decreasing log-log Lipschitz functions.
\begin{restatable}[$L_\infty$ coreset for log-log Lipschitz loss functions]{theorem}{thmmainapps}
\label{thm:main:apps}
Let $j\in[d-1]$, $z\ge 1$, and let $(P,\calH_j,\dist)$ be a query space, where $P$ lies in a $j$-dimensional flat.
Let $f:[0,\infty)\to[0,\infty)$ such that both (1) $f$ is a monotonically non-decreasing function, i.e., for every $x,y\in[0,\infty)$ with $x\le y$, it holds that $f(x)\le f(y)$ and (2) $f$ is log-log Lipschitz, i.e., there exists $\rho\ge 1$ for every $b\ge 1$ such that $f(bx)\le b^\rho f(x)$.
Let $C$ be the output of a call to $L_\infty-\coreset(P)$.
Then for every $H\in\calH_j$, $\max_{\p\in P}f(\dist(p,H(\X,\v))^z)\le (2^{z+1}j^{1.5z})^\rho\max_{\p\in C}f(\dist(p,H(\X,\v))^z).$
\end{restatable}
\begin{proof}
Let $H(\X,\v)\in\calH_j$.
Then by Theorem~\ref{thm:single:projective}, we have that
\[\max_{\p\in P}\dist(p,H(\X,\v))^z\le 2^{z+1}j^{1.5z}\max_{\q\in C}\dist(q,H(\X,\v))^z.\]
Since $f$ is a monotonically non-decreasing function, then
\begin{align*}
\max_{\p\in P}\,f(\dist(p,H(\X,\v))^z)&=f\left(\max_{\p\in P}\dist(p,H(\X,\v))^z\right)\\
&\le f\left(2^{z+1}j^{1.5z}\max_{\q\in C}\dist(q,H(\X,\v))^z\right).
\end{align*}
Since $f$ is log-log Lipschitz, then
\begin{align*}
f\left(2^{z+1}j^{1.5z}\max_{\q\in C}\dist(q,H(\X,\v))^z\right)&\le(2^{z+1}j^{1.5z})^\rho f\left(\max_{\q\in C}\dist(q,H(\X,\v))^z\right)\\
&\le (2^{z+1}j^{1.5z})^\rho\max_{\q\in C} f\left(\dist(q,H(\X,\v))^z\right).
\end{align*}
Hence, we have
\begin{align*}
\max_{\p\in P}&f(\dist(p,H(\X,\v))^z) \le(2^{z+1}j^{1.5z})^\rho\max_{\p\in C}f(\dist(p,H(\X,\v))^z)
\end{align*}
as desired.
\end{proof}
Although the above theorem is applicable to large family of functions, it may not yield tight bounds for each of the loss functions in Table~\ref{table:Mestimators}.
Thus we first prove the following lemma, which guarantees a coreset for power-bounded loss functions $\Psi_{Pow}(x)$.
\begin{restatable}[$L_\infty$ coreset for regression with power-bounded loss function]{lemma}{lempower}
\label{lem:power}
Let $P\subseteq\mathbb{R}^d$ be a set of $n$ points, $b:P\to\mathbb{R}$, $\lambda\in\mathbb{R}$, and let $z>0$ be a fixed constant.
Let $\Psi_{Pow}$ denote any non-decreasing loss function with $\Psi_{Pow}(0)=0$ and $\Psi_{Pow}(y)/\Psi_{Pow}(x)\le(y/x)^z$ for all $0\le x\le y$.
Let $P'=\{\p\circ b(\p)\,|\,\p\in P\}$, where $\circ$ denotes vertical concatenation.
Let $C'$ be the output of a call to $L_\infty-\coreset(P',d)$ and let $C\subseteq P$ so that $C'=\{\q\circ b(\q)\,|\,\q\in C\}$.
Then for every $\w\in\mathbb{R}^d$, $\max_{\p\in P}\Psi_{Pow} \left(|\p^\top\w-b(\p)|\right)\le 4^z(d+1)^{1.5z}\cdot\max_{\q\in C}\Psi_{Pow}\left(|\q^\top\w-b(\q)|\right).$
\end{restatable}
\begin{proof}
Because the claim is trivially true for $\w=0^d$, then it suffices to consider nonzero $\w\in\mathbb{R}^d$.
Let $\Y\in\calH_{d-1}$ such that $\w^\top\Y=0^{d-1}$ and $\Y^\top\w=0^d$.
For each $\p\in P$, let $\p'=\p\circ b(\p)=\begin{bmatrix}\p\\ b(\p)\end{bmatrix}$ denote the vertical concatenation of $\p$ with $b(\p)$.
We also define the vertical concatenation $\w'=\w\circ(-1)=\begin{bmatrix}\w\\ -1\end{bmatrix}$.
By setting $C$ to be the output of $\coreset$ on $P'=\{\p'\,|\,\p\in P\}$, then by Theorem~\ref{thm:main:apps},
\begin{align*}
\max_{\p\in P}&\dist(\p',H(\w',0^{d+1}))^z\\
&\le2^{z+1}(d+1)^{1.5z}\max_{\q\in C}\dist(\q',H(\w',0^{d+1}))^z
\end{align*}
Thus for $z=1$, we have for every $\p\in P$,
\[|(\p')^\top\w'|\le4(d+1)^{1.5}\max_{\q\in C}|(\q')^\top\w'|.\]
Since $\Psi_{Pow}$ is monotonically non-decreasing, then $\Psi_{Pow}(|\p^\top\w-b(\p)|)$ increases as $|\p^\top\w-b(\p)|$ increases.
Moreover, we have $\Psi_{Pow}(y)/\Psi_{Pow}(x)\le(y/x)^z$ for all $0\le x\le y$.
Therefore,
\begin{align*}
\max_{\p\in P}\Psi_{Pow}\left(|\p^\top\w-b(\p)|\right)
\le\max_{\q\in C}\Psi_{Pow}\left(4(d+1)^{1.5}|\q^\top\w-b(\q)|\right)
\le4^z(d+1)^{1.5z}\max_{\q\in C}\Psi_{Pow}\left(|\q^\top\w-b(\q)|\right).
\end{align*}
\end{proof}
Since power-bounded loss functions satisfy the conditions of Theorem~\ref{thm:main:apps}, then we can immediately apply Theorem~\ref{thm:main:apps} to obtain a base case for $z=1$.
Lemma~\ref{lem:power} then follows by the definition of power-bounded loss functions for general $z$.
It turns out that many of the loss functions of interest in Table~\ref{table:Mestimators} are power-bounded loss functions with specific parameters, so we can apply Theorem~\ref{thm:main:apps} in the same way as the proof of Lemma~\ref{lem:power} to obtain the guarantees for Cauchy regression, Huber regression, and Gem-McClure regression.
However, in certain cases, we can prove structural properties bounding the growth of these loss functions to obtain guarantees that are sharper than those provided by Theorem~\ref{thm:main:apps}.
We prove such structural properties at Section~\ref{supplement:app} of the supplementary material to handle Welsch regression, regression with concave loss functions, Tukey regression, $L_1-L_2$ regression, and Fair regression.
\textbf{$L_\infty$-coreset to $L_2$-coreset for integer $(j,k)$-projective clustering.}
To construct an $\varepsilon$-coreset, we use our $L_\infty$ coreset along with the framework of sensitivity sampling, in which points are sampled according to their sensitivity, a quantity that roughly captures how important or unique each point is. We give the coreset construction in Algorithm~\ref{alg:full:projective} using a standard reduction from an $L_2$ coreset to an $L_{\infty}$ coreset based on sensitivity sampling as summarized below.
\begin{restatable}{theorem}{thmltwomain}
\label{thm:ltwo:main}
With constant probability, Algorithm~\ref{alg:full:projective} outputs an $L_2$ $(1+\varepsilon)$-coreset for $(j,k)$-projective clustering of $P$.
\end{restatable}
\begin{proof}
The coreset size follows the bound of \cite{FeldmanSS20} once the sensitivity and the shattering dimension upper bound are given to us.
We actually follow the way of Lemma 3.1 of \cite{Varadarajan2012ANA} to give the sensitivity upper bound $s(p)$.
The shattering dimension upper bound $\tilde{O}(djk)$ follows Corollary 34 of \cite{FeldmanSS20}.
\end{proof}
\textbf{Time complexity of our methods.} To analyze the running time of Algorithm~\ref{alg:full:projective}, we need to handle two cases -- \begin{enumerate*}
\item $k = 1$, and
\item $k > 1$.
\end{enumerate*}
Observe that the time needed for constructing our $L_2$-coreset for $(j,k)$-projective clustering where $k = 1$ and any $j \geq 2$ is bounded by $O\left( n \left( n + j^4\log{n} \right)\right)$ time. Specifically speaking, the time depends heavily on the running time of Algorithm~\ref{alg:single:projective}. Algorithm~\ref{alg:single:projective} depends heavily on the computation of the L\"{o}wner ellipsoid and on applying Carath\'{e}odory's theorem. The time needed to compute the L\"{o}wner ellipsoid of a given set of points $Q \subseteq \mathbb{R}^j$ such that $|Q| = n$ is bounded by $O\left( nj^3\log{n}\right)$~\citep{todd2007khachiyan}.
As for constructing the Caratheodory set, recently~\cite{maalouf2019fast} provided an algorithm for computing such set in time $O\left(nj + j^4\log{n}\right)$. Combining these two methods with the observation that Algorithm~\ref{alg:full:projective} has $O\left( \frac{n}{j^2}\right)$ calls to Algorithm~\ref{alg:single:projective}, results in the upper bound above.
As for $k \geq 2$, following our analyzed steps needed to construct an $L_\infty$-coreset for the $(j,k)$-projective clustering problem and its variants, the running time is bounded from above by $O\left( nj^4\left(\log{\Delta}\right)^{j^2k}\right)$. Hence, Algorithm~\ref{alg:full:projective} requires $O\left( n^2j^4\left(\log{\Delta}\right)^{j^2k}\right)$ time to construct an $L_2$-coreset for the $(j,k)$-projective clustering problem.
We note that our algorithm can be boosted, theoretically speaking, via the use of the merge-and-reduce tree~\cite{feldman2020core}, resulting in an algorithm that is near-linear in $n$ rather than quadratic in $n$.
We further note that, our assumption on $P$ being contained in some $j$-dimensional affine subspace can be dropped as follows.
\begin{remark}
So far, $P$ was assumed to lie on $j$-dimensional subspaces, however, one can remove this assumption by using Theorem $7$ of~\cite{varadarajan2012sensitivity}.
\end{remark}
\textbf{Subspace clustering.}
We first recall that subspace clustering is a variant of projective clustering where $k=1$ and $j\in[d-1]$.
\textbf{$M$-estimator regression.}
We present various robust $(1,d-1)$-projective clustering problems for which a strong $\varepsilon$-coreset can be generated using our algorithms.
We are given a set $P$ of $n$ points in $\mathbb{R}^d$ and a function $b:P\to\mathbb{R}$, and our goal is to optimize the minimization problem $\min_{\w\in\mathbb{R}^d}\sum_{\p\in P}\Psi\left(|\p^\top\w-b(\p)|\right)$, where $\Psi$ is any loss function.
In particular, the choice of $\Psi$ encompasses many robust regression loss functions that have been designed to reduce the effect of outliers across various optimization problems.
We show that Algorithm~\ref{alg:single:projective} achieves an $L_\infty$-coreset with accuracy $1-\frac{1}{\poly(d)}$ for a variety of loss functions; See Section~B in the supplementary material.
\section{EXPERIMENTS}
\label{sec:results}
\begin{table*}[htb!]
\caption{\textbf{Summary of our results: } Our coreset construction was applied on various application of projective clustering, of which were robust regression as well as robust subspace clustering}
\centering
\begin{tabular}{|c|c|c|c|c|c|}
\hline
Problem type & Loss function & $k$ & $j$ & Dataset & Figure \\
\hline
Regression & Huber & $1$ & $d-1$ & \ref{dataset:1} & \ref{fig:reg_synth_huber} \\
\hline
Regression & Cauchy & $1$ & $d-1$ & \ref{dataset:1} & \ref{fig:reg_synth_cauchy} \\ \hline
$(2,2)$-projective clustering & $L_2^2$ & $2$ & $2$ & \ref{dataset:2} & \ref{fig:proj_hour_2_2_l2}\\
\hline
Robust $(2,2)$-projective clustering & Cauchy & $2$ & $2$ & \ref{dataset:2} & \ref{fig:proj_hour_2_2_Cauchy}\\ \hline
Robust $(2,2)$-projective clustering & Tukey & $2$ & $2$ & \ref{dataset:3} & \ref{fig:proj_CASP_2_2_Tukey}\\
\hline
Robust $(2,2)$-projective clustering & Welsch & $2$ & $2$ & \ref{dataset:3} & \ref{fig:proj_CASP_2_2_Welsch}\\
\hline
\end{tabular}
\label{tab:summary_results}
\end{table*}
\begin{figure*}
\caption{Our experimental results: evaluating the efficacy of our coreset against uniform sampling.}
\label{fig:results}
\end{figure*}
In this section, we evaluate our coreset against uniform sampling on synthetic and real-world datasets, with respect to the projective clustering problem and its variants.
\textbf{Software/Hardware.} Our algorithms were implemented~\cite{opencode} in Python 3.6~\citep{10.5555/1593511} using \say{Numpy}~\citep{oliphant2006guide}, \say{Scipy}~\citep{2020SciPy-NMeth}. Tests were performed on $2.59$GHz i$7$-$6500$U ($2$ cores total) machine with $16$GB RAM.
\textbf{Datasets.} The following datasets used for our experiments were mostly from UCI machine learning repository~\citep{Dua:2019}:
\begin{enumerate*}[label=(\roman*)]
\item \label{dataset:1}\textbf{Synthetic} -- $20,000$ points in the two dimensional Euclidean space where $19,990$ points lie on the $x$-axis while the remaining $10$ points are generated away from the $x$-axis.
\item \label{dataset:2}\textbf{Bike Sharing Dataset Data Set}~\citep{Dua:2019} -- consists of $17389$ samples, and $17$ features of which only $15$ were used for the sake of our comparisons.
\item \label{dataset:3}\textbf{Physicochemical Properties of Protein Tertiary Structure Data Set}~\citep{Dua:2019} -- $45,730$ samples, each consisting of $10$ features.
\end{enumerate*}
\textbf{Evaluation against uniform sampling.} Throughout the experiments, we have chosen $10$ sample sizes, starting from $100$ till $1,000$ for projective clustering problems and from $1,000$ till $10,000$ for regression problems; see Figure~\ref{fig:results}. At each sample size, we generate two coresets, where the first is using uniform sampling and the latter is using Algorithm~\ref{alg:full:projective}. When handling projective clustering problems, for each coreset $(S,v)$, we have computed a suboptimal solution $\tilde{H} \in \calH_j$ using an EM-like algorithm (Expectation Maximization) where the number of steps for convergence was $6$ while the number of different initializations was set to $1,000$. E.g., in Figure~\ref{fig:proj_hour_2_2_l2}, the goal was to find a suboptimal solution $\tilde{H}$ for the problem $\min_{H \in \calH_j} \sum_{p \in S} v(p) \mathrm{dist}\left( p, H\left( X,v \right)\right)^2$.
As for regression related problems, we have computed the suboptimal solution using Scipy's~\citep{2020SciPy-NMeth} own optimization sub-library which can handle such problem instances, where similarly to the projective clustering settings, we have run the solver for $100$ iterations (at max) while having at max $15,000$ different initializations for the solver. The approximation error $\varepsilon$ is set to be the ratio of $\sum_{p \in P} f\left(\mathrm{dist} \left(p, \tilde{H}\left( X, v\right)\right) \right)$ to $\min_{H\in\calH_j} \sum_{p \in P} f\left(\mathrm{dist}\left( p, H(X,v)\right) \right)$, minus $1$. Finally, the results were averaged across $22$ trials, while the shaded regions correspond to the standard deviation.
\textbf{Choice of baseline.}
We remark that uniform sampling was selected as the baseline for our algorithm because the only existing coreset construction with theoretical guarantees for the integer $(j,k)$-projective clustering problem is that of \cite{EdwardsV05}.
However, their construction is known to be impractical due to the large coreset size.
In fact, even the base case requires a number of points that is exponential in $d$; thus we could not implement the coreset construction of \cite{EdwardsV05}.
In practice uniform sampling is used due to the observation that real-world data is often not ``worst-case'' data.
Thus it is a natural choice to compare the performance of our algorithm to that of uniform sampling across a number of real-world datasets, even though it is clear that we can generate synthetic data for which uniform sampling can perform arbitrarily badly due to its lack of provable guarantees, while our coreset construction still maintains its theoretical guarantees.
\textbf{Discussion.}
First note that our coresets are generally more accurate than uniform sampling across the experiments, sometimes outperforming uniform sampling by a factor of $\approx 10000$, e.g., $(2,2)$-projective clustering with the Tukey loss function in Figure~\ref{fig:proj_CASP_2_2_Tukey}.
Moreover, there exist data distributions in which uniform sampling provably performs \emph{arbitrarily} worse than our coreset construction.
For example, consider choosing $k=2$ centers across $n$ points when $n-1$ points are located at the origin and a single point is located at the position $N$ on the $x$-axis.
Then the optimal clustering has cost zero by choosing a center at the origin and a center at $N$, but uniform sampling will not find the point at $N$ without $\Omega(n)$ samples and thus incur cost $N$. Since our coreset finds a multiplicative approximation to the optimal solution, it will also achieve a clustering with cost zero, which is arbitrarily better than $N$, sampling only $\polylog(n)$ points. On the other hand, in some of the figures, e.g., Figure~\ref{fig:proj_CASP_2_2_Tukey}, as we increase the sample size, the approximation error that corresponds to our coreset might increase at some sample sizes. This phenomenon is associated with the probabilistic nature of our coreset, as our coreset is a result of a sensitivity sampling technique. This problem can be easily resolved via increasing the number of trials (the number of trials was chosen to be $22$). The same holds for uniform sampling.
Although our coreset is generally better in terms of approximation error than uniform sampling, the running time of our implementation is slow. We strongly believe that our algorithm can achieve faster results using the merge-and-reduce tree at the expense of an increase in the approximation error. For additional results, see Section~\ref{sec:exp_ext} at the appendix.
\section{CONCLUSIONS AND FUTURE WORK}
In this paper, we have provided $L_\infty$ and $L_2$ coresets for $(j,k)$-projective clustering problems and their variants, e.g., $M$-estimators. Our approach leveraged an elegant combination of the L\"{o}wner ellipsoid and Carath\'{e}odory's theorem. This in turn sheds light on the use of constant-approximation coresets (our $L_\infty$ coreset) as a stepping stone towards $L_2$ coresets with $\varepsilon$ approximation. We believe that there is room for future work with respect to constructing $L_\infty$-coresets with smaller sizes for constant factor approximation. Finally, the lower bound on the size of constant factor coresets for the $(j,k)$-projective clustering problem is still unknown. We hope our work presents an important step in resolving the complexity of this problem.
\appendix
\thispagestyle{empty}
\section{APPLICATIONS}
\label{supplement:app}
In what follows, we will show that the $L_\infty$-coreset can serve a family of functions including (but not limited to) $M$-estimators.
\subsection{$L_\infty$ Coreset for Cauchy Regression}
\begin{lemma}
\label{lem:cauchy}
Let $P\subseteq\mathbb{R}^d$ be a set of $n$ points, $b:P\to\mathbb{R}$, $\lambda\in\mathbb{R}$, and let $\Psi_{Cau}$ denote the Cauchy loss function.
Let $P'=\{\p\circ b(\p)\,|\,\p\in P\}$, where $\circ$ denotes vertical concatenation.
Let $C'$ be the output of a call to $L_\infty-\coreset(P',d)$ and let $C\subseteq P$ so that $C'=\{\q\circ b(\q)\,|\,\q\in C\}$.
Then for every $\w\in\mathbb{R}^d$,
$\max_{\p\in P}\Psi_{Cau} \left(|\p^\top\w-b(\p)|\right)\le 8(d+1)^3\cdot\max_{\q\in C}\Psi_{Cau}\left(|\q^\top\w-b(\q)|\right).$
\end{lemma}
\begin{proof}
We first observe that the claim is trivially true for $\w=0^d$.
Thus it suffices to consider nonzero $\w\in\mathbb{R}^d$.
Let $Y\in\calH_{d-1}$ such that $\w^\top\Y=0^{d-1}$ and $\Y^\top\w=0^d$.
For each $\p\in P$, let $\p'=\p\circ b(\p)=\begin{bmatrix}\p\\ b(\p)\end{bmatrix}$ denote the vertical concatenation of $\p$ with $b(\p)$.
We also define the vertical concatenation $\w'=\w\circ(-1)=\begin{bmatrix}\w\\ -1\end{bmatrix}$.
By running $\coreset$ on $P'=\{\p'\,|\,\p\in P\}$ to obtain a coreset $C$, then we have by Theorem~\ref{thm:main:apps},
\begin{align*}
\max_{\p\in P}&\dist(\p',H(\w',0^{d+1}))^z\le2^{z+1}(d+1)^{1.5z}\max_{\q\in C}\dist(\q',H(\w',0^{d+1}))^z.
\end{align*}
Thus for $z=2$, we have for every $\p\in P$,
\[|(\p')^\top\w'|^2\le8(d+1)^3\max_{\q\in C}|(\q')^\top\w'|^2.\]
The Cauchy loss function is monotonically increasing, so that $\Psi_{Cau}(|\p^\top\x-b(\p)|)$ increases as $|\p^\top\x-b(\p)|$ increases.
Thus for every $\p\in P$,
\begin{align*}
\Psi_{Cau}(|\p^\top\w-b(\p)|)&=\Psi_{Cau}\left(\left|(\p')^\top\w'\right|\right)\\
&=\frac{\lambda^2}{2}\log\left(1+\left(\frac{\left|(\p')^\top\w'\right|}{\lambda}\right)^2\right)\\
&\le\max_{\q\in C}\frac{\lambda^2}{2}\log\left(1+8(d+1)^3\left(\frac{\left|(\q')^\top\w'\right|}{\lambda}\right)^2\right),
\end{align*}
where the inequality follows from the $L_\infty$-coreset property above and the monotonicity of the Cauchy loss function.
Thus by Bernoulli's inequality, we have
\begin{align*}
\Psi_{Cau}(|\p^\top\w-b(\p)|)&\le\max_{\q\in C}8(d+1)^3\cdot\frac{\lambda^2}{2}\log\left(1+\left(\frac{\left|(\q')^\top\w'\right|}{\lambda}\right)^2\right)\\
&=8(d+1)^3\max_{\q\in C}\Psi_{Cau}\left(\left|(\q')^\top\w'\right|\right)\\
&=8(d+1)^3\max_{\q\in C}\Psi_{Cau}\left(|\q^\top\w-b(\q)|\right).
\end{align*}
\end{proof}
\subsection{$L_\infty$ Coreset for Welsch Regression}
First, we will present the following as a stepping stone towards bounding the approximation error that our $L_\infty$-coreset achieves in the context of Welsch regression problem.
\begin{lemma}
\label{lem:welsch:struct}
Let $a\ge 1$ be a positive real number.
Then for every $x\in\mathbb{R}$,
\[1-e^{-a^2x^2}\le a^2(1-e^{-x^2}).\]
\end{lemma}
\begin{proof}
Since $a\ge 1$, the function $a^2e^{-x^2}-e^{-a^2x^2}$ is non-increasing in $x^2$, and thus achieves its maximum at $x=0$.
In particular, the value of $a^2e^{-x^2}-e^{-a^2x^2}$ at $x=0$ is $a^2-1$, so that
\[a^2e^{-x^2}-e^{-a^2x^2}\le a^2-1.\]
Thus from rearranging the terms, we have that
\[1-e^{-a^2x^2}\le a^2(1-e^{-x^2}).\]
\end{proof}
\begin{lemma}
\label{lem:welsch}
Let $P\subseteq\mathbb{R}^d$ be a set of $n$ points, $b:P\to\mathbb{R}$, $\lambda\in\mathbb{R}$, and let $\Psi_{Wel}$ denote the Welsch loss function.
Let $P'=\{\p\circ b(\p)\,|\,\p\in P\}$, where $\circ$ denotes vertical concatenation.
Let $C'$ be the output of a call to $L_\infty-\coreset(P',d)$ and let $C\subseteq P$ so that $C'=\{\q\circ b(\q)\,|\,\q\in C\}$.
Then for every $\w\in\mathbb{R}^d$,
$\max_{\p\in P}\Psi_{Wel} \left(|\p^\top\w-b(\p)|\right)\le 8(d+1)^3\cdot\max_{\q\in C}\Psi_{Wel}\left(|\q^\top\w-b(\q)|\right).$
\end{lemma}
\begin{proof}
We observe that the claim is trivially true for $\w=0^d$, so that it suffices to consider nonzero $\w\in\mathbb{R}^d$.
Let $Y\in\calH_{d-1}$, so that $\w^\top\Y=0^{d-1}$ and $\Y^\top\w=0^d$, and for each $\p\in P$, let $\p'=\p\circ b(\p)=\begin{bmatrix}\p\\ b(\p)\end{bmatrix}$ denote the vertical concatenation of $\p$ with $b(\p)$.
Let $\w'$ denote the vertical concatenation $\w'=\w\circ(-1)=\begin{bmatrix}\w\\ -1\end{bmatrix}$.
By Theorem~\ref{thm:main:apps}, we have that the output $C$ of $\coreset$ on $P'=\{\p'\,|\,\p\in P\}$ satisfies
\begin{align*}
\max_{\p\in P}&\dist(\p',H(\w',0^{d+1}))^z\le2^{z+1}(d+1)^{1.5z}\max_{\q\in C}\dist(\q',H(\w',0^{d+1}))^z.
\end{align*}
Thus for $z=2$, we have for every $\p\in P$,
\[|(\p')^\top\w'|^2\le8(d+1)^3\max_{\q\in C}|(\q')^\top\w'|^2.\]
The Welsch loss function is monotonically increasing, so that $\Psi_{Wel}(|\p^\top\x-b(\p)|)$ increases as $|\p^\top\x-b(\p)|$ increases.
Hence, for every $\p\in P$,
\begin{align*}
\Psi_{Wel}(|\p^\top\w-b(\p)|)&=\Psi_{Wel}\left(\left|(\p')^\top\w'\right|\right)\\
&=\frac{\lambda^2}{2}\left(1-e^{-\left(\frac{\left|(\p')^\top\w'\right|}{\lambda}\right)^2}\right)\\
&\le\max_{\q\in C}\frac{\lambda^2}{2}\left(1-e^{-8(d+1)^3\left(\frac{\left|(\q')^\top\w'\right|}{\lambda}\right)^2}\right),
\end{align*}
where the inequality results from the $L_\infty$-coreset property above and the monotonicity of the Welsch loss function.
By Lemma~\ref{lem:welsch:struct},
\begin{align*}
\Psi_{Wel}(|\p^\top\w-b(\p)|)&\le\max_{\q\in C}8(d+1)^3\cdot\frac{\lambda^2}{2}\left(1-e^{-\left(\frac{\left|(\q')^\top\w'\right|}{\lambda}\right)^2}\right)\\
&=8(d+1)^3\max_{\q\in C}\Psi_{Wel}\left(\left|(\q')^\top\w'\right|\right)\\
&=8(d+1)^3\max_{\q\in C}\Psi_{Wel}\left(|\q^\top\w-b(\q)|\right).
\end{align*}
\end{proof}
\subsection{$L_\infty$ coreset for Huber regression}
\begin{lemma}
\label{lem:huber}
Let $P\subseteq\mathbb{R}^d$ be a set of $n$ points, $b:P\to\mathbb{R}$, $\lambda\in\mathbb{R}$, and let $\Psi_{Hub}$ denote the Huber loss function.
Let $P'=\{\p\circ b(\p)\,|\,\p\in P\}$, where $\circ$ denotes vertical concatenation.
Let $C'$ be the output of a call to $L_\infty-\coreset(P',d)$ and let $C\subseteq P$ so that $C'=\{\q\circ b(\q)\,|\,\q\in C\}$.
Then for every $\w\in\mathbb{R}^d$,
$\max_{\p\in P}\Psi_{Hub} \left(|\p^\top\w-b(\p)|\right)\le 16(d+1)^3\cdot\max_{\q\in C}\Psi_{Hub}\left(|\q^\top\w-b(\q)|\right).$
\end{lemma}
\begin{proof}
The claim is trivially true for $\w=0^d$; it remains to consider nonzero $\w\in\mathbb{R}^d$.
Let $Y\in\calH_{d-1}$, so that $\w^\top\Y=0^{d-1}$ and $\Y^\top\w=0^d$.
For each $\p\in P$, we use $\p'$ to denote the vertical concatenation of $\p$ with $b(\p)$, $\p':=\p\circ b(\p)=\begin{bmatrix}\p\\ b(\p)\end{bmatrix}$.
Similarly, we use $\w'$ to denote the vertical concatenation $\w'=\w\circ(-1)=\begin{bmatrix}\w\\ -1\end{bmatrix}$.
By Theorem~\ref{thm:main:apps}, we have that the output $C$ of $\coreset$ on $P'=\{\p'\,|\,\p\in P\}$ satisfies
\begin{align*}
\max_{\p\in P}\dist(\p',H(\w',0^{d+1}))^z&\le2^{z+1}(d+1)^{1.5z}\max_{\q\in C}\dist(\q',H(\w',0^{d+1}))^z.
\end{align*}
Thus for $z=2$, we have for every $\p\in P$,
\begin{align}
\label{eqn:huber:base}
|(\p')^\top\w'|^2\le8(d+1)^3\max_{\q\in C}|(\q')^\top\w'|^2.
\end{align}
We now consider casework for whether $|(\p')^\top\w'|\le\lambda$ or $|(\p')^\top\w'|>\lambda$.
If $|(\p')^\top\w'|\le\lambda$, then we immediately have from (\ref{eqn:huber:base}) and the fact that $C\subseteq P$ that
\[\Psi_{Hub}\left(|(\p')^\top\w'|\right)\le 8(d+1)^3\max_{\q\in C}\Psi_{Hub}\left(|(\q')^\top\w'|\right).\]
On the other hand if $|(\p')^\top\w'|>\lambda$, we further consider casework for whether $\max_{\q\in C}|(\q')^\top\w'|\le\lambda$ or $\max_{\q\in C}|(\q')^\top\w'|>\lambda$.
If $\max_{\q\in C}|(\q')^\top\w'|>\lambda$, then we again have from (\ref{eqn:huber:base}) and the fact that $C\subseteq P$ that
\[\Psi_{Hub}\left(|(\p')^\top\w'|\right)\le 8(d+1)^3\max_{\q\in C}\Psi_{Hub}\left(|(\q')^\top\w'|\right).\]
Finally, if $|(\p')^\top\w'|>\lambda$ but $\max_{\q\in C}|(\q')^\top\w'|\le\lambda$, then we observe that from (\ref{eqn:huber:base}) and the assumption that $|(\p')^\top\w'|>\lambda$, we have
\[\frac{\lambda}{\sqrt{8}(d+1)^{1.5}}\le\max_{\q\in C}|(\q')^\top\w'|.\]
Thus if $|(\p')^\top\w'|>\lambda$, then
\begin{align*}
\Psi_{Hub}(|\p^\top\w-b(\p)|)&=\Psi_{Hub}\left(\left|(\p')^\top\w'\right|\right)\\
&=\lambda\left(\left|(\p')^\top\w'\right|-\frac{\lambda}{2}\right)\\
&\le\lambda\left(\left|(\p')^\top\w'\right|\right)\\
&\le\sqrt{8}\lambda(d+1)^{1.5}\left(\max_{\q\in C}\left|(\q')^\top\w'\right|\right),
\end{align*}
where the last inequality results from the $L_\infty$-coreset property in (\ref{eqn:huber:base}) above.
Therefore,
\begin{align*}
\Psi_{Hub}(|\p^\top\w-b(\p)|)&\le\frac{\lambda}{\sqrt{8}(d+1)^{1.5}}\cdot8(d+1)^3\left(\max_{\q\in C}\left|(\q')^\top\w'\right|\right)\\
&\le8(d+1)^3\left(\max_{\q\in C}\left|(\q')^\top\w'\right|^2\right)\\
&\le16(d+1)^3\max_{\q\in C}\Psi_{Hub}(|\q^\top\w-b(\q)|).
\end{align*}
Thus in all cases, we have
\begin{align*}
\max_{\p\in P}&\Psi_{Hub}\left(|\p^\top\w-b(\p)|\right)\le16(d+1)^3\cdot\max_{\q\in C}\Psi_{Hub}\left(|\q^\top\w-b(\q)|\right).
\end{align*}
\end{proof}
\subsection{$L_\infty$ coreset for Geman-McClure regression}
\begin{lemma}
\label{lem:gm}
Let $P\subseteq\mathbb{R}^d$ be a set of $n$ points, $b:P\to\mathbb{R}$, $\lambda\in\mathbb{R}$, and let $\Psi_{GM}$ denote the Geman-McClure loss function.
Let $P'=\{\p\circ b(\p)\,|\,\p\in P\}$, where $\circ$ denotes vertical concatenation.
Let $C'$ be the output of a call to $L_\infty-\coreset(P',d)$ and let $C\subseteq P$ so that $C'=\{\q\circ b(\q)\,|\,\q\in C\}$.
Then for every $\w\in\mathbb{R}^d$,
$\max_{\p\in P}\Psi_{GM} \left(|\p^\top\w-b(\p)|\right)\le 8(d+1)^3\cdot\max_{\q\in C}\Psi_{GM}\left(|\q^\top\w-b(\q)|\right).$
\end{lemma}
\begin{proof}
Note that the claim is trivially true for $\w=0^d$, so it therefore suffices to consider nonzero $\w\in\mathbb{R}^d$.
Let $Y\in\calH_{d-1}$ such that $\w^\top\Y=0^{d-1}$ and $\Y^\top\w=0^d$.
For each $\p\in P$, let $\p'=\p\circ b(\p)=\begin{bmatrix}\p\\ b(\p)\end{bmatrix}$ denote the vertical concatenation of $\p$ with $b(\p)$.
We also define the vertical concatenation $\w'=\w\circ(-1)=\begin{bmatrix}\w\\ -1\end{bmatrix}$.
By setting $C$ to be the output of $\coreset$ on $P'=\{\p'\,|\,\p\in P\}$, then by Theorem~\ref{thm:main:apps},
\begin{align*}
\max_{\p\in P}&\dist(\p',H(\w',0^{d+1}))^z\le2^{z+1}(d+1)^{1.5z}\max_{\q\in C}\dist(\q',H(\w',0^{d+1}))^z.
\end{align*}
Thus for $z=2$, we have for every $\p\in P$,
\[|(\p')^\top\w'|^2\le8(d+1)^3\max_{\q\in C}|(\q')^\top\w'|^2.\]
The Geman-McClure loss function is monotonically increasing, so that $\Psi_{GM}(|\p^\top\x-b(\p)|)$ increases as $|\p^\top\x-b(\p)|$ increases.
Therefore,
\begin{align*}
\max_{\p\in P}\Psi_{GM}\left(|\p^\top\w-b(\p)|\right)&=\max_{\p\in P}\frac{|(\p')^\top\w'|^2}{2+2|(\p')^\top\w'|^2}\\
&\le\max_{\q\in C}\frac{8(d+1)^3|(\q')^\top\w'|^2}{2+2|(\q')^\top\w'|^2}\\
&=8(d+1)^3\max_{\q\in C}\Psi_{GM}\left(|\q^\top\w-b(\q)|\right),
\end{align*}
where the inequality results from the $L_\infty$-coreset property of Theorem~\ref{thm:main:apps} and the fact that $C\subseteq P$.
\end{proof}
\subsection{$L_\infty$ Coreset for Regression with Concave Loss Function}
We first recall the following property of concave functions:
\begin{lemma}
\label{lem:concave:struct}
Let $f:\mathbb{R}\to\mathbb{R}$ be a concave function with $f(0)=0$.
Then for any $x\le y$, we have $\frac{f(x)}{x}\ge\frac{f(y)}{y}$.
\end{lemma}
Using Lemma~\ref{lem:concave:struct}, we obtain an $L_\infty$ coreset for regression for any non-decreasing concave loss function $\Psi_{Con}$ satisfying $\Psi_{Con}(0)=0$.
\begin{lemma}
\label{lem:concave}
Let $P\subseteq\mathbb{R}^d$ be a set of $n$ points, $b:P\to\mathbb{R}$, $\lambda\in\mathbb{R}$, and let $\Psi_{Con}$ denote any non-decreasing concave loss function with $\Psi_{Con}(0)=0$.
Let $P'=\{\p\circ b(\p)\,|\,\p\in P\}$, where $\circ$ denotes vertical concatenation.
Let $C'$ be the output of a call to $L_\infty-\coreset(P',d)$ and let $C\subseteq P$ so that $C'=\{\q\circ b(\q)\,|\,\q\in C\}$.
Then for every $\w\in\mathbb{R}^d$, $\max_{\p\in P}\Psi_{Con} \left(|\p^\top\w-b(\p)|\right)\le 4(d+1)^{1.5}\cdot\max_{\q\in C}\Psi_{Con}\left(|\q^\top\w-b(\q)|\right).$
\end{lemma}
\begin{proof}
The claim is trivially true for $\w=0^d$, so it suffices to consider nonzero $\w\in\mathbb{R}^d$.
Let $Y\in\calH_{d-1}$ such that $\w^\top\Y=0^{d-1}$ and $\Y^\top\w=0^d$.
For each $\p\in P$, let $\p'=\p\circ b(\p)=\begin{bmatrix}\p\\ b(\p)\end{bmatrix}$ denote the vertical concatenation of $\p$ with $b(\p)$.
We also define the vertical concatenation $\w'=\w\circ(-1)=\begin{bmatrix}\w\\ -1\end{bmatrix}$.
By setting $C$ to be the output of $\coreset$ on $P'=\{\p'\,|\,\p\in P\}$, then by Theorem~\ref{thm:main:apps},
\begin{align*}
\max_{\p\in P}\dist(\p',H(\w',0^{d+1}))^z&\le2^{z+1}(d+1)^{1.5z}\max_{\q\in C}\dist(\q',H(\w',0^{d+1}))^z.
\end{align*}
Thus for $z=1$, we have for every $\p\in P$,
\[|(\p')^\top\w'|\le4(d+1)^{1.5}\max_{\q\in C}|(\q')^\top\w'|.\]
Since $\Psi_{Con}$ is monotonically non-decreasing, $\Psi_{Con}(|\p^\top\x-b(\p)|)$ increases as $|\p^\top\x-b(\p)|$ increases.
Thus by Lemma~\ref{lem:concave:struct},
\begin{align*}
\max_{\p\in P}\Psi_{Con}\left(|\p^\top\w-b(\p)|\right)&\le\max_{\q\in C}\Psi_{Con}\left(4(d+1)^{1.5}|\q^\top\w-b(\q)|\right)\\
&\le4(d+1)^{1.5}\max_{\q\in C}\Psi_{Con}\left(|\q^\top\w-b(\q)|\right).
\end{align*}
\end{proof}
\subsection{$L_\infty$ Coreset for Tukey Regression}
\begin{lemma}
\label{lem:tukey}
Let $P\subseteq\mathbb{R}^d$ be a set of $n$ points, $b:P\to\mathbb{R}$, $\lambda\in\mathbb{R}$, and let $\Psi_{Tuk}$ denote the Tukey loss function.
Let $P'=\{\p\circ b(\p)\,|\,\p\in P\}$, where $\circ$ denotes vertical concatenation.
Let $C'$ be the output of a call to $L_\infty-\coreset(P',d)$ and let $C\subseteq P$ so that $C'=\{\q\circ b(\q)\,|\,\q\in C\}$.
Then for every $\w\in\mathbb{R}^d$, $\max_{\p\in P}\Psi_{Tuk} \left(|\p^\top\w-b(\p)|\right)\le 8(d+1)^3\cdot\max_{\q\in C}\Psi_{Tuk}\left(|\q^\top\w-b(\q)|\right).$
\end{lemma}
\begin{proof}
We first observe that the claim is trivially true for $\w=0^d$, so that it suffices to consider nonzero $\w\in\mathbb{R}^d$.
Let $Y\in\calH_{d-1}$ such that $\w^\top\Y=0^{d-1}$ and $\Y^\top\w=0^d$.
For each $\p\in P$, let $\p'=\p\circ b(\p)=\begin{bmatrix}\p\\ b(\p)\end{bmatrix}$ denote the vertical concatenation of $\p$ with $b(\p)$.
We define the vertical concatenation $\w'=\w\circ(-1)=\begin{bmatrix}\w\\ -1\end{bmatrix}$.
By setting $C$ to be the output of $\coreset$ on $P'=\{\p'\,|\,\p\in P\}$, then by Theorem~\ref{thm:main:apps},
\begin{align*}
\max_{\p\in P}&\dist(\p',H(\w',0^{d+1}))^z\le2^{z+1}(d+1)^{1.5z}\max_{\q\in C}\dist(\q',H(\w',0^{d+1}))^z.
\end{align*}
Thus for $z=2$, we have for every $\p\in P$,
\[|(\p')^\top\w'|^2\le8(d+1)^3\max_{\q\in C}|(\q')^\top\w'|^2.\]
We first note that if $\frac{|(\p')^\top\w'|}{\sqrt{8}(d+1)^{1.5}}\ge\lambda$, then we trivially have $\max_{\q\in C}|(\q')^\top\w'|^2\ge\lambda^2$ so that $\max_{\q\in C}\Psi_{Tuk}\left(|\q^\top\w-b(\q)|\right)=\frac{\lambda^2}{6}\ge\Psi_{Tuk}(x)$ for all $x$.
Thus, we would have
\[\max_{\p\in P}\Psi_{Tuk}\left(|\p^\top\w-b(\p)|\right)\le\max_{\q\in C}\Psi_{Tuk}\left(|\q^\top\w-b(\q)|\right),\]
as desired.
Hence, we assume $\frac{|(\p')^\top\w'|}{\sqrt{8}(d+1)^{1.5}}<\lambda$ and consider casework for whether $|(\p')^\top\w'|\le\lambda$ or $|(\p')^\top\w'|>\lambda$.
If $|(\p')^\top\w'|\le\lambda$, then since the Tukey loss function is monotonically increasing, we have
\begin{align*}
\Psi_{Tuk}\left(|(\p')^\top\w'|\right)&=\frac{\lambda^2}{6}\left(1-\left(1-\frac{|(\p')^\top\w'|^2}{\lambda^2}\right)^3\right)\\
&\le\max_{\q\in C}\frac{\lambda^2}{6}\left(1-\left(1-\frac{8(d+1)^3|(\q')^\top\w'|^2}{\lambda^2}\right)^3\right)
\end{align*}
Unfortunately, the Tukey loss function is not concave, so we cannot directly apply Lemma~\ref{lem:concave:struct}.
However, if we define the function $f(x):=\frac{\lambda^2}{6}\left(1-\left(1-\frac{x}{\lambda^2}\right)^3\right)$, then we have
\[\frac{d^2f}{dx^2}=\frac{x-\lambda^2}{\lambda^4},\]
which is non-positive for all $x\le\lambda^2$.
Thus by Lemma~\ref{lem:concave:struct}, we have for all $0\le x\le y\le\lambda^2$ that $\frac{f(x)}{x}\ge\frac{f(y)}{y}$.
Since $f(x^2)=\Psi_{Tuk}(x)$, then we have for all $0\le x\le y\le\lambda$ that $\frac{\Psi_{Tuk}(x)}{x^2}\ge\frac{\Psi_{Tuk}(y)}{y^2}$.
Hence by the assumption that $\frac{|(\p')^\top\w'|}{\sqrt{8}(d+1)^{1.5}}<\lambda$,
\begin{align*}
\Psi_{Tuk}\left(|(\p')^\top\w'|\right)&\le 8(d+1)^3\max_{\q\in C}\frac{\lambda^2}{6}\left(1-\left(1-\frac{|(\q')^\top\w'|^2}{\lambda^2}\right)^3\right)\\
&\le8(d+1)^3\max_{\q\in C}\Psi_{Tuk}\left(|(\q')^\top\w'|\right)\\
&=8(d+1)^3\max_{\q\in C}\Psi_{Tuk}\left(|\q^\top\w-b(\q)|\right).
\end{align*}
On the other hand, if $|(\p')^\top\w'|>\lambda$, then we further consider casework on whether $\max_{\q\in C}|(\q')^\top\w'|>\lambda$ or $\max_{\q\in C}|(\q')^\top\w'|\le\lambda$.
If $\max_{\q\in C}|(\q')^\top\w'|>\lambda$, then we immediately have
\begin{align*}
\Psi_{Tuk}\left(|(\p')^\top\w'|\right)&=\frac{\lambda^2}{6}=\max_{\q\in C}\Psi_{Tuk}\left(|(\q')^\top\w'|\right)\\
&=\max_{\q\in C}\Psi_{Tuk}\left(|\q^\top\w-b(\q)|\right).
\end{align*}
Otherwise, suppose $\max_{\q\in C}|(\q')^\top\w'|\le\lambda$.
Note that $|(\p')^\top\w'|^2\le8(d+1)^3\max_{\q\in C}|(\q')^\top\w'|^2$ implies $\max_{\q\in C}|(\q')^\top\w'|>\frac{\lambda}{\sqrt{8}(d+1)^{1.5}}$.
Since the Tukey loss function is monotonically increasing, then
\[\max_{\q\in C}\Psi_{Tuk}\left(|(\q')^\top\w'|\right)\ge\Psi_{Tuk}\left(\frac{\lambda}{\sqrt{8}(d+1)^{1.5}}\right).\]
Because $\max_{\q\in C}|(\q')^\top\w'|\le\lambda$, then we can again apply the relationship $\frac{\Psi_{Tuk}(x)}{x^2}\ge\frac{\Psi_{Tuk}(y)}{y^2}$ for all $0\le x\le y\le\lambda$, so that
\[\max_{\q\in C}\Psi_{Tuk}\left(|(\q')^\top\w'|\right)\ge\frac{1}{8(d+1)^3}\Psi_{Tuk}(\lambda).\]
Hence,
\begin{align*}
\Psi_{Tuk}\left(|(\p')^\top\w'|\right)=\Psi_{Tuk}(\lambda)\le 8(d+1)^3\max_{\q\in C}\Psi_{Tuk}\left(|(\q')^\top\w'|\right).
\end{align*}
Therefore across all cases, we have
\begin{align*}
\max_{\p\in P}\Psi_{Tuk}\left(|\p^\top\w-b(\p)|\right)\le 8(d+1)^3\cdot\max_{\q\in C}\Psi_{Tuk}\left(|\q^\top\w-b(\q)|\right).
\end{align*}
\end{proof}
\subsection{$L_\infty$ Coreset for $L_1-L_2$ Regression}
\begin{lemma}
\label{lem:ll}
Let $P\subseteq\mathbb{R}^d$ be a set of $n$ points, $b:P\to\mathbb{R}$, $\lambda\in\mathbb{R}$, and let $\Psi_{LL}$ denote the $L_1-L_2$ loss function.
Let $P'=\{\p\circ b(\p)\,|\,\p\in P\}$, where $\circ$ denotes vertical concatenation.
Let $C'$ be the output of a call to $L_\infty-\coreset(P',d)$ and let $C\subseteq P$ so that $C'=\{\q\circ b(\q)\,|\,\q\in C\}$.
Then for every $\w\in\mathbb{R}^d$, $\max_{\p\in P}\Psi_{LL} \left(|\p^\top\w-b(\p)|\right)\le 8(d+1)^3\cdot\max_{\q\in C}\Psi_{LL}\left(|\q^\top\w-b(\q)|\right).$
\end{lemma}
\begin{proof}
We first observe that the claim is trivially true for $\w=0^d$.
Therefore, it suffices to consider nonzero $\w\in\mathbb{R}^d$.
Let $Y\in\calH_{d-1}$ such that $\w^\top\Y=0^{d-1}$ and $\Y^\top\w=0^d$.
For each $\p\in P$, let $\p'=\p\circ b(\p)=\begin{bmatrix}\p\\ b(\p)\end{bmatrix}$ denote the vertical concatenation of $\p$ with $b(\p)$.
We also define the vertical concatenation $\w'=\w\circ(-1)=\begin{bmatrix}\w\\ -1\end{bmatrix}$.
By setting $C$ to be the output of $\coreset$ on $P'=\{\p'\,|\,\p\in P\}$, then by Theorem~\ref{thm:main:apps},
\begin{align*}
\max_{\p\in P}&\dist(\p',H(\w',0^{d+1}))^z\le2^{z+1}(d+1)^{1.5z}\max_{\q\in C}\dist(\q',H(\w',0^{d+1}))^z.
\end{align*}
Thus for $z=2$, we have for every $\p\in P$,
\[|(\p')^\top\w'|^2\le8(d+1)^3\max_{\q\in C}|(\q')^\top\w'|^2.\]
The $L_1-L_2$ loss function is monotonically increasing, so that $\Psi_{LL}(|\p^\top\x-b(\p)|)$ increases as $|\p^\top\x-b(\p)|$ increases.
Therefore,
\begin{align*}
\max_{\p\in P}\Psi_{LL}\left(|\p^\top\w-b(\p)|\right)&=\max_{\p\in P}2\left(\sqrt{1+\frac{|\p^\top\w-b(\p)|^2}{2}}-1\right)\\
&\le\max_{\q\in C}2\left(\sqrt{1+\frac{8(d+1)^3|\q^\top\w-b(\q)|^2}{2}}-1\right).
\end{align*}
The $L_1-L_2$ loss function is not concave, so we cannot directly apply Lemma~\ref{lem:concave:struct}.
Fortunately, if we define the function $f(x):=2\left(\sqrt{1+\frac{x}{2}}-1\right)$, then we have
\[\frac{d^2f}{dx^2}=-\frac{2}{16\left(\frac{x}{2}+1\right)^{3/2}},\]
which is non-positive for all $x\ge 0$.
Thus by Lemma~\ref{lem:concave:struct}, we have for all $0\le x\le y$ that $\frac{f(x)}{x}\ge\frac{f(y)}{y}$.
Since $f(x^2)=\Psi_{LL}(x)$, then we have for all $0\le x\le y$ that $\frac{\Psi_{LL}(x)}{x^2}\ge\frac{\Psi_{LL}(y)}{y^2}$.
Thus,
\begin{align*}
\max_{\p\in P}\Psi_{LL}\left(|\p^\top\w-b(\p)|\right)&\le 8(d+1)^3\max_{\q\in C}2\left(\sqrt{1+\frac{|\q^\top\w-b(\q)|^2}{2}}-1\right)\\
&=8(d+1)^3\max_{\q\in C}\Psi_{LL}\left(|\q^\top\w-b(\q)|\right).
\end{align*}
\end{proof}
\subsection{$L_\infty$ Coreset for Fair Regression}
\begin{lemma}
\label{lem:fair}
Let $P\subseteq\mathbb{R}^d$ be a set of $n$ points, $b:P\to\mathbb{R}$, $\lambda\in\mathbb{R}$, and let $\Psi_{Fair}$ denote the Fair loss function.
Let $P'=\{\p\circ b(\p)\,|\,\p\in P\}$, where $\circ$ denotes vertical concatenation.
Let $C'$ be the output of a call to $L_\infty-\coreset(P',d)$ and let $C\subseteq P$ so that $C'=\{\q\circ b(\q)\,|\,\q\in C\}$.
Then for every $\w\in\mathbb{R}^d$, $\max_{\p\in P}\Psi_{Fair} \left(|\p^\top\w-b(\p)|\right)\le 8(d+1)^3\cdot\max_{\q\in C}\Psi_{Fair}\left(|\q^\top\w-b(\q)|\right).$
\end{lemma}
\begin{proof}
Since the claim is trivially true for $\w=0^d$, then it suffices to consider nonzero $\w\in\mathbb{R}^d$.
Let $Y\in\calH_{d-1}$ such that $\w^\top\Y=0^{d-1}$ and $\Y^\top\w=0^d$.
For each $\p\in P$, let $\p'=\p\circ b(\p)=\begin{bmatrix}\p\\ b(\p)\end{bmatrix}$ denote the vertical concatenation of $\p$ with $b(\p)$.
We also define the vertical concatenation $\w'=\w\circ(-1)=\begin{bmatrix}\w\\ -1\end{bmatrix}$.
By setting $C$ to be the output of $\coreset$ on $P'=\{\p'\,|\,\p\in P\}$, then by Theorem~\ref{thm:main:apps},
\begin{align*}
\max_{\p\in P}\dist(\p',H(\w',0^{d+1}))^z&\le2^{z+1}(d+1)^{1.5z}\max_{\q\in C}\dist(\q',H(\w',0^{d+1}))^z.
\end{align*}
Thus for $z=2$, we have for every $\p\in P$,
\[|(\p')^\top\w'|^2\le8(d+1)^3\max_{\q\in C}|(\q')^\top\w'|^2.\]
The Fair loss function is monotonically increasing, so that $\Psi_{Fair}(|\p^\top\x-b(\p)|)$ increases as $|\p^\top\x-b(\p)|$ increases.
Therefore,
\begin{align*}
\max_{\p\in P}\Psi_{Fair}\left(|\p^\top\w-b(\p)|\right)\le\max_{\q\in C}\Psi_{Fair}\left(\sqrt{8}(d+1)^{1.5}|\q^\top\w-b(\q)|\right).
\end{align*}
The Fair loss function is not concave, so we cannot directly apply Lemma~\ref{lem:concave:struct}.
However, if we define the function $f(x):=\lambda\sqrt{|x|}-\lambda^2\ln\left(1+\frac{\sqrt{|x|}}{\lambda}\right)$, then we have
\[\frac{d^2f}{dx^2}=-\frac{\lambda}{4\sqrt{x}(\lambda+\sqrt{x})^2},\]
which is non-positive for all $x\ge 0$.
Thus by Lemma~\ref{lem:concave:struct}, we have for all $0\le x\le y$ that $\frac{f(x)}{x}\ge\frac{f(y)}{y}$.
Since $f(x^2)=\Psi_{Fair}(x)$, then we have for all $0\le x\le y$ that $\frac{\Psi_{Fair}(x)}{x^2}\ge\frac{\Psi_{Fair}(y)}{y^2}$.
Thus,
\begin{align*}
\max_{\p\in P}\Psi_{Fair}\left(|\p^\top\w-b(\p)|\right)\le8(d+1)^3\max_{\q\in C}\Psi_{Fair}\left(|\q^\top\w-b(\q)|\right).
\end{align*}
\end{proof}
\section{ADDITIONAL EXPERIMENTS}
\label{sec:exp_ext}
In this section, we present additional experimental results evaluating our coreset against uniform sampling on real-world datasets, with respect to the projective clustering problem and its variants.
\begin{table*}[!htb]
\caption{\textbf{Summary of our results: } Our coreset construction was applied to various applications of projective clustering, among which are robust regression and robust subspace clustering}
\centering
\begin{tabular}{|c|c|c|c|c|c|}
\hline
Problem type & Loss function & $k$ & $j$ & Dataset & Figure \\
\hline
Robust $(2,2)$-projective clustering & $L_1-L_2$ & $2$ & $2$ & \ref{dataset:3} & \ref{fig:proj_CASP_2_2_l12} \\ \hline
Robust $(2,3)$-projective clustering & Huber & $2$ & $3$ & \ref{dataset:3} & \ref{fig:proj_CASP_2_3_huber}\\
\hline
\end{tabular}
\label{tab:more:summary_results}
\end{table*}
\begin{figure*}
\caption{Our experimental results: evaluating the efficacy of our coreset against uniform sampling.}
\label{fig:more:results}
\end{figure*}
\end{document} |
\begin{document}
\title{The Hamiltonian structure of the nonlinear
Schr\"odinger equation and the asymptotic stability of its ground
states}
\author {Scipio Cuccagna}
\date{8.1.11}
\maketitle
\begin{abstract}
In this paper we prove that ground states of the NLS which satisfy
the sufficient conditions for orbital stability of M.Weinstein,
are also asymptotically stable, for seemingly generic equations.
Here we assume that the NLS has a smooth short range nonlinearity.
We assume also the presence of a very short range and smooth linear
potential, to avoid translation invariance. The basic idea is to
perform a Birkhoff normal form argument on the hamiltonian, as in a
paper by Bambusi and Cuccagna on the stability of the 0 solution for
NLKG. But in our case, the natural coordinates arising from the
linearization are not canonical. So we need also to apply the
Darboux Theorem. With some care though, in order not to destroy
some nice features of the initial hamiltonian.
\end{abstract}
\section{Introduction}
\label{section:introduction} We consider the nonlinear
Schr\"odinger equation (NLS)
\begin{equation}\label{NLS}
{\rm i} u_{t }=-\Delta u +Vu +\beta (|u|^2) u , \, u(0,x)=u_0(x), \, (t,x)\in\mathbb{ R}\times
\mathbb{ R}^3
\end{equation}
with $ -\Delta +V(x)$ a selfadjoint Schr\"odinger operator. Here
$V(x)\neq 0$ to exclude translation invariance. We assume
that both $V(x)$ and $\beta (|u|^2)u$ are short range
and smooth.
We assume that \eqref{NLS} has a smooth family of ground states.
We then prove that the sufficient conditions for
orbital stability by Weinstein \cite{W1} (which, essentially, represent the
correct definition of linear stability, see \cite{Cu3}), imply for a generic \eqref{NLS} that
the ground states are not only orbitally stable, as proved in
\cite{W1} (under less restrictive hypotheses), but that their
orbits are also asymptotically stable. That is, a solution $u(t)$
of \eqref{NLS} starting sufficiently close to ground states, is
asymptotically of the form $e^{i\theta (t)} \phi _{\omega _+} (x)+
e^{it\Delta }h_+$, for $\omega _+$ a fixed number and for $h_+\in
H^1(\mathbb{R}^3)$ a small energy function. The problem of
stability of ground states has a long history. Orbital stability
has been well understood since the 80's, see in the sequence
\cite{CL,W1,GSS1,GSS2}, and has been a very active field
afterwards. Asymptotic stability is a more recent, and less
explored, field. In the context of the NLS the first results are in
the pioneering works \cite{SW1,SW2,BP1,BP2}. Almost all references
on asymptotic stability of ground states of the NLS tackle the
problem by first linearizing at ground states, and by attempting to
deal with the resulting nonlinear problem for the error term. An
apparent problem in the linear theory is that the linearization is
a not symmetric operator. However the linearization is covered by
the scattering theory of non selfadjoint operators developed by
T.Kato in the 60's, see his classical \cite{kato}, see also
\cite{CPV,Schlag}. Dispersive and Strichartz estimates for the
linearization, analogous to the theory for short range scalar
Schr\"odinger operators elaborated in \cite{JSS,Y1,Y2}, to name
only few of many papers, can be proved using similar ideas, see
for example \cite{Cu1,Schlag,KS}. It is fair to say that anything
that can be proved for short range scalar Schr\"odinger operators,
can also be proved for the linearizations. The only notable
exception is the problem of "positive signature" embedded
eigenvalues, see \cite{Cu3}, which we conjecture not to exist (in
analogy to the absence of embedded eigenvalues for short range
Schr\"odinger operators), and which in any case are unstable, see
\cite{CPV}. Hence it is reasonable to focus on NLS's where these
positive signature embedded eigenvalues do not exist (in the case
of ground states, all positive eigenvalues are of positive
signature). While linear theory is not a problem in understanding
asymptotic stability, the real trouble lies in the difficult NLS
like equation one obtains for the error term. Specifically, the
linearization has discrete spectrum which, at the level of linear
theory, tends not to decay and potentially could yield
quasiperiodic solutions. A good analogy with more standard
problems, is that the continuous spectrum of the linearization
corresponds to stable spectrum while the discrete spectrum
corresponds to central directions. Stability cannot be established
by linear theory alone. The first intuition on how nonlinear
interactions are responsible for loss of energy of the discrete
modes, is in a paper by Sigal \cite{sigal}.
His ideas, inspired by the classical Fermi golden rule in linear
theory,
are later elaborated in \cite{SW3}, to study asymptotic stability of
vacuum for the nonlinear Klein Gordon equations with a potential
with non empty discrete spectrum. This problem, easier than the one
treated in the present paper, to a large extent is solved in
\cite{bambusicuccagna}. In reality, the main ideas in \cite{SW3}
had already been sketched, for the problem of stability of ground
states of NLS, in a deep
paper by Buslaev and Perelman \cite{BP2}, see also
the expanded version \cite{BS}. In the case when the linearization
has just one positive eigenvalue close to the continuous spectrum,
\cite{SW3,BP2}, or \cite{sigal} in a different context, identify the
mechanisms for loss of energy of the discrete modes in the nonlinear
coupling of continuous and discrete spectral components.
Specifically, in the discrete mode equation there is a key
coefficient of the form $\langle D F,F\rangle $ for $D $ a
positive operator and $F$ a function. Assuming the
generic condition $\langle D F,F\rangle\not=0 $, this gives rise
to dissipative effects leading to leaking of energy from the
discrete mode to the continuous modes, where energy disperses
because of linear dispersion, and to the ground state. After
\cite{BP2} there is strong evidence that, generically, linearly
stable ground states, in the sense of \cite{W1}, should be
asymptotically stable. Still, it is a seemingly technically
difficult problem to solve rigorously. After \cite{BP2,SW3}, a
number of papers analyze the same ideas in various situations,
\cite{TY1,TY2,TY3,T,SW4,Cu2}. In the meantime, a useful series of
papers \cite{GNT,M1,M2} shows how to use endpoint Strichartz and
smoothing estimates to prove in energy space the result of
\cite{SW2,PiW}, generalizing the result and simplifying the
argument. The next important breakthrough is due to Zhou and Sigal
\cite{zhousigal}. They tackle for the first time the case of one
positive eigenvalue arbitrarily close to 0, developing further the
normal forms analysis of \cite{BP2} and obtaining the rate of
leaking conjectured in \cite{SW3} p.69. The argument is improved
in \cite{cuccagnamizumachi}. The crucial coefficient is now of the
form $\langle D F,G\rangle $, with $F$ and $G$ not obviously
related. In \cite{cuccagnamizumachi} it is noticed that $\langle D
F,G\rangle <0$ is incompatible with orbital stability (an argument
along these lines is suggested in \cite{SW3} p.69). So, for
orbitally stable ground states, the generic condition $\langle D
F,G\rangle \neq 0$ implies positivity, and hence leaking of energy
out of the discrete
modes. This yields a result similar to \cite{sigal,BP2,SW3}
and in particular is a partially positive answer to a conjecture
on p.69 in \cite{SW3}.
The case with more than one positive eigenvalue is harder. In
this case, due to possible cancelations, \cite{cuccagnamizumachi} is
not able to draw
conclusions on the sign of the coefficients
under the assumption of orbital stability. But, apart from the issue
of positivity of the coefficients, \cite{cuccagnamizumachi} shows
that the rest of the proof does not depend on the number of positive
eigenvalues. Moreover, \cite{T,zhouweinstein1,Cu3} show that if
there are many positive eigenvalues, all close to the continuous
spectrum, then the important coefficients are again of the form
$\langle D F,F\rangle $. The reason for this lies in the
hamiltonian nature of the NLS. The above papers contain normal forms
arguments. The hamiltonian structure is somewhat lost in the above
papers. When the eigenvalues are close to the continuous spectrum,
the normal form argument consists of just one step. This single step
does not change the crucial coefficients. Then, the hamiltonian
nature of the initial system, yields information on these
coefficients (this is emphasized in \cite{Cu3}). In the case
treated in \cite{zhousigal,cuccagnamizumachi} though, there are
many steps in the normal form. The important coefficients are
changed in ways which look very complicated, see \cite{Gz} which
deals with the next two easiest cases after the easiest. The correct
way to look at this problem is introduced in \cite{bambusicuccagna},
which deals with the problem introduced in \cite{SW3}. Basically,
the positivity can be seen by doing the normal form directly on the
hamiltonian. We give a preliminary and heuristic justification on
why the hamiltonian structure is crucial at the end of section
\ref{section:linearization}. \cite{bambusicuccagna} consists in a
mixture of a Birkhoff normal forms argument, with the arguments in
\cite{cuccagnamizumachi}. For asymptotic stability of ground states
of NLS though, \cite{bambusicuccagna} is still not enough. Indeed in
\cite{bambusicuccagna} something peculiar happens: the natural
coordinates arising by the spectral decomposition of the
linearization at the vacuum solution, are also canonical
coordinates for the symplectic structure. This is no longer true if
instead of vacuum we consider ground states. So we need an extra
step, which consists in the search of canonical coordinates, through
the Darboux theorem. This step requires care, because we must make
sure that our problem remains similar to a semilinear NLS also in
the new system of coordinates.
In a forthcoming paper, Zhou and Weinstein \cite{zhouweinstein2}
track precisely in the setting of \cite{zhouweinstein1} how much of
the energy of the discrete modes goes to the ground state and how
much is dispersed. For another result on asymptotic stability, that
is asymptotic stability of the blow up profile, we refer to
\cite{MR}. In some respects the situation in \cite{MR} is harder
than here, since there the additional discrete modes are
concentrated in the kernel of the linearization. There is important
work on asymptotic stability for KdV equations due to Martel and
Merle, see \cite{MM1} and further references therein, which solve a
problem initiated by Pego and Weinstein \cite{PW}, the latter closer
in spirit to our approach to NLS. It is an interesting question to
see if elaboration of ideas in \cite{MM1,MMT} can be used for
alternative solutions of the problem which we consider here. Our
result does not cover important cases, like the pure power NLS, with
$\beta (|u|^2)= -|u|^{p-1}$ and $V=0$, where our result is probably
false. Indeed it is well known that in 3D ground states are stable
for $p<7/3$ and unstable for $p\ge 7/3$. In the $p<7/3$ case there
are ground states of arbitrarily small $H^1 $ norm. They are
counterexamples to the asymptotic stability in $H^1 $ of the 0
solution. Then for $p>5/3$ the 0 solution is asymptotically stable
in a smaller space usually denoted by $\Sigma$, which involves also
the $\| x u\| _{L^2_x}$ norm, see in \cite{strauss} the comments
after Theorem 6 p. 55. In $\Sigma $ there are no small ground states
for $p\in (5/3,7/3 )$. Presumably one should be able to prove
asymptotic stability of ground states in $\Sigma$. To our knowledge
even the following (presumably easier) problem is not solved yet:
the asymptotic stability of 0 in $\Sigma$ when $V\neq 0$, $\sigma
_p (-\Delta +V )=\emptyset $ and $\beta (|u|^2)= -|u|^{p-1}$ with
$p\in (5/3,7/3 )$. In the literature on asymptotic stability of
ground states like \cite{BP2,BS,zhousigal,cuccagnamizumachi}, the
case of moving solitons is left aside, because in that set up it
appears substantially more complex. We do not treat moving solitons
here either, but it is possible that our approach might help also
with moving solitons. In the step when we perform the Darboux
Theorem, the velocity should freeze and we should reduce to the same
situation considered from section \ref{section:reformulation} on.
The extra difficulty with moving solitons is that there are more
obstructions to the fact that after Darboux we have a semilinear
NLS. But it would be surprising if this difficulty had a really
deep nature. In any case, the main conceptual problem stemming
from \cite{sigal,BP2,SW3}, which we solve here, is the issue of the
positive semidefiniteness of the critical coefficients. There is a
growing literature on interaction between solitons, see for example
\cite{MM2,HW,M3}, and we expect our result to be relevant.
We do not reference all the literature on asymptotic stability of
ground states, see \cite{cuccagnatarulli} for more. We like to
conclude observing that Sigal \cite{sigal}, Buslaev and Perelman
\cite{BP2} and Soffer and Weinstein \cite{SW3} had identified
with great precision the right mechanism of leaking
of energy away from the discrete modes.
\section{Statement of the main result}
\label{section:statement}
We will assume the following hypotheses.
\begin{itemize}
\item[(H1)] $\beta (0)=0$, $\beta\in C^\infty(\R,\R)$.
\item[(H2)] There exists a $p\in(1,5)$ such that for every
$k\ge 0$ there is a fixed $C_k$ with
$$\left| \frac{d^k}{dv^k}\beta(v^2)\right|\le C_k
|v|^{p-k-1} \quad\text{if $|v|\ge 1$}.$$
\item[(H3)] $V(x)$ is smooth and for any multi
index $\alpha $ there are $C_\alpha >0$ and $a_\alpha >0$ such that
$|\partial ^\alpha _x V(x)|\le
C_\alpha e^{-a_\alpha |x|}$.
\item[(H4)]
There exists an open interval $\mathcal{O}$ such that
\begin{equation}
\label{eq:B}
\Delta u-Vu-\omega u+\beta(|u|^2)u=0\quad\text{for $x\in \R^3$},
\end{equation}
admits a $C^1$-family of ground states $\phi _ {\omega }(x)$ for
$\omega\in\mathcal{O}$.
\item [(H5)]
\begin{equation}
\label{eq:1.2}
\frac d {d\omega } \| \phi _ {\omega }\|^2_{L^2(\R^3)}>0
\quad\text{for $\omega\in\mathcal{O}$.}
\end{equation}
\item [(H6)]
Let $L_+=-\Delta +V +\omega -\beta (\phi _\omega ^2 )-2\beta '(\phi
_\omega ^2) \phi_\omega^2$ be the operator whose domain is $H^2
(\R^3)$. Then $L_+$ has exactly one negative eigenvalue and
does not have kernel.
\item [(H7)] Let $\mathcal{H}_\omega$ be the linearized operator around $e^{it\omega}\phi_\omega$
(see Section \ref{section:linearization} for the precise
definition). There is a fixed $m\ge 0$ such that
$\mathcal{H}_\omega$ has $m$ positive eigenvalues $\lambda
_1(\omega )\le \lambda _2(\omega ) \le ...\le \lambda _m(\omega )$.
We assume there are fixed integers $m_0=0< m_1<...<m_{l_0}=m$ such that
$\lambda _j(\omega )= \lambda _i(\omega )$ exactly for $i$ and $j$
both in $(m_l, m_{l+1}]$ for some $l\le l_0$. In this case $\dim
\ker (\mathcal{H}_\omega -\lambda _j(\omega ) )=m_{l+1}-m_l$.
We assume there exist $N_j\in \mathbb{N}$ such that
$0<N_j\lambda _j(\omega )< \omega < (N_j+1)\lambda _j(\omega )$ with
$N_j\ge 1$. We set $N=N_1$.
\item [(H8)] There is no multi index $\mu \in \mathbb{Z}^{m}$
with $|\mu|:=|\mu_1|+...+|\mu_m|\leq 2N_1+3$ such that $\mu \cdot
\lambda =\omega$.
\item[(H9)] If $\lambda _{j_1}<...<\lambda _{j_k}$ are $k$ distinct
$\lambda$'s, and $\mu\in {\mathbb Z}^k$ satisfies
$|\mu| \leq 2N_1+3$, then we have
$$
\mu _1\lambda _{j_1}+\dots +\mu _k\lambda _{j_k}=0 \iff \mu=0\ .
$$
\item[(H10)] $\mathcal{H}_\omega$ has no other eigenvalues except for $0$ and
the $ \pm \lambda _j (\omega )$. The points $\pm \omega$ are not
resonances.
\item [(H11)]
The Fermi golden rule Hypothesis (H11) in subsection
\ref{subsec:FGR}, see \eqref{eq:FGR}, holds.
\end{itemize}
\begin{remark}
\label{rem:Prelim FGR} The novelty of this paper with
respect to \cite{cuccagnamizumachi} is that we prove that some
crucial coefficients are of a specific form, see \eqref{eq:FGR}. As
a consequence, see Lemma \ref{lemma:FGR8}, these coefficients are
positive semidefinite. In the analogue of \eqref{eq:FGR} in
\cite{cuccagnamizumachi}, see Hypothesis 5.2 p.72
\cite{cuccagnamizumachi}, except for the special case $n=1$ of just one
eigenvalue (or of possibly many eigenvalues but all with $N_j=1$), there is no clue on the sign of the term
on the rhs of the key inequality, and the fact that it is positive
is a hypothesis.
\end{remark}
\begin{theorem}\label{theorem-1.1}
Let $\omega_1\in\mathcal{O}$ and $\phi_{\omega_1}(x)$
be a ground state of \eqref{NLS}. Let $u(t,x)$ be a solution to
\eqref{NLS}. Assume (H1)--(H10). Then, there exist an $\epsilon_0>0$
and a $C>0$ such that if
$\epsilon:=\inf_{\gamma\in[0,2\pi]}\|u_0-e^{{\rm i} \gamma}\phi_
{\omega _1} \|_{H^1} <\epsilon_0,$ there exist $\omega
_\pm\in\mathcal{O}$, $\theta\in C^1(\R;\R)$ and $h _\pm \in H^1$
with $\| h_\pm\| _{H^1}+|\omega _\pm -\omega_1|\le C \epsilon $
such that
\begin{equation}\label{scattering}
\lim_{t\to \pm\infty}\|u(t,\cdot)-e^{{\rm i} \theta(t)}\phi_{\omega
_\pm}-e^{{\rm i} t\Delta }h _\pm\|_{H^1}=0 .
\end{equation}
It is possible to write $u(t,x)=e^{{\rm i} \theta(t)}\phi_{\omega (t)}
+ A(t,x)+\widetilde{u}(t,x)$ with $|A(t,x)|\le C_N(t) \langle x
\rangle ^{-N}$ for any $N$, with $\lim _{|t|\to \infty }C_N(t)=0$,
with $\lim _{ t \to \pm \infty } \omega (t)= \omega _\pm$, and such
that for any pair $(r,p)$ which is admissible, by which we mean that
\begin{equation}\label{admissiblepair} 2/r+3/p= 3/2\,
, \quad 6\ge p\ge 2\, , \quad
r\ge 2,
\end{equation}
we have
\begin{equation}\label{Strichartz} \| \widetilde{u} \|
_{L^r_t( \mathbb{R},W^{1,p}_x)}\le
C\epsilon .
\end{equation}
\end{theorem}
We end the introduction with some notation. Given two functions
$f,g:\mathbb{R}^3\to \mathbb{C}$ we set $\langle f,g\rangle = \int
_{\mathbb{R}^3}f(x) g(x) dx$. Given a matrix $A$, we denote by
$A^*$, or by $^tA$, its transpose. Given two vectors $A$ and $B$,
we denote by $A^*B=\sum _j A_jB_j$ their inner product. Sometimes
we omit the summation symbol, and we use the convention on sum over
repeated indexes. Given two functions $f,g:\mathbb{R}^3\to \mathbb{C
}^2$ we set $\langle f,g\rangle = \int _{\mathbb{R}^3}f^*(x) g(x)
dx$. For any $k,s\in \mathbb{R}$ and any Banach space $K$ with field
$\mathbb{C}$
\[ H^{ k,s}(\mathbb{R}^3,K)=\{ f:\mathbb{R}^3\to K \text{ s.t.}
\| f\| _{H^{k,s}}:=\| \langle x \rangle ^s \| (-\Delta +1)^{k} f
\| _{K}\| _{L^2
}<\infty \},\]
$(-\Delta +1)^{k} f(x)= (2\pi )^{-\frac{3}{2}}
\int e^{ {\rm i} x \cdot \xi }(\xi ^2+1)^k \widehat{f}(\xi ) d\xi $,
$\widehat{f}(\xi )= (2\pi )^{-\frac{3}{2}}\int e^{ -{\rm i} x \cdot \xi
} {f}(x ) dx$. In particular we set
$L^{2,s} =H^{0,s} $, $L^2=L^{2,0} $, $H^k=H^{k,0} $.
Sometimes, to emphasize that these spaces refer to spatial
variables, we will denote them by $W^{k,p}_x$, $L^{ p}_x$, $H^k_x$,
$H^{ k,s}_x$ and $L^{2,s}_x$. For $I$ an interval and $Y_x$ any of
these spaces, we will consider Banach spaces $L^p_t( I, Y_x)$ with
mixed norm $ \| f\| _{L^p_t( I, Y_x)}:= \| \| f\| _{Y_x} \| _{L^p_t(
I )}.$ Given an operator $A$, we will denote by $R_A(z)=(A-z)^{-1}$
its resolvent. We set $\mathbb{N}_0=\mathbb{N}\cup \{0 \}$. We will
consider multi indexes $\mu \in \mathbb{N}_0^n$. For $\mu \in
\mathbb{Z}^n$ with $\mu =(\mu _1,..., \mu _n)$ we set $|\mu |=\sum
_{j=1}^n |\mu _j|.$ For $X$ and $Y$ two Banach space, we will
denote by $B(X,Y)$ the Banach space of bounded linear operators from
$X$ to $Y$ and by $B^{\ell}(X,Y)= B ( \prod _{j=1}^\ell X ,Y)$. We
denote by $a^{\otimes \ell}$ the element $\otimes _{j=1}^\ell a$ of
$\otimes _{j=1}^\ell X$ for some $a\in X$. Given a differential form
$\alpha$, we denote by
$d\alpha$ its exterior differential.
\textbf{Acknowledgments.} I wish to thank Dario Bambusi for pointing out
a gap in the proof of an earlier version of Theorem \ref{th:main}.
\section{Linearization and set up}
\label{section:linearization}
Let $U={^t(u,\overline{u})}$. We introduce now energy $E(u)$ and mass $Q(u)$. We set
\begin{equation} \label{eq:energyfunctional}\begin{aligned}&
E(U)=E_K(U)+E_P(U)\\&
E_K(U)= \int _{\R ^3}
\nabla u \cdot \nabla \overline{u} dx+ \int _{\R ^3}
V u \overline{u} dx \\&
E_P(U)=
\int _{\R ^3}B( u \overline{u}) dx \end{aligned}
\end{equation}
with $B(0)=0$ and $\partial _{\overline{u}}B(|u|^2)=\beta (|u|^2)u$.
We will consider the matrices \begin{equation}
\label{eq:Pauli}\begin{aligned} &\sigma _1=
\begin{pmatrix}0 &
1 \\
1 & 0
\end{pmatrix} \, ,
\sigma _2=\begin{pmatrix} 0 &
{\rm i} \\
-{\rm i} & 0
\end{pmatrix} \, ,
\sigma _3=\begin{pmatrix} 1 & 0\\0 & -1 \end{pmatrix} .
\end{aligned}
\end{equation}
We introduce the mass \begin{equation}\label{eq:charge}Q(U)= \int _{\R ^3}u
\overline{u} dx= \frac{1}{2}\langle U, \sigma _1 U\rangle .
\end{equation}
Let
\begin{equation} \label{eq:function q} \Phi _\omega =\begin{pmatrix} \phi _\omega
\\ \phi _\omega
\end{pmatrix} , \, q(\omega )=Q(\Phi _\omega ),
\, e (\omega )=E(\Phi _\omega ), \, d(\omega )=e (\omega )+\omega
q(\omega ) .
\end{equation}
Often we will denote $\Phi _\omega $ simply by $\Phi$.
The
\eqref{NLS} can be written as
\begin{equation}\label{eq:NLSvectorial} {\rm i} \dot U =
\begin{pmatrix} 0 &1
\\ -1 & 0
\end{pmatrix} \begin{pmatrix} \partial _{u}E
\\ \partial _{\overline{u}}E
\end{pmatrix} = \sigma _3 \sigma _1 \nabla E (U),
\end{equation}
with $\nabla E (U)$ defined by \eqref{eq:NLSvectorial}.
We have for $\vartheta \in \R$
\begin{equation}\label{eq:gaugeInvariance}
E( e^{-{\rm i} \sigma _3\vartheta } U)=E( U) \text{ and }
\nabla E ( e^{-{\rm i} \sigma _3\vartheta }
U)=e^{ {\rm i} \sigma _3\vartheta } \nabla E ( U) .
\end{equation}
Write for $\omega \in \mathcal{O}$
\begin{equation}
U= e^{{\rm i} \sigma _3\vartheta } (\Phi _\omega + R).\nonumber
\end{equation}
Then
\begin{equation}\label{system}{\rm i} \dot U= -\sigma _3
\dot \vartheta e^{{\rm i} \sigma _3\vartheta }
(\Phi _\omega + R) + {\rm i} \dot \omega e^{{\rm i} \sigma _3\vartheta }
\partial _\omega \Phi _\omega + {\rm i} e^{{\rm i} \sigma _3\vartheta }
\dot R
\end{equation}
and
\begin{equation}
\begin{aligned} &
-\sigma _3
\dot \vartheta e^{{\rm i} \sigma _3\vartheta }
(\Phi _\omega + R) + {\rm i} \dot \omega e^{{\rm i} \sigma _3\vartheta }
\partial _\omega \Phi _\omega + {\rm i} e^{{\rm i} \sigma _3\vartheta }
\dot R =
\sigma _3 \sigma _1 e^{-{\rm i} \sigma _3 \vartheta }\nabla E (\Phi _\omega
+R).\end{aligned} \nonumber
\end{equation}
Equivalently we get
\begin{equation}\label{system1}
\begin{aligned} &
-\sigma _3
(\dot \vartheta -\omega )
(\Phi _\omega + R) + {\rm i} \dot \omega
\partial _\omega \Phi _\omega + {\rm i}
\dot R =\\& = \sigma _3 \sigma _1 \left ( \nabla E (\Phi _\omega
+R)+\omega \nabla Q (\Phi _\omega
+R)\right ) .\end{aligned}
\end{equation}
We have $\frac{d}{dt}\sigma _3 \sigma _1 \left ( \nabla E (\Phi _\omega
+tR)+\omega \nabla Q (\Phi _\omega
+tR)\right ) _{\mid _{t=0}}= \mathcal{H}_\omega R$ with
\begin{equation} \label{eq:linearization} \begin{aligned} &
\mathcal{H}_\omega = \sigma_3(-\Delta+V+\omega)
+\sigma_3
\left[\beta (\phi ^2_{\omega }) +\beta ^\prime (\phi ^2_{\omega
})\phi ^2_{\omega } \right] -{\rm i} \sigma _2 \beta ^\prime (\phi ^2
_{\omega })\phi ^2 _{\omega } .\end{aligned}
\end{equation}
The essential spectrum of $\mathcal{H}_\omega$ consists of $(-\infty
, -\omega ]\cup [ \omega,+\infty )$. It is well known (see
\cite{W2}) that by (H5) $0$ is an isolated eigenvalue of
$\mathcal{H}_\omega$ with $\dim N_g(\mathcal{H}_\omega)=2$ and
\begin{equation}\label{eq:Kernel} \mathcal{H}_\omega\sigma_3\Phi_\omega=0,\quad
\mathcal{H}_\omega\partial_\omega\Phi_\omega =-\Phi_\omega.
\end{equation}
Since $\mathcal{H}_\omega^*=\sigma_3\mathcal{H}_\omega\sigma_3$, we
have $N_g(\mathcal{H}_\omega^*)=\operatorname{span}\{\Phi_\omega,
\sigma_3\partial_\omega\Phi_\omega\}$. We consider
eigenfunctions $\xi _j(\omega)$ with eigenvalue $\lambda
_j(\omega)$:
$$
\mathcal{H}_\omega\xi _j(\omega)=\lambda _j(\omega)\xi
_j(\omega),\quad \mathcal{H}_\omega\sigma_1\xi _j(\omega)=-\lambda
_j(\omega)\sigma_1\xi _j(\omega) .$$ They can be normalized so that
$\langle \sigma_3 \xi _j(\omega),\overline{\xi} _\ell(\omega)
\rangle =\delta _{j\ell }$, this is based on Proposition 2.4
\cite{Cu3}. Furthermore, they can be chosen to be real, that is with
real entries, so $\xi _j=\overline{ \xi} _j$ for all $j$.
Both $\phi_\omega$ and $\xi _j(\omega,x)$ are smooth in
$\omega\in\mathcal{O}$ and $x\in\R^3$ and satisfy
$$\sup_{\omega\in\mathcal{K},x\in\R^3} e^{a|x|}( |\partial ^\alpha _x\phi_\omega(x)|+
\sum _{j=1}^{m}|\partial ^\alpha _x \xi _j(\omega,x)| ) <\infty$$ for
every $a\in(0,\inf_{\omega\in\mathcal{K}}\sqrt{\omega-\lambda
_m(\omega)})$ and every compact subset $\mathcal{K}$ of
$\mathcal{O}$.
For $\omega\in\mathcal{O}$, we have the
$\mathcal{H}_\omega$-invariant Jordan block decomposition
\begin{align} \label{eq:spectraldecomp} &
L^2(\R^3,\mathbb{C}^2)=N_g(\mathcal{H}_\omega)\oplus \big (\oplus _{\pm}
\oplus _{j=1}^m \ker (\mathcal{H}_\omega\mp \lambda _j(\omega))
\big)\oplus L_c^2(\mathcal{H}_\omega),
\end{align}
$L_c^2(\mathcal{H}_\omega):=
\left\{N_g(\mathcal{H}_\omega^\ast)\oplus \big (\oplus _{\lambda \in
\sigma _d\backslash \{ 0\}} \ker (\mathcal{H}_\omega ^*- \lambda
) \big)\right\} ^\perp $ with $\sigma _d =\sigma _d
(\mathcal{H}_\omega)$. We also set $L_d^2(\mathcal{H}_\omega):=
N_g(\mathcal{H}_\omega)\oplus \big (\oplus _{\lambda \in \sigma
_d\backslash \{ 0\}} \ker (\mathcal{H}_\omega - \lambda (\omega))
\big ) .$ By $P_c(\mathcal{H}_{\omega})$ (resp.
$P_d(\mathcal{H}_{\omega})$), or simply by $P_c( {\omega})$ (resp.
$P_d( {\omega})$), we denote the projection on
$L_c^2(\mathcal{H}_\omega)$ (resp. $L_d^2(\mathcal{H}_\omega)$)
associated to the above direct sum. The space
$L^2_c(\mathcal{H}_{\omega})$ depends continuously on $\omega$.
We specify the
ansatz imposing that
\begin{equation}\label{eq:anzatz}
U= e^{{\rm i} \sigma _3\vartheta } (\Phi _\omega + R)
\text{ with $\omega \in \mathcal{O}$, $\vartheta \in \R$ and $R\in N^{\perp}_g (\mathcal{H}_\omega ^*)$.}
\end{equation}
We consider coordinates
\begin{equation}\label{eq:coordinate}U= e^{{\rm i} \sigma _3\vartheta}
\left( \Phi
_\omega +z \cdot \xi (\omega )+ \overline{z }\cdot \sigma_1\xi
(\omega )+P_c(\mathcal{H}_{\omega}) f\right ) \end{equation}
where $\omega \in \mathcal{O}$, $z\in \mathbb{ C} $ and $f\in
L^2_c(\mathcal{H}_{\omega _0})$ where we fixed $\omega _0\in
\mathcal{O}$ such that $q(\omega _0)=\| u_0\| _2^2$.
\eqref{eq:coordinate} is a system of coordinates if we use the
notation $ \mathcal{O}$ to denote a small neighborhood of $\omega
_1$ in Theorem \ref{theorem-1.1}. Indeed by Lemma \ref{lem:regular}
below, the map $P_c(\mathcal{H}_{\omega})$ is an isomorphism
from $L^2_c(\mathcal{H}_{\omega _0})$ to $L^2_c(\mathcal{H}_{\omega
})$. In particular
\begin{align}
\label{eq:decomp2}
& R =\sum _{j=1}^{m}z_j \xi _j(\omega )+
\sum _{j=1}^{m}\overline{z }_j\sigma_1\xi _j(\omega )
+P_c(\mathcal{H}_{\omega } )f ,\\
\label{eq:decomp3} & R \in N_g^\perp(\mathcal{H}_{\omega
}^*)\quad\text{and}\quad f \in L_c^2(\mathcal{H}_{\omega _0}).
\end{align}
We also set $z\cdot \xi =\sum _jz_j \xi _j$ and $\overline{z}\cdot
\sigma_1\xi =\sum _j\overline{z}_j \sigma_1\xi _j$. In the sequel we
set
\begin{equation} \label{eq:partialR} \partial _\omega R=
\sum _{j=1}^{m}z_j \partial _\omega \xi _j(\omega )+ \sum
_{j=1}^{m}\overline{z }_j\sigma_1\partial _\omega \xi _j(\omega
)+\partial _\omega P_c(\mathcal{H}_{\omega } )f.
\end{equation}
{\bf e}gin{lemma}
\langlebel{lem:regular} We have $P_c(\mathcal{H}_{\omega } )^*=
P_c(\mathcal{H}_{\omega } ^*)$ for all $ \omega
\in \mathcal{O}$.
For all $ \omega , \widetilde{\omega}\in \mathcal{O}$ the following operators
are bounded from $H^{-k,-s}$ to $H^{ k', s'}$ for all exponents:
{\bf e}gin{equation} \langlebel{eq:list op1} {\bf e}gin{aligned} &\partial ^\ell _\omega
P_c(\mathcal{H}_{\omega } ) \text{ for any $\ell >0$}\, ; \\&
P_c(\mathcal{H}_{\omega } )
-P_c(\mathcal{H}_{\omega } ^*) \, ; \, P_c(\mathcal{H}_{\omega } )
-P_c(\mathcal{H}_{\widetilde{\omega}} ).
\end{aligned}
\end{equation}
Consider $\omega _1$ of Theorem
{\rm e}f{theorem-1.1}. There exists $\varepsilon _1 >0$
such that $(\omega _1-\varepsilon _1, \omega _1+\varepsilon _1)
\subset \mathcal{O}$, and for any pair $\widetilde{\omega} , \omega
\in (\omega _1-\varepsilon _1, \omega _1+\varepsilon _1)$
we have {\bf e}gin{equation} \langlebel{isomorphism}
\text{$P_c( \omega ) P_c( \widetilde{\omega}
): L^2_c(\mathcal{H}_{\widetilde{\omega} } )\to
L^2_c(\mathcal{H}_{\omega } ) $ is an isomorphism}\end{equation}
Furthermore,
the following operator
is bounded from $H^{-k,-s}$ to $H^{ k', s'}$ for all exponents:
{\bf e}gin{equation} \langlebel{eq:list op2} {\bf e}gin{aligned} &
P_c(\mathcal{H}_{\widetilde{\omega} } )
\left ( 1- (P_c(\mathcal{H}_{\omega } )
P_c(\mathcal{H}_{\widetilde{\omega}
} ))^{-1} \right ) P_c(\mathcal{H}_{\omega } )
\end{aligned}
\end{equation}
where in the last line
and $(P_c( \omega ) P_c( \widetilde{\omega} ))^{-1}$ is the
inverse of the operator in \eqref{isomorphism}. Finally, for
$\epsilon _0 $ in Theorem {\rm e}f{theorem-1.1} sufficiently small, we
have $|\omega _0 -\omega _1|<\varepsilon _1$, with $\omega _0$
defined under \eqref{eq:coordinate}.
\end{lemma} \proof The first statement follows from the definition.
We have $P_c(\mathcal{H}_{\omega})=1-P_d(\mathcal{H}_{\omega})$
where $P_d(\mathcal{H}_{\omega})$ are finite linear combinations of
rank 1 operators $\Psi \langle \Psi ', \quad \rangle $ with
$\Psi, \Psi ' \in H^{K,S}$ for any $(K,S)$. This implies the statement for
the second line of \eqref{eq:list op1}. $\partial ^\ell _\omega
P_c(\mathcal{H}_{\omega } ) $ is well defined by the fact that in
(H4) the dependence on $\omega$ is in fact smooth (this is seen
iterating the argument in Theorem 18 \cite{shatahstrauss}). Assuming
\eqref{isomorphism}, and for $P_c=P_c( \omega )$, $\widetilde{P}_c
=P_c( \widetilde{\omega } )$, $P_d=P_d( \omega )$, $\widetilde{P}_d
=P_d( \widetilde{\omega } )$, we have \begin{equation}
\begin{aligned} & \widetilde{P}_c \left ( 1- (P_c \widetilde{P}_c
)^{-1} \right ) P_c = (\widetilde{P}_c
-P_c)P_c-(\widetilde{P}_c-P_c)(P_c \widetilde{P}_c)^{-1}P_c ,
\end{aligned} \nonumber
\end{equation}
which yields \eqref{eq:list op2}.
We prove \eqref{isomorphism}.
First of all the map is 1--1. Indeed if $P_c \widetilde{P}_cf=0$,
then we have $f= P_df= (P_d-\widetilde{P}_d)f$. Then $\| f\| _{2}\le
C |\omega -\widetilde{\omega }| \| f\| _{2}$ for some fixed $C>0$.
This, for $2C \varepsilon _1<1$, is compatible only with $f=0$. If
we knew that the map in \eqref{isomorphism} is onto, then
\eqref{isomorphism} would hold by the open mapping theorem. So suppose
the map is not onto. Let $\mathcal{R}(P_c \widetilde{P}_c )$ be the
range of $P_c \widetilde{P}_c $. If there exists $\widetilde{g}\in
L^2_c(\mathcal{H}^*_{\omega} )$ such that $\widetilde{g}\neq 0$ and
$\langle
\widetilde{g} ,P_c \widetilde{P}_c f\rangle =0$ for all $f \in
L^2_c(\mathcal{H}_{\widetilde{\omega } } )$, then since
$\widetilde{g}=\sigma _3 g$ for a $g \in L^2_c(\mathcal{H}_{\omega }
)$, we get $0= \langle \widetilde{g} ,P_c \widetilde{P}_c f\rangle =
\langle \widetilde{P}_c P_c g ,\sigma _3 f\rangle$ for all $f \in
L^2_c(\mathcal{H}_{\widetilde{\omega }})$. This implies
$\widetilde{P}_c P_c g=0$, and since $g \in
L^2_c(\mathcal{H}_{\omega } )$, by the 1--1 argument this implies
$g=0$. So if the map in \eqref{isomorphism} is not onto, then
$\mathcal{R}(P_c \widetilde{P}_c )$ is dense in $
L^2_c(\mathcal{H}_{\omega } )$. We will see in a moment that
$\mathcal{R}(P_c \widetilde{P}_c )$ is closed in $
L^2_c(\mathcal{H}_{\omega } )$, hence concluding that the map
in \eqref{isomorphism} is also onto. To see that $\mathcal{R}(P_c
\widetilde{P}_c )$ is closed in $ L^2_c(\mathcal{H}_{\omega } )$,
let $\widetilde{f}_n\in L^2_c(\mathcal{H}_{\widetilde{\omega} } )$
be a sequence such that $\| P_c\widetilde{f}_n -f\| _2\to 0 $ for
some $f\in L^2_c(\mathcal{H}_{\omega } )$. By $\| \widetilde{f}_n \|
_2\le \| P_c \widetilde{f}_n \| _2 +C|\omega -\widetilde{\omega} |
\| \widetilde{f}_n \| _2$ for some fixed $C$, it follows that for
$2C\varepsilon _{1}<1$ the sequence $\| \widetilde{f}_n \| _2$ is
bounded. Then by weak compactness there is a subsequence
$\widetilde{f} _{n_j}$ weakly convergent to a $\widetilde{f}\in
L^2_c(\mathcal{H}_{\widetilde{\omega} } )$. Since $P_c
\widetilde{P}_c$ is also weakly continuous, $P_c
\widetilde{P}_c\widetilde{f}=f$.
\noindent The final statement is elementary by \eqref{eq:1.2}.\qed
Using the system of coordinates \eqref{eq:coordinate} we rewrite
system \eqref{system1} as
{\bf e}gin{equation} \langlebel{eq:system}
{\bf e}gin{aligned} &
-\sigma _3
(\dot \vartheta -\omega )
({\mathcal P}i _\omega + z\cdot \xi +
{\overline{z}}\cdot \sigma _1\xi + P_c(\mathcal{ H}_\omega ) f)+ \\& + {\rm i} \dot \omega
( \partial _\omega {\mathcal P}i _\omega +z\cdot \partial _\omega \xi +
\overline{z} \cdot \sigma _1\partial _\omega \xi +\partial _\omega
P_c(\mathcal{H}_\omega ) f) +{\rm i}
\dot z\cdot \xi +\\& + {\rm i}
\dot {\overline{z}}\cdot \sigma _1\xi +{\rm i}
P_c(\mathcal{ H}_\omega ) \dot f = \sigma _3
\sigma _1
\nabla E ( {\mathcal P}i _\omega + z\cdot \xi +
{\overline{z}}\cdot \sigma _1\xi +
P_c(\mathcal{ H}_\omega ) f) \\& +\omega \sigma _3
\sigma _1 \nabla Q ( {\mathcal P}i _\omega + z\cdot \xi +
{\overline{z}}\cdot \sigma _1\xi +
P_c(\mathcal{ H}_\omega ) f) ,\end{aligned}
\end{equation}
where $z\cdot \xi =\sum _j z_j\xi _j$ and
$\overline{z}\cdot \sigma _1\xi =\sum _j \overline{z}_j\sigma _1\xi _j$, where $\xi =\xi (\omega )$. Notice for future reference, that fixed any $\omega _0$
we also have
{\bf e}gin{equation} \langlebel{system2}
{\bf e}gin{aligned} &
-\sigma _3
(\dot \vartheta -\omega _{0})
({\mathcal P}i _\omega + z\cdot \xi +
{\overline{z}}\cdot \sigma _1\xi + P_c(\mathcal{ H}_\omega ) f)+ \\& + {\rm i} \dot \omega
( \partial _\omega {\mathcal P}i _\omega +z\cdot \partial _\omega \xi +
\overline{z} \cdot \sigma _1\partial _\omega \xi +\partial _\omega
P_c(\mathcal{H}_\omega ) f) +{\rm i}
\dot z\cdot \xi +\\& + {\rm i}
\dot {\overline{z}}\cdot \sigma _1\xi +{\rm i}
P_c(\mathcal{ H}_\omega ) \dot f = \sigma _3
\sigma _1
\nabla E ( {\mathcal P}i _\omega + z\cdot \xi +
{\overline{z}}\cdot \sigma _1\xi +
P_c(\mathcal{ H}_\omega ) f) \\& +\omega _{0}\sigma _3
\sigma _1 \nabla Q ( {\mathcal P}i _\omega + z\cdot \xi +
{\overline{z}}\cdot \sigma _1\xi +
P_c(\mathcal{ H}_\omega ) f) ,\end{aligned}
\end{equation}
where \eqref{system2} is the same as \eqref{eq:system} except that $\omega _0$
replaces $\omega $ at the first spot where it appears in the first and in the last line.
We end this section with a short heuristic description about why
the crucial property needed to prove asymptotic stability of ground
states, is the hamiltonian nature of the \eqref{NLS}. In terms of
\eqref{eq:coordinate}, and oversimplifying, \eqref{system} splits as
{\bf e}gin{equation} {\bf e}gin{aligned} &
{\rm i} \dot z -\langlembda z = \sum _{\mu \nu}a _{\mu \nu}
z^{\mu}\overline{z}^{\nu}+ \sum _{\mu \nu} z^{\mu}\overline{z}^{\nu}
\langlenglegle f(t,x) ,\overline{ G} _{\mu \nu}(x,\omega ) \ranglenglegle _{L^2_x}+\cdots
\\& {\rm i} \dot f -\mathcal{H}_\omega f = \sum _{\mu \nu}
z^{\mu}\overline{z}^{\nu} M _{\mu \nu}(x,\omega )+ \cdots
.\end{aligned} \nonumber
\end{equation}
Here we are assuming $m=1$. We focus on positive times $t\ge 0$
only. After changes of variables, see \cite{cuccagnamizumachi}, we
obtain
{\bf e}gin{equation} \langlebel{final} {\bf e}gin{aligned} &
{\rm i} \dot z -\langlembda z = P(|z|^2)z+ \overline{z}^N \langlenglegle f(t,x) ,\overline{ G} _{\mu \nu}(x,\omega ) \ranglenglegle _ {L^2_x}+\cdots
\\& {\rm i} \dot f -\mathcal{H}_\omega f = z^{N+1}
M (x,\omega )+ \cdots
.\end{aligned}
\end{equation}
The next step is to write, for $g$ an error term,
{\bf e}gin{equation} {\bf e}gin{aligned} & f= -z^{N+1}R_{\mathcal{H}_\omega}^{+}((N +1)\, \langlembda )M + g\\&
\\& {\rm i} \dot z-\langlembda z= P(|z|^2)z- |z|^{2N }z\langlenglegle R_{\mathcal{H}_\omega}^{+}((N +1)\,
\langlembda )M , \overline{G} \ranglenglegle _{L^2_x} +...\end{aligned}\nonumber
\end{equation}
Then, ignoring error terms, by $$R_{\mathcal{H}_\omega}^{+}
((N +1)\, \langlembda
) = P.V. \dfrac{1}{{\mathcal{H}_\omega}-(N +1)\,\langlembda}+{\rm i} \pi
\delta ({\mathcal{H}_\omega}-(N +1)\, \langlembda )$$ the equation for
$z$ has solutions such that
$$ \frac{d}{dt}|z|^2=-\Gamma |z|^{2N+2 } ,
|z(t)|=\dfrac{|z(0)|}{ (|z(0)|^{2N} \, N\, \Gamma \, t+
1)^{\frac{1}{2N}}}$$ with (the Fourier transforms are associated
to $\mathcal{H}_\omega$)
$$\Gamma =
2\pi \langlenglegle \delta (\mathcal{H}_\omega-(N +1)\,\langlembda )M, G
\ranglenglegle = \int _{ |\xi| = \sqrt{(N +1)\, \langlembda -\omega} }\frac{\widehat{M}(\xi
) \cdot \overline{\widehat{G} (\xi )}d\sigma}{\sqrt{(N +1)\, \langlembda -\omega}} .$$ If $\Gamma
>0$, we see that $z(t)$ decays. Notice that $\Gamma
<0$ is incompatible with orbital stability, which requires $z$ to
remain small, see Corollary 4.6 \cite{cuccagnamizumachi}. The latter
indirect argument to prove positive semidefiniteness of $\Gamma$,
does not seem to work when in \eqref{system} there are further
discrete components. So we need another way to prove that $\Gamma
\ge 0$. This is provided by the hamiltonian structure. Indeed, if
\eqref{final} is of the form
{\bf e}gin{equation} \langlebel{ham} {\bf e}gin{aligned} &
{\rm i} \dot z = \partial _{\overline{z}} K \, , \quad {\rm i} \dot f = \nabla
_{\overline{f}} K,
\end{aligned}
\end{equation}
then by Schwartz lemma $(N+1)!M=\partial ^{N+1}_z\nabla _{\overline{f}}K
=\overline{\partial ^{N }_{\overline{z}}\nabla _{f}\partial _{\overline{z}}K}=N! {G}$ at $z=0$ and $f=0$.
So $\Gamma$ is positive semidefinite. This very simple
idea on system \eqref{ham}, inspired \cite{bambusicuccagna}
and inspires the present
paper.
\section{Gradient of the coordinates}
\langlebel{section:modulation} We focus on ansatz \eqref{eq:anzatz}
and on the coordinates \eqref{eq:coordinate}. In particular we
compute the gradient of the coordinates. Here we recall that given
a scalar valued function $F$, the relation between exterior
differential and gradient is $dF=\langlenglegle \nabla F, \quad \ranglenglegle $.
Consider the following two functions
{\bf e}gin{equation} \mathcal{F}(U,\omega , \vartheta ):=\langlenglegle
e^{-{\rm i} \sigma _3\vartheta }U-{\mathcal P}i _\omega , {\mathcal P}i _\omega\ranglenglegle
\text{ and } \mathcal{G}(U,\omega , \vartheta ):=\langlenglegle e^{-{\rm i}
\sigma _3\vartheta }U ,\sigma _3\partial _\omega {\mathcal P}i
_\omega\ranglenglegle . \nonumber
\end{equation}
Then ansatz \eqref{eq:anzatz} is obtained by choosing $(\omega ,
\vartheta )$ s.t. $R:=e^{-{\rm i} \sigma _3\vartheta }U-{\mathcal P}i _\omega$
satisfies $R\in N_g^\perp (\mathcal{ H} _\omega ^*)$ by means of the
implicit function theorem. In particular:
{\bf e}gin{equation}
{\bf e}gin{aligned} & \mathcal{F}_{\vartheta} =-{\rm i} \langlenglegle \sigma _3e^{-{\rm i} \sigma _3\vartheta
}U, {\mathcal P}i _\omega \ranglenglegle =-{\rm i} \langlenglegle \sigma _3R, {\mathcal P}i _\omega
\ranglenglegle ;\\& \mathcal{F}_{\omega} = -2 q'(\omega)+\langlenglegle e^{-{\rm i}
\sigma _3\vartheta }U, \partial _\omega {\mathcal P}i _\omega \ranglenglegle = -
q'(\omega)+\langlenglegle R, \partial _\omega {\mathcal P}i _\omega \ranglenglegle
; \\& \nabla _U \mathcal{F}=e^{-{\rm i} \sigma _3\vartheta
} {\mathcal P}i _\omega \, , \, \nabla _U \mathcal{G}=e^{-{\rm i} \sigma
_3\vartheta } \sigma _3\partial _\omega {\mathcal P}i _\omega ;\\&
\mathcal{G}_{\vartheta} =-{\rm i} \langlenglegle
e^{-{\rm i} \sigma _3\vartheta }U,\partial _\omega {\mathcal P}i _\omega \ranglenglegle
=-{\rm i} ( q'(\omega)+ \langlenglegle R, \partial _\omega {\mathcal P}i _\omega
\ranglenglegle ) ;\\& \mathcal{G}_{\omega} =\langlenglegle e^{-{\rm i} \sigma
_3\vartheta }U ,\sigma _3\partial _\omega ^2 {\mathcal P}i _\omega\ranglenglegle
=\langlenglegle R ,\sigma _3\partial _\omega ^2 {\mathcal P}i _\omega\ranglenglegle .
\end{aligned}\nonumber
\end{equation}
By $\mathcal{F}(U,\omega (U), \vartheta (U))=\mathcal{G}(U,\omega (U), \vartheta (U))=0$ we get $ \mathcal{W}_{\omega} \nabla \omega +\mathcal{W}_{\vartheta} \nabla \vartheta =-\nabla _U\mathcal{W}$ for $\mathcal{W}=\mathcal{F},\mathcal{G}.$
By the above formulas, if we set
{\bf e}gin{equation}\langlebel{eq:matrixA} \mathcal{A}={\bf e}gin{pmatrix} - q'(\omega)+\langlenglegle R, \partial _\omega {\mathcal P}i _\omega \ranglenglegle &
-{\rm i} \langlenglegle \sigma _3R, {\mathcal P}i _\omega \ranglenglegle \\ \langlenglegle R ,\sigma
_3\partial _\omega ^2 {\mathcal P}i _\omega\ranglenglegle & -{\rm i} ( q'(\omega)+
\langlenglegle R, \partial _\omega {\mathcal P}i _\omega \ranglenglegle ) \end{pmatrix}
\end{equation}
we have
{\bf e}gin{equation}\langlebel{eq:ApplmatrixA}
\mathcal{A}
{\bf e}gin{pmatrix} \nabla \omega \\ \nabla \vartheta \end{pmatrix}
={\bf e}gin{pmatrix} -e^{-{\rm i} \sigma _3\vartheta } {\mathcal P}i _\omega \\
-e^{-{\rm i} \sigma _3\vartheta } \sigma _3\partial _\omega{\mathcal P}i _\omega
\end{pmatrix} .
\end{equation}
So
\begin{equation}\label{eq:GradModulation}
\begin{aligned} &
\nabla \omega =\frac{(q'(\omega)+ \langle R, \partial _\omega \Pi
_\omega \rangle )e^{-{\rm i} \sigma _3\vartheta } \Pi _\omega
-\langle \sigma _3R, \Pi _\omega \rangle e^{-{\rm i} \sigma _3\vartheta
} \sigma _3\partial _\omega \Pi _\omega}{(q'(\omega))^2 -\langle R,
\partial _\omega \Pi _\omega \rangle ^2 + \langle \sigma _3R, \Pi
_\omega \rangle \langle R ,\sigma _3\partial _\omega ^2 \Pi
_\omega\rangle }
\\ & \nabla \vartheta =\frac{\langle R ,\sigma _3\partial _\omega ^2 \Pi
_\omega\rangle e^{-{\rm i} \sigma _3\vartheta } \Pi _\omega
+(q'(\omega)- \langle R, \partial _\omega \Pi _\omega \rangle
)e^{-{\rm i} \sigma _3\vartheta } \sigma _3\partial _\omega \Pi
_\omega} {{\rm i} \left [ (q'(\omega))^2 -\langle R,
\partial _\omega \Pi _\omega \rangle ^2 + \langle \sigma _3R, \Pi
_\omega \rangle \langle R ,\sigma _3\partial _\omega ^2 \Pi
_\omega\rangle\right ]} \, .
\end{aligned}
\end{equation}
Notice that along with the decomposition \eqref{eq:spectraldecomp}
we have {\bf e}gin{align} \langlebel{eq:dualspectraldecomp} &
L^2(\R^3,\mathbb{C}^2)=N_g(\mathcal{H}_\omega ^*)\oplus \big (\oplus
_{\langlembda \in \sigma _d\backslash \{ 0\}} \ker (\mathcal{H}_\omega
^*- \langlembda (\omega)) \big)\oplus L_c^2(\mathcal{H}_\omega ^*),
\end{align}
$L_c^2(\mathcal{H}_\omega ^*):= \left\{N_g(\mathcal{H}_\omega
)\oplus\big (\oplus _{\langlembda \in \sigma _d\backslash \{ 0\}} \ker
(\mathcal{H}_\omega - \langlembda (\omega)) \big) \right\} ^\perp.$ We
also set $L_d^2(\mathcal{H}_\omega ^*):= N_g(\mathcal{H}_\omega
^*)\oplus \big (\oplus _{\langlembda \in \sigma _d\backslash \{ 0\}}
\ker (\mathcal{H}_\omega ^* - \langlembda (\omega)) \big ) .$ Notice
that $N_g(\mathcal{H}_\omega ^*) =\sigma _3N_g(\mathcal{H}_\omega
)$, $\ker (\mathcal{H}_\omega ^*- \langlembda ) =\sigma _3\ker
(\mathcal{H}_\omega - \langlembda ) $, $L_c^2(\mathcal{H}_\omega
^*)=\sigma _3L_c^2(\mathcal{H}_\omega )$ and
$L_d^2(\mathcal{H}_\omega ^*)=\sigma _3L_d^2(\mathcal{H}_\omega )$,
so that \eqref{eq:dualspectraldecomp} is obtained applying $\sigma
_3 $ to decomposition \eqref{eq:spectraldecomp}. We can decompose
gradients as
{\bf e}gin{equation}\langlebel{eq:gradient}{\bf e}gin{aligned} &
\nabla F(U)=e^{-{\rm i} \sigma _3\vartheta } \big [ P_{N_g(\mathcal{H}^*_\omega)}
+ \\& \sum _j (P _{
\ker (\mathcal{H}^*_\omega-\langlembda _j)}
+
P _{
\ker (\mathcal{H}^*_\omega +\langlembda _j)} )
+
P _c (\mathcal{H}_\omega ^*)
\big ] e^{ {\rm i} \sigma _3\vartheta }\nabla F (U)=\\&
\frac{\langlenglegle \nabla F(U) ,
e^{ {\rm i} \sigma _3\vartheta }\partial _\omega {\mathcal P}i \ranglenglegle }
{q'(\omega )}
e^{- {\rm i} \sigma _3\vartheta } {\mathcal P}i +
\frac{\langlenglegle \nabla F (U), e^{ {\rm i} \sigma _3\vartheta }
\sigma _3 {\mathcal P}i \ranglenglegle }
{q'(\omega )}
e^{- {\rm i} \sigma _3\vartheta }\sigma _3\partial _\omega {\mathcal P}i
\\& +
\sum _j\langlenglegle \nabla F(U) ,
e^{ {\rm i} \sigma _3\vartheta } \xi _j \ranglenglegle e^{ -{\rm i} \sigma _3\vartheta }
\sigma _3\xi _j +\sum _j \langlenglegle \nabla F(U) ,
e^{ {\rm i} \sigma _3\vartheta }\sigma _1 \xi _j \ranglenglegle
e^{ -{\rm i} \sigma _3\vartheta }\sigma _1\sigma _3\xi _j \\& + e^{ -{\rm i} \sigma _3\vartheta }
P_c(\mathcal{H}_\omega ^* )e^{ {\rm i} \sigma _3\vartheta }\nabla F(U).
\end{aligned}
\end{equation}
Using coordinates \eqref{eq:coordinate} and
notation \eqref{eq:partialR}, at $U$ we have the following formulas
for the vectorfields
{\bf e}gin{equation}\langlebel{eq:vectorfields} {\bf e}gin{aligned} &
\frac \partial {\partial {\omega}} =
e^{ {\rm i} \sigma _3\vartheta } \partial _\omega ( {\mathcal P}i +R)
\, ,\, \frac \partial {\partial {\vartheta}} ={\rm i}
e^{ {\rm i} \sigma _3\vartheta } \sigma _3 ( {\mathcal P}i +R)
,\\& \frac \partial {\partial {z_j}} =
e^{ {\rm i} \sigma _3\vartheta } \xi _j \, ,\,
\frac \partial {\partial {\overline{z}_j}} =
e^{ {\rm i} \sigma _3\vartheta }\sigma _1 \xi _j .\end{aligned}
\end{equation}
Hence, by $\partial _{\omega}F =dF(\frac \partial {\partial
{\omega}})=\langlenglegle \nabla F, \frac \partial {\partial {\omega}}
\ranglenglegle $ etc., we have
{\bf e}gin{equation}\langlebel{eq:derivativeZ} {\bf e}gin{aligned} &
\partial _{\omega}F =\langlenglegle \nabla F ,
e^{ {\rm i} \sigma _3\vartheta } \partial _\omega ( {\mathcal P}i +R) \ranglenglegle
\, ,\, \partial _{\vartheta }F ={\rm i} \langlenglegle \nabla F ,
e^{ {\rm i} \sigma _3\vartheta } \sigma _3 ( {\mathcal P}i +R) \ranglenglegle
,\\&
\partial _{z_j}F =
\langlenglegle \nabla F ,
e^{ {\rm i} \sigma _3\vartheta } \xi _j \ranglenglegle \, ,\,
\partial _{\overline{z}_j}F = \langlenglegle \nabla F ,
e^{ {\rm i} \sigma _3\vartheta }\sigma _1 \xi _j\ranglenglegle .\end{aligned}
\end{equation}
{\bf e}gin{lemma} \langlebel{lem:gradient z} We have the following
formulas:
{\bf e}gin{eqnarray} \langlebel{ZOmegaTheta}&
\nabla z_j = - \langlenglegle \sigma _3 \xi _j, \partial _\omega R \ranglenglegle
\nabla \omega - {\rm i} \langlenglegle \sigma _3 \xi _j, \sigma _3 R \ranglenglegle
\nabla \vartheta + e^{-{\rm i} \sigma _3\vartheta }
\sigma
_3\xi _j\\ \langlebel{barZOmegaTheta}&\nabla \overline{z}_j = -
\langlenglegle \sigma _1\sigma _3 \xi _j,
\partial _\omega R \ranglenglegle \nabla \omega-
{\rm i} \langlenglegle \sigma _1\sigma _3 \xi _j, \sigma _3 R \ranglenglegle
\nabla \vartheta + e^{-{\rm i} \sigma _3\vartheta }
\sigma _1\sigma
_3\xi _j.\end{eqnarray}
\end{lemma}
\proof Equalities $ \frac{\partial z_j}{\partial z_\ell } =\delta _{j\ell}$,
$ \frac{\partial z_j}{\partial \overline{z}_\ell } = \frac{\partial z_j}{\partial \omega }= \frac{\partial z_j}{\partial \vartheta } =0$
and $\nabla _f z_j=0$ are equivalent to
{\bf e}gin{equation}\langlebel{eq:indentitiesGradZ} {\bf e}gin{aligned} & \langlenglegle \nabla z_j,
e^{ {\rm i} \sigma _3\vartheta } \xi _\ell \ranglenglegle =\delta _{j\ell},
\langlenglegle \nabla z_j, e^{ {\rm i} \sigma _3\vartheta } \sigma _1\xi _\ell
\ranglenglegle \equiv 0 =\langlenglegle \nabla z_j, e^{ {\rm i} \sigma _3\vartheta }
\sigma _3({\mathcal P}i +R) \ranglenglegle \\& \langlenglegle \nabla z_j, e^{ {\rm i} \sigma
_3\vartheta } \partial _\omega ({\mathcal P}i +R) \ranglenglegle =0\equiv \langlenglegle
\nabla z_j, e^{ {\rm i} \sigma _3\vartheta } P _c( \omega ) P _c( \omega
_0)g \ranglenglegle \, \forall g\in L^2_c(\mathcal{H}_{\omega _0}).
\end{aligned}
\end{equation}
Notice that the last identity implies $ P _c( \mathcal{H}_{\omega
_0}^{*} ) P _c( \mathcal{H}_{\omega }^{*} )e^{ {\rm i} \sigma
_3\vartheta }\nabla z_j=0$ which in turn implies $ P _c(
\mathcal{H}_{\omega }^{*} )e^{ {\rm i} \sigma _3\vartheta }\nabla
z_j=0$. Then, applying \eqref{eq:gradient} and using the row-column
product, we get for some pair of numbers $(a,b)$
{\bf e}gin{equation} {\bf e}gin{aligned} & \nabla z_j=a
e^{- {\rm i} \sigma _3\vartheta } {\mathcal P}i
+b e^{- {\rm i} \sigma _3\vartheta }\sigma _3\partial _\omega {\mathcal P}i +
e^{-{\rm i} \sigma _3\vartheta } \sigma _3\xi _j
\\& = (a,b) {\bf e}gin{pmatrix} e^{- {\rm i} \sigma _3\vartheta } {\mathcal P}i
\\ e^{- {\rm i} \sigma _3\vartheta }\sigma _3\partial _\omega {\mathcal P}i \end{pmatrix} + e^{-{\rm i} \sigma _3\vartheta }
\sigma
_3\xi _j =-(a,b) \mathcal{A} {\bf e}gin{pmatrix} \nabla \omega
\\ \nabla \vartheta \end{pmatrix} + e^{-{\rm i} \sigma _3\vartheta }
\sigma
_3\xi _j,\end{aligned}\nonumber
\end{equation}
where in the last line we used \eqref{eq:ApplmatrixA}. Equating the two extreme sides and applying to the formula $\langlenglegle \quad , \frac{\partial}{\partial \omega}\ranglenglegle $ and $\langlenglegle \quad , \frac{\partial}{\partial \vartheta}\ranglenglegle $, by $\langlenglegle \nabla z_j , \frac{\partial}{\partial \omega}\ranglenglegle =\langlenglegle \nabla z_j , \frac{\partial}{\partial \vartheta}\ranglenglegle =\langlenglegle \nabla \vartheta , \frac{\partial}{\partial \omega}\ranglenglegle =\langlenglegle \nabla \omega , \frac{\partial}{\partial \vartheta}\ranglenglegle =0 $, by
$ \langlenglegle \nabla \vartheta , \frac{\partial}{\partial \vartheta}\ranglenglegle =\langlenglegle \nabla \omega , \frac{\partial}{\partial \omega}\ranglenglegle =1 $ and by
\eqref{eq:vectorfields} and \eqref{eq:indentitiesGradZ}, we get
{\bf e}gin{equation} \mathcal{A}^* {\bf e}gin{pmatrix} a \\ b \end{pmatrix} =
{\bf e}gin{pmatrix} \langlenglegle \sigma _3 \xi _j, \partial _\omega R \ranglenglegle
\\ {\rm i} \langlenglegle \sigma _3 \xi _j, \sigma _3 R \ranglenglegle \end{pmatrix}.\nonumber
\end{equation}
This implies
{\bf e}gin{equation} {\bf e}gin{aligned} &
\nabla z_j = -(\langlenglegle \sigma _3 \xi _j, \partial _\omega R \ranglenglegle
, {\rm i} \langlenglegle \sigma _3 \xi _j, \sigma _3 R \ranglenglegle ){\bf e}gin{pmatrix} \nabla \omega
\\ \nabla \vartheta \end{pmatrix}+ e^{-{\rm i} \sigma _3\vartheta }
\sigma
_3\xi _j.\end{aligned}\nonumber
\end{equation}
This yields \eqref{ZOmegaTheta}. Similarly
{\bf e}gin{equation} \nabla \overline{z}_j=a e^{- {\rm i} \sigma _3\vartheta } {\mathcal P}i
+b e^{- {\rm i} \sigma _3\vartheta }\sigma _3\partial _\omega {\mathcal P}i +
e^{-{\rm i} \sigma _3\vartheta } \sigma _1\sigma _3\xi _j , \nonumber
\end{equation}
where
{\bf e}gin{equation} \mathcal{A}^* {\bf e}gin{pmatrix} a \\ b \end{pmatrix} =
{\bf e}gin{pmatrix} \langlenglegle \sigma _1\sigma _3 \xi _j, \partial _\omega R \ranglenglegle
\\ {\rm i} \langlenglegle \sigma _1\sigma _3 \xi _j, \sigma _3 R \ranglenglegle \end{pmatrix}.\nonumber
\end{equation}
\qed
{\bf e}gin{lemma} \langlebel{lem:gradient f} Consider the map
$f(U)=f$ for $U$ and $f$ as in \eqref{eq:coordinate}. Denote by
$f'(U) $ the Frech\'et derivative of this map. Then
{\bf e}gin{equation} {\bf e}gin{aligned} &
f'(U)= (P_c(\omega )P_c(\omega _0))^{-1} P_c(\omega )\left [
- \partial _\omega R \, d\omega -{\rm i}
\sigma _3 R \, d\vartheta + e^{-{\rm i}
\sigma _3\vartheta }{\kern+.3em {\rm 1} \kern -.22em {\rm l}} \right ] .
\end{aligned} \nonumber
\end{equation}
\end{lemma}
\proof We have
\begin{equation}\label{eq:indentitiesGradf} \begin{aligned} &
f'(U) e^{ {\rm i} \sigma _3\vartheta } \xi _\ell \equiv f'(U) e^{ {\rm i}
\sigma _3\vartheta } \sigma _1\xi _\ell
\equiv 0= f'(U) e^{ {\rm i} \sigma _3\vartheta }
\sigma _3(\Pi +R) = \\& f'(U) e^{ {\rm i} \sigma _3\vartheta }
\partial _\omega (\Pi +R) \text{ and } f'(U)
e^{ {\rm i} \sigma _3\vartheta } P_c(\omega )g=g \, \forall g\in
L^2_c(\mathcal{H}_{\omega _0}).
\end{aligned}
\end{equation}
This implies that for a pair of vectors valued functions $A$ and $B$
and with the inverse of $P_c(\mathcal{H}_{\omega }
)P_c(\mathcal{H}_{\omega _0}):L^2_c(\mathcal{H}_{\omega _0})\to
L^2_c(\mathcal{H}_{\omega })$,
{\bf e}gin{equation} {\bf e}gin{aligned} & f'= (A,B)
{\bf e}gin{pmatrix} \langlenglegle e^{- {\rm i} \sigma _3\vartheta } {\mathcal P}i , \quad
\ranglenglegle
\\ \langlenglegle e^{- {\rm i} \sigma _3\vartheta }\sigma _3
\partial _\omega {\mathcal P}i , \quad \ranglenglegle \end{pmatrix} + (P_c(\omega )P_c(\omega _0))^{-1} P_c(\omega
)e^{-{\rm i} \sigma _3\vartheta } =\\& -(A,B)\mathcal{A}
{\bf e}gin{pmatrix}d\omega
\\ d \vartheta \end{pmatrix} +
(P_c(\omega )P_c(\omega _0))^{-1} P_c(\omega
)e^{-{\rm i} \sigma _3\vartheta } .\end{aligned}\nonumber
\end{equation}
By \eqref{eq:indentitiesGradf} we obtain that $A$ and $B$ are
identified by the following equations (treating the last
$(P_c(\omega )P_c(\omega _0))^{-1} P_c(\omega )$ like a scalar):
{\bf e}gin{equation} \mathcal{A}^* {\bf e}gin{pmatrix} A \\ B \end{pmatrix} =
(P_c(\omega )P_c(\omega _0))^{-1} P_c(\omega ) {\bf e}gin{pmatrix}
\partial _\omega R
\\ {\rm i} \sigma _3 R
\end{pmatrix}.\nonumber
\end{equation}
\qed
\section{Symplectic structure}
\langlebel{section:symplectic}
Our ambient space is $H^1( \R ^3, \mathbb{C} )\times H^1( \R ^3, \mathbb{C} )$.
We focus only on points with $\sigma _1U=\overline{U}$. The natural
symplectic structure for our problem is
{\bf e}gin{equation}\langlebel{eq:SymplecticForm}
\Omega (X,Y)=\langlenglegle X, \sigma _3\sigma _1 Y \ranglenglegle .
\end{equation}
We will see that the coordinates we introduced in
\eqref{eq:coordinate}, which arise naturally from the linearization,
are not canonical for \eqref{eq:SymplecticForm}. This is the main
difference with \cite{bambusicuccagna}. In this section we exploit
the work in section {\rm e}f{section:modulation} to compute the Poisson
brackets for pairs of coordinates. We end the section with a crucial
property for $Q$,
Lemma
{\rm e}f{lem:InvarianceQ}.
The hamiltonian vector field $X_G$ of a scalar function $G$ is
defined by the equation $\langlenglegle X_G, \sigma _3\sigma _1 Y
\ranglenglegle=-{\rm i} \langlenglegle \nabla G , Y \ranglenglegle$ for any vector $Y$ and
is $X_G=-{\rm i} \sigma _3\sigma _1 \nabla G $. At $U=e^{{\rm i} \sigma
_3\vartheta } ({\mathcal P}i _\omega + R)$ as in \eqref{eq:anzatz} we have by
\eqref{eq:gradient}
{\bf e}gin{equation}\langlebel{eq:HamVectorfield}{\bf e}gin{aligned} &
X_G(U)={\rm i}
\frac{\langlenglegle \nabla G (U), e^{ {\rm i} \sigma _3\vartheta }
\sigma _3 {\mathcal P}i \ranglenglegle }
{q'(\omega )}
e^{ {\rm i} \sigma _3\vartheta }\partial _\omega {\mathcal P}i -{\rm i}
\frac{\langlenglegle \nabla G(U) ,
e^{ {\rm i} \sigma _3\vartheta }\partial _\omega {\mathcal P}i \ranglenglegle }
{q'(\omega )}
e^{ {\rm i} \sigma _3\vartheta }\sigma _3 {\mathcal P}i
\\& +{\rm i}
\sum _j\partial _{z_j}G(U) e^{ {\rm i} \sigma _3\vartheta }
\sigma _1\xi _j -{\rm i} \sum _j\partial _{\overline{z}_j}G(U)
e^{ {\rm i} \sigma _3\vartheta }\xi _j -\\& -{\rm i} e^{ {\rm i} \sigma _3\vartheta }\sigma _3\sigma _1
P_c(\mathcal{H}_\omega ^* )e^{ {\rm i} \sigma _3\vartheta }\nabla G(U).
\end{aligned}
\end{equation}
We call Poisson bracket of a pair of scalar valued functions $F$
and $G$ the scalar valued function
{\bf e}gin{equation}\langlebel{eq:PoissonBracket}
\{ F,G \} = \langlenglegle \nabla F , X_ G \ranglenglegle =
-{\rm i} \langlenglegle \nabla F ,\sigma _3\sigma _1\nabla G \ranglenglegle
= {\rm i} \Omega ( X_F, X_G )
.
\end{equation}
By $0= {\rm i} \frac{d}{dt} Q (U(t)) =\langlenglegle
\nabla Q (U(t)),\sigma _3\sigma _1\nabla E (U(t)) \ranglenglegle $ we have the
commutation
{\bf e}gin{equation}\langlebel{eq:PoissonCommutation}
\{ Q,E \} =0.
\end{equation}
In terms of spectral components we have
{\bf e}gin{equation}\langlebel{eq:PoissonBracketComponent}
{\bf e}gin{aligned} & {\rm i} \{ F,G \} (U)=\langlenglegle
\nabla F (U) ,\sigma _3\sigma _1\nabla G (U) \ranglenglegle =
(q') ^{-1}
\times \\&
\big [ \langlenglegle \nabla F ,
e^{ {\rm i} \sigma _3\vartheta }\sigma _3 {\mathcal P}i \ranglenglegle
\langlenglegle \nabla G ,
e^{ {\rm i} \sigma _3\vartheta }\partial _\omega {\mathcal P}i \ranglenglegle
- \langlenglegle \nabla F , e^{ {\rm i} \sigma _3\vartheta }\partial _\omega
{\mathcal P}i \ranglenglegle
\langlenglegle \nabla G ,
e^{ {\rm i} \sigma _3\vartheta }\sigma _3 {\mathcal P}i \ranglenglegle
\big ]
\\& + \sum _j \big [
\partial _{z_j} F \partial _{\overline{z}_j} G
-\partial _{\overline{z}_j} F \partial _{ {z}_j} G
\big ] +\\& +
\langlenglegle \sigma _3 e^{ - {\rm i} \sigma _3\vartheta }P_c(\mathcal{H}_\omega ^*)
e^{ {\rm i} \sigma _3\vartheta } \nabla F ,
\sigma _1 e^{ -{\rm i} \sigma _3\vartheta }
P_c(\mathcal{H}_\omega ^*) e^{ {\rm i} \sigma _3\vartheta }
\nabla G \ranglenglegle
. \end{aligned}
\end{equation}
{\bf e}gin{lemma}
\langlebel{lem:PoissBrackCoord} Let $F(U)$ be a scalar function. We have
the following equalities:
{\bf e}gin{eqnarray} & \{ \omega ,\vartheta \} = \frac{ q'}{(q'
)^2 -\langlenglegle R,
\partial _\omega {\mathcal P}i \ranglenglegle ^2 + \langlenglegle \sigma _3R, {\mathcal P}i
\ranglenglegle \langlenglegle R ,\sigma _3\partial _\omega ^2 {\mathcal P}i
\ranglenglegle }\langlebel{omegatheta};\\ & \{ z_j ,F \} =\langlenglegle \sigma _3
\xi _j, \partial _\omega R \ranglenglegle \{ F, \omega \} +{\rm i}
\langlenglegle \sigma _3
\xi _j, \sigma _3 R \ranglenglegle \{ F, \vartheta \} -{\rm i}
\partial _{\overline{z}_j}F ;\langlebel{ZF} \\ & \{ \overline{z}_j ,F
\} =\langlenglegle \sigma _1\sigma _3
\xi _j, \partial _\omega R \ranglenglegle \{ F, \omega \} +{\rm i}
\langlenglegle \sigma _1\sigma _3
\xi _j, \sigma _3 R \ranglenglegle \{ F, \vartheta \} +{\rm i}
\partial _{z_j}F .\langlebel{barZF}
\end{eqnarray}
In particular we have:
{\bf e}gin{equation} {\bf e}gin{aligned}
&
\{ z_j, \omega \} ={\rm i}
\langlenglegle \sigma _3
\xi _j, \sigma _3 R \ranglenglegle \{ \omega, \vartheta \}
\, ; \, \{ \overline{z}_j, \omega \} ={\rm i}
\langlenglegle \sigma _1\sigma _3
\xi _j, \sigma _3 R \ranglenglegle \{ \omega, \vartheta \} ; \\&
\{ z_j, \vartheta \} =
\langlenglegle \sigma _3
\xi _j, \partial _\omega R \ranglenglegle \{ \vartheta , \omega \}
\, ; \, \{ \overline{z}_j, \vartheta \} =
\langlenglegle \sigma _1\sigma _3
\xi _j, \partial _\omega R \ranglenglegle \{ \vartheta , \omega \}
; \\
&
\{ z_k,z_j \} ={\rm i} (
\langlenglegle \sigma _3 \xi _k , \partial _\omega R \ranglenglegle \langlenglegle \sigma _3
\xi _j, \sigma _3 R\ranglenglegle -\langlenglegle \sigma _3 \xi _j , \partial _\omega R \ranglenglegle \langlenglegle \sigma _3
\xi _k, \sigma _3 R\ranglenglegle ) \{ \omega, \vartheta \} ;\\&
\{ \overline{z}_k,\overline{z}_j \} ={\rm i} (
\langlenglegle \sigma _1\sigma _3 \xi _k ,
\partial _\omega R \ranglenglegle \langlenglegle \sigma _1\sigma _3
\xi _j, \sigma _3 R\ranglenglegle -\langlenglegle
\sigma _1\sigma _3 \xi _j ,
\partial _\omega R \ranglenglegle \langlenglegle \sigma _1\sigma _3
\xi _k, \sigma _3 R\ranglenglegle ) \{ \omega, \vartheta \} ;\\&
\{ z_k,\overline{z}_j \}
=-{\rm i} \delta _{jk}+
{\rm i} (\langlenglegle \sigma _3 \xi _k , \partial _\omega R \ranglenglegle
\langlenglegle \sigma _1\sigma _3
\xi _j, \sigma _3 R\ranglenglegle -\langlenglegle \sigma _1\sigma _3 \xi _j ,
\partial _\omega R \ranglenglegle \langlenglegle
\xi _k, R\ranglenglegle ) \{ \omega, \vartheta \}.
\end{aligned}\nonumber\end{equation}
\end{lemma}
\proof By \eqref{eq:GradModulation} and \eqref{eq:PoissonBracketComponent}
we have $ {\rm i} \{ \omega ,\vartheta \} = $ \begin{equation}\begin{aligned}&
(q') ^{-1}
\big [ \langle \nabla \omega ,
e^{ {\rm i} \sigma _3\vartheta }\sigma _3 \Pi \rangle
\langle \nabla \vartheta ,
e^{ {\rm i} \sigma _3\vartheta }\partial _\omega \Pi \rangle
- \langle \nabla \omega , e^{ {\rm i} \sigma _3\vartheta }\partial _\omega
\Pi \rangle
\langle \nabla \vartheta ,
e^{ {\rm i} \sigma _3\vartheta }\sigma _3 \Pi \rangle
\big ] =\\& \frac{- \langle \sigma _3R, \Pi _\omega \rangle q' \langle R ,\sigma _3\partial _\omega ^2 \Pi
_\omega\rangle q'-[(q'(\omega))^2 -\langle R,
\partial _\omega \Pi _\omega \rangle ^2] (q' )^2 }{q' {\rm i} \left [ (q'(\omega))^2 -\langle R,
\partial _\omega \Pi _\omega \rangle ^2 + \langle \sigma _3R, \Pi
_\omega \rangle \langle R ,\sigma _3\partial _\omega ^2 \Pi
_\omega\rangle\right ]^2}
.\end{aligned}\nonumber
\end{equation}
This yields
\eqref{omegatheta}. For \eqref{ZF}, substituting \eqref{ZOmegaTheta}
in \eqref{eq:PoissonBracket}, we get $\{ z_j ,F \} =$
{\bf e}gin{equation}{\bf e}gin{aligned}& \langlenglegle \nabla z_j, X_F\ranglenglegle = - \langlenglegle \sigma _3 \xi _j, \partial _\omega R \ranglenglegle
\{ \omega ,F \} - {\rm i} \langlenglegle \sigma _3 \xi _j, \sigma _3 R \ranglenglegle
\{ \vartheta ,F \} + \langlenglegle e^{-{\rm i} \sigma _3\vartheta }
\sigma
_3\xi _j, X_F\ranglenglegle
.\end{aligned}\nonumber
\end{equation}
When we substitute $X_F$ with the decomposition in \eqref{eq:HamVectorfield}, the last term in the above sum becomes $\langlenglegle e^{-{\rm i} \sigma _3\vartheta }
\sigma
_3\xi _j, X_F\ranglenglegle =-{\rm i} \partial _{\overline{z}_j}F\langlenglegle e^{-{\rm i} \sigma _3\vartheta }
\sigma
_3\xi _j, e^{ {\rm i} \sigma _3\vartheta }\xi _j\ranglenglegle =-{\rm i} \partial _{\overline{z}_j}F . $ This yields \eqref{ZF}. \eqref{barZF} can be derived
by first replacing $F$ with $\overline{F}$ in \eqref{ZF} and by taking the
complex conjugate of the resulting equation:
{\bf e}gin{equation} \{ z_j ,F \} =\langlenglegle \sigma _3
\xi _j, \partial _\omega \overline{R} \ranglenglegle \{ F, \omega \} -{\rm i}
\langlenglegle \sigma _3
\xi _j, \sigma _3 \overline{R} \ranglenglegle \{ F, \vartheta \} +{\rm i}
\partial _{ {z}_j}F.
\nonumber
\end{equation}
Then \eqref{barZF} follows by using that $\overline{R}=\sigma _1 R$ and $\sigma _1\sigma _3=-\sigma _3\sigma _1$. The remaining formulas in the
statement follow from \eqref{ZF}--\eqref{barZF}.
\qed
{\bf e}gin{definition}\langlebel{def:PoissonFunct}
Given a function $\mathcal{G}(U)$
with
values in $L^2 _c(\mathcal{H} _{\omega _0}) $, a symplectic form
$\Omega$ and a scalar function $F(U)$, we define
{\bf e}gin{equation}\langlebel{fF} \{ \mathcal{G}, F\} :=
\mathcal{G}'(U)X_F(U)
\end{equation}
with $X_F$ the hamiltonian vector field associated to $F$. We set
$\{ F,\mathcal{G} \} :=-\{ \mathcal{G}, F\}$.
\end{definition}
We have:
{\bf e}gin{lemma}
\langlebel{lem:fF} For $f(U)$ the functional in Lemma {\rm e}f{lem:gradient
f}, we have:
{\bf e}gin{equation}\langlebel{fF1} \{ f, F\} =
(P_c(\omega )P_c(\omega _0))^{-1} P_c(\omega ) \left [ \{ F,\omega \}
\partial _\omega R +{\rm i} \{ F, \vartheta \} \sigma _3 R -{\rm i}
e^{-{\rm i} \sigma _3 \vartheta} \sigma _3\sigma _1\nabla F\right ] .
\end{equation}
In particular we have:
{\bf e}gin{equation}\langlebel{fomegaZ} {\bf e}gin{aligned}& \{ f, \omega\} ={\rm i} \{ \omega, \vartheta
\} (P_c(\omega )P_c(\omega _0))^{-1} P_c(\omega ) \sigma _3 R
; \\&\{ f, \vartheta \} = \{ \vartheta ,\omega
\}(P_c(\omega )P_c(\omega _0))^{-1} P_c(\omega ) \partial _\omega
R;
\\& \{ f, z_j\} =
(P_c(\omega )P_c(\omega _0))^{-1} P_c(\omega ) \left [ \{ z_j,\omega
\}
\partial _\omega R +{\rm i} \{ z_j, \vartheta \} \sigma _3 R \right ] ; \\&
\{ f, \overline{z}_j\} =
(P_c(\omega )P_c(\omega _0))^{-1} P_c(\omega ) \left [ \{
\overline{z}_j,\omega \}
\partial _\omega R +{\rm i} \{ \overline{z}_j, \vartheta \} \sigma _3 R \right ] .
\end{aligned}
\end{equation}
\end{lemma}
\proof Using Lemma {\rm e}f{lem:gradient f} and by \eqref{eq:ApplmatrixA}
{\bf e}gin{equation}{\bf e}gin{aligned} & f' \sigma _3\sigma _1\nabla
F =-(A,B)\mathcal{A} {\bf e}gin{pmatrix} \langlenglegle \nabla \omega ,\sigma
_3\sigma _1\nabla F \ranglenglegle
\\ \langlenglegle \nabla \vartheta ,\sigma
_3\sigma _1\nabla F \ranglenglegle \end{pmatrix} \\& +(P_c(\omega
)P_c(\omega _0))^{-1} P_c(\omega )
e^{-{\rm i} \sigma _3 \vartheta} \sigma _3\sigma _1\nabla F.
\end{aligned} \nonumber
\end{equation}
By Lemma {\rm e}f{lem:gradient f} we have
{\bf e}gin{equation}{\bf e}gin{aligned} (A,B)\mathcal{A}
{\bf e}gin{pmatrix} \{ \omega , F \}
\\ \{ \vartheta , F \} \end{pmatrix} =
(P_c(\omega )P_c(\omega _0))^{-1}
P_c(\omega ) ( \partial _\omega R
, {\rm i} \sigma _3 R) {\bf e}gin{pmatrix} \{ \omega , F \}
\\ \{ \vartheta , F \} \end{pmatrix}.
\end{aligned} \nonumber
\end{equation}
\qed
The following result is important in the sequel.
{\bf e}gin{lemma}
\langlebel{lem:InvarianceQ} Let $Q$ be the function defined in
\eqref{eq:charge}.
Then, we have the following formulas:
{\bf e}gin{eqnarray}
& \{ Q,\omega \} =0 ; \langlebel{Qomega}\\& \{ Q,\vartheta \} =1;
\langlebel{Qtheta} \\& \{ Q,z_j \} =\{ Q,\overline{z}_j \}=0;
\langlebel{QZ}\\& \{ Q,f \}=0 . \langlebel{Qf}
\end{eqnarray}
Denote by $X_Q$ the hamiltonian vectorfield of $Q$. Then
{\bf e}gin{equation} \langlebel{eq:Ham.VecFieldQ} X_Q=-
\frac{\partial}{\partial \vartheta} .
\end{equation}
\end{lemma}
\proof We have by \eqref{eq:PoissonBracketComponent},
\eqref{eq:GradModulation} and $\nabla Q(U)=\sigma _1U$,
{\bf e}gin{equation}
{\bf e}gin{aligned} & {\rm i} q'\{ Q,\omega \} =
\langlenglegle \nabla Q ,
e^{ {\rm i} \sigma _3\vartheta }\sigma _3 {\mathcal P}i \ranglenglegle
\langlenglegle \nabla \omega ,
e^{ {\rm i} \sigma _3\vartheta }\partial _\omega {\mathcal P}i \ranglenglegle
- \langlenglegle \nabla Q , e^{ {\rm i} \sigma _3\vartheta }\partial _\omega
{\mathcal P}i \ranglenglegle
\langlenglegle \nabla \omega ,
e^{ {\rm i} \sigma _3\vartheta }\sigma _3 {\mathcal P}i \ranglenglegle
\\&
= q'\frac{ -\langlenglegle R, \sigma _3 {\mathcal P}i \ranglenglegle (q'(\omega)+ \langlenglegle
R,
\partial _\omega {\mathcal P}i _\omega \ranglenglegle )
-(q'(\omega)+ \langlenglegle R,
\partial _\omega {\mathcal P}i _\omega \ranglenglegle ) (-1)\langlenglegle R, \sigma _3 {\mathcal P}i \ranglenglegle
}{(q'(\omega))^2 -\langlenglegle R,
\partial _\omega {\mathcal P}i _\omega \ranglenglegle ^2 + \langlenglegle \sigma _3R, {\mathcal P}i
_\omega \ranglenglegle \langlenglegle R ,\sigma _3\partial _\omega ^2 {\mathcal P}i
_\omega\ranglenglegle } =0. \end{aligned}\nonumber
\end{equation}
Similarly,
{\bf e}gin{equation}
{\bf e}gin{aligned} & {\rm i} q'\{ Q,\vartheta \} =
\langlenglegle \nabla Q ,
e^{ {\rm i} \sigma _3\vartheta }\sigma _3 {\mathcal P}i \ranglenglegle
\langlenglegle \nabla \vartheta ,
e^{ {\rm i} \sigma _3\vartheta }\partial _\omega {\mathcal P}i \ranglenglegle
- \langlenglegle \nabla Q , e^{ {\rm i} \sigma _3\vartheta }\partial _\omega
{\mathcal P}i \ranglenglegle
\langlenglegle \nabla \vartheta ,
e^{ {\rm i} \sigma _3\vartheta }\sigma _3 {\mathcal P}i \ranglenglegle
\\&
= q'\frac{ -\langlenglegle R, \sigma _3 {\mathcal P}i \ranglenglegle \langlenglegle R, \sigma _3
\partial _\omega ^2{\mathcal P}i \ranglenglegle -(q'(\omega)+ \langlenglegle R,
\partial _\omega {\mathcal P}i _\omega \ranglenglegle ) (q'(\omega)- \langlenglegle R,
\partial _\omega {\mathcal P}i _\omega \ranglenglegle )
}{{\rm i} [(q'(\omega))^2 -\langlenglegle R,
\partial _\omega {\mathcal P}i _\omega \ranglenglegle ^2 + \langlenglegle \sigma _3R, {\mathcal P}i
_\omega \ranglenglegle \langlenglegle R ,\sigma _3\partial _\omega ^2 {\mathcal P}i
_\omega\ranglenglegle ]} =q' {\rm i} . \end{aligned}\nonumber
\end{equation}
By \eqref{ZF},\eqref{Qomega} and \eqref{Qtheta} we have
{\bf e}gin{equation}\langlebel{eq:ZQ}
{\bf e}gin{aligned} & {\rm i} \{ z_j ,Q \} = -
\langlenglegle
\xi _j, R \ranglenglegle +
\partial _{\overline{z}_j}Q \\& {\rm i} \{ \overline{z}_j ,Q \} =
\langlenglegle
\xi _j, \sigma _1 R \ranglenglegle -
\partial _{ {z}_j}Q. \end{aligned}
\end{equation}
By
{\bf e}gin{equation} \langlebel{eq:Qcoord} Q(U)=q +\frac{1}{2}
\langlenglegle z\cdot \xi +\overline{z}\cdot \sigma _1\xi +P_c(\omega
)f,\sigma _1(z\cdot \xi +\overline{z}\cdot \sigma _1\xi +P_c(\omega
)f)\ranglenglegle
\end{equation}
we have
{\bf e}gin{equation} \langlebel{eq:QZderivatives}
\partial _{ {z}_j}Q=\langlenglegle
\xi _j, \sigma _1 R \ranglenglegle \, , \quad
\partial _{\overline{z}_j}Q=\langlenglegle
\xi _j, R \ranglenglegle .
\end{equation}
So both lines in \eqref{eq:ZQ} are 0 and yield \eqref{QZ}.
Finally \eqref{Qf} follows by \eqref{fF},
Lemma {\rm e}f{lem:fF}, \eqref{Qomega} ,
\eqref{Qtheta} and by
{\bf e}gin{equation} {\bf e}gin{aligned} &
\{ f,Q \} =
(P_c(\omega )P_c(\omega _0))^{-1}P_c(\omega ) \left [ {\rm i} \{ Q,
\vartheta \} \sigma _3 R -{\rm i}
e^{-{\rm i} \sigma _3 \vartheta} \sigma _3\sigma _1\nabla Q\right ]
\\& = (P_c(\omega )P_c(\omega _0))^{-1}P_c(\omega )
\left [ {\rm i} \sigma _3 R -{\rm i} \sigma _3{\mathcal P}i -{\rm i} \sigma _3 R \right ] =0.
\end{aligned}\nonumber
\end{equation}
\eqref{eq:Ham.VecFieldQ} is an immediate consequence of the
definition of $X_Q$ and of \eqref{Qomega}--\eqref{Qf}.
\qed
\section{Hamiltonian reformulation of the system}
\label{section:Hamiltonian riformulation}
\eqref{eq:system} is how the problem is framed in the literature.
Yet \eqref{eq:system} hides the crucial hamiltonian nature of the
problem. In the coordinate system \eqref{eq:coordinate}, the system can be written
as follows:
{\bf e}gin{equation} \langlebel{eq:SystPoiss} {\bf e}gin{aligned} &
\dot \omega = \{ \omega , E \} \, , \quad \dot f= \{f, E
\} \, , \\& \dot z_j = \{ z_j , E \} \, , \quad {\dot
{\overline{z}}_j }= \{ \overline{z}_j , E \} \, ,
\\ & \dot \vartheta = \{ \vartheta , E \}. \end{aligned}
\end{equation}
For the scalar coordinates the equations in \eqref{eq:SystPoiss} are
due to the hamiltonian nature of \eqref{eq:NLSvectorial}. Exactly for the same reasons we have the equation $ \dot f= \{f, E \} $, which we now derive in
the following standard way. Multiplying \eqref{eq:system} by $e^{{\rm i}
\vartheta\sigma _3}$ one can rewrite \eqref{eq:system} by
\eqref{eq:vectorfields} and \eqref{eq:gaugeInvariance},
as {\bf e}gin{equation}
\langlebel{eq:sys1}
{\bf e}gin{aligned} &
-{\rm i}
\dot \vartheta
\frac{\partial}{\partial \vartheta }+ {\rm i} \dot \omega
\frac{\partial}{\partial \omega } + {\rm i}
\sum _j \dot z_j\frac{\partial}{\partial z _j } + {\rm i}
\sum _j \dot {\overline{z}}_j\frac{\partial}{\partial
\overline{z} _j } \\& +{\rm i}
e^{{\rm i} \vartheta\sigma
_3} P_c(\mathcal{ H}_\omega ) \dot f = \sigma _3
\sigma _1
\nabla E ( U) .\end{aligned}
\end{equation}
When we apply the derivative $f'(U)$ to \eqref{eq:sys1} the first
line cancels, so that
{\bf e}gin{equation}
\langlebel{eq:sys2}
{\bf e}gin{aligned} & \dot f= f'(U)
e^{{\rm i} \vartheta\sigma
_3} P_c(\mathcal{ H}_\omega ) \dot f =- f'(U){\rm i} \sigma _3
\sigma _1
\nabla E ( U) =f'(U)X_E(U)=\{ f,E\} .
\end{aligned}\nonumber
\end{equation}
where the first equality is \eqref{eq:indentitiesGradf}, the third
equality follows from the definition of the hamiltonian vector field two lines above
\eqref{eq:HamVectorfield}, and the last equality is Definition
\ref{def:PoissonFunct}.
We now introduce a new hamiltonian. For $u_0$ the initial datum in
\eqref{NLS}, set
{\bf e}gin{equation} \langlebel{eq:K} {\bf e}gin{aligned} &
K(U)=E(U)+\omega (U) Q(U)-\omega (U)\| u_0\| _{L^2_x}^{2}.
\end{aligned}
\end{equation}
By Lemma {\rm e}f{lem:InvarianceQ} the solution of the initial value
problem in \eqref{NLS} solves also
{\bf e}gin{equation} \langlebel{eq:SystPoissK} {\bf e}gin{aligned} &
\dot \omega = \{ \omega , K \} \, , \quad \dot f= \{f, K
\} \, , \\& \dot z_j = \{ z_j , K \} \, , \quad {\dot
{\overline{z}}_j }= \{ \overline{z}_j , K \} \, ,
\\ & \dot \vartheta -\omega = \{ \vartheta , K \}. \end{aligned}
\end{equation}
By $ \frac{\partial}{\partial \vartheta} K =0$ the right hand
sides in the equations \eqref{eq:SystPoissK} do not depend on
$\vartheta$. Hence, if we look at the new system
{\bf e}gin{equation} \langlebel{eq:SystK} {\bf e}gin{aligned} &
\dot \omega = \{ \omega , K \} \, , \quad \dot f= \{f, K
\} \, , \\& \dot z_j = \{ z_j , K \} \, , \quad {\dot
{\overline{z}}_j }= \{ \overline{z}_j , K \} \, ,
\\ & \dot \vartheta = \{ \vartheta , K \}, \end{aligned}
\end{equation}
the evolution of the crucial variables $(\omega , z, \overline{z},
f)$ in \eqref{eq:SystPoiss} and \eqref{eq:SystK} is the same.
Therefore, to prove Theorem {\rm e}f{theorem-1.1} it is sufficient to
consider system \eqref{eq:SystK}.
\section{Application of the Darboux Theorem}
\langlebel{section:Darboux}
Since the main obstacle to reproducing the Birkhoff normal forms
argument of \cite{bambusicuccagna} for \eqref{eq:SystK} is that the
coordinates \eqref{eq:coordinate} are not canonical, we change
coordinates. That is, we apply the Darboux Theorem. We warn the
reader not to confuse the variable $t\in [0,1]$ of this section with
the time of the evolution equation of the other sections.
We introduce the 2-form, for
$q=q(\omega )=\| \phi _\omega \| ^{2}_{L^2_x}$ and summing on repeated indexes,
{\bf e}gin{equation} \langlebel{eq:Omega0} \Omega _0={\rm i} d\vartheta \wedge
dq + dz_j\wedge d\overline{z}_j+\langlenglegle f' (U )\quad , \sigma _3
\sigma _1 f' (U )\quad \ranglenglegle ,
\end{equation}
with $f (U)$ the function in Lemma \ref{lem:gradient f}, $f '(U)$
its Fr\'echet derivative and the last term in \eqref{eq:Omega0}
acting on pairs $(X,Y)$ like $\langle f' (U )X , \sigma _3 \sigma
_1 f' (U )Y\rangle $.
It is an elementary exercise to show that $\Omega _0 $ is a
closed and non-degenerate 2-form. In Lemma \ref{lem:OmegaOmega0} we
check that $\Omega _0(U) =\Omega (U)$ at $U=e^{{\rm i} \sigma
_3\vartheta}
{\mathcal P}i _{\omega_{0}}$. Then the proof of the Darboux
Theorem
goes as follows. One first considers
{\bf e}gin{equation} \langlebel{eq:Omegat} \Omega _t =(1-t)\Omega _0+t
\Omega =\Omega _0 +t\widetilde{\Omega} \text{ with
$\widetilde{\Omega}
:=\Omega -\Omega _0$.} \end{equation}
Then one considers a differential 1-form $\gamma (t,U)$ such that
(external differentiation will always be on the $U$ variable only)
${\rm i} d \gamma (t,U) = \widetilde{\Omega} $ with $\gamma (U) =0$
at $U=e^{{\rm i}
\sigma _3\vartheta}
{\mathcal P}i _{\omega_{0}}$. Finally one considers the vector field
$\mathcal{Y}^t$ such that
$i_{\mathcal{Y}^t}\Omega _t=-{\rm i} \gamma $ (here for $\Omega$ a 2 form and $Y$ a vector field, $i_Y\Omega $ is the 1 form defined by $i_Y\Omega (X):=\Omega (Y,X))$
and the
flow $\mathfrak{F}_t$ generated by $\mathcal{Y}^t$, which near
the points
$e^{{\rm i} \sigma _3\vartheta}
{\mathcal P}i _{\omega_{0}}$ is defined up to time 1, and show that
$ \mathfrak{F}_1^*\Omega =\Omega _0$ by
{\bf e}gin{equation} \langlebel{eq:dartheorem}{\bf e}gin{aligned} &\frac{d}{dt}
\left ( \mathfrak{F}_t^*\Omega _t\right )
=\mathfrak{F}_t^*\left ( L_{\mathcal{Y}_t} \Omega _t\right )
+ \mathfrak{F}_t^*\frac{d}{dt}\Omega _t =
\\& = \mathfrak{F}_t^*d\left (
i_{\mathcal{Y}^t} \Omega _t\right )
+ \mathfrak{F}_t^* \widetilde{\Omega } =
\mathfrak{F}_t^*\left ( -{\rm i} d \gamma
+ \widetilde{\Omega}\right ) =0
.
\end{aligned}
\end{equation}
For $\Omega _0$, the coordinates \eqref{eq:coordinate} are
canonical. But if one does not choose the 1 form $\gamma$ carefully,
then the new hamiltonian $\widetilde{K}= K\circ \mathfrak{F}_1$ will
not yield a semilinear NLS for coordinates \eqref{eq:coordinate},
which is what we need to perform the argument of
\cite{bambusicuccagna,cuccagnamizumachi}. In the remainder of this
section all the work is aimed at the correct choice of
$\gamma$. In Lemma \ref{lem:1forms} we compute explicitly a
differential form $\alpha$ and we make the preliminary choice
$\gamma =-{\rm i} \alpha$. This is not yet the right choice. By the
computations in Lemma {\rm e}f{lem:linearAlgebra} and Remark
{\rm e}f{rem:correction}, we find the obstruction to the fact that
$\widetilde{K}$ is of the desired type. Lemmas
{\rm e}f{lem:HamThetaOmega}--{\rm e}f{lem:flow Htheta} are necessary to find
an appropriate solution $F$ of a differential equation in Lemma
{\rm e}f{lem:correction alpha}. Then $\gamma =-{\rm i} \alpha + dF$ is the
right choice of $\gamma$. In Lemma {\rm e}f{lem:flow1} we collect a
number of useful estimates for $\mathfrak{F}_1$. Finally, Lemma
{\rm e}f{lem:flow2} is valid independently of the precise $\gamma$
chosen and contains information necessary for
\eqref{eq:newH}--\eqref{eq:SystK1}.
For any vector $Y\in T _U L^2$ we set
{\bf e}gin{equation} \langlebel{eq:Y} {\bf e}gin{aligned} &
Y=Y _{\vartheta}\frac{\partial}{\partial \vartheta}+Y
_{\omega}\frac{\partial}{\partial \omega} +\sum Y
_{j}\frac{\partial}{\partial z_j}+\sum Y
_{\overline{j}}\frac{\partial}{\partial \overline{z}_j} +e^{{\rm i}
\sigma _3 \vartheta} P_c (\omega )Y _{f }
\end{aligned}\end{equation}
for
{\bf e}gin{equation} \langlebel{eq:Y1} {\bf e}gin{aligned} & Y_{\vartheta}
=d\vartheta (Y)\, , \quad Y_{\omega} =d\omega (Y)\, , \quad Y_{j}
=dz_j (Y)\\& \quad Y_{\overline{j}} =d\overline{z}_j (Y) \, , \quad
\quad Y_{f } =f' (U) Y .
\end{aligned}\end{equation}
Similarly, a differential 1-form $\gamma $ decomposes as
{\bf e}gin{equation} \langlebel{eq:gamma} {\bf e}gin{aligned} &
\gamma=\gamma ^{\vartheta}d \vartheta +\gamma ^{\omega}d \omega
+\sum \gamma ^{j}d z_j +\sum \gamma ^{\overline{j}}d\overline{z}_j
+\langlenglegle \gamma ^{f }, f'\quad \ranglenglegle ,
\end{aligned}\end{equation}
where: $\langlenglegle \gamma ^{f }, f'\quad \ranglenglegle $ acts on a vector $Y$
as $\langlenglegle \gamma ^{f }, f'Y \ranglenglegle $, with here $\gamma ^f \in
L^2_c(\mathcal{H}_{\omega _0}^*)$; $\gamma ^{\vartheta}$, $\gamma
^{\omega}$, $\gamma ^{j}$ and $\gamma ^{\overline{j}}$ are in $\mathbb{C}$.
Notice that we are reversing the standard notation on super and
subscripts for forms and vector fields. In the sequel, given a differential
1 form $\gamma $ and
a point $U$, we will denote by $\gamma _U$ the value of
$\gamma $ at $U$.
Given a function $\chi $, denote its hamiltonian vector field with
respect to $\Omega _t$
by $X^t_\chi$ : $i _{X^t_\chi}\Omega _t=-{\rm i} \, d\chi$.
By \eqref{eq:Omega0} we have the following hamiltonian
vectorfield associated to $q(\omega )$ (this is important in Lemma
{\rm e}f{lem:flow2} later):
{\bf e}gin{equation} \langlebel{eq:HamVect0q} X_{q(\omega )}^{0}=
-\frac{\partial}{\partial \vartheta}.
\end{equation}
We have the following preliminary observation:
{\bf e}gin{lemma}
\langlebel{lem:OmegaOmega0} At $U=e^{{\rm i} \sigma _3\vartheta}
{\mathcal P}i _{\omega_{0}}$, for any $\vartheta $, we have $\Omega _0(U)=\Omega
(U)$.
\end{lemma}
\proof Using the following partition of the identity
{\bf e}gin{equation}\langlebel{eq:partitionUno}{\kern+.3em {\rm 1} \kern -.22em {\rm l}} =
e^{{\rm i} \sigma _3 \vartheta}
[ P_{N_g(\mathcal{H}_{\omega})} +\sum _{\langlembda \in \sigma
(\mathcal{H}_{\omega}) \backslash \{ 0 \} } P _{\ker
(\mathcal{H}_{\omega}-\langlembda )} +P_{c} (\mathcal{H}_{\omega}) ]
e^{-{\rm i} \sigma _3 \vartheta}
\end{equation}
we get, summing on repeated indexes,
{\bf e}gin{equation}\langlebel{eq:OmegaComponent1}
{\bf e}gin{aligned} & \Omega (X,Y)=\langlenglegle
X ,\sigma _3\sigma _1Y \ranglenglegle =
\\&
\frac{1}{q'}\big [ \langlenglegle X , e^{ - {\rm i} \sigma _3\vartheta
}\sigma _3\partial _\omega {\mathcal P}i \ranglenglegle
\langlenglegle Y ,e^{ - {\rm i} \sigma _3\vartheta }
{\mathcal P}i \ranglenglegle -\langlenglegle X ,
e^{ - {\rm i} \sigma _3\vartheta } {\mathcal P}i \ranglenglegle
\langlenglegle Y ,e^{ - {\rm i} \sigma _3\vartheta }
\sigma _3 \partial _\omega {\mathcal P}i \ranglenglegle
\big ] +
\\& \big [
\langlenglegle X , e^{ - {\rm i} \sigma _3\vartheta }\sigma _3 \xi _j
\ranglenglegle \langlenglegle Y , e^{ - {\rm i} \sigma _3\vartheta }\sigma _1
\sigma _3 \xi _j
\ranglenglegle -\langlenglegle X , e^{ - {\rm i} \sigma _3\vartheta }\sigma _1
\sigma _3 \xi _j
\ranglenglegle \langlenglegle Y , e^{ - {\rm i} \sigma _3\vartheta }\sigma _3 \xi _j
\ranglenglegle
\big ] \\& +
\langlenglegle P_c(\mathcal{H}_\omega )
e^{ -{\rm i} \sigma _3\vartheta } X ,
\sigma _3 \sigma _1
P_c(\mathcal{H}_\omega ) e^{ - {\rm i} \sigma _3\vartheta }
Y \ranglenglegle
. \end{aligned}
\end{equation}
By \eqref{eq:ApplmatrixA} we have
{\bf e}gin{equation}\langlebel{eq:Omega1}
{\bf e}gin{aligned} & \langlenglegle \quad , e^{ - {\rm i} \sigma _3\vartheta
}\sigma _3\partial _\omega {\mathcal P}i \ranglenglegle
\wedge \langlenglegle \quad ,e^{ - {\rm i} \sigma _3\vartheta }
{\mathcal P}i \ranglenglegle =\det \mathcal{A}\, d\omega \wedge d\vartheta .
\end{aligned}
\end{equation}
Substituting \eqref{ZOmegaTheta}--\eqref{barZOmegaTheta} we get
{\bf e}gin{equation}\langlebel{eq:Omega2}
{\bf e}gin{aligned} & \langlenglegle \quad , e^{ - {\rm i} \sigma _3\vartheta
}\sigma _3\xi _j\ranglenglegle
\wedge \langlenglegle \quad ,e^{ - {\rm i} \sigma _3\vartheta }
\sigma _1 \sigma _3\xi _j \ranglenglegle =\\& (dz_j + \langlenglegle \sigma _3 \xi _j, \partial _\omega R \ranglenglegle
d \omega + {\rm i} \langlenglegle \sigma _3 \xi _j, \sigma _3 R \ranglenglegle
d \vartheta ) \\& \wedge ( d\overline{z}_j +\langlenglegle \sigma _1\sigma _3 \xi _j,
\partial _\omega R \ranglenglegle d \omega +
{\rm i} \langlenglegle \sigma _1\sigma _3 \xi _j, \sigma _3 R \ranglenglegle
d \vartheta ) .
\end{aligned}
\end{equation}
By Lemma {\rm e}f{lem:gradient f} we have
{\bf e}gin{equation}\langlebel{eq:Omega3}
{\bf e}gin{aligned} & \langlenglegle P_c(\mathcal{H}_\omega )
e^{ -{\rm i} \sigma _3\vartheta }\quad ,
\sigma _3 \sigma _1
P_c(\mathcal{H}_\omega ) e^{ - {\rm i} \sigma _3\vartheta }
\quad \ranglenglegle =\\& \langlenglegle P_c(\omega )P_c(\omega _0)f'\quad + P_c(\omega )\partial _\omega R \, d\omega +{\rm i}
P_c(\omega )\sigma _3 R \, d\vartheta ,\\& \sigma _3 \sigma _1 (P_c(\omega )P_c(\omega _0)f'\quad + P_c(\omega )\partial _\omega R \, d\omega +{\rm i}
P_c(\omega )\sigma _3 R \, d\vartheta) \ranglenglegle
.
\end{aligned}
\end{equation}
Then by \eqref{eq:OmegaComponent1}--\eqref{eq:Omega3} we have
{\bf e}gin{equation}\langlebel{eq:OmegaComponent2}
{\bf e}gin{aligned} & \Omega = ( {\rm i} q'+a_1) d\vartheta \wedge d\omega
+ dz_j\wedge d\overline{z}_j+ \\& + dz_j\wedge \left ( \langlenglegle
\sigma _1 \sigma _3 \xi _j , \partial _\omega R \ranglenglegle \, d\omega
+ {\rm i} \langlenglegle \sigma _1 \sigma _3 \xi _j ,\sigma _3 R \ranglenglegle \,
d\vartheta \right ) \\& - d\overline{z}_j\wedge \left ( \langlenglegle
\sigma _3 \xi _j , \partial _\omega R \ranglenglegle \, d\omega + {\rm i}
\langlenglegle \sigma _3 \xi _j ,\sigma _3 R \ranglenglegle \, d\vartheta
\right ) +\\& +\langlenglegle P_c(\omega ) P_c(\omega _0)f' \quad , \sigma
_3 \sigma _1 P_c(\omega ) P_c(\omega _0) f'
\quad \ranglenglegle +\\& +
\langlenglegle P_c(\omega ) P_c(\omega _0)f' \quad , \sigma
_3 \sigma _1 P_c(\omega )
\partial _\omega R \ranglenglegle \wedge d\omega + \\&
+ {\rm i} \langlenglegle P_c(\omega ) P_c (\omega _0)f' \quad , \sigma _3
\sigma _1 P_c(\omega )
\sigma _3 R \ranglenglegle \wedge d\vartheta
, \end{aligned}
\end{equation}
where
{\bf e}gin{equation}\langlebel{eq:Omega4} {\bf e}gin{aligned} & {\rm i} q'+a_1= \frac{\det \mathcal{A}}{q'} + \langlenglegle P_c(\omega ) \partial _\omega R, \sigma _3 \sigma _1 P_c(\omega )\sigma _3 {\rm i} R \ranglenglegle \\& + \langlenglegle \sigma _3\xi _j,
\partial _\omega R \ranglenglegle \langlenglegle \sigma _1\sigma _3\xi _j,{\rm i} \sigma _3R
\ranglenglegle -
\langlenglegle \sigma _1\sigma _3\xi _j, \partial _\omega R
\ranglenglegle
\langlenglegle \sigma _3\xi _j, {\rm i} \sigma _3R
\ranglenglegle .
\end{aligned}
\end{equation}
In particular we have
{\bf e}gin{equation}\langlebel{eq:a1}
{\bf e}gin{aligned} & a_1:= -{\rm i} q'+\frac{\det \mathcal{A}}{q'} +
\langlenglegle P_{N^\perp _g(\mathcal{H}^{*}_{\omega})} {\rm i}\sigma _3
R, \sigma _3 \sigma _1 \partial _\omega R\ranglenglegle .
\end{aligned}
\end{equation}
Notice that $a_1=a_1(\omega , z,f)$ is smooth in the arguments
$\omega \in \mathcal{O}$, $z\in \mathbb{C} ^n$ and $f\in H^{-K',-S'}$ for any pair
$(K', S')$ with, for $( z,f)$ near 0,
{\bf e}gin{equation}\langlebel{eq:bounda1}
{\bf e}gin{aligned} & |a_1| \le C (K',S') (|z|+\| f\| _{H^{-K',-S'} } )^2 .
\end{aligned}
\end{equation}
At points $U=e^{{\rm i} \sigma _3 \vartheta} {\mathcal P}i _\omega$, that is for
$R=0$, we have
{\bf e}gin{equation}\langlebel{eq:OmegaComponent3}{\bf e}gin{aligned} &
\Omega ={\rm i} d\vartheta \wedge dq + dz_j\wedge
d\overline{z}_j+\langlenglegle P_c(\omega ) P_c(\omega _0)f' \quad ,
\sigma _3 \sigma _1 P_c(\omega ) P_c(\omega _0) f'
\quad \ranglenglegle
. \end{aligned} \nonumber
\end{equation}
At $\omega =\omega _0$ we get $\Omega =\Omega _0$.
\qed
{\bf e}gin{lemma}
\langlebel{lem:1forms} Consider the following forms:
{\bf e}gin{equation} \langlebel{eq:1forms}{\bf e}gin{aligned} &
{\bf e}ta (U)Y:=\frac{1}{2}\langlenglegle \sigma _1\sigma _3U , Y\ranglenglegle ;\\&
{\bf e}ta _0(U):=-{\rm i} qd\vartheta - \sum _j\frac{\overline{z}_j dz_j -
{z}_j d\overline{z}_j}{2} +\frac{1}{2}\langlenglegle f (U),\sigma _3\sigma
_1f '(U)\quad \ranglenglegle.
\end{aligned}
\end{equation}
Then {\bf e}gin{equation} \langlebel{eq:1formsExtDiff} d{\bf e}ta _0=\Omega _0\,
, \quad d{\bf e}ta =\Omega .\end{equation} Set
{\bf e}gin{equation} \langlebel{eq:alpha1} \alpha (U)={\bf e}ta (U)-{\bf e}ta _0(U)
+d\psi (U)\text{ where } \psi (U):=\frac{1}{2}\langlenglegle \sigma _3{\mathcal P}i
, R\ranglenglegle .
\end{equation}
We have $\alpha = \alpha ^{\vartheta} d\vartheta +\alpha ^{\omega}
d\omega
+ \langlenglegle \alpha ^f,f'\ranglenglegle$ with:
{\bf e}gin{equation} \langlebel{eq:alpha2} {\bf e}gin{aligned}
\alpha ^{\vartheta} +\frac{{\rm i}}{2}\| f\| _2^2 =&
-\frac{{\rm i}}{2} \| z\cdot \xi +\overline{z}\cdot
\sigma _1\xi \| _2^2-{\rm i} \langlenglegle z\cdot \xi +\overline{z}\cdot
\sigma _1\xi , \sigma _1P_c(\omega ) f\ranglenglegle
\\& - \frac{{{\rm i}}}{2} \langlenglegle (P_c(\omega )
-P_c(\omega _0 )) f, \sigma _1(P_c(\omega )
+P_c(\omega _0 ))f \ranglenglegle ;
\\ \alpha ^{\omega} =& -\frac{1}{2}
\langlenglegle \sigma _1R, \sigma _3\partial _\omega
R\ranglenglegle ; \\ \alpha ^{f} = &\frac{1}{2}\sigma_1 \sigma _3P_c(\omega
_0)\left ( P_c(\omega ) -P_c(\omega _0)\right )f.
\end{aligned}
\end{equation}
\end{lemma}
\proof Everything is straightforward except for
\eqref{eq:alpha2}, which we now prove. We will sum over repeated
indexes. We substitute $U$ using \eqref{eq:coordinate}
getting
{\bf e}gin{equation} \langlebel{eq:beta} {\bf e}gin{aligned} &
{\bf e}ta =\frac{1}{2}\langlenglegle e^{-{\rm i} \sigma _3\vartheta}\sigma _1\sigma
_3 {\mathcal P}i , \quad \ranglenglegle + \frac{1}{2}\langlenglegle e^{-{\rm i} \sigma
_3\vartheta}\sigma _1\sigma _3P_c(\omega )f, \, \ranglenglegle +\\&
\frac{1}{2} \left [z_j \langlenglegle e^{-{\rm i} \sigma _3\vartheta}\sigma
_1\sigma _3\xi _j, \quad
\ranglenglegle -\overline{z}_j \langlenglegle e^{-{\rm i}
\sigma _3\vartheta} \sigma _3\xi _j, \quad \ranglenglegle \right ] .
\end{aligned}
\end{equation}
When we decompose $\frac{1}{2} e^{-{\rm i} \sigma _3\vartheta}\sigma
_1\sigma _3 {\mathcal P}i $ like $\nabla F$ in \eqref{eq:gradient}, we obtain
{\bf e}gin{equation} \langlebel{eq:betatilde} {\bf e}gin{aligned} &
\frac{1}{2}\langlenglegle e^{-{\rm i} \sigma _3\vartheta}\sigma _1\sigma
_3 {\mathcal P}i , \quad \ranglenglegle = -\frac{q}{q'} \langlenglegle e^{-{\rm i} \sigma
_3\vartheta} \sigma _3 \partial _\omega {\mathcal P}i , \quad \ranglenglegle
\\& -\frac{1}{2} \langlenglegle \sigma _3{\mathcal P}i , \xi _j
\ranglenglegle \left ( \langlenglegle e^{-{\rm i} \sigma _3\vartheta} \sigma _3\xi
_j, \quad \ranglenglegle -\langlenglegle e^{-{\rm i} \sigma _3\vartheta} \sigma
_1\sigma _3\xi _j, \quad \ranglenglegle \right ) \\& - \frac{1}{2}\langlenglegle
e^{-{\rm i} \sigma _3\vartheta}P_c (\mathcal{H}_\omega ^{*})\sigma _3
{\mathcal P}i , \quad \ranglenglegle \,
\end{aligned}
\end{equation}
with by \eqref{eq:ApplmatrixA}
{\bf e}gin{equation} \langlebel{eq:bettild}-\frac{q}{q'}\langlenglegle e^{-{\rm i} \sigma
_3\vartheta} \sigma _3 \partial _\omega {\mathcal P}i , \quad \ranglenglegle
=\frac{q}{q'}\langlenglegle R, \sigma _3 \partial _\omega ^2 {\mathcal P}i \ranglenglegle
\, d \omega -{\rm i} \, \frac{q}{q'} \, (q' +\langlenglegle R, \partial
_\omega {\mathcal P}i \ranglenglegle ) \, d \vartheta .
\end{equation}
Substituting slightly manipulated versions of the formulas in Lemmas \ref{lem:gradient z}--\ref{lem:gradient f}, in particular using $\sigma _3P_c(\omega )= P_c(\omega )^*\sigma _3$, $\sigma _1P_c(\omega )= P_c(\omega ) \sigma _1$ and $\sigma _1\sigma _3=-\sigma _3\sigma _1 $,
and summing over repeated indexes, we get
\begin{equation} \begin{aligned} &
{\bf e}ta _0=-{\rm i} q\, d\vartheta + \frac{1}{2} \overline{z}_j ( \langlenglegle \sigma _1 \sigma _1 \xi _j, \sigma _3\partial _\omega R \ranglenglegle
d \omega + {\rm i} \langlenglegle \xi _j, R \ranglenglegle
d \vartheta - \langlenglegle e^{-{\rm i} \sigma _3\vartheta } \sigma _3 \xi _j, \quad \ranglenglegle ) \\& + \frac 12
{z}_j \, (
\langlenglegle \sigma _1 \xi _j, \sigma _3
\partial _\omega R \ranglenglegle d \omega +
{\rm i} \langlenglegle \xi _j, \sigma _1 R \ranglenglegle
d \vartheta + \langlenglegle e^{-{\rm i} \sigma _3\vartheta }
\sigma _1\sigma
_3\xi _j, \quad \ranglenglegle ) \\& +\frac{1}{2}\langlenglegle f ,\sigma
_3\sigma _1 (1-P_c(\omega ) P_c(\omega _0)) f ' \quad \ranglenglegle + \frac{1}{2}\langlenglegle f ,\sigma
_3\sigma _1P_c(\omega ) e^{-{\rm i}
\sigma _3\vartheta } \quad \ranglenglegle \\& + \frac{1}{2}\langlenglegle \sigma _1 P_c(\omega ) f ,\sigma
_3 \partial _\omega R \ranglenglegle d\omega
+ \frac{ {\rm i}}{2}\langlenglegle P_c(\omega )f , \sigma _1 R \ranglenglegle d\vartheta .
\end{aligned}\nonumber
\end{equation}
Hence
{\bf e}gin{equation} \langlebel{eq:beta0} {\bf e}gin{aligned} &
{\bf e}ta _0={\rm i} \left ( -q +\frac{1}{2}\langlenglegle R, \sigma _1R\ranglenglegle \right
) \, d \vartheta + \frac{1}{2}\langlenglegle \sigma _1R, \sigma
_3\partial _\omega R\ranglenglegle \, d \omega +
\\& +
\frac{1}{2}\langlenglegle \sigma _1\sigma _3 \left ( 1 - P_c(\omega
_0)P_c(\omega ) \right ) f, f'\, \ranglenglegle + \\& + \frac{1}{2} \left (z_j
\langlenglegle e^{-{\rm i} \sigma _3\vartheta}\sigma _1\sigma _3\xi _j, \quad
\ranglenglegle -\overline{z}_j \langlenglegle e^{-{\rm i}
\sigma _3\vartheta} \sigma _3\xi _j, \quad \ranglenglegle \right )+
\\& +
\frac{1}{2}\langlenglegle e^{-{\rm i} \sigma _3\vartheta} \sigma _1\sigma
_3P_c(\omega )f,\quad \ranglenglegle .
\end{aligned}
\end{equation}
By \eqref{eq:coordinate} we have
{\bf e}gin{equation} \langlebel{eq:dPsi0} {\bf e}gin{aligned} & d\psi =
\frac{1}{2}\langlenglegle \sigma _3{\mathcal P}i , \partial _\omega R \ranglenglegle
d\omega +\frac{1}{2} \langlenglegle \sigma _3{\mathcal P}i , \xi _j \ranglenglegle \left (
dz_j -d\overline{z}_j\right ) +\frac{1}{2}\langlenglegle \sigma _3{\mathcal P}i ,
P_c(\omega ) f' \quad \ranglenglegle .
\end{aligned}
\end{equation}
Applying to \eqref{eq:dPsi0} Lemmas
{\rm e}f{lem:gradient z}--{\rm e}f{lem:gradient f}, the fact that, in particular, we have
{\bf e}gin{equation} {\bf e}gin{aligned} &
P_c(\omega )f'(U)= P_c(\omega )P_c(\omega _0)f'(U)= P_c(\omega )\left [
- \partial _\omega R \, d\omega -{\rm i}
\sigma _3 R \, d\vartheta + e^{-{\rm i}
\sigma _3\vartheta }{\kern+.3em {\rm 1} \kern -.22em {\rm l}} \right ] ,
\end{aligned} \nonumber
\end{equation}
and the identities
\eqref{eq:cancel0}--\eqref{eq:cancel1} below,
we get $d\psi =$
{\bf e}gin{equation} \langlebel{eq:dPsi} {\bf e}gin{aligned} & = \frac{1}{2} \langlenglegle \sigma
_3{\mathcal P}i , \xi _j \ranglenglegle \left ( \langlenglegle e^{-{\rm i} \sigma
_3\vartheta} \sigma _3\xi _j, \quad \ranglenglegle -\langlenglegle e^{-{\rm i} \sigma
_3\vartheta} \sigma _1\sigma _3\xi _j, \quad \ranglenglegle \right )
\\& + \frac{1}{2}\langlenglegle e^{-{\rm i} \sigma _3\vartheta}P_c
(\mathcal{H}_\omega ^{*})\sigma _3 {\mathcal P}i , \quad \ranglenglegle
\\& +\frac{q}{q'} \langlenglegle \sigma _3\partial _\omega{\mathcal P}i , \partial
_\omega R \ranglenglegle d\omega -\frac{{\rm i} }{2} \langlenglegle \sigma _3 {\mathcal P}i ,
P _{N_g^\perp (\mathcal{H}^*_\omega )}\sigma _3 R \ranglenglegle d\vartheta
.
\end{aligned}
\end{equation}
To get the last line of \eqref{eq:dPsi} we have used:
{\bf e}gin{equation} \langlebel{eq:cancel0} {\bf e}gin{aligned} & \frac{1}{2} \langlenglegle \sigma _3 {\mathcal P}i , \partial
_\omega R \ranglenglegle - \frac{1}{2} \langlenglegle \sigma _3 {\mathcal P}i , \xi _j \ranglenglegle \langlenglegle \sigma _3\xi _j , \partial
_\omega R \ranglenglegle \\& - \frac{1}{2} \langlenglegle \sigma _3 {\mathcal P}i , \sigma _1\xi _j \ranglenglegle \langlenglegle \sigma _1\sigma _3\xi _j , \partial
_\omega R \ranglenglegle - \frac{1}{2} \langlenglegle \sigma _3{\mathcal P}i , P_c(\omega )\partial
_\omega R \ranglenglegle =\frac{1}{2} \langlenglegle \sigma _3 {\mathcal P}i , \partial
_\omega R \ranglenglegle \\& - \frac{1}{2}\left [ \langlenglegle \sigma _3 {\mathcal P}i , \partial
_\omega R \ranglenglegle -\frac{1}{q'} \langlenglegle \sigma _3 {\mathcal P}i , \sigma _3 {\mathcal P}i \ranglenglegle \langlenglegle \sigma _3\partial _\omega{\mathcal P}i , \partial
_\omega R \ranglenglegle \right ] =\frac{2q}{2q'}\langlenglegle \sigma _3\partial _\omega{\mathcal P}i , \partial
_\omega R \ranglenglegle ;
\end{aligned}
\end{equation}
{\bf e}gin{equation} \langlebel{eq:cancel1} {\bf e}gin{aligned} & - \frac{{\rm i} }{2} \langlenglegle \sigma _3 {\mathcal P}i , \xi _j \ranglenglegle \langlenglegle \sigma _3\xi _j , \sigma _3 R \ranglenglegle - \frac{{\rm i} }{2} \langlenglegle \sigma _3 {\mathcal P}i , \sigma _1\xi _j \ranglenglegle \langlenglegle \sigma _1\sigma _3\xi _j ,\sigma _3 R \ranglenglegle \\& - \frac{{\rm i} }{2} \langlenglegle \sigma _3{\mathcal P}i , P_c(\omega )\sigma _3 R \ranglenglegle =-\frac{{\rm i} }{2} \langlenglegle \sigma _3 {\mathcal P}i ,P _{N_g^\perp (\mathcal{H}^*_\omega )} \sigma _3 R \ranglenglegle .
\end{aligned}
\end{equation}
Let us consider the sum \eqref{eq:alpha1}. There are various cancelations.
The first and second (resp.
the first term of the third) line of \eqref{eq:dPsi} cancel with
the second and third lines of \eqref{eq:betatilde} (resp. the first
term of the rhs of
\eqref{eq:bettild}). The last three terms in
the rhs of \eqref{eq:beta} cancel with the last two lines of \eqref{eq:beta0}.
The $-{\rm i} qd\vartheta $ term in the rhs of \eqref{eq:beta0} cancels
with the $-{\rm i} qd\vartheta $ term in \eqref{eq:bettild}. Adding
the second term of the third line of \eqref{eq:dPsi} with the last
term in the rhs of \eqref{eq:bettild} we get the product of ${\rm i} $ times the
following quantities:
\begin{equation} \label{eq:cancel} \begin{aligned} &
-\frac{1}{2} \langle \sigma _3
\Pi , P_{N^\perp _g(\mathcal{H}^*_\omega)} \sigma _3R\rangle -
\frac{q}{q'} \langle R, \partial _\omega \Pi \rangle =-
\frac{1}{2} \langle \Pi , R\rangle \\& +\frac{1}{2} \langle \sigma
_3 \Pi , P_{N _g(\mathcal{H} _\omega)} \sigma _3R\rangle -
\frac{q}{q'} \langle R, \partial _\omega \Pi \rangle \\& = -
\frac{1}{2} \langle \Pi ,
R \rangle+
\frac{1}{2q'} \langle \sigma _3R,
\Pi \rangle \langle \sigma _3\Pi ,
\partial _\omega \Pi \rangle \\& +
\frac{1}{2q'} \langle \sigma _3R,
\sigma _3 \partial _\omega \Pi \rangle \langle \sigma _3\Pi ,
\sigma _3 \Pi \rangle - \frac{q}{q'} \langle R, \partial _\omega
\Pi \rangle =0,
\end{aligned}
\end{equation}
where for the second equality we have used
\begin{equation} P_{N _g(\mathcal{H} _\omega)}=\frac{1}{q'} \sigma _3 \Pi
\langle \sigma _3 \partial _\omega \Pi , \quad \rangle +\frac{1}{q'}\partial _\omega \Pi
\langle \Pi , \quad \rangle .\nonumber
\end{equation}
The last equality in \eqref{eq:cancel} can be seen as follows. The two terms in the third line in \eqref{eq:cancel} are both equal to 0. Indeed, $\langle \sigma _3\Pi ,
\partial _\omega \Pi \rangle =0$ and, by $
R\in N^{\perp}_g (\mathcal{H}_\omega ^*)$
and $\Pi \in N _g (\mathcal{H}_\omega ^*) $,
$ \langle R, \Pi \rangle =0$. The two terms in
the fourth line in \eqref{eq:cancel} cancel each other. Then we get formulas for $\alpha ^{\omega} $ and $\alpha ^{f}$. We get $\alpha ^{\vartheta} $ also by
\begin{equation} \| P_c(\omega )f\| _2^2=\| f\| _2^2 + \langle (P_c(\omega )
-P_c(\omega _0 )) f, \sigma _1(P_c(\omega )
+P_c(\omega _0 ))f \rangle .\nonumber
\end{equation} \qed
We have, summing over repeated indexes (also on $j$
and $\overline{j}$):
\begin{lemma}
\label{lem:linearAlgebra} We have
\begin{equation} \label{eq:linAlg0}\begin{aligned} &
i _{Y }\Omega _0={\rm i} q' Y_\vartheta d\omega -{\rm i} q' Y_\omega
d\vartheta + ( Y_j d\overline{z}_{j}-Y_{\overline{j}}dz_j)+
\langle \sigma _1 \sigma _3 Y_f,f' \, \rangle .
\end{aligned}\end{equation}
For $a_1$ given by \eqref{eq:a1}, and for $ \Gamma =
i_Y\widetilde{\Omega}$, we have
\begin{equation} \label{eq:linAlg1} \begin{aligned} \Gamma
_\omega =& a_1 Y_\vartheta + \langle \sigma _1\sigma _3\xi _j,
\partial _\omega R \rangle Y_j- \langle \sigma _3\xi _j,
\partial _\omega R \rangle Y_{\overline{j}} \\& + \langle
Y_f,\sigma _3\sigma _1P_c(\omega ) \partial _\omega R \rangle ;\\
- \Gamma _\vartheta =& a_1 Y_\omega -{\rm i} \, \langle \sigma
_1\sigma _3\xi _j, \sigma _3 R \rangle Y_j+{\rm i} \, \langle \sigma
_3\xi _j, \sigma _3 R \rangle Y_{\overline{j}}
\\& - {\rm i} \, \langle Y_f,\sigma _3\sigma _1P_c(\omega ) \sigma _3 R \rangle ;
\\ - \Gamma
_j =& \langle \sigma _1\sigma _3\xi _j,
\partial _\omega R \rangle Y_\omega +{\rm i} \, \langle \sigma
_1\sigma _3\xi _j, \sigma _3 R \rangle Y_{\vartheta}
; \\ \Gamma_{\overline{j}} =&
\langle \sigma _3\xi _j, \partial _\omega R \rangle Y_\omega +{\rm i}
\, \langle \sigma _3\xi _j, \sigma _3 R \rangle Y_{\vartheta}
;
\\ \sigma _3 \sigma _1\Gamma _{f}= &
( P_c(\omega _0) P_c(\omega )-1 ) Y_{f} \\& + Y_\omega
P_c(\omega _0 )P_c(\omega ) \partial _\omega R
+{\rm i} \, Y_{\vartheta} P_c(\omega _0 )
P_c(\omega ) \sigma _3 R
.
\end{aligned}\end{equation}
In particular, for $ \gamma = i_{Y^{t}} {\Omega}_t=i_{Y^{t}}
{\Omega}_0+t\, i_{Y^{t}} \widetilde{{\Omega} } $ we have
\begin{equation} \label{eq:linAlg2} \begin{aligned} \gamma
_\omega =& ({\rm i} q'+t a_1) ({Y}^t)_\vartheta + t \langle \sigma
_1\sigma _3\xi _j,
\partial _\omega R \rangle ({Y}^t)_j- t \langle \sigma _3\xi _j,
\partial _\omega R \rangle ({Y}^t)_{\overline{j}} \\& + t \langle
({Y}^t)_f,\sigma _3\sigma _1P_c(\omega _0) P_c(\omega )\partial _\omega R
\rangle ;\\ - \gamma _\vartheta =& ({\rm i} q'+t a_1)
({Y}^t)_\omega -{\rm i} \, t \, \langle \sigma _1\sigma _3\xi _j,
\sigma _3 R \rangle ({Y}^t)_j+{\rm i} \, t \, \langle \sigma _3\xi _j,
\sigma _3 R \rangle ({Y}^t)_{\overline{j}}
\\& -{\rm i} \,
t \, \langle ({Y}^t)_f,\sigma _3\sigma _1P_c(\omega _0) P_c(\omega
)\sigma _3 R \rangle ;
\\ - \gamma
_j =& ({Y}^t) _{\overline{j}} +t\langle \sigma _1\sigma _3\xi _j,
\partial _\omega R \rangle ({Y}^t)_\omega +{\rm i} \,
t \, \langle \sigma _1\sigma _3\xi _j, \sigma _3 R \rangle
({Y}^t)_{\vartheta}
; \\ \gamma_{\overline{j}} =& ({Y}^t)_{ {j}}+t
\langle \sigma _3\xi _j, \partial _\omega R \rangle ({Y}^t)_\omega
+{\rm i} \, t \, \langle \sigma _3\xi _j, \sigma _3 R \rangle
({Y}^t)_{\vartheta}
;
\\ \sigma _3 \sigma _1\gamma _{f}= & ({Y}^t)_{f}+
t
( P_c(\omega _0 )P_c(\omega ) -1 ) ({Y}^t)_{f} \\& +t \,
({Y}^t)_\omega \,
P_c(\omega _0 )P_c(\omega ) \partial _\omega R
+t \, {\rm i} \, ({Y}^t)_{\vartheta}\, P_c(\omega _0 )
P_c(\omega ) \sigma _3 R
.
\end{aligned}\end{equation}
\end{lemma}
\proof \eqref{eq:linAlg0} is trivial. \eqref{eq:linAlg2} follows
immediately from \eqref{eq:linAlg0}--\eqref{eq:linAlg1}. In the
following formulas we denote $P_c=P_c(\omega )$, $P_c^0=P_c(\omega
_0)$ and we sum on repeated indexes. We can split
$\widetilde{\Omega} =\widehat{\Omega}+ \widehat{\Omega}_1$ with, see
\eqref{eq:OmegaComponent2},
\begin{equation} \begin{aligned} & \widehat{\Omega}_1
=\langle (P_c^0P_c -1) f' \, , \sigma _3 \sigma _1f' \, \rangle
,\\& \widehat{\Omega} = a_1 d\vartheta \wedge d\omega +dz_j\wedge (
\langle \sigma _1 \sigma _3 \xi _j, \partial _\omega R \rangle
d\omega + {\rm i} \langle \sigma _1 \sigma _3 \xi _j, \sigma _3 R
\rangle d\vartheta ) \\& -d\overline{z}_j\wedge ( \langle \sigma
_3 \xi _j,
\partial _\omega R \rangle d\omega + {\rm i} \langle \sigma _3 \xi
_j, \sigma _3 R \rangle d\vartheta ) +\\& \langle P_cP_c^0f' \, ,
\sigma _3 \sigma _1P_c \partial _\omega R \rangle \wedge d\omega
+{\rm i} \langle P_cP_c^0f' \, , \sigma _3 \sigma _1P_c \sigma _3 R
\rangle \wedge d\vartheta .
\end{aligned} \nonumber
\end{equation}
Then
\begin{equation} i_Y\widehat{\Omega}_1 = \langle \sigma _1 \sigma _3
(P_c^0P_c -1)Y_f , f' \, \rangle \nonumber
\end{equation}
and
\begin{equation}\begin{aligned} & i_Y\widehat{\Omega} = \big [
a_1Y_\vartheta +Y_j \langle \sigma _1
\sigma _3 \xi _j, \partial _\omega R \rangle
- Y_{\overline{j}} \langle
\sigma _3 \xi _j, \partial _\omega R \rangle
+\langle Y_f
, \sigma _3 \sigma _1 P_c \partial _\omega R \rangle \big ]
d\omega +\\& \big [
-a_1Y_\omega +{\rm i} Y_j \langle \sigma _1
\sigma _3 \xi _j, \sigma _3 R \rangle
- {\rm i} Y_{\overline{j}} \langle
\sigma _3 \xi _j, \sigma _3 R \rangle +{\rm i} \langle Y_f
, \sigma _3 \sigma _1 P_c \sigma _3 R \rangle \big ]
d\vartheta \\& -(
\langle \sigma _1 \sigma _3 \xi _j, \partial _\omega R \rangle
Y_\omega + {\rm i} \langle \sigma _1 \sigma _3 \xi _j, \sigma _3 R
\rangle Y_ \vartheta ) dz_j\\& +( \langle \sigma _3 \xi _j,
\partial _\omega R \rangle Y_\omega + {\rm i} \langle \sigma
_3 \xi _j, \sigma _3 R \rangle Y_ \vartheta ) d\overline{z}_j
\\& -\langle f'\, , Y_\omega \sigma _3 \sigma _1 P_c^0P_c
\partial _\omega R + {\rm i} Y_\vartheta \sigma _3 \sigma _1 P_c^0P_c
\sigma _3 R\rangle .
\end{aligned} \nonumber
\end{equation}
\qed
\begin{remark} \label{rem:correction} If we choose $\gamma =-{\rm i} \alpha$
in Lemma \ref{lem:linearAlgebra} with the $\alpha$ of
\eqref{eq:alpha1}, and if $\mathcal{F}_t$ is the flow of $Y^t$,
then $ ({Y}^t)_\vartheta \neq 0$ is an obstruction to the
fact that, for $0<t\le 1$, $K\circ \mathcal{F}_t$ is the
hamiltonian of the sort of semilinear NLS that \eqref{eq:SystPoiss}
is. Indeed $ ({Y}^t)_f = - t{\rm i} ({Y}^t)_{\vartheta}\, P_c(\omega _0 )
P_c(\omega ) \sigma _3 R + \mathcal{S}(\R ^3, \mathbb{C} ^2).$ Then if we substitute
$f$ with $f- {\rm i} ({Y}^1)_{\vartheta}\, P_c(\omega _0 )
P_c(\omega ) \sigma _3 R +\dots $ in $\langle \mathcal{H}_\omega f, \sigma _3 \sigma _1f\rangle $ we obtain a term of the form $({Y}^1)_{\vartheta} ^2 \langle \mathcal{H}_\omega f, \sigma _3 \sigma _1f\rangle . $
To avoid terms like this, we want flows defined from fields with $ ({Y}^t)_\vartheta =0
$. To this effect we add a correction to $\alpha$.
\end{remark}
We first consider the hamiltonian fields of $\vartheta$ and
$\omega$.
\begin{lemma}
\label{lem:HamThetaOmega} Consider the vectorfield
$X^t_\vartheta $ (resp. $X^t_\omega $) defined by
$i_{X^t_\vartheta }\Omega _t=-{\rm i} d\vartheta$
(resp. $i_{X^t_\omega }\Omega _t=-{\rm i} d\omega$). Then we have (here $P_c=P_c(\mathcal{H}_\omega )$
and $P_c^0=P_c(\mathcal{H}_{\omega _0 })$):
\begin{equation}\label{hamiltonians1}\begin{aligned}& X^t_\vartheta
= (X^t_\vartheta ) _\omega \big [ \frac{\partial}{\partial\omega} -
t
\langle
\sigma _3 \xi _j , \partial _\omega R\rangle
\frac{\partial}{\partial z_j} - t
\langle \sigma _1
\sigma _3 \xi _j , \partial _\omega R\rangle
\frac{\partial}{\partial \overline{z}_j} \\& -tP_c^0(1 +t
P_c-tP_c^0
)^{-1} P_c^0 P_c\partial _\omega R \big ] ,
\\& X^t_\omega
= (X^t_\omega ) _\vartheta \big [ \frac{\partial}{\partial\vartheta}
- {\rm i} t
\langle
\xi _j , R\rangle
\frac{\partial}{\partial z_j} +{\rm i} t
\langle
\sigma _1 \xi _j , R\rangle \frac{\partial}{\partial
\overline{z}_j} \\& -{\rm i} tP_c^0(1 +t P_c-tP_c^0
)^{-1} P_c^0
P_c\sigma _3 R \big ] ,
\end{aligned}
\end{equation}
where, for the $a_1$ of \eqref{eq:a1}, we have
\begin{equation}\label{hamiltonians2}\begin{aligned}&
(X^t_\vartheta ) _\omega =\frac{ {\rm i}}{ {\rm i} q'+ta_1+ ta_2}=-
(X^t_\omega ) _\vartheta
\end{aligned}
\end{equation}
\begin{equation}\label{a2}\begin{aligned} a_2:=& {\rm i} t
\langle
\sigma _3 \xi _j , \partial _\omega R\rangle \langle \sigma _1\xi
_j ,
R\rangle -{\rm i} t \langle \sigma _1 \sigma _3 \xi _j ,
\partial _\omega R\rangle \langle \xi _j , R\rangle +\\&
+{\rm i} t \langle P_c^0(1 +t P_c-tP_c^0
)^{-1} P_c^0 P_c\partial _\omega R,
\sigma _3 \sigma _1 P_c \sigma _3 R\rangle .
\end{aligned}
\end{equation}
\end{lemma}
\proof By \eqref{eq:linAlg2} for $\gamma =-{\rm i} \,d\vartheta$,
$X^t_\vartheta $ satisfies
\begin{equation}\label{HamTheta1}\begin{aligned}
& (X^t_\vartheta ) _\vartheta =0; \\ & {\rm i} = ({\rm i} q' + ta_1)
(X^t_\vartheta ) _\omega -{\rm i} t \langle \sigma _1 \sigma _3 \xi _j ,
\sigma _3 R\rangle (X^t_\vartheta ) _j+\\& + {\rm i} t \langle \sigma _3
\xi _j , \sigma _3 R\rangle (X^t_\vartheta ) _{\overline{j}}-{\rm i} t
\langle (X^t_\vartheta ) _{f}, \sigma _3 \sigma _1 P_c \sigma _3 R\rangle ; \\&
(X^t_\vartheta ) _{f} =t (1-P_c^0 P_c )(X^t_\vartheta ) _{f} - t
(X^t_\vartheta ) _\omega P_c^0P_c
\partial _\omega R ;\\&
(X^t_\vartheta ) _{\overline{j}} = -t (X^t_\vartheta ) _\omega
\langle \sigma _1
\sigma _3 \xi _j , \partial _\omega R\rangle ; \,
(X^t_\vartheta ) _{ {j}}=- t (X^t_\vartheta ) _\omega
\langle
\sigma _3 \xi _j , \partial _\omega R\rangle .
\end{aligned}
\end{equation}
This yields \eqref{hamiltonians1} for $X^t_\vartheta $ and the first
equality in \eqref{hamiltonians2}. By \eqref{eq:linAlg2} for $\gamma
=-{\rm i} \,d\omega$, $X^t_\omega $ satisfies
\begin{equation}\label{HamOmega1}\begin{aligned}
& (X^t_\omega ) _\omega =0; \\ & -{\rm i} \, -{\rm i} \, q' (X^t_\omega )
_\vartheta = ta_1 (X^t_\omega ) _\vartheta + t \langle \sigma _1
\sigma _3 \xi _j , \partial _\omega R\rangle (X^t_\omega ) _j-\\& -
t \langle \sigma _1 \sigma _3 \xi _j , \partial _\omega R\rangle
(X^t_\omega ) _{\overline{j}}+ t \langle (X^t_\omega ) _{f}, \sigma
_3 \sigma _1 P_c \partial _\omega R\rangle ;
\\& (X^t_\omega ) _{f} = t (1-P_c^0P_c)
(X^t_\omega ) _{f}- {\rm i} \, t (X^t_\omega ) _\vartheta P_c^0P_c \sigma _3 R ;\\&
(X^t_\omega ) _{\overline{j}} = -{\rm i} \,
t (X^t_\omega ) _\vartheta
\langle \sigma _1
\sigma _3 \xi _j , \sigma _3 R\rangle ;\quad
(X^t_\omega ) _{ {j}}=- {\rm i} \, t (X^t_\omega ) _\vartheta
\langle
\sigma _3 \xi _j , \sigma _3 R\rangle .
\end{aligned}
\end{equation}
This yields the rest of
\eqref{hamiltonians1}--\eqref{hamiltonians2}. \qed
The following lemma is an immediate consequence of the formulas in Lemma \ref{lem:HamThetaOmega} and of \eqref{eq:bounda1}.
\begin{lemma}
\label{lem:HamBounds}
For any $ (K', S' ,K , S)$ we have
\begin{equation}\label{HamTheta2}\begin{aligned} &
|1- (X^t_\vartheta ) _\omega \, q'|\lesssim \| R \| _{H^{-K',-S'}}^2
\\& |(X^t_\vartheta ) _j| +|(X^t_\vartheta )
_{\overline{j}}| + \| (X^t_\vartheta ) _{f} \| _{H^{ K , S
}}\lesssim \| R \| _{H^{-K',-S'}} .
\end{aligned}
\end{equation}
and
\begin{equation}\label{HamOmega2}\begin{aligned} &
|1+ (X^t_\omega ) _\vartheta \, q'|\lesssim \| R \| _{H^{-K',-S'}}^2\,
,
\\& |(X^t_\omega ) _j| +|(X^t_\omega )
_{\overline{j}}| + \| (X^t_\omega ) _{f} \| _{H^{ -K',-S'
}}\lesssim \| R \| _{H^{-K',-S'}} . \end{aligned}
\end{equation}
\end{lemma}
Set $H_c^{K,S}(\omega )=P_c(\omega )H ^{K,S}$ and denote
\begin{equation}\label{eq:PhaseSpace} \widetilde{\mathcal{P}}^{K,S}=\mathbb{C }^m\times H_c^{K,S}(\omega
_0)\, , \quad \mathcal{P}^{K,S}=\mathbb{R}^2 \times \widetilde{\mathcal{P}}^{K,S}
\end{equation}
with elements $(\vartheta , \omega , z, f)\in \mathcal{P}^{K,S}$ and $( z,
f)\in \widetilde{\mathcal{P}}^{K,S}$.
\begin{lemma}
\label{lem:flow Htheta} We consider $\forall$ $t\in [0,1]$
the hamiltonian field $X^t_\vartheta $ and
the flow
\begin{equation}\label{FlowTheta1}
\frac{d}{ds}\Pi _s(t,U)=X^t_\vartheta (\Pi _s(t,U))\, , \, \Pi
_0(t,U)=U.\end{equation} \begin{itemize}
\item[(1)] For any $(K', S')$ there is an $s_0>0$
and a neighborhood $\mathcal{U}$ of $\mathbb{R}\times \{ (\omega
_0,0,0)\}$ in $\mathcal{P}^{-K',-S'}$ such that the map $ (s,t,U)\to \Pi
_s(t,U)$ is smooth
\begin{equation}\label{FlowTheta2} (-s_0,s_0) \times
[0,1]\times \left (\mathcal{U}\cap \{ \omega =\omega _0\}
\right )\to \mathcal{P}^ {-K',-S'} .
\end{equation}
\item[(2)] $\mathcal{U}$ can be chosen so that for any $t\in [0,1]$
there is another neighborhood $\mathcal{V}_t$ of $\mathbb{R}\times
\{ (\omega _0,0,0)\}$ in $\mathcal{P}^{-K',-S'}$ s.t. the above map
establishes a diffeomorphism
\begin{equation}\label{FlowTheta5} (-s_0,s_0) \times
\left ( \mathcal{U}\cap \{ \omega =\omega _0\}
\right )\to \mathcal{V}_t .
\end{equation}
\item[(3)]
$ f(\Pi _s(t,U))
-f (U )=G (t, s , z, f ) $ is a smooth map for all $(K,S)$
\begin{equation}(-s_0,s_0) \times
[0,1]\times \left ( \mathcal{U}\cap \{ \omega =\omega _0\}
\right )\to H^{K,S} \nonumber
\end{equation}
with $ \| G (t, s , z, f ) \| _{H^{K,S}} \le C |s| (|z| +\| f\|
_{H^{-K',-S'}} ).$
\end{itemize}
\end{lemma}\proof
Claims (1)--(2) follow by Lemma \ref{lem:HamThetaOmega} which
implies $X^t_\vartheta \in C^\infty (\mathcal{U}, \mathcal{P} ^{K,S})$ for
all $(K,S)$. Let $\zeta $ be any coordinate $z_j$ or $f$.
Then, for $\zeta $ a scalar coordinate, we have
\begin{equation}\label{FlowTheta31} \begin{aligned} &
|\zeta (\Pi _s(t,U))- \zeta (U)|\le
\int _{-s}^s |(X^t_\vartheta )_{\zeta} (\Pi _{s'}(t,U))| ds'
\\& \le C |s| \sup _{|s'|\le s} (|z(\Pi _{s'}(t,U))|
+\| f(\Pi _{s'}(t,U))\|
_{H^{-K',-S'}} ) . \end{aligned}
\end{equation}
For $\zeta =f$ we have
\begin{equation}\label{FlowTheta32} \begin{aligned} &
\|f (\Pi _s(t,U))- f (U)\|_{H^{K,S}}\le
\int _{-s}^s \|(X^t_\vartheta )_{f} (\Pi _{s'}(t,U))\| _{H^{K,S}} ds'
\le \text{rhs\eqref{FlowTheta31}.} \end{aligned}\nonumber
\end{equation}
The above two formulas imply the following, which
yields claim (3),
\begin{equation}\label{FlowTheta33} \begin{aligned} &
\|f (\Pi _s(t,U))- f (U)\|_{H^{K,S}}
\le C |s| (|z| +\| f\|
_{H^{-K',-S'}} ) , \\& |z (\Pi _s(t,U))- z (U)|
\le C |s| (|z| +\| f\|
_{H^{-K',-S'}} ) .\end{aligned}
\end{equation}
\qed
\begin{lemma}
\label{lem:correction alpha} We consider a scalar function
$F(t,U)$ defined as follows:
\begin{equation}\label{defCorrect} F (t,\Pi _s(t,U))= {\rm i} \,
\int _0^s\alpha _{\Pi _{s'}(t,U)}\left ( X^t_\vartheta (\Pi
_{s'}(t,U))\right ) ds'\, , \text{ where $\omega (U)=\omega _0$ .}
\end{equation}
We have $F
\in C^{ \infty} ( [0,1]\times \mathcal{U}, \mathbb{R})$ for
a neighborhood $\mathcal{U}$ of $\mathbb{R}\times \{ (\omega
_0,0,0)\}$ in $\mathcal{P}^{-K',-S'}$. We have
\begin{equation} \label{estCorrect}
|F (t,U)| \le C (K',S') |\omega -\omega _0|\, \left ( |z|+ \| f \|
_{H^{-K',-S'}}\right )^2.
\end{equation}
We have (exterior differentiation only in $U$)
\begin{equation}\label{Vectorfield21}
(\alpha +{\rm i} \, d F )(X^t_\vartheta ) =0.
\end{equation}
\end{lemma}
\proof $F$ is smooth by \eqref{eq:alpha2} and Lemma
\ref{lem:flow Htheta}. \eqref{Vectorfield21} follows by \eqref{FlowTheta1}
and by \begin{equation}\alpha _{ U}\left (X^t_\vartheta (U)\right ) +
{\rm i} \frac{d}{ds} _{|s=0} F (t,\Pi _s(t,U)) =0 . \end{equation} By
\eqref{eq:alpha2} and \eqref{HamTheta2} we have
\begin{equation}\label{BoundCorrect1} \begin{aligned} &
|\alpha (X^t_\vartheta )|\le | \alpha ^\omega | \, | (X^t_\vartheta
) _\omega | + | \langle \alpha ^f, (X^t_\vartheta ) _f \rangle |
\lesssim \left ( |z|+\| f \| _{H^{-K',-S'}} \right ) ^2.
\end{aligned}
\end{equation}
Then \eqref{estCorrect} follows by $|s|\approx |\omega (\Pi
_s(t,U))-\omega _0|.$ \qed
\begin{lemma}
\label{lem:vectorfield} Denote by $\mathcal{X}^t$ the vector field which solves
\begin{equation}\label{Vectorfield1}
i_{\mathcal{X}^t} \Omega _t=-\alpha - {\rm i} \, d F (t) .
\end{equation}
Then the following properties hold.
\begin{itemize}
\item[(1)] There is a neighborhood $\mathcal{U}$ of
$ \mathbb{R}\times \{ (\omega _0, 0,0) \} $ in $\mathcal{P} ^{1,0}$ such
that $ \mathcal{X}^t ( U)
\in C^{ \infty} ( [0,1]\times \mathcal{U}, \mathcal{P} ^{1,0})$.
\item[(2)] We have
$
(\mathcal{X}^t)_\vartheta \equiv 0.
$
\item[(3)] For constants $C(K,S,K',S')$ we have
\begin{equation}\label{Vectorfield3}\begin{aligned} &
\left | (\mathcal{X}^t)_\omega + \frac{\|f\| _2^2 }{2q'(\omega )}
\right | \lesssim (|z|+\| f \| _{H^{-K',-S'}})^2; \\&
|(\mathcal{X}^t )_
{j} | +|(\mathcal{X}^t )_
{\overline{j }} | +\|
(\mathcal{X}^t )_{f} \| _{H^{ K , S }}
\lesssim (|z|+\| f \| _{H^{-K',-S'}}) \times \\&
\times
(|\omega -\omega _0| +|z|+\| f \| _{H^{-K',-S'}} +\|f\| _{L^2}^2).
\end{aligned}\end{equation}
\item[(4)] We have
\begin{equation} \label{eq:L_X^t} L _{\mathcal{X}^t}
\frac{\partial}{\partial \vartheta} :=\left [ \mathcal{X}^t,
\frac{\partial}{\partial \vartheta} \right ] =0.
\end{equation}
\end{itemize}
\end{lemma}
\proof Claim (1) follows from the regularity properties of $\alpha$,
$F$ and $\Omega _t$ and from equations \eqref{Vectorfield5} and
\eqref{Vectorfield7} below. \eqref{Vectorfield21} implies (2) by
\begin{equation} {\rm i} (\mathcal{X}^t)_\vartheta =
{\rm i} d\vartheta (\mathcal{X}^t)=- i _{X^t_\vartheta}\Omega _t
(\mathcal{X}^t )= i _{\mathcal{X}^t}\Omega _t (X^t_\vartheta
)=-(\alpha +{\rm i} \, dF )(X^t_\vartheta ) =0. \nonumber
\end{equation}
We have ${\rm i} (\mathcal{X}^t)_\omega ={\rm i} d\omega
(\mathcal{X}^t)=- i _{X^t_\omega}\Omega _t
(\mathcal{X}^t )$, so by \eqref{Vectorfield1} and \eqref{hamiltonians1} we get
\begin{equation}\label{Vectorfield5} \begin{aligned}
& {\rm i} (\mathcal{X}^t)_\omega = i _{\mathcal{X}^t}\Omega
_t (X^t_\omega )= -(X^t_\omega )_{\vartheta} \big [\alpha ^\vartheta
+t
\partial _j F \, \langle
\xi _j , R\rangle -t
\partial _{\overline{j}}F \langle
\sigma _1 \xi _j , R\rangle \\ & +t \langle \nabla _{f}F + {\rm i}
\alpha ^{f} , P_c^0(1 +t P_c-tP_c^0
)^{-1} P_c^0
P_c\sigma _3 R \rangle \big ].\end{aligned}
\end{equation}
Then by \eqref{eq:alpha2}, \eqref{hamiltonians2}, \eqref{eq:a1} and
\eqref{a2}, we get the first inequality in \eqref{Vectorfield3}:
\begin{equation}\label{Vectorfield51} \begin{aligned}
&\left | (\mathcal{X}^t)_\omega +\frac{\| f\| _2^2 }{2q'(\omega )}
\right |
\le C \left ( |z| +\| f\| _{H^{-K',-S'}} \right ) ^2.\end{aligned}
\end{equation}
By \eqref{eq:linAlg2} we have the following equations
\begin{equation}\label{Vectorfield7} \begin{aligned}
{\rm i} \, \partial _j F & =(\mathcal{X}^t )_
{\overline{j }} +
t\langle \sigma _1 \sigma _3 \xi _j, \partial _\omega R \rangle
(\mathcal{X}^t)_\omega \, \\ - {\rm i} \,
\partial _{\overline{j }}F & =(\mathcal{X}^t )_{j} +
t\langle \sigma _3 \xi _j, \partial _\omega R \rangle
(\mathcal{X}^t)_\omega \, \\ \sigma _3\sigma _1(\alpha ^{f}+{\rm i} \,
\nabla _{f} F ) & =-(\mathcal{X}^t )_{f} -
t (P_c ^0 P_c-1)(\mathcal{X}^t )_{f}
\\& - t (\mathcal{X}^t)_\omega P_c^0P_c \partial _\omega
R .\end{aligned}
\end{equation}
Formulas \eqref{Vectorfield7} imply
\begin{equation}\label{Vectorfield8} \begin{aligned}
& |(\mathcal{X}^t )_
{\overline{j }} |\le |\partial _{j} F | +C
\left ( |z| +\| f\| _{H^{-K',-S'}} \right )
|(\mathcal{X}^t)_\omega |\\ & |(\mathcal{X}^t )_
{j} |\le |\partial _{\overline{j}}
F | +C\left ( |z| +\| f\| _{H^{-K',-S'}} \right )
|(\mathcal{X}^t)_\omega | \\& \|
(\mathcal{X}^t )_{f} \| _{H^{ K , S }} \le
\|
\alpha ^{f} \| _{H^{ K , S }}+ \|
\nabla
_{f} F \| _{H^{ K , S }} +C\left ( |z| +\| f\| _{H^{-K',-S'}}
\right ) |(\mathcal{X}^t)_\omega | \end{aligned}\nonumber
\end{equation}
which with \eqref{Vectorfield51}, \eqref{eq:alpha2} and
\eqref{estCorrect} imply \eqref{Vectorfield3}. \eqref{eq:L_X^t} is a consequence of the following equalities, which we will justify below:
\begin{equation}\label{lieder}0= L_{\frac{\partial}{\partial \vartheta}} \left (
i_{\mathcal{X}^t} \Omega _t\right ) = i_{[\frac{\partial}{\partial
\vartheta},\mathcal{X}^t]} \Omega _t+i_{\mathcal{X}^t}
L_{\frac{\partial}{\partial \vartheta}}\Omega _t
=i_{[\frac{\partial}{\partial \vartheta},\mathcal{X}^t]} \Omega _t .
\end{equation}
The first equality is a consequence of \eqref{Vectorfield1} and
$L_{\frac{\partial}{\partial \vartheta}} \left (\alpha + {\rm i}
d F \right ) =0 $. The latter is a consequence of $ L_{\frac{\partial}{\partial \vartheta}} \alpha =0$ and $\frac{\partial}{\partial \vartheta}F=0$. Notice that
$\frac{\partial}{\partial \vartheta}F=0$ can be proved observing that
\eqref{Vectorfield21}, \eqref{eq:alpha2} and Lemma \ref{lem:HamThetaOmega} imply $ X^t _\vartheta \frac{\partial}{\partial \vartheta}F=0$ and that
on $\omega =\omega _0$ we have $\frac{\partial}{\partial \vartheta}F=0$.
$ L_{\frac{\partial}{\partial \vartheta}} \alpha =0$ is a consequence of
the Cartan ``magic'' formula $L_X \gamma = (i_Xd+ di_X)\gamma $,
of the definition \eqref{eq:alpha1}
and of the following equalities:
\begin{equation}\label{eq:magic} \begin{aligned} &
L_{\frac{\partial}{\partial \vartheta}} \beta = d i_{\frac{\partial}{\partial \vartheta}} \beta + i_{\frac{\partial}{\partial \vartheta}} d\beta = -\frac{{\rm i} }{2}d\langle \sigma _1U,U\rangle + {\rm i} \langle \sigma _1U,\quad \rangle =0;\\&
L_{\frac{\partial}{\partial \vartheta}} \beta _0= d i_{\frac{\partial}{\partial \vartheta}} \beta _0+ i_{\frac{\partial}{\partial \vartheta}} d\beta _0=-{\rm i} dq -{\rm i} i_{\frac{\partial}{\partial \vartheta}} (dq\wedge d\vartheta )=-{\rm i} dq +{\rm i} dq\wedge i_{\frac{\partial}{\partial \vartheta}} d\vartheta =0; \\&
L_{\frac{\partial}{\partial \vartheta}}d\psi = d i_{\frac{\partial}{\partial \vartheta}}d\psi = \frac{1}{2}d \frac{\partial}{\partial \vartheta}\langle \sigma _3\Pi
, R\rangle =0.
\end{aligned}
\end{equation}
The second equality in \eqref{lieder} follows by the product rule for the Lie
derivative. Finally, the third equality in \eqref{lieder} follows by
$L_{\frac{\partial}{\partial \vartheta}}\Omega _t= (1-t)L_{\frac{\partial}{\partial \vartheta}}\Omega _0 + tL_{\frac{\partial}{\partial \vartheta}}\Omega =0$, consequence of
$ L_{\frac{\partial}{\partial \vartheta}}\Omega =0$ (resp. $ L_{\frac{\partial}{\partial \vartheta}}\Omega _0 =0$), in turn consequence of the first (resp. second) line in \eqref{eq:magic} and of the identity $L_X d\gamma = dL_X \gamma $.
\qed
We have:
\begin{lemma}
\label{lem:flow1} Consider the vectorfield $\mathcal{X}^t$
in Lemma \ref{lem:vectorfield} and denote by $\mathcal{F}_t(U)$
the corresponding flow. Then the flow $\mathcal{F}_t(U)$ for $U$
near $ e^{{\rm i} \sigma _3 \vartheta}\Pi _{\omega _0}$ is defined for
all $t\in [0,1]$. We have $\vartheta \circ \mathcal{F}_1=\vartheta
$. We have for $\ell =j,\overline{j}$,
\begin{equation}\label{flow5}\begin{aligned} &
q\left ( \omega (\mathcal{F}_1 (U))\right ) = q\left (\omega (U)
\right ) -\frac{\| f \|_2^2 }{2} + \mathcal{E}_{\omega} (U)
\\& z_\ell (\mathcal{F}_1 (U))=z_\ell (U)+ \mathcal{E}_{\ell }
(U)\\& f (\mathcal{F}_1 (U))= f (U) + \mathcal{E}_{f} (U)
\end{aligned}
\end{equation}
with
\begin{eqnarray}
\label{flow6} & |\mathcal{E}_{
\omega }(U)| \lesssim (|\omega -\omega _0| +|z|+\| f \|
_{H^{-K',-S'}} )^2,
\\& \label{flow7}
|\mathcal{E}_{ \ell }(U)| +\| \mathcal{E}_f(U)\| _{H^{ K , S
}}\lesssim (|\omega -\omega _0| +|z|+\| f \| _{H^{-K',-S'}}+\| f \|
^2_{L^{2}} ) \\& \times (|\omega -\omega _0| +|z|+\| f \|
_{H^{-K',-S'}} ) .\nonumber
\end{eqnarray}
For each $\zeta =\omega , z_\ell , f$ we have
\begin{equation}\label{flow71}\begin{aligned} & \mathcal{E}_\zeta(U)
=\mathcal{E}_\zeta ( \| f\| _{L^2}^2, \omega , z, f)
\end{aligned}
\end{equation}
with, for a neighborhood $\mathcal{U}^{-K',-S'}$ of
$\mathbb{R}\times \{ (\omega _0,0,0)\}$ in $\mathcal{P}^{-K',-S'}$ and for
some fixed $a_0>0$
\begin{equation}\label{flow8}\begin{aligned} & \mathcal{E}_\zeta
( \varrho , \omega , z, f)\in C^\infty ( (-a_0,a_0)\times
\mathcal{U}^{-K',-S'}, \mathbb{C})
\end{aligned}
\end{equation}
for $\zeta =\omega , z_\ell $ and with
\begin{equation}\label{flow9}\begin{aligned} & \mathcal{E}_f
( \varrho , \omega , z, f)\in C^\infty ( (-a_0,a_0)\times
\mathcal{U}^{-K',-S'}, H ^{ K , S }).
\end{aligned}
\end{equation}
\end{lemma}
\proof We add a new variable $\varrho$. We define a new field by
\begin{equation}\label{Vectorfield52} \begin{aligned}
& {\rm i} (Y^t)_\omega = -(X^t_\omega )_{\vartheta} \big [
\alpha ^\vartheta +{\rm i} \frac{\| f\|_2^2-\rho }{2}
+t
\partial _j F \, \langle
\xi _j , R\rangle -t
\partial _{\overline{j}}F \langle
\sigma _1 \xi _j , R\rangle \\ & +t \langle \nabla _{f}F + {\rm i}
\alpha ^{f} , P_c^0(1 +t P_c-tP_c^0
)^{-1} P_c^0
P_c\sigma _3 R \rangle \big ], \end{aligned}
\end{equation}
by \begin{equation}\label{Vectorfield71} \begin{aligned}
{\rm i} \, \partial _j F & =(Y^t )_
{\overline{j }} +
t\langle \sigma _1 \sigma _3 \xi _j, \partial _\omega R \rangle
(Y^t)_\omega \, \\ - {\rm i} \,
\partial _{\overline{j }}F & =(Y^t )_{j} +
t\langle \sigma _3 \xi _j, \partial _\omega R \rangle
(Y^t)_\omega \, \\ \sigma _3\sigma _1(\alpha ^{f}+
{\rm i} \, \nabla
_{f} F ) & =(Y^t )_{f} +
t (P_c ^0 P_c-1)(Y^t )_{f}
\\& - t (Y^t)_\omega P_c^0P_c \partial _\omega
R .\end{aligned}
\end{equation}
and by $Y^t_\rho =2\langle (Y^t)_f,\sigma _1 f\rangle $. Then $Y^t=Y^t(
\omega , \rho , z, f)$ defines a new flow $\mathcal{G}_t (\rho , U)
$, which reduces to $\mathcal{F}_t ( U) $ in the invariant manifold
defined by $\rho = \| f\| _2^2.$ Notice that by $\rho (t)= \rho (0)+\int _0^t
Y_{\rho}^{s}ds$ it is easy to conclude $\rho (\mathcal{G}_1 (\rho ,U) )= \rho +O(\text{rhs\eqref{flow6}})$. Using \eqref{HamOmega2}, \eqref{eq:alpha2} and
\eqref{Vectorfield52} it is then easy to get
\begin{equation} \begin{aligned} &q(\omega (t))= q(\omega (0))+\int _0^t q'(\omega (s))Y_{\omega}^{s}ds =q(\omega (0))-\int _0^t \frac{\rho( s ) }{2} ds +O(\text{rhs\eqref{flow6}}). \end{aligned}\nonumber
\end{equation}
By standard arguments, see for example the proof of Lemma 4.3 \cite{bambusicuccagna}, we get
\begin{equation}\label{flow51}\begin{aligned} &
q\left ( \omega (\mathcal{G}_1 (\rho ,U))\right ) = q\left (
\omega (U)\right ) -\frac{\rho }{ 2} + \mathcal{E}_{\omega} (\rho
,U)\, ,
\\& z_\ell (\mathcal{G}_1 (\rho ,U))=z_\ell (U)+ \mathcal{E}_{\ell
} (\rho ,U)\, , \\& f (\mathcal{G}_1 (\rho ,U))= f (U) +
\mathcal{E}_{f} (\rho ,U) \, ,
\end{aligned}
\end{equation}
with $\mathcal{E}_{\zeta } (\rho ,U)$ satisfying \eqref{flow8} for
$\zeta =\omega , z_\ell$ and \eqref{flow9} for $\zeta =f$. We have
$\mathcal{E}_{\zeta } ( U)=\mathcal{E}_{\zeta } (\| f\| _2^2 ,U)$
satisfying \eqref{flow6} for
$\zeta =\omega $ and \eqref{flow7} for $\zeta =z_\ell ,f$. \qed
We have:
\begin{lemma}
\label{lem:flow2}
Consider the flow $\mathcal{F}_t$ of Lemma \ref{lem:flow1}. Then we
have
\begin{equation} \label{eq:Darboux} \mathcal{F}_t^*\Omega _t=\Omega
_0 .
\end{equation}
We have
\begin{equation} \label{eq:QcircF}
Q\circ \mathcal{F}_1=q.
\end{equation}
If $\chi$ is a function with $\partial _\vartheta \chi \equiv 0$,
then $\partial _\vartheta (\chi \circ \mathcal{F}_t) \equiv 0$.
\end{lemma}
\proof \eqref{eq:Darboux} is the Darboux Theorem, see
\eqref{eq:dartheorem}.
Let $ \mathcal{G}_t= (\mathcal{F}_t) ^{-1}$. Then
$\mathcal{G}_t^*\Omega _0=\Omega _t$. We have
$\mathcal{G}_t^*X^0_{q(\omega)} =X^t_{q(\omega) \circ
\mathcal{G}_t}$ by
\begin{equation}\begin{aligned} & i _{\mathcal{G}_t^*X^0_{q(\omega)}}
\Omega _t= i _{\mathcal{G}_t^*X^0_{q(\omega)}}
\mathcal{G}_t^*\Omega _0= \mathcal{G}_t^*i _{X^0_{q(\omega)}}\Omega
_0=-{\rm i} d(q(\omega) \circ \mathcal{G}_t ) = i _{X^t_{q(\omega) \circ
\mathcal{G}_t}}\Omega _t.
\end{aligned} \nonumber
\end{equation}
Then by $\left [ \mathcal{X}^t,
\frac{\partial}{\partial \vartheta} \right ] =0$ for all $t$
\begin{equation}\frac{d}{dt}X^t_{q(\omega) \circ \mathcal{G}_t}
= \frac{d}{dt}
\mathcal{G}_t^*X^0_{q(\omega)} = - \frac{d}{dt}
\mathcal{G}_t^*\frac{\partial}{\partial \vartheta} =-
\mathcal{G}_t^*\left [ \mathcal{X}^{1-t}, \frac{\partial}{\partial
\vartheta} \right ] =0. \nonumber
\end{equation}
So $X_{{q(\omega)}\circ \mathcal{G}_1}^1= X_{q(\omega)}^0$. Since
by \eqref{eq:Ham.VecFieldQ} and \eqref{eq:HamVect0q} this implies $d
(q\circ \mathcal{G}_1) =dQ$ and since there are points with $q\circ
\mathcal{G}_1 (U)= Q (U) $, we obtain \eqref{eq:QcircF}. Finally,
the last statement of Lemma \ref{lem:flow2} follows by
\eqref{eq:L_X^t} and by
\begin{equation} \frac{\partial}{\partial \vartheta}\mathcal{F}_t^*
\chi = \left (\mathcal{F}_t^*\frac{\partial}{\partial
\vartheta}\right )\left (\mathcal{F}_t^* \chi \right )=
\mathcal{F}_t^* \left ( \frac{\partial}{\partial \vartheta}\chi
\right ) . \nonumber
\end{equation}
\qed
\section{Reformulation of \eqref{eq:SystK} in the new coordinates}
\label{section:reformulation}
We set
\begin{equation} \label{eq:newH} \begin{aligned} &
H=K\circ \mathcal{F}_1.
\end{aligned}
\end{equation}
In the new coordinates \eqref{eq:SystK} becomes
\begin{equation} \label{eq:SystK1} \begin{aligned} &
q' \dot \omega = \frac{\partial H}{\partial \vartheta}\equiv 0 \,
, \quad q' \dot \vartheta =
-\frac{\partial H}{\partial \omega}
\end{aligned}
\end{equation}
and
\begin{equation} \label{eq:SystK2} \begin{aligned} &
{\rm i} \dot z_j = \frac{\partial H}{\partial \overline{z}_j }
\, , \quad
{\rm i} {\dot {\overline{z}}}_j = - \frac{\partial H}{\partial {z}_j }
\\& {\rm i} \dot f= \sigma _3 \sigma _1 \nabla _f H. \end{aligned}
\end{equation}
Recall that we are solving the initial value problem \eqref{NLS} and
that we have chosen $\omega _0$ with $q(\omega _0)=\| u_0\|
_{L^2_x}^{2}.$ Correspondingly it is enough to focus on
\eqref{eq:SystK2} with $\omega =\omega _0$. For system
\eqref{eq:SystK2} we prove:
{\bf e}gin{theorem}\langlebel{theorem-1.2}
Then there exist $\varepsilon >0$
and $C>0$ such that for $ |z(0)|+\| f (0) \| _{H^1 }\le \epsilon
<\varepsilon $ the corresponding solution of \eqref{eq:SystK2} is
globally defined and there are $f_\pm \in H^1$ with $\| f_\pm\|
_{H^1 }\le C \epsilon $ such that
{\bf e}gin{equation}\langlebel{scattering1}\lim _{t\to \pm \infty } \| e^{ {\rm i} \vartheta (t) \sigma _3 } f(t) -
e^{ {\rm i} t \sigma _3 \Delta } f_\pm \| _{H^1 }=0
\end{equation}
where $\vartheta (t)$ is the variable associated to $U^T(t)=(u(t), \overline{u}(t))$ in \eqref{eq:anzatz} and \eqref{eq:coordinate}.
We also have
{\bf e}gin{equation}\langlebel{decay}\lim _{t\to \infty } z(t)=0.
\end{equation}
In particular, it is possible to write
$R(t,x)=A(t,x)+\widetilde{f}(t,x)$ with $|A(t,x)|\le C_N(t) \langle
x \rangle ^{-N}$ for any $N$, with $\lim _{t\to \infty }C_N(t)=0$
and such that for any admissible pair $(r,p)$, i.e.
\eqref{admissiblepair}, we have
{\bf e}gin{equation}\langlebel{Strichartz1}
\| \widetilde{f} \| _{L^r_t( \mathbb{R},
W^{ 1,p}_x)}\le
C\epsilon .
\end{equation}
\end{theorem}
By Lemma {\rm e}f{lem:flow1}, Theorem {\rm e}f{theorem-1.2} implies
Theorem {\rm e}f{theorem-1.1}. Indeed, if we denote $(\omega , z', f')$ the initial coordinates, and $(\omega _0 , z , f )$ the coordinates in \eqref{eq:SystK2},
we have $z'=z+O(|z|+ \| f \| _{L _x ^{2,-2}} )$ and $f'=f+O(|z|+ \| f \| _{L _x ^{2,-2}} )$. The two error terms $O$ converge to 0 as $t\to \infty$. Hence the asymptotic behavior of $( z', f')$ and of $( z , f )$ is the same. We also have
$
q\left ( \omega (t) \right ) = q\left (\omega _0 \right )
-\frac{\| f(t) \|_2^2 }{2} + O( |z(t)|+\| f(t) \| _{L^{2,-2 }_x} )
$ which implies, say at $+\infty$
{\bf e}gin{equation} {\bf e}gin{aligned}
& \lim _{t \to +\infty }
q\left ( \omega (t) \right ) = \lim _{t \to +\infty }
\left ( q\left (\omega _0 \right ) -\frac{\| e^{ {\rm i}t
\sigma _3 \Delta } f_+ \|_2^2 }{2}\right ) = q\left (\omega _0 \right )
-\frac{\| f_+ \|_2^2 }{2}=q(\omega _+)
\end{aligned} \nonumber
\end{equation}
where $\omega _+$ is the unique element near $\omega _0$ for which the last equality holds. So $ \lim _{t
\to +\infty } \omega (t) = \omega _+.$
In the rest of the paper we focus on Theorem {\rm e}f{theorem-1.2}. The
main idea is that \eqref{eq:SystK2} is basically like the system
considered in \cite{bambusicuccagna}. Therefore Theorem
{\rm e}f{theorem-1.2} follows by the Birkhoff normal forms argument of
\cite{bambusicuccagna}, supplemented with the various dispersive
estimates in \cite{cuccagnamizumachi}.
\subsection{Taylor expansions}
\langlebel{subsec:Taylor expansions}
Consider $U= e^{{\rm i} \sigma _3\vartheta } ({\mathcal P}i _\omega + R)$ as in
\eqref{eq:anzatz}. Decompose $R$ as in \eqref{eq:decomp2}. Set
$u=\varphi + u_c$ with $^t (P_c(\omega )f)=(u_c,\overline{u_c})$.
We have
{\bf e}gin{equation}\langlebel{ExpEP0}{\bf e}gin{aligned}
& B (|u |^2)=B \left ( |u_c |^2
\right ) +\int _0^1 \left [ \frac{\partial }{\partial u}
B (|u|^2 )_{|u=u_c+t\varphi} \varphi + \frac{\partial }
{\partial \overline{u}}
B (|u|^2 )_{|u=u_c+t\varphi} \overline{\varphi} \right ]
dt\\& = B \left ( |u_c |^2
\right ) +\int _0^1 dt \sum _{i+j\le 4} \frac{1}{i!j!}
\partial _{u}^{i+1} \partial _{\overline{u}}^j
B \left ( | u | ^2 \right )_{|u= t\varphi} u_c^i\overline{u_c}^j \varphi +\\&
\int _0^1 dt \sum _{i+j\le 4} \frac{1}{i!j!}
\partial _{u}^{i } \partial _{\overline{u}}^{j+1}
B \left ( | u | ^2 \right )_{|u= t\varphi} u_c^i\overline{u_c}^j
\overline{\varphi} +\\&
5\int _{[0,1]^2} dt ds (1-s)^4\sum _{i+j= 5} \frac{1}{i!j!}
\partial _{u}^{i+1 } \partial _{\overline{u}}^{j }
B \left ( | u| ^2 \right ) _{|u= t\varphi +su_c} u_c^i\overline{u_c}^j \varphi
+\\&
5\int _{[0,1]^2} dt ds (1-s)^4\sum _{i+j= 5} \frac{1}{i!j!}
\partial _{u}^{i } \partial _{\overline{u}}^{j+1}
B \left ( | u| ^2 \right ) _{|u= t\varphi +su_c} u_c^i\overline{u_c}^j
\overline{\varphi} .
\end{aligned}\end{equation}
{\bf e}gin{lemma}
\langlebel{lem:K} The following statements hold.
{\bf e}gin{equation} \langlebel{eq:ExpK} {\bf e}gin{aligned} & K =d(\omega )-\omega
\| u_0\| _2^2+K_2+K_P \\& K_2=\sum _j\langlembda _j (\omega ) |z_j|^2+
\frac{1}{2} \langlenglegle \sigma _3 \mathcal{H}_{\omega } f, \sigma _1
f\ranglenglegle \\& K_P = \sum _{|\mu +\nu |= 3} \langlenglegle a_{\mu \nu
}(\omega ,z ) , 1 \ranglenglegle z^\mu \overline{z}^\nu +\sum _{|\mu +\nu
|= 2} z^\mu \overline{z}^\nu \langlenglegle G_{\mu \nu }(\omega ,z
),\sigma _3\sigma _1P_c(\omega )f\ranglenglegle \\& + \sum _{d=2}^4
\langlenglegle B_{d } ( \omega , z ), (P_c(\omega )f)^{\otimes d} \ranglenglegle
+ \langlenglegle B_6 (\omega , f) , 1 \ranglenglegle +\int _{\mathbb{R}^3}
B_5(x,\omega, z , f(x) ) f^{\otimes 5}(x) dx,
\end{aligned}\nonumber
\end{equation}
for $ B_6 (x, \omega , f)=B \left ( \frac{|P_c(\omega )f(x)|^2
}{2}\right ),$
where we have what follows.
{\bf e}gin{itemize}
\item[(1)] $a_{\mu \nu
}( \cdot , \omega ,z ) \in C^\infty ( \mathrm{U},
H^{K,S}_x(\mathbb{R}^3,\mathbb{C})) $ for any pair $(K,S)$ and a
small neighborhood $\mathrm{U}$ of $(\omega _0,0)$ in
$\mathcal{O}\times \mathbb{C}^m$.
\item[(2)] $G_{\mu \nu
}( \cdot , \omega ,z ) \in C^\infty ( \mathrm{U},
H^{K,S}_x(\mathbb{R}^3,\mathbb{C}^2)) $, for $\mathrm{U}$ like in
(1), possibly smaller.
\item[(3)] $B_{d
}( \cdot , \omega ,z ) \in C^\infty ( \mathrm{U},
H^{K,S}_x(\mathbb{R}^3, B (
(\mathbb{C}^2)^{\otimes d},\mathbb{C} ))) $, for $2\le d \le 4$
for $\mathrm{U}$ possibly smaller.
\item[(4)] Let $^t\eta = (\zeta , \overline{\zeta}) $ for
$ \zeta \in \mathbb{C}$. Then for
$B_5(\cdot ,\omega , z , \eta )$ we have
{\bf e}gin{equation} \langlebel{5power2}{\bf e}gin{aligned} &\text{for any $l$ ,
} \| \nabla _{ \omega ,z,\overline{z} ,\zeta,\overline{\zeta} }
^lB_5( \omega ,z,\eta ) \| _{H^{K,S}_x (\mathbb{R}^3, B (
(\mathbb{C}^2)^{\otimes 5},\mathbb{C} )} \le C_l .
\end{aligned}\nonumber \end{equation}
\item[(5)] We have $a_{\mu \nu }=\overline{a}_{\nu \mu }$, $G_{\mu \nu }
=-\sigma _1\overline{G}_{\nu \mu } $.
\end{itemize}
\end{lemma}
\proof The expansion for $K$ is a consequence of well-known
cancellations. (1)--(4) follow from \eqref{ExpEP0} and elementary
calculus. (5) follows from the fact that $K(U)$ is real valued for
$\overline{U}=\sigma _1U$.\qed
For $j\in \{ 1,\dots ,m \}$ let $\delta _j$ be the multi-index
$\delta _j=( \delta _{1j}, \dots , \delta _{mj}).$ Let $\langlembda
_j^0=\langlembda _j(\omega _0)$ and $\langlembda ^0 = (\langlembda _1^0, \cdots,
\langlembda _m^0)$.
{\bf e}gin{lemma}
\langlebel{lem:ExpH} Let $H=K\circ \mathcal{F} _1$. Then, at $
e^{i\sigma _3 \vartheta} {\mathcal P}i _{\omega _0}$
we have the
expansion
{\bf e}gin{equation} \langlebel{eq:ExpH1} {\bf e}gin{aligned} & H =d(\omega _0)
-\omega _0\| u_0\| _2^2+ \psi (\|f\| _2^2) +H_2 ^{(1)}+{\mathcal R} ^{(1)}
\end{aligned}
\end{equation} for $\omega =\omega _0$, where the following holds.
{\bf e}gin{itemize}
\item[(1)]
We have for $r=1$
{\bf e}gin{equation} \langlebel{eq:ExpH2} H_2 ^{(r)}=
\sum _{\substack{ |\mu +\nu |=2\\
\langlembda ^0\cdot (\mu -\nu )=0}}
a_{\mu \nu}^{(r)}( \| f\| _2^2 ) z^\mu
\overline{z}^\nu + \frac{1}{2} \langlenglegle \sigma _3 \mathcal{H}_{\omega
_0} f, \sigma _1 f\ranglenglegle .
\end{equation}
\item[(2)] We have ${\mathcal R} ^{(1)}=\widetilde{{\mathcal R} ^{(1)}} +
\widetilde{{\mathcal R} ^{(2)}} $, with $\widetilde{{\mathcal R} ^{(1)}}=$
{\bf e}gin{equation} \langlebel{eq:ExpH2resto} {\bf e}gin{aligned} &
=\sum _{\substack{ |\mu +\nu |=2\\
\langlembda ^0\cdot (\mu -\nu )\neq 0 }} a_{\mu \nu }^{(1)}(\| f\| _2^2
)z^\mu \overline{z}^\nu +\sum _{|\mu +\nu | = 1} z^\mu
\overline{z}^\nu \langlenglegle \sigma _1 \sigma _3G_{\mu \nu }(\| f\| _2^2
),f\ranglenglegle ,\\& \widetilde{{\mathcal R} ^{(2)}}= \sum _{|\mu +\nu | = 3 }
z^\mu \overline{z}^\nu \int _{\mathbb{R}^3}a_{\mu \nu }
(x,z,f,f(x),\| f\| _2^2 ) dx\\& + \sum _{|\mu +\nu | =2 }z^\mu
\overline{z}^\nu \int _{\mathbb{R}^3} \left [ \sigma _1 \sigma
_3G_{\mu \nu } (x,z,f,f(x), \| f\| _2^2 )\right ]^*f(x) dx \\&
+\sum _{j=2}^5 {\mathcal R} ^{(1)} _j +
\int _{\mathbb{R}^3}B (|f(x)|^2/2 ) dx + \widehat{{\mathcal R}}
^{(1)} _2(z,f, \| f\| _2^2 ) \\& \text{with }
{\mathcal R} ^{(1)} _j =\int _{\mathbb{R}^3} F_j(x,z ,f, f(x),\| f\|
_2^2) f^{\otimes j}(x) dx .
\end{aligned}
\end{equation}
\item[(3)] We have $F_2(x,0 ,0, 0,0)=0 $.
\item[(4)] $\psi (s)$ is smooth with $\psi (0)=\psi ' (0)=0$.
\item[(5)] At $\| f\| _2=0$ with $r=1$
{\bf e}gin{equation} \langlebel{eq:ExpHcoeff1} {\bf e}gin{aligned} &
a_{\mu \nu }^{(r)}( 0 ) =0 \text{ for $|\mu +\nu | = 2$ with $(\mu
, \nu )\neq (\delta _j, \delta _j)$ for all $j$,} \\& a_{\delta _j
\delta _j }^{(r)}( 0 ) =\langlembda _j (\omega _0) , \text{ where
$\delta _j=( \delta _{1j}, ..., \delta _{mj}),$}
\\& G_{\mu \nu }( 0 ) =0 \text{ for $|\mu +\nu | = 1$ }
\end{aligned}
\end{equation}
These $a_{\mu \nu }^{(r)}( \varrho )$ and $G_{\mu \nu }( x,\varrho
)$ are smooth in all variables with $G_{\mu \nu }( \cdot ,\varrho )
\in C^\infty ( \mathbb{R}, H^{K,S} _x(\mathbb{R}^3,\mathbb{C}^2))$
for all $(K,S)$.
\item[(6)] We have for all indexes and for $r=1$
{\bf e}gin{equation} \langlebel{eq:ExpHcoeff2} {\bf e}gin{aligned} & a_{\mu \nu }^{(r)} =
\overline{a}_{\nu \mu }^{(r)}\, , \quad a_{\mu \nu }
=\overline{a}_{\nu \mu }\, , \quad G_{\mu \nu } =-\sigma
_1\overline{G}_{\nu \mu } .
\end{aligned}
\end{equation}
\item[(7)] Let $^t\eta = (\zeta , \overline{\zeta}) $ for
$ \zeta \in \mathbb{C}$. For all $(K,S, K',S')$ there is a neighborhood $
\mathcal{U}^{-K',-S'}$ of $ \{ ( 0, 0) \}$ in
$\widetilde{{\mathcal P}}^{-K',-S'}$, see \eqref{eq:PhaseSpace}, such that
we have, for
$a_{\mu \nu } (x, z,f,\eta , \varrho )$ with
$(z,f,\zeta , \varrho )\in \mathcal{U}^{-K',-S'}\times \mathbb{C} \times \R$
{\bf e}gin{equation}\langlebel{eq:coeff a2} \| \nabla _{
z,\overline{z},\zeta,\overline{\zeta},f,\varrho} ^l a_{\mu \nu } \|
_{ H^{K,S}_x(\mathbb{R}^3,\mathbb{C})} \le C_l \text{ for all $l$}.
\end{equation}
\item[(8)] Possibly restricting $\mathcal{U}^{-K',-S'}$, we have also, for
$G_{\mu \nu } (x , z,f,g ,
\varrho ) $,
{\bf e}gin{equation}\langlebel{eq:coeff G2}
\| \nabla _{ z,\overline{z},\zeta,\overline{\zeta},f,\varrho} ^l
G_{\mu \nu } \| _{ H^{K,S}_x(\mathbb{R}^3,\mathbb{C}^2)} \le C_l
\text{ for all $l$}.\end{equation}
\item[(9)] Restricting $\mathcal{U}^{-K',-S'}$ further, we have also,
for $F_j (x ,z,f,g,\varrho )$,
{\bf e}gin{equation} {\bf e}gin{aligned} &
\| \nabla _{ z,\overline{z},\zeta,\overline{\zeta},f,\varrho} ^l F_j
\| _{ H^{K,S}_x(\mathbb{R}^3,B (
(\mathbb{C}^2)^{\otimes j},\mathbb{C} ))} \le C_l
\text{ for all $l$}
.
\end{aligned}\nonumber \end{equation}
\item[(10)] Restricting $\mathcal{U}^{-K',-S'}$
further, we have $\widehat{{\mathcal R}} ^{(1)}
_2(z,f, \varrho ) \in C^\infty ( \mathcal{U}^{-K',-S'}\times
\R , \R)
$ with {\bf e}gin{equation} {\bf e}gin{aligned} & |\widehat{{\mathcal R}} ^{(1)}
_2(z,f, \varrho ) | \le C (|z|+|\varrho |+ \| f \| _{ H^{-K',-S'}})
\| f \| _{ H^{-K',-S'}}^2.
\end{aligned}\nonumber \end{equation}
\end{itemize}
\end{lemma}
\proof By $\mathcal{F}_1({\mathcal P}i _{\omega _0})= {\mathcal P}i _{\omega _0}$,
$K'({\mathcal P}i _{\omega _0})=0$ and $\|
\mathcal{F}_1(U) -U\| _{{\mathcal P} ^{K,S}}\lesssim \| R \| _{L^2}^2$ we
conclude $H'({\mathcal P}i _{\omega _0})=0$ and $H''({\mathcal P}i _{\omega _0})
=K''({\mathcal P}i _{\omega _0})$. In particular, this yields the formula for
$H_2^{(1)}$ for $\| f\| _2=0$. The other terms are obtained by
substituting in \eqref{eq:ExpK} the formulas \eqref{flow5}. By
$\langlenglegle \sigma _3 f, \sigma _1 f\ranglenglegle =0$ we have $\langlenglegle
\sigma _3 \mathcal{H}_{\omega _0 +\delta \omega } f, \sigma _1
f\ranglenglegle =\langlenglegle \sigma _3 \mathcal{H}_{\omega _0 } f, \sigma _1
f\ranglenglegle + \widetilde{F}_2$ where $\widetilde{F}_2$ can be absorbed
in $j=2$ in \eqref{eq:ExpH2resto}. $\psi (\| f\| _2^2)$ arises from
$d(\omega \circ \mathcal{F}_1)-\omega \circ \mathcal{F}_1\| u_0\|
_2^2$. Other terms coming from the latter end up in
\eqref{eq:ExpH2resto}: in particular there are no monomials $\| f\|
_2^j z^\mu \overline{z}^\nu \langlenglegle G, f\ranglenglegle ^i$ with $|\mu +\nu
|+i=1$, because of \eqref{flow6} (applied for $\omega =\omega _0$).
\qed
\section{Canonical transformations}
\langlebel{sec:Canonical} Our goal in this section is to prove the
following result.
{\bf e}gin{theorem}
\langlebel{th:main} For any integer $r\ge 2$ there are a
neighborhood $ \mathcal{U}^{1,0}$ of $ \{ ( 0, 0) \}$ in
$\widetilde{{\mathcal P}}^{1,0}$, see \eqref{eq:PhaseSpace}, and a smooth
canonical transformation ${\mathcal T}_r: \mathcal{U}^{1,0}\to
\widetilde{{\mathcal P}}^{1 ,0}$ s.t.
{\bf e}gin{equation}
\langlebel{eq:bir1} H^{(r)}:=H\circ {\mathcal T}_r=d(\omega _0) -\omega _0\|
u_0\| _2^2+ \psi (\|f\| _2^2)+H_2^{(r)}+Z^{(r)}+{\mathcal R}^{(r)},
\end{equation}
where:
{\bf e}gin{itemize}
\item[(i)] $H_2^{(r)}=H_2^{(2)}$ for $r\ge 2$, is of the
form \eqref{eq:ExpH2}
where $a_{\mu \nu}^{(r)}(\| f\| _2)$ satisfy
\eqref{eq:ExpHcoeff1}--\eqref{eq:ExpHcoeff2};
\item[(ii)]$Z^{(r)}$ is in normal form, in the sense of Definition {\rm e}f{def:normal form} below, with monomials of
degree $\le r$ whose coefficients satisfy \eqref{eq:ExpHcoeff2};
\item[(iii)] the transformation ${\mathcal T} _r$ is of the form (see below)
\eqref{lie.11.a}--
\eqref{lie.11.b} and satisfies \eqref{lie.11.f}--\eqref{lie.11.c}
for $M_0=1$;
\item[(iv)] we have ${\mathcal R}^{(r)} = \sum _{d=0}^6{\mathcal R}^{(r)}_d$
with the following properties:
{\bf e}gin{itemize}
\item[(iv.0)] for all $(K,S, K',S')$ there is a neighborhood
$ \mathcal{U}^{-K',-S'}$ of $ \{ ( 0, 0) \}$ in
$\widetilde{{\mathcal P}}^{-K',-S'}$
such that
{\bf e}gin{equation} {\mathcal R}^{(r)}_0=
\sum _{|\mu +\nu | = r+1 } z^\mu \overline{z}^\nu \int
_{\mathbb{R}^3}a_{\mu \nu }^{(r)}(x,z,f,f(x),\| f\| _2^2 ) dx
\nonumber
\end{equation}
and for $a_{\mu \nu }^{(r)}(z,f,\eta , \varrho )$ with $^t\eta =
(\zeta , \overline{\zeta} )$, $\zeta \in \mathbb{C}$ we have for $(z,f)\in
\mathcal{U}^{-K',-S'}$ and $|\varrho |\le 1$
{\bf e}gin{equation}\langlebel{eq:coeff a} \|\nabla _{ z,\overline{z},
\zeta,\overline{\zeta},f,\varrho} ^l a_{\mu \nu }^{(r)}(\cdot
,z,f,\eta , \varrho )\| _{H^{K,S}(\R ^3, \mathbb{C} )} \le C_l \text{ for
all $l$};
\end{equation}
\item[(iv.1)] possibly taking $\mathcal{U}^{-K',-S'}$
smaller, we have
{\bf e}gin{equation} {\mathcal R}^{(r)}_1=
\sum _{|\mu +\nu | = r }z^\mu \overline{z}^\nu \int
_{\mathbb{R}^3} \left [ \sigma _1 \sigma _3G_{\mu \nu
}^{(r)}(x,z,f,f(x), \| f\| _2^2 )\right ]^*f(x) dx \nonumber
\end{equation}
{\bf e}gin{equation}\langlebel{eq:coeff G}\text{with } \|\nabla _{ z,\overline{z},
\zeta,\overline{\zeta},f,\varrho} ^l G_{\mu \nu }^{(r)}(\cdot
,z,f,\eta , \varrho )\| _{H^{K,S}(\R ^3, \mathbb{C} ^2)} \le C_l \text{ for
all $l$};
\end{equation}
\item[(iv.2--5)] possibly taking $\mathcal{U}^{-K',-S'}$
smaller, we have for $2\le d \le 5$,
{\bf e}gin{equation} {\mathcal R}^{(r)}_d=
\int
_{\mathbb{R}^3} F_d^{(r)}(x, z ,f,f(x),\| f\| _2^2) f^{\otimes
d}(x) dx + \widehat{{\mathcal R}}^{(r)}_d,\nonumber
\end{equation} with for
any $l$
{\bf e}gin{equation}\langlebel{eq:coeff F} \|\nabla _{ z,\overline{z},
\zeta,\overline{\zeta},f,\varrho} ^l F_d^{(r)} (\cdot ,z,f,\eta ,
\varrho )\| _{H^{K,S} (\mathbb{R}^3, B (
(\mathbb{C}^2)^{\otimes d},\mathbb{C} )} \le C_l,
\end{equation}
with $F_2^{(r)}(x, 0 ,0,0,0)=0$ and
with $\widehat{{\mathcal R}}^{(r)}_d (z ,f, \| f\| _2^2)$ s.t.
{\bf e}gin{equation}\langlebel{eq:Rhat}{\bf e}gin{aligned} &
\widehat{{\mathcal R}}^{(r)}_d (z ,f, \varrho )\in C^\infty
(\mathcal{U}^{-K',-S'}\times \R ,\R ), \\& |
\widehat{{\mathcal R}}^{(r)}_d (z ,f, \varrho )|\le C \| f \|
_{H^{-K',-S'}}^d , \\& | \widehat{{\mathcal R}}^{(r)}_2 (z ,f, \varrho
)| \le C (|z|+|\varrho |+ \| f \| _{ H^{-K',-S'}}) \| f \| _{
H^{-K',-S'}}^2;
\end{aligned}\end{equation}
\item[(iv.6)] $ {\mathcal R}^{(r)}_6=
\int _{\mathbb{R}^3} B ( | f (x)| ^2/2) dx$.
\end{itemize}
\end{itemize}
\end{theorem}
We develop the proof in the following subsections. The basic ideas
are classical. However we need to develop a number of tools, along
the lines of \cite{bambusicuccagna}. The situation here is more
complicated than in \cite{bambusicuccagna} because of the dependence of the coefficients on $\| f\| _2$.
\subsection{Lie transform}
\langlebel{subsec:LieTransf} We consider functions
{\bf e}gin{equation}
\langlebel{chi.1} \chi = \sum_{|\mu +\nu |=M_0 +1}b_{\mu \nu
}(\| f\| _2^2) z^{\mu} \overline{z}^{\nu} + \sum_{|\mu +\nu |=M_0
}z^{\mu} \overline{z}^{\nu}
\langlenglegle \sigma _1\sigma _3B_{\mu \nu
}(\| f\| _2^2)
, f \ranglenglegle \end{equation}
where $ b_{\mu \nu }(\varrho )\in C^{\infty
}(\mathbb{R}_\varrho , \mathbb{C})$ and $ B_{\mu \nu }(x,\varrho)
\in C^{\infty }(\mathbb{R} , P_c(\omega _0)H^{k,s}_x(\mathbb{R}^3,
\mathbb{C}^2))$ for all $k$ and $s$. Assume
{\bf e}gin{equation} \langlebel{chi.11} b_{\mu \nu } =\overline{b}_{\nu
\mu }\text{ and }\sigma _1 B_{\mu \nu }=-\overline{B}_{ \nu \mu}
\text{ for all indexes}. \end{equation} We set, for $K>0$ and $S>0$
fixed and large,
{\bf e}gin{equation} \langlebel{chi.12} \| \chi \| = \| \chi (\| f\|
_2^2)\| =
\sum |b_{\mu \nu }(\| f\| _2^2)|+\sum \|B_{\mu \nu }(\| f\|
_2^2)\| _{H^{ K , S }}. \end{equation} Denote by $\phi ^t$ the flow
of the Hamiltonian vector field $X_{\chi}$ ( from now on with
respect to $\Omega _0$ and only in $(z,f)$). The {\it Lie transform}
$\phi =
\phi^t\big|_{t=1}$ is defined in a sufficiently small neighborhood
of the origin and is a canonical transformation.
{\bf e}gin{lemma}\langlebel{lie_trans}
Consider the $\chi$ in \eqref{chi.1} and its Lie transform $\phi$.
Set $(z',f')=\phi (z,f)$. Then there are $ \mathcal{G}(z,f,
\varrho)$, $\Gamma (z,f,\varrho)$, $\Gamma _0 (z,f,\rho)$ and
$\Gamma _1 (z,f,\rho)$ with the following properties.
{\bf e}gin{itemize}
\item[(1)] $\Gamma \in C^\infty
( \U ^{-K',-S'}, \mathbb{C}^{ m}) $, $\Gamma _0, \Gamma _1\in
C^\infty ( \U ^{-K',-S'}, \mathbb{R}) $, with $\U ^{-K',-S'}\subset
\mathbb{C}^{ m} \times H^{-K',-S'}_c(\omega _0)\times \mathbb{R}$ an
appropriately small neighborhood of the origin.
\item[(2)] $ \mathcal{G}\in C^\infty
(\U ^{-K',-S'} , H^{K,S}_c(\omega _0) ) $ for any $K,S$.
\item[(3)] The transformation $\phi$ is of the following form:
{\bf e}gin{eqnarray}
\langlebel{lie.11.a}& z' = z +
\Gamma (z,f,\| f\| _2^2) ,\\
\langlebel{lie.11.b} & f' = e^{ {\rm i} \Gamma _0 (z,f,\| f\|
_2^2)P_c(\omega _0) \sigma _3}f +\mathcal{G}(z,f,\| f\| _2^2) .
\end{eqnarray}
\item[(4)] We have
{\bf e}gin{eqnarray} \langlebel{lie.11.h} & \| f'\| _2^2 =
\| f \| _2^2 +
\Gamma _1(z,f,\| f\| _2^2),
\\& \nonumber
\left | \Gamma _1(z,f,\| f\| _2^2) \right | \le \\&
\langlebel{lie.11.f} C |z|^{M_0 -1} ( |z|^{M_0+2 } + |z|^{2 }\| f \|
_{H^{-K',-S'}} + \| f \| _{H^{-K',-S'}} ^3) .
\end{eqnarray}
\item[(5)] There are constants $c_{K',S'}$ and $c_{K, S,K',S'}$
such that {\bf e}gin{equation}\langlebel{lie.11.c}{\bf e}gin{aligned}&
|\Gamma (z,f,\| f\| _2^2)| \leq c_{K',S'} (\| \chi \| +\text{\eqref{lie.11.f}}) |z | ^{M_0-1}
( |z|+ \norma{f }
_{H^{-K',-S'}} ), \\ & \|\mathcal{G}(z,f,\| f\|
_2^2 )\|_{H^{K,S}}\leq c_{K, S,K',S'} (\| \chi \| +\text{\eqref{lie.11.f}}) |z | ^{{M_0} } , \\
& |\Gamma _{0}(z,f,\| f\| _2^2)| \leq c_{K',S'} |z |^{M_0-1 } ( |z | +\| f \|
_{H^{ -K',-S'}} )^2.
\end{aligned}\end{equation}
\item[(6)] We have
{\bf e}gin{equation} \langlebel{eq:prime f1}
{\bf e}gin{aligned} & e^{ {\rm i} \Gamma _0P_c(\omega _0) \sigma _3 }
=e^{ {\rm i} \Gamma _0 \sigma _3 } +T (\Gamma _0),
\end{aligned}
\end{equation}
where $T (r)\in C^\infty (\mathbb{R}, B ( H ^{-K',-S'},H ^{ K , S
}) ) $ for all $(K,S,K',S')$, with norm $ \| T (r) \| _{B ( H
^{-K',-S'},H ^{ K , S })} \le C (K,S,K',S') |r|.$ More
specifically, the range of $T(r)$ is $R(T(r))\subseteq L^2_d
(\mathcal{H})+L^2_d (\mathcal{H}^*), $ $L^2_d$ defined two lines after
\eqref{eq:spectraldecomp}.
\end{itemize}
\end{lemma}
\proof Claim (6) can be proved independently of the properties of $\Gamma_0$. Recall that
$P_c(\omega )=
1-P_d(\omega )$, see below \eqref{eq:spectraldecomp}, with $P_d(\omega )$ smoothing and of finite rank. Exploiting $\sigma _3P_d(\omega
)=P_d^*(\omega )\sigma _3$ it is elementary to prove
{\bf e}gin{equation} \langlebel{eq:prime f}
{\bf e}gin{aligned} & e^{ {\rm i} \Gamma _0P_c(\omega _0) \sigma _3 }
=e^{ {\rm i} \Gamma _0 \sigma _3 } +T(\Gamma _0) \text{ with
}T(\Gamma _0)= - {\rm i} \sin \left (\Gamma _0\right ) P_d(\omega _0)
\sigma _3+\\&
+ \sum _{n=2}^\infty
\frac{ ({\rm i} \Gamma _0 )^n}{n!} \sum _{j=1}^{\left [\frac{n}{2}\right
]}\left(
{\bf e}gin{matrix}
\left [\frac{n}{2}\right ]\\ j
\end{matrix} \right) K^j ( P_c(\omega _0) \sigma _3)
^{\varepsilon (n)},
\end{aligned}
\end{equation}
with $K=P_d(\omega _0)P_d^*(\omega _0)-P_d(\omega _0)-P_d^*(\omega
_0)$ and $\varepsilon (n) = \frac{1-(-1)^n}{2}$. $T(\Gamma _0)$ has
the properties of Claim (6).
In the sequel we prove Claims (1)--(5).
Set $\varrho = \| f \| _2^2$. For $b_{\mu \nu} '$ and
$B_{\mu \nu}'$ derivatives with respect to $\varrho$, summing on repeated
indexes, consider
{\bf e}gin{equation}\langlebel{eq:gam}{\bf e}gin{aligned}&
\gamma (z,f ,\varrho):=2(
b_{\mu \nu} '(\varrho)z^\mu \overline{z}^\nu+ \langlenglegle
\sigma _1\sigma _3 B_{\mu \nu}'(\varrho), f\ranglenglegle z^\mu
\overline{z}^\nu ) .
\end{aligned}
\end{equation}
If $\sigma _1f=\overline{f}$, then $\gamma
(z,f ,\varrho)\in \mathbb{R}$ by \eqref{chi.11}. We set up the following system:
{\bf e}gin{equation}\langlebel{auxHamEq1}{\bf e}gin{aligned}& {\rm i} \dot z_j
=\sum _{|\mu + \nu |=M_0+1}\nu _j \frac{z^\mu \overline{z}^\nu}{\overline{z}_j}b_{\mu
\nu}(\varrho)+\sum _{|\mu + \nu |=M_0 }\nu _j \frac{z^\mu
\overline{z}^\nu}{\overline{z}_j}\langlenglegle \sigma _1\sigma _3 B_{\mu
\nu}(\varrho), f\ranglenglegle \\& {\rm i} \dot f =\sum _{|\mu + \nu |=M_0 }z^\mu \overline{z}^\nu
B_{\mu \nu}(\varrho) + \gamma (z,f,\varrho ) P_c(\omega _0 )\sigma
_3 f
\\& \dot \varrho =- 2{\rm i} \langlenglegle \sum _{|\mu + \nu |=M_0 } z^\mu \overline{z}^\nu
B_{\mu \nu}(\varrho) + \gamma (z,f,\varrho ) (P_c(\omega _0
)-P_c^*(\omega _0 ))\sigma _3 f, \sigma _1 f \ranglenglegle ,
\end{aligned}
\end{equation}
where in the last equation we exploited $\langlenglegle \sigma _3 f, \sigma
_1 f\ranglenglegle =0.$ By \eqref{chi.11} the flow leaves the set with
$\sigma _1f=\overline{f}$ and $\varrho \in \mathbb{R}$ invariant. In
particular, the set where $\varrho = \| f \| _2^2$ is invariant
under the flow of \eqref{auxHamEq1}. In a neighborhood of 0 the
lifespan of the solutions is larger than
1.
\eqref{lie.11.a} can always be written. For $\gamma$ defined in \eqref{eq:gam}, we have
{\bf e}gin{equation} \langlebel{eq:Solsystemchi}
{\bf e}gin{aligned} &
f (t ) = e^{- {\rm i} \int _0^t
\gamma ds P_c(\omega _0) \sigma _3} f -\sum _{|\mu + \nu |=M_0}
{\rm i} \int _0^t z^\mu \overline{z}^\nu
e^{ {\rm i} \int _s^t
\gamma ds' P_c(\omega _0) \sigma _3}
B_{\mu \nu} ds . \end{aligned}\nonumber
\end{equation}
This yields \eqref{lie.11.b}. We can always write
{\bf e}gin{equation} \langlebel{eq:lie.rho1} {\bf e}gin{aligned} & \varrho '=\varrho +\Gamma _1
(z,f, \varrho ) .\end{aligned}
\end{equation}
This yields \eqref{lie.11.h}.
Claims (1)--(2) follow from the smooth dependence of the
flow of \eqref{auxHamEq1} on the initial data. By \eqref{auxHamEq1} we get
{\bf e}gin{equation} \langlebel{eq:closing1} {\bf e}gin{aligned} &
|\varrho (t) - \varrho | \le C \sup _{0\le t'\le t} |z(t')|^{M_0 -1} ( |z(t')|^{M_0+2 } +\\& + |z(t')|^{2 } \| f(t') \|
_{H^{-K',-S'}} + \| f(t') \| _{H^{-K',-S'}} ^3) .\end{aligned}
\end{equation}
Similarly we have
{\bf e}gin{equation} \langlebel{eq:closing2} {\bf e}gin{aligned} &
|z (t) - z | \le C \sup _{0\le t'\le t} |z(t')|^{M_0 - 1} \| \chi (\varrho (t')) \| ( |z(t')| + \| f(t') \|
_{H^{-K',-S'}} ) , \end{aligned}
\end{equation}
{\bf e}gin{equation} \langlebel{eq:closing3} {\bf e}gin{aligned} &
\| \int _0^t z^\mu \overline{z}^\nu
e^{ {\rm i} \int _s^t
\gamma ds' P_c(\omega _0) \sigma _3}
B_{\mu \nu} ds\| _{H^{K,S}} \le C \sup _{0\le t'\le t}|z(t')|^{M_0 } \| \chi (\varrho (t')) \| .
\end{aligned}
\end{equation}
Then $|z(t)|\approx |z|+ \| f \|
_{H^{-K',-S'}}$ and, in particular, $|z(t)|\approx |z|$ when $M_0>1$.
By Claim (6) and by the fact that the exponent $\Gamma _0(z,f,\varrho )$ in \eqref{lie.11.b} is a uniformly bounded function, we get
$\| f (t) \|
_{H^{ -K',-S'}}\approx |z|+ \| f \|
_{H^{-K',-S'}}$. Then
{\bf e}gin{equation} \langlebel{eq:closing4} {\bf e}gin{aligned} & \left |
\| \chi (\varrho (t')) \| - \| \chi (\varrho ) \| \right |\le \text{\eqref{lie.11.f}}.
\end{aligned}
\end{equation}
This implies that the right hand sides of \eqref{eq:closing1}--\eqref{eq:closing3} are bounded by the bounds of
$\Gamma _1$, $\Gamma $ and $\mathcal{G}$ in the statement. This
yields the desired bounds on $\Gamma _1$, $\Gamma $ and $\mathcal{G}$.
The bound on $\Gamma _0$ follows from
{\bf e}gin{equation} \langlebel{eq:closing5} {\bf e}gin{aligned} & |\int _0^t\gamma (t') dt'|\le C \sup _{0\le t'\le t} |z(t')|^{M_0 } ( |z(t')| +\| f (t') \|
_{H^{ -K',-S'}} ) \\& \le C_1 |z |^{M_0-1 } ( |z | +\| f \|
_{H^{ -K',-S'}} )^2 .
\end{aligned}
\end{equation}
\qed
\subsection{Normal form}
\langlebel{subsec:Normal form}
Recall the notation $\langlembda
_j^0=\langlembda _j(\omega _0)$ and $\delta _j= (\delta _{1j},...,
\delta _{mj})$, see before Lemma {\rm e}f{lem:ExpH}. Let $\mathcal{H}=\mathcal{H }_{\omega _{0}}
P_c(\mathcal{H} _{\omega _{0}} )$. For $r\ge 1$, using the
coefficients in \eqref{eq:ExpH2} of the $H^{(r)}_2$ in Theorem
{\rm e}f{th:main}, let
{\bf e}gin{equation} \langlebel{eq:lambda}\langlembda _j^{(r)}=
\langlembda _j^{(r)} ( \| f\| _2^2 ) =\langlembda _j (\omega _0) + a
_{\delta _j\delta _j} ^{(r)}(\| f\| _2^2 ), \quad \langlembda ^{(r)}=
(\langlembda _1^{(r)}, \cdots, \langlembda _m^{(r)}).\end{equation}
{\bf e}gin{definition}
\langlebel{def:normal form} A function $Z(z,f)$ is in normal form if it
is of the form
{\bf e}gin{equation}
\langlebel{e.12} Z=Z_0+Z_1
\end{equation}
where we have finite sums of the following types:
{\bf e}gin{equation}
\langlebel{e.12a}Z_1= \sum _{|\langlembda ^0 \cdot(\nu-\mu)|>\omega _{0}}
z^\mu \overline{z}^\nu \langlenglegle \sigma _1\sigma _3 G_{\mu \nu}(\| f
\| _2^2 ),f\ranglenglegle
\end{equation}
with $G_{\mu \nu}( x,\varrho )\in C^\infty ( \mathbb{
R}_{\varrho},H_x^{K,S})$ for all $K$, $S$;
{\bf e}gin{equation}
\langlebel{e.12c}Z_0= \sum _{\langlembda ^0\cdot(\mu-\nu)=0} a_{\mu , \nu}
(\| f \| _2^2)z^\mu \overline{z}^\nu
\end{equation}
and $a_{\mu , \nu} (\varrho )\in C^\infty ( \mathbb{ R}_\varrho ,
\mathbb{C})$. We will always assume the symmetries
\eqref{eq:ExpHcoeff2}. \qed\end{definition}
For an $H_2^{(r)}$ as in \eqref{eq:ExpH2} let
$H_2^{(r)}=D_2^{(r)} +(H_2^{(r)}-D_2^{(r)})$ where
{\bf e}gin{equation} \langlebel{eq:Diag} D_2^{(r)}=
\sum _{j=1}^m
\langlembda _j ^{(r)} ( \| f\| _2^2 ) |z_j|^2 + \frac{1}{2} \langlenglegle \sigma _3 \mathcal{H}_{\omega
_0} f, \sigma _1 f\ranglenglegle .
\end{equation}
In the following formulas we set $\langlembda _j= \langlembda _j ^{(r)}$,
$\langlembda = \langlembda
^{(r)}$ and $D_2=D_2^{(r)}$.
We recall ($\langlembda _j' (\varrho)$ is the derivative in $\varrho$)
that by \eqref{eq:PoissonBracket}, summing on repeated indexes,
{\bf e}gin{equation} \langlebel{PoissBra1}
{\bf e}gin{aligned} &\{ D_2, F \} :=dD_2 (X_F)= \partial _j D_2
(X_F)_j+
\partial _{\overline{j}}
D_2 (X_F)_{\overline{j}} +\langlenglegle \nabla _fD_2, (X_F)_f\ranglenglegle \\ &=
-{\rm i} \partial _j D_2\partial _{\overline{j}} F+{\rm i} \partial
_{\overline{j}} D_2\partial _jF- {\rm i} \langlenglegle \nabla _fD_2, \sigma
_3 \sigma _1\nabla _fF \ranglenglegle =\\& {\rm i} \langlembda _j {z}_j\partial
_jF - {\rm i} \langlembda _j \overline{z}_j
\partial _{\overline{j}} F+{\rm i} \langlenglegle \mathcal{H}f,
\nabla _fF \ranglenglegle +2{\rm i} \langlembda _j'
(\| f \| _2^2) |z_j|^2 \langlenglegle f , \sigma _3 \nabla _fF \ranglenglegle
.
\end{aligned}
\end{equation}
In particular, we have, for $G=G(x)$, (we use $\sigma _1{\rm i}
\sigma _2=\sigma _3$)
{\bf e}gin{equation} \langlebel{PoissBra2} {\bf e}gin{aligned} & \{
D_2, z^{\mu} \overline{z}^{\nu} \} = {\rm i} \langlembda \cdot (\mu -
\nu) z^{\mu} \overline{z}^{\nu} ,\\& \{ D_2, \langlenglegle \sigma _1\sigma
_3 G ,f\ranglenglegle \} = -{\rm i} \langlenglegle f ,\sigma _1\sigma _3 \mathcal{H}G
\ranglenglegle -2 \, {\rm i} \sum _{j=1}^{m}\langlembda ' _j |z_j|^2 \langlenglegle \sigma
_1 f,G\ranglenglegle ,
\\& \{ D_2, \frac{1}{2} \| f\| _2^2 \} =\{ D_2, \frac{1}{2} \langlenglegle
f , \sigma _1 f \ranglenglegle \} ={\rm i} \langlenglegle \mathcal{H}f,
\sigma _1 f \ranglenglegle = -{\rm i} \langlenglegle
{\bf e}ta ^\prime (\phi ^2 )\phi ^2 \sigma _3 f,
f \ranglenglegle .
\end{aligned}
\end{equation}
In the sequel we will assume (and prove) that $\| f\| _2$ is small.
We will consider only $|\mu +\nu |\le 2N +3$. Then, $\langlembda
^0\cdot(\mu-\nu)\neq 0$ implies $|\langlembda ^0\cdot(\mu-\nu)|\ge c >0$
for some fixed $c$, and so we can assume also $|\langlembda
\cdot(\mu-\nu)|\ge c/2$. Similarly $|\langlembda
^0\cdot(\mu-\nu)|<\omega _{0} $ (resp. $|\langlembda
^0\cdot(\mu-\nu)|>\omega _{0} $) will be assumed equivalent to
$|\langlembda
\cdot(\mu-\nu)|<\omega _{0} $ (resp. $|\langlembda
\cdot(\mu-\nu)|>\omega _{0} $).
{\bf e}gin{lemma}
\langlebel{sol.homo} Consider
{\bf e}gin{eqnarray}
\langlebel{eq.stru.1} K =\sum _{|\mu +\nu |=M_0+1} k_{\mu\nu} (\| f \|
_2^2 ) z^{\mu} \overline{z}^{\nu} + \sum _{|\mu +\nu |=M_0} z^{\mu}
\overline{z}^{\nu}
\langlenglegle \sigma _1\sigma _3 K_{\mu \nu
}(\| f \| _2^2)
, f \ranglenglegle .
\end{eqnarray}
Suppose that all the terms in \eqref{eq.stru.1} are not in normal
form and that the symmetries
\eqref{eq:ExpHcoeff2} hold. Consider
{\bf e}gin{equation}
\langlebel{solhomo} {\bf e}gin{aligned} &\chi = \sum_{|\mu +\nu
|=M_0+1}\frac{ k_{\mu\nu}(\| f \| _2^2)}{
{\rm i} \langlembda \cdot(\nu-\mu)} z^\mu \overline{z}^\nu \\& -
\sum_{|\mu +\nu
|=M_0}
z^\mu \overline{z}^\nu \langlenglegle \sigma _1\sigma _3
\frac{1}{ {\rm i} (\langlembda \cdot
(\mu -\nu ) -\mathcal{H}) } K_{\mu \nu
}(\| f \| _2^2)
, f \ranglenglegle .\end{aligned}
\end{equation}
Then we have
{\bf e}gin{equation}
\langlebel{eq:homologicalEq} \left\{\chi , D_2 \right\} =K+L
\end{equation}
with, summing on repeated indexes,
{\bf e}gin{equation}
\langlebel{eq:RestohomologicalEq} {\bf e}gin{aligned} & L= 2
\frac{k_{\mu\nu}' }{(\mu -\nu )\cdot \langlembda } z^{\mu}
\overline{z}^{\nu} \langlenglegle
{\bf e}ta ^\prime (\phi ^2 )\phi ^2 \sigma _3 f,
f \ranglenglegle \\& +2 \langlembda ' _j z^\mu \overline{z}^{\nu} |z_j|^2
\left \langlenglegle \sigma _1 f , \frac{1}{(\mu -\nu )\cdot \langlembda
-\mathcal{H}}K_{\mu \nu } \right \ranglenglegle -\\& 2 \langlembda ' \cdot(\mu -\nu ) z^\mu \overline{z}^{\nu} |z_j|^2
\left \langlenglegle \sigma _1 f , \frac{1}{\left ( (\mu -\nu )\cdot \langlembda
-\mathcal{H}\right ) ^2}K_{\mu \nu } \right \ranglenglegle \langlenglegle
{\bf e}ta ^\prime (\phi ^2 )\phi ^2 \sigma _3 f,
f \ranglenglegle
\\& +2z^{\mu}
\overline{z}^{\nu} \left \langlenglegle f ,\sigma _3 \sigma
_1\frac{1}{(\mu -\nu )\cdot \langlembda
-\mathcal{H}}K_{\mu \nu } '\right \ranglenglegle
\langlenglegle
{\bf e}ta ^\prime (\phi ^2 )\phi ^2 \sigma _3 f,f\ranglenglegle . \end{aligned}
\end{equation}
The coefficients in \eqref{solhomo} satisfy \eqref{eq:ExpHcoeff2}.
\end{lemma}
\proof The proof follows by the tables \eqref{PoissBra2}, by the
product rule for the derivative and by the symmetry properties of
$\mathcal{H}$. \qed
We split the proof of Theorem {\rm e}f{th:main} in two stages. We first
prove step $r=2$ of Theorem {\rm e}f{th:main}. We subsequently prove the
case $r>2$.
\subsection{Proof of Theorem {\rm e}f{th:main}: the step $r=2$}
\langlebel{subsec:step1} At this step, our goal is to obtain a
hamiltonian similar to $H$, but with $\widetilde{{\mathcal R} ^{(1)}} =
0$. We will need to solve a nonlinear homological equation. We
consider a $\chi$ like in \eqref{chi.1} with $M_0=1$ satisfying
\eqref{chi.11}. We write
\begin{equation} \label{eq:ExpH11} \begin{aligned} & H \circ
\phi=d(\omega _0) -\omega _0\| u_0\| _2^2+ \psi (\|f'\| _2^2)
+(H_2^{(1)} +\widetilde{{\mathcal R} ^{(1)}} +\widetilde{{\mathcal R} ^{(2)}})
\circ \phi ,
\end{aligned}
\end{equation}
for $\phi$ the Lie transform of $\chi$. We write
\eqref{lie.11.a}--\eqref{lie.11.b} as follows, where we sum on
repeated indexes and $\nabla _f$ does not act on $\| f\| _2^2$:
\begin{eqnarray}
\label{lie.12.a}& z'_j - z _j =
\partial _k\Gamma _j(0,0,\| f\| _2^2)z_k +
\partial _{\overline{k}}\Gamma _j(0,0,\| f\| _2^2)\overline{z}_k +\\&
+
\langle \nabla _f\Gamma _j(0,0,\| f\| _2^2), f\rangle + r_j
, \nonumber\\
\label{lie.12.b} & f' - e^{ {\rm i} \Gamma _0 (z,f,\| f\|
_2^2)P_c(\omega _0) \sigma _3}f =
\partial _k\mathcal{G}( 0,0,\| f\| _2^2)z_k +
\partial _{\overline{k}}\mathcal{G}(0,0,\| f\| _2^2)
\overline{z}_k + r_f . \nonumber
\end{eqnarray}
By \eqref{lie.11.c}--\eqref{lie.11.d} the terms in
rhs\eqref{lie.12.a} satisfy (see \eqref{chi.12} for definition of $
\| \chi \| $)
\begin{equation}\label{lie.13.a}\begin{aligned} &
|\partial _k\Gamma _j |+\cdots \| \partial
_{\overline{k}}\mathcal{G} \| _{H^{ K , S }} \le C \| \chi \|
\\&
|r_j|+\| r_f\| _{H^{K,S}}\le C (|z|^2+\| f\| _{H^{-K',-S'}} ^2).
\end{aligned}
\end{equation} We write the $f'^{\otimes 2}$ in
\eqref{eq:ExpH2resto} schematically as
\begin{equation} \label{eq:fotimes2}\begin{aligned} &
f '^{2}(x)= \sum _{|\mu +\nu |=2}A_{\mu \nu}(x,\| f\| _2^2)
z^{\mu}\overline{z}^{\nu} +\sum _{|\mu +\nu |=1}
z^{\mu}\overline{z}^{\nu}
\mathcal{A} _{\mu \nu
}(\| f\| _2^2)(x)
f (x) \\& +
(e^{ {\rm i} \Gamma _0 \sigma _3}f + T(\Gamma _0)f) ^{2}(x) +
\varphi (x) r_j f(x)+r_f(x) f(x)+ \varphi (x) r_j ^2+r_f^2(x)
\end{aligned}
\end{equation}
where $ \varphi (x)$ represents an exponentially decreasing smooth
function. \eqref{lie.13.a} implies
\begin{equation} \label{eq:fotimes21}\begin{aligned} &
\sum _{\mu ,\nu} \| A_{\mu \nu}(x,\| f\| _2^2)
\| _{H^{ K , S }} +\sum _{\mu ,\nu} \|
\mathcal{A}_{\mu \nu
}(\| f\| _2^2)
\| _{H^{ K , S }} \le C \| \chi \| .
\end{aligned}
\end{equation}
We consider
\begin{equation} \label{eq:ExpH12}\begin{aligned} & H_2^{(1)}\circ
\phi+\widetilde{{\mathcal R} ^{(1)}} \circ \phi +\int _{\mathbb{R}^3}
F_2(x,0 ,0, 0,\| f\| _2^2) f '^{\otimes 2}(x) dx\\& + \langle
\nabla _f ^2 \widehat{{\mathcal R} }^{(1)}_2 (0,0,\| f\| _2^2), f
'^{\otimes 2}\rangle .
\end{aligned}
\end{equation}
We will assume for the moment Lemma \ref{lem:1step1}:
\begin{lemma}
\label{lem:1step1} The following difference is formed by terms which
satisfy the properties stated for ${\mathcal R} ^{(2)}$ in Theorem
\ref{th:main}:
\begin{equation} \label{eq:lem:1step1}
\psi (\|f'\| _2^2)+ (H_2^{(1)} +\widetilde{{\mathcal R} ^{(1)}}
+\widetilde{{\mathcal R} ^{(2)}})
\circ \phi -\psi (\|f \| _2^2)-\text{\eqref{eq:ExpH12}} .
\end{equation}
\end{lemma}
We postpone the proof of Lemma \ref{lem:1step1} and focus on
\eqref{eq:ExpH12} and on the choice of $\chi$.
\begin{lemma}
\label{lem:2step1} It is possible to choose $\chi$ such that
there exists $H_2^{(2)}$ as in (i) Theorem
\ref{th:main} such that the difference
\eqref{eq:ExpH12}$-H_2^{(2)}$ is formed by terms which
satisfy the properties stated for ${\mathcal R} ^{(2)}$ in Theorem
\ref{th:main}.
\end{lemma}
\proof We have by \eqref{chi.1} and using Definition
\ref{def:PoissonFunct}
\begin{equation} \label{eq:ExpH13}\begin{aligned} & H_2^{(1)}\circ
\phi = H_2^{(1)} +\int _0^1 \{ H_2^{(1)}, \chi \} \circ \phi _t dt
= H_2^{(1)}+ \\& \sum_{|\mu +\nu |=2}b_{\mu \nu }(\| f\| _2^2)\int
_0^1 \{ H_2^{(1)}, z^{\mu} \overline{z}^{\nu} \} \circ \phi _t dt
+\\& \sum_{|\mu +\nu |=1 }
\langle \sigma _3\sigma _1B_{\mu \nu
}(\| f\| _2^2)
, \int _0^1 \{ H_2^{(1)},z^{\mu}
\overline{z}^{\nu} f \} \circ \phi _t dt \rangle + \widetilde{R}
\end{aligned}
\end{equation}
with $ |\widetilde{R}|\le C (|z|+\| f \| _{H^{ -K',-S'}})^3,$ by
\eqref{PoissBra2}, Lemma \ref{lie_trans}. Then, by \eqref{PoissBra2}
for $\lambda =\lambda ^{(1)}$, defined in \eqref{eq:lambda},
and substituting $H_2^{(1)}= D_2^{(1)} + (H_2^{(1)}- D_2^{(1)})$ in the last two lines of \eqref{eq:ExpH13}, we get
\begin{equation} \label{eq:ExpH133}\begin{aligned} & H_2^{(1)}\circ
\phi = H_2^{(1)}
+ {\rm i} \sum_{|\mu +\nu |=2}b_{\mu \nu } \lambda \cdot (\mu - \nu)
z^{\mu} \overline{z}^{\nu} \\& -{\rm i} \sum_{|\mu +\nu |=1} z^{\mu}
\overline{z}^{\nu} \langle f ,\sigma _1\sigma _3 (\mathcal{H}
-\lambda \cdot (\mu - \nu) )B_{\mu \nu } \rangle +
{\widehat{R}},
\end{aligned} \nonumber
\end{equation}
with $D_2^{(1)}$ defined in \eqref{eq:Diag} and with, by
\eqref{PoissBra2}, \eqref{eq:ExpHcoeff1} and Lemma \ref{lie_trans},
\begin{equation} \label{eq:ExpH1330} |\widehat{R}|\le C (|z|+\| f \|
_{H^{ -K',-S'}})^3 +C \| \chi \| (\| \chi \| + \| f \| _{ 2}^2 )
(|z|+\| f \| _{H^{-K',-S'}})^2. \end{equation} Similarly
\begin{equation} \label{eq:ExpH15}\begin{aligned}
&\widetilde{{\mathcal R} ^{(1)}} \circ \phi =\widetilde{{\mathcal R}
^{(1)}}+\sum_{|\mu +\nu |=2}\widetilde{l}_{\mu \nu } z^{\mu}
\overline{z}^{\nu} + \sum_{|\mu +\nu |=1} z^{\mu} \overline{z}^{\nu}
\langle f ,\sigma _1\sigma _3 \widetilde{L}_{\mu \nu } \rangle
+\underline{R},
\end{aligned}
\end{equation}
\begin{equation} \label{eq:ExpH16}\begin{aligned}
\text{with } |\widetilde{l}_{\mu \nu }|+ \| \widetilde{L}_{\mu
\nu } \| _{H^{K,S}} \le C \| \chi \| \, \| \widetilde{{\mathcal R}
^{(1)}}\|
\end{aligned}
\end{equation}
\begin{equation}\label{eq:ExpH17} | \underline{{R}}|\le
\text{rhs\eqref{eq:ExpH1330}}+\text{rhs\eqref{eq:ExpH16}}.
\end{equation}
In \eqref{eq:ExpH12} we substitute $f'^{\otimes 2}$ using
\eqref{eq:fotimes2}. Then
\begin{equation} \label{eq:ExpH18}\begin{aligned} & \int
_{\mathbb{R}^3} F_2(x,0 ,0, 0,\| f\| _2^2) f '^{\otimes 2}(x) dx =
\widetilde{\chi } +\mathrm{R}
\end{aligned}
\end{equation}
with: $\widetilde{\chi }$ a polynomial like \eqref{chi.1} with
$M_0=1$ such that $\| \widetilde{\chi } \| \le C\| f\| _2^2 \|
{\chi } \| $ by claims (4) and (9) in Lemma \ref{lem:ExpH} and by
\eqref{eq:fotimes2}; $\widetilde{\chi }$ satisfies \eqref{chi.11}
by the fact that the rhs\eqref{eq:ExpH18} is real valued;
$\mathrm{R}$ formed by terms with the properties stated for $
{{\mathcal R} ^{(2)}} $ in Theorem \ref{th:main}, see second line of
\eqref{eq:fotimes2}. By an argument similar to the one for
\eqref{eq:ExpH18}, we have
\begin{equation} \label{eq:ExpH182}\begin{aligned} &
\langle
\nabla _f ^2 \widehat{{\mathcal R} }^{(1)}_2 (0,0,\| f\| _2^2), f
'^{\otimes 2}\rangle = \widetilde{\chi } +\mathrm{R},
\end{aligned}
\end{equation}
with $\widetilde{\chi }$ and $\mathrm{R}$ different from the ones
in \eqref{eq:ExpH18} but with the same properties. Then we have
\begin{equation} \label{eq:ExpH19}\begin{aligned}&
\text{\eqref{eq:ExpH12}}=H_2^{(1)} +\widetilde{{\mathcal R}} ^{(2)}
+\widehat{\chi} + {\rm i} \sum_{|\mu +\nu |=2}b_{\mu \nu } \lambda
\cdot (\mu - \nu) z^{\mu} \overline{z}^{\nu} -\\& -{\rm i} \sum_{|\mu
+\nu |=1} z^{\mu} \overline{z}^{\nu} \langle f ,\sigma _1\sigma _3
(\mathcal{H} -\lambda \cdot (\mu - \nu) )B_{\mu \nu } \rangle +
\mathbf{R} ,
\end{aligned}
\end{equation}
where $\mathbf{R} $ satisfies the properties stated for $ {{\mathcal R}
^{(2)}}$ and $\widehat{\chi}$ is a polynomial like
\eqref{chi.1}--\eqref{chi.11} with $M_0=1$ and ($\widehat{Z}$ and
$\widehat{K}$ will be defined in two lines)\begin{equation}
\label{eq:ExpH20}\begin{aligned}&\| \widehat{\chi } \| = \|
\widehat{Z} \| + \| \widehat{K } \| \le C \| {\chi } \| (\| f\| _2^2
+\| \chi \| + \| \widetilde{{\mathcal R}} ^{(2)}\| ) .
\end{aligned}\end{equation}
Here $\widehat{\chi }= \widehat{Z}+\widehat{K}$, where in
$\widehat{Z}=\sum \widehat{b}_{\mu \nu}(\| f\| _2^2) z^\mu
\overline{z}^\nu $ we sum over $|\mu +\nu |=2$, $\lambda ^0\cdot \mu
=\lambda ^0\cdot \nu$, i.e.\ in $\widehat{Z}$ we collect the null
form terms of $\widehat{\chi }$. We set
\begin{equation} \label{eq:ExpH2011} H_2^{(2)}=
H_2^{(1)}+\widehat{Z} .
\end{equation}
Up to now $\chi$ is undetermined. We choose $\chi$ with
coefficients $b_{\mu \nu}$ and $B_{\mu \nu } $ such that ${b}_{\mu
\nu}=0$ for $\lambda ^0\cdot \mu =\lambda ^0\cdot \nu$ and such that
the following system is satisfied:
\begin{equation} \label{eq:ExpH201}\begin{aligned}&
\widetilde{{\mathcal R} ^{(1)}}
+\widehat{K} + {\rm i} \sum_{|\mu +\nu |=2}b_{\mu \nu}
\lambda \cdot (\mu - \nu) z^{\mu} \overline{z}^{\nu} -\\& -{\rm i}
\sum_{|\mu +\nu |=1} z^{\mu} \overline{z}^{\nu} \langle f ,\sigma
_1\sigma _3 (\mathcal{H} -\lambda \cdot (\mu - \nu) )B_{\mu \nu
} \rangle =0.
\end{aligned}
\end{equation}
In coordinates, \eqref{eq:ExpH201} is
\begin{equation} \label{eq:ExpH21}\begin{aligned}&
a_{\mu \nu} ^{(1)} +\widehat{k}_{\mu \nu} +{\rm i} b_{\mu \nu} \lambda
\cdot (\mu - \nu)=0 , \, |\mu +\nu |=2, \, \lambda ^0\cdot \mu
\neq \lambda ^0\cdot \nu , \\& G_{\mu \nu } +\widehat{K} _{\mu \nu}
- {\rm i} (\mathcal{H}
-\lambda \cdot (\mu - \nu) )B_{\mu \nu } =0 , \, |\mu +\nu
|=1,
\end{aligned}
\end{equation}
where: $a_{\mu \nu}^{(1)}$ and $G_{\mu \nu } $ are coefficients of
$\widetilde{{\mathcal R} ^{(1)}} $, they are smooth functions of $\varrho
=\| f\| _2^2$, and are equal to 0 for $\varrho =0$; $\widehat{k}_{\mu \nu}\in \mathbb{C}$
and $\widehat{K}_{\mu \nu } \in H ^{K,S}$ are coefficients of
$\widehat{K}$, and are smooth functions of $\varrho =\| f\| _2^2$
and of the coefficients of $\chi$, where $b_{\mu \nu}\in \mathbb{C}$ and
$B_{\mu \nu } \in H ^{K,S}$. By \eqref{eq:ExpH20}
\begin{equation} \label{eq:ExpH211} |\widehat{k}_{\mu \nu}|+ \|\widehat{K}_{\mu \nu}\|
_{H^{K,S}}\le C \| {\chi } \| (\| f\| _2^2 +\| \chi \| + \|
\widetilde{{\mathcal R} ^{(1)}} \| ) .
\end{equation}
Then by the implicit function theorem we can solve the nonlinear
system \eqref{eq:ExpH21} with unknown $\chi$ obtaining (we consider
$b_{\mu \nu}$ only for $\lambda ^0\cdot \mu \neq \lambda ^0\cdot
\nu$)
\begin{equation} \label{eq:ExpH22}\begin{aligned}& |
b_{\mu \nu} +\frac{a_{\mu \nu}^{(1)}}{{\rm i} \lambda \cdot (\mu -
\nu)} |+
\| B_{\mu \nu} + {\rm i} R_{\mathcal{H}}(\lambda \cdot (\mu - \nu))
G_{\mu \nu} \| _{H^{K,S}}\\& \le C \|\widetilde{{\mathcal R} ^{(1)}} \|
(\| \widetilde{{\mathcal R} ^{(1)}} \| +\| f\| _2^2 ) .
\end{aligned}
\end{equation}
Notice that with the above choice of
$\chi$ and with \eqref{eq:ExpH2011}, \eqref{eq:ExpH19}
yields
\begin{equation} \label{eq:ExpH190}\begin{aligned}&
\text{\eqref{eq:ExpH12}}=H_2^{(2)} + \mathbf{R} ,
\end{aligned}
\end{equation}
where $\mathbf{R}$ has the properties stated for $ {{\mathcal R} ^{(2)}} $
in Theorem \ref{th:main}. Hence Lemma \ref{lem:2step1} is proved.
\qed
{\it Proof of Lemma \ref{lem:1step1}.}
By \eqref{lie.11.h}--\eqref{lie.11.f}, and with the big O smooth in
$z\in \mathbb{C} ^m$, $f\in H^{-K',-S'}_c$,
\begin{equation} \label{eq:psi}
\psi (\|f'\| _2^2) = \psi (\|f \| _2^2) +O\left ( |z|^{2 } \| f \|
_{H^{-K',-S'}} + \| f \| _{H^{-K',-S'}} ^3 \right ).\end{equation}
The error term in \eqref{eq:psi} has the properties stated for $
{{\mathcal R} ^{(2)}} $ in Theorem \ref{th:main}. We consider the terms
$\widetilde{{\mathcal R} ^{(2)}} \circ \phi $. Terms, for $|\mu +\nu | =
3$, like
\begin{equation} \label{eq:z3}z'^\mu \overline{z}'^\nu \int
_{\mathbb{R}^3}a (x,z',f',f'(x),\| f'\| _2^2 ) dx , \end{equation}
by \eqref{lie.11.a} and \eqref{lie.11.c} can be written as
\begin{equation}
\label{eq:z31}\begin{aligned} & (z ^\mu \overline{z} ^\nu +O ((
|z|+ \norma{f } _{H^{-K',-S'}} )^3) )\int _{\mathbb{R}^3}a
(x,z',f',f'(x),\| f'\| _2^2 ) dx .
\end{aligned}\end{equation} In the notation of Lemma \ref{lie_trans} we have
\begin{equation} \label{eq:z32}\begin{aligned}&
a( x,z ' ,f',f'(x ),\| f'\| _2^2) = a\big ( x,z +
\Gamma
,e^{ {\rm i} \Gamma _0 P_c(\omega _0) \sigma _3}f +\mathcal{G},\\& e^{
{\rm i} \Gamma _0 \sigma _3}f (x)+[T(\Gamma _0) f](x)+\mathcal{G}(z,f,\|
f\| _2^2)(x) ,\| f \| _2^2+\Gamma _1 \big ) \\& = a( x,z ,f ,f (x ),\|
f \| _2^2)+ O ( |z|+ \norma{f } _{H^{-K',-S'}} ) .
\end{aligned}\end{equation}
The big O's in \eqref{eq:z31}--\eqref{eq:z32} are smooth in $z\in \mathbb{C}
^m$, $f\in H^{-K',-S'}_c$. Then \eqref{eq:z3} has the properties
stated for $ {{\mathcal R} ^{(2)}} $ in Theorem \ref{th:main}. Similar
formulas can be used for
\begin{equation} \label{eq:zf3}\begin{aligned}&
\sum _{|\mu +\nu | =2 }z'^\mu \overline{z}'^\nu \int
_{\mathbb{R}^3} \left [ \sigma _1 \sigma _3G_{\mu \nu }
(x,z',f',f'(x), \| f'\| _2^2 )\right ]^*f'(x) dx +\\& \sum
_{j=3}^5\int _{\mathbb{R}^3} F_j(x,z' ,f', f'(x),\| f'\| _2^2)
f'^{\otimes j}(x) dx +
\int _{\mathbb{R}^3}B (|f'(x)|^2/2 ) dx.
\end{aligned}\end{equation}
We treat with some detail these terms in the step $r>2,$ Subsection
\ref{subsec:step2}. Next we consider the term with $\int F_2
f'^{\otimes 2}(x) dx$. First of all, we can apply to $F_2$ an
analogue of \eqref{eq:z32} to obtain for $d=2$
\begin{equation} \label{eq:F32}\begin{aligned}&
F_d( x,z ' ,f',f'(x ),\| f'\| _2^2) = F_d( x,z +
\Gamma
,e^{ {\rm i} \Gamma _0 P_c(\omega _0) \sigma _3}f +\mathcal{G},\\& e^{
{\rm i} \Gamma _0 \sigma _3}f (x)+[T(\Gamma _0) f](x)+\mathcal{G}(z,f,\|
f\| _2^2)(x) ,\| f \| _2^2+\Gamma _1 ) \\& = F_d ( x,0 ,0 ,f (x
),\| f \| _2^2)+ O ( |z|+ \norma{f } _{H^{-K',-S'}} ) .
\end{aligned}\end{equation}
Then, modulo terms with the properties stated for $ {{\mathcal R} ^{(2)}}
$ in Theorem \ref{th:main}, we get
\begin{equation} \label{eq:f2}\begin{aligned}&
\int _{\mathbb{R}^3} F_2(x,0 ,0, f (x
),\| f \| _2^2) f'^{\otimes 2}(x) dx=
\int _{\mathbb{R}^3} F_2(x,0 ,0, 0,\| f \| _2^2)
f'^{\otimes 2}(x) dx
\\&+ \int _{\mathbb{R}^3}
G_2(x, f (x),\| f \| _2^2) f(x)\otimes f'^{\otimes 2} (x) dx ,
\end{aligned}\end{equation}
where the first term in the rhs has been treated in Lemma \ref{lem:2step1}
and the second term has the properties stated for $ {\mathcal R} ^{(2)} _3 $
in Theorem \ref{th:main}. By a similar argument
\begin{equation} \label{eq:f22}\begin{aligned}&
\widehat{{\mathcal R}} ^{(1)} _2 (z',f', \| f'\| _2 )- \langle \nabla _f
^2 \widehat{{\mathcal R} }^{(1)}_2 (0,0,\| f\| _2^2), f '^{\otimes
2}\rangle
\end{aligned}\end{equation}
has the properties stated for $ {\mathcal R} ^{(2)} $ in Theorem
\ref{th:main}.
\qed
We denote: $\chi _2=\chi$, ${\mathcal T} _2$ the Lie transformation of
$\chi_2$, $Z^{(2)}=0 $. $H_2^{(2)} $ has been defined in
\eqref{eq:ExpH2011}. We denote ${\mathcal R} ^{(2)}=
\text{\eqref{eq:lem:1step1}} +\text{\eqref{eq:ExpH12}}- H_2^{(2)} $.
This ${\mathcal R} ^{(2)}$ satisfies the conditions in Theorem
\ref{th:main}. This ends the proof of case $r=2$ in Theorem
\ref{th:main}.
\subsection{Proof of Theorem \ref{th:main}: the step $r>2$}
\label{subsec:step2}
Case $r=2$ has been treated in Subsection \ref{subsec:step1}.
We have defined $H_2^{(2)} $ in \eqref{eq:ExpH2011}.
We proceed by
induction to complete the proof of Theorem \ref{th:main}. From the
argument below one can see that $H_2^{(r)} =H_2^{(2)} $ for all
$r\ge 2$. For $r\ge 2$, write Taylor expansions
\begin{equation}
\label{r00} {\mathcal R}^{(r)}_{0}- {\mathcal R}^{(r)}_{02} = \sum _{|\mu +\nu
| = r+1 } z^\mu \overline{z}^\nu \int _{\mathbb{R}^3}a_{\mu \nu
}^{(r)}(x,0,0,0,\| f\| _2^2 ) dx, \end{equation}
\begin{equation}
\label{r11} {\mathcal R}^{(r)}_{1}- {\mathcal R}^{(r)}_{12} =\sum _{|\mu +\nu |
= r }z^\mu \overline{z}^\nu \int _{\mathbb{R}^3} \left [ \sigma _1
\sigma _3G_{\mu \nu }^{(r)}(x,0,0,0, \| f\| _2^2 )\right ]^*f(x) dx.
\end{equation}
We have
\begin{equation} \label{eq:r00} \begin{aligned} &
{\mathcal R}^{(r)}_{02} + {\mathcal R}^{(r)}_{12} =\sum _{|\mu +\nu
| = r+2 } z^\mu \overline{z}^\nu \int
_{\mathbb{R}^3}\widetilde{a}_{\mu \nu }^{(r)}(x,z,f,0,\| f\| _2^2 )
dx+\\& \sum _{|\mu +\nu | = r+1 }z^\mu \overline{z}^\nu \int
_{\mathbb{R}^3} \left [ \sigma _1 \sigma _3\widetilde{G}_{\mu \nu
}^{(r)}(x,z,f,f(x), \| f\| _2^2 )\right ]^*f(x) dx + \\& \sum _{|\mu
+\nu | = r }z^\mu \overline{z}^\nu \int _{\mathbb{R}^3}
\widetilde{F}_{2 }^{(r)}(x,z,f,f(x), \| f\| _2^2 )
\cdot \left (f(x)\right ) ^{\otimes 2} dx,
\end{aligned}
\end{equation}
with $\widetilde{a}_{\mu \nu }^{(r)}$ satisfying \eqref{eq:coeff a},
$\widetilde{G}_{\mu \nu }^{(r)}$ \eqref{eq:coeff G}
and $\widetilde{F}_{2 }^{(r)}$ \eqref{eq:coeff F}. Since
$H^{(r)}=H\circ {\mathcal T} _r$ is real valued (because $H$ is real valued),
then both sides of equations \eqref{r00}--\eqref{eq:r00} are real
valued. In particular, $ {a}_{\mu \nu }^{(r)}$ and $ {G}_{\mu \nu
}^{(r)}$ satisfy \eqref{eq:ExpHcoeff2}. Set
\begin{equation}
\label{pr.3} \widetilde{K}_{r+1}:= \text{rhs\eqref{r00}}
+\text{rhs\eqref{r11}}.
\end{equation}
Split $\widetilde{K}_{r+1}= K_{r+1}+ Z_{r+1}$ collecting inside
$Z_{r+1}$ all the terms of $\widetilde{K}_{r+1}$ in null form. The
coefficients of $ {K}_{r+1}$ and of $Z_{r+1}$ satisfy
\eqref{eq:ExpHcoeff2}, by the argument just before \eqref{pr.3}. We
consider a (momentarily unknown) polynomial $\chi $ like
\eqref{chi.1}--\eqref{chi.11}, $M_0=r$. Denote by $ \phi $ its Lie
transformation. Let $(z',f')=\phi (z,f)$. For $d=2$, in the notation
of Lemma \ref{lie_trans} we have
\begin{equation} \label{binomials1}\begin{aligned}&
({\mathcal R}^{(r)}_d - \widehat{{\mathcal R}}^{(r)}_d)(z',f') =
\langle F_d^{(r)}( z ' ,f',f'(\cdot ),\| f'\| _2^2),
(e^{ {\rm i} \Gamma _0 P_c(\omega _0)\sigma _3 }f +\mathcal{G}
)^{\otimes d} \rangle .
\end{aligned}\end{equation}
Then rhs\eqref{binomials1}$=$
\begin{equation} \label{binomials}\begin{aligned}&
= \sum_{j=0}^{d}\binom{d}{j} \langle F_d^{(r)}( z ' ,f',f'(\cdot ),\| f'\| _2^2),
\mathcal{G}
^{\otimes (d-j)}\otimes [e^{ {\rm i} \Gamma _0 P_c(\omega _0)
\sigma _3 }f]^{\otimes j}\rangle =\\& \sum_{j=0}^{d}\binom{d}{j}
\sum_{\ell =0}^{j}\binom{j}{\ell}\langle F_d^{(r)}( \cdots ), \mathcal{G}
^{\otimes (d-j)}\otimes [T (\Gamma _0) f]^{\otimes (j-\ell)}
\otimes [e^{ {\rm i} \Gamma _0
\sigma _3 }f]^{\otimes \ell }\rangle .
\end{aligned}\end{equation}
In the notation of Lemma \ref{lie_trans} we have for $d=2$
\begin{equation} \label{Fd}\begin{aligned}&
F_d^{(r)}( z ' ,f',f'(x ),\| f'\| _2^2)(x)=\\& F_d^{(r)}\big ( z +
\Gamma
,e^{ {\rm i} \Gamma _0 P_c(\omega _0) \sigma _3}f +\mathcal{G}, e^{ {\rm i}
\Gamma _0 \sigma _3}f (x)+[T(\Gamma _0) f](x) ,\| f \| _2^2+\Gamma
_1 \big )(x) .
\end{aligned}\end{equation}
Then
\begin{equation} \label{Fd2}\begin{aligned}&
F_2^{(r)}( z ' ,f',f'(x ),\| f'\| _2^2)(x)= F_2^{(r)}( 0 ,0, f(x)
,\| f \| _2^2 )(x) +\\& O ( |z|+ \norma{f } _{H^{-K',-S'}} )
= F_2^{(r)}( 0 ,0,0 ,\| f \| _2^2 )(x) +\\& G ( 0 ,0, f(x) ,\| f
\| _2^2 )(x) f(x)+ O ( |z|+ \norma{f } _{H^{-K',-S'}} ) ,
\end{aligned}\end{equation}
where the big O are smooth in $z\in \mathbb{C}^m$ and $f\in H^{-K',-S'}$
with values in $H^{K,S} (\mathbb{R}^3, B (
(\mathbb{C}^2)^{\otimes 2},\mathbb{C} ))$ and where $G$ has values
in $H^{K,S} (\mathbb{R}^3, B (
(\mathbb{C}^2)^{\otimes 3},\mathbb{C} ))$ and satisfies estimates
\eqref{eq:coeff F}. So the last line of \eqref{Fd2}
when plugged in \eqref{binomials} for $d=2$ yields terms
with the properties of $\sum _{d=0}^3{\mathcal R} _d ^{(r+1)}.$
We focus now on the first term in the
rhs of \eqref{Fd2}. Schematically, in analogy to \eqref{eq:fotimes2}
we write
\begin{equation} \label{eq:fotimes22}\begin{aligned} &
f '^{2}(x)= \sum _{|\mu +\nu |=r} z^{\mu}\overline{z}^{\nu}
\mathcal{A}_{\mu \nu
}(\| f\| _2^2)(x)
f (x) \\&
+\sum _{|\mu +\nu |=2r}A_{\mu \nu}(x,\| f\| _2^2)
z^{\mu}\overline{z}^{\nu} +
(e^{ {\rm i} \Gamma _0 \sigma _3}f + T(\Gamma _0)f) ^{2}(x) \\& +
\varphi (x) r_j f(x)+r_f(x) f(x)+ \varphi (x) r_j ^2+r_f^2(x),
\end{aligned}
\end{equation}
where we have \eqref{eq:fotimes21} and $|r_j|+\| r_f\| _{H^{K,S}}\le
C (|z| +\| f\| _{H^{-K',-S'}} ) ^{r+1}.$ Then
\begin{equation} \label{eq:ExpH181}\begin{aligned} & \int
_{\mathbb{R}^3} F_2^{(r)}(x,0 ,0, 0,\| f\| _2^2) f '^{\otimes 2}(x)
dx = \widetilde{\chi } _1 +\mathrm{R}_1
\end{aligned}
\end{equation}
with: $\mathrm{R}_1$ formed by terms obtained by the last two lines
of \eqref{eq:fotimes22} has the properties stated for $ {{\mathcal R}
^{(r+1)}} $ in Theorem \ref{th:main}; $\widetilde{\chi }_1$ a
polynomial like \eqref{chi.1} with $M_0=r$ arising from the first
line of rhs of \eqref{eq:fotimes22} is such that $\| \widetilde{\chi }_1\| \le C\| f\| _2^2 \| \chi \|$ by the inductive hypothesis
$F_2^{(r)}(x,0 ,0, 0,0 )=0$ in (iv.2-5) Theorem \ref{th:main} and by \eqref{eq:fotimes21};
$\widetilde{\chi }_1$ satisfies \eqref{chi.11} because each side in
\eqref{eq:ExpH181} is real valued. We have
\begin{equation} \label{eq:quadr}\begin{aligned} &
\widehat{{\mathcal R} }^{(r)}_2(z',f',\| f'\| _2^2) =
\langle
\nabla _f ^2 \widehat{{\mathcal R} }^{(r)}_2 (0,0,\| f\| _2^2), f
'^{\otimes 2}\rangle +\\& + (\widehat{{\mathcal R} }^{(r)}_2(z',f',\| f'\|
_2^2)-\langle \nabla _f ^2 \widehat{{\mathcal R} }^{(r)}_2 (0,0,\| f\|
_2^2), f '^{\otimes 2}\rangle ),
\end{aligned}
\end{equation}
where the second line on the rhs of \eqref{eq:quadr} yields terms which
have the properties of elements of ${{\mathcal R} }^{(r+1)}$. We have
\begin{equation} \label{eq:ExpH183}\begin{aligned} &
\langle
\nabla _f ^2 \widehat{{\mathcal R} }^{(r)}_2 (0,0,\| f\| _2^2), f
'^{\otimes 2}\rangle = \widetilde{\chi }_2 +\mathrm{R}_2
\end{aligned}
\end{equation}
where $\widetilde{\chi }_2$ and $\mathrm{R}_2$ have the same
properties of $\widetilde{\chi }_1$ and $\mathrm{R}_1$ in
\eqref{eq:ExpH181}. Split $H_2^{(r)}=D_2^{(r)}
+(H_2^{(r)}-D_2^{(r)})$ for $D_2^{(r)}$ in \eqref{eq:Diag}. Then
\begin{equation}
\label{eq:ExpH1831} \left\{ \chi ,H_2 ^{(r)} -D_2^{(r)}\right\}
=\widetilde{\chi }_3 +\mathrm{R}_3
\end{equation}
where $\widetilde{\chi }_3$ and $\mathrm{R}_3$ have the same
properties of $\widetilde{\chi }_1$ and $\mathrm{R}_1$ in
\eqref{eq:ExpH181}. Set $ \widetilde{\chi }=\sum
_{j=1}^{3}\widetilde{\chi }_j$. Split now $\widetilde{\chi
}=\widetilde{Z}+\widehat{K}$ collecting in $\widetilde{Z}$ the null
form terms in $\widetilde{\chi }$. Then we choose the yet unknown
$\chi$ such that its coefficients $b_{\mu \nu}$ and $B_{\mu \nu }
$ satisfy the system
\begin{equation} \label{eq:ExpH20111}\begin{aligned}&
\widetilde{K}_{r+1} +\widehat{K} + {\rm i} \sum_{|\mu +\nu
|=2}b_{\mu \nu} \lambda \cdot (\mu - \nu) z^{\mu} \overline{z}^{\nu}
-\\& -{\rm i} \sum_{|\mu +\nu |=1} z^{\mu} \overline{z}^{\nu} \langle f
,\sigma _1\sigma _3 (\mathcal{H} -\lambda \cdot (\mu - \nu)
)B_{\mu \nu } \rangle =0.
\end{aligned}
\end{equation}
Notice that for $\widehat{K}\equiv 0$ system \eqref{eq:ExpH20111}
would be linear and admit exactly one solution. By $\|
\widetilde{\chi } \| \le C\| f\| _2^2 \| {\chi } \| $ we get $\|
\widehat{K} \| \le C\| f\| _2^2 \| {\chi } \| $. So by the implicit
function theorem there exists exactly one solution of
\eqref{eq:ExpH20111}. This solution is close to the solution of system
\eqref{eq:ExpH20111} when $\widehat{K}\equiv 0$. Furthermore,
this system has solution $\chi_{r+1}=\chi$ which satisfies
\eqref{eq:ExpHcoeff2}, or what is the same, \eqref{chi.11}. For
$L_{r+1}$ of type \eqref{eq:RestohomologicalEq}, $\chi_{r+1}$
satisfies
\begin{equation}
\label{HomEqmain} \left\{ \chi_{r+1} ,H_2 ^{(r)}\right\}
=\widetilde{K}_{r+1} +\widehat{K}+ L_{r+1}.
\end{equation} Call $\phi_{r+1}= \phi$ the Lie transform
of $\chi_{r+1}$. For $ {\mathcal T}_{r+1}={\mathcal T}_r\circ\phi_{r+1}$ set
\begin{equation}
\label{Hamr} H^{(r+1)}:=H^{(r)}\circ\phi_{r+1}=
H\circ({\mathcal T}_r\circ\phi_{r+1}) = H\circ {\mathcal T}_{r+1}.
\end{equation}
Since $\chi_{r+1}$ satisfies \eqref{eq:ExpHcoeff2}, $H^{(r+1)}$ is
well defined and real valued. Split
\begin{eqnarray}
\label{n1.1} & H^{(r)}\circ\phi_{r+1}= H_2^{(r)}+Z^{(r)} +
Z_{r+1}+\widetilde{Z}
\\ & \label{n1.3}
+ (Z^{(r)}\circ\phi _{r+1}-Z^{(r)}) +(\widetilde{Z} \circ\phi
_{r+1} -\widetilde{Z} ) \\ & \label{n1.6} + (\widetilde{K}_{r+1}
+\widehat{K})\circ\phi_{r+1}-\widetilde{K}_{r+1} -\widehat{K}
\\ &
\label{n1.4} + H_2^{(r)}\circ\phi_{r+1}- \left(H_2^{(r)}+\left\{
H_2^{(r)}, \chi_{r+1}\right\}\right)
\\ &
\label{n1.5} + ({\mathcal R}^{(r)}_{02}+{\mathcal R}^{(r)}_{12})\circ\phi_{r+1}
\\ &
\label{n1.7} + \sum _{d=3}^{5}({\mathcal R} ^{(r)}_d -\widehat{{\mathcal R}}
^{(r)}_d) \circ\phi_{r+1}+ \widehat{{\mathcal R}} ^{(r)}_d \circ\phi_{r+1}
\\ & \label{n1.9} +
({\mathcal R} ^{(r)}_2 - \widetilde{Z}-\widehat{K})\circ\phi_{r+1}
\\ & \label{n1.8} + \psi \circ\phi_{r+1}+
{\mathcal R} ^{(r)}_6\circ\phi_{r+1}
\ .
\end{eqnarray}
Define $H^{(r+1)}_2=H^{(r )}_2 $ (this proves $H^{(r )}_2=H^{(2
)}_2 $) and $Z^{(r+1)}:=Z^{(r)}+Z_{r+1}+\widetilde{Z}$. Its
coefficients satisfy \eqref{eq:ExpHcoeff2} (because $H^{(r+1)}$ is
real valued) and it is a normal form. We have already discussed that
\eqref{n1.9} has the properties stated for ${\mathcal R} ^{(r+1)}$. By
expansions \eqref{binomials1}--\eqref{Fd} we get that the first
summation in \eqref{n1.7} has the properties stated for
${\mathcal R} ^{(r+1)}$. By an analogous argument, terms $
\widehat{{\mathcal R}} ^{(r)}_d
(z',f')$ have the properties stated for ${\mathcal R} ^{(r+1)}$.
We have, for $T=T(\Gamma _0)$,
\begin{equation} \label{resto61}\begin{aligned}& | f' (x)|^2=
| f (x)|^2 +\mathcal{E}(x)\text{ with } \mathcal{E}(x):=2 (T(\Gamma
_0)f(x))^*\sigma _1 e^{ {\rm i} \Gamma _0 \sigma _3}f(x)\\& + | T(\Gamma
_0)f(x)|^2+ 2\mathcal{G} ^* (x)\sigma _1 e^{ {\rm i} \Gamma _0 \sigma
_3}f(x)+2\mathcal{G} ^* (x)\sigma _1 T(\Gamma _0)f(x)+| \mathcal{G}
(x)|^2.
\end{aligned}\end{equation}
Then
\begin{equation} \label{resto6}\begin{aligned}& {\mathcal R}^{(r)}_6
\circ \phi _{r+1} =
\int _{\mathbb{R}^3} B ( | f' (x)| ^2/2) dx=
\int _{\mathbb{R}^3} B ( | f (x)| ^2/2) dx\\& +\frac{1}{2}
\int _{\mathbb{R}^3} dx\, \mathcal{E}(x)\int _0^1 B '( | f (x)| ^2/2
+s\, \mathcal{E}(x)/2) ds.
\end{aligned}\end{equation}
The last line in \eqref{resto6} has the properties stated for
${{\mathcal R}}^{(r+1)} -{{\mathcal R}}^{(r+1)}_6$ by Lemma \ref{lie_trans}. By
\eqref{eq:r00} and by the fact that $\widetilde{a}_{\mu \nu }^{(r)}$
satisfies \eqref{eq:coeff a},
$\widetilde{G}_{\mu \nu }^{(r)}$ \eqref{eq:coeff G}
and $\widetilde{F}_{2 }^{(r)}$ \eqref{eq:coeff F}, the terms
${\mathcal R}^{(r)}_{02}+{\mathcal R}^{(r)}_{12}$ have the properties stated for
$\sum _{d=0}^2{{\mathcal R}}^{(r+1)}_d$. The same conclusion holds for
\eqref{n1.5}.
By Lemma
\ref{lie_trans} and by an analogue of \eqref{eq:psi}, we have that
$\psi \circ \phi _r=\psi + \widetilde{\psi} $ where
$\widetilde{\psi}$ has the properties stated for $\sum
_{d=1}^3{\mathcal R}^{(r+1)}_d$ by \eqref{lie.11.f}. We have
\begin{equation} \label{eq:Zcirc}\begin{aligned} & Z^{(r)}\circ\phi_{r+1} -Z^{(r)}=
\int_0^1 \{ Z^{(r)} , \chi_{r+1} \}
\circ\phi_{r+1}^t dt.
\end{aligned} \end{equation}
We have
\begin{equation} \label{eq:chiZ}\begin{aligned} & \left |
\{\chi_{r+1},Z^{(r)} \} \right | \le C (|z|^{r+2} +|z|^{r+1}
\| f\| _{H^{-K',-S'}} ).
\end{aligned} \end{equation}
By \eqref{eq:chiZ} we conclude that \eqref{eq:Zcirc} has the
properties stated for ${\mathcal R}^{(r+1)}$. The same is true for the
other terms in \eqref{n1.3}--\eqref{n1.6}. We have, for
$H_2=H_2^{(r)}$,
\begin{equation} \label{eq:Hcirc}\begin{aligned} &
H_2\circ\phi_{r+1}-(H_2+\left\{ H_2, \chi_{r+1} \right\})=
\int_0^1 \frac{t^2}{2!} \left\{
\left\{ H_2, \chi_{r+1} \right\}, \chi_{r+1}
\right\}\circ\phi_{r+1}^t dt \\& =- \int_0^1 \frac{t^2}{2!}\left\{
K_{r+1}+\widehat{K} +L_{r+1}, \chi_{r+1} \right\}\circ\phi_{r+1}^t
dt.
\end{aligned} \end{equation}
Then $\left |
\{ K_{r+1}+\widehat{K}
+L_{r+1} , \chi_{r+1} \} \right | \le \text{rhs \eqref{eq:chiZ} }$
implies that \eqref{eq:Hcirc} has the properties stated for
${\mathcal R}^{(r+1)}$.
\qed
\section{Dispersion}
\label{sec:dispersion}
We apply Theorem \ref{th:main} for $r=2N+1 $ (recall $N=N_1$ where
$N_j\lambda _j<\omega _0 <(N_j+1)\lambda _j$). In the rest of the
paper we work with the hamiltonian $H^{(r)}$. We will drop the upper
index. So we will set $H=H^{(r)}$, $H_2=H_2^{(r)}$, $\lambda
_j=\lambda _j^{(r)}$, $\lambda =\lambda ^{(r)}$,
$Z_a=Z_a^{(r)}$ for $a=0,1$ and ${\mathcal R} ={\mathcal R} ^{(r)}$. In
particular we will denote by $G_{\mu \nu}$ the coefficients $G_{\mu
\nu}^{(r)}$ of $Z_1^{(r)}$. We will show:
\begin{theorem}\label{proposition:mainbounds} There is a fixed
$C >0$ such that for $\varepsilon _0>0$ sufficiently small and for
$\epsilon \in (0, \varepsilon _0)$ we have
\begin{eqnarray}
& \| f \| _{L^r_t( [0,\infty ),W^{ 1 ,p}_x)}\le
C \epsilon \text{ for all admissible pairs $(r,p)$}
\label{Strichartzradiation}
\\& \| z ^\mu \| _{L^2_t([0,\infty ))}\le
C \epsilon \text{ for all multi indexes $\mu$
with $\lambda\cdot \mu >\omega _0 $} \label{L^2discrete}\\& \| z _j \|
_{W ^{1,\infty} _t ([0,\infty ) )}\le
C \epsilon \text{ for all $j\in \{ 1, \dots , m\}$ }
\label{L^inftydiscrete} .
\end{eqnarray}
\end{theorem}
Estimate \eqref{L^inftydiscrete} is a consequence of the classical
proof of orbital stability in Weinstein \cite{W1}. Notice that
\eqref{NLS} is time reversible, so in particular
\eqref{Strichartzradiation}--\eqref{L^inftydiscrete} are true over
the whole real line. The proof, though, exploits that $t\ge 0$,
specifically when for $\lambda \in \sigma _c(\mathcal{H})$ we
choose $R_{\mathcal{H}}^+(\lambda )=R_{\mathcal{H}} (\lambda +{\rm i} 0
)$ rather than $R_{\mathcal{H}}^-(\lambda )=R_{\mathcal{H}} (\lambda
-{\rm i} 0 )$ in formula \eqref{eq:g variable}. See the discussion on
p.~18 of \cite{SW3}.
The proof of
Theorem
{\rm e}f{proposition:mainbounds} involves a standard continuation
argument. We assume
{\bf e}gin{eqnarray}
& \| f \| _{L^r_t([0,T],W^{ 1 ,p}_x)}\le
C _1\epsilon \text{ for all admissible pairs $(r,p)$} \langlebel{4.4a}
\\& \| z ^\mu \| _{L^2_t([0,T])}\le
C_2 \epsilon \text{ for all multi indexes $\mu$
with $\omega \cdot \mu >\omega _0 $} \langlebel{4.4}
\end{eqnarray}
for fixed sufficiently large constants $C_1$, $C_2$ and then we
prove that for $\epsilon $ sufficiently small, \eqref{4.4a} and
\eqref{4.4} imply the same estimate but with $C_1$, $C_2$ replaced
by $C_1/2$, $C_2/2$. Then \eqref{4.4a} and \eqref{4.4} hold with
$[0,T]$ replaced by $[0,\infty )$.
The proof consists of three main steps.
\begin{itemize}
\item[(i)] Estimate $f$ in terms of $z$.
\item[(ii)] Substitute the variable $f$ with a
new ``smaller'' variable $g$ and find smoothing estimates for $g$.
\item[(iii)] Reduce the system for $z$ to a closed system involving
only the $z$ variables, by isolating the part of $f$ which
interacts with $z$, and by decoupling the rest (this remainder is
$g$). Then clarify the nonlinear Fermi golden rule.
\end{itemize}
The first two steps are the same as in \cite{cuccagnamizumachi}. The
only novelty of the proof with respect to \cite{cuccagnamizumachi}
is step (iii), specifically the part on the Fermi golden rule. At issue is the non-negativity of some crucial
coefficients in the equations for $z$. This point is solved
using the same ideas as in Lemma 5.2 \cite{bambusicuccagna}.
The fact that they are not 0 is assumed by hypothesis (H11).
The fact that, if not 0, they are positive is proved here.
Step (i) is encapsulated by the following proposition:
\begin{proposition}\label{Lemma:conditional4.2} Assume \eqref{4.4a}
and \eqref{4.4}. Then there exist constants $C=C(C_1,C_2), K_1$,
with $K_1$ independent of $C_1$, such that, if
$C(C_1,C_2) \epsilon $ is sufficiently small, then we have
\begin{eqnarray}
& \| f \| _{L^r_t([0,T],W^{ 1 ,p}_x)}\le
K_1 \epsilon \text{ for all admissible pairs $(r,p)$}\ .
\label{4.5}
\end{eqnarray}
\end{proposition}
Consider $Z_1$ of the form \eqref{e.12a}. Set:
\begin{equation}\label{eq:G^0} G_{\mu \nu}^0=G_{\mu
\nu}(\| f \| _2^2 ) \text{ for $\| f \| _2^2=0$; $\lambda
^0_j=\lambda _j(\omega _0)$}.\end{equation} Then we have (with
finite sums and with the derivative in the variable $\| f \| _2^2$ performed w.r.t. the $\| f \| _2^2$ arguments explicitly emphasized in Theorem \ref{th:main})
\begin{equation}\label{eq:f variable} \begin{aligned} &{\rm i} \dot f -
\mathcal{H}f - 2 (\partial _{ \| f \| _2^2} H)
P_c(\omega _0)\sigma _3 f = \sum _{|\lambda ^0
\cdot(\nu-\mu)|>\omega _0} z^\mu \overline{z}^\nu
G_{\mu \nu}^0 \\& +
\sum _{|\lambda ^0 \cdot(\nu-\mu)|>\omega _0} z^\mu
\overline{z}^\nu (G_{\mu \nu} - G_{\mu \nu}^0) +\sigma _3
\sigma _1 \nabla _f {\mathcal R} - 2 (\partial _{ \| f \| _2^2} {\mathcal R})
P_c(\omega _0)\sigma _3 f .
\end{aligned}\end{equation}
The proof of Proposition \ref{Lemma:conditional4.2} is standard and
is an easier version of the arguments in \S 4 in
\cite{cuccagnamizumachi}. The dominating term in the rhs of
\eqref{eq:f variable} is the one on the first line, with
contribution to $f$ bounded by $C(C_2) \epsilon $ by the
endpoint Strichartz estimate and by \eqref{4.4} (we recall that
the third term in the lhs, in part becomes a phase through an
integrating factor, in part goes on the rhs: see
\cite{cuccagnamizumachi}; this trick is due to \cite{BP2}). Notice
also, that Theorem \ref{proposition:mainbounds} implies by the
arguments on pp. 67--68 in \cite{cuccagnamizumachi}
\begin{equation}\label{scattering11} \lim_{t\to +\infty}
\left \| e^{{\rm i} \theta (t) \sigma _3}f (t) -
e^{ {\rm i} t \Delta \sigma_3}{f}_+ \right \|_{H^1}=0
\end{equation}
for a $ f_+\in H^1$ with $ \| {f}_+ \|_{H^1}\le C \epsilon$ and
for $ \theta(t) = t\omega _0+2\int _0^t (\partial _{ \| f \| _2^2} H) (t')dt' .$ We claim that $\theta(t)=\vartheta (t)-\vartheta (0)$.
This claim, Theorem \ref{th:main}, Theorem \ref{proposition:mainbounds}
and \eqref{scattering11} imply Theorem \ref{theorem-1.2}. To prove the claim
we substitute the
last system of coordinates in \eqref{system2} to obtain
\begin{equation}\label{scattering12} {\rm i} \dot f -\mathcal{H}f-(\dot \vartheta -\omega _0) P_c(\omega _0)\sigma _3 f =G
\end{equation}
where $G$ is a functional with values in $ C(\R, L^1_ {x}) $. The two equations are equivalent. This implies $G=\text{rhs\eqref{eq:f variable}}
$ and $\dot \vartheta -\omega _0= 2 \partial _{ \| f \| _2^2} H. $
This yields the claim $\theta(t)=\vartheta (t)-\vartheta (0)$.
Step (ii) in the proof of Theorem \ref{proposition:mainbounds}
consists in introducing the variable
\begin{equation}
\label{eq:g variable}
g=f+ \sum _{|\lambda ^0 \cdot(\mu-\nu)|>\omega _0} z^\mu
\overline{z}^\nu
R ^{+}_{\mathcal{H}} (\lambda ^0 \cdot(\mu-\nu) )
G_{\mu \nu}^0 .
\end{equation}
Substituting the new variable $g$ in \eqref{eq:f variable}, the
first line on the rhs of \eqref{eq:f variable} cancels out. By an
easier version of Lemma 4.3 \cite{cuccagnamizumachi} we have:
\begin{lemma}\label{lemma:bound g} For $\epsilon$ sufficiently small
and for $C_0=C_0(\mathcal{H})$ a fixed constant, we have
\begin{equation} \label{bound:auxiliary}\| g
\| _{L^2_tL^{2,-S}_x}\le C_0 \epsilon + O(\epsilon
^2).\end{equation}
\end{lemma}
As in \cite{cuccagnamizumachi}, the part of $f$ which couples
nontrivially with $z$ comes from the polynomial in $z$ contained
in \eqref{eq:g variable}. $g$ and $z$ are decoupled.
\subsection{The Fermi golden rule}
\label{subsec:FGR}
We proceed as in the related parts in
\cite{bambusicuccagna,cuccagnamizumachi}. The only difference with
\cite{cuccagnamizumachi} is that the preparatory work in Theorem
\ref{th:main} makes transparent the positive semidefiniteness
of the crucial coefficients.
Set $R_{\mu \nu }^+=R_{ \mathcal{H} }^+ (\lambda ^0\cdot (\mu -\nu
) ).$ We will have $\lambda _j^0=\lambda _j (\omega _0)$ and
$\lambda _j=\lambda _j (\| f\| _2^2)$ as in Section
\ref{subsec:Normal form}.
$|\lambda _j^0- \lambda _j|\lesssim C_1^2\epsilon ^2$
by \eqref{4.4a}, so in the sequel we can assume that $\lambda ^0$
satisfies the same inequalities as $\lambda .$ We substitute
\eqref{eq:g variable} in ${\rm i} \dot z_j = \frac{\partial}{\partial
\overline{z}_j} H^{(r)}$ obtaining
\begin{equation}\label{eq:FGR0} \begin{aligned} & {\rm i} \dot z _j
=
\partial _{\overline{z}_j}(H_2+Z_0) + \sum _{
|\lambda \cdot (\mu -\nu )| > \omega
_0 } \nu _j\frac{z ^\mu
\overline{ {z }}^ { {\nu} } }{\overline{z}_j} \langle g ,
\sigma _1\sigma _3 G
_{\mu \nu }\rangle +
\partial _{ \overline{z} _j} {\mathcal R}
\\ & - \sum _{ \substack{| \lambda \cdot
(\alpha -\beta )|> \omega _0
\\
|\lambda \cdot (\mu -\nu )|> \omega
_0 }} \nu _j\frac{z ^{\mu +\alpha } \overline{{z }}^ { {\nu}
+\beta}}{\overline{z}_j} \langle R_{ \alpha \beta}^+G^0 _{ \alpha
\beta },\sigma _1 \sigma _3G _{\mu \nu }\rangle .
\end{aligned} \end{equation}
We rewrite this as
\begin{eqnarray} \label{equation:FGR1}& {\rm i} \dot z _j= \partial _{\overline{z}_j}(H_2+Z_0) +
\mathcal{E}_j
\\ & \label{equation:FGR12} -\sum _{ \substack{ \lambda
\cdot \beta > \omega _0
\\
\lambda \cdot \nu > \omega
_0 \\ \lambda \cdot \beta -\lambda _k < \omega _0 \, \forall
\, k \, \text{ s.t. } \beta _k\neq 0\\ \lambda \cdot \nu -\lambda
_k <\omega _0 \, \forall \, k \, \text{ s.t. } \nu _k\neq 0}} \nu
_j\frac{ \overline{{z }}^ {\nu +\beta } }{\overline{z}_j}\langle
R_{ 0 \beta}^+
{ G} _{ 0\beta }^0, \sigma _1 \sigma _3G ^0_{0 \nu }\rangle
\\ & \label{equation:FGR13} -\sum _{ \substack{ \lambda
\cdot \alpha > \omega _0
\\
\lambda \cdot \nu > \omega
_0 \\ \lambda \cdot \alpha -\lambda _k < \omega _0 \, \forall
\, k \, \text{ s.t. } \alpha _k\neq 0\\ \lambda \cdot \nu -\lambda
_k <\omega _0 \, \forall \, k \, \text{ s.t. } \nu _k\neq 0}} \nu
_j\frac{z ^{ \alpha } \overline{{z }}^ {\nu }
}{\overline{z}_j}\langle R_{ \alpha 0 }^+
G _{ \alpha 0}^0, \sigma _1 \sigma _3G^0 _{0 \nu }\rangle .
\end{eqnarray}
Here the elements in \eqref{equation:FGR12} will be eliminated
through a new change of variables. $\mathcal{E}_j$ is a remainder
term defined by
\begin{equation} \begin{aligned} & \mathcal{E}_j:=
\text{rhs\eqref{eq:FGR0}} -\text{\eqref{equation:FGR12}}- \text{\eqref{equation:FGR13}} .
\end{aligned} \nonumber
\end{equation}
Set
\begin{equation}\label{equation:FGR2} \begin{aligned} &
\zeta _j =z _j -\sum _{ \substack{ \lambda \cdot \beta > \omega
_0
\, , \,
\lambda \cdot \nu > \omega
_0 \\ \lambda \cdot \beta -\lambda _k < \omega _0 \, \forall
\, k \, \text{ s.t. } \beta _k\neq 0\\ \lambda \cdot \nu -\lambda
_k <\omega _0 \, \forall \, k \, \text{ s.t. } \nu _k\neq 0}}
\frac{ \nu _j}{\lambda ^0 \cdot (\beta +\nu) } \frac{ \overline{{z
}}^ {\nu +\beta } }{\overline{z}_j}\langle R_{ 0 \beta}^+
{ G} _{ 0 \beta }^0, \sigma _1 \sigma _3G _{0 \nu }^0\rangle \\&
+ \sum _{ \substack{ \lambda \cdot \alpha > \omega _0
\, , \,
\lambda \cdot \nu > \omega _0\\
\lambda ^{0}\cdot \alpha \neq
\lambda ^{0}\cdot \nu
\\ \lambda
\cdot \alpha -\lambda _k < \omega _0 \,
\forall \, k \, \text{ s.t.
} \alpha _k\neq 0\\ \lambda \cdot \nu -\lambda _k <\omega _0 \,
\forall \, k \, \text{ s.t. } \nu _k\neq 0}}\frac{ \nu _j}{\lambda
^0 \cdot (\alpha - \nu) } \frac{z ^{ \alpha } \overline{ z}^ { \nu
}}{\overline{z}_j} \langle R_{ \alpha 0 }^+ G^0 _{ \alpha 0},\sigma
_1 \sigma _3 G _{0 \nu }^0\rangle . \end{aligned}
\end{equation}
Notice that in \eqref{equation:FGR2}, by $\lambda \cdot \nu >
\omega _0$, we have $| {\nu} |>1$. Then by \eqref{4.4}
\begin{equation} \label{equation:FGR3} \begin{aligned} & \| \zeta -
z \| _{L^2_t} \le C \epsilon \sum _{\substack{ \lambda
\cdot \alpha > \omega
_0 \\ \lambda \cdot \alpha -\lambda _k < \omega _0 \, \forall
\, k \, \text{ s.t. } \alpha _k\neq 0 }} \| z ^{\alpha }\| _{L^2_t}
\le CC_2M\epsilon ^2\\& \| \zeta -
z \| _{L^\infty _t} \le C ^3\epsilon ^3
\end{aligned}
\end{equation}
with $C$ the constant in \eqref{L^inftydiscrete} and $M$ the number
of terms in the rhs. In the new variables \eqref{equation:FGR1} is
of the form
\begin{equation} \label{equation:FGR4} \begin{aligned} &
{\rm i} \dot \zeta
_j=
\partial _{\overline{\zeta}_j}H_2 (\zeta , f ) +
\partial _{\overline{\zeta}_j}Z_0 (\zeta , f )+ \mathcal{D}_j
\\ & -\sum _{ \substack{ \lambda ^0
\cdot \alpha =\lambda ^0\cdot \nu > \omega _0
\\ \lambda
\cdot \alpha -\lambda _k < \omega _0 \, \forall \, k \, \text{
s.t. } \alpha _k\neq 0\\ \lambda \cdot \nu -\lambda _k <\omega _{0} \,
\forall \, k \, \text{ s.t. } \nu _k\neq 0}} \nu _j \frac{\zeta ^{
\alpha } \overline{ \zeta}^ { \nu }}{\overline{\zeta}_j} \langle R_{
\alpha 0 }^+ G ^0_{ \alpha 0},\sigma _1 \sigma _3 G ^0_{0 \nu
}\rangle .
\end{aligned}
\end{equation}
From these equations by $\sum _j \lambda _j ^0 ( \overline{\zeta}_j
\partial _{\overline{\zeta}_j}(H_2+Z_0) - {\zeta}_j
\partial _{ {\zeta}_j}(H_2+Z_0) ) =0$ we get
\begin{equation} \label{eq:FGR5} \begin{aligned}
&\partial _t \sum _{j=1}^m \lambda _j ^0
| \zeta _j|^2 = 2 \sum _{j=1}^m \lambda _j^0{\mathcal I}m \left (
\mathcal{D}_j \overline{\zeta} _j \right ) -\\& -2 \sum _{
\substack{ \lambda ^0 \cdot \alpha =\lambda ^0\cdot \nu >
\omega _0
\\ \lambda
\cdot \alpha -\lambda _k < \omega _0 \, \forall \, k \, \text{
s.t. } \alpha _k\neq 0\\ \lambda \cdot \nu -\lambda _k <\omega _0
\, \forall \, k \, \text{ s.t. } \nu _k\neq 0}} \lambda ^0\cdot \nu
{\mathcal I}m \left ( \zeta ^{ \alpha } \overline{\zeta }^ { \nu } \langle
R_{ \alpha 0}^+ G_{ \alpha 0}^0, \sigma _1\sigma _3 G ^0 _{0\nu
}\rangle \right ) .
\end{aligned}
\end{equation}
We have the following lemma, whose proof (we skip) is similar to Appendix B
\cite{bambusicuccagna}.
\begin{lemma}
\label{lemma:FGR1} Assume inequalities \eqref{4.4}. Then for a
fixed constant $c_0$ we have
\begin{eqnarray}\label{eq:FGR7} \sum _j\|\mathcal{D}_j \overline{\zeta} _j\|_{
L^1[0,T]}\le (1+C_2)c_0 \epsilon ^{2}
. \end{eqnarray}
\end{lemma}
For the sum in the second line of \eqref{eq:FGR5}
we get
\begin{equation} \label{eq:FGR8} \begin{aligned} & 2\sum _{r>\omega
_0 } r
{\mathcal I}m \left \langle R_{
\mathcal{H}}^+ (r )\sum _{ \lambda ^0\cdot \alpha =r }\zeta ^{
\alpha } G _{ \alpha 0}^0, \sigma _1 \sigma _3\sum _{ \lambda
^0\cdot \nu =r}\overline{\zeta} ^{ \nu } G ^0_{0\nu
} \right \rangle =\\& 2\sum _{r>\omega
_0 } r {\mathcal I}m \left \langle R_{ \mathcal{H}}^+ (r )\sum _{
\lambda ^0\cdot \alpha =r }\zeta ^{ \alpha } G _{ \alpha 0}^0,
\sigma _3\overline{\sum _{ \lambda ^0\cdot \alpha =r}\zeta ^{
\alpha } G ^0_{ \alpha 0} }\right \rangle ,
\end{aligned}
\end{equation}
where we have used $G_{\mu \nu }^0=-\sigma _1 \overline{G^0}_{ \nu
\mu} $. Then we have the key structural result of this paper.
\begin{lemma}
\label{lemma:FGR8} We have rhs\eqref{eq:FGR8}$\ge 0.$
\end{lemma}
\proof First of all, it is not restrictive to assume $ G ^0_{ \alpha 0} = P_c(\omega _0) G ^0_{ \alpha 0} $. We have $ G ^0_{ \alpha 0} \in \mathcal{S}(\R ^3, \mathbb{C} ^2)$ for all $\alpha$. For
$W(\omega ) =\lim_{t\to\infty}e^{-it \mathcal{H}_\omega }e^{it\sigma_3
(-\Delta+\omega )}$, there exist $ F_{ \alpha } \in W^{k,p}(\R ^3, \mathbb{C} ^2)$
for all $k\in \R$ and $p\ge 1$ with
$ G ^0_{ \alpha 0} =W(\omega _0)F_\alpha $, \cite{Cu1}.
By standard theory, $R_{ \mathcal{H}}^+ (r )G ^0_{ \alpha 0}\in L^ {2,-s}(\R ^3, \mathbb{C} ^2)$ for any $s>1/2 $ and $r>\omega _0$. Let $\mathbf{G}=\sum _{
\lambda ^0\cdot \alpha =r}\zeta ^{ \alpha } G^0 _{ \alpha 0}$ and $\mathbf{F}=\sum _{
\lambda ^0\cdot \alpha =r}\zeta ^{ \alpha } F _{ \alpha }$. Let $^t{\mathbf{F}}=(\mathbf{F}_1,\mathbf{F}_2) $. Then, see Lemma 4.1 \cite{Cu2},
\begin{equation} \label{eq:FGR81} \begin{aligned} &
{\mathcal I}m \left \langle R_{ \mathcal{H}}^+ (r ) \mathbf{G},
\sigma _3\overline{ \mathbf{G} }\right \rangle =\lim _{\varepsilon \searrow 0} {\mathcal I}m \left \langle R_{ \mathcal{H}} (r +{\rm i} \varepsilon ) \mathbf{G},
\sigma _3\overline{ \mathbf{G} }\right \rangle \\& =\lim _{\varepsilon \searrow 0} {\mathcal I}m \left \langle R_{ \sigma _3(-\Delta +\omega _0)} (r +{\rm i} \varepsilon ) \mathbf{F} ,
\sigma _3\overline{ \mathbf{F}}\right \rangle \\& =\lim _{\varepsilon \searrow 0} {\mathcal I}m \left \langle R_{ -\Delta } (r-\omega _0 +{\rm i} \varepsilon ) \mathbf{F}_1 ,
\overline{ \mathbf{F}_1}\right \rangle \\& =\lim _{\varepsilon \searrow 0} \int _{\R ^3} \frac{\varepsilon}{(\xi ^2- (r-\omega _0))^2+\varepsilon ^2 } |\widehat{\mathbf{F}}_1 (\xi )|^2 d\xi \ge 0.
\end{aligned}
\end{equation}
\qed
Now we will assume the following hypothesis.
\begin{itemize}
\item[(H11)] We assume
that, for some fixed constants, for any vector $\zeta \in
\mathbb{C}^n$ we have:
\begin{equation} \label{eq:FGR} \begin{aligned} & \sum _{
\substack{ \lambda ^0 \cdot \alpha =\lambda ^0\cdot \nu >
\omega _0
\\ \lambda
\cdot \alpha -\lambda _k < \omega _0 \, \forall \, k \, \text{
s.t. } \alpha _k\neq 0\\ \lambda \cdot \nu -\lambda _k <\omega _0
\, \forall \, k \, \text{ s.t. } \nu _k\neq 0}} \lambda ^0\cdot \nu
{\mathcal I}m \left ( \zeta ^{ \alpha } \overline{\zeta }^ { \nu } \langle
R_{ \alpha 0}^+ G_{ \alpha 0}^0, \sigma _1\sigma _3 G ^0 _{0\nu
}\rangle \right )
\\&
\approx \sum _{ \substack{ \lambda ^0\cdot \alpha
> \omega _0
\\
\lambda ^0
\cdot \alpha -\lambda _k ^0 < \omega _0 \, \forall \, k \, \text{
s.t. } \alpha _k\neq 0}} | \zeta ^\alpha | ^2 .
\end{aligned}
\end{equation}
\end{itemize}
By (H11) we have
\begin{equation} \label{eq:FGR10} \begin{aligned} &
2\sum _j \lambda _j^0{\mathcal I}m \left ( \mathcal{D}_j \overline{\zeta} _j
\right )\gtrsim \partial _t \sum _j \lambda _j^0| \zeta _j|^2 + \\&
\sum _{ \substack{ \lambda ^0\cdot \alpha
> \omega _0
\\
\lambda ^0
\cdot \alpha -\lambda _k ^0 < \omega _0 \, \forall \, k \, \text{
s.t. } \alpha _k\neq 0}} | \zeta ^\alpha | ^2 .
\end{aligned}
\end{equation}
Then, for $t\in [0,T]$ and assuming Lemma \ref{lemma:FGR1}, we have
\begin{eqnarray}& \sum _j \lambda _j ^0 | \zeta
_j(t)|^2 +\sum _{ \substack{ \lambda ^0\cdot \alpha
> \omega _0
\\
\lambda ^0
\cdot \alpha -\lambda _k ^0 < \omega _0\, \forall \, k \, \text{
s.t. } \alpha _k\neq 0}} \| \zeta ^\alpha \| _{L^2(0,t)}^2\lesssim
\epsilon ^2+ C_2\epsilon ^2.\nonumber
\end{eqnarray}
By \eqref{equation:FGR3} this implies $\| z ^\alpha \|
_{L^2(0,t)}^2\lesssim \epsilon ^2+ C_2\epsilon ^2$ for all the above
multi-indices. So, from $\| z ^\alpha \| _{L^2(0,t)}^2\lesssim
C_2^2\epsilon ^2$ we conclude $\| z ^\alpha \|
_{L^2(0,t)}^2\lesssim C_2\epsilon ^2$. This means that we can take
$C_2\approx 1$. This yields Theorem \ref{proposition:mainbounds}.
\begin{remark}
\label{rem:genericity} Notice that by $r>\omega _0$, \eqref{eq:FGR}
appears generic. We do not try to prove this point. It
should not be hard; see for example the genericity result
Proposition 2.2 \cite{bambusicuccagna}.
\end{remark}
\begin{remark}
\label{rem:integrability} In general we expect Hypothesis (H11), or
higher order versions, to hold. Specifically, if at some step of the
normal form argument (H11) fails because some of the inequalities as
in Lemma \ref{lemma:FGR8} is an equality,
one can continue the normal form procedure and obtain some steps
later a new version of (H11). This will yield an analogue of
Theorem \ref{proposition:mainbounds}, with \ref{L^2discrete}
replaced by a similar but weaker inequality. We could have stated
(H11) and proved Theorem \ref{proposition:mainbounds} in this more
general form, but this would have complicated further the
presentation.
\end{remark}
\begin{remark}
\label{rem:excited states} If instead of ground states we consider standing
waves with nodes, and if $\dim N_g(\mathcal{H}_{\omega})=2 $ with \eqref{eq:Kernel}, if we assume (H1)--(H11) with \eqref{eq:1.2} in (H5) replaced by $\frac d {d\omega } \| \phi _ {\omega }\|^2_{L^2(\R^3)}\neq 0$,
if we assume $\sigma (\mathcal{H}_{\omega})\subset \R$, then
by \cite{Cu3} it is possible to prove that the Hamiltonian $K$ in Lemma \ref{lem:K} has quadratic part
\[K_2=\sum _{j=1}^{m} \gamma _j \lambda _j (\omega ) |z_j|^2+
\frac{1}{2} \langle \sigma _3 \mathcal{H}_{\omega } f, \sigma _1
f\rangle \]
with $\gamma _j$ equal to either 1 or $-1$ and with at least one
$\gamma _j=-1$ (in other words the energy has a saddle
at $\phi_{\omega }$ in the surface formed by the $ u$ with $\| u \| _{L^2}
=\| \phi_{\omega } \| _{L^2}$). Then a simple elaboration of the proof of the present paper, along the lines of sections 3 or 4 in \cite{Cu3},
can be used to strengthen Theorem 3.2 \cite{Cu3}
showing that $\phi_{\omega } $ is orbitally unstable. Furthermore, following the argument in \cite{Cu3},
it can be shown that if a solution $u(t)$ remains close to ground states as $t\nearrow +\infty$ (resp. $t\searrow -\infty$), it actually scatters to ground states, that is \eqref{scattering} and \eqref{Strichartz} with the plus (resp. minus) sign.
\end{remark}
\begin{thebibliography}{CP03}
\bibitem[BC]{bambusicuccagna}
D.Bambusi, S.Cuccagna, {\em On dispersion of
small energy solutions of the nonlinear Klein Gordon equation with a
potential}, http://www.dismi.unimore.it/Members/scuccagna/pubblicazioni/
NLKGsmall11.pdf/view.
\bibitem[BP1]{BP1}
V.Buslaev, G.Perelman, {\em Scattering for the nonlinear
Schr\"odinger equation: states close to a soliton\/}, St.
Petersburg Math.J., 4
(1993), pp. 1111--1142.
\bibitem[BP2]{BP2}
V.Buslaev, G.Perelman, {\em On the stability of solitary waves for
nonlinear Schr\"odinger equations\/}, Nonlinear evolution
equations, editor N.N. Uraltseva, Transl. Ser. 2, 164, Amer. Math.
Soc.,
pp. 75--98, Amer. Math. Soc., Providence (1995).
\bibitem[BS]{BS} V.S.Buslaev, C.Sulem, {\em On the asymptotic
stability of solitary waves of Nonlinear Schr\"odinger equations},
Ann. Inst. H. Poincar\'e. An. Nonlin., {20} (2003), pp. 419--475.
\bibitem[CL]{CL} T.Cazenave, P.L.Lions, {\em Orbital stability of
standing waves for nonlinear Schr\"odinger equations }, Comm.
Math. Phys. 85 (1982), 549--561.
\bibitem[Cu1]{Cu1}
S.Cuccagna, {\em Stabilization of solutions to nonlinear
Schr\"odinger equations}, Comm. Pure App. Math. {54} (2001), pp.
1110--1145, erratum Comm. Pure Appl. Math. 58 (2005), p. 147.
\bibitem[Cu2]{Cu2} S.Cuccagna, {\em On asymptotic stability
of ground states of NLS}, Rev. Math. Phys. {15} (2003), pp.
877--903.
\bibitem[Cu3]{Cu3} S.Cuccagna, {\em On instability of excited states of
the nonlinear Schr\"odinger equation},Physica D, 238
(2009), pp. 38--54.
\bibitem[CM]{cuccagnamizumachi}
S.Cuccagna, T.Mizumachi, {\em On asymptotic stability in energy
space of ground states for Nonlinear Schr\"odinger equations\/},
Comm. Math. Phys., 284
(2008), pp. 51--87.
\bibitem[CPV]{CPV} S.Cuccagna, D.Pelinovsky, V.Vougalter, {\em
Spectra of positive and negative energies in the linearization of
the NLS problem}, Comm. Pure Appl. Math. {58} (2005), pp. 1--29.
\bibitem[CT]{cuccagnatarulli} S.Cuccagna, M.Tarulli, {\em On asymptotic stability of standing
waves of discrete
Schr\"odinger equation in $ Z$} , SIAM J. Math. Anal. 41,
(2009), pp. 861-885
\bibitem[Gz]{Gz} Zhou Gang, {\em Perturbation Expansion and N-th
Order Fermi Golden Rule of the Nonlinear Schr\"odinger Equations
\/}, J. Math. Phys., {48}( 2007), p. 053509
\bibitem[GS]{zhousigal}
Zhou Gang, I.M.Sigal, {\em Relaxation of Solitons in Nonlinear
Schr\"odinger Equations with Potential \/}, Advances in
Math.,
216 (2007), pp. 443-490.
\bibitem[GW1]{zhouweinstein1}
Zhou Gang, M.I.Weinstein, {\em Dynamics of Nonlinear
Schr\"odinger/Gross-Pitaeskii Equations; Mass transfer in Systems
with Solitons and Degenerate Neutral Modes\/}, Anal. PDE 1 (2008),
pp. 267--322.
\bibitem[GW2]{zhouweinstein2}
Zhou Gang, M.I.Weinstein, {\em Equipartition of Energy in Nonlinear
Schr\"odinger/Gross-Pitaeskii Equations \/}, in preparation.
\bibitem[GSS1]{GSS1} M.Grillakis, J.Shatah, W.Strauss, {\em Stability
of solitary waves in the presence of symmetries, I }, Jour. Funct.
An. {74} (1987), pp.160--197.
\bibitem[GSS2] {GSS2} M.Grillakis, J.Shatah, W.Strauss,
{\em Stability of solitary waves in the presence of symmetries, II},
Jour. Funct. An. {94} (1990), pp. 308--348.
\bibitem[GNT]{GNT} S.Gustafson, K.Nakanishi, T.P.Tsai, {\em
Asymptotic Stability and Completeness in the Energy Space for
Nonlinear Schr\"odinger Equations with Small Solitary Waves }, Int.
Math. Res. Notices {66} (2004), pp. 3559--3584.
\bibitem[JSS]{JSS}
J.L.Journe, A.Soffer, C.D.Sogge,
{\em Decay estimates for Schrodinger operators }, Comm.P. Appl. Mat.
44 (1991), pp. 573--604.
\bibitem[K]{kato}
T.Kato, {\em Wave operators and similarity for some non-selfadjoint
operators \/}, Math. Annalen, 162
(1966), pp.
258--269.
\bibitem[KS]{KS} J.Krieger, W.Schlag, {\em Stable manifolds for all
monic supercritical focusing nonlinear Schr\"odinger equations in
one dimension\/}, J. Amer. Math. Soc., 19 (2006), pp. 815--920.
\bibitem[HW]{HW}
A.Hoffman, C. E. Wayne, {\em Asymptotic two-soliton solutions in
the Fermi-Pasta-Ulam model\/}, J. Dynam. Differential Equations 21
(2009), pp. 343--351.
\bibitem[MM1]{MM1} Y.Martel, F.Merle, {\em Asymptotic stability of solitons of the
gKdV equations with general nonlinearity \/}, Math. Ann. 341
(2008), pp. 391--427.
\bibitem[MM2]{MM2} Y.Martel, F.Merle, {\em Stability of two soliton collision for
nonintegrable gKdV equations\/}, Comm. Math. Phys. 286 (2009),
pp. 39--79.
\bibitem[MMT]{MMT}
Y.Martel, F.Merle, T.P.Tsai, {\em Stability in $H^ 1$ of the sum of
$K$ solitary waves for some nonlinear Schrödinger equations\/}, Duke
Math. J. 133 (2006), pp. 405--466.
\bibitem[MR]{MR} F.Merle, P.Raphael, {\em On a sharp lower bound on the blow-up rate for the $L^ 2$ critical nonlinear
Schr\"odinger equation\/}, J. Amer. Math. Soc. 19 (2006), pp.
37--90.
\bibitem[M1]{M1} T.Mizumachi, {\em Asymptotic stability of small
solitons to 1D NLS with potential }, Jour. of Math. Kyoto
University, 48 (2008), pp. 471-497.
\bibitem[M2]{M2} T.Mizumachi, {\em Asymptotic stability of small solitons
for 2D Nonlinear Schr\"{o}dinger equations with potential}, Jour. of
Math. Kyoto University, 43 (2007), pp. 599-620.
\bibitem[M3]{M3} T.Mizumachi, {\em Asymptotic stability
of N-solitons of the FPU lattices}, arXiv:0906.1320v1.
\bibitem[PW]{PW} R.L.Pego, M.I.Weinstein, {\em Asymptotic
stability of solitary
waves} Comm. Math. Phys. {164} (1994), pp.
305--349.
\bibitem[PiW]{PiW}
C.A.Pillet, C.E.Wayne, {\em Invariant manifolds for a class of
dispersive, Hamiltonian partial differential equations} J. Diff. Eq.
141 (1997), pp. 310--326.
\bibitem[S]{Schlag} W.Schlag, {\em Stable manifolds for an orbitally
unstable NLS \/}, Ann. of Math. 169 (2009), pp. 139--227
\bibitem[Si]{sigal} I.M.Sigal, {\em Nonlinear wave and Schr\"odinger
equations. I. Instability of periodic and quasi- periodic solutions
}, Comm. Math. Phys. 153 (1993), pp. 297--320.
\bibitem[ShS]{shatahstrauss} J.Shatah, W.Strauss
{\em Instability of nonlinear bound states}, Comm. Math.
Phys. 100 (1985), pp. 173--190
\bibitem[SW1]{SW1} A.Soffer, M.I.Weinstein, {\em Multichannel nonlinear
scattering for nonintegrable equations \/}, Comm. Math. Phys., 133
(1990), pp. 116--146
\bibitem[SW2]{SW2}
A.Soffer, M.I.Weinstein, {\em Multichannel nonlinear scattering II.
The case of anisotropic potentials and data \/}, J. Diff. Eq., 98
(1992), pp.
376--390.
\bibitem[SW3]{SW3}
A.Soffer, M.I.Weinstein, {\em Resonances, radiation damping and
instability in Hamiltonian nonlinear wave equations \/}, Invent.
Math., 136
(1999), pp.
9--74.
\bibitem[SW4]{SW4} A.Soffer, M.I.Weinstein,
{\em Selection of the ground state for nonlinear Schr\"odinger
equations }, Rev. Math. Phys. 16 (2004), pp. 977--1071.
\bibitem[St]{strauss}
W.Strauss, {\em Nonlinear wave equations}, CBMS Regional Conf.
Ser. Mat. AMS 76 (1989).
\bibitem[T]{T} T.P.Tsai, {\em Asymptotic dynamics of nonlinear
Schr\"odinger equations with many bound states}, J. Diff. Eq.
{192} (2003), pp. 225--282.
\bibitem[TY1]{TY1}
T.P.Tsai, H.T.Yau, {\em Asymptotic dynamics of nonlinear
Schr\"odinger equations: resonance dominated and radiation dominated
solutions}, Comm. Pure Appl. Math. {55} (2002), pp. 153--216.
\bibitem[TY2]{TY2}
T.P.Tsai, H.T.Yau, {\em Relaxation of excited states in
nonlinear Schr\"odinger equations}, Int. Math. Res. Not. {31}
(2002), pp. 1629--1673.
\bibitem[TY3]{TY3}
{ T.P.Tsai, H.T.Yau}, {\em Classification of asymptotic profiles
for nonlinear Schr\"odinger equations with small initial data}, Adv.
Theor. Math. Phys. {6} (2002), pp. 107--139.
\bibitem[W1]{W1} M.I.Weinstein, {\em Lyapunov stability of ground
states of nonlinear dispersive equations}, Comm. Pure Appl. Math.
39 (1986), pp. 51--68.
\bibitem[W2]{W2}
M.I.Weinstein, {\em Modulation stability of ground states of
nonlinear Schr\"odinger equations}, Siam J. Math. Anal. 16 (1985),
pp. 472--491.
\bibitem[Y1]{Y1} K.Yajima, The $W^{k,p}$-continuity of wave operators
for Schr\"{o}dinger operators, J. Math. Soc. Japan, {47} (1995),
pp. 551--581.
\bibitem[Y2]{Y2} K.Yajima, The $W^{k,p}$-continuity of wave operators
for Schr\"{o}dinger operators III., J. Math. Sci. Univ. Tokyo,
{2} (1995), pp. 311--346.
\end{thebibliography}
DISMI University of Modena and Reggio Emilia, Via Amendola 2, Pad.
Morselli, Reggio Emilia 42122, Italy.
{\it E-mail Address}: {\tt [email protected]}
\end{document} |
\begin{document}
\title{Radio numbers for generalized prism graphs}
\date{\today}
\author{Paul Martinez}
\address{\hskip-\parindent
Paul Martinez\\
California State University Channel Islands.}
\email{[email protected]}
\author{Juan Ortiz}
\address{\hskip-\parindent
Juan Ortiz\\
Lehigh University.}
\email{[email protected]}
\author{Maggy Tomova}
\address{\hskip-\parindent
Maggy Tomova\\
The University of Iowa.}
\email{[email protected]}
\author{Cindy Wyels}
\address{\hskip-\parindent
Cindy Wyels\\
California State University Channel Islands.}
\email{[email protected]}
\keywords{radio number, radio labeling, prism graphs}
\thanks{This research was initiated under the auspices of an MAA (SUMMA) Research Experience for Undergraduates program funded by NSA, NSF, and Moody’s, and hosted at CSU Channel Islands during Summer, 2006. We are grateful to all for the opportunities provided.}
\begin{abstract}
A radio labeling is an assignment $c:V(G)
\rightarrow \textbf{N}$ such that every distinct pair of vertices $u,v$ satisfies the inequality
$d(u,v)+|c(u)-c(v)|\geq \diam(G)+1$. The span of a radio labeling is the maximum label value it assigns. The radio number of
$G$, $rn(G)$, is the minimum span over all radio labelings of $G$.
Generalized prism graphs, denoted $Z_{n,s}$, $s \geq 1$, $n\geq s$, have vertex set $\{(i,j)\,|\, i=1,2 \text{ and } j=1,...,n\}$ and edge set $\{((i,j),(i,j \pm 1))\} \cup \{((1,i),(2,i+\sigma))\,|\,\sigma=-\left\lfloor\frac{s-1}{2}\right\rfloor\,\ldots,0,\ldots,\left\lfloor\frac{s}{2}\right\rfloor\}$. In this
paper we determine the radio number of $Z_{n,s}$ for $s=1,2$ and 3. In the process we
develop techniques that are likely to be of use in determining radio numbers of other families of graphs.
\noindent \textbf{2000 AMS Subject Classification:} 05C78 (05C15)
\end{abstract}
\maketitle
\section{Introduction}
Radio labeling is a graph labeling problem, suggested by Chartrand et al.~\cite{CEHZ}, that is analogous to assigning frequencies to FM channel stations so as to avoid signal interference. Radio stations that are close geographically must have frequencies that are very different, while radio stations with large geographical separation may have similar frequencies. Radio labeling for a number of families of graphs has been studied, for example see \cite{hypercube,m-ary trees,trees,DaphneSquare, square paths,multilevel,cycles}. A survey of known results about radio labeling can be found in \cite{survey}. In this paper we determine the radio number of certain generalized prism graphs.
All graphs we consider are simple and connected. We denote by $V(G)$ the vertices of $G$. We use $d_G(u,v)$ for the length of the shortest path in $G$ between $u$ and $v$. The diameter of $G$, $\diam(G)$, is the maximum distance in $G$. A {\em radio labeling} of $G$ is a function $c_G$ that assigns to each vertex $u \in V(G)$ a positive integer $c_G(u)$ such that any two distinct vertices $u$ and $v$ of $G$ satisfy the radio condition:
\[d_G(u,v)+|c_G(u)-c_G(v)|\geq \diam(G)+1.\]
The {\em span} of a radio labeling is the maximum value of $c_G$. Whenever $G$ is clear from context, we simply write $c(u)$ and $d(u,v)$. The {\em radio number} of $G$, $rn(G)$, is the minimum span over all possible radio labelings of $G$\footnote{We use the convention that $\textbf{N}$ consists of the positive integers. Some authors let $\textbf{N}$ include $0$, with the result that radio numbers using this definition are one less than radio numbers determined using the positive integers.}.
In this paper we determine the radio number of a family of graphs that consist of two $n$-cycles together with some edges connecting vertices from different cycles. The motivating example for this family of graphs is the prism graph, $Z_{n,1}$, which is the Cartesian product of $P_2$, the path on $2$ vertices, and $C_n$, the cycle on $n$ vertices. In other words, a prism graph consists of $2$ $n$-cycles with vertices labeled $(1, i), i=1,\ldots,n$ and $(2, i), i=1,\ldots,n$ respectively together with all edges between pairs of vertices of the form $(1,i)$ and $(2,i)$. Generalized prism graphs, denoted $Z_{n,s}$ have the same vertex set as prism graphs but have additional edges. In particular, vertex $(1,i)$ is also adjacent to
$(2,i+\sigma)$ for each $\sigma$ in $\{-\left\lfloor\frac{s-1}{2}\right\rfloor\,\ldots,0,\ldots,\left\lfloor\frac{s}{2}\right\rfloor\}$, see Definition \ref{def:gpg}.
\noindent\textbf{Main Theorem:} {\em Let $Z_{n,s}$ be a generalized prism graph with $1\leq s \leq 3$, and $(n,s)
\neq (4,3)$. Let $n=4k+r$, where $k \geq 1$, and $r=0,1,2,3$. Then \[rn(Z_{n,s})= (n-1)\phi(n,s)+2.\] where $\phi(n,s)$ is given in the following table:}
\begin{center}
$\phi(n,s):\quad$
\begin{tabular}{c|c|c|c}
{} & {\bf \textit{s}=1} & {\bf \textit{s}=2} & {\bf \textit{s}=3} \\ \hline {\bf \textit{r}=0}& {$k+2$} & $k+1$ &
{$k+2$} \\ \hline {\bf \textit{r}=1} & {$k+2$} & $k+2$ & {$k+1$} \\ \hline {\bf \textit{r}=2} & {$k+3$} & $k+2$
&{$k+2$} \\ \hline {\bf \textit{r}=3}& {$k+2$} & $k+3$ & {$k+2$} \\
\end{tabular}
\end{center}
{\em In addition, $rn(Z_{3,3})=6$ and $rn(Z_{4,3})=9$.}
\section{Preliminaries}
We will use pair notation to identify the vertices of the graphs with the first coordinate identifying the cycle, $1$ or $2$, and the second coordinate identifying the position of the vertex within the cycle, $1,...,n$. To avoid complicated notation, identifying a vertex as $(i,j)$ will always imply that the first coordinate is taken modulo 2 with $i \in \{1,2\}$ and the second coordinate is taken modulo $n$ with $j \in \{1,...,n\}$.
\begin{defin} \label{def:gpg}A generalized prism graph, denoted $Z_{n,s}$, $s \geq 1$, $n\geq s$, has vertex set $\{(i,j)\,|\, i=1,2 \textrm{ and } j=1,...,n\}$. Vertex $(i,j)$ is adjacent to $(i,j \pm 1)$. In addition, $(1,i)$ is adjacent to
$(2,i+\sigma)$ for each $\sigma$ in $\{-\left\lfloor\frac{s-1}{2}\right\rfloor,\ldots,0,\ldots,\left\lfloor\frac{s}{2}\right\rfloor\}$.
The two $n$-cycle subgraphs of $Z_{n,s}$ induced by 1) all vertices of the form $(1,j)$ and 2) all vertices of the form $(2,j)$ are called {\em principal cycles}.
\end{defin}
In this notation, the prism graphs $C_n \square P_2$ are $Z_{n,1}$. We note that $Z_{n,2}$ graphs are isomorphic to
the squares of even cycles, $C_{2n} ^2$, whose radio number is determined in \cite{DaphneSquare}. The graphs
$Z_{8,1}$, $Z_{8,2}$, and $Z_{8,3}$ are illustrated in Figure \ref{fig:3graphpic}.
\begin{figure}
\caption{$Z_{8,1}$, $Z_{8,2}$, and $Z_{8,3}$.}
\label{fig:3graphpic}
\end{figure}
\begin{rmk} \label{rmk:radius} Note that $\textrm{diam}(Z_{n,s})=\left\lfloor\frac{n+3-s}{2} \right\rfloor$ for $s=1,2,3$.
\end{rmk}
Our general approach to determining the radio number of $Z_{n,s}$ consists of two steps. We first establish a lower
bound for the radio number. Suppose $c$ is a radio labeling of the vertices of $Z_{n,s}$. We can rename the vertices
of $Z_{n,s}$ with $\{\alpha_i\,|\,i=1,...,2n\}$, so that $c(\alpha_i)<c(\alpha_j)$ whenever $i<j$. We determine the
minimum label difference between $c(\alpha_i)$ and $c(\alpha_{i+2})$, denoted $\phi(n,s)$, and use it to establish
that $rn(Z_{n,s}) \geq 2+(n-1)\phi(n,s)$. We then demonstrate an algorithm that establishes that this lower bound is
in fact the radio number of the graph. We do this by defining a {\em position function} $p:\{\alpha_i
\,|\, i=1,...,2n\} \rightarrow V(G)$ and a {\em labeling function} $c:\{\alpha_i\} \rightarrow Z_+$ that has span $(n-1)\phi(n,s)+2$. We
prove that $p$ is a bijection, i.e., every vertex is labeled exactly once, and that all pairs of vertices together with the labeling $c \circ p^{-1}$ satisfy the
radio condition.
Some small cases of generalized prism graphs with $s=3$ do not follow the general pattern, so we discuss these first. First note that
$Z_{3,3}$ has diameter $1$ and thus can be radio-labeled using consecutive integers, i.e., $rn(Z_{3,3})=6$. To
determine $rn(Z_{4,3})$, note that the diameter of $Z_{4,3}$ is 2. Therefore the radio number of $Z_{4,3}$ is the same as the $L(2,1)$-number of the graph as defined in \cite{L21}. This prism graph is isomorphic to the join of two copies of $K_2\cup K_2$ where $K_2 \cup K_2$ is the disconnected graph with two components each with 2 vertices and one edge. By \cite{L21}, it follows that $rn(Z_{4,3})=9$. (We thank the referee for pointing out this proof.)
To simplify many of the computations that follow, we make use of the existence of certain special cycles in the
graphs.
\begin{defin} Suppose a graph $G$ contains a subgraph $H$ isomorphic to
a cycle, and let $v \in V(H)$.
\begin{itemize}
\item We will call $H$ a $v$-tight cycle if for every $u\in V(H)$, $d_G(u,v)=d_H(u,v)$.
\item We will call $H$ a tight cycle if for every pair of vertices $u,w$ in $H$, $d_G(u,w)=d_H(u,w)$.
\end{itemize}
\end{defin}
\noindent We note that $H$ is a tight cycle if and only if $H$ is $v$-tight for every $v \in V(H)$.
\begin{rmk}\label{rmk:principletightcycles}
Each of the two principal $n$-cycles is tight.
\end{rmk}
Particular $v$-tight cycles of maximum length play an important role in our proofs. Figure \ref{fig:3graphpic} uses bold edges to indicate a particular $(1,1)$-tight cycle of maximum length for each of the three types of graphs. The figure illustrates these cycles in the particular case when $n=8$ but it is easy to generalize the construction to any $n$. We will call these particular maximum-length $(1,1)$-tight cycles in $Z_{n,s}$ {\em standard}. Thus each generalized prism graph with $1 \leq s\leq 3$ has a standard cycle. For convenience, we will use a second set of names for the vertices of a standard cycle when focusing on properties of, or distance within, the standard cycles. The vertices of a standard cycle for $Z_{n,s}$ will be labeled $X^s_i$, $i=1,..,n+3-s$, where
\[X^1_i= \left\{
\begin{array}{ll}
(1,1), & \hbox{if $i=1$,} \\
(1,2), & \hbox{if $i=2$,}\\
(2,i-1), & \hbox{if $i \geq 3$.}
\end{array}
\right.\]
and for $s=2,3$,
\[X^s_i= \left\{
\begin{array}{ll}
(1,1), & \hbox{if $i=1$,} \\
(2,i), & \hbox{if $i \geq 2$.}
\end{array}
\right.\]
These labels are illustrated in Figure \ref{fig:3graphpic}.
\begin{rmk} \label{rmk:standardtightcycles} The standard cycles depicted in Figure \ref{fig:3graphpic} are $(1,1)$-tight and have $n+3-s$ vertices. Therefore each standard cycle has diameter equal to the diameter of its corresponding $Z_{n,s}$ graph.
\end{rmk}
\section{Lower Bound}
Suppose $c$ is a radio labeling of $G$ with minimum span. Intuitively, building such a labeling requires one to find
groups of vertices that are pairwise far from each other so they may be assigned labels that have small pairwise
differences. The following lemma will be used to determine the maximal pairwise distance in a group of 3
vertices in $Z_{n,s}$. This leads to Lemma \ref{lem:phifunct}, in which we determine the minimum difference
between the largest and smallest label in any group of 3 vertex labels.
\begin{lemma} \label{lem:vexdis4AP}
Let $\{u,v,w\}$ be any subset of size 3 of $V(Z_{n,s})$, $1\leq s\leq 3$, with the exception of $\{(1,j), (2,j),(i,l)\}$ in
$V(Z_{n,3})$. Then $d(u,v)+d(v,w)+d(u,w)\leq n+3-s$.
\end{lemma}
\begin{proof} Note that if $u$, $v$, and $w$ lie on a cycle of length $t$, then $d(u,v)+d(v,w)+d(u,w) \leq t$.
If $u$,$v$, and $w$ lie on the same principal $n$-cycle, the desired result follows immediately, as all
three vertices lie on a cycle of length $n$, and $1\leq s\leq 3$.
Suppose $u$, $v$, and $w$ do not all lie on the same principal $n$-cycle. Without loss of generality, assume $u=(1,1)$,
and $v$ and $w$ lie on the second principal $n$-cycle. Then for $s=1\textrm{ or } 2$, the standard cycle includes
$(1,1)$ and all vertices $(2,i)$, so $v$ and $w$ lie on the standard cycle. For $s=3$, the standard cycle includes all
vertices $(2,i)$, $i>1$. As the triple $\{(1,j), (2,j),(i,l)\}$ in $V(Z_{n,3})$ was eliminated in the hypothesis, it
follows that for $s\leq 3$ all three vertices lie on the appropriate standard cycle. As the standard cycle in each
case is of length $n-s+3$, the result follows as above.
\end{proof}
\begin{lemma} \label{lem:phifunct}
Let $c$ be a radio labeling of $Z_{n,s}$, $1\leq s \leq 3$ and $n=4k+r$, where $k \geq 1$, $r=0,1,2,3$, and $(n,s)
\neq (4,3)$. Suppose $V(G)=\{\alpha_i \,|\, i=1,...,2n\}$ and $c(\alpha_i)< c(\alpha_{j})$ whenever $i<j$. Then we
have $|c(\alpha_{i+2})-c(\alpha_{i})| \geq \phi(n,s)$, where the values of $\phi(n,s)$ are given in the following
table.
\begin{center}
$\phi(n,s):\quad$
\begin{tabular}{c|c|c|c}
{} & {\bf \textit{s}=1} & {\bf \textit{s}=2} & {\bf \textit{s}=3} \\ \hline {\bf \textit{r}=0}& {$k+2$} & $k+1$ &
{$k+2$} \\ \hline {\bf \textit{r}=1} & {$k+2$} & $k+2$ & {$k+1$} \\ \hline {\bf \textit{r}=2} & {$k+3$} & $k+2$
&{$k+2$} \\ \hline {\bf \textit{r}=3}& {$k+2$} & $k+3$ & {$k+2$} \\
\end{tabular}
\end{center}
\end{lemma}
\begin{proof}
First assume $\{\alpha_{i}, \alpha_{i+1}, \alpha_{i+2}\}$ are any three vertices in any generalized prism graph with
$1\leq s\leq 3$ except $\{(1,j), (2,j),(i,l)\}$ in $V(Z_{n,3})$.
Apply the radio condition to each pair in the vertex set $\{\alpha_{i}, \alpha_{i+1},
\alpha_{i+2}\}$ and take the sum of the three inequalities. We obtain
\begin{eqnarray}
&d(\alpha_{i+1},\alpha_{i})&+d(\alpha_{i+2},\alpha_{i+1})+d(\alpha_{i+2},\alpha_{i}) \nonumber \\
&&+|c(\alpha_{i+1})-c(\alpha_i)|+|c(\alpha_{i+2})-c(\alpha_{i+1})|+|c(\alpha_{i+2})-c(\alpha_i)|\nonumber \\
&&\geq 3\,\diam(Z_{n,s})+3.
\end{eqnarray}
We drop the absolute value signs because $c(\alpha_i) < c(\alpha_{i+1}) < c(\alpha_{i+2})$, and use Lemma
\ref{lem:vexdis4AP} to rewrite the inequality as
\[c(\alpha_{i+2})-c(\alpha_i) \geq
\frac{1}{2}\left(3+3\,\diam(Z_{n,s})-(n+3-s)\right).\]
The table in the statement of the lemma has been generated by substituting the appropriate values for $\diam(Z_{n,s})$ from Remark \ref{rmk:radius} and simplifying. As the computations are straightforward but tedious, they are not included.
It remains to consider the case $\{\alpha_{i}, \alpha_{i+1}, \alpha_{i+2}\}=\{(1,j), (2,j),(i,l)\}$ in
$V(Z_{n,3})$. From the radio condition, it follows that
\[d\left((1,j),(2,j)\right)+|c(1,j)-c(2,j)|\geq \left\lfloor \frac{n}{2} \right\rfloor+1,\]
and so
\[|c(1,j)-c(2,j)|\geq \left\lfloor \frac{n}{2}\right\rfloor \geq 2k.\]
Thus we may conclude
\[|c(\alpha_{i+2})-c(\alpha_i)|\geq
|c(1,j)-c(2,j)| \geq 2k.\]
If $k \geq 2$, then
\[|c(\alpha_{i+2})-c(\alpha_i)|\geq 2k \geq k+2 \geq \phi(n,3).\]
If $k=1$, recall that $Z_{4,3}$ is excluded in the hypothesis. It is easy to verify that for $n=5,6,7$, $\phi(n,3)=\left\lfloor
\frac{n}{2}\right\rfloor$.
\end{proof}
\begin{rmk} \label{rmk:phi}
For all values of $n$ and $1\leq s\leq 3$, $2\phi(n,s)\geq \diam(Z_{n,s})$.
\end{rmk}
\begin{thm} \label{thm:lowerbound}
For every graph $Z_{n,s}$ with $1\leq s\leq 3$,
\[rn(Z_{n,s})\geq (n-1)\phi(n,s)+2.\]
\end{thm}
\begin{proof}
We may assume $c(\alpha_{1})=1$. By Lemma \ref{lem:phifunct}, $|c(\alpha_{i+2})-c(\alpha_{i})| \geq \phi(n,s)$, so
$c(\alpha_{2i-1})=c(\alpha_{1+2(i-1)})\geq (i-1)\phi(n,s)+1$. Note that all generalized prism graphs have $2n$
vertices. As
\[c(\alpha_{2n-1})\geq(n-1)\phi(n,s) +1,\] we have
\[c(\alpha_{2n})\geq c(\alpha_{2n-1})+1= (n-1)\phi(n,s)+2.\]
\end{proof}
\section{Upper Bound}
To construct a labeling for $Z_{n,s}$ we will define a position function $p :\{ \alpha_i \,|\, i=1,...,2n\}
\rightarrow V(Z_{n,s})$ and then a labeling function $c: \{\alpha_i \,|\, i=1,...,2n\} \rightarrow \textbf{N}$. The
composition $c\circ p^{-1}$ gives an algorithm to label $Z_{n,s}$, and this labeling has span equal to the lower bound
found in Theorem \ref{thm:lowerbound}. The labeling function depends only
on the function $\phi(n,s)$ defined in Lemma \ref{lem:phifunct}.
\begin{defin} \label{defin:labeling}
Let $\{\alpha_i \,|\, i=1,...,2n\}$ be the vertices of $Z_{n,s}$. Define $c:\{\alpha_i \,|\, i=1,...,2n\}\rightarrow
\textbf{N}$ to be the function
\[c(\alpha_{2i-1})=1+(i-1)\phi(n,s)\textrm{, and}\]
\[c(\alpha_{2i})=2+(i-1)\phi(n,s).\]
\end{defin}
Suppose $f$ is any labeling of any graph $G$. If for some $u,v \in V(G)$ the inequality $|f(u)-f(v)| \geq \textrm{diam}(G)$ holds, then the radio condition is always satisfied for $u$ and $v$. The next lemma uses this property to limit the number of vertex pairs for which it must be checked that the labeling $c$ of Definition \ref{defin:labeling} satisfies the
radio condition.
\begin{lemma}\label{lem:fcol}
Let $\{\alpha_i \,|\, i=1,...,2n\}$ be the vertices of $Z_{n,s}$ and $c$ be the labeling of Definition
\ref{defin:labeling}. Then whenever $|l-k|\geq 4$, $d(\alpha_l,\alpha_k)+|c(\alpha_l)-c(\alpha_k)|\geq
\textrm{diam}(Z_{n,s})+1$.
\end{lemma}
\begin{proof}
Without loss of generality, let $l>k$. Since $c(\alpha_{k+4})\leq c(\alpha_l)$, it follows that
\[c(\alpha_l)-c(\alpha_k)\geq c(\alpha_{k+4})-c(\alpha_k)=2\phi(n,s).\]
From Remark \ref{rmk:phi} it follows that
\[|c(\alpha_l)-c(\alpha_k)|+d(\alpha_l,\alpha_k)\geq 2\phi(n,s)+1\geq \textrm{diam}(Z_{n,s})+1.\]
\end{proof}
We will need to consider four different position functions depending on $n$ and $s$. Each of these position functions together with the labeling function in Definition \ref{defin:labeling} gives an algorithm for labeling a particular $Z_{n,s}$.
\begin{center}
\textbf{Case 1: $n=4k+r$, $r=1,2,3$ and $s\leq 3$}
\textbf{except $n=4k+2$ when $k$ is even and $s=3$} \end{center}
The idea is to find a position function which allows pairs of consecutive integers to be used as labels as often as possible. To use consecutive integers we need to find pairs of vertices in $Z_{n,s}$ with distance equal to the diameter. We will do this by taking advantage of the standard cycles for each value of $s$.
\begin{lemma} \label{lem:diameteraway}
For all $n \geq 3$ and $s\leq3$, $d\left((1,y),(2, y+D)\right)=\diam(Z_{n,s})$ where
\[D=\left\{
\begin{array}{ll}
\left\lfloor
{\frac{n+1}{2}}\right\rfloor, & \hbox{for $s=1 \textrm{ and } 3$, } \\ \\
\left\lfloor
{\frac{n+2}{2}}\right\rfloor, & \hbox{for $s=2$.}
\end{array}
\right.\]
\end{lemma}
\begin{proof}
Without loss of generality we may assume that
$(1,y)=(1,1)$. Consider the standard cycle in $Z_{n,s}$. Then $(1,1)=X^s_1$ and $(2, 1+D)=X^s_{\left\lfloor
\frac{n+3-s+1}{2}\right\rfloor+1}$. The result follows by the observation that the standard cycle in each case is
isomorphic to $C_{n+3-s}$.
\end{proof}
The position function for Case 1 is
\begin{eqnarray}
p_1(\alpha_{2i-1})&=\left(1,1+\omega(i-1)\right) \textrm{ and} \nonumber \\
p_1(\alpha_{2i})&=\left(2,1+D+\omega(i-1)\right),
\end{eqnarray}
where $D$ is as defined in Lemma \ref{lem:diameteraway} and
\[\omega=\left\{
\begin{array}{ll}
k, & \hbox{if $n=4k+2$ when $k$ is odd, or $n=4k+1$,} \\
k+1, & \hbox{if $n=4k+2$ when $k$ is even, or $n=4k+3$.}
\end{array}
\right.
\]
\begin{lemma} \label{lem:bijection}
The function $p_1:\{\alpha_j\,|\,j=1,...,2n\}\rightarrow V(Z_{n,s})$ is a bijection.
\end{lemma}
\begin{proof}
Suppose $p_1(\alpha_a)=p_1(\alpha_b)$ with $a>b$. Let $i=\left\lfloor \frac{a}{2}\right\rfloor$ and $j=\left\lfloor
\frac{b}{2}\right\rfloor$. As $p_1(\alpha_a)$ and $p_1(\alpha_b)$ have the same first coordinate, $a$ and $b$ have the same
parity. Examining the second coordinates we can conclude that $i \omega \equiv j \omega \mod n$ or $(i -j)\omega
\equiv 0 \mod n$.
By Euclid's algorithm, $k$ is co-prime to $4k+1$ and $k$ is co-prime to $4k+2$ when $k$ is odd. Also, $k+1$ is co-prime
to $4k+2$ when $k$ is even and $k+1$ is co-prime to $4k+3$ for all $k$. Thus in all cases gcd$(n,\omega)=1$. As $n$
divides $(i-j)\omega$, it follows that $n$ divides $(i-j)$. But then $(i-j)\geq n$ and thus $a-b\geq 2n$, so $a>2n$, a
contradiction.
\end{proof}
Lemma \ref{lem:bijection} establishes that the function $c \circ p_1^{-1}:V(Z_{n,s})\rightarrow \textbf{N}$ assigns each vertex
exactly one label. It remains to show that the labeling satisfies the radio condition. The following lemma simplifies many
of the calculations needed.
\begin{lemma}\label{lem:usefulfacts}
In all cases considered,
\begin{itemize}
\item
\noindent $\phi(n,s)+\omega \geq \textrm{diam}(Z_{n,s})+1$
and
\item $\phi(n,s)-\omega\geq\left\{
\begin{array}{ll}
1, & \hbox{if $n-s$ is even} \\
2, & \hbox{if $n-s$ is odd.}
\end{array}
\right.
$
\end{itemize}
\end{lemma}
\begin{proof}
First, we give the values of $\diam(Z_{n,s})+1$:
\begin{center}
\begin{tabular}{c|c|c|c}
{$\textrm{diam}(Z_{n,s})+1$} & {$s=1$} & {$s=2$} & { $s=3$} \\ \hline
{$r=1$ } & {$2k+2$} & {$2k+2$} & {$2k+1$} \\
\hline
$r=2$ & {$2k+3$} & $2k+2$ & {$2k+2$} \\
\hline
{$r=3$} & {$2k+3$} & {$2k+3$} & {$2k+2$} \\
\hline
\end{tabular}
\end{center}
In each case, $\diam(Z_{n,s})+1 \leq \phi(n,s)+\omega$:
\begin{center}
\begin{tabular}{c|c|c|c}
{$\phi(n,s)+\omega$} & {$s=1$} & {$s=2$} & { $s=3$} \\ \hline
{$r=1$ } & {$2k+2$} & {$2k+2$} & {$2k+1$} \\
\hline
$r=2$, $k$ odd & {$2k+3$} & $2k+2$ & {$2k+2$} \\
\hline
{$r=2$, $k$ even } & {$2k+4$} & {$2k+3$} & {} \\
\hline
{$r=3$} & {$2k+3$} & {$2k+4$} & {$2k+3$} \\
\hline
\end{tabular}
\end{center}
The last table shows the values of $\phi(n,s)-\omega$ with the entries corresponding to $n+s \equiv 0 \mod 2$
in bold.
\begin{center}
\begin{tabular}{c|c|c|c}
{$\phi(n,s)-\omega$} & {$s=1$} & {$s=2$} & { $s=3$} \\ \hline
{$r=1$ } & {${\mathbf 2}$} & {$2$} & {${\mathbf 1}$} \\
\hline
$r=2$, $k$ odd & {$3$} & ${\mathbf 2}$ & {$2$} \\
\hline
{$r=2$, $k$ even } & {$2$} & {${\mathbf 1}$} & {} \\
\hline
{$r=3$} & {${\mathbf 1}$} & {$2$} & {${\mathbf 1}$} \\
\hline
\end{tabular}
\end{center}
\end{proof}
\begin{thm}
The function $c \circ p_1^{-1}:V(Z_{n,s})\rightarrow \textbf{N}$ defines a radio labeling on $Z_{n,s}$ for the values of $n$
and $s$ considered in Case 1.
\end{thm}
\begin{proof}
By Lemma
\ref{lem:fcol} it is enough to check that all pairs of vertices in the set $\alpha_j,\ldots, \alpha_{j+3}$ satisfy the radio condition. As $d(\alpha_j, \alpha_{j+a})$ depends only on $a$ and on the parity of $j$, it is enough to check all pairs of the form $(\alpha_{2i-1}, \alpha_{2i-1+a})$ and all pairs of the form $(\alpha_{2i}, \alpha_{2i+a})$ for $a \leq 3$ and for some $i$. To simplify the computations, we will check these pairs in the case when $i=1$. For the convenience of the reader, we give the coordinates and the labels of the relevant vertices.
\begin{center}
\begin{tabular}{l|l|l}
& vertex & label value \\
\hline
$\alpha_1$ & $(1,1)$ & $1$ \\
$\alpha_2$ & $(2,1+D)$ & $2$ \\
$\alpha_3$ & $(1,1+\omega)$ & $1+\phi(n,s)$ \\
$\alpha_4$ & $(2,1+D+\omega)$ & $2+\phi(n,s)$ \\
$\alpha_5$ & $(1,1+2\omega)$ & $1+2\phi(n,s)$ \\
\end{tabular}
\end{center}
{\bf Pair $(\alpha_1, \alpha_2)$:} By Lemma
\ref{lem:diameteraway}, $d(\alpha_1,\alpha_2)=\diam(Z_{n,s})$. Thus
$d(\alpha_1,\alpha_2)+c(\alpha_2)-c(\alpha_1)=\diam(Z_{n,s})+1$, as required.
{\bf Pairs $(\alpha_1, \alpha_3)$ and $(\alpha_2, \alpha_4)$:} Note that $\alpha_1$ and $\alpha_3$ lie on the same principal
$n$-cycle and this cycle is tight by Remark \ref{rmk:principletightcycles}. Thus
\[d(\alpha_1, \alpha_3)+c(\alpha_3)-c(\alpha_1)=\omega+\phi(n,s)\geq \diam
(Z_{n,s})+1.\]
The last inequality follows by Lemma \ref{lem:usefulfacts}. The relationship between $\alpha_2$ and
$\alpha_4$ is identical.
{\bf Pair $(\alpha_1, \alpha_4)$:} Note that
\[d(\alpha_1, \alpha_4)\geq d(\alpha_1, \alpha_2)-d(\alpha_2,
\alpha_4)=\diam(Z_{n,s})-\omega.\] Thus
\[d(\alpha_1, \alpha_4)+c(\alpha_4)-c(\alpha_1)\geq
\diam(Z_{n,s})-\omega+\phi(n,s)+1 \geq \diam(Z_{n,s})+2,\]
where the last inequality follows by Lemma
\ref{lem:usefulfacts}.
{\bf Pair $(\alpha_2, \alpha_3)$:} By subtracting $\omega$ from the second coordinate, we see $d(\alpha_2,
\alpha_3)=d\left((1,1),(2,1+D-\omega)\right)$.
When considered in the standard cycle, these vertices correspond to $X^1_1$ and $X^1_{2+D-\omega}$ if $s=1$ and to
$X^s_1$ and $X^s_{1+D-\omega}$ if $s=2$ or $3$. As by Remark \ref{rmk:standardtightcycles} the standard cycle in each case is $X_1^s$-tight, we have
\[d(\alpha_2, \alpha_3)=
\left\{
\begin{array}{ll}
D-\omega+1=\left\lfloor \frac{n+3}{2}\right\rfloor-\omega, & \hbox{$s=1$,} \\
D-\omega=\left\lfloor \frac{n+2}{2}\right\rfloor-\omega, &
\hbox{$s=2$,}\\
D-\omega=\left\lfloor
\frac{n+1}{2}\right\rfloor-\omega, & \hbox{$s=3.$}
\end{array}
\right.\]
Thus in all cases $d(\alpha_2, \alpha_3)= \left\lfloor
\frac{n+3-s+1}{2}\right\rfloor-\omega$, so
\begin{eqnarray}
d(\alpha_2, \alpha_3)&\geq \left\lfloor \frac{n+3-s+1}{2}\right\rfloor-\omega\\
&=
\left\{
\begin{array}{ll}
\diam(Z_{n,s})+1-\omega, & \hbox{$n-s$ even,} \nonumber \\
\diam(Z_{n,s})-\omega, & \hbox{$n-s$ odd.} \nonumber \\
\end{array}
\right.
\end{eqnarray}
By Lemma \ref{lem:usefulfacts},
$\phi(n,s)-\omega \geq 1$ when $n-s$ is even and $\phi(n,s)-\omega \geq 2$ when $n-s$ is odd. Thus
\begin{eqnarray}
d(\alpha_2, \alpha_3)+c(\alpha_3)-c(\alpha_2)&\geq \left\lfloor
\frac{n+3-s+1}{2}\right\rfloor-\omega +(\phi(n,s)-1)\nonumber \\
&\geq \left\{
\begin{array}{ll}
\diam(Z_{n,s})+1+1-1, & \hbox{$n-s$ even,} \nonumber \\
\diam(Z_{n,s})+2-1, & \hbox{$n-s$ odd.} \nonumber \\
\end{array}
\right.
\end{eqnarray}
{\bf Pair $(\alpha_2, \alpha_5)$:} As $|c(\alpha_5)-c(\alpha_2)|=2\phi(n,s)-1$ and $d(\alpha_2,\alpha_5)\geq1$, it
follows that $|c(\alpha_5)-c(\alpha_2)|+d(\alpha_2,\alpha_5)\geq 2\phi(n,s)$, and so by Remark \ref{rmk:phi},
$|c(\alpha_5)-c(\alpha_2)|+d(\alpha_2,\alpha_5)\geq \diam(Z_{n,s})+1.$
This establishes that $c \circ p_1^{-1}$ is a radio labeling
of $Z_{n,s}$.
\end{proof}
\begin{center}
\textbf{Case 2: $n=4k$, $s=1$ or $3$}
\end{center}
In this case the position function is
\begin{eqnarray}
p_2(\alpha_{2i-1})&=(1+l_i, 1+k(i-1)-l_i),\textrm{ and}\nonumber \\
p_2(\alpha_{2i})&=(2+l_i, 1+k(i+1)-l_i),
\end{eqnarray}
where
$l_i=\left\lfloor \frac{i-1}{4}\right\rfloor$.
To simplify notation, we will also denote the value of $l$ associated to a particular vertex $v$ by $l_{(v)}$. Note
that $l_{(\alpha_{2n})}=l_n=l_{4k}=\left\lfloor \frac{4k-1}{4}\right\rfloor\leq k-1$.
\begin{lemma} \label{lem:bijection2}
The function $p_2:\{\alpha_j \,|\, j=1,..,2n\}\rightarrow V(Z_{n,s})$ is a bijection.
\end{lemma}
\begin{proof}
To show that $p_2$ is a bijection, suppose that
$p_2(\alpha_a)=p_2(\alpha_b)$ and let $i=\left\lfloor \frac{a}{2}\right\rfloor$ and $j=\left\lfloor \frac{b}{2}\right\rfloor$. Suppose first
that $a$ and $b$ are even. Then
\[1+k(i+1)-l_{(a)} \equiv 1+k(j+1)-l_{(b)} \mod n.\] Thus
\[k(i-j)+l_{(b)}-l_{(a)} \equiv 0 \mod 4k,\] so $l_{(b)}-l_{(a)}
\equiv 0 \mod k$. As $l_{(b)}-l_{(a)}\leq k-1$, this implies that $l_{(b)}=l_{(a)}$. Then we have that
\[k(i-j)+l_{(b)}-l_{(a)}=k(i-j) \equiv 0 \mod 4k,\] and thus
$(i-j)\geq 4$ or $a-b \geq 8$. However, $a-b \geq 8$ implies that $l_{(b)}\neq l_{(a)}$, a contradiction. The argument
when $a$ and $b$ are odd is similar.
Suppose then that $a$ is even and $b$ is odd. Then $1+l_{(b)} \equiv 2+l_{(a)} \mod 2$ shows that $l_{(a)}$ and
$l_{(b)}$ have different parity and in particular $l_{(a)}-l_{(b)} \neq 0$. On the other hand, considering the second
coordinates of $p_2(\alpha_a)$ and $p_2(\alpha_b) \mod k$, we deduce that $1-l_{(a)} \equiv 1-l_{(b)} \mod k$ or $l_{(a)}
-l_{(b)} \equiv 0 \mod k$. As $l_{(a)}-l_{(b)} \neq 0$ it follows that $|l_{(a)} -l_{(b)}|\geq k$, a contradiction.
\end{proof}
\begin{lemma}
The function $c \circ p_2^{-1}:V(Z_{n,s})\rightarrow \textbf{N}$ defines a radio labeling on $Z_{4k,1}$ and $Z_{4k,3}$.
\end{lemma}
\begin{proof}
The inequality the function must
satisfy when applied to $Z_{4k,1}$ is $d(u,v)+|c(u)-c(v)|\geq \diam(Z_{4k,1})+1=2k+2$. For $Z_{4k,3}$, the
corresponding inequality is $d(u,v)+|c(u)-c(v)|\geq \diam(Z_{4k,3})+1=2k+1$.
For both $Z_{4k,1}$ and $Z_{4k,3}$, $\phi(n,s)=k+2$, thus $c_{Z_{4k,1}}(u)=c_{Z_{4k,3}}(u)$.
If $u$ and $v$ are in the same principal cycle, then $d_{Z_{n,3}}(u,v)=
d_{Z_{n,1}}(u,v)$, as principal cycles are always tight. If $u$ and $v$ are on different principal cycles, it is easy
to verify that $d_{Z_{n,3}}(u,v)=d_{Z_{n,1}}(u,v)-1$ by comparing the standard $u$-tight cycles on the two graphs. Thus we can conclude that $d_{Z_{n,3}}(u,v)\geq d_{Z_{n,1}}(u,v)-1$ and so if the radio condition is satisfied by $c_{Z_{n,1}}$, the corresponding radio condition is satisfied by
$c_{Z_{n,3}}$. We will check the radio condition assuming that $s=1$.
As before, it suffices to check that the radio condition holds for all pairs of the form $(\alpha_{2i-1}, \alpha_{2i-1+a})$ and all pairs of the form $(\alpha_{2i}, \alpha_{2i+a})$ for $a \leq 3$. For the convenience of the reader, the relevant values of $p_2$ and $c$ are provided below.
\begin{center}
\begin{tabular}{l|l|l}
& vertex & label value \\
\hline
$\alpha_{2i-1}$ & $(1+l_{i},1+k(i-1)-l_{i})$ & $1+(i-1)(k+2)$ \\
$\alpha_{2i}$ & $(2+l_{i},1+k(i+1)-l_{i})$ & $2+(i-1)(k+2)$ \\
$\alpha_{2(i+1)-1}$ & $(1+l_{i+1},1+ki-l_{i+1})$ & $1+i(k+2)$ \\
$\alpha_{2(i+1)}$ & $(2+l_{i+1},1+k(i+2)-l_{i+1})$ & $2+i(k+2)$ \\
$\alpha_{2(i+2)-1}$ & $(1+l_{i+2},1+k(i+1)-l_{i+2})$ & $1+(i+1)(k+2)$ \\
\end{tabular}
\end{center}
Note that $l_{(\alpha_{r+a})}-l_{(\alpha_{r})}=0$ or $1$ whenever $a
\leq 3$. As $s=1$, $d\left((x_1, y_1),(x_2,y_2)\right)=|x_2-x_1|+\min \{|y_2-y_1|,4k-|y_2-y_1|\}$. The following table has
been generated using this equation.
\begin{center}
\begin{tabular}{c||c|c|c}
{} & {$d(u,v)$} & {$d(u,v)$} & {}\\ {vertex pair} & {$|l_{(u)}-l_{(v)}|=0$} & {$|l_{(u)}-l_{(v)}|=1$}& {$|c(u)-c(v)|$}
\\
\hline \hline
$(\alpha_{2i-1}, \alpha_{2i})$ & {$1+2k$} & {} & $1$
\\\hline
{$(\alpha_{2i-1}, \alpha_{2(i+1)-1})$} & {$0+k$} &{$1+(k-1)$} & $k+2$\\ \hline
{$(\alpha_{2i-1}, \alpha_{2(i+1)})$} & {$1+k$} & {$0+(k+1)$}& $k+3$
\\ \hline
{$(\alpha_{2i}, \alpha_{2(i+1)-1})$} & {$1+k$} & {$0+(k+1)$}& $k+1$
\\ \hline
{$(\alpha_{2i}, \alpha_{2(i+1)})$} & {$0+k$} & {$1+(k-1)$} &$k+2$ \\ \hline
{$(\alpha_{2i}, \alpha_{2(i+2)-1})$} & {$1+0$} & {$0+1$} & $2k+3$\\ \hline
\end{tabular}
\end{center}
It is straightforward to verify that in each case, $d(u,v)+|c(u)-c(v)|\geq 2k+2$.
\end{proof}
\begin{center}
\textbf{Case 3: $n=4k$, $s=2$}
\end{center}
The position function for this case is
\begin{eqnarray}
p_3(\alpha_{2i-1})&=(i,1+k(i-1)-l_i),\textrm{ and}\nonumber \\
p_3(\alpha_{2i})&=(i, 1+k(i+1)-l_i),
\end{eqnarray}
where $l_i= \left\lfloor \frac{i-1}{2} \right\rfloor$. Note that $l_{n}=l_{4k}=\left\lfloor \frac{4k-1}{2}\right\rfloor \leq 2k-1$.
\begin{lemma} \label{lem:bijection3}
The function $p_3:\{\alpha_j\,|\,j=1,..,2n\} \rightarrow V(Z_{n,s})$ is a bijection.
\end{lemma}
\begin{proof}
Suppose that $p_3(\alpha_a)=p_3(\alpha_b)$ and let $i=\left\lfloor \frac{a}{2} \right\rfloor$ and $j=\left\lfloor \frac{b}{2}
\right\rfloor$. First suppose $a$ and $b$ have the same parity, say even. Then
\[1+k(i+1)-l_{(a)} \equiv 1+k(j+1)-l_{(b)} \mod n.\] Thus
\[k(i-j)+l_{(b)}-l_{(a)} \equiv 0 \mod 4k,\]
so $l_{(b)}-l_{(a)}
\equiv 0\mod k$. As $|l_{(a)}-l_{(b)}|\leq 2k-1$, this implies that $l_{(a)}=l_{(b)}$ or that $l_{(a)}=l_{(b)}+k$.
In the first case it follows that
\[k(i-j)+l_{(b)}-l_{(a)}=k(i-j) \equiv 0 \mod 4k\] and thus
$(i-j)\geq 4$ or $a-b \geq 8$. However, $a-b \geq 8$ implies that $l_{(b)}\neq l_{(a)}$, a contradiction.
If $l_{(a)}=l_{(b)}+k$, it follows that
\[k(i-j)+l_{(b)}-l_{(a)}=k(i-j-1) \equiv 0 \mod 4k,\]
and thus $4$ divides $i-j-1$. We conclude that
$i-j-1$ is even and thus $i-j$ is odd. It follows that $i$ and $j$ have different parities. But in this case
$p_3(\alpha_a)$ and $p_3(\alpha_b)$ have different first coordinates, so $p_3(\alpha_a) \neq p_3(\alpha_b)$. The argument when $a$ and $b$ are odd is similar.
Suppose then that $a$ is even and $b$ is odd. Considering the second coordinate of $p_3(\alpha_a)-p_3(\alpha_b)$ $ \mod k$ gives that $l_{(b)}-l_{(a)} \equiv 0\mod k$. As $|l_{(a)}-l_{(b)}| \leq 2k-1$, we again conclude that
$l_{(a)}=l_{(b)}$ or $l_{(a)}=l_{(b)}+k$. In the first case, considering the second coordinate of
$p_3(\alpha_a)-p_3(\alpha_b)$ $\mod 2k$, we conclude $k(i-j) \equiv 0 \mod 2k$, so $(i-j)\geq 2$. This however
implies that $l_{(b)}\neq l_{(a)}$, a contradiction. If $l_{(a)}=l_{(b)}+k$, then, should the second coordinate of
$p_3(\alpha_a)-p_3(\alpha_b)$ be congruent to 0 $\mod 4k$, we'd have $2k(i-j-1) \equiv 0 \mod 4k$, so $(i-j-1)$ is even. Again this shows that $p_3(\alpha_a)$ and $p_3(\alpha_b)$ have different first coordinates, so can not be equal.
\end{proof}
\begin{lemma}
The function $c \circ p_3^{-1}:V(Z_{n,s})\rightarrow \textbf{N}$ defines a radio labeling on $Z_{4k,2}$.
\end{lemma}
\begin{proof}
As before it suffices to check all pairs of the form $(\alpha_{2i-1}, \alpha_{2i-1+a})$ and all pairs of the form
$(\alpha_{2i}, \alpha_{2i+a})$ for $a \leq 3$. For the convenience of the reader, the values of $p_3$ for the pairs of vertices we must check are provided below.
\begin{center}
\begin{tabular}{l|l|l}
& vertex & label value \\
\hline
$\alpha_{2i-1}$ & $(i,1+k(i-1)-l_{i})$ & $1+(i-1)(k+1)$ \\
$\alpha_{2i}$ & $(i,1+k(i+1)-l_{i})$ & $2+(i-1)(k+1)$\\
$\alpha_{2(i+1)-1}$ & $(i+1,1+ki-l_{i+1})$ & $1+i(k+1)$\\
$\alpha_{2(i+1)}$ & $(i+1,1+k(i+2)-l_{i+1})$ & $2+i(k+1)$\\
$\alpha_{2(i+2)-1}$ & $(i+2,1+k(i+1)-l_{i+2})$ & $1+(i+1)(k+1)$\\
\end{tabular}
\end{center}
We will have to compute distances in $Z_{n,2}$. It is easy to see that $d\left((1,j),(2,j)\right)=1$ and $d\left((i, j),(i,j')\right)=\min \{|j-j'|, 4k-|j-j'| \}$. The distance $d\left((1, j),(2,j')\right)$, $j\neq j'$, is somewhat harder to compute. For this purpose we can use the standard cycle in $Z_{4k,2}$ after appropriate renaming of the
vertices. In particular, $d\left((1, j),(2,j')\right)=d\left((1, j-j+1),(2,j'-j+1)\right)=d\left((1, 1),(2,j'-j+1)\right)$. Let $r \equiv j'-j+1$ $\mod 4k$ and $r \in\{1,...,n\}$. Then $d\left((1, j),(2,j')\right)=d\left((1, 1),(2,r)\right)=d_{C_{n+1}}(X^2_1, X^2_r)=\min \{r-1,
n+1-(r-1) \}$. Note that in $Z_{n,2}$, $d\left((1, j),(2,j')\right) \neq d\left((1, j'),(2,j)\right)$ thus $d(\alpha_s,
\alpha_t)$ depends on the parities of $\left\lfloor \frac{s}{2} \right\rfloor$ and $\left\lfloor \frac{t}{2} \right\rfloor$.
The following table shows the distances and label differences of the relevant pairs computed using the methods described above.
\begin{center}
\begin{tabular}{c||c|c|c}
{vertex pair} & {$d(u,v)$, $i$ even} & {$d(u,v)$, $i$ odd} & {$|c(u)-c(v)|$}\\
\hline \hline
$(\alpha_{2i-1}, \alpha_{2i})$ & {$2k$} & {$2k$} & {$1$} \\\hline
{$(\alpha_{2i-1}, \alpha_{2(i+1)-1})$} & {$d\left((1,1),(2,3k+2)\right)$} & {$d\left((1,1)(2,k+1)\right)$} & $k+1$
\\
{}&$=k$&$=k$ &{} \\ \hline
{$(\alpha_{2i-1}, \alpha_{2(i+1)})$} & {$d\left((1,1),(2,k+2)\right)$} & {$d\left((1,1),(2,3k+1)\right)$} & $k+2$\\
{}&$=k+1$&$=k+1$&{} \\ \hline
{$(\alpha_{2i}, \alpha_{2(i+1)-1})$} & {$d\left((1,1),(2,k+2)\right)$} & {$d\left((1,1),(2,3k+1)\right)$} & $k$
\\
{}&$=k+1$&$=k+1$&{} \\
\hline
{$(\alpha_{2i}, \alpha_{2(i+1)})$} & {$d\left((1,1),(2,3k+2)\right)$} & {$d\left((1,1),(2,k+1)\right)$} & $k+1$ \\
{}&$=k$&$=k$&{}\\ \hline
{$(\alpha_{2i}, \alpha_{2(i+2)-1})$} & {$\geq 1$} & {$\geq 1$} & $2k+1$ \\ \hline
\end{tabular}
\end{center}
\end{proof}
\begin{center}
\textbf{Case 4: $n=4k+2$ when $k$ is even
and $s=3$}
\end{center}
The position function is
\begin{eqnarray}
p_4(\alpha_{2i-1})&=(l_i, 1+(i-1)k), \textrm{ and} \nonumber \\
p_4(\alpha_{2i})& =(l_i, 2+(i+1)k),
\end{eqnarray}
where
\[l_i=\left\{
\begin{array}{ll}
0, & \hbox{$i \leq 2k+1$,} \\
1, &
\hbox{$2k+1 < i \leq 4k+2$.}\\
\end{array}
\right.\]
\begin{lemma} \label{lem:bijection4}
The function $p_4: \{\alpha_j \,|\, j=1,...,2n\} \rightarrow V(Z_{n,s})$ is a bijection.
\end{lemma}
\begin{proof}
Suppose $p_4(\alpha_{a})=p_4(\alpha_{b})$. Let $i=\left\lfloor \frac{a}{2} \right\rfloor$ and $j=\left\lfloor \frac{b}{2} \right\rfloor$. If
$a$ and $b$ have the same parity, it follows that $ki \equiv kj \mod (4k+2)$, i.e., $(i-j)k=(4k+2)m$ for some integer
$m$. As $k$ is even, $k=2q$ for some integer $q$. Substituting and simplifying, we obtain the equation
$q(i-j)=m(4q+1)$. As $\gcd(q, 4q+1)=1$, it follows that $q|m$ and thus $m \geq q$, so $(i-j) \geq 4q+1=2k+1$. But in this
case $l_j \neq l_i$, so the first coordinates of $p_4(\alpha_{a})$ and $p_4(\alpha_{b})$ are different.
If $a$ is odd and $b$ is even, it follows that $1+(j+1)k-(i-1)k \equiv 0 \mod 4k+2$. So $1+k(j-i+2) \equiv 0 \mod 4k+2$. As $k$ is even by hypothesis, $1+k(j-i+2)$ is odd, but $4k+2$ is even, a contradiction.
\end{proof}
\begin{lemma}
The function $c \circ p_4^{-1}:V(Z_{n,s})\rightarrow \textbf{N}$ defines a valid radio labeling on $Z_{4k+2,3}$ when $k$ is even.
\end{lemma}
\begin{proof}
Since
diam$(Z_{4k+2,3})=2k+1$, we need to show that $d(u,v)+|c(u)-c(v)|\geq 2k+2$ for all pairs $u,v \in Z_{4k+2,3}$. Again
it suffices to check only the pairs $(\alpha_{2i-1}, \alpha_{2i-1+a})$ and the pairs of the form $(\alpha_{2i},
\alpha_{2i+a})$ for $a \leq 3$. Below are given the positions and the labels of these vertices.
\begin{center}
\begin{tabular}{l|l|l}
& vertex & label\\
\hline
$\alpha_{2i-1}$ & $\left(l_i,1+k(i-1)\right)$ & $1+(i-1)(k+2)$ \\
$\alpha_{2i}$ & $\left(l_i,2+k(i+1)\right)$ & $2+(i-1)(k+2)$\\
$\alpha_{2(i+1)-1}$ & $(l_{i+1},1+ki)$ & $1+i(k+2)$\\
$\alpha_{2(i+1)}$ & $\left(l_{i+1},2+k(i+2)\right)$ & $2+i(k+2)$\\
$\alpha_{2(i+2)-1}$ & $\left(l_{i+2},1+k(i+1)\right)$ & $1+(i+1)(k+2)$\\
\end{tabular}
\end{center}
Note that in $Z_{n,3}$, $d\left((x_1,y_1),(x_2,y_2)\right)=\min \{|y_2-y_1|, n-|y_1-y_2| \}$ so the first coordinates of
the vertices are irrelevant when computing distances. As $l_i$ only appears in the first coordinates, we do not have to consider the cases of
$l_i=l_{i+1}$ and $l_i \neq l_{i+1}$ separately. Below are given all the relevant distances and label differences. It
is easy to verify that the condition $d(u,v)+|c(u)-c(v)|\geq 2k+2$ is satisfied for all pairs.
\begin{center}
\begin{tabular}{c||c|c}
{vertex pair} & {$d(u,v)$} & {$|c(u)-c(v)|$} \\
\hline \hline
$(\alpha_{2i-1}, \alpha_{2i})$ & {$2k+1$} & $1$ \\\hline
{$(\alpha_{2i-1}, \alpha_{2(i+1)-1})$} & {$k$} & {$k+2$} \\ \hline
{$(\alpha_{2i-1}, \alpha_{2(i+1)})$} & {$1+k$} & {$k+3$} \\ \hline
{$(\alpha_{2i}, \alpha_{2(i+1)-1})$} & {$k+1$} & {$k+1$}
\\ \hline
{$(\alpha_{2i}, \alpha_{2(i+1)})$} & {$k$} & {$k+2$} \\ \hline
{$(\alpha_{2i}, \alpha_{2(i+2)-1})$} & {$1$} & {$2k+3$} \\ \hline
\end{tabular}
\end{center}
\end{proof}
\end{document} |
\begin{document}
\title{Stability of Large Amplitude Viscous Shock Wave for 1-D Isentropic
Navier-Stokes System in the Half Space \thanks{Received date, and accepted date (The correct dates will be entered by the editor).}}
\author{Lin,Chang\thanks{School of Mathematics Science, Beihang University, Beijing, China, ( [email protected]). }}
\pagestyle{myheadings} \markboth{Large amplitude shock wave for impermeable wall problem}{Lin} \maketitle
\begin{abstract}
In this paper, the large-time behavior of solutions to an initial boundary value problem in the half space for the 1-D isentropic Navier-Stokes system is investigated. It is shown that the viscous shock wave is stable for an impermeable wall problem where the velocity is zero on the boundary provided that the shock wave is initially far away from the boundary. Moreover, the strength of the shock wave could be arbitrarily large. This work essentially improves the result of [A. Matsumura, M. Mei, Convergence to travelling fronts of solutions of the p-system with viscosity in the presence of a boundary, Arch. Ration. Mech. Anal., 146(1): 1-22, 1999], where the strength of the shock wave is sufficiently small.
\end{abstract}
\begin{keywords}
Impermeable wall problem; large amplitude shock; asymptotic stability
\end{keywords}
\begin{AMS}
35Q30; 76N10;
\end{AMS}
\section{Introduction}\label{intro}
We consider a 1-D isentropic Navier-Stokes system for general viscous gas, which reads in the Lagrangian coordinate as,
\begin{equation}\label{1.1}
\left\{ \begin{array}{ll}
&v_t-u_x=0, \\
&u_t+p_x=(\mu(v)\frac{u_x}{v^{ }})_x,
\end{array} \right.
\end{equation}
where $t>0, x\in \mathbb{R_+},$ and $v(x,t)=\frac{1}{\rho(x,t)}$ is the specific volume, $u(x,t)$ the fluid velocity, $p=a v^{-\gamma}$ the pressure with constant $a>0$, $\gamma> 1$ the adiabatic constant, and $\mu(v)=\mu_{0}v^{-\alpha}$ the viscosity coefficient with $\alpha\geq 0$. When the viscosity $\mu(v)\equiv 0 $, the system (\ref{1.1}) becomes the famous Euler system
\begin{equation}
\left\{ \begin{array}{ll}\label{1.2}
&v_t-u_x=0,\\
&u_t+p_x=0,
\end{array} \right.
\end{equation}
that has rich wave phenomena such as shock and rarefaction waves. When $\mu (v)>0 $, the shock wave is mollified as the so-called viscous shock wave. Without loss of generality, we assume $\mu_0=1$ in what follows.
Since the system \eqref{1.1} is more regular than the Euler one \eqref{1.2}, it is very interesting and important to study the stability of the viscous version of shock wave, i.e., the viscous shock wave, for the viscous conservation laws such as the NS system \eqref{1.1} with the initial data:
\begin{equation}\label{1.3}
(v, u)(x, 0) = (v_0,u_0) (x)\longrightarrow (v_{\pm},u_{\pm}),\quad \text{as} \quad x\rightarrow \pm\infty.
\end{equation}
The stability of viscous shock wave for the Cauchy problem (\ref{1.1}), (\ref{1.3}) has been extensively studied in a large amount of literature since the pioneering works of \cite{g1986,mn1985}, see the other interesting works \cite{fs1998, hm2009, hlz2017,km1985,l1997,lz2009,lz2015,m,mn1994,sx1993}. It is noted that most of the above works require that the strength of the shock wave be suitably small, that is, the shock is weak. The stability of large amplitude shock (strong shock) is more interesting and challenging in both mathematics and physics, see works \cite{hh2020, mn1985 , km1985 ,mz2004,mw2010,vy2016,z2004}.
Matsumura-Nishihara \cite{mn1985} showed that the viscous shock wave is stable if $ |v_+-v_-|<C(\gamma - 1)^{-1}$, that is, when $ \gamma \rightarrow 1 $, the strength of shock wave could be large. This condition is later relaxed in \cite{km1985} to the condition that $ |v_+-v_-|<C(\gamma - 1)^{-2}$. Recently, the restriction on the strength of shock was removed in \cite{mw2010} by an elegant weighted energy method as $\alpha >\frac{\gamma-1} {2} $. Vasseur-Yao \cite{vy2016} removed the condition $\alpha >\frac{\gamma-1} {2}$ by introducing a beautiful variable transformation. Moreover, He-Huang \cite{hh2020} extended the result of \cite{vy2016} to general pressure $p(v)$ and general viscosity $\mu(v)$, where $\mu(v)$ could be any positive smooth function.
On the other hand, it is also interesting to investigate the stability of viscous shock wave under the effect of boundary. In 1999, Matsumura-Mei \cite{mm1999} considered an impermeable wall problem of \eqref{1.1} in the half space $x\ge 0$, i.e.,
\begin{equation}\label{1.4}
\left\{ \begin{array}{ll}
(v,u)(x,0)=(v_0,u_0)(x) \longrightarrow (v_+,u_+), ~x\to +\infty,\\
u(0,t)=0, ~~ t\in\mathbb{R_+},
\end{array} \right.
\end{equation}
where $v_+>0, u_+<0$. The impermeable wall means that there is no flow across the boundary so that the velocity at the boundary $x=0$ has to be zero. It was proved in \cite{mm1999} that the solution of \eqref{1.1}, \eqref{1.4} with $\alpha=0$ time-asymptotically tends to an outgoing shock wave (2-shock) connecting the left state $(v_-,0)$ and the right one $(v_+,u_+)$
if $ |v_+-v_-|<C(\gamma - 1)^{-2}$, and the outgoing shock is initially far away from the boundary so that the interaction between the shock and the boundary is weak,
\color{black}{}
where $v_-$ is determined by the RH condition, i.e.,
\begin{eqnarray}\label{1.5}
\left\{\begin{array}{ll}
-s(v_+-v_-)-(u_+-u_-)=0, \\
-s(u_+-u_-)+(p(v_+)-p(v_-))=0,
\end{array}
\right.
\end{eqnarray}
with $u_-=0$.
Matsumura-Nishihara \cite{mn2004} removed the condition that the shock is initially far away from the boundary by extending the half space to the whole space, with the price that the shock wave has to be weak even for $\gamma=1$ case.
In this paper, we aim to prove that the large amplitude shock wave is still stable for the impermeable wall problem \eqref{1.1}-\eqref{1.4}. Roughly speaking, there exists a 2-viscous shock wave (outgoing shock) $(V_2,U_2)$ connecting $(v_-, 0 )$ and $ (v_+, u_+ )$ with $ v_- $ determined by the RH condition (\ref{1.5}), and $(V_2,U_2)$ is asymptotically stable if it is initially far away from the boundary. The precise statement of the main result is given in Theorem \ref{theorem}.
We outline the strategy as follows. Motivated by \cite{vy2016} and \cite{hh2020}, we introduce a new variable $h=u -v^{(-\alpha+1)} v_{x}$ and formulate a new equation $\eqref{4.2}_2$ in which the viscous term is moved to the mass equation $\eqref{4.2}_1$ so that the two nonlinear terms ${}{p}_x$ and $(\frac{{}{v}_x}{{}{v}^{\alpha+1}})_x $ are decoupled and the interaction between nonlinear terms is weakened. Since the strength of the outgoing shock is arbitrarily large, the interaction between the 2-shock and the boundary $x=0$ is strong. We have to assume that the outgoing shock is initially far away from the boundary so that the interaction is weak. Since the boundary terms with first order derivatives are controlled, we can obtain the low order estimates through careful analysis.
But the idea using the new system \eqref{4.2} does not work in the higher order estimation since it is very difficult to control the second order derivatives of boundary terms for the new variable $h$. Noting that the second derivatives of $u$ on the boundary can be controlled, we then turn to the original system \eqref{1.1} to obtain the higher order energy estimates, and finally complete the a priori estimates.
\
The rest of the paper will be arranged as follows. In section \ref{Sec.2}, the outgoing shock wave is formulated and the main result is stated. In section \ref{Sec.3}, the problem is reformulated by the anti-derivatives of the perturbations around the viscous shock wave. In section \ref{Sec.4}, the a priori estimates are established. In section \ref{Sec.5}, the main theorem is proved.
\
\textbf { Notation.}
The functional $\|\cdot\|_{L^p(\Omega)}$ is defined by $\| f\|_{L^p(\Omega)} = (\int_{\Omega}|f|^{p}(\xi )\operatorname{ d }\xi )^{\frac{1}{p}}$. The symbol $\Omega$ is often omitted, when $\Omega=(0,\infty)$. As $p=2$, for simplicity we denote,
\begin{equation*}
\| f\| = \left(\int_{ 0}^{ \infty}f^{2}(\xi )\operatorname{ d }\xi \right)^{\frac{1}{2}}.
\end{equation*}
\color{black}{}
In addition, $H^m$ denotes the $m$-th order Sobolev space of functions defined by
\begin{equation*}
\|f\|_{m} = \left( \sum_{k=0}^{m} \|\partial^{k}_{\xi}f\|^2 \right)^{\frac{1}{2}}.
\end{equation*}
\section{Preliminaries and Main Theorem}\label{Sec.2}
\subsection{ Viscous Shock Profile and Location of the Shift.}As pointed out by \cite{mm1999}, the solution of the impermeable wall problem \eqref{1.1}-\eqref{1.4} is expected to tend toward the outgoing viscous shock $(V, U)(\xi)$ satisfying
\begin{equation}
\left\{ \begin{array}{ll}
&{-s }{V}'-{U}'=0,\\
&{-s }U'+p(V)'=\left(\frac{U'}{V^{\alpha+1}}\right)',\\
&(V ,U)(-\infty)=(v_-,0),\quad (V ,U)(+\infty)=(v_+,u_+),
\end{array} \right.
\label{2.1}\end{equation}
where $'=\operatorname{d} / \operatorname{d\xi}$, $\xi=x-s t$, $s $ is the shock speed determined by the RH condition \eqref{1.5} and $v_\pm>0,u_+<0$ are given constants. From $(\ref{2.1})_1 $ and $(\ref{2.1})_2 $, one gets
\begin{align}
\begin{split}
& s^{2}V'+p(V)'= -\left(\frac{s V'}{V^{\alpha+1}}\right)'.
\end{split}&
\label{2.2}\end{align}
Integrating (\ref{2.2}) over $(\pm\infty,\xi)$ gives
\begin{align}\label{2.3}
\begin{split}
&\frac{s V'}{V^{\alpha+1}}=-s^{2}V -p(V) -b=:h(V), V(\pm\infty)=v_\pm,
\end{split}&
\end{align}
\begin{align}
\begin{split}
&U=-s (V-v_-)=-s (V-v_+)+u_+,
\end{split}&
\end{align}
where $b=-s ^2 v_\pm-p(v_\pm) $.
\begin{proposition} [\cite{mm1999}]
There exists a unique viscous shock profile $(V , U)(\xi)$ up to a shift satisfying
\begin{eqnarray}\label{2.5}
0<v_{-}<V(\xi)<v_{+}, \quad
h(V )>0, \quad U'<0,
\end{eqnarray}
\begin{eqnarray}
\left|V (\xi)-v_{\pm}\right|=O(1)\left|v_{+}-v_{-}\right| e^{-C_{\pm}|\xi|},
\end{eqnarray}
as $\xi \rightarrow \pm \infty,$ where $C_\pm=\frac{v_{\pm}^{\alpha+1}}{ s_{ }} |p'(v_\pm)+s_{} ^2|$,
$s_{}=\frac{-u_+}{v_+-v_-}. $
\end{proposition}
We expect $\int_{0}^{\infty} [v(x,t)-V(x-st+\beta_{0}-\beta)] \operatorname{d}x\rightarrow 0$ as $t\rightarrow\infty $. As in \cite{mm1999}, the shift of viscous shock profile is given by
\begin{eqnarray}
\beta_{0}=\frac{1}{v_+-v_-} \left\{\int_{0}^{\infty}[{v}_{0}(x)-V (x-\beta)]\operatorname{d}x+\int_{0}^{\infty}U(-st-\beta) \operatorname{d}t\right\}.
\label{2.7}\end{eqnarray}
\subsection{Main Theorem.}We assume that for $ \beta > 0$, the initial data satisfies
\begin{flalign}
\begin{split}
&v_{0}(x)-V(x-\beta)\in H^1 \cap L^1, \quad u_{0}(x)-U(x-\beta)\in H^1 \cap L^1 ,
\end{split}
\label{2.8}\end{flalign}
and \begin{equation}
u_0(0)=0
\end{equation}
as the compatibility condition. Set
\begin{eqnarray*}
(A_{0},B_0)(x):= -\int_{x}^{\infty} (v_{0}(y)-V(y-\beta), u_{0}(y)-U (y-\beta) )\operatorname{d}y.
\end{eqnarray*}
We further assume that
\begin{eqnarray}
( A_{0} ,B_0 ) \in L^2 .
\label{2.10}\end{eqnarray}
The shift $\beta_{0}$ has the following properties.
\begin{lemma}[\cite{mm1999}]
Under the assumptions (\ref{2.8})-(\ref{2.10}), the shift $\beta_{0}$ defined by (\ref{2.7}) satisfies
$$\beta_{0}\rightarrow 0 \quad\text {as} \quad \|(A_0,B_0)\|_{2}\rightarrow 0 \quad \text {and}\quad \beta\rightarrow +\infty.$$
\end{lemma}
The main theorem is stated as follows.
\begin{thm}\label{theorem}
For any $u_+<0$ and $v_+>0 $, suppose that (\ref{2.8})-(\ref{2.10}) hold.
Then there exists a positive constant $\delta_{0}$ such that if $$\|(A_{0},B_{0})\|_{2 }+\beta^{-1}\leq \delta_{0}, $$then the initial-boundary value problem (\ref{1.1}), (\ref{1.4}) has a unique global solution $(v,u) (x,t) $, satisfying
\begin{align}\label{2.11}
v
&(x,t)-V (x-st+\beta_{0}-\beta)\in C^0([0,+\infty);H^1 ) \cap L^2([0,+\infty);H^1 ), \nonumber\\
u&(x,t)-U(x-st+\beta_{0}-\beta)\in C^0([0,+\infty);H^1 ) \cap L^2([0,+\infty);H^2 ),
\end{align}
where $s>0$ is defined by (\ref{1.5}), and
\begin{flalign}\label{2.12}
\begin{split}
&\sup_{x\in \mathbb{R}_{+}} | {(v,u)(x,t)}-(V, U) (x-st+\beta_{0}-\beta) | \rightarrow 0, \text{ as } t\rightarrow +\infty.
\end{split}
\end{flalign}
\end{thm}
\begin{remark}
The condition $ |v_+-v_-| < C(\gamma - 1)^{-2}$ in \cite{mm1999} is removed.
\end{remark}
\section{Reformulation of the Original Problem}\label{Sec.3}
Set
\begin{align}\label{3.1}
\phi
&( x,t)=-\int^\infty_x {}{v}(y,t)-V(y-st+\beta_0-\beta)\operatorname{d}y, \nonumber\\
\psi&( x,t)=-\int^\infty_x {}{u}(y,t)-U(y-st+\beta_0-\beta)\operatorname{d}y,
\end{align}
which means that we look for the solution $(v,u)(x,t)$ in the form
\begin{align}\label{3.2}
v&( x,t)=\phi_x (x,t)+ V(x-st+\beta_0-\beta), \nonumber\\
u&( x,t)=\psi_x (x,t)+ U(x-st+\beta_0-\beta).
\end{align}
The initial perturbations $\phi$ and $\psi$ satisfy
\begin{lemma}[\cite{mm1999}] Under the assumptions (\ref{2.8})-(\ref{2.10}), the initial perturbation $(\phi,\psi)(x,0):=\left(\phi_{0}, \psi_{0}\right)(x) \in H^{2}$ and satisfies
$$
\left\|\left(\phi_{0}, \psi_{0}\right)\right\|_{2} \rightarrow 0 \quad \text { as } \quad\left\|\left(A_{0}, B_{0}\right)\right\|_{2} \rightarrow 0 \text { and } \beta \rightarrow+\infty.
$$
\end{lemma}
Motivated by \cite{mm1999}, substituting (\ref{3.2}) into (\ref{1.1}) and integrating the resulting system with respect to $x$, we have
\begin{equation}
\left\{ \begin{array}{ll}
&\phi_{t}-\psi_{x} =0, \\
&\psi_{t}-f(V) \phi_{x}-\frac{{\psi_{x x}}}{V^{\alpha+1}} =F ,
\end{array} \right. \label{3.3}
\end{equation}
with the initial conditions and Neumann boundary condition:
\begin{align}\label{3.4}
\left(\phi_{0}, \psi_{0}\right)&(x) \in H^{2}, \quad x \geq 0, \nonumber\\
\left.\psi_{x}\right|_{x=0}&=\left.\phi_{t}\right|_{x=0}= -U(st+\beta_0-\beta), \quad t \geq 0,
\end{align}
where
\begin{align}\label{3.5}
f(V)= -p^{\prime}(V) + (\alpha+1)\frac{{} s V_{x}}{V^{\alpha+2}}=-p^{\prime}(V)+(\alpha+1)\frac{h(V) }{V}>0,
\end{align}
\begin{align}\label{3.6}
F&= \frac{{}{u}_{x}}{{}{v}^{\alpha+1}}-\frac{U_{x}}{V^{\alpha+1}} - \frac{\psi_{xx}}{V^{\alpha+1}}+(\alpha+1)\frac{U_x \phi_x }{V^{\alpha+2}}-\left[p({}{v})-p(V)-p'(V)\phi_x\right] \nonumber\\
&=O(1)(|\phi_{x}|^{2}+|\phi_{x}\psi_{xx}|).
\end{align}
We will seek the solution in the functional space $X_{\delta}(0,T)$ for any $0\leq T < +\infty $,
\begin{align*}
\begin{split}
X_{\delta}(0,T):=&\left\{ (\phi,\psi)\in C ([0,T];H^2)|\phi_{x} \in L^2(0,T;H^1) ,\psi_{x} \in L^2(0,T;H^2)\right. \\
& \sup_{0\leq t\leq T}\|(\phi, \psi)(t)\|_2\leq\delta \},
\end{split}&
\end{align*}
where ${ \delta} \ll 1$ is small.
\begin{proposition}\label{prposition3.1} (A priori estimate)
Suppose that $(\phi,\psi) \in X_{\delta}(0,T)$ is the solution of (\ref{3.3}), (\ref{3.4}) for some time $T>0$. There exists a positive constant $\delta_0 $ independent of $T$, such that if
$$ \sup_{0\leq t\leq T}\|(\phi, \psi)(t)\|_{{2}} \leq \delta_{ }\leq \delta_{0},$$ for $t \in [0,T]$,
then
\begin{eqnarray*}
\|(\phi, \psi)(t)\|_{{2}}^{2}+ \int_{0}^t ( \| \phi_{x}(t) \|^2_{1} + \|\psi_{x}(t)\|_{2}^2 ) \operatorname{d}t \leq C_{0} ( \|(\phi_{0},\psi_{0})\|_{{2}}^2+ e^{-C_-\beta}),
\end{eqnarray*}
where $C_{0} >1$ and $C_{-} $ are positive constants independent of $T$.
\end{proposition}
As long as Proposition \ref{prposition3.1} is proved, the local solution $(\phi,\psi)$ can be extended to $T =+\infty. $ We have the following Lemma.
\begin{lemma}\label{lemma 3.2}
If $(\phi_{0},\psi_{0})\in H^2$, there exists a positive constant $\delta_{1}=\frac{\delta_{0}}{\sqrt{C_{0}}}$, such that if
$$\|(\phi_{0},\psi_{0})\|_{{2}}^2+ e^{-C_-\beta} \leq \delta_{1}^{2},$$ then the initial-boundary problem (\ref{3.3}), (\ref{3.4}) has a unique global solution
$(\phi,\psi)\in X_{\delta_{0}}(0,\infty)$ satisfying
\begin{align*}
\sup_{t\geq0}\|(\phi, \psi)(t)\|_{{2}}^{2}+ \int_{0}^\infty ( \| \phi_{x}(t) \|^2_{1} + \|\psi_{x}(t)\|_{2}^2 ) \operatorname{d}t \leq C_{0} ( \|(\phi_{0},\psi_{0})\|_{{2}}^2+ e^{-C_-\beta}).
\end{align*}
\end{lemma}
\section{A Priori Estimate}\label{Sec.4}
Throughout this section, we assume that the problem $(\ref{3.3}), (\ref{3.4})$ has a solution $(\phi,\psi)\in X_{\delta}(0,T), $ for some $T>0$,
\begin{eqnarray}\label{4.1}
\sup_{0\leq t\leq T}\|(\phi, \psi)(t)\|_{2}\leq \delta.
\end{eqnarray}
It follows from the Sobolev inequality that $\frac{1}{2}v_{-}\leq v \leq \frac{3}{2}v_{+}$, and
\begin{align*}
\sup_{0\leq t\leq T} \{ \|(\phi, \psi)(t)\|_{L^{\infty}} + \|(\phi_{x}, \psi_{x})(t)\|_{L^{\infty}} \} \leq {\delta} .
\end{align*}
\subsection{ Low Order Estimate.}In order to remove the condition $ v_+-v_-< C(\gamma-1)^{-2} $ in \cite{mm1999}, we introduce a new perturbation $(\phi,\Psi)$ instead of $(\phi,\psi)$, where $\Psi$ will be defined below.
Inspired by \cite{vy2016} and \cite{hh2020}, we introduce a new variable $h$ which depends on $v$ and $u$, i.e., ${}{h}={}{u}-{}{v}^{-(\alpha+1)}{}{v}_{x}$. Through a direct calculation, $v$ and $h$ satisfy the following system
\begin{equation}
\left\{ \begin{array}{ll}
&{}{v}_t-{}{h}_x=(\frac{{}{v}_{x}}{{}{v}^{\alpha+1}})_{x},\\
&{}{h}_t+{}{p}_x=0.
\end{array} \right.\label{4.2}
\end{equation}
Then the initial-boundary conditions given in (\ref{1.4}) are changed into
\begin{equation*}
\left\{ \begin{array}{ll}
(v, h)(x, 0)= (v_0,u_0 -{}{v_{0}}^{-(\alpha+1)}{}{v}_{0x})(x) \longrightarrow (v_+,u_+), ~x\to +\infty,\\
h(0,t)={u}(0,t)-{}{v(0,t)}^{-(\alpha+1)}{}{v_{x}(0,t)}=-{}{v(0,t)}^{-(\alpha+1)}{}{v_{x}(0,t)}, t\in\mathbb{R_+}.
\end{array} \right.
\end{equation*}
Let $H=U-V^{-(\alpha+1)}V_{x}$. Then (\ref{2.1}) is equivalent to
\begin{equation}
\left\{ \begin{array}{rl}
V_{t}-H_{x}=&\left(\frac{V_{x}}{V^{\alpha+1}}\right)_{x},\\
H_{t}+p(V)_{x}=&0,\\
(V ,H)(-\infty)=&(v_-,0),\quad (V ,H)(+\infty)=(v_+,u_+).
\end{array} \right.\label{4.3}
\end{equation}
We define
\begin{eqnarray}
-\int_{x}^\infty({}{h}-H)\operatorname{d}x=\Psi.
\label{4.4}\end{eqnarray}
Subtracting (\ref{4.3}) from (\ref{4.2}) and integrating the resulting system with respect to $x$, we have from (\ref{4.4}), $(\ref{3.1})_1$ that
\begin{equation}
\left\{ \begin{array}{ll}
&\phi_t- \Psi_x-\frac{\phi_{xx}}{V^{\alpha+1}}+(\alpha+1)\frac{V_x \phi_x }{V^{\alpha+2}}=G,\\
&\Psi_t+p'(V)\phi_x=-p( {v}|V) ,
\end{array} \right. \label{4.5}
\end{equation}
where
\begin{eqnarray*}
G=\frac{{}{v}_{x}}{{}{v}^{\alpha+1}}-\frac{V_{x}}{V^{\alpha+1}} - \frac{\phi_{xx}}{V^{\alpha+1}}+(\alpha+1)\frac{V_x \phi_x }{V^{\alpha+2}},
\end{eqnarray*}
\begin{eqnarray*}
p({v}|V)=\left(p({}{v})-p(V)\right)-p'(V)\phi_x,
\end{eqnarray*}
with the initial data
\begin{eqnarray*}
\phi (x,0) \in H^{2}, \quad \Psi (x,0) \in H^{1},
\end{eqnarray*}
and boundary data
\begin{eqnarray*}
\Psi (0,t)=-\int_{0}^{\infty} [ {u} (y,t)-U(y-st +\beta_{0}-\beta ) ] \operatorname{ d }y +\left( V^{-(\alpha+1)}-{}{v}^{-(\alpha+1)}\right) (0,t).
\end{eqnarray*}
\begin{lemma} (\cite{hh2020})\label{lemma 4.1}
Under the assumption of (\ref{4.1}), it holds that
\begin{flalign*}
\begin{split}
&p({}{v}|V)\leq C \phi_{x}^{2},\\
&|p({}{v}|V)_{x}|\leq C (|\phi_{xx}\phi_{x}|+|V_x|\phi_{x}^{2}),\\
&|G|\leq C (|\phi_{xx}\phi_{x}|+|V_x|\phi^{2}_{x}).
\end{split}
\end{flalign*}
\end{lemma}
In addition, some boundary estimates are given as follows.
\begin{lemma}\label{lemma 4.2}
Under the same assumptions of Proposition \ref{prposition3.1}, for $0 \leq t \leq T$, it holds that:
\begin{align}\label{4.6}
& \left|\int_{0}^{t}(\phi \Psi)|_{x=0} \operatorname{d}t \right| \leq C e^{-C_{-} \beta}, \quad\, \,\, \, \,\, \, \left| \int_{0}^{t}\left(\phi \phi_{x}\right) |_{x=0} \operatorname{d}t \right| \leq C e^{-C_{-} \beta},\\
\label{4.7}
& \left|\int_{0}^{t}(\phi_{x} \phi_{t})|_{x=0} \operatorname{d}t \right| \leq C e^{-C_{-} \beta}, \quad \, \,\, \, \left| \int_{0}^{t}\left(\psi_{x} \psi_{t}\right) |_{x=0} \operatorname{d}t \right| \leq C e^{-C_{-} \beta},\\
\label{4.8}
& \left|\int_{0}^{t}(\psi_{x} \psi_{xx})|_{x=0} \operatorname{d}t \right| \leq C e^{-C_{-} \beta}, \quad \left| \int_{0}^{t}\left(\psi_{xt} \psi_{xx}\right) |_{x=0} \operatorname{d}t \right| \leq C e^{-C_{-} \beta},
\end{align}
and
\begin{eqnarray} \label{4.9}
\| {\Psi}_{0} \|_{1}^{2} &\leq& \| {\psi}_{0} \|_{1}^{2}+ C \| {\phi}_{0} \|_{2}^{2},
\quad\| \psi \|^{2} \leq \| \Psi \| ^2+ C\|\phi \|_{ 1}^2, \nonumber \\
\| \psi_{x} \|^{2} & \leq & \| \Psi_{x} \| ^2+ C\|\phi_{x} \|_{ 1}^2,
\end{eqnarray}
where $C_-=\frac{v_{-}^{\alpha+1}}{ s_{ }} |p'(v_-)+s_{} ^2|>0$.
\end{lemma}
\begin{proof}
Note that
\begin{eqnarray} \label{4.10}
\notag \Psi (x,t)&=&-\int_{x}^{\infty} [ {u} (y,t)-U(y-st +\beta_{0}-\beta ) ] \operatorname{ d }y\\\notag &&+\left( V^{-(\alpha+1)}-{}{v}^{-(\alpha+1)}\right) (x,t) \\\notag
&=:& \psi (x,t) +p(x,t )\leq \psi (x,t) + C \phi_{ x} (x,t),\\
\psi (x,t)&=& \Psi (x,t) -p(x,t )\leq \Psi (x,t) + C \phi_{ x} (x,t),
\end{eqnarray}
one has (\ref{4.9}) from (\ref{4.10}) immediately. Motivated by \cite{mm1999}, we have
\begin{align} \label{4.11}
|\psi (0,t)|\leq C, \quad |\phi_{x} (0,t)|\leq C, \quad |\phi_{ } (0,t)|\leq C e^{-C_{-} \beta}e^{-C_{-} st}.
\end{align}
Combining (\ref{4.10}) and (\ref{4.11}), we have (\ref{4.6}). The estimates (\ref{4.7}) and (\ref{4.8}) can be found in \cite{mm1999}. Thus the proof is completed.
\end{proof}
\begin{lemma}\label{lemma4.3}
Under the same assumptions of Proposition \ref{prposition3.1}, it holds that
\begin{align*}
\begin{split}
&\|(\phi,\Psi)\|_{ }^2(t)+ \int_0^t\int_{0}^{\infty} \left(\frac{1}{p'(V)}\right)_t \Psi^2\operatorname{d} x \operatorname{d}t+\int_0^t \| \phi_x\|^2 \operatorname{d}t\\
\leq&~ C_{}\|(\phi_0,\Psi_0)\|_{ }^2+C{\delta} \int_0^t \| \phi_{xx}\|^{2}\operatorname{ d}t + C_{} e^{-C_-\beta}.
\end{split}
\end{align*}
\end{lemma}
\begin{proof}
Multiply \ $(\ref{4.5})_1 $ and $(\ref{4.5})_2$ by $\phi$ and $\frac{\Psi}{-p'(V)}$ respectively, sum them up, and integrate the result with respect to $t$ and $x$ over $ [0,t]\times [0,\infty) $. We have
\begin{align}\label{4.12}
&\frac{1}{2} \int_{0}^{\infty} \left(\phi^2-\frac{\Psi^2}{p'(V)}\right) \operatorname{d }x
+ \int_0^t\int_{0}^{\infty} \left\{ \frac{1}{2} \left(\frac{1}{p'(V)}\right)_t \Psi^2
+ \frac{\phi_{x}^2}{V^{\alpha+1}} \right\} \operatorname{d}x \operatorname{d} t \nonumber \\
=& \int_0^t\int_{0}^{\infty} G_{ }\phi \operatorname{d}x \operatorname{d} t
+ \int_0^t\int_{0}^{\infty} \frac{p(v|V)\Psi}{p'(V)} \operatorname{d}x \operatorname{d}t
\nonumber \\
&- \int_0^t (\phi\Psi+(V^{-(\alpha+1)})\phi\phi_{x} )|_{x=0} \operatorname{ d }t+\frac{1}{2} \int_{0}^{\infty} \left(\phi^2-\frac{\Psi^2}{p'(V)}\right)\Big|_{t=0} \operatorname{d }x \nonumber \\
= :&\sum_{i=1}^4 A_i.
\end{align}
Utilizing Lemma \ref{lemma 4.1}, we can get
\begin{align}\label{4.13}
&|A_1+A_2| \nonumber \\
\leq& C \left(\int_0^t\int_{0}^{\infty} \left|\phi_x \phi_{xx} \phi \right| + \left| V_x \phi_x^2 \phi\right| + \left| \Psi \phi_x^2 \right| \operatorname{d}x \operatorname{d}t\right) \nonumber \\
\leq& C \int_0^t \|\phi\|_{L^\infty} \int_{0}^{\infty} \left|\phi_x \phi_{xx} \right| \operatorname{d}x \operatorname{d}t+ C \int_0^t (\|\phi\|_{L^\infty}+ \|\Psi\|_{L^\infty}) \int_{0}^{\infty} \phi_x^2 \operatorname{d}x \operatorname{d}t\nonumber \\
\leq& C (\|\phi\|_{2}+ \|\psi\|_{1} ) \int_0^t \|\phi_x\|^2 +\|\phi_{xx}\|^2 \operatorname{ d}t\nonumber \\
\leq & C \delta \int_0^t \|\phi_x\|^2 +\|\phi_{xx}\|^2 \operatorname{ d}t.
\end{align}
With the help of Lemma \ref{lemma 4.2}, one has
\begin{align}\label{4.14}
\begin{split}
|A_3|\leq& C e^{-C_- \beta}.
\end{split}&
\end{align}
Taking $\delta$ sufficiently small, using (\ref{4.12})--(\ref{4.14}), we get Lemma \ref{lemma4.3}.
\end{proof}
\begin{lemma}\label{lemma4.4}
Under the same assumptions of Proposition \ref{prposition3.1}, it holds that
\begin{eqnarray*}
\|(\phi,\Psi)(t)\|_{ 1}^2+\int_0^t\| \phi_x \|_{ 1}^2\operatorname{d} t\leq C_{ }\|(\phi_0,\Psi_0)\|_{ 1}^2 + C_{ } e^{-C_-\beta}.
\end{eqnarray*}
\end{lemma}
\begin{proof}
Multiply $ (\ref{4.5})_1 $ and $ (\ref{4.5})_2 $ by $-\phi_{xx}$ and $\frac{\Psi_{xx}}{p'(V)}$ respectively, sum over the result, integrate the result with respect to $t$ and $x$ over $ [0,t]\times [0,\infty) $. We have
\begin{align}
& \frac{1}{2} \int_{0}^{\infty} \left(\phi_x^2-\frac{\Psi_x^2}{p'(V)}\right) \operatorname{d}x + \int_0^t\int_{0}^{\infty} \left\{ \frac{1}{2} \left(\frac{1}{p'(V)}\right)_t\Psi_x^2 + \frac{\phi^2_{xx}}{V^{\alpha+1}}\right\} \operatorname{d}x \operatorname{d} t \nonumber \\
=&\frac{1}{2} \int_{0}^{\infty} \left(\phi_x^2-\frac{\Psi_x^2}{p'(V)}\right)\Big|_{t=0} \operatorname{d}x \nonumber \\
&-\int_0^t\int_{0}^{\infty} \left[G-(\alpha+1)\frac{V_x }{V^{\alpha+2}} \phi_x \right]\phi_{xx} \operatorname{d}x \operatorname{d} t \nonumber \\
&- \int_0^t\int_{0}^{\infty} \left(\frac{1}{p'(V)}\right)_{x} p'(V) \Psi_{x} \phi_{x} \operatorname{d}x \operatorname{d} t \nonumber \\
&- \int_0^t \left(\phi_{t}\phi_{x}-\phi_{x}\Psi_{x} -\frac{\Psi_{t}\Psi_{x}}{p'(V)}-\frac{p(v|V)} {p'(V)}\Psi_{x}\right)\Big|_{x=0} \operatorname{ d }t \nonumber \\
&+ \int_0^t\int_{0}^{\infty} \frac{1}{p'(V)} p(v|V)_{x}\Psi_{x} \operatorname{d}x \operatorname{d} t \nonumber \\
= : &\frac{1}{2} \int_{0}^{\infty} \left(\phi_x^2-\frac{\Psi_x^2}{p'(V)}\right)\Big|_{t=0} \operatorname{d}x +\sum_{i=1}^{4} B_i.
\label{4.15}\end{align}
Now we estimate $B_i$ term by term. The Cauchy inequality indicates that
\begin{align}
|B_1|& \leq C \int_0^t\int_{0}^{\infty} (|\phi_{xx}\phi_x|+|V_x\phi^{2}_x|)|{ \phi_{xx}}| + | \phi_x \phi_{xx} | \operatorname{d} x \operatorname{d}t \nonumber \\
&\leq (C \delta + \varepsilon )\int_0^t \|\phi_{xx}\|^{2} \operatorname{ d}t + C_{\varepsilon} \int_0^t \|\phi_{x}\|^2\operatorname{ d}t ,
\end{align}
and
\begin{align}
|B_2|\leq& \int_0^t \int_{0}^{\infty} \left|p'(V)\Psi_x \phi_x\left(\frac{1}{p'(V)}\right)_x \right| \operatorname{d} x \operatorname{d}t \nonumber \\
\leq&\frac{1}{4} \int_0^t\int_{0}^{\infty}\left(\frac{1}{ p'(V)}\right)_t\Psi_x ^2 \operatorname{d} x \operatorname{d}t +C \int_0^t \|\phi_x\| ^2 \operatorname{ d}t.
\end{align}
Making use of the estimate (\ref{4.7}) for the boundary, we have
\begin{align}
B_3 &= -\int_0^t \left(\phi_{t}\phi_{x}-\phi_{x}\Psi_{x} -\frac{\Psi_{t}\Psi_{x}}{p'(V)}-\frac{p(v|V)} {p'(V)}\Psi_{x}\right)\Big|_{x=0} \operatorname{ d }t \nonumber \\
&= - \int_0^t (\phi_{t}\phi_{x} )|_{x=0} \operatorname{ d }t \leq C e^{-C_- \beta}.
\end{align}
By (\ref{4.9}) and the Sobolev inequality, we obtain
\begin{align}
|B_4|\leq& \int_0^t\int_{0}^{\infty} \left| \frac{1}{p'(V)} p({v}|V)_{x}\Psi_{x}\right| \operatorname{d}x \operatorname{d} t \nonumber \\
\leq & C\int_0^t\int_{0}^{\infty} \left| (\phi_{x}\phi_{xx}+V_x\phi_{x}^{2}) \Psi_{x} \right| \operatorname{d}x \operatorname{d} t \nonumber \\
\leq& C\int_0^t\int_{0}^{\infty} \left\{ \left| (\phi_{x}\phi_{xx}+V_x\phi_{x}^{2}) \psi_{x} \right| + \left| (\phi_{xx}\phi_{xx}+V_x\phi_{x}\phi_{xx} ) \phi_{x} \right| \right\}\operatorname{d}x\operatorname{d} t \nonumber \\
\leq& C (\|\phi\|_{2}+ \|\psi\|_{2} ) \int_0^t \|\phi_x\|^2 +\|\phi_{xx}\|^2 \operatorname{ d}t \nonumber \\
\leq& C\delta \int_0^t (\|\phi_{xx}\|^2+\|\phi_{x}\|^2 ) \operatorname{ d} t.
\label{4.19}\end{align}
From (\ref{4.15})--(\ref{4.19}), we get
\begin{align*}
\begin{split}
& \frac{1}{2} \int_{0}^{\infty} \left(\phi_x^2-\frac{\Psi_x^2}{p'(V)}\right) \operatorname{d }x + \frac{1}{4}\int_0^t\int_{0}^{\infty} \left[\left(\frac{1}{p'(V)}\right)_t\Psi_x^2 +\frac{\phi^2_{xx}}{V^{\alpha+1}} \right] \operatorname{d}x \operatorname{d} t \\
\leq& (C +C\delta+C_{\varepsilon} )\int_0^t \|\phi_{x}\|^2 \operatorname{ d}t
+(C\delta+\varepsilon )\int_0^t \|\phi_{xx}\|^2 \operatorname{ d}t\\
& + C_{ } e^{-C_-\beta}+C_{ } \left( \|\phi_{0x}\|^2 +\|\Psi_{0x}\|^2 \right) .
\end{split}&
\end{align*}
Choosing $\varepsilon$ sufficiently small, together with Lemma \ref{lemma4.3}, we complete the proof of Lemma \ref{lemma4.4}.
\end{proof}
\begin{lemma}\label{lemma4.5}
Under the same assumptions of Proposition \ref{prposition3.1}, it holds that
\begin{eqnarray*}
\int_0^t \|\Psi_{x}(t)\|_{ }^2 \operatorname{d}t \leq C \|(\phi_0,\Psi_0)\|_{ 1}^2 + C e^{-C_-\beta}.
\end{eqnarray*}
\end{lemma}
\begin{proof}
Multiply $(\ref{4.5})_1 $ by $\Psi_{x}$ and make use of $(\ref{4.5})_2$. We get
\begin{align}\label{4.20}
\Psi_{x}^{2}=&(\phi\Psi_{x})_{t}+[\phi (p(v)-p({V}{}))]_{x}-\phi_{x} (p( v)-p({V}{})) \nonumber \\
&-\frac{\Psi_{x}\phi_{xx}}{V^{\alpha+1}}-\Psi_{x}\left[G-(\alpha+1)\frac{V_x\phi_x}{V^{\alpha+2}}\right].
\end{align}
Integrate $ (\ref{4.20})$ with respect to $t$ and $x$ over $ [0,t]\times [0,\infty)$. We have
\begin{align}\label{4.21}
& \int_{0}^{t} \|\Psi_{x}\|^{2} \operatorname{ d}t \nonumber \\
=&-\int_{0}^{\infty} \phi\Psi_{x}|_{t=0}\operatorname{d }x+\int_{0}^{t}\int_{0}^{\infty} -\Psi_{x}\left[G-(\alpha+1)\frac{V_x\phi_x}{V^{\alpha+2}}\right]\operatorname{d}x \operatorname{d}t
\nonumber \\
&+\int_{0}^{\infty}\phi\Psi_{x}\operatorname{d} x-\int_{0}^{t}\int_{0}^{\infty} \frac{\Psi_{x}\phi_{xx}}{V^{\alpha+1}}\operatorname{d}x \operatorname{d}t \nonumber \\
&-\int_{0}^{t}\int_{0}^{\infty}\phi_{x}\left(p( {v})-p(V)\right) \operatorname{d}x \operatorname{d}t-\int_{0}^{t} \phi (p(v)-p({V}{}))|_{x=0} \operatorname{ d}t \nonumber \\
=:&-\int_{0}^{\infty} \phi\Psi_{x}|_{t=0}\operatorname{d}x+\sum_{i=1}^5 H_i.
\end{align}
We estimate $H_i$ term by term. By the Cauchy inequality, it holds that
\begin{align}
H_1&\leq C \int_{0}^{t}\int_{0}^{\infty} \Psi_{x}(|\phi_{x}\phi_{xx}|+|V_{x}\phi_{x}|) \operatorname{d}x \operatorname{d}t \nonumber \\
&\leq \varepsilon \int_{0}^{t} \|\Psi_{x}\|^{2} \operatorname{ d}t+ C_{\varepsilon} \int_{0}^{t} ( \|\phi_{xx}\|^{2} + \|\phi_{x} \|^{2} ) \operatorname{ d}t.
\end{align}
In addition, it is straightforward to imply that
\begin{align}
&H_2+H_3+H_4 \nonumber \\
\leq & \| (\phi^{}, \Psi_{x}) \|^{2}+\varepsilon\int_{0}^{t} \|\Psi_{x}\|^{2}\operatorname{ d}t+C_{\varepsilon}\int_{0}^{t} \|\phi_{xx}\|^2 \operatorname{ d}t+C\int_{0}^{t}\|\phi_{x}\|^2 \operatorname{ d}t.
\end{align}
Making use of the estimate (\ref{4.6}) for the boundary, we have
\begin{align}\label{4.24}
\begin{split}
H_5&= -\int_{0}^{t} \phi (p(v)-p({V}{}))|_{x=0} \operatorname{ d}t \leq C \int_{0}^{t} \phi \phi_{x} |_{x=0} \operatorname{ d}t \leq Ce^{-C_-\beta}.\\
\end{split}&
\end{align}
Collecting (\ref{4.21})-(\ref{4.24}) and using Lemma \ref{lemma4.4}, we complete the proof of Lemma \ref{lemma4.5}.
\end{proof}
Combining Lemma \ref{lemma4.3}-Lemma \ref{lemma4.5}, we obtain the following low order estimates
\begin{align*}
\|(\phi,\Psi)\|_{ 1}^2(t)+ \int_0^t \| \Psi_x\|^2 \operatorname{d}t+\int_0^t \| \phi_x\|_{1}^2 \operatorname{d}t\leq C_{}\|(\phi_0,\Psi_0)\|_{ 1}^2 + C_{} e^{-C_-\beta},
\end{align*}
which can be rewritten by the variables $\phi$ and $\psi$ as
\begin{lemma}\label{lemma4.6}
Under the same assumptions of Proposition \ref{prposition3.1}, it holds that
\begin{align*}
\begin{split}
&(\| \phi \|_{ 1}^2 + \| \psi \| ^2)(t)+\int_0^t \| \psi_x\|^2 \operatorname{d}t+\int_0^t \| \phi_x\|_{1}^2 \operatorname{d}t\leq C_{}\| \phi_0 \|_{2}^2 +C_{}\| \psi_0 \|_{ 1}^2 + C_{} e^{-C_-\beta}.
\end{split}
\end{align*}
\end{lemma}
\subsection{High Order Estimate.}
Since the second derivative of $\Psi$ on the boundary is unknown, we turn to the original equation \eqref{3.3} to study the higher order estimates.
\begin{lemma}\label{lemma4.7}
Under the same assumptions of Proposition \ref{prposition3.1}, it holds that
\begin{align}\label{4.25}
\begin{split}
& \| \psi_{x} \| ^2 (t) +\int_0^t \| \psi_{xx}\| ^2 \operatorname{d}t\leq C_{}\| \phi_0 \|_{2}^2 +C_{}\| \psi_0 \|_{ 1}^2 + C_{} e^{-C_-\beta}.
\end{split}
\end{align}
\end{lemma}
\begin{proof} Multiplying $(\ref{3.3})_{2}$ by $-\psi_{x x}$, integrating the result with respect to $t$ and $x$ over $ [0,t]\times [0,\infty) $ gives
\begin{align} \label{4.26}
& \frac{1}{2}\| \psi_{x}\|^{2} (t)
+\int_{0}^{t}\int_{0}^{\infty} \frac{{\psi_{x x}^{2}}}{V^{\alpha+1}} \operatorname{d}x \operatorname{d}t \nonumber \\
=&\frac{1}{2}\| \psi_{0x}\|^{2} -\int_{0}^{t} \left\{\psi_{x} \psi_{t}\right\}|_{x=0} \operatorname{ d}t
-\int_{0}^{t}\int_{0}^{\infty} f(V) \phi_{x} \psi_{x x} \operatorname{d}x \operatorname{d}t \nonumber \\
&-\int_{0}^{t}\int_{0}^{\infty} F \psi_{x x} \operatorname{d}x \operatorname{d}t
\nonumber \\
=:&\frac{1}{2}\| \psi_{0x}\|^{2} +\sum_{i=1}^3 M_i.
\end{align}
Making use of the estimate (\ref{4.7}) for the boundary, we have
\begin{align} \label{4.27}
M_{1} \leq C_{} e^{-C_-\beta}.
\end{align}
The Cauchy inequality implies that
\begin{align}
M_{2} \leq \varepsilon \int_{0}^{t} \|\psi_{x x}\|^{2}\operatorname{ d}t+ C_{\varepsilon}\int_{0}^{t} \|\phi_{x}\|^{2}\operatorname{ d}t.
\end{align}
By (\ref{3.6}) and the Sobolev inequality, it follows that
\begin{align} \label{4.29}
M_{3} &\leq C \int_{0}^{t}\int_{0}^{\infty}\left(\left|\phi_{x}\right|^{2} +\left|\phi_{x}\right|\left|\psi_{x x}\right|\right)\left|\psi_{x x}\right| \operatorname{d} x \operatorname{d}t \nonumber \\
&\leq C \int_{0}^{t}\int_{0}^{\infty}\left|\phi_{x}\right|\left(\left|\phi_{x}\right|^{2}+\left|\psi_{x x}\right|^{2}\right) \operatorname{d}x\operatorname{d}t \nonumber \\
&\leq C \delta \int_{0}^{t} \left(\left\|\phi_{x}\right\|^{2}+\left\|\psi_{x x}\right\|^{2}\right) \operatorname{ d}t.
\end{align}
Substituting (\ref{4.27})-(\ref{4.29}) into $(\ref{4.26}) $ and using Lemma \ref{lemma4.6}, we obtain (\ref{4.25}).
\end{proof}
\begin{lemma}\label{lemma4.8}
Under the same assumptions of Proposition \ref{prposition3.1}, it holds that
\begin{align}\label{4.30}
\begin{split}
&\| \phi_{xx} \| ^2 +\int_0^t \| \phi_{xx}\|_{}^2 \operatorname{d}t \leq C_{}\| \phi_0 \|_{2}^2 +C_{}\| \psi_0 \|_{ 1}^2 + C_{} e^{-C_-\beta}+ C\delta \int_{0}^{t}\left\|\psi_{xx x} \right\|^2 \operatorname{d}t.
\end{split}
\end{align}
\end{lemma}
\begin{proof}
Differentiating $(\ref{3.3})_{1}$ with respect to $x$, using $(\ref{3.3})_{2}, $ we have
\begin{align}\label{4.31}
\begin{split}
\frac{ \phi_{x t}}{V^{\alpha+1}}+f(V) \phi_{x}=\psi_{t}-F.
\end{split}
\end{align}
Differentiating $(\ref{4.31})$ in respect of $x$ and multiplying the derivative by $\phi_{x x}$, integrating the result in respect of $t$ and $x$ over $ [0,t]\times [0,\infty) $, using (\ref{2.3}), one has
\begin{align} \label{4.32}
&\frac{1}{2} \int_{0}^{\infty} \frac{ \phi_{x x}^{2}}{ V^{ \alpha+1 }} \operatorname{d }x
+\int_{0}^{t}\int_{0}^{\infty}\left(f(V)-\frac{(\alpha+1)h(V)}{2 V}\right) \phi_{x x}^{2}\operatorname{d}x \operatorname{d}t \nonumber \\
=& \frac{1}{2} \int_{0}^{\infty} \frac{ \phi_{x x}^{2}}{ V^{ \alpha+1 }} \Big|_{t=0}\operatorname{d }x-\int_{0}^{\infty} \psi_{x } \phi_{x x} \Big|_{t=0}\operatorname{d }x+\int_{0}^{\infty} \psi_{x } \phi_{x x} \operatorname{d }x\nonumber \\
&+\int_{0}^{t} \psi_{x } \psi_{x x} \Big|_{x=0}\operatorname{ d}t
+\int_{0}^{t} \| \psi_{x x}\|^{2}\operatorname{ d}t-\int_{0}^{t}\int_{0}^{\infty}F_{x} \phi_{x x}\operatorname{d}x\operatorname{d}t
\nonumber \\
&+(\alpha+1)\int_{0}^{t}\int_{0}^{\infty}\frac{{} V_{x}}{V^{\alpha+2}} \phi_{x t} \phi_{x x}\operatorname{d}x\operatorname{d}t-\int_{0}^{t}\int_{0}^{\infty}f(V)_{x} \phi_{x} \phi_{x x}\operatorname{d}x\operatorname{d}t\nonumber \\
=:&\frac{1}{2} \int_{0}^{\infty} \frac{ \phi_{x x}^{2}}{ V^{ \alpha+1 }} \Big|_{t=0}\operatorname{d }x-\int_{0}^{\infty} \psi_{x } \phi_{x x} \Big|_{t=0}\operatorname{d }x+\sum_{i=1}^6 N_i.
\end{align}
By (\ref{2.5}) and (\ref{3.5}), one has
\begin{align} \label{4.33}
f(V)-\frac{(\alpha+1)h(V)}{2 V}\geq -p'(v_+)>0.
\end{align}
The Cauchy inequality yields
\begin{align}
N_{1}\leq \varepsilon \|\phi_{x x}\|^{2} +C_\varepsilon \|\psi_{x }\| ^{2}.
\end{align}
Making use of the estimate (\ref{4.8}) for the boundary, it follows that
\begin{align}
N_{2} \leq C_{} e^{-C_-\beta}.
\end{align}
$ N_{3} $ can be controlled by (\ref{4.25}). By the Cauchy inequality, we have
\begin{align*}
\begin{split}
|N_{4}| \leq & \varepsilon \int_{0}^{t} \|\phi_{x x}\|^{2} \operatorname{ d}t + C_{\varepsilon} \int_{0}^{t}\left\|F_{ x}\right\|^{2} \operatorname{ d}t. \\
\end{split}
\end{align*}
Using
\begin{align*}
\begin{split}
\left\|F_{x}\right\|^{2} & \leq C \int_{0}^{\infty}\left(\phi_{x}^{4}+\phi_{x}^{2} \phi_{x x}^{2}+\psi_{x x}^{2} \phi_{x x}^{2}+\psi_{x x x}^{2} \phi_{x}^{2}+\phi_{x}^{2} \psi_{x x}^{2}\right)\operatorname{d}x \\
& \leq C \delta\left(\left\|\phi_{x}\right\|_{1}^{2}+\left\|\psi_{x}\right\|_{2}^{2}\right),
\end{split}
\end{align*}
we have the estimate of $N_4$
\begin{align}
\begin{split}
|N_{4}| \leq & \varepsilon \int_{0}^{t} \|\phi_{x x}\|^{2} \operatorname{ d}t + C_{\varepsilon} \delta \int_{0}^{t} \left(\left\|\phi_{x}\right\|_{1}^{2}+\left\|\psi_{x}\right\|_{2}^{2}\right)\operatorname{ d}t. \\
\end{split}
\end{align}
The Cauchy inequality yields
\begin{align}
\begin{split}
|N_{5}| \leq C & \int_{0}^{t}\int_{0}^{\infty} \left|\frac{{} V_{x}}{V^{\alpha+2}} \psi_{x x} \phi_{x x}\right| \operatorname{d}x \operatorname{d}t\leq \varepsilon \int_{0}^{t} \|\phi_{x x}\|^{2} \operatorname{ d}t + C_{\varepsilon} \int_{0}^{t}\left\|\psi_{x x}\right\|^{2} \operatorname{ d}t, \\
\end{split}
\end{align}
\begin{align} \label{4.38}
\begin{split}
|N_{6}| \leq & \varepsilon \int_{0}^{t} \|\phi_{x x}\|^{2} \operatorname{ d}t + C_{\varepsilon} \int_{0}^{t}\left\|\phi_{ x}\right\|^{2} \operatorname{ d}t. \\
\end{split}
\end{align}
Choosing $\varepsilon$ small, substituting (\ref{4.33})-(\ref{4.38}) into (\ref{4.32}) and using Lemma \ref{lemma4.6}, Lemma \ref{lemma4.7}, we have (\ref{4.30}).
\end{proof}
On the other hand, differentiating the second equation of (\ref{3.3}) with respect to $x$, multiplying the derivative by $-\psi_{x x x}$, integrating the resulting equality over $[0, \infty) \times[0, t]$, using Lemma \ref{lemma4.6}-Lemma \ref{lemma4.8}, we can get the highest order estimate in the same way, which is listed as follows and the proof is omitted.
\begin{lemma}\label{lemma4.9}
Under the same assumptions of Proposition \ref{prposition3.1}, it holds that
\begin{align}\label{4.39}
\begin{split}
& \| \psi_{xx} (t)\| ^2+\int_0^t \| \psi_{xxx}\| ^2 \operatorname{d}t \leq C_{}\| ( \phi_0 , \psi_0 )\|_{ 2}^2 + C_{} e^{-C_-\beta} .
\end{split}
\end{align}
\end{lemma}
Finally, Proposition \ref{prposition3.1} is obtained by Lemma \ref{lemma4.5}-Lemma \ref{lemma4.9}.
\section{Proof of Theorem \ref{theorem} }\label{Sec.5}
Now we turn to the proof of the main theorem, i.e., Theorem \ref{theorem}. It is straightforward to deduce (\ref{2.11}) from Lemma \ref{lemma 3.2}.
It remains to show (\ref{2.12}). We will use the following useful lemma.
\begin{lemma}(\cite{mn1985})\label{lemma5.1}
Assume that the function $f(t) \geq 0$ satisfies $f \in L^1(0, +\infty) \cap BV(0, +\infty)$. Then it holds that $f(t) \rightarrow 0$ as $t \rightarrow \infty$.
\end{lemma}
\begin{proof} {\bf (Proof of Theorem \ref{theorem}.)}\,
Differentiating the first equation of (\ref{3.3}) with respect to $x$, multiplying the
resulting equation by $\phi_{x}$, and integrating on $(0,\infty)$, we have
\begin{equation*}
\left|\frac{\operatorname{ d}}{\operatorname{ d}t}\left(\|\phi_{x}\|^{2}\right)\right|\leq C(\|\phi_{x}\|^{2} +\|\psi_{xx}\|^{2}) .
\end{equation*}
Using Lemma \ref{lemma 3.2}, we have
\begin{equation*}
\int_{0}^{\infty} \left|\frac{\operatorname{ d}}{\operatorname{ d}t}\left(\|\phi_{x}\|^{2}\right)\right| \operatorname{ dt} \leq C\left\{\left\|\left(\phi_{0}, \psi_{0}\right)\right\|_{2}^{2}+e^{-c_{-} \beta}\right\}\leq C,
\end{equation*}
which implies $\|\phi_{ x}\|^{2}\in L^1(0, +\infty) \cap BV(0, +\infty)$. By Lemma \ref{lemma5.1}, we have
\begin{equation*}
\|\phi_{ x}\|\rightarrow0 \quad \text{as} \quad t\rightarrow+\infty.
\end{equation*}
Since $\|\phi_{xx}\|$ is bounded, the Sobolev inequality implies that
\begin{eqnarray*}
\|{}{v}-V\|_{\infty}^{2}=\|\phi_{x}\|_{\infty}^{2} \leq 2\| \phi_{x}(t)\|_{} \| \phi_{xx}(t)\|_{} \rightarrow 0.
\end{eqnarray*}
Similarly, we have
\begin{eqnarray*}
\|{}{u}-U\|_{\infty}^{2}=\|\psi_{x}\|_{\infty}^{2} \leq 2\| \psi_{x}(t)\|_{} \| \psi_{xx}(t)\|_{} \rightarrow 0.
\end{eqnarray*}
Therefore, the proof of Theorem \ref{theorem} is completed.
\end{proof}
\end{document} |
\begin{document}
\begin{center}{\Large \bfseries A simple model of Small-World Quantum Networks}\\
Ashkan Abedi$\footnote{email:[email protected]}$ ,\hskip 1cm Vahid Karimipour$\footnote{email:[email protected]}$\hskip 1cm \\
Department of Physics, Sharif University of Technology, P.O. Box 11155-9161, Tehran, Iran.\\
\end{center}
\begin{abstract}
A simple model of small world quantum networks, in which a central node plays an essential role, is introduced for sharing entanglement over long distances. In view of the challenges in setting up advanced quantum labs, which allow only a few nodes in a network to play a central role, this kind of small world network may be more relevant for entanglement distribution. It is shown that by only adding a small number of short-cuts, it is possible to produce maximally entangled pairs between arbitrary nodes in the network. Besides this, the threshold values for the initial amount of entanglement and the distance between nodes, for obtaining highly entangled states shared between remote points, are also investigated.
\end{abstract}
\section{Introduction}
When quantum communication \cite{Qcomm} becomes a reality and a commonplace in the technology of the future, entangled states will be an essential ingredient in the networks which are to carry these communications. One can imagine that there will be networks of many nodes where bi-partite maximally entangled states created between distant nodes act in one way or another as carrier of quantum information. The climax of this will be the quantum internet \cite{Qnet}. Being a very fragile resource against noise, which inevitably is more destructive as the distance increases and as time passes, one can use quantum repeaters \cite{repeaters1} \cite{repeaters2} for purifying two partially entangled states shared over short distances to a maximally entangled state over longer distances. The simplest example is shown in figure (\ref{fig:repeater}), where one repeater at site $R$ makes a Bell measurement on the two qubits at this site and thus the partial entanglement \cite{ES} between two pairs $AR$ and $RB$, is swapped with a maximally entangled state at $AB$,
\begin{equation}\label{eq:ro}
|\phi\rangle_{AR}=|\phi\rangle_{RB}=\sqrt{\phi}\ket{00}+\sqrt{1-\phi}\ket{11} \hskip 1cm \text{ $ 0\leq \phi\leq 0.5$}.
\end{equation}
\begin{figure}[H]
\centering
\begin{tikzpicture}[
dot/.style={fill,circle, minimum size=5pt,inner sep=0,node contents={}},
circ/.style={draw, circle, minimum size=8mm,inner sep=0pt, node contents={}}
]
\node [dot, name=n1];
\node [dot, name=n2, right=of n1];
\node [dot, name=n3, right=4mm of n2];
\node [dot, name=n4, right=of n3];
\draw [line width=0.2pt] (n1) -- (n2) (n3) -- (n4);
\node [circ, fit=(n2)(n3), label={[name=R]below:$R$}];
\node [circ, left, at=(n1.east), name=A];
\node [circ, right, at=(n4.west), name=B];
\node at (A |- R) {$A$};
\node at (B |- R) {$B$};
\end{tikzpicture}
\caption{A simple quantum repeater. Circles and dots represent nodes and qubits respectively.}
\label{fig:repeater}
\end{figure}
This measurement produces with probability $ P=2\phi(1-\phi) $ a maximally entangled state between the labs A and B $$|\phi_0\rangle_{AB}=\frac{1}{\sqrt{2}}(\ket{00}+\ket{11})$$
and with probability $1-P$ a state $$|\phi_2\rangle_{AB}=\frac{1}{\sqrt{\phi^2+(1-\phi)^2}}(\phi\ket{00}+(1-\phi)\ket{11})$$ which is even less entangled than the original one. This state can in turn be converted (distilled) to a maximally entangled state with probability $\frac{2\phi^2}{\phi^2+(1-\phi)^2}$. The total Singlet Conversion Probability \cite{percolation} sums up to
\begin{equation}
SCP(2,\phi)=2\phi(1-\phi)+(1-2\phi(1-\phi))\frac{2\phi^2}{\phi^2+(1-\phi)^2}=2\phi.
\end{equation}
\begin{figure}[H]
\centering
\begin{tikzpicture}[
dot/.style={fill,circle, minimum size=5pt,inner sep=0,node contents={}},
circ/.style={draw, circle, minimum size=8mm,inner sep=0pt, node contents={}}
]
\node [dot, name=n1];
\node [dot, name=n2, right=of n1];
\node [dot, name=n3, right=4mm of n2];
\node [dot, name=n4, right=of n3];
\node [dot, name=n5, right=4mm of n4];
\node [dot, name=n6, right=20mm of n5];
\node [dot, name=n7, right=4mm of n6];
\node [dot, name=n8, right=of n7];
\node [dot, name=n9, right=4mm of n8];
\node [dot, name=n10, right=of n9];
\draw [line width=0.1pt] (n1) -- (n2) (n3) -- (n4) (n7) -- (n8) (n9) -- (n10);
\draw [line width=0.1pt, dashed] (n5) -- (n6);
\node [circ, fit=(n2)(n3), label={[name=R]below:$R_1$}];
\node [circ, fit=(n4)(n5), label={[name=R]below:$R_2$}];
\node [circ, fit=(n6)(n7), label={[name=R]below:$R_{N-1}$}];
\node [circ, fit=(n8)(n9), label={[name=R]below:$R_{N}$}];
\node [circ, left, at=(n1.east), name=A];
\node [circ, right, at=(n10.west), name=B];
\node at (A |- R) {$A$};
\node at (B |- R) {$B$};
\end{tikzpicture}
\caption{A 1-D network of n quantum repeaters. Circles and dots represent nodes and qubits respectively.}
\label{fig:n-repeaters}
\end{figure}
For larger distances, i.e. in linear networks like the one shown in figure (\ref{fig:n-repeaters}), when more repeaters are used, one can repeat this procedure and obtain the total Singlet Conversion Probability. This has been done in \cite{e_distribution} and a very good approximate result is given by
\begin{equation}
SCP(N,\phi)=1-(1-2\phi)\sum_{k=0}^{\lfloor \frac{N}{2} \rfloor} (\phi(1-\phi))^{k} \binom{2k}{k}.
\label{eq:scp}
\end{equation}
For large $N$, the $SCP(N)$ is bounded above by \cite{e_distribution}
\begin{equation}
SCP(N,\phi)\lesssim (4\phi(1-\phi))^{\frac{N}{2}},
\end{equation}
which shows that unless we start with perfect Bell pairs ($\phi=\frac{1}{2}$), the success probability for generating Bell pairs between distant labs decreases exponentially by the number of repeaters. This discouraging result for linear networks, has prompted investigation of entanglement percolation \cite{percolation} and entanglement distribution \cite{e_distribution} in regular one- and two-dimensional networks with various (i.e rectangular, triangular and hexagonal) geometries. In particular it has been shown that the SCP can reach unit value in a finite number of steps, provided that the initial entanglement between pairs exceeds a threshold value $\phi_t$, which depends on the geometry. To come a little closer to networks in real life, quite recently random quantum networks have been studied \cite{q_random}. In these networks, which are adaptation of the Erdos and Renyi random graphs \cite{random} to the quantum domain, pairs of nodes are maximally entangled with a probability $p$ and are disentangled with probability $1-p$. It has been shown that any quantum subgraph can be generated by Local Operation and Classical Communication (LOCC) if $p\sim N^{-2}$, $N$ being the size of the nodes.\\
While random graphs have some interesting features in common with real networks, like short average distance between nodes, they fail to simulate all the properties of real networks.
In particular they do not show the important property of clustering. In real networks, two nodes which are the neighbors of a given node have a much higher probability of being connected together, compared with two arbitrary nodes. This is measured by the clustering coefficient C \cite{book2} which is the the fraction of neighbors of a given site which are nearest neighbors of each other. For a rectangular graph in any dimension $C=0$, and for a fully connected graph, $C=1$. For a regular 2D triangular lattice, $C=\frac{2}{5}$ (any node in such a graph has 6 neighbors which are connected to each other, while the total number of pairs is ${6\choose 2}=15.$). Small world networks \cite{Watts} are models of networks in which both properties, namely small average distance and clustering property are present. \\
The ubiquity of small world networks makes it very desirable to investigate the problem of entanglement generation in such networks. A simple model was first introduced by Watts and Strogatz in \cite{Watts}, (figure (\ref{fig:watts1})), where a ring of $N$ sites is considered in which every site has a number of $s$ neighbors. Depending on the value $s$, this provides the clustering property of the network.
The other element of a real network is the low average distance between the nodes which is provided by random short-cuts in the lattice. Therefore in figure (\ref{fig:watts1}), with probability $p$ a node's connection to its nearest neighbor is cut in favor of a new long range connection to a distant node. Small world networks have been studied in the quantum domain in a number of contexts, like transportation of excitations\cite{muel1, muel2}, and also as realistic models of random networks for entanglement distributions\cite{q_random, e_distribution}.
\begin{figure}[h]
\centering
\includegraphics[scale=0.5]{watts1-3}
\caption{A simple small-world network model with 20 nodes introduced by Watts and Strogatz, with $s=4$ and $p=0.1$ in this network.}
\label{fig:watts1}
\end{figure}
In this work, we are not concerned with the Watts and Strogatz model, whose quantum version has been studied in \cite{q_random}. Instead we consider an even simpler model first introduced in \cite{Dorogovtsev}, where all shortcuts are made, with a probability $p$, to a central node in the graph.
This model is much more relevant to quantum networks since it is plausible that there should be only very few nodes (labs) which can afford full-fledged facilities for producing entangled states and sharing it with arbitrary labs in a future quantum network, figure (\ref{fig:swn_example}). The central node in this figure can even represent a satellite to which other labs on the Earth which are out of sight of each other, share entangled states. The shared entangled states with the satellite are depicted by shortcuts to the center in figure (\ref{fig:swn_example}). Entanglement swapping on the satellite then provides entangled states between these remote labs. Under these circumstances, natural questions arise which we will investigate in this paper. In particular we are interested in the following questions: \\
1- How does the threshold value of the initial entanglement between the pairs depend on the number of shortcuts, if we want to maximally entangle distant points in this network?\\
2- For a given value of initial entanglement between the pairs, is there a critical number of shortcuts above which arbitrary pairs of labs far from each other can still establish maximally entangled states between themselves? \\
The answers to these questions are summarized in figures (\ref{fig:scp_n_2}) and (\ref{fig:r0}). From figure (\ref{fig:scp_n_2}) we see that even a small number of shortcuts and a small value for clustering coefficient has an appreciable effect on entanglement distribution in these networks. \\
The paper is structured as follows: In section (\ref{small}), we briefly review the small world network model of \cite{Dorogovtsev} and derive its properties. In section (\ref{mysmall}) we calculate the singlet conversion probability for this network and present the results in two figures (\ref{fig:scp_n_2}) and (\ref{fig:r0}). The paper ends with a discussion.
\begin{figure}[h]
\centering
\includegraphics[scale=0.18]{swn_example_2}
\caption{A quantum small-world network with 21 nodes. Circles and dots represent nodes and qubits respectively. $n=20, m=2$ and $p=0.1$ in this network.}
\label{fig:swn_example}
\end{figure}
\section{A simple model of small world network}\label{small}
Figure (\ref{fig:directed1}) shows a simple model of small world network first introduced in \cite{Dorogovtsev}. There is one node (central lab) in the network which has a central role and other nodes in the graph are connected to this node with probability $p$, creating shortcuts which drastically lower the shortest path between remote points which is the characteristic property of small world networks. Due to its simple structure, many of the statistical properties of these networks have already been studied in various papers \cite{SModels}. Here we want to study the same network in the context of quantum technology. Thus in the network shown in figure (\ref{fig:swn_example}), each link denotes a partially entangled state of the form (\ref{eq:ro}) shared between the two nodes (labs) at the end of the link. The structure of the network indicates that each lab can share these states with its nearest neighbor lab, but the central node can share partially entangled states (\ref{eq:ro}) with any other lab. \\
Let $n$ be the total number of nodes on the circle, then the average number of shortcuts is given by $m=np$. When $n\longrightarrow \infty$, two distinct limits can be considered, one in which $p\longrightarrow 0$ such that the number of shortcuts $m$ remains finite and the other in which $p$ is finite in which case the number of shortcuts too tends to infinity. The clustering coefficient of this network is $p^2$, since the probability that the two neighbors of a given node are connected to the central node is $p^2$. For simplicity we count the length of each shortcut as $\frac{1}{2}$. Consider two points whose shortest path along the ring is $r$. We call this the \textbf{regular distance} of the two nodes. In the absence of shortcuts, this is the only shortest path between the two points. When $p>0$, the average shortest path between these points is denoted by $\overline{l}(r)$. This is called the \textbf{actual distance} of the two nodes. We now consider two different networks, one in which the links on the ring are directed and one in which there is no direction on these links. In both cases the shortcuts to the ring are not directed. Obviously analysis of the directed ring is much easier than the undirected ring. However in the context of quantum networks, where a link means the possibility of creating an entangled pair, there is no point in assuming a direction on the links and it is the undirected graph which we should take into account. Nevertheless it is instructive to first review some of the classical properties of the directed rings for entanglement distribution and then extend this study to the undirected graphs.
\subsection{Small world graph with directed links}
Consider the directed graph in figure (\ref{fig:directed1}). Let $a$ and $b$ be two nodes with regular distance $r$. The probability that their actual distance is $\ell$ is given by \cite{Dorogovtsev}
\begin{eqnarray}
P(\ell|r)&=&\ell p^2(1-p)^{\ell-1}, \hskip 1cm \ell<r\\
P(r|r)&=&(1-p)^{r+1}.
\end{eqnarray}
The average shortest path between any two points can be calculated in closed form, although the expression is not so illuminating. Instead we draw in figure (\ref{fig:lbar}) the average shortest path ($\bar{l}$) as a function of $p$ for several values of $N$.
\begin{figure}[h]
\centering
\includegraphics[scale=0.2]{directed1}
\caption{Simple model of small world graph with directed links.}
\label{fig:directed1}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[scale=0.5]{lbarr}
\caption{Average shortest path ($\bar{l}$) with respect to $p$ in a directed small world network for $N=100,200 \text{ and } 500$.}
\label{fig:lbar}
\end{figure}
When the graph is undirected, analytical calculation of the probabilities and the average length become more involved. The result is \cite{Dorogovtsev}
\begin{eqnarray}
P(\ell|r)&=&p^2(1-p)^{2\ell-4}(2-p)(2\ell-p\ell-2), \hskip 1cm 1<\ell<r\\
P(\ell|r)&=&p^2, \hskip 1cm \ell=1\\
P(r|r)&=&1-\sum_{\ell=1}^{r-1} P(\ell|r).
\label{eq:p_undirected}
However the qualitative behavior of the probabilities do not have much difference with the directed network.
\section{Entanglement generation in a small world quantum network}\label{mysmall}
We are now in a position to consider the problem of entanglement generation
in the small world network. Let $a$ and $b$ be two points with regular distance $r$. Obviously we should consider only undirected graphs for which $P(\ell|r)$ is given in equation (\ref{eq:p_undirected}). In the absence of any shortcuts, when $p=0$ the singlet conversion probability for these two sites is given by $SCP(r,\phi)$ as in equation (\ref{eq:scp}). When $p\ne 0$, the two sites can create a singlet through the shortest paths provided for them. The average number of shortcuts in this case is equal to $m=np$. Each path is provided with probability $P(\ell|r)$ and the average $SCP$ for these two sites will be given by
\begin{equation}
\overline{SCP}(r,\phi,m)=\sum_{\ell=1}^r SCP(\ell,\phi)P(\ell|r),
\end{equation}
where $SCP(\ell,\phi)$ is given in (\ref{eq:scp}) and $P(\ell|r)$ is given in (\ref{eq:p_undirected}). To answer the first question posed in the introduction, i.e. the probability of success for creating a singlet between the two distant labs, we consider
$
SCP(r, \phi,m):=\overline{SCP}(\frac{n}{2},\phi,m)$ for three different distances $r=20, 80$ and $500$ in a network of $N=1000$ nodes. Several interesting features are observed in these figures. First we see that when the initial shared states are maximally entangled (i.e. $\phi=0.5$), there is no need for any shortcuts in order to entangle distant nodes, no matter how far apart they are. This is because these nodes can use the links along the circular network and can successfully extract a Bell state with SCP=1. No matter what is the distance between these labs, the SCP is always 1 in view of Eq. (\ref{eq:scp}). For short distances say $r=20$ (figure 7.a), one can still obtain large SCP, with a small number of shortcuts, even if the shared states are not maximally entangled. For any value of $\phi<0.5$, there is a threshold number of shortcuts above which one can establish maximally entangled states with high SCP. This threshold value increases with distance, but it is seen from the figures (7.b) and (7.c) for large distances, its value approaches a fixed value. This is explicitly depicted in figure (\ref{fig:scp_n_2}) . It is seen that for short distances, the distant labs can always use the links around the circular network to extract maximally entangled states. At these distances the addition of shortcuts does not have much effect. As the distance increases, we find a threshold distance beyond which, a certain number of shortcuts, is needed for attaining an SCP of 2/3. For the value of $\phi=0.45$, the threshold distance is around $r_0\alphapprox 20$ and the threshold value is around $m\alphapprox 50$. If we now increase the threshold SCP to $\frac{3}{4}$, the required number of shortcuts increases to twice the previous value and also the distance below which an SCP of $\frac{3}{4}$ is possible in the absence of shortcuts reduces to half of its previous value. These are shown in figure (\ref{fig:r0}). 
The interesting feature is that the number of shortcuts remains constant after a distance. The reason is that by increasing the number of shortcuts and making them denser in the network, the shortest distance between two points does not necessarily decrease further.
\begin{figure}[h]
\subfloat[$r=20$\label{sfig:43}]{
\includegraphics[width=0.32\linewidth]{1}
}\hfill
\subfloat[$r=80$\label{sfig:45}]{
\includegraphics[width=0.32\linewidth]{2}
}\hfill
\subfloat[$r=500$\label{sfig:47}]{
\includegraphics[width=.32\linewidth]{3}
}\hfill
\caption{The singlet conversion probability (indicated by color) for pairs of points at three different distances ($r=20$, $80$ and $500$) for a network of $N=1000$ nodes. The plots show dependence of SCP on the amount of initial entanglement and the number of shortcuts.}
\label{fig:scp_n_2}
\end{figure}
\begin{figure}[h]
\subfloat[$SCP=2/3,\ \ \phi=0.45$\label{sfig:r0a}]{
\includegraphics[width=0.38\linewidth]{a}
}\hfill
\subfloat[$SCP=3/4,\ \ \phi=0.45$\label{sfig:r0b}]{
\includegraphics[width=0.38\linewidth]{b3}
}\hfill
\caption{a) The area in which the threshold is bigger than 2/3 is shown by blue color. The initial value of entanglement is $\phi=0.45$. If the distance between two labs is less than $r_0\approx 20$, they can use the links around the circular network to extract maximally entangled states. At these distances the addition of shortcuts does not have much effect. Beyond this threshold distance, a certain number of shortcuts, around 50, is needed for attaining an SCP of 2/3. b) For the same value of initial entanglement $\phi=0.45$, but an SCP of 3/4. The blue area has shrunk. Here for distances bigger than 10 links, shortcuts are necessary in order to achieve an SCP of 3/4. In both figures, the interesting feature is that the number of required shortcuts saturates to a constant value after a specific distance. The reason is that by increasing the number of shortcuts and making them denser in the network, the shortest distance between two points does not necessarily decrease further. This result has certainly practical consequences.}
\label{fig:r0}
\end{figure}
\section{Conclusion}
In this letter we have briefly discussed a very simple model for entanglement distribution in small world networks. The network has a central node (a central lab, e.g.\ a satellite) which can share entangled states with all the other labs in the network. We have determined to what extent distant labs in the network can share maximally entangled states, if initially they have shared only partially entangled states with their neighboring labs and with some probability with the central lab. We have obtained the threshold values for the number of shortcuts, the initial value of entanglement and the distance between nodes, for obtaining highly entangled states shared between remote points. This last quantity is measured by a quantity called Singlet Conversion Probability (SCP). The results are shown in figures (\ref{fig:scp_n_2}) and (\ref{fig:r0}). It would be interesting to investigate how the number of central labs can change these results. Moreover the same model with one central node can be modified to exhibit one other characteristic of small world networks, namely the clustering property. This is done simply by connecting each node on the ring, not only to its nearest neighbors, but to a small number of its neighbors. We have found that this clustering property does not appreciably affect the main results reported in figures (\ref{fig:scp_n_2}) and (\ref{fig:r0}).
\begin{thebibliography}{}
\bibitem{Qcomm}
N. Gisin and R. Thew, Quantum communication, Nature Photonics, vol. 1, 165 EP -, Mar.2007.
\bibitem{Qnet} S. Wehner, D. Elkouss, and R. Hanson, "Quantum internet: A vision for the road ahead",Science, vol. 362, no. 6412, 2018.
\bibitem{repeaters1} H.-J. Briegel, W. D\"ur, J. I. Cirac, and P. Zoller, "Quantum repeaters: The role of imperfect local operations in quantum communication", Phys. Rev. Lett., vol. 81, pp. 5932-5935, 26 Dec. 1998.
\bibitem{repeaters2} W. D\"ur, H.-J. Briegel, J. I. Cirac, and P. Zoller, "Quantum repeaters based on entanglement purification",Phys. Rev. A, vol. 59, pp. 169-181, 1 Jan. 1999.
\bibitem{ES}
M.\ifmmode \dot{Z}\else \.{Z}\fi{}ukowski, A. Zeilinger, M. A. Horne, and A. K. Ekert, ""Event-ready-detectors" Bell experiment via entanglement swapping",Phys. Rev. Lett., vol. 71, pp. 4287-4290, 26 Dec. 1993.
\bibitem{percolation}
A. Ac{\'\i}n, J. I. Cirac, and M. Lewenstein, "Entanglement percolation in quantum networks",Nature Physics, vol. 3, 256 EP -, Feb. 2007.
\bibitem{e_distribution} S. Perseguers, J. I. Cirac, A. Ac{\'\i}n, M. Lewenstein, and J. Wehr, "Entanglement distribution in pure-state quantum networks",Phys. Rev. A, vol. 77, p. 022 308, 2 Feb. 2008.
\bibitem{q_random} S. Perseguers, M. Lewenstein, A. Ac{\'\i}n, and J. I. Cirac, "Quantum random networks",Nature Physics, vol. 6, 539 EP -, May 2010.
\bibitem{random} P. Erd{\H o}s and Alfr{\'e}d R{\'e}nyi, "On random graphs i.",Publicationes Mathematicae (Debrecen), vol. 6,pp. 290-297, 1959.
\bibitem{book2} Valuation of Network Effects in Software Markets: A Complex Networks Approach. Frankfurt,Germany: Physica, 2009.
\bibitem{Watts} D. J. Watts and S. H. Strogatz, "Collective dynamics of 'small-world' networks",Nature, vol. 393,440 EP -, Jun. 1998.
\bibitem{muel1}
O. Muelken, V. Pernice, and A. Blumen, 'Quantum transport on small-world networks: A continuous-time quantum walk approach'
Phys. Rev. E 76, 051125.
\bibitem{muel2}
O. Muelken, M.Dolgushev and M. Galiceanu,'Complex Quantum Networks: From Universal Breakdown to Optimal Transport',
Phys. Rev. E 93, 022304 (2016).
\bibitem{Dorogovtsev} S. N. Dorogovtsev and J. F. F. Mendes, "Exactly solvable small-world network",EPL (Europhysics Letters), vol. 50, no. 1, p. 1, 2000.
\bibitem{SModels} M. E. J. Newman, "Models of the small world", Journal of Statistical Physics (2000) 101: 819.
\end{thebibliography}
\end{document} |
\begin{document}
\title{High-dimensional variable selection via tilting}
\begin{abstract}
This paper considers variable selection in linear regression models where
the number of covariates is possibly much larger than the number of observations.
High dimensionality of the data brings in many complications, such as
(possibly spurious) high correlations among the variables,
which result in marginal correlation being unreliable as a measure of association
between the variables and the response.
We propose a new way of measuring the contribution of each variable to the response
which takes into account high correlations among the variables in a data-driven way.
The proposed \emph{tilting} procedure provides an adaptive choice between the use of
marginal correlation and \emph{tilted correlation} for each variable,
where the choice is made depending on the values of the
hard-thresholded sample correlation of the design matrix.
We study the conditions under which this measure can successfully discriminate
between the relevant and the irrelevant variables and thus be used as a tool for variable selection.
Finally, an iterative variable screening algorithm is constructed
to exploit the theoretical properties of tilted correlation,
and its good practical performance is demonstrated in a comparative simulation study.
\end{abstract}
\textbf{keywords:} variable selection, correlation, high-dimensional linear regression
\section{Introduction}
\label{sec:intro}
Inferring the relationship between the response and the explanatory variables in linear models is an extremely important
and widely studied statistical problem, from the point of view of both practical applications and theory. In this work,
we consider the following linear model:
\begin{eqnarray}
\mathbf{y}=\mathbf{X}\boldsymbol{\beta}+\boldsymbol{\epsilon}, \label{lp}
\end{eqnarray}
where $\mathbf{y}=(y_1, \ldots, y_n)^T \in \mathbb{R}^n$ is an $n$-vector of the response, $\mathbf{X}=\left(X_1, \ldots, X_p\right)$ is
an $n \times p$ design matrix and $\boldsymbol{\epsilon}=(\epsilon_1, \ldots, \epsilon_n)^T \in \mathbb{R}^n$ is an $n$-vector of i.i.d. random errors.
Recent technological advances have led to the explosion of data across many scientific disciplines, where the
dimensionality of the data $p$ can be very large; examples can be found in genomics, functional MRI, tomography
and finance, to name but a few. In such settings, difficulties arise in estimating the coefficient vector $\boldsymbol{\beta}$.
Over the last two decades, substantial progress has been made in tackling this problem under the assumption that
only a small number of variables actually contribute to the response, i.e., $\mathcal{S}=\{1\le j \le p: \ \beta_j \ne 0\}$
is of cardinality $|\mathcal{S}|\ll p$. By identifying $\mathcal{S}$, we can improve both model interpretability and estimation
accuracy.
There exists a long list of literature devoted to the high-dimensional variable selection problem
and an exhaustive survey can be found in \citet{fan2009}.
The Lasso \citep{tibshirani1996} belongs to a class of penalised least squares estimators
where the penalty is on the $\mathit{l}_1$-norm of $\boldsymbol{\beta}$,
which leads to a sparse solution by setting certain coefficients to be exactly zero.
It has enjoyed considerable attention and substantial efforts in studying the consistency of the methodology and its extension can be found e.g. in \citet{meinshausen2006}, \citet{zhang2008}, \citet{zhao2006}, \citet{zou2006}, \citet{meinshausen2008}.
\citet{efron2004} proposed the Least Angle Regression (LARS) algorithm,
which can be modified to compute the Lasso solution path for a range of penalty parameters.
The main criterion for determining which variables should enter the model in the progression of the
LARS algorithm is the screening of the marginal correlations between each variable and the current residual.
That is, denoting the current residual by $\mathbf{z}$, the Lasso solution path is computed by taking a step
of a suitably chosen size in the equiangular direction between those variables which achieve the maximum $|X_j^T\mathbf{z}|$ at
each iteration. The Sure Independence Screening (SIS) proposed in \citet{fan2008} is a dimension
reduction procedure, which screens the marginal correlations $X_j^T\mathbf{y}$ to choose which variables
should remain in the model.
While the aforementioned methods show good theoretical properties as well as performing well in practice,
we note that they heavily rely on marginal correlation to measure the strength of association between $X_j$ and $\mathbf{y}$.
\citet{fan2008} observed that, even when $X_1, \ldots, X_p$ were generated as i.i.d. Gaussian variables,
there might exist spurious correlations among the variables with growing dimensionality $p$.
In general, when there are non-negligible correlations among the variables, whether spurious or not,
an irrelevant variable ($X_j, \ j\not\in\mathcal{S}$) can have large marginal correlation with $\mathbf{y}$
due to its association with the relevant variables ($X_j, \ j\in\mathcal{S}$),
which implies that marginal correlation can be misleading, especially if $p$ is large.
There have been some efforts to introduce new measures of association between each variable and the
response in order to deal with the issue of high correlations among the variables.
\citet{buhlmann2009} proposed the PC-simple algorithm, which uses partial correlation
in order to infer the association between each variable and the response conditional on other variables.
Also, we note that ``greedy'' algorithms such as the traditional forward selection
(see e.g. Chapter 8.5 of \citet{weisberg1980}) or the forward regression \citep{wang2009}
have an interpretation in this context due to their greediness
(in the sense that the locally optimal choice is made at each iteration),
unlike less greedy algorithms generating a solution path, e.g. LARS.
At each iteration, both forward selection and forward
regression algorithms update the current residual $\mathbf{z}$ by taking the greediest step towards the
variables included in the current model, i.e., $\mathbf{z}$ is obtained by projecting $\mathbf{y}$ onto the orthogonal
complement of the current model space and this greedy progression can be seen as taking into account
the correlations between those variables which are in the current model and those which are not.
\citet{radchenko2011} proposed the forward-Lasso adaptive shrinkage (FLASH) which includes
the Lasso and forward selection as special cases at two extreme ends.
FLASH iteratively adds one variable at a time and adjusts each step size by introducing a
new parameter so that their procedure is greedier than the Lasso, yet not as greedy as the forward selection.
The regression framework proposed in \citet{witten2009} accounts for correlations among the variables
using the so-called ``scout'' procedure, which obtains a shrunken estimate of the inverse covariance matrix of $\mathbf{X}$
by maximising a penalised likelihood and then applies it to the estimation of $\boldsymbol{\beta}$.
A more detailed description of the aforementioned methods, in comparison with our proposed methodology,
is provided later in Section \ref{sec:relation}.
In this paper, we propose a new way of measuring the contribution of each variable to the response,
which also accounts for the correlation structure among variables.
It is accomplished by ``tilting'' each column $X_j$ (so that it becomes $X_j^*$) such that
the impact of other variables $X_k, \ k\ne j$ on the ``tilted'' correlation between $X_j^*$ and $\mathbf{y}$
is reduced and thus the relationship between the $j$th covariate and the response
can be identified more accurately. One key ingredient of this methodology, which sets it apart from other
approaches listed above, is the adaptive choice of the set $\mathcal{C}_j$ of variables $X_k$ whose
impact on $X_j$ is to be removed. Informally speaking, we note that
$\mathcal{C}_j$ cannot include ``too many'' variables, as this would distort the
association between the $j$th covariate and the response due to the large
dimensionality $p$. However, we also observe that those $X_k$'s
which have low marginal correlations with $X_j$ do not individually cause distortion in measuring
this association anyway, so they can safely be omitted from the set $\mathcal{C}_j$.
Therefore, it appears natural to include in $\mathcal{C}_j$ only those variables $X_k$
whose correlations with $X_j$ exceed a certain threshold in magnitude,
and this hard thresholding step is an important element of our methodology.
Other key steps in our methodology are: projection of each variable onto a subspace chosen
in the hard-thresholding step; and rescaling of such projected variables.
We show that under certain conditions the tilted correlation can successfully discriminate between
relevant and irrelevant variables and thus can be applied as a tool for variable selection.
We also propose an iterative algorithm based on tilting and present its unique features in
relation to the existing methods discussed above.
The remainder of the paper is organised as follows.
In Section \ref{sec:tilt}, we introduce the tilting procedure and study
the theoretical properties of tilted correlation in various scenarios.
Then, in Section \ref{sec:application}, we propose the TCS algorithm,
which iteratively screens the tilted correlations to identify relevant variables,
and compare it in detail to other existing methods. Section \ref{sec:sim} reports the
outcome of extensive comparative simulation studies and
the performance of TCS algorithm is further demonstrated in Section \ref{sec:boston}
on a real world dataset predicting real estate prices.
Section \ref{sec:conclusion} concludes the paper and the proofs of theoretical results are in the Appendix.
\section{Tilting: motivation, definition and properties}
\label{sec:tilt}
\subsection{Notation and model description}
\label{sec:notation}
For an $n$-vector $\mathbf{u}\in\mathbb{R}^n$, we define the $\mathit{l}_1$ and $\mathit{l}_2$-norms as
$\Vert\mathbf{u}\Vert_1=\sum_{j}|u_j|$ and $\Vert\mathbf{u}\Vert_2=\sqrt{\sum_{j}u_j^2}$,
and the latter is frequently referred to as the norm.
Each column of $\mathbf{X}$ is assumed to have a unit norm,
and thus the sample correlation matrix of $\mathbf{X}$ is defined as $\mathbf{C}=\mathbf{X}^T\mathbf{X}=(c_{j,k})_{j,k=1}^p$.
We assume that $\epsilon_i, \ i=1, \ldots, n$ are i.i.d. random noise following a normal
distribution $\mathcal{N}(0, \sigma^2/n)$ with $\sigma^2<\infty$, where the $n^{-1}$ in the noise
variance is required due to our normalisation of the columns of $\mathbf{X}$.
We denote the $i$th row of $\mathbf{X}$ as $\mathbf{x}_i=(X_{i,1}, \ldots, X_{i, p})$.
Let $\mathcal{D}$ denote a subset of the index set $\mathcal{J}=\{1, \ldots, p\}$.
Then $\mathbf{X}_\mathcal{D}$ denotes an $n\times |\mathcal{D}|$-submatrix of $\mathbf{X}$ with $X_j, \ j\in\mathcal{D}$ as its columns
for any $n\times p$ matrix $\mathbf{X}$.
In a similar manner, $\boldsymbol{\beta}_\mathcal{D}$ denotes a $|\mathcal{D}|$-subvector of a $p$-vector $\boldsymbol{\beta}$
with $\beta_j, \ j\in\mathcal{D}$ as its elements.
For a given submatrix $\mathbf{X}_\mathcal{D}$, we denote the projection matrix onto the column space of $\mathbf{X}_\mathcal{D}$ by $\Pi_\mathcal{D}$.
Finally, $C$ and $C'$ are used to denote generic positive constants.
\subsection{Tilting: motivation and definition}
\label{sec:motv:def}
In this section, we introduce the procedure of tilting a variable
and define the \emph{tilted correlation} between each variable and the response.
We first list typical difficulties encountered in high-dimensional problems,
which were originally pointed out in \citet{fan2008}.
\begin{itemize}
\item[(a)] Irrelevant variables which are highly correlated with the relevant ones
can have high priority to be selected in marginal correlation screening.
\item[(b)] A relevant variable can be marginally uncorrelated but jointly correlated with the response.
\item[(c)] Collinearity can exist among the variables,
i.e., $|c_{j, k}|=|X_j^TX_k|$ for $j\ne k$ can be close to 1.
\end{itemize}
We note that the marginal correlation between each variable $X_j$ and $\mathbf{y}$ has the following decomposition,
\begin{eqnarray}
X_j^T\mathbf{y}=X_j^T\left(\sum_{k=1}^p\beta_kX_k+\boldsymbol{\epsilon}\right)=
\beta_j+\underline{\underline{\sum_{k\in\mathcal{S}\setminus\{j\}}\beta_kX_j^TX_k}}+X_j^T\boldsymbol{\epsilon},
\label{marginal:corr}
\end{eqnarray}
which shows that the issues (a) and (b) arise from the underlined summand in (\ref{marginal:corr}).
The main idea behind tilting is to transform each $X_j$ in such a way that the corresponding
underlined summand for the transformed $X_j$ is zero or negligible, while not distorting
the contribution of the $j$th covariate to the response. By examining the form of the
underlined summand and viewing it as a ``bias'' term, it is apparent that its components
are particularly large for those $k$'s for which the corresponding term $X_j^TX_k$ is large.
If we were to transform $X_j$ by projecting it on the space orthogonal to those $X_k$'s,
a corresponding bias term for a thus-transformed $X_j$ would be significantly reduced.
For each $X_j$, denote the set of such $X_k$'s by $\mathcal{C}_j$. Without prior knowledge of $\mathcal{S}$,
one way of selecting $\mathcal{C}_j$ for each $X_j$ is to identify those variables $X_k, \ k\ne j$ which have
non-negligible correlations with $X_j$. A careful choice of $\mathcal{C}_j$ is especially important when the
dimensionality $p$ is high; when $\mathcal{C}_j$ is chosen to include too many variables,
any vector in $\mathbb{R}^n$ may be well approximated by $X_k, \ k\in\mathcal{C}_j$,
which would result in the association between the transformed $X_j$ and $\mathbf{y}$
failing to reflect the true contribution of the $j$th covariate to the response.
Intuitively, those $X_k$'s having small sample correlations with $X_j$ do not significantly
contribute to the underlined bias term, and thus can be safely omitted from the set
$\mathcal{C}_j$. Below, we propose a procedure for selecting $\mathcal{C}_j$ adaptively for each $j$,
depending on the sample correlation structure of $\mathbf{X}$.
We first find $\pi_n\in(0, 1)$ which will act as a threshold on
each off-diagonal entry $c_{j, k}, \ j\ne k$ of the sample correlation matrix $\mathbf{C}$
of $\mathbf{X}$, identifying whether the sample correlation between $X_j$ and $X_k$ is non-negligible.
Then, the subset $\mathcal{C}_j$ is identified as
$\mathcal{C}_j=\{k\ne j: \ |X_j^TX_k|=|c_{j, k}|>\pi_n\}$ separately for each variable $X_j$.
We note that although the subset $\mathcal{C}_j$ is obviously different for each $j$, the
thresholding procedure for selecting it is always the same.
Our procedure for selecting $\pi_n$ itself is described in
Section \ref{sec:choice:thr}.
Tilting a variable $X_j$ is defined as the procedure of projecting $X_j$ onto
the orthogonal complement of the space spanned by $X_k, \ k\in\mathcal{C}_j$,
which reduces to zero the impact of those $X_k$'s on the association
between the projected version of $X_j$ and $\mathbf{y}$.
Hard-thresholding was previously adopted for the estimation of a high-dimensional covariance matrix,
although we emphasise that this was not in the context of variable selection.
In \citet{bickel2008}, an estimator obtained by hard-thresholding the sample covariance matrix
was shown to be consistent with the choice of $C\sqrt{\log{p}/n}$ as the threshold,
provided the covariance matrix was appropriately sparse and the dimensionality $p$ satisfied $\log{p}/n\to 0$.
A similar result was reported in \citet{el2008} with the threshold of magnitude $Cn^{-\gamma}$ for some $\gamma\in(0, 1/2)$.
Our theoretical choice of threshold $\pi_n$ is described in Section \ref{sec:prop:tilt},
where we also briefly compare it to the aforementioned thresholds.
In practice, we choose $\pi_n$ by controlling the false discovery rate, as presented in
Section \ref{sec:choice:thr}.
Let $\mathbf{\tilde{X}}_{j}$ denote a submatrix of $\mathbf{X}$ with $X_k, \ k\in\mathcal{C}_j$ as its columns,
and $\Pi_{j}$ the projection matrix onto the space spanned by $X_k, \ k\in\mathcal{C}_j$,
i.e., $\Pi_{j}\equiv\mathbf{\tilde{X}}_{j}(\mathbf{\tilde{X}}_{j}^T\mathbf{\tilde{X}}_{j})^{-1}\mathbf{\tilde{X}}_{j}^T$.
The tilted variable $X_j^*$ for each $X_j$ is defined as $X_j^*\equiv(\mathbf{I}_n-\Pi_{j})X_j$.
Then the correlation between the tilted variable $X_j^*$ and $X_k, \ k\in\mathcal{C}_j$ is reduced to zero,
and therefore such $X_k$'s no longer have any impact on $X_j^{*T}\mathbf{y}$.
However, $X_j^{*T}\mathbf{y}$ cannot directly be used as a measure of association between $X_j$ and $\mathbf{y}$,
since the squared norm of the tilted variable $X_j^*$, provided $\mathcal{C}_j$ is non-empty, satisfies
$\Vert X_j^*\Vert_2^2=X_j^T(\mathbf{I}_n-\Pi_{j})X_j<X_j^TX_j=1$.
Therefore, we need to rescale $X_j^{*T}\mathbf{y}$ so as to make it a reliable criterion for
gauging the contribution of each $X_j$ to $\mathbf{y}$.
Let $a_j$ and $a_j^y$ denote the squared proportion of $X_j$ and $\mathbf{y}$ (respectively) represented by $X_k, \ k\in\mathcal{C}_j$, i.e.,
$a_j\equiv\Vert\Pi_{j} X_j\Vert_2^2/\Vert X_j\Vert_2^2$ and $a_j^y\equiv\Vert\Pi_{j}\mathbf{y}\Vert_2^2/\Vert\mathbf{y}\Vert_2^2$.
We denote the tilted correlation between $X_j$ and $\mathbf{y}$ with respect to a rescaling factor $s_j$ by
$c_j^*(s_j) \equiv s_j^{-1}\cdot X_j^{*T}\mathbf{y}$, and propose two rescaling rules below.
\begin{description}
\item[Rescaling 1.]
Decompose $X_j^{*T}\mathbf{y}$ as
\begin{eqnarray}
X_j^{*T}\mathbf{y}&=&X_j^T(\mathbf{I}_n-\Pi_{j})\mathbf{y}
=X_j^T\left\{\sum_{k=1}^p\beta_k(\mathbf{I}_n-\Pi_{j})X_k+(\mathbf{I}_n-\Pi_{j})\boldsymbol{\epsilon}\right\}
\nonumber \\
&=&\beta_jX_j^T(\mathbf{I}_n-\Pi_{j})X_j+\sum_{k\in\mathcal{S}\setminus\mathcal{C}_j, k\ne j}\beta_kX_j^T(\mathbf{I}_n-\Pi_{j})X_k+X_j^T(\mathbf{I}_n-\Pi_{j})\boldsymbol{\epsilon}.
\label{decom:tilt:corr}
\end{eqnarray}
Provided the second and third summands in (\ref{decom:tilt:corr}) are negligible in comparison with the first,
rescaling the inner product $X_j^{*T}\mathbf{y}$ by $1-a_j=X_j^T(\mathbf{I}_n-\Pi_{j})X_j$
can ``isolate'' $\beta_j$, which amounts to the contribution of $X_j$ to $\mathbf{y}$, in the sense that
$X_j^{*T}\mathbf{y} / (1-a_j)$ can be represented as $\beta_j$ plus a ``small'' term
(our theoretical results later make this statement more precise).
Motivated by this, we use the rescaling factor of $\lambda_j\equiv(1-a_j)$ to define
a rescaled version of $X_j^*$ as $X_j^\bullet\equiv(1-a_j)^{-1}\cdot X_j^*$
and the corresponding tilted correlation as
$c_j^*(\lambda_j)=(1-a_j)^{-1}\cdot X_j^{*T}\mathbf{y}=X_j^{\bullet T}\mathbf{y}$.
\item[Rescaling 2.]
Since $\mathbf{I}_n-\Pi_{j}$ is also a projection matrix,
we note that $X_j^{*T}\mathbf{y}$ is equal to the inner product between $X_j^*=(\mathbf{I}_n-\Pi_{j})X_j$ and $\mathbf{y}_j^*=(\mathbf{I}_n-\Pi_{j})\mathbf{y}$,
with their norms satisfying $\Vert X_j^*\Vert_2=\sqrt{1-a_j}$ and $\Vert\mathbf{y}_j^*\Vert_2=\sqrt{1-a_j^y}\cdot\Vert\mathbf{y}\Vert_2$.
By rescaling $X_j^*$ and $\mathbf{y}_j^*$ by $\sqrt{1-a_j}$ and $\sqrt{1-a_j^y}$ respectively,
we obtain vectors $X_j^\circ\equiv(1-a_j)^{-1/2}\cdot X_j^*$ and $\mathbf{y}_j^\circ\equiv(1-a_j^y)^{-1/2}\cdot\mathbf{y}_j^*$, whose
norms satisfy $\|X_j^\circ\|_2 = \|X_j\|_2$ and $\|\mathbf{y}_j^\circ\|_2 = \|\mathbf{y}\|_2$.
Therefore, with the rescaling factor set equal to $\Lambda_j\equiv\{(1-a_j)(1-a_j^y)\}^{1/2}$,
we define the tilted correlation as $c_j^*(\Lambda_j)=\{(1-a_j)(1-a_j^y)\}^{-1/2}\cdot X_j^{*T}\mathbf{y}=X_j^{\circ T}\mathbf{y}_j^\circ$.
\end{description}
We note that, with the rescaling factor $\lambda_j$ (rescaling 1),
the tilted correlation $c_j^*(\lambda_j)$ coincides with the ordinary least squares estimate of $\beta_j$
when regressing $\mathbf{y}$ onto $X_k, \ k\in\mathcal{C}_j\cup\{j\}$.
When rescaled by $\Lambda_j$ (rescaling 2), the tilted correlation coincides with
the sample partial correlation between $X_j$ and $\mathbf{y}$ given $X_k, \ k\in\mathcal{C}_j$ (denoted by $\hat{\rho}_n(j, \mathbf{y}|\mathcal{C}_j)$),
up to a constant multiplicative factor $\Vert\mathbf{y}\Vert_2$,
i.e., $c_j^*(\Lambda_j)=\Vert\mathbf{y}\Vert_2\cdot\hat{\rho}_n(j, \mathbf{y}|\mathcal{C}_j)$.
Although partial correlation is also used in the PC-simple algorithm \citep{buhlmann2009},
we emphasise that a crucial difference between tilting and PC-simple is that
tilting makes an adaptive choice of the conditioning subset $\mathcal{C}_j$ for each $X_j$,
as described earlier in this section. For a detailed discussion of this point, see
Section \ref{sec:relation}. In what follows, whenever the tilted correlation is denoted
by $c_j^*$ without specifying the rescaling factor $s_j$, the relevant statement is valid
for either of the rescaling factors $\lambda_j$ and $\Lambda_j$.
Finally, we note that if the set $\mathcal{C}_j$ turns out to be empty for a certain index $j$,
then for such $X_j$, our tilted correlation with either rescaling factor
would reduce to standard marginal correlation, which in this case is expected to work well
(in measuring the association between the $j$th covariate and the response)
due to the fact that no other variables $X_k$ are significantly correlated with $X_j$.
In summary, our proposed tilting procedure enables an adaptive choice
between the use of marginal correlation and tilted correlation for each variable $X_j$,
depending on the sample correlation structure of $\mathbf{X}$.
In the following section, we study some properties of tilted correlation and show
that the corresponding properties do not always hold for marginal correlation. This
prepares the ground for the algorithm proposed in Section \ref{sec:algorithm}
which adopts tilted correlation for variable screening.
\subsection{Properties of the tilted correlation}
\label{sec:prop:tilt}
In studying the theoretical properties of tilted correlation,
we make the following assumptions on the linear model in (\ref{lp}).
\begin{itemize}
\item[(A1)] The number of non-zero coefficients $|\mathcal{S}|$ satisfies $|\mathcal{S}|=O(n^{\delta})$ for $\delta\in[0, 1/2)$.
\item[(A2)] The number of variables satisfies $\log p=O(n^{\theta})$ with
$\theta\in[0, 1-2\gamma)$ for $\gamma\in(\delta, 1/2)$.
\item[(A3)] With the same $\gamma$ as in (A2), the threshold is chosen as $\pi_n=C_1n^{-\gamma}$ for some $C_1>0$.
We assume that there exists $C>0$ such that $\mathcal{C}_j=\{k\ne j: \ |c_{j, k}|>\pi_n\}$ is of cardinality
$|\mathcal{C}_j|\le Cn^\xi$ uniformly over all $j$, where $\xi\in[0, 2(\gamma-\delta))$.
\item[(A4)] Non-zero coefficients satisfy $\max_{j\in\mathcal{S}}|\beta_j|<M$ for $M\in(0, \infty)$ and
$n^\mu\min_{j\in\mathcal{S}}|\beta_j|\to\infty$ for $\mu\in[0, \gamma-\delta-\xi/2)$.
\item[(A5)] There exists $\alpha\in(0, 1)$ satisfying $1-X_j^T\Pi_{j} X_j=1-a_j > \alpha$ for all $j$.
\item[(A6)] For those $j$ whose corresponding $\mathcal{C}_j$ satisfies $\mathcal{S}\nsubseteq\mathcal{C}_j$, we have
\[
n^\kappa\cdot \frac{\Vert(\mathbf{I}_n-\Pi_{j})\mathbf{X}_\mathcal{S}\boldsymbol{\beta}_\mathcal{S}\Vert_2^2}{\Vert\mathbf{X}_\mathcal{S}\boldsymbol{\beta}_\mathcal{S}\Vert_2^2}\to\infty,
\]
for $\kappa$ satisfying $\kappa/2+\mu\in[0, \gamma-\delta-\xi/2)$.
\end{itemize}
In (A1) and (A2), we let the sparsity $|\mathcal{S}|$ and dimensionality $p$ of the linear model grow with the sample size $n$.
Intuitively, if some non-zero coefficients tend to zero too rapidly,
identifying them as relevant variables is difficult.
Therefore (A4) imposes a lower bound on the magnitudes of the non-zero coefficients,
which still allows the minimum non-zero coefficient to decay to 0 as $n$ grows.
It also imposes an upper bound, which is needed to ensure that the ratio
between the largest and smallest coefficients in absolute value does not
grow too quickly with $n$.
We now clarify the rest of assumptions which are imposed on the correlation structure of $\mathbf{X}$,
and compare them to related conditions in existing literature.
It is common practice in high-dimensional variable selection literature
to study the performance of proposed methods under some conditions on $\mathbf{X}$.
For the Lasso, it was shown that the irrepresentable condition \citep{zhao2006},
also referred to as the neighbourhood stability condition \citep{meinshausen2006} on $\mathbf{X}$
was sufficient and almost necessary for consistent variable selection.
This condition required that
\begin{eqnarray*}
\max_{j\notin\mathcal{S}}\left\vert\mbox{sign}(\boldsymbol{\beta}_\mathcal{S})^T(\mathbf{X}_\mathcal{S}^T\mathbf{X}_\mathcal{S})^{-1}\mathbf{X}_\mathcal{S}^TX_j\right\vert<1,
\end{eqnarray*}
which can roughly be interpreted as saying that the portion of the irrelevant variable
$X_j, \ j\notin\mathcal{S}$, represented by relevant variables $\mathbf{X}_\mathcal{S}$ is bounded from above by 1.
\citet{zhang2008} showed the variable selection consistency of Lasso under the sparse Riesz condition.
It requires the existence of $C>0$ for which the eigenvalues of $\mathbf{X}_\mathcal{D}^T\mathbf{X}_\mathcal{D}$ are bounded
uniformly over any $\mathcal{D}\subset\mathcal{J}$ with $|\mathcal{D}|\le C|\mathcal{S}|$.
\citet{candes2007} showed the consistency of the Dantzig selector under the uniform uncertainty principle (UUP),
which also similarly restricts the behaviour of the sparse eigenvalues of $\mathbf{X}_\mathcal{D}^T\mathbf{X}_\mathcal{D}$.
We note that the assumption (A3) is not directly comparable to the above conditions
in the sense that it requires the number of highly correlated variables for each variable not to exceed
a certain polynomial rate in $n$. This bound is needed in order to guarantee the existence of the
projection matrix $\Pi_{j}$, as well as to prevent tilted correlations from being distorted by high dimensionality
as explained in Section \ref{sec:motv:def}.
We now give an example of when (A3) is satisfied.
Suppose for instance that each observation $\mathbf{x}_i, \ i=1, \ldots, n$ is independently
generated from a multivariate normal distribution $\mathcal{N}_p(\mathbf{0}, \Sigma)$ with
$\Sigma_{j, k}=\varphi^{|j-k|}$ for some $\varphi\in(-1, 1)$.
Then using Lemma 1 in \citet{kalisch2007}, we have that
\begin{eqnarray}
\mathbb{P}\left(\max_{j\ne k}\left\vert c_{j, k}-\Sigma_{j, k}\right\vert
\le C_2n^{-\gamma}\right)\ge 1-\frac{Cnp(p-1)}{2}\cdot\exp\left(-\frac{C_2(n-4)n^{-2\gamma}}{2}\right),
\label{dist:sample:pop}
\end{eqnarray}
for some $C_2\in(0, C_1)$ and $C>0$.
The right-hand side of (\ref{dist:sample:pop}) tends to 1, provided
$\log\,p = O(n^\theta)$ with $\theta\in[0, 1/2-\gamma)$.
Then (A3) holds with probability tending to 1 since $|c_{j, k}| \le |\varphi|^{|j-k|}+C_2n^{-\gamma}<\pi_n$ for $|j-k|\gg\log n$ ($|a_n| \gg |b_n|$ means $|a_n b_n^{-1}| \to \infty$).
The choice of $\pi_n=C_1n^{-\gamma}$ is in agreement with \citet{bickel2008} and \citet{el2008}
in the sense that their threshold is also greater than $n^{-1/2}$.
However, as we describe in Section \ref{sec:choice:thr}, our procedure requires
a data-dependent, rather than a fixed threshold, and we propose to choose it
by controlling the false discovery rate.
(A5) is required to rule out strong collinearity among the variables.
From the fact that $1 - a_j = \det\left(\mathbf{X}_{\mathcal{C}_j\cup\{j\}}^T\mathbf{X}_{\mathcal{C}_j\cup\{j\}}\right)/\det\left(\mathbf{\tilde{X}}_{j}^T\mathbf{\tilde{X}}_{j}\right)$,
we can find a connection between (A5) and the condition requiring
strict positive definiteness of the population covariance matrix of $\mathbf{X}$,
which is often found in the variable selection literature including \citet{fan2001}, \citet{buhlmann2009} and \citet{zou2006}.
Further, we show in Appendix \ref{append:one} that assumptions (A5) and (A6) are satisfied
under a certain mild assumption on $\mathbf{X}$ and $\boldsymbol{\epsilon}$, also used e.g. in \cite{wang2009}.
As far as variable selection is concerned,
if the absolute values of tilted correlations for $j\in\mathcal{S}$ are markedly larger than
those for $j\notin\mathcal{S}$, we can use the tilted correlations for the purpose of variable screening.
Before studying the properties of the tilted correlation in details,
we provide a simple example to throw light on the situations where tilted correlation screening is successful while marginal correlation is not.
The following set-up is consistent with Condition \ref{cond:three} in Section \ref{sec:scen:one}:
$p=3$, $\mathcal{S}=\{1, 2\}$, noise is not present, $|c_{1, 3}|$ and $|c_{2, 3}|$ exceed the threshold.
Then, even when $c_{1, 2}, c_{1, 3}, c_{2, 3}$ and the non-zero coefficients $\beta_1, \beta_2$
are chosen so that the marginal correlation screening fails
(i.e., $|X_3^T\mathbf{y}|>\max(|X_1^T\mathbf{y}|, |X_2^T\mathbf{y}|)$), it is still the case that
$|(X_3^*)^T\mathbf{y}|=0$ and thus tilted correlation screening can avoid picking up $X_3$ as relevant.
In the following Sections \ref{sec:scen:one}--\ref{sec:scen:three},
we introduce different scenarios under which the tilted correlation screening
(with either rescaling factor) achieves separation between relevant and irrelevant variables.
\subsubsection{Scenario 1}
\label{sec:scen:one}
In the first scenario, we assume the following condition on $\mathbf{X}$.
\begin{cond}
There exists $C>0$ such that $\left\vert(\Pi_{j} X_j)^TX_k\right\vert \le Cn^{-\gamma}$
for all $j \in \mathcal{J}$ and $k\in\mathcal{S}\setminus\mathcal{C}_j,\ k\ne j$.
\label{cond:one}
\end{cond}
This condition implies that when $X_j$ is projected onto the space spanned by $X_l, \ l\in\mathcal{C}_j$,
any $X_k\in\mathcal{S}$ which are not close to $X_j$ (in the sense that $k\notin\mathcal{C}_j$)
remain not ``too close'' to the projected $X_j$ ($\Pi_{j} X_j$).
In Appendix \ref{appendix:one:ex}, it is shown that Condition \ref{cond:one} holds asymptotically
when each column $X_j$ is generated independently as a random vector on a sphere of radius 1,
which is the surface of the Euclidean ball $B_2^n=\left\{\mathbf{x}\in\mathbb{R}^n: \ \sum_{i=1}^nx_i^2\le 1\right\}$.
The following theorem states that, under Condition \ref{cond:one},
the tilted correlations of the relevant variables dominate those of the irrelevant variables.
\begin{thm}
Under assumptions (A1)--(A6), if Condition \ref{cond:one} holds, then $\mathbb{P}(\mathcal{E}_1)\to 1$ where
\begin{eqnarray}
\mathcal{E}_1=\left\{\frac{|c^*_k(s_k)|}{\min_{j\in\mathcal{S}}|c_j^*(s_j)|}\to 0
\mbox{ for all } k\notin\mathcal{S}\right\},
\label{separation}
\end{eqnarray}
regardless of the choice of the rescaling factor (that is, with $s_j=\lambda_j$ or $s_j=\Lambda_j$).
On the event $\mathcal{E}_1$, the following holds.
\begin{itemize}
\item $n^\mu\cdot c_j^*\to 0$ for $j\notin\mathcal{S}$.
\item $n^\mu\cdot|c_j^*|\to\infty$ for $j\in\mathcal{S}$.
\item With the rescaling 1, $c_j^*(\lambda_j)/\beta_j\to 1$ when $\beta_j\ne 0$.
\end{itemize}
\label{thm:one}
\end{thm}
As noted in the Introduction, in high-dimensional problems, the maximum sample correlation
of the columns of $\mathbf{X}$ can be non-negligible, even if the columns are
generated as independent. Therefore marginal correlations $X_j^T\mathbf{y}$ for $j\in\mathcal{S}$ cannot
be expected to have the same dominance over those for $j\notin\mathcal{S}$ as in (\ref{separation}).
\subsubsection{Scenario 2}
\label{sec:scen:two}
Let $\mathcal{K}$ denote a subset of $\mathcal{J}$ such that $X_k, \ k\in\mathcal{K}$ are either relevant ($k\in\mathcal{S}$)
or highly correlated with at least one of the relevant variables ($k\in\cup_{j\in\mathcal{S}}\mathcal{C}_j$).
That is, $\mathcal{K}=\mathcal{S}\cup\left\{\cup_{j\in\mathcal{S}}\mathcal{C}_j\right\}$,
and we impose the following condition on the sample correlation structure of $\mathbf{X}_\mathcal{K}$.
\begin{cond}
For each $j\in\mathcal{S}$, if $k\in\mathcal{K} \setminus \{\mathcal{C}_j \cup \{j\}\,\}$, then $\mathcal{C}_k\cap\mathcal{C}_j=\emptyset$.
\label{cond:two}
\end{cond}
In other words, this condition implies that for each relevant variable $X_j$,
if $X_k, \ k\in\mathcal{K}$ is not highly correlated with $X_j$, there does not exist an $X_l, \ l\ne j, k$,
which achieves sample correlations greater than the threshold $\pi_{thr}$ with both $X_j$ and $X_k$ simultaneously.
Suppose that the sample correlation matrix of $\mathbf{X}_\mathcal{K}$ is ``approximately bandable'',
i.e., $|c_{j,k}|>\pi_{thr}$ for any $j, k\in\mathcal{K}$ satisfying $|j-k| \le B$ and $|c_{j,k}|<\pi_{thr}$ otherwise,
with the band width $B$ satisfying $B|\mathcal{S}|^2/p\to 0$.
Then, if $\mathcal{S}$ is selected randomly from $\mathcal{J}$ with each $j\in\mathcal{J}$
having equal probability to be included in $\mathcal{S}$,
Condition \ref{cond:two} holds with probability bounded from below by
\[
\left(1-\frac{4B}{p-1}\right)\cdot\left(1-\frac{8B}{p-2}\right)\cdots \left(1-\frac{4(|\mathcal{S}|-1)B}{p-|\mathcal{S}|+1}\right)\ge \left(1-\frac{4|\mathcal{S}|B}{p-|\mathcal{S}|+1}\right)^{|\mathcal{S}|-1} \to 1.
\]
Another example satisfying Condition \ref{cond:two} is when each column of $\mathbf{X}_\mathcal{K}$
is generated as a linear combination of common factors in such a way that
every off-diagonal element of the sample correlation matrix of $\mathbf{X}_\mathcal{K}$ exceeds the threshold $\pi_{thr}$.
Under this condition, we can derive a similar result as in Scenario 1,
with the dominance of the tilted correlations for relevant variables restricted within $\mathcal{K}$.
\begin{thm}
Under (A1)--(A6), if Condition \ref{cond:two} holds, then $\mathbb{P}(\mathcal{E}_2)\to 1$ where
\begin{eqnarray*}
\mathcal{E}_2=\left\{\frac{|c^*_k(s_k)|}{\min_{j\in\mathcal{S}}|c_j^*(s_j)|}\to 0
\mbox{ for all } k\in\mathcal{K}\setminus\mathcal{S}\right\},
\end{eqnarray*}
regardless of the choice of the rescaling factor (that is, with $s_j=\lambda_j$ or $s_j=\Lambda_j$).
On the event $\mathcal{E}_2$, the following holds.
\begin{itemize}
\item $n^\mu\cdot c_j^*\to 0$ for $j\notin\mathcal{S}$.
\item $n^\mu\cdot|c_j^*|\to\infty$ for $j\in\mathcal{S}$.
\item With the rescaling 1, $c_j^*(\lambda_j)/\beta_j\to 1$ when $\beta_j\ne 0$.
\end{itemize}
\label{thm:two}
\end{thm}
\subsubsection{Scenario 3}
\label{sec:scen:three}
Finally, we consider a case when $\mathbf{X}$ satisfies a condition weaker than Condition \ref{cond:two}.
\begin{cond}
\begin{itemize}
\item[(C1)]
For each $j\in\mathcal{S}$, if $k\in\mathcal{K} \setminus \{\mathcal{C}_j \cup \mathcal{S} \,\}$, then $\mathcal{C}_k\cap\mathcal{C}_j=\emptyset$.
\item[(C2)] The marginal correlation between $X_j^*=(\mathbf{I}_n-\Pi_{j})X_j$ for $j\in\mathcal{S}$
and $\mathbb{E}\mathbf{y}=\mathbf{X}_\mathcal{S}\boldsymbol{\beta}_\mathcal{S}$ satisfies \linebreak
$n^\mu\cdot\inf_{j\in\mathcal{S}}\left\vert X_j^{*T}\mathbf{X}_\mathcal{S}\boldsymbol{\beta}_\mathcal{S}\right\vert \to \infty$.
\end{itemize}
\label{cond:three}
\end{cond}
It is clear that Condition \ref{cond:two} is stronger than (C1),
as the latter does not impose any restriction between $\mathcal{C}_j$ and $\mathcal{C}_k$ if both $j, k\in\mathcal{S}$.
\citet{buhlmann2009} placed a similar lower bound as that in (C2) on the
population partial correlation $\rho(j, \mathbf{y}|\mathcal{D})$ of relevant variables $X_j, \ j\in\mathcal{S}$,
for \emph{any} subset $\mathcal{D}\subset\mathcal{J}\setminus\{j\}$ satisfying $|\mathcal{D}|\le|\mathcal{S}|$.
Combined with the assumptions (A4)--(A5), (C2) rules out an ill-posed case where
the parameters $\beta_j, \ j\in\mathcal{S}$ take values which cancel out the ``tilted covariance''
among the relevant variables (this statement is explained more precisely
in the proof of Theorem \ref{thm:three}).
With Condition \ref{cond:three}, we can show similar results to those in Theorem \ref{thm:two}.
\begin{thm}
Under (A1)--(A6), if Condition \ref{cond:three} holds, then $\mathbb{P}(\mathcal{E}_3)\to 1$ where
\begin{eqnarray*}
\mathcal{E}_3=\left\{\frac{|c^*_k(s_k)|}{\min_{j\in\mathcal{S}}|c_j^*(s_j)|}\to 0
\mbox{ for all } k\in\mathcal{K}\setminus\mathcal{S}\right\},
\end{eqnarray*}
regardless of the choice of the rescaling factor (that is, with $s_j=\lambda_j$ or $s_j=\Lambda_j$).
On the event $\mathcal{E}_3$, the following holds.
\begin{itemize}
\item $n^\mu\cdot c_j^*\to 0$ for $j\notin\mathcal{S}$.
\item $n^\mu\cdot|c_j^*|\to\infty$ for $j\in\mathcal{S}$.
\end{itemize}
\label{thm:three}
\end{thm}
In contrast to Scenario 2, tilted correlations $c_j^*(\lambda_j)$ no longer necessarily converge to $\beta_j$ as
$n\to\infty$ in this scenario.
In the next section, we use the theoretical properties of tilted correlations derived in this section
to construct a variable screening algorithm.
\section{Application of tilting}
\label{sec:application}
Recalling issues (a)--(c) listed at the beginning of Section \ref{sec:tilt}
which are typically encountered in high-dimensional problems,
it is clear that tilting is specifically designed to tackle the occurrence of (a) and (b).
First turning to (a), for an irrelevant variable $X_j$ which attains high marginal
correlation with $\mathbf{y}$ due to its high correlations with relevant variables
$X_k, \ k\in\mathcal{C}_j\cap\mathcal{S}$, the impact of those high correlations is reduced to 0 in
the tilted correlation of $X_j$ and $\mathbf{y}$, and thus tilted correlation provides a more
accurate measure of its association with $\mathbf{y}$, as demonstrated in our theoretical
results of the previous section. Similar arguments apply to (b),
where tilting is capable of fixing \emph{low} marginal correlations between
\emph{relevant variables} and $\mathbf{y}$.
(As for (c), it is common practice to impose assumptions which rule out strong
collinearity among variables, and we have also followed this route.)
In what follows, we present an algorithm, specifically constructed to exploit our
theoretical study in Section \ref{sec:prop:tilt} by iteratively applying the tilting
procedure.
\subsection{Tilted correlation screening algorithm}
\label{sec:algorithm}
In Scenario 3, under a relatively weaker condition than those in Scenarios 1--2,
it is shown that the tilted correlations of relevant variables dominate
those of irrelevant variables within $\mathcal{K}=\mathcal{S}\cup\left(\cup_{j\in\mathcal{S}}\mathcal{C}_j\right)$.
Even though $\mathcal{K}$ is unknown in practice, we can exploit the theoretical results
by iteratively screening both marginal correlations and
tilted correlations within a specifically chosen subset of variables.
When every off-diagonal entry of the sample correlation matrix is small,
marginal correlation screening can be used as a reliable way of measuring the strength of association
between each $X_j$ and $\mathbf{y}$,
and indeed, $c_j^*$ for the variable $X_j$ with an empty $\mathcal{C}_j$ is equal to the marginal correlation $X_j^T\mathbf{y}$,
with either choice of the rescaling factor $s_j$.
Therefore if a variable $X_j$ with $\mathcal{C}_j=\emptyset$ achieves the maximum marginal correlation,
such $X_j$ is likely to be relevant.
On the other hand, if $\mathcal{C}_j\ne\emptyset$, then high marginal correlation between $X_j$ and $\mathbf{y}$
may have resulted from the high correlations of $X_j$ with $X_k, \ k\in\mathcal{C}_j\cap\mathcal{S}$, even when $j\notin\mathcal{S}$.
In this case, by screening the tilted correlations of $X_k, \ k\in\mathcal{C}_j\cup\{j\}$,
we can choose the variable attaining the maximum $|c^*_k|$ as a relevant variable.
In either case, one variable is selected and added to the \emph{active set} $\mathcal{A}$
which represents the currently chosen model.
The next step is to update the linear model by projecting it onto
the orthogonal complement of the current model space $\mathbf{X}_\mathcal{A}$, i.e.,
\begin{eqnarray}
(\mathbf{I}_n-\Pi_\mathcal{A})\mathbf{y}=(\mathbf{I}_n-\Pi_\mathcal{A})\mathbf{X}\boldsymbol{\beta}+(\mathbf{I}_n-\Pi_\mathcal{A})\boldsymbol{\epsilon}.
\label{update:model}
\end{eqnarray}
With the updated response and design matrix, we iteratively continue the above screening procedure.
Below we present the algorithm which is referred to as the tilted correlation screening (TCS) algorithm
throughout the paper.
\begin{itemize}
\item[Step 0] Start with an empty active set $\mathcal{A}=\emptyset$, current residual $\mathbf{z}=\mathbf{y}$,
and current design matrix $\mathbf{Z}=\mathbf{X}$.
\item[Step 1]
Find the variable which achieves the maximum marginal correlation with $\mathbf{z}$ and
let $k=\arg\max_{j\notin\mathcal{A}}|Z_j^T\mathbf{z}|$.
Identify $\mathcal{C}_k=\{j\notin\mathcal{A}, j\ne k: \ |Z_k^TZ_j|>\pi_{thr}\}$ and
if $\mathcal{C}_k=\emptyset$, let $k^*=k$ and go to Step 3.
\item[Step 2]
If $\mathcal{C}_k\ne\emptyset$, screen the tilted correlations $c_j^*$ between $Z_j$ and $\mathbf{z}$ for $j\in\mathcal{C}_k\cup\{k\}$
and find $k^*=\arg\max_{j\in\mathcal{C}_k\cup\{k\}}|c_j^*|$.
\item[Step 3]
Add $k^*$ to $\mathcal{A}$ and update the current residual and the current design matrix
$\mathbf{z}\leftarrow(\mathbf{I}_n-\Pi_\mathcal{A})\mathbf{y}$ and $\mathbf{Z}\leftarrow(\mathbf{I}_n-\Pi_\mathcal{A})\mathbf{X}$,
respectively. Further, rescale each column $j \not\in \mathcal{A}$ of $\mathbf{Z}$ to have norm one.
\item[Step 4] Repeat Steps $1$--$3$ until the cardinality of the active set $|\mathcal{A}|$ reaches a pre-specified $m<n$.
\end{itemize}
We note that Theorems \ref{thm:one}--\ref{thm:three} do not guarantee the selection consistency
of the TCS algorithm itself. However, they do demonstrate a certain `separation' property of the
tilted correlation (as a measure of association). Steps 1--2 of the above algorithm exploit this
property in the sense that they attempt to ``operate'' within the set $\mathcal{K}$
(which is unknown without the knowledge of $\mathcal{S}$),
since we either directly choose a variable indexed $k$ which is believed to lie in the set $\mathcal{S}$ or
screen its corresponding set $\mathcal{C}_k$ (recall that $\mathcal{K}=\mathcal{S}\cup\left\{\cup_{j\in\mathcal{S}}\mathcal{C}_j\right\}$).
In Step 4, we need to specify $m$ which acts as a stopping index in the TCS algorithm.
The TCS algorithm iteratively builds a solution path of the active set $\mathcal{A}_{(1)}\subset \cdots \subset \mathcal{A}_{(m)}=\mathcal{A}$,
and the final model $\hat{\mathcal{S}}$ can be chosen as either one of the submodels $\mathcal{A}_{(i)}$ or a subset of $\mathcal{A}$.
We discuss the selection of the final model $\hat{\mathcal{S}}$ in Section \ref{sec:final:model}.
In the simulation study, we used $m=\lfloor n/2 \rfloor$, which was an empirical choice made in order
to ensure that the projections performed in the algorithm were numerically stable,
while a sufficiently large number of variables were selected in the final model, if necessary.
In practice however, if the TCS algorithm combined with the chosen model selection criterion
returned $m$ variables (i.e. if it reached the maximum permitted number of active variables), we
would advise re-running the TCS algorithm with the limit of $m$ slightly raised,
until the number of final active variables was less than the current value of $m$.
During the application of the TCS algorithm, the linear regression model (\ref{lp}) is updated in Step 3
by projecting both $\mathbf{y}$ and $\mathbf{X}$ onto the orthogonal complement of the current model space spanned by $\mathbf{X}_\mathcal{A}$.
Therefore, with a non-empty active set $\mathcal{A}$, it is interesting to observe that the tilted correlation $c_j^*$ measures
the association between $X_j$ and $\mathbf{y}$ conditional on both the current model $X_k, \ k\in\mathcal{A}$ and
the following subset of variables adaptively chosen for each $j\notin\mathcal{A}$,
\begin{eqnarray}
\mathcal{C}_j^{\mathcal{A}}=\{k\notin\mathcal{A}, k\ne j: \ \hat{\rho}_n(j, k|\mathcal{A})>\pi_{thr}\},
\label{update:ccj}
\end{eqnarray}
where $\hat{\rho}_n(j, k|\mathcal{A})$ denotes the sample partial correlation between $X_j$ and $X_k$ conditional on $\mathbf{X}_\mathcal{A}$.
Finally, we discuss the computational cost of the TCS algorithm.
When $p \gg n$, the computational complexity of the algorithm is dominated by
the computation of the threshold at Step 1,
which is $O(np+np^2+p^2\log\,p+p^2)=O(np^2)$.
Since the procedure is repeated $m$ times, with $m$ set to satisfy $m=O(n)$,
the computational complexity of the entire algorithm is $O(n^2p^2)$,
which is $n$ times the cost of computing a $p\times p$ sample covariance matrix.
\subsection{Final model selection}
\label{sec:final:model}
Once the size of the active set reaches a pre-specified value $m$,
the final model $\hat{\mathcal{S}}$ needs to be chosen from $\mathcal{A}$.
In this section, we present two methods which can be applied in our framework.
One of the most commonly used methods for model selection is cross-validation (CV), in which
the observations would be divided into a training set and a test set
such that the models returned after each iteration (i.e. $\mathcal{A}_{(1)}\subset \cdots \subset \mathcal{A}_{(m)}=\mathcal{A}$) could
be tested using an appropriate error measure. However, we expect that for a CV-based method to work well, it
would have to be computationally intensive: for example, a leave-one-out CV or a leave-half-out CV with
averaging over different test and training sets.
One less computationally demanding option is to use e.g. an
extended version of the Bayesian information criterion (BIC) proposed in
\citet{bogdan2004} and \citet{chen2008} as
\begin{eqnarray}
\mbox{BIC}(\mathcal{A})=\log\left\{\frac{1}{n}\Vert(\mathbf{I}_n-\Pi_\mathcal{A})\mathbf{y}\Vert_2^2\right\}+
\frac{|\mathcal{A}|}{n}(\log n+2\log p).
\label{bic}
\end{eqnarray}
This new BIC takes into account high dimensionality of the data by adding a penalty term dependent on $p$.
Since the TCS algorithm generates a solution path which consists of $m$ sub-models
$\mathcal{A}_{(1)}\subset \cdots \subset \mathcal{A}_{(m)}=\mathcal{A}$,
we can choose our final model as $\hat{\mathcal{S}}=\mathcal{A}_{(m^*)}$ where
$m^*=\arg\min_{1\le i \le m}\mbox{BIC}(\mathcal{A}_{(i)})$.
\citet{chen2008} showed the consistency of this new BIC under stronger conditions
than those imposed in (A1), (A2) and (A4):
the level of sparsity was $|\mathcal{S}|=O(1)$, the dimensionality was $p=O(n^C)$ for $C>0$,
and non-zero coefficients satisfied $\min_{j\in\mathcal{S}}|\beta_j|>C'$ for $C'>0$.
Then, under the asymptotical identifiability condition introduced in \citet{chen2008},
(see (\ref{asym:iden}) in Appendix \ref{append:one}),
the modified BIC as defined in (\ref{bic}) was shown to be consistent in the sense that
\[
\mathbb{P}\left(\min_{|\mathcal{D}|\le m, \ \mathcal{D}\ne\mathcal{S}}\mbox{BIC}(\mathcal{D})>\mbox{BIC}(\mathcal{S})\right)\to 1 \mbox{ for } m\ge|\mathcal{S}|,
\]
i.e., the probability of selecting any model other than $\mathcal{S}$ converged to zero.
It was also noted that the original BIC was likely to fail when $p>n^{1/2}$.
At the price of replacing $\log n/n$ with $n^{-\kappa}$ in (\ref{asym:iden}),
the consistency of the new BIC (\ref{bic}) can be shown
with the level of sparsity growing with $n$ as in (A1) and the dimensionality increasing exponentially with $n$ as in (A2).
The proof of this statement follows the exact line of proof in \citet{chen2008} and so we omit
the details.
\subsection{Relation to existing literature}
\label{sec:relation}
We first note that our use of the term ``tilting'' is
different from the use of the same term in \citet{hall2009},
where it applies to distance-based classification and
denotes an entirely different procedure.
In the Introduction, we briefly discuss a list of existing variable selection
techniques in which care is taken of the correlations among the variables
in measuring the association between each variable and the response.
Having now a complete picture of the TCS algorithm, we provide a more detailed comparison
between our methodology and the aforementioned methods.
\citet{buhlmann2009} proposed the PC-simple algorithm, which iteratively removes variables
having small association with the response.
Sample partial correlations $\hat{\rho}_n(j, \mathbf{y}|\mathcal{D})$ are used as the measure of association
between $X_j$ and $\mathbf{y}$,
where $\mathcal{D}$ is \emph{any} subset of the active set $\mathcal{A}$ (those variables still remaining in the model excluding $X_j$)
with its cardinality $|\mathcal{D}|$ equal to the number of iterations taken so far.
Behind the use of partial correlations lies the concept of partial faithfulness
which implies that, at the population level, if $\rho(j, \mathbf{y}|\mathcal{D})=0$ for some $\mathcal{D}\subset\mathcal{J}\setminus\{j\}$,
then $\rho(j, \mathbf{y}|\mathcal{J}\setminus\{j\})=0$.
Their PC-simple algorithm starts with $\mathcal{A}=\mathcal{J}$ and iteratively repeats
the following:
(i) screening sample partial correlations $\hat{\rho}_n(j, \mathbf{y}|\mathcal{D})$ for all $j\in\mathcal{A}$
and for all $\mathcal{D}$ satisfying the cardinality condition,
(ii) applying Fisher's Z-transform to test the null hypotheses $H_0: \rho(j, \mathbf{y}|\mathcal{D})=0$,
(iii) removing irrelevant variables from $\mathcal{A}$,
until $|\mathcal{A}|$ falls below the number of iterations taken so far.
Recalling the definition of the rescaling factor $\Lambda_j$,
we can see the connection between $c_j^*(\Lambda_j)$ and $\hat{\rho}_n(j, \mathbf{y}|\mathcal{D})$,
as both are (up to a multiplicative factor $\Vert\mathbf{y}\Vert_2$) partial correlations between $X_j$ and $\mathbf{y}$
conditional on a certain subset of variables.
However, a significant difference comes from the fact that
the PC-simple algorithm takes all $\mathcal{D}\subset\mathcal{A}\setminus\{j\}$ with fixed $|\mathcal{D}|$ at each iteration,
whereas our TCS algorithm adaptively selects $\mathcal{C}_j$ (or $\mathcal{C}_j^{\mathcal{A}}$ when $\mathcal{A}\ne\emptyset$) for each $j$.
Also, while $\lambda_j$ is also a valid rescaling factor in our tilted correlation methodology,
partial correlations are by definition computed using $\Lambda_j$ only.
As for the forward regression \citep[FR]{wang2009} and the forward selection (FS),
although the initial stage of the two techniques is simple marginal correlation screening,
their progression has a new interpretation given a non-empty active set ($\mathcal{A}\ne\emptyset$).
Both algorithms obtain the current residual $\mathbf{z}$ by projecting the response $\mathbf{y}$
onto the orthogonal complement of the current model space, i.e., $\mathbf{z}=(\mathbf{I}_n-\Pi_\mathcal{A})\mathbf{y}$.
Therefore they also measure the association between each $X_j, \ j\notin\mathcal{A}$ and $\mathbf{y}$
conditional on the current model space spanned by $\mathbf{X}_\mathcal{A}$ and thus take into account
the correlations between $X_j, \ j\notin\mathcal{A}$ and $X_j, \ j\in\mathcal{A}$.
The difference between FR and FS comes from the fact that
FR updates not only the current residual $\mathbf{z}$ but also the current design matrix as
$\mathbf{Z}=(\mathbf{I}_n-\Pi_\mathcal{A})\mathbf{X}$ (as in Step 3 of the TCS algorithm).
Therefore FR eventually screens the rescaled version of $X_j^T(\mathbf{I}_n-\Pi_\mathcal{A})\mathbf{y}$
with the rescaling factor defined similarly to $\lambda_j$, replacing $\mathcal{C}_j$ with $\mathcal{A}$,
i.e., $X_j^T(\mathbf{I}_n-\Pi_\mathcal{A})X_j=1-X_j^T\Pi_\mathcal{A} X_j$.
On the other hand, there is no rescaling step in FS and it screens
the terms $X_j^T(\mathbf{I}_n-\Pi_\mathcal{A})\mathbf{y}, \ j\notin\mathcal{A}$, themselves.
By contrast, we note that while both FR and FS apply straight marginal correlation
at each stage of their progression, our method, if and as necessary, uses the tilted correlation,
which provides an adaptive choice between the marginal correlation and conditional
correlation, depending on the correlation structure of the current design matrix.
Indeed, in the extreme case where $\pi_{thr}=1$ is used, we have $\mathcal{C}_j=\emptyset$ and therefore the TCS
algorithm becomes identical to FR.
Another crucial difference is as already mentioned above in the context of the PC-simple algorithm:
the tilting algorithm employs an adaptive choice of the conditioning set, unlike
FR and FS.
In conclusion, the TCS algorithm, the PC-simple algorithm, FR and FS share the common ingredient of
measuring the contribution of each variable $X_j$ to $\mathbf{y}$ conditional on certain other variables;
however, there are also important differences between them, and Table \ref{table:comp} summarises this
comparison. We emphasise yet again that the TCS algorithm is distinguished from the rest
in its adaptive choice of the conditioning subset via hard-thresholding of the sample correlations among
the variables. Also, we note that the theoretical results of Section \ref{sec:prop:tilt} hold for
\mbox{e}mph{both} rescaling methods, while the other algorithms use only one of them
(FR, PC-simple) or none (FS).
\begin{table}
\caption{Comparison of variable selection methods.}
\label{table:comp}
\centering
\begin{tabular}{ c | c | c | c | c }
\hline
\hline
& TCS algorithm & PC-simple & FR & FS
\\
\hline
Step 0 & $\mathcal{A}=\emptyset$ & $\mathcal{A}=\mathcal{J}$ & $\mathcal{A}=\emptyset$ & $\mathcal{A}=\emptyset$
\\
\hline
\multirow{2}{*}{action} &
one & multiple & one & one \\
& selected & removed & selected & selected
\\
\hline
\multirow{3}{*}{
conditioning set $\mathcal{D}$} &
$\mathcal{A}\cup\mathcal{C}_j^{\mathcal{A}}$ & remaining & current & current \\
& $=\mathcal{A}\cup\{k\notin\mathcal{A}, k\ne j:$ & variables, & model & model \\
& $|\hat{\rho}_n(j, k|\mathcal{A})|>\pi_{thr}\}$ & $|\mathcal{D}|$ fixed & $\mathcal{A}$ & $\mathcal{A}$
\\
\hline
rescaling & $\lambda_j$ or $\Lambda_j$ & $\Lambda_j$ & $\lambda_j$ & none \\
\hline
\hline
\end{tabular}
\end{table}
Finally, we note the relationship between the TCS algorithm and
the covariance-regularised regression method proposed in \citet{witten2009}.
A key difference between the two is that
the TCS algorithm works with the sample marginal correlations among the variables
whereas in the scout procedure, it is the conditional correlations among the variables
(i.e., $\rho(j, k|\mathcal{J} \setminus \{j, k\})\ne 0$) that are subject to regularisation.
Also, the scout procedure achieves such regularisation
by maximising a penalised likelihood function rather than hard-thresholding,
and the thus-obtained estimate of the covariance structure of $\mathbf{X}$ is applied to estimate $\boldsymbol{\beta}$,
again by solving an optimisation problem.
By contrast, the tilted correlation method uses the outcome from thresholding
the sample correlation structure to compute the tilted correlations and select the variable with maximum tilted correlation in an iterative algorithm, and therefore does not involve any optimisation problems.
\subsection{Choice of threshold}
\label{sec:choice:thr}
In this section, we discuss the practical choice of the unknown threshold $\pi_{thr}$ from the sample correlation matrix $\mathbf{C}$.
Due to the lack of information on the correlation structure of $\mathbf{X}$ in general
and the possibility of spurious sample correlation among the variables,
a deterministic choice of $\pi_{thr}$ is not expected to perform well universally and we need a data-driven way of selecting a threshold.
\citet{bickel2008} proposed a cross-validation method for this purpose,
while \citet{el2008} conjectured the usefulness of a procedure based on controlling the false discovery rate (FDR).
Since our aim is different from the accurate estimation of the correlation matrix itself,
we propose a threshold selection procedure which is a modified version of the approach taken in the latter paper.
In the following, we assume that $\mathbf{X}$ is a realisation of a random matrix with each row generated as
$\mathbf{x}_i\sim_{\mbox{\scriptsize{i.i.d.}}}(\mathbf{0}, \Sigma)$,
where each diagonal element of $\Sigma$ equals one.
The procedure is a multiple hypothesis testing procedure and thus requires $p$-values of
the $d=p(p-1)/2$ hypotheses $H_0: \ |\Sigma_{j, k}|=0$ defined for all $j<k$.
We propose to compute the $p$-values as follows.
First, an $n$-vector with i.i.d. Gaussian entries is repeatedly generated $p$ times,
and sample correlations $\{r_{l, m}: \ 1 \le l < m \le p\}$ among those vectors are obtained as a reference.
Then, the p-value for each null hypothesis $H_0: \ |\Sigma_{j, k}|=0$ is defined as
$P_{j, k}=d^{-1}\cdot\left\vert\left\{r_{l, m}: \ 1 \le l < m \le p, \ |r_{l, m}|\ge|c_{j, k}|\right\}\right\vert$.
The next step is to apply the testing technique proposed in \citet{benjamini1995} to control the false discovery rate.
Denoting $P_{(1)}\le \ldots \le P_{(d)}$ as the ordered $p$-values,
we find the largest $i$ for which $P_{(i)}\le i/d\cdot\nu^*$ and reject all $H_{(j)}, \ j=1, \ldots, i$.
Then $\hat{\pi}_{thr}$ is chosen as the absolute value of the correlation corresponding to $P_{(i)}$.
FDR is controlled at level $\nu^*$ and we use $\nu^*=p^{-1/2}$ as suggested in \citet{el2008}.
An extensive simulation study described below confirms good practical performance of the above threshold selection procedure.
We also checked the sensitivity of our algorithm to the choice of threshold by applying a grid of thresholds in model (C) below.
Apart from the threshold $\hat{\pi}_{thr}$ selected as above, we ran versions of our algorithm where $\hat{\pi}_{thr}$ was multiplied by the constant factors of $0.75, 0.9, 1.1, 1.25$ each time it was used.
Performance of our algorithm was similar across the different thresholds,
which provides evidence for robustness of our procedure to the choice of threshold within reason.
\section{Simulation study}
\label{sec:sim}
In this section, we compare the performance of the TCS algorithm on simulated data
with that of other related methods discussed in the Introduction and Section \ref{sec:relation},
which are the PC-simple algorithm, FR, FS, iterative SIS (ISIS) and FLASH (for ease of implementation,
we adopt the ``global'' approach for FLASH), as well as Lasso for completeness.
Furthermore, some non-convex penalised least squares (PLS) estimation techniques
are included in the comparison study,
such as the SCAD \citep{fan2001} and the minimax concave (MC+) penalty \citep{zhang2010c}.
Sub-optimality of the Lasso in terms of model selection
has been noted in recent literature (see e.g. \citet{zhang2008} and \citet{zou2008}),
and non-convex penalties are proposed as a greedier alternative to achieve better variable selection.
In the following simulation study, the SCAD estimator is produced using the local linear approximation \citep{zou2008} and the MC+ penalised criterion is optimised using the SparseNet \citep{mazumder2009}.
The TCS algorithm is applied using both rescaling methods (denoted by TCS1 and TCS2, respectively),
with the maximum cardinality of the active set $\mathcal{A}$ (Step 4) set at $m=\lfloor n/2\rfloor$,
a value also used for FR.
The extended BIC is adopted (see Section \ref{sec:final:model}) to select the final model
for the one-at-a-time algorithms, i.e. TCS1, TCS2, FR and FS.
For the thus-selected final models, the coefficient values are estimated using least squares.
We note that, when the aim is to construct a well-performing predictive model, a shrinkage method
can be applied to the least squares estimate.
However, since our focus is on the variable selection aspect of the different techniques,
we use the plain (i.e. unshrunk) least squares estimates.
As for the rest of the methods, we select the tuning parameters for each method as follows:
the data is divided into the training and validation sets such that
the training observations are used to compute the solution paths over a range of tuning parameters,
and those which give the smallest mean squared error between the response and the predictions on the validation data are selected.
Finally, we note that FS and the Lasso are implemented using the R package \texttt{lars},
and the ISIS and the SCAD by the package \texttt{SIS}.
\subsection{Simulation models}
\label{sec:sim:models}
Our simulation models were generated as below. For models (A)--(C) and (F), the
procedure for generating the sparse coefficient vectors $\boldsymbol{\beta}$ is outlined
below the itemised list which follows.
\begin{description}
\item[(A) Factor model with 2 factors:]
Let $\mathbb{P}hi_1$ and $\mathbb{P}hi_2$ be two independent standard normal variables.
Each variable $X_j$, $j = 1, \ldots, p$, is generated as $X_j=f_{j, 1}\mathbb{P}hi_1+f_{j, 2}\mathbb{P}hi_2+\mbox{e}ta_j$,
where $f_{j, 1}, f_{j, 2}, \mbox{e}ta_j$ are also generated independently from a standard normal distribution.
The model is taken from \citet{meinshausen2008}.
\item[(B) Factor model with 10 factors:]
Identical to (A) but with 10 instead of 2 factors.
\item[(C) Factor model with 20 factors:]
Identical to (A) but with 20 instead of 2 factors.
\item[(D) Taken from \citet{fan2008} Section 4.2.2:]
\begin{eqnarray*}
\mathbf{y}=\beta X_1+\beta X_2+\beta X_3-3\beta \sqrt{\mbox{var}phi}X_4+\mbox{e}psilon,
\mbox{e}nd{eqnarray*}
where $\epsilon\sim\mathcal{N}_n(0, \mathbf{I}_n)$ and $(X_{i, 1}, \ldots, X_{i, p})^T$ are generated from
a multivariate normal distribution $\mathcal{N}_n(\mathbf{0}, \Sigma)$ independently for $i=1, \ldots, n$.
The population covariance matrix $\Sigma=\left(\Sigma_{j, k}\right)_{j, k=1}^p$ satisfies
$\Sigma_{j, j}=1$ and $\Sigma_{j, k}=\varphi, j\ne k$, except $\Sigma_{4,k}=\Sigma_{j,4}=\sqrt{\varphi}$,
such that $X_4$ is marginally uncorrelated with $\mathbf{y}$ at the population level.
In the original model of \citet{fan2008}, $\beta=5$ and $\varphi=0.5$ were used,
but we chose $\beta=2.5$ and $\varphi=0.5, 0.95$ to investigate the performance of the
variable selection methods in more challenging situations.
\item[(E) Taken from \citet{fan2008} Section 4.2.3:]
\begin{equation*}
\mathbf{y}=\beta X_1+\beta X_2+\beta X_3-3\beta \sqrt{\varphi}X_4+0.25\beta X_5+\epsilon,
\end{equation*}
with the population covariance matrix of $\mathbf{X}$ as in (D)
except $\Sigma_{5,k}=\Sigma_{j,5}=0$, such that
$X_5$ is uncorrelated with any $X_j, \ j\ne 5$, and relevant. However, it has only a very small
contribution to $\mathbf{y}$.
\item[(F) Leukemia data analysis:]
\citet{golub1999} analysed the Leukaemia dataset from high density Affymetrix oligonucleotide arrays
(available on \url{http://www.broadinstitute.org/cgi-bin/cancer/datasets.cgi}),
which has 72 observations and 7129 genes (i.e. variables).
In \citet{fan2008}, the dataset was used to investigate the performance of Sure Independence Screening in
a feature selection problem. Here, instead of using the actual response from the dataset,
we used the design matrix to create simulated models as follows.
Each column $X_j$ of the design matrix was normalised to $\Vert X_j\Vert_2^2=n$,
and out of 7129 such columns, $p$ were randomly selected to generate an $n\times p$-matrix $\mathbf{X}$.
Then we generated a sparse $p$-vector $\boldsymbol{\beta}$ and the response $\mathbf{y}$ as in (\ref{lp}).
In this manner, the knowledge of $\mathcal{S}$ could be used to assess the performance of the competing
variable selection techniques. A similar approach was taken in \citet{meinshausen2008} to generate
simulation models from real datasets.
\end{description}
With the exception of (D)--(E), we generated the simulated data as below.
Sparse coefficient vectors $\boldsymbol{\beta}$ were generated by randomly sampling
the indices of $\mathcal{S}$ from $1, \ldots, p$, with $|\mathcal{S}|=10$.
The non-zero coefficient vector $\boldsymbol{\beta}_\mathcal{S}$ was drawn from a zero-mean normal distribution
such that $\mathbf{C}_{\mathcal{S}, \mathcal{S}}\boldsymbol{\beta}_\mathcal{S}\sim\mathcal{N}_{|\mathcal{S}|}(\mathbf{0}, n^{-1}\mathbf{I}_{|\mathcal{S}|})$,
where $\mathbf{C}_{\mathcal{S}, \mathcal{S}}$ denotes the sample correlation matrix of $\mathbf{X}_\mathcal{S}$.
In this manner, $\arg\max_{j\in\mathcal{J}}|X_j^T(\mathbf{X}_\mathcal{S}\boldsymbol{\beta}_\mathcal{S})|$ may not always be attained by $j\in\mathcal{S}$,
which makes the correct identification of relevant variables more challenging.
The noise level $\sigma$ was chosen to set $R^2=\mbox{var}(\mathbf{x}_i^T\boldsymbol{\beta})/\mbox{var}(y_i)$ at $0.3$, $0.6$, or $0.9$,
adopting a similar approach to that taken in \citet{wang2009}.
In models (A)--(E), the number of observations was $n=100$
while the dimensionality $p$ varied from 500 to 2000 (except (D)--(E) where it was fixed at 1000), and
finally, 100 replicates were generated for each set-up.
\subsection{Simulation results}
\label{sec:sim:results}
For each method and simulation setting, we report the following error measures which are often adopted to evaluate the performance of variable selection:
the number of False Positives (FP, the number of irrelevant variables incorrectly identified as relevant),
the number of False Negatives (FN, the number of relevant variables incorrectly identified as irrelevant)
and the L2 distance $\Vert \boldsymbol{\beta}-\boldsymbol{\hat{\beta}} \Vert_2^2$; all averaged over 100 simulated data sets.
The summary of the simulation results can be found in Tables \ref{table:sim:a}--\ref{table:sim:f}.
We also present the receiver operating characteristic (ROC) curves,
which plot the true positive rate (TPR) against the false positive rate (FPR),
in Figures \ref{fig:roc:a}--\ref{fig:roc:f}.
Note that the simulation results from model (B) are discussed in the text only
and the corresponding figure and table are omitted for brevity.
The steep slope of an ROC implies that relevant variables have been selected without including too many irrelevant ones.
Vertical lines are plotted as a guideline to indicate when the FPR reaches $2.5|\mathcal{S}|/p$.
Since the existing R implementation of ISIS (package \verb+SIS+) returns the final selection
of variables only, rather than an entire path, we did not produce the ROC curves for that
method.
\begin{table}
\caption{\footnotesize{Simulation results for model (A) with $|\mathcal{S}|=10$. Results in bold font mean the
value of FP+FN is the lowest or within 10\% of the lowest; the same for L2. The value of
0 means less than $5 \times 10^{-4}$}.}
\label{table:sim:a}
\centering
\footnotesize{
\begin{tabular}{c|c|c|c|c|c|c|c|c|c|c|c|c}
\hline
\hline
$p$ & $R^2$ & & TCS1 & TCS2 & FR & FS & Lasso & ISIS & PCS & MC+ & SCAD & FLASH \\
\hline
500 & 0.3 & FP & 1.2 & 0.55 & 3.8 & 1.04 & 44.93 & 1.06 & 4.59 & 5.33 & 57.28 & 5.66 \\
& & FN & 2.47 & 2.52 & 1.82 & 2.2 & 2.93 & 9.18 & 8.45 & 4.31 & 1.8 & 2.9 \\
& & FP+FN & 3.67 & \textbf{3.07} & 5.62 & \textbf{3.24} & 47.86 & 10.24 & 13.04 & 9.64 & 59.08 & 8.56 \\
& & L2 & \textbf{0.012} & \textbf{0.012} & \textbf{0.012} & \textbf{0.013} & 0.264 & 1.006 & 0.914 & 0.134 & 0.096 & 0.081 \\ \hline
& 0.6 & FP & 1.05 & 0.74 & 4.49 & 1.07 & 47.92 & 1.09 & 4.76 & 3.25 & 40.76 & 6.45 \\
& & FN & 1.07 & 1.12 & 0.87 & 1.16 & 2.24 & 9.29 & 8.45 & 1.96 & 1.06 & 1.94 \\
& & FP+FN & 2.12 & \textbf{1.86 }& 5.36 & 2.23 & 50.16 & 10.38 & 13.21 & 5.21 & 41.82 & 8.39 \\
& & L2 & \textbf{0.002 }& \textbf{0.002} & 0.003 & 0.055 & 0.242 & 1.021 & 0.812 & 0.042 & 0.04 & 0.106 \\ \hline
& 0.9 & FP & 0.92 & 0.57 & 2.64 & 1.17 & 47.97 & 1.06 & 4.52 & 1.57 & 27.48 & 7.15 \\
& & FN & 0.43 & 0.41 & 0.32 & 0.62 & 1.75 & 9.21 & 8.37 & 2.03 & 0.58 & 1.49 \\
& & FP+FN & 1.35 & \textbf{0.98} & 2.96 & 1.79 & 49.72 & 10.27 & 12.89 & 3.6 & 28.06 & 8.64 \\
& & L2 & \textbf{0} & \textbf{0} & 0.001 & 0.074 & 0.292 & 1.075 & 0.982 & 0.085 & 0.02 & 0.205 \\ \hline\hline
1000 & 0.3 & FP & 1.79 & 1.38 & 22.28 & 1.61 & 44.56 & 1.38 & 5.53 & 6.6 & 69.77 & 10.05 \\
& & FN & 2.18 & 2.54 & 1.41 & 2.31 & 4.73 & 9.48 & 8.73 & 5.21 & 2.34 & 4.76 \\
& & FP+FN & \textbf{3.97} & \textbf{3.92} & 23.69 & \textbf{3.92} & 49.29 & 10.86 & 14.26 & 11.81 & 72.11 & 14.81 \\
& & L2 & \textbf{0.01} & 0.027 & 0.035 & 0.039 & 0.463 & 1.073 & 0.787 & 0.219 & 0.159 & 0.318 \\ \hline
& 0.6 & FP & 1.67 & 1.35 & 24.91 & 1.3 & 46 & 1.19 & 5.55 & 4.39 & 55.93 & 7.76 \\
& & FN & 1.13 & 1.17 & 0.78 & 1.65 & 4.16 & 9.33 & 8.63 & 2.79 & 1.51 & 3.33 \\
& & FP+FN & 2.8 & \textbf{2.52} & 25.69 & 2.95 & 50.16 & 10.52 & 14.18 & 7.18 & 57.44 & 11.09 \\
& & L2 & \textbf{0.002} & \textbf{0.003} & 0.009 & 0.126 & 0.498 & 1.016 & 0.868 & 0.13 & 0.117 & 0.32 \\ \hline
& 0.9 & FP & 1.21 & 0.8 & 25.75 & 1.11 & 47.38 & 1.23 & 5.45 & 1.84 & 43.93 & 7.42 \\
& & FN & 0.43 & 0.45 & 0.3 & 1.1 & 3.86 & 9.38 & 8.69 & 2.58 & 0.94 & 2.61 \\
& & FP+FN & 1.64 & \textbf{1.25 }& 26.05 & 2.21 & 51.24 & 10.61 & 14.14 & 4.42 & 44.87 & 10.03 \\
& & L2 & \textbf{0} & \textbf{0} & 0.002 & 0.088 & 0.405 & 0.916 & 0.803 & 0.078 & 0.063 & 0.192 \\ \hline\hline
2000 & 0.3 & FP & 1.77 & 1.65 & 41.53 & 1.64 & 38.27 & 1.48 & 6.71 & 10.53 & 80.9 & 9.07 \\
& & FN & 2.33 & 2.36 & 1.53 & 3.48 & 6.48 & 9.59 & 8.98 & 5.79 & 3.07 & 6.22 \\
& & FP+FN & \textbf{4.1} & \textbf{4.01 }& 43.06 & 5.12 & 44.75 & 11.07 & 15.69 & 16.32 & 83.97 & 15.29 \\
& & L2 & \textbf{0.013 }& 0.016 & 0.047 & 0.116 & 0.603 & 0.99 & 0.804 & 0.311 & 0.199 & 0.467 \\ \hline
& 0.6 & FP & 1.89 & 1.89 & 40.87 & 1.39 & 41.32 & 1.35 & 6.37 & 6.1 & 66.65 & 7.82 \\
& & FN & 1.4 & 1.46 & 0.87 & 2.77 & 6.18 & 9.48 & 8.82 & 4.06 & 2.21 & 5.06 \\
& & FP+FN & \textbf{3.29} & \textbf{3.35} & 41.74 & 4.16 & 47.5 & 10.83 & 15.19 & 10.16 & 68.86 & 12.88 \\
& & L2 & \textbf{0.004} & \textbf{0.004} & 0.024 & 0.252 & 0.752 & 1.243 & 0.989 & 0.338 & 0.18 & 0.496 \\ \hline
& 0.9 & FP & 1.61 & 1.32 & 39.5 & 1.45 & 39 & 1.35 & 6.87 & 19.99 & 59.73 & 6.96 \\
& & FN & 0.44 & 0.56 & 0.68 & 2.21 & 6.32 & 9.55 & 8.9 & 3.88 & 1.6 & 5.11 \\
& & FP+FN & \textbf{2.05} & \textbf{1.88} & 40.18 & 3.66 & 45.32 & 10.9 & 15.77 & 23.87 & 61.33 & 12.07 \\
& & L2 & \textbf{0} & 0.005 & 0.314 & 0.285 & 0.711 & 1.126 & 0.978 & 0.367 & 0.147 & 0.577 \\ \hline\hline
\end{tabular}}
\end{table}
\begin{table}
\caption{\footnotesize{Simulation results for model (C) with $|\mathcal{S}|=10$.
Results in bold font mean the
value of FP+FN is the lowest or within 10\% of the lowest; the same for L2.}}
\label{table:sim:c}
\centering
\footnotesize{
\begin{tabular}{c|c|c|c|c|c|c|c|c|c|c|c|c}
\hline
\hline
$p$ & $R^2$ & & TCS1 & TCS2 & FR & FS & Lasso & ISIS & PCS & MC+ & SCAD & FLASH \\
\hline
500 & 0.3 & FP & 4.21 & 3.57 & 9.56 & 8.44 & 43.82 & 1.81 & 5.73 & 38.84 & 42.23 & 19.97 \\
& & FN & 6.27 & 5.45 & 5.81 & 7.44 & 3.08 & 9.81 & 9.42 & 4.49 & 3.69 & 5.19 \\
& & FP+FN & 10.48 & \textbf{9.02} & 15.37 & 15.88 & 46.9 & 11.62 & 15.15 & 43.33 & 45.92 & 25.16 \\
& & L2 & 0.207 & \textbf{0.172} & 0.246 & 0.427 & \textbf{0.166 }& 0.718 & 0.648 & 0.322 & 0.189 & 0.271 \\ \hline
& 0.6 & FP & 6.57 & 4.44 & 15.67 & 15.61 & 45.36 & 1.83 & 5.78 & 64.69 & 38.82 & 19.07 \\
& & FN & 3.44 & 2.01 & 1.57 & 3.35 & 1.99 & 9.83 & 9.31 & 5.73 & 3.4 & 4.09 \\
& & FP+FN & 10.01 & \textbf{6.45} & 17.24 & 18.96 & 47.35 & 11.66 & 15.09 & 70.42 & 42.22 & 23.16 \\
& & L2 & 0.066 & 0.024 & \textbf{0.019} & 0.114 & 0.093 & 0.858 & 0.782 & 0.36 & 0.164 & 0.207 \\ \hline
& 0.9 & FP & 6.89 & 3.49 & 16.22 & 17.58 & 48.62 & 1.79 & 5.9 & 58.78 & 39.17 & 18.66 \\
& & FN & 1.06 & 0.86 & 0.63 & 1.43 & 1.01 & 9.79 & 9.47 & 5.7 & 3.16 & 3.16 \\
& & FP+FN & 7.95 & \textbf{4.35} & 16.85 & 19.01 & 49.63 & 11.58 & 15.37 & 64.48 & 42.33 & 21.82 \\
& & L2 & 0.011 & \textbf{0.002} & 0.025 & 0.078 & 0.035 & 0.82 & 0.752 & 0.374 & 0.157 & 0.2 \\ \hline\hline
1000 & 0.3 & FP & 2.29 & 3.45 & 8 & 6.73 & 45.22 & 1.92 & 5.86 & 109.1 & 114.8 & 19.63 \\
& & FN & 7.9 & 5.77 & 7.75 & 8.67 & 4.33 & 9.92 & 9.58 & 6.48 & 3.63 & 6.92 \\
& & FP+FN & 10.19 & \textbf{9.22} & 15.75 & 15.4 & 49.55 & 11.84 & 15.44 & 115.6 & 118.4 & 26.55 \\
& & L2 & 0.558 & \textbf{0.342} & 0.694 & 0.835 & 0.414 & 0.993 & 0.897 & 0.588 & \textbf{0.343} & 0.554 \\ \hline
& 0.6 & FP & 5.04 & 4.72 & 15.21 & 11.93 & 48.97 & 1.92 & 6.13 & 90.51 & 110.8 & 19.86 \\
& & FN & 5.79 & 3.6 & 4.31 & 6.41 & 3.27 & 9.92 & 9.6 & 6.74 & 2.51 & 5.97 \\
& & FP+FN & 10.83 & \textbf{8.32} & 19.52 & 18.34 & 52.24 & 11.84 & 15.73 & 97.25 & 113.3 & 25.83 \\
& & L2 & 0.286 & \textbf{0.138 }& 0.293 & 0.456 & 0.287 & 1.006 & 0.905 & 0.537 & 0.214 & 0.404 \\ \hline
& 0.9 & FP & 9.15 & 5.44 & 20.3 & 15.99 & 52.41 & 1.8 & 6.23 & 78.06 & 100.4 & 20.67 \\
& & FN & 3.74 & 1.72 & 2.18 & 4.22 & 2.28 & 9.8 & 9.56 & 6.75 & 1.75 & 5.16 \\
& & FP+FN & 12.89 & \textbf{7.16} & 22.48 & 20.21 & 54.69 & 11.6 & 15.79 & 84.81 & 102.1 & 25.83 \\
& & L2 & 0.258 & \textbf{0.058} & 0.147 & 0.52 & 0.174 & 1.09 & 0.985 & 0.612 & 0.137 & 0.43 \\ \hline\hline
2000 & 0.3 & FP & 1.75 & 2.25 & 5.12 & 4.97 & 47.13 & 1.89 & 6.4 & 133.6 & 129.4 & 19.9 \\
& & FN & 8.72 & 7.34 & 9.13 & 9.44 & 5.63 & 9.89 & 9.74 & 7.39 & 4.81 & 7.89 \\
& & FP+FN & \textbf{10.47} & \textbf{9.59} & 14.25 & 14.41 & 52.76 & 11.78 & 16.14 & 141 & 134.3 & 27.79 \\
& & L2 & 0.649 & \textbf{0.446} & 0.855 & 0.894 & 0.499 & 0.951 & 0.87 & 0.669 & \textbf{0.438} & 0.678 \\ \hline
& 0.6 & FP & 3.4 & 4.76 & 11.64 & 6.85 & 49.4 & 1.94 & 6.31 & 187.3 & 125.4 & 20.29 \\
& & FN & 7.83 & 4.62 & 7.27 & 8.66 & 4.56 & 9.94 & 9.78 & 6.67 & 3.68 & 7.69 \\
& & FP+FN & 11.23 & \textbf{9.38} & 18.91 & 15.51 & 53.96 & 11.88 & 16.09 & 194 & 129 & 27.98 \\
& & L2 & 0.512 & \textbf{0.164} & 0.629 & 0.761 & 0.418 & 0.943 & 0.857 & 0.566 & 0.31 & 0.675 \\ \hline
& 0.9 & FP & 7.02 & 4.93 & 19.17 & 10.77 & 52.8 & 1.91 & 6.16 & 149.3 & 117.3 & 20.81 \\
& & FN & 5.75 & 2.64 & 4.3 & 7.17 & 3.87 & 9.91 & 9.65 & 7.25 & 2.85 & 7.3 \\
& & FP+FN & 12.77 & \textbf{7.57 }& 23.47 & 17.94 & 56.67 & 11.82 & 15.81 & 156.6 & 120.2 & 28.11 \\
& & L2 & 0.36 & \textbf{0.104} & 0.292 & 0.516 & 0.284 & 0.796 & 0.708 & 0.552 & 0.196 & 0.56 \\ \hline\hline
\end{tabular}}
\end{table}
\begin{table}
\caption{\footnotesize{Simulation results for models (D)--(E) with $|\mathcal{S}|=4$ and $5$.
Results in bold font mean the
value of FP+FN is the lowest or within 10\% of the lowest; the same for L2.}}
\label{table:sim:de}
\centering
\footnotesize{
\begin{tabular}{c|c|c|c|c|c|c|c|c|c|c|c}
\hline
\hline
$\varphi$ & & TCS1 & TCS2 & FR & FS & Lasso & ISIS & PCS & MC+ & SCAD & FLASH \\
\hline
0.5 & FP & 0.71 & 2.4 & 22.41 & 27.86 & 58.73 & 1.21 & 2.33 & 27.94 & 111 & 26.18 \\
& FN & 0 & 0 & 0 & 1 & 1 & 3.21 & 1.65 & 0.6 & 1 & 1 \\
& FP+FN & \textbf{0.71} & 2.4 & 22.41 & 28.86 & 59.73 & 4.42 & 3.98 & 28.54 & 112 & 27.18 \\
& L2 & \textbf{0.149} & 0.351 & 2.876 & 33.46 & 30.92 & 47.9 & 38.74 & 19.12 & 30.96 & 31.85 \\ \hline
0.95 & FP & 0.39 & 0.76 & 19.84 & 7.14 & 28.37 & 1.45 & 1.42 & 49.58 & 46.68 & 12.88 \\
& FN & 1.43 & 3.64 & 1.89 & 2.05 & 1.54 & 3.71 & 3.58 & 1.7 & 2.07 & 1.61 \\
& FP+FN & \textbf{1.82} & 4.4 & 21.73 & 9.19 & 29.91 & 5.16 & 5 & 51.28 & 48.75 & 14.49 \\
& L2 & \textbf{26.71} & 71.17 & 76.23 & 70.87 & 65.82 & 73.73 & 71.61 & 67.07 & 69.23 & 67.21 \\ \hline\hline
0.5 & FP & 0.85 & 3.31 & 30.2 & 29.06 & 56.92 & 1.23 & 2.31 & 32.56 & 112.3 & 27.04 \\
& FN & 0.03 & 0.11 & 0.01 & 1.15 & 1.05 & 4.23 & 2.42 & 0.79 & 1.02 & 1.19 \\
& FP+FN & \textbf{0.88} & 3.42 & 30.21 & 30.21 & 57.97 & 5.46 & 4.73 & 33.35 & 113.3 & 28.23 \\
& L2 & \textbf{0.177 }& 0.528 & 4.102 & 33.5 & 31.46 & 48.83 & 39.46 & 22.11 & 31.46 & 32.18 \\ \hline
0.95 & FP & 0.05 & 0.05 & 26.08 & 4.5 & 28.74 & 1.03 & 1.01 & 35.82 & 43.73 & 12.78 \\
& FN & 2.76 & 3.96 & 1.75 & 2.32 & 1.56 & 4.1 & 3.77 & 1.86 & 2.11 & 1.83 \\
& FP+FN & \textbf{2.81} & 4.01 & 27.83 & 6.82 & 30.3 & 5.13 & 4.78 & 37.68 & 45.84 & 14.61 \\
& L2 & \textbf{49.89} & 71.56 & 81.1 & 69.81 & 65.9 & 76.37 & 71.88 & 66.78 & 68.76 & 67.28 \\ \hline\hline
\end{tabular}}
\end{table}
\begin{table}
\caption{\footnotesize{Simulation results for model (F) with $|\mathcal{S}|=10$. Results in bold font mean the
value of FP+FN is the lowest or within 10\% of the lowest; the same for L2.}}
\label{table:sim:f}
\centering\footnotesize{
\begin{tabular}{c|c|c|c|c|c|c|c|c|c|c|c|c}
\hline
\hline
$p$ & $R^2$ & & TCS1 & TCS2 & FR & FS & Lasso & ISIS & PCS & MC+ & SCAD & FLASH \\
\hline
1000 & 0.3 & FP & 2.27 & 2.08 & 13.68 & 1.65 & 23.69 & 0.87 & 6.09 & 130.6 & 23.61 & 7.97 \\
& & FN & 7.2 & 6.45 & 5.12 & 8.94 & 8.22 & 9.92 & 8.33 & 7.81 & 8.42 & 5.96 \\
& & FP+FN & 9.47 & \textbf{8.53 }& 18.8 & 10.59 & 31.91 & 10.79 & 14.42 & 138.4 & 32.03 & 13.93 \\
& & L2 & 3.376 & \textbf{2.579} & 3.549 & 6.487 & 6.33 & 7.577 & 5.144 & 6.654 & 6.346 & \textbf{2.605} \\ \hline
& 0.6 & FP & 3.97 & 3.87 & 16.36 & 1.58 & 21.89 & 0.78 & 5.98 & 106.4 & 23.54 & 8.48 \\
& & FN & 4.65 & 4.11 & 4.07 & 9.1 & 8.24 & 9.89 & 8.37 & 7.88 & 8.46 & 5.22 \\
& & FP+FN & \textbf{8.62} & \textbf{7.98 }& 20.43 & 10.68 & 30.13 & 10.67 & 14.35 & 114.2 & 32 & 13.7 \\
& & L2 & 3.029 & \textbf{2.515 }& 6.604 & 10.53 & 10.25 & 11.5 & 7.181 & 10.64 & 10.38 & 4.229 \\ \hline
& 0.9 & FP & 5.97 & 5.17 & 14.54 & 1.77 & 20.29 & 0.83 & 6.1 & 115.2 & 20.72 & 7.73 \\
& & FN & 1.95 & 2.42 & 3.45 & 9.14 & 8.7 & 9.88 & 8.3 & 8.03 & 8.87 & 4.81 \\
& & FP+FN & \textbf{7.92} & \textbf{7.59} & 17.99 & 10.91 & 28.99 & 10.71 & 14.4 & 123.2 & 29.59 & 12.54 \\
& & L2 & \textbf{0.573} & 2.055 & 5.81 & 9.555 & 9.501 & 10.65 & 8.428 & 9.736 & 9.51 & 5.428 \\ \hline\hline
2000 & 0.3 & FP & 1.76 & 1.53 & 12.56 & 1.49 & 21.06 & 0.84 & 6.89 & 154.2 & 26.63 & 8.88 \\
& & FN & 8.66 & 8.25 & 7.73 & 9.48 & 8.89 & 9.9 & 8.75 & 8.37 & 8.86 & 7.06 \\
& & FP+FN & \textbf{10.42} & \textbf{9.78 }& 20.29 & 10.97 & 29.95 & \textbf{10.74} & 15.64 & 162.6 & 35.49 & 15.94 \\
& & L2 & 4.774 & \textbf{3.952} & 5.626 & 6.371 & 6.267 & 7.756 & 5.484 & 6.403 & 6.286 & \textbf{4.27} \\ \hline
& 0.6 & FP & 3.18 & 2.51 & 16.9 & 1.62 & 20.89 & 0.85 & 6.45 & 250.1 & 29.89 & 8.46 \\
& & FN & 6.94 & 7.04 & 6.56 & 9.51 & 8.83 & 9.9 & 8.56 & 8.05 & 8.86 & 6.56 \\
& & FP+FN & \textbf{10.12} & \textbf{9.55} & 23.46 & 11.13 & 29.72 & 10.75 & 15.01 & 258.2 & 38.75 & 15.02 \\
& & L2 & \textbf{2.424} & 2.9 & 5.74 & 6.891 & 6.901 & 8.071 & 6.072 & 7.013 & 6.902 & 4.79 \\ \hline
& 0.9 & FP & 5.4 & 4.42 & 18.96 & 1.83 & 22.73 & 0.83 & 6.73 & 202.3 & 29.23 & 9.04 \\
& & FN & 4.29 & 3.98 & 5.17 & 9 & 8.72 & 9.92 & 8.64 & 8.25 & 8.99 & 5.86 \\
& & FP+FN & 9.69 & \textbf{8.4 }& 24.13 & 10.83 & 31.45 & 10.75 & 15.37 & 210.6 & 38.22 & 14.9 \\
& & L2 & \textbf{1.675 }& \textbf{1.745} & 3.64 & 5.232 & 5.254 & 6.67 & 4.133 & 5.401 & 5.275 & 2.841 \\ \hline\hline
\end{tabular}}
\end{table}
Overall, compared with other methods, TCS1, TCS2 and FR achieve a high TPR
more quickly without including too many irrelevant variables and thus tend to achieve a small L2 distance.
While the PC-simple algorithm attains a low FPR, its TPR is also low even when the significance level
for the testing procedure is set to be high.
For certain set-ups, Lasso or SCAD achieves a high TPR but only at the cost of a high FPR.
Specifically, for factor models (A)--(C), it can be observed that TCS1, TCS2, FR
(combined with the extended BIC) and SCAD are superior to other methods in terms of achieving small FN,
especially when $R^2$ is sufficiently high.
However, the FR and SCAD tend to result in a model with too large an FP in comparison to the TCS algorithm,
and therefore the L2 distance obtained from TCS2 is often the smallest.
This becomes more obvious as the dimensionality grows and the number of factors increases,
and the ROC curves in Figures \ref{fig:roc:a}--\ref{fig:roc:c} also support this conclusion,
as those from the TCS algorithm attain a higher TPR for a similar level of FPR.
Note that from our extensive numerical experiments, we observed that
an increasing number of factors led to an increased chance of marginal correlation screening being misleading at the very first iteration, in the sense that $\arg\max_j|X_j^T\mathbf{y}|\notin\mathcal{S}$.
In such set-ups, the adaptive choice of $\mathcal{C}_j$ used by the TCS algorithm turned out to be helpful in
correctly identifying a relevant variable more often than marginal correlation screening.
Between TCS1 and TCS2, while the two perform as well as each other for the two factor models from (A),
it is TCS2 which outperforms the other for the models with more factors.
As for the rest of the methods, FS performs as well as FR for lower dimensionality,
and even better in terms of FP,
but its FN is larger than that of FR as $p$ and the number of factors increase.
Both PCS algorithm and ISIS return final models which are too small and therefore obtain large FN
and small FP; especially ISIS almost always misses the entire set of variables in $\mathcal{S}$.
Lasso is not significantly inferior to, and occasionally better than, TCS1, TCS2 and FR in terms of FN,
but it tends to select a model with a large FP like SCAD.
While the ROC curves of MC+ and FLASH behave better than that of SCAD for certain set-ups
(e.g. for two factor models), final selected models for these methods achieve
larger FN. Finally, in terms of FP, FLASH tends to be better than SCAD, MC+ and Lasso.
For models (D) and (E), the TCS algorithm and FR outperform the rest when $\varphi=0.5$,
rapidly identifying all the relevant variables before the FPR reaches $2.5|\mathcal{S}|/p$
(left column of Figure \ref{fig:roc:de}).
However, when correlations among the variables increase with $\varphi=0.95$,
ROC curves show that TCS1 is the only method that can identify all the relevant variables
(right column of Figure \ref{fig:roc:de}).
Other methods, including TCS2 and FR, often neglect to include $X_4$ due to its high correlations with the other variables, $\sqrt{\varphi}$ being almost 0.975.
We note that while the ROC curves indicate that very often all the relevant variables are recovered by TCS1, the models selected by the extended BIC leave out some of them.
Since the final models from TCS1 tend to contain the smallest number of noisy variables, we conclude that
the extended BIC tends to choose final models which are too small for these particular examples.
The rest of the methods behave similarly as in the case of factor models;
while Lasso, MC+, SCAD and FLASH achieve relatively small FN, the FP of their final models is too large
and therefore they end up with a larger L2 distance than that of TCS1.
For the examples generated from the Leukemia dataset (model (F), Figure \ref{fig:roc:f}),
the TCS algorithm with either of the rescaling methods always performs the best,
with its ROC curves always dominating those of others.
FR performs the second best and then follows FLASH. The remaining methods
are not able to identify as many relevant variables as the TCS algorithm or FR even for a high FPR.
The results reported in Table \ref{table:sim:f} also support this observation,
where it is clear that the smallest FP and L2 distance are attained by either TCS1 or TCS2.
Sometimes FR outperforms the two in terms of FN but TCS1 or TCS2 still achieves a smaller L2 distance,
which implies that TCS algorithm, when combined with the extended BIC,
can pick up a smaller model that better mimics the true coefficient vector than that yielded by
FR with the same criterion.
Interestingly, when it comes to the final model, FLASH achieves similar FN and much smaller FP than FR.
We have observed that the two rescaling methods sometimes select variables in different orders,
although it does not necessarily imply that the resulting models are different.
Overall, TCS2 performs better than TCS1 except for the examples from (D)--(E).
In these two models, the variables $X_1, \ldots, X_p$ have a very special correlation structure
in that e.g. $X_4$, a significant variable, can often appear uncorrelated with $\mathbf{y}$ in marginal correlation screening.
Since TCS1 involves the term $\Vert(\mathbf{I}_n-\Pi_\mathcal{A})X_j\Vert_2^2$ in the denominator of the tilted correlation, as opposed to
the term $\Vert(\mathbf{I}_n-\Pi_\mathcal{A})X_j\Vert_2$ in TCS2, it is better at picking up $X_4$ than TCS2.
In the factor model examples, while the overall correlations among the variables are high,
such ``masking'' does not take place as often among the significant variables.
Therefore we conclude that unless the correlations are particularly high,
TCS2 usually performs well.
\section{Boston housing data analysis}
\label{sec:boston}
In this section, we apply the TCS algorithm as well as the methods used in the simulation study in Section \ref{sec:sim} to the Boston housing data, which was previously used to compare the performance of different regression techniques
e.g. in \citet{radchenko2011}.
Originally, the dataset contains $13$ variables which may have influence over the house prices.
As in \citet{radchenko2011}, we include the interaction terms between the variables in the analysis
such that the data has $p=91$ variables and $n=506$ observations.
Note that, due to the way the variables are produced, there exist large sample correlations across the columns of the design matrix $\mathbf{X}$.
We split the data into three with $n_1=91 (=p)$, $n_2=46$ and $n_3=369$ observations each, and use the first $n_1$ observations as the training data (to compute a solution path for each method), the next $n_2$ observations as the validation data (to choose the solution along the path that minimises the sum of the squared residuals for each method), and the last $n_3$ for computing the test error
($n_3^{-1}\Vert\mathbf{y}-\hat{\mathbf{y}}\Vert_2^2$).
Random splitting of the data is repeated 20 times and
Table \ref{table:boston} reports the average test error and number of selected variables,
which shows that TCS2 achieves the minimum test error with the fewest variables in the model
(except for the PC-simple algorithm).
TCS1 also performs second best with more variables selected during the validation step.
FR performs well in terms of both test error and the number of selected variables,
and then follows FLASH.
We note that the PC-simple algorithm chooses too few variables to describe the data well,
while the non-convex penalty algorithms (MC+, SCAD) tend to include many more variables than the rest.
\begin{table}
\caption{Boston housing data: test errors and the number of selected variables averaged over 20 test data sets.}
\label{table:boston}
\centering
\begin{tabular}{c|c|c|c|c|c|c|c}
\hline
\hline
& TCS1 & TCS2 & FR & PC-simple & MC+ & SCAD & FLASH
\\
\hline
test error & 27.03 & 26.43 & 33.10 & 32.47 & 36.47 & 34.95 & 30.14
\\
number of variables & 19.5 & 13.5 & 16.0 & 2.0 & 83.5 & 36.0 & 26.0
\\
\hline
\hline
\end{tabular}
\end{table}
\section{Conclusions}
\label{sec:conclusion}
In this paper, we proposed a new way of measuring strength of association between the variables and the response
in a linear model with a possibly large number of covariates, by adaptively taking into account
correlations among the variables. We conclude by listing the new contributions made in this paper.
\begin{itemize}
\item Although tilting is not the only procedure which measures the association
between a variable and the response conditional on other variables,
its selection of the conditioning variables is a step further from
simply using the current model itself or its sub-models, as is done in existing
iterative algorithms. The hard-thresholding step in the tilting procedure enables an adaptive choice of
the conditioning subset $\mathcal{C}_j$ for each variable $X_j$.
Recalling the decomposition of the marginal correlation in (\ref{marginal:corr}),
this adaptive choice can be seen as a vital step in capturing the contribution of each variable to the response.
Also, in the case $\mathcal{C}_j=\emptyset$, tilted correlation is identical to marginal correlation,
which can be viewed as ``adaptivity'' of our procedure.
\item We propose two rescaling factors to obtain the tilted correlation $c_j^*$.
Rescaling 1 ($\lambda_j$) is also adopted by the forward regression and rescaling 2 ($\Lambda_j$) is also adopted by the PC-simple algorithm,
yet tilting is the only method to meaningfully use both rescaling factors and
our theoretical results in Section \ref{sec:prop:tilt} are valid for either of the two factors.
It would be of interest to identify a way of combining the two rescaling methods,
which we leave as a topic for future research.
\item The separation of relevant and irrelevant variables,
achieved by tilted correlation (as in our Theorems \ref{thm:one}--\ref{thm:three}), cannot always be achieved by marginal correlation,
and similar results to these theorems have not been reported previously to the best of our knowledge.
\item
The proposed TCS algorithm is designed to fully exploit the theoretical properties of the tilted correlation,
and in particular its asymptotic consistency in separating between the relevant and irrelevant variables.
Although we have not yet been able to demonstrate the model selection consistency of the TCS algorithm,
numerical experiments confirm its good performance in comparison with other well-performing methods,
showing that it can achieve high true positive rate without including many irrelevant variables.
The algorithm is simple, easy to implement and does not require the use of advanced computational
tools.
\end{itemize}
Ending on a slightly more general note, since correlation is arguably the most widely used
statistical measure of association, we would expect our tilted correlation (which can be viewed
as an ``adaptive'' extension of standard correlation) to be more widely applicable in various
statistical contexts beyond the simple linear regression model.
\textbf{Acknowledgements}
We would like to thank the Joint Editor, Associate Editor and two Referees for very helpful comments which
led to a substantial improvement of this manuscript.
\appendix
\section{Proof of Theorem \ref{thm:one}}
\label{appendix:one}
The proof of Theorem \ref{thm:one} is divided into Steps 1--3.
Recalling the decomposition of $X_j^{*T}\mathbf{y}$ in (\ref{decom:tilt:corr}),
we first control the inner product between $X_j^*$ and $\boldsymbol{\epsilon}$ uniformly over all $j$ in Step 1.
In Steps 2--3, we control the second summand
$I\equiv\sum_{k\in\mathcal{S}\setminus\mathcal{C}_j, k\ne j}\beta_kX_j^T(\mathbf{I}_n-\Pi_{j})X_k$ for $j$ falling into two different categories,
and thus derive the result.
\begin{itemize}
\item[Step 1]
For $\boldsymbol{\epsilon}\sim\mathcal{N}_n(\mathbf{0}, n^{-1}\sigma^2\cdot\mathbf{I}_n)$, we observe that,
with probability converging to 1, $\max_{1\le j \le p}|\langle \boldsymbol{\epsilon}, Z_j \rangle| \le \sigma\sqrt{2\log p/n}$
for $Z_1, \ldots, Z_p\in\mathbb{R}^n$ having unit norm as $\Vert Z_j \Vert_2=1$.
From (A2), we have $\sigma\sqrt{2\log p/n}\le Cn^{-\gamma}$ for some $C>0$,
and from (A5), $\Vert X_j^*\Vert_2>\sqrt{\alpha}>0$.
Therefore by defining $\mathcal{E}_0=\{\max_j|X_j^{*T}\boldsymbol{\epsilon}|<C n^{-\gamma}\}$,
it follows that $\mathbb{P}(\mathcal{E}_0)\to 1$.
\item[Step 2]
In this step, we turn our attention to those $j$ whose corresponding $\mathcal{C}_j$ satisfy
$\mathcal{S}\setminus\{j\}\subseteq\mathcal{C}_j$ and thus the corresponding $I=0$ and
$X_j^{*T}\mathbf{y}=\beta_j(1-a_j)+X_j^{*T}\boldsymbol{\epsilon}$.
\begin{description}
\item[Rescaling 1.]
With the rescaling factor $\lambda_j=(1-a_j)$ which is bounded away from 0 by (A5),
it can be shown that if such $j$ belongs to $\mathcal{S}$, its tilted correlation satisfies
$c_j^*(\lambda_j)/\beta_j\to 1$ on $\mathcal{E}_0$, as $|\beta_j|\gg n^{-\mu}$.
On the other hand, if $j\notin\mathcal{S}$, we have $\beta_j(1-a_j)=0$ which leads to
$n^\mu\cdot c_j^*(\lambda_j)\le n^\mu\cdot Cn^{-\gamma}\to 0$ on $\mathcal{E}_0$.
\item[Rescaling 2.]
Note that $j$ whose $\mathcal{C}_j$ include all the members of $\mathcal{S}$ cannot be a member of $\mathcal{S}$ itself,
and in this case, $(\mathbf{I}_n-\Pi_{j})\mathbf{y}$ is reduced to $(\mathbf{I}_n-\Pi_{j})\boldsymbol{\epsilon}$.
Since (A3) assumes that each $\mathcal{C}_j$ has its cardinality bounded by $Cn^\xi$,
it can be shown that $\mathbb{P}\left(\max_j\Vert\Pi_{j}\boldsymbol{\epsilon}\Vert_2 \le C'n^{-(\gamma-\xi/2)}\right)\to 1$
for some $C'>0$, similarly to Step 1.
Also, Lemma 3 from \citet{fan2008} implies that
$\mathbb{P}\left(\sigma^{-2}\cdot\Vert\boldsymbol{\epsilon}\Vert_2^2 < 1-\omega\right)\to 0$ for any $\omega\in(0, 1)$.
Combining these observations with (A1) and (A4), we derive that
$1-a_j^y=\Vert(\mathbf{I}_n-\Pi_{j})\boldsymbol{\epsilon}\Vert_2^2/\Vert\mathbf{y}\Vert_2^2\ge Cn^{-\delta}$
with probability tending to 1, and eventually we have $\Lambda_j\ge C'n^{-\delta/2}$ from (A5).
Therefore, if $\mathcal{S}\subseteq\mathcal{C}_j$ for some $j\notin\mathcal{S}$,
its corresponding tilted correlation satisfies
$n^\mu\cdot c_j^*(\Lambda_j)\le n^\mu\cdot Cn^{-(\gamma-\delta/2)}\to 0$ on $\mathcal{E}_0$.
In the case of $\mathcal{S}\nsubseteq\mathcal{C}_j$, we can derive from (A6) that for such $j$,
$\Vert(\mathbf{I}_n-\Pi_{j})\mathbf{y}\Vert_2^2/\Vert\mathbf{y}\Vert_2^2=1-a_j^y\gg n^{-\kappa}$,
which, combined with (A5), implies that $\Lambda_j\gg n^{-\kappa/2}$.
Then the following holds for such $j$ on $\mathcal{E}_0$:
$n^\mu\cdot|c_j^*(\Lambda_j)|\ge n^\mu\cdot C|\beta_j|\to\infty$ if $j\in\mathcal{S}$,
while $n^\mu\cdot c_j^*(\Lambda_j)\le n^\mu\cdot Cn^{-(\gamma-\kappa/2)}\to 0$ if $j\notin\mathcal{S}$.
\end{description}
\item[Step 3]
We now consider those $j\in\mathcal{J}$ for which $\mathcal{S}\setminus\{j\}\nsubseteq\mathcal{C}_j$ and
consequently the corresponding term $I \ne 0$ in general.
From (A3) and Condition \ref{cond:one}, we derive that for each $j$,
there exists some $C>0$ satisfying the following for all $k\in\mathcal{S}\setminus\mathcal{C}_j, \ k\ne j$,
\begin{eqnarray}
|X_j^T(\mathbf{I}_n-\Pi_{j})X_k|\le|X_j^TX_k|+|(\Pi_{j} X_j)^TX_k|\le Cn^{-\gamma}.
\label{appendix:one:step:three}
\end{eqnarray}
Then from (A1) and (A4), we can bound $I$ as $|I|\le C'n^{-(\gamma-\delta)}$.
Also when $\mathcal{S}\setminus\{j\}\nsubseteq\mathcal{C}_j$, (A5)--(A6) imply that $\Lambda_j\gg n^{-\kappa/2}$.
In summary, we can show that the following claims hold on $\mathcal{E}_0$, similarly as in Step 2:
if $j\notin\mathcal{S}$, with either of the rescaling factors,
$n^\mu\cdot c_j^*(\lambda_j) \le n^\mu\cdot Cn^{-(\gamma-\delta-\kappa/2)}\to 0$,
whereas if $j\in\mathcal{S}$, its coefficient satisfies $|\beta_j|\gg n^{-\mu}$ and
therefore $n^\mu\cdot|c_j^*|\ge n^\mu\cdot C|\beta_j|\to\infty$ with $c_j^*(\lambda_j)/\beta_j\to 1$ for $j\in\mathcal{S}$.
$\square$
\end{itemize}
\subsection{An example satisfying Condition \ref{cond:one}}
\label{appendix:one:ex}
In this section, we verify the claim made in Section \ref{sec:scen:one},
which states that Condition \ref{cond:one} holds with probability tending to 1
when each column $X_j$ is generated independently as a random vector on an $n$-dimensional unit sphere.
We first introduce a result from modern convex geometry reported in Lecture 2 of \citet{ball1997},
which essentially implies that, as the dimension $n$ grows,
it is not likely for any two vectors on an $n$-dimensional unit sphere to be within a close distance to each other.
\begin{lem}
Let $S^{n-1}$ denote the surface of the Euclidean ball $B_2^n=\{\mathbf{x}\in\mathbb{R}^n: \ \sum_{i=1}^nx_i^2\le 1\}$
and $\mathbf{u}\in\mathbb{R}^n$ be a vector on $S^{n-1}$ such that $\Vert\mathbf{u}\Vert_2=1$.
Then the proportion of the spherical cone defined as $\{\mathbf{v}\in S^{n-1}: \ |\mathbf{u}^T\mathbf{v}|\ge \omega\}$ for any $\mathbf{u}$
is bounded from above by $\exp(-n\omega^2/2)$.
\label{lem:one}
\end{lem}
We first note that any $X_k, \ k\ne j$ can be decomposed as the summation of
its projection onto $X_j$ and the remainder, i.e., $X_k=c_{j, k}X_j+(\mathbf{I}_n-X_jX_j^T)X_k$.
Then
\[
(\Pi_{j} X_j)^TX_k=c_{j, k}(\Pi_{j} X_j)^TX_j+\left((\mathbf{I}_n-X_jX_j^T)\Pi_{j} X_j\right)^TX_k,
\]
and for $k\in\mathcal{S}\setminus\mathcal{C}_j, \ k\ne j$, the first summand is bounded from above by $a_j\cdot|c_{j, k}|\le C_1n^{-\gamma}$.
As for the second summand, note that
\[
\Vert(\mathbf{I}_n-X_jX_j^T)\Pi_{j} X_j\Vert_2^2=(\Pi_{j} X_j)^T(\mathbf{I}_n-X_jX_j^T)\Pi_{j} X_j=a_j(1-a_j),
\]
and thus $\mathbf{w}=\left\{a_j(1-a_j)\right\}^{-1/2}\cdot(\mathbf{I}_n-X_jX_j^T)\Pi_{j} X_j$ satisfies $\mathbf{w}\in S^{n-1}$.
Then the probability of $|\mathbf{w}^TX_k|>Cn^{-\gamma}$ for any $k\in\mathcal{S}\setminus\mathcal{C}_j, \ k\ne j$ is
bounded from above by the proportion of the spherical cone
$\left\{X_k\in S^{n-1}: \ |\mathbf{w}^TX_k|>Cn^{-\gamma} \right\}$ in the unit sphere $S^{n-1}$.
Applying Lemma \ref{lem:one},
we can show that such proportion is bounded by $\exp\left(-C^2n^{1-2\gamma}/2\right)$ for each $j$ and $k$.
Therefore, we can find some $C>0$ satisfying
\[
\mathbb{P}\left(\max_{j\in\mathcal{J}; \ k\in\mathcal{S}\setminus\mathcal{C}_j, \ k\ne j}|(\Pi_{j} X_j)^TX_k| \le Cn^{-\gamma}\right)\ge
1-p|\mathcal{S}|\exp\left(-C'n^{1-2\gamma}/2\right),
\]
where the right-hand side converges to 1 from assumptions (A1)--(A2).
\section{Proof of Theorem \ref{thm:two}}
\label{appendix:two}
For those $j\in\mathcal{K}=\mathcal{S}\cup\left\{\cup_{k\in\mathcal{S}}\mathcal{C}_k\right\}$,
Condition \ref{cond:two} implies that $\mathcal{C}_k\cap\mathcal{C}_j=\emptyset$ if $k\in\mathcal{S}\setminus\mathcal{C}_j$.
Then from (A3), we have $\Vert\Pi_{j} X_k\Vert_2\le Cn^{-(\gamma-\xi/2)}$ and therefore
\begin{eqnarray*}
\left\vert X_j^T(\mathbf{I}_n-\Pi_{j})X_k \right\vert=\left\vert X_j^TX_k-(\Pi_{j} X_j)^T\Pi_{j} X_k\right\vert
\le Cn^{-\gamma}+C'n^{-(\gamma-\xi/2)},
\end{eqnarray*}
which leads to
\begin{eqnarray}
\left\vert\sum_{k\in\mathcal{S}\setminus\mathcal{C}_j, k\ne j}\beta_kX_j^T(\mathbf{I}_n-\Pi_{j})X_k\right\vert
=O\left(n^{-(\gamma-\delta-\xi/2)}\right)
\label{cond:two:eq}
\end{eqnarray}
for all $j\in\mathcal{K}$.
Using Step 1 of Appendix \ref{appendix:one}, we derive that
\[
\mathcal{E}_{01}=\left\{\max_{j\in\mathcal{K}}\left\vert
\sum_{k\in\mathcal{S}\setminus\mathcal{C}_j, k\ne j}\beta_kX_j^T(\mathbf{I}_n-\Pi_{j})X_k+X_j^T(\mathbf{I}_n-\Pi_{j})\boldsymbol{\epsilon}\right\vert
\le Cn^{-(\gamma-\delta-\xi/2)}\right\}
\]
satisfies $\mathbb{P}(\mathcal{E}_{01})=\mathbb{P}(\mathcal{E}_0)\to 1$.
Since $\mu+\kappa/2<\gamma-\delta-\xi/2$, we have $n^\mu\cdot c_j^*\to 0$ for $j\notin\mathcal{S}$ on $\mathcal{E}_{01}$,
whereas $n^\mu\cdot|c_j^*|\to\infty$ and $c_j^*(\lambda_j)/\beta_j\to 1$ for those $j\in\mathcal{S}$.
Therefore the dominance of tilted correlations for $j\in\mathcal{S}$ over those for $j\in\mathcal{K}\setminus\mathcal{S}$ follows.
$\square$
\section{Proof of Theorem \ref{thm:three}}
\label{appendix:three}
Compared to Condition \ref{cond:two},
Condition \ref{cond:three} does not require any restriction on $\mathcal{C}_j\cap\mathcal{C}_k$ when both $X_j$ and $X_k$ are relevant,
although it has an additional assumption (C2).
Since $n^\mu\cdot|\beta_j|(1-a_j)\to\infty$ for $j\in\mathcal{S}$ from (A4)--(A5),
(C2) implies that for any $j\in\mathcal{S}$, non-zero coefficients $\beta_k, \ k\in\mathcal{S}\setminus\mathcal{C}_j$
do not cancel out all the summands in the following to 0,
\begin{eqnarray}
X_j^T(\mathbf{I}_n-\Pi_{j})\mathbf{X}_\mathcal{S}\boldsymbol{\beta}_\mathcal{S}=\beta_j(1-a_j)+\sum_{k\in\mathcal{S}\setminus\mathcal{C}_j, k\ne j}\beta_kX_j^T(\mathbf{I}_n-\Pi_{j})X_k.
\nonumber
\end{eqnarray}
If (\ref{cond:two:eq}) in Appendix \ref{appendix:two} holds, (C2) follows and therefore it can be
seen that Condition \ref{cond:two} is stronger than Condition \ref{cond:three}.
On the event $\mathcal{E}_0$ (Step 1 of Appendix \ref{appendix:one}),
$|X_j^T(\mathbf{I}_n-\Pi_{j})\mathbf{y}| \gg n^{-\mu}$ for $j\in\mathcal{S}$ under (C2) and therefore
the tilted correlations of relevant variables satisfy $|c_j^*| \gg n^{-\mu}$ with either of the rescaling factors.
In contrast, for $j\in\mathcal{K}\setminus\mathcal{S}$, we can use the arguments in Appendix \ref{appendix:two}
to show that $n^\mu\cdot c_j^*\to 0$.
$\square$
\section{Study of the assumptions (A5) and (A6)}
\label{append:one}
In this section, we show that the assumptions (A5) and (A6) are satisfied
under the following condition from \citet{wang2009}.
Let $\lambda_*(\mathbf{A})$ and $\lambda^*(\mathbf{A})$ represent the smallest and the largest eigenvalues of an arbitrary positive definite matrix $\mathbf{A}$, respectively.
\begin{itemize}
\item Both $\mathbf{X}$ and $\boldsymbol{\epsilon}$ follow normal distributions.
\item There exist two positive constants $0<\tau_*<\tau^*<\infty$ such that
$\tau_*<\lambda_*(\boldsymbol{\Sigma}) \le \lambda^*(\boldsymbol{\Sigma}) < \tau^*$, where $\mbox{cov}(\mathbf{x}_i)=\boldsymbol{\Sigma}$ for $i=1, \ldots, n$.
\end{itemize}
Then, \citet{wang2009} showed that there exists $\eta\in(0, 1)$ satisfying
\begin{eqnarray}
\tau_* \le \min_{\mathcal{D}}\lambda_*(\mathbf{X}_{\mathcal{D}}^T\mathbf{X}_{\mathcal{D}}) \le \max_{\mathcal{D}}\lambda^*(\mathbf{X}_{\mathcal{D}}^T\mathbf{X}_{\mathcal{D}}) \le \tau^*
\label{lem:wang}
\end{eqnarray}
with probability tending to 1, for any $\mathcal{D}\subset\{1, \ldots, p\}$ with $|\mathcal{D}| \le n^{\eta}$.
We use the result from (\ref{lem:wang}) in the following arguments.
\begin{itemize}
\item[(A5)]
Recalling the notations $\mathbf{\tilde{X}}_{j}=\mathbf{X}_{\mathcal{C}_j}$ and $\Pi_{j}=\mathbf{\tilde{X}}_{j}(\mathbf{\tilde{X}}_{j}^T\mathbf{\tilde{X}}_{j})^{-1}\mathbf{\tilde{X}}_{j}^T$, we have
\begin{eqnarray*}
1-X_j^T\Pi_{j} X_j = \left\Vert X_j - \mathbf{\tilde{X}}_{j}(\mathbf{\tilde{X}}_{j}^T\mathbf{\tilde{X}}_{j})^{-1}\mathbf{\tilde{X}}_{j}^TX_j \right\Vert^2_2.
\end{eqnarray*}
We let $\boldsymbol{\theta}=(\mathbf{\tilde{X}}_{j}^T\mathbf{\tilde{X}}_{j})^{-1}\mathbf{\tilde{X}}_{j}^TX_j$ and assume that $\xi$ from assumption (A3) satisfies $\xi \le \eta$ such that, by applying (\ref{lem:wang}), we obtain the following;
\begin{eqnarray*}
&&1-X_j^T\Pi_{j} X_j = (1, -\boldsymbol{\theta}^T) \left(X_j, \mathbf{\tilde{X}}_{j}\right)^T\left(X_j, \mathbf{\tilde{X}}_{j}\right)(1, -\boldsymbol{\theta}^T)^T
\\
&\ge&
(1, -\boldsymbol{\theta}^T) \lambda_*\left((X_j, \mathbf{\tilde{X}}_{j})^T(X_j, \mathbf{\tilde{X}}_{j})\right)(1, -\boldsymbol{\theta}^T)^T
\ge
(1+\Vert\boldsymbol{\theta}\Vert^2_2)\tau_* \ge \tau_*>0.
\end{eqnarray*}
\item[(A6)]
We note the link between (A6) and the asymptotic identifiability condition
for high-dimensional problems first introduced in \citet{chen2008}.
The condition can be re-written as
\begin{eqnarray}
\lim_{n\to\infty}\min_{\mathcal{D}\subset\mathcal{J}, |\mathcal{D}|\le|\mathcal{S}|, \mathcal{D}\ne\mathcal{S}}
n(\log n)^{-1}\cdot \frac{\Vert(\mathbf{I}_n-\Pi_\mathcal{D})\mathbf{X}_\mathcal{S}\boldsymbol{\beta}_\mathcal{S}\Vert_2^2}{\Vert\mathbf{X}_\mathcal{S}\boldsymbol{\beta}_\mathcal{S}\Vert_2^2} \to\infty,
\label{asym:iden}
\end{eqnarray}
after taking into account the column-wise normalisation of $\mathbf{X}$.
Although the rate $n^\kappa$ is less favourable than $n(\log n)^{-1}$,
following exactly the same arguments as in Section 3 of \citet{chen2008},
we are able to show that (A6) is implied by the condition in (\ref{lem:wang}).
That is, letting $\boldsymbol{\theta}=(\mathbf{\tilde{X}}_{j}^T\mathbf{\tilde{X}}_{j})^{-1}\mathbf{\tilde{X}}_{j}^T\mathbf{X}_\mathcal{S}\boldsymbol{\beta}_\mathcal{S}$, we have
\begin{eqnarray}
&& n^\kappa\cdot \frac{\Vert(\mathbf{I}_n-\Pi_{j})\mathbf{X}_\mathcal{S}\boldsymbol{\beta}_\mathcal{S}\Vert_2^2}{\Vert\mathbf{X}_\mathcal{S}\boldsymbol{\beta}_\mathcal{S}\Vert_2^2}
\ge n^\kappa\inf_{j\notin\mathcal{S}}\frac{\Vert
\mathbf{X}_{\mathcal{S}\cap\mathcal{C}_j^c}\boldsymbol{\beta}_{\mathcal{S}\cap\mathcal{C}_j^c}-\mathbf{\tilde{X}}_{j}\boldsymbol{\theta}\Vert_2^2}{\Vert\mathbf{X}_\mathcal{S}\boldsymbol{\beta}_\mathcal{S}\Vert_2^2}
\nonumber \\
&\ge& Cn^{\kappa-2\delta}\inf_{j\notin\mathcal{S}}\left\{\left(\boldsymbol{\beta}^T_{\mathcal{S}\cap\mathcal{C}_j^c}, -\boldsymbol{\theta}^T\right)
\mathbf{X}_{\mathcal{S}\cup\mathcal{C}_j}^T\mathbf{X}_{\mathcal{S}\cup\mathcal{C}_j}\left(\boldsymbol{\beta}^T_{\mathcal{S}\cap\mathcal{C}_j^c}, -\boldsymbol{\theta}^T\right)^T\right\}
\nonumber \\
&\ge& Cn^{\kappa-2\delta}\lambda_*\left(\mathbf{X}_{\mathcal{S}\cup\mathcal{C}_j}^T\mathbf{X}_{\mathcal{S}\cup\mathcal{C}_j}\right)\Vert\boldsymbol{\beta}_{\mathcal{S}\cap\mathcal{C}_j^c}\Vert_2^2
\label{eq:one}
\end{eqnarray}
for some positive constant $C$, where the second inequality is derived under the assumptions (A1) and (A4).
Then a constraint can be imposed on the relationship between $\kappa$, $\delta$ and $\xi$ such that the right-hand side of the above (\ref{eq:one}) diverges to infinity.
\end{itemize}
\begin{figure}
\centering
\begin{tabular}{ccc}
\epsfig{file=roc_f2n100p500r3.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_f2n100p500r6.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_f2n100p500r9.eps, width=0.33\linewidth,clip=}
\\
\epsfig{file=roc_f2n100p1000r3.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_f2n100p1000r6.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_f2n100p1000r9.eps, width=0.33\linewidth,clip=}
\\
\epsfig{file=roc_f2n100p2000r3.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_f2n100p2000r6.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_f2n100p2000r9.eps, width=0.33\linewidth,clip=}
\end{tabular}
\caption{\small{ROC curves for the simulation model (A) with $n=100$:
TCS1 (black empty circle), TCS2 (black filled circle), FR (red empty square), FS (red filled square),
Lasso (green crossed circle), PC-simple algorithm (magenta two triangles), MC+ (blue empty triangle),
SCAD (blue filled triangle) and FLASH (blue reversed triangle); FPR$=2.5|\mathcal{S}|/p$ (vertical dotted);
first row: $p=500$, second row: $p=1000$, third row: $p=2000$;
first column: $R^2=0.3$, second column: $R^2=0.6$, third column: $R^2=0.9$.}}
\label{fig:roc:a}
\end{figure}
\begin{figure}
\centering
\begin{tabular}{ccc}
\epsfig{file=roc_f20n100p500r3.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_f20n100p500r6.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_f20n100p500r9.eps, width=0.33\linewidth,clip=}
\\
\epsfig{file=roc_f20n100p1000r3.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_f20n100p1000r6.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_f20n100p1000r9.eps, width=0.33\linewidth,clip=}
\\
\epsfig{file=roc_f20n100p2000r3.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_f20n100p2000r6.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_f20n100p2000r9.eps, width=0.33\linewidth,clip=}
\end{tabular}
\caption{\small{ROC curves for the simulation model (C) with $n=100$.}}
\label{fig:roc:c}
\end{figure}
\begin{figure}
\centering
\begin{tabular}{cc}
\epsfig{file=roc_sis4rho5n100p1000.eps, width=0.45\linewidth,clip=} &
\epsfig{file=roc_sis4rho95n100p1000.eps, width=0.45\linewidth,clip=}
\\
\epsfig{file=roc_sis5rho5n100p1000.eps, width=0.45\linewidth,clip=} &
\epsfig{file=roc_sis5rho95n100p1000.eps, width=0.45\linewidth,clip=}
\end{tabular}
\caption{\small{ROC curves for the simulation models (D) (first row) and (E) (second row) with $n=100$;
first column: $\varphi=0.5$, second column: $\varphi=0.95$.}}
\label{fig:roc:de}
\end{figure}
\begin{figure}
\centering
\begin{tabular}{ccc}
\epsfig{file=roc_realp1000r3.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_realp1000r6.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_realp1000r9.eps, width=0.33\linewidth,clip=}
\\
\epsfig{file=roc_realp2000r3.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_realp2000r6.eps, width=0.33\linewidth,clip=} &
\epsfig{file=roc_realp2000r9.eps, width=0.33\linewidth,clip=}
\end{tabular}
\caption{\small{ROC curves for the simulation model (F) with $n=72$;
first row: $p=1000$, second row: $p=2000$;
first column: $R^2=0.3$, second column: $R^2=0.6$, third column: $R^2=0.9$.}}
\label{fig:roc:f}
\end{figure}
\end{document}
\begin{document}
\title[On a smoothness result]{On Smoothness of the elements of some integrable Teichm\"uller spaces}
\author{Vincent Alberge and Melkana Brakalova}
\begin{abstract}
In this paper we focus on the integrable Teichm\"uller spaces $\tei{p}$ ($p>0$) which are subspaces of the symmetric subspace of the universal Teichm\"uller space. We prove that any element of $\tei{p}$ for $0<p\leq 1,$ is a $\mathcal{C}^1$-diffeomorphism.
\end{abstract}
\subjclass[2010]{30C62, 30C99, 30F60}
\keywords{Integrable Teichm\"uller spaces, module, reduced module, symmetric and quasisymmetric mappings}
\maketitle
\section{Introduction}
The universal Teichm\"uller space $\utei$ is the space of \emph{quasisymmetric} homeomorphisms of the unit circle $\mathbb{S}^1$ fixing $1$, $i$, and $-1$. A mapping $f: \mathbb{S}^1\rightarrow \mathbb{S}^1$ is said to be quasisymmetric if there exists $M>0$ such that
$$
\forall \theta\in\mathbb{R},\forall t>0,\; \frac{1}{M}\leq\left|\frac{f(e^{i(\theta+t)})-f(e^{i\theta})}{f(e^{i\theta})-f(e^{i(\theta-t)})}\right|\leq M .
$$
Due to a well-known result by Ahlfors and Beurling \cite{beurling&ahlfors} one can give an equivalent description of $\utei$. More precisely, the universal Teichm\"uller space can be defined as the set of \emph{Teichm\"uller equivalence classes} of \emph{quasiconformal mappings} of the unit disc $\dd$ fixing $1$, $i$, and $-1$ where two such mappings are Teichm\"uller equivalent if they coincide on $\mathbb{S}^1$. A mapping $F:D \rightarrow F(D),$ where $D\subset \mathbb C$ is a domain, is called quasiconformal (or q.c. for short) if it is an orientation-preserving homeomorphism and if its distributional derivatives $\partial_z F$ and $\partial_{\overline{z}}F$ can be represented by locally square integrable functions (also denoted by $\partial_z F$ and $\partial_{\overline{z}}F$) on $D$ such that
$$
\left\Vert \frac{\partial_{\overline{z}}F}{\partial_z F} \right\Vert_{\infty} = \underset{z\in D}{\textrm{ ess.sup}}\left| \frac{\partial_{\overline{z}}F\left(z\right)}{\partial_z F\left( z \right)} \right| <1.
$$
We also recall that for $z=x+iy$, $\partial_{\overline{z}}=\tfrac{1}{2}(\partial_{x}+i\partial_{y})$ and $\partial_{z}=\tfrac{1}{2}(\partial_{x}-i\partial_{y})$. Furthermore, if $F$ is a quasiconformal mapping, the function $\mu_F =\tfrac{\partial_{\overline{z}}F}{\partial_z F},$ defined a.e., is called the \emph{Beltrami coefficient} associated with $F$. By the measurable Riemann mapping theorem, if a measurable function $\mu$ on $D$ is such that $\left\Vert \mu \right\Vert_{\infty}<1$, then it is the Beltrami coefficient of some quasiconformal mapping, which we will denote here by $F^{\mu}$.
Let us now introduce an important subspace of $\utei$, namely, the \emph{symmetric Teichm\"uller space} denoted here by $\stei$. Following a terminology introduced by Gardiner and Sullivan \cite{gardiner&sullivan}, it is the space of \emph{symmetric} homeomorphism of $\mathbb{S}^1$ fixing $1$, $i$, and $-1$. One recalls that $f:\mathbb{S}^1\rightarrow \mathbb{S}^1$ is symmetric if it is an orientation-preserving homeomorphism of $\mathbb{S}^1$ such that
\begin{equation}\label{eq:0}
\frac{f(e^{i(\cdot+t)})-f(e^{i\cdot})}{f(e^{i\cdot})-f(e^{i(\cdot-t)})} \underset{t\rightarrow 0^+}{\longrightarrow} 1,
\end{equation}
with respect to the uniform convergence on $\mathbb{R}$. As for the universal Teichm\"uller space one has an equivalent description of such a space that involves quasiconformal mappings. Indeed, Gardiner and Sullivan proved (see Theorem 2.1 in \cite{gardiner&sullivan}) that $\stei$ corresponds to the space of Teichm\"uller equivalent classes of quasiconformal mappings of $\dd$ fixing $1$, $i$, and $-1$ admitting a representative which is \emph{asymptotically conformal} on $\mathbb{S}^1$. Let us recall that a quasiconformal mapping $F: \dd \rightarrow \dd$ is said to be asymptotically conformal on $\mathbb{S}^1$ if for every $\epsilon>0$, there exists a compact subset $K_{\epsilon}$ of $\dd$ such that for any $z\in \dd\setminus K_{\epsilon}$, $\left| \mu_{F}(z)\right| <\epsilon$.
Here we focus on some interesting infinite dimensional subspaces of $\utei$, the $p$-\emph{integrable Teichm\"uller spaces}, which we define for each $p>0$ as the set
$$
\tei{p}=\left\lbrace f\in\utei\mid \exists F:\dd\rightarrow \dd, \textrm{ q.c. such that } F_{\vert_{\mathbb{S}^1}} = f \textrm{ and }\mu_F \in\inthyp{p} \right\rbrace,
$$
where $\sigma$ is the hyperbolic measure on $\dd$, that is, for any $z=x+iy \in\dd$, $d\sigma (z) = (1-\left| z \right|^2)^{-2}dxdy$. It is elementary to observe from such a definition that if $q>p>0$, then $\tei{p} \subset \tei{q}$. The spaces $\tei{p}$, $p\geq 2$, were first introduced by Guo \cite{guo} through an equivalent description involving univalent functions. At about the same time, Cui \cite{cui} studied the case $p=2$ and gave a few important characterizations of the elements of $\tei{2}$. In particular, he proved that the Beltrami coefficient associated with the \emph{Douady--Earle extension} (see \cite{douady&earle}) of any element of $\tei{2}$ belongs to $\inthyp{2}$. Later on, Takhtajan and Teo \cite{takhtajan&teo} introduced a Hilbert manifold structure on the universal Teichm\"uller space that makes the space $\tei{2}$ the connected component of the identity mapping $\id_{\mathbb{S}^1}$. With respect to such a structure, they proved that the so-called \emph{Weil--Petersson} metric is a Riemannian metric on $\utei$. Following Takhtajan and Teo's work, the space $\tei{2}$ is now referred to as the \emph{Weil--Petersson Teichm\"uller space}. For further results on $\tei{2}$ we refer to \cite{shen}. Let us point out that one can obtain $\tei{2}\subset\stei$ by combining \cite[Theorem 2 and Lemma 2]{cui} and \cite[Theorem 4]{earle&markovic&saric}, see \cite[Section 3]{fan&hu} for a more detailed explanation. One can also mention the paper \cite{tang} by Tang where in particular, Cui's result concerning the Douady--Earle extension is extended to all spaces $\tei{p}$ with $p\geq 2$. Recently, the second author of this paper proved in \cite{brakalovaxx} that $\tei{2}\subset\stei$ using an approach based on module techniques and the so-called \emph{Teichm\"uller's Modulsatz} (see \cite[\S 4]{T13}), and later on using a different method she proved that for any $p>0$, $\tei{p}\subset \stei$ (see \cite{brakalova18}).
In this paper we only deal with $\tei{p}$ for $0<p\leq 1$ and we give a proof of the following result:
\begin{theorem}\label{main-result}
Let $p\leq1$. Then, any element of $\tei{p}$ is a $\mathcal{C}^1$-diffeomorphism.
\end{theorem}
The strategy of the proof takes advantage of an approach used by the second author of this paper and J. A. Jenkins \cite{brakalova&jenkins02}, modified to the case of the unit disc. We first use the \emph{Teichm\"uller--Wittich--Bellinski\u{\i}} theorem to show that each element of $\tei{1}$ has a non-vanishing derivative at each point of $\mathbb{S}^1$. Then, we use properties of the \emph{reduced module} of a simply-connected domain to show that the derivatives of the elements of $\tei{1}$ are continuous. As mentioned earlier, since $\tei{p} \subset \tei{1}$ for $0<p\leq 1$, it follows immediately that for $0<p\leq 1$, any element in $\tei{p}$ is continuously differentiable with non-vanishing derivative.
\section{Background}
In this section we recall some classic notions from geometric function theory. Such notions are most notably and thoroughly investigated in Teichm\"uller's \emph{Habilitationsschrift} (Habilitation Thesis) \cite{T13}.
\subsection{Module of a doubly-connected domain} Let $D$ be a (non-degenerate) doubly-connected domain of the extended complex plane, that is, the complement of $D$ is an union of two disjoint simply-connected domains, each bounded by a Jordan curve. It is well known (see \cite{lehto&virtanen,T13}) that there exists a biholomorphic function that maps $D$ onto an annulus of inner radius $r_2$ and outer radius $r_1$ for some $0<r_2<r_1<\infty$. The \emph{module} $\Mod{D}$ of $D$ is $\ln\left( \tfrac{r_1}{r_2}\right)$. It is a \emph{conformal invariant}, namely, if $\Psi : D \rightarrow \Psi(D)$ is a biholomorphic function, then $\Mod{D}=\Mod{\Psi(D)}$.
It is also well known (see \cite{lehto&virtanen,T13}) that the module is \emph{superadditive}. More precisely, if $D_1$ and $D_2$ are two disjoint doubly-connected subdomains of a doubly-connected domain $D_3$, where each separates some $z_0 \in\mathbb{C}$ from $\infty$, then
\begin{equation}\label{eq:21}
\Mod{D_1}+\Mod{D_2}\leq \Mod{D_3}.
\end{equation}
In saying that a doubly-connected domain separates $z_0$ from $\infty$, we mean that one component of its complement contains $z_0$ in its interior while the other component contains $\infty$.
Let us now recall two inequalities that will be used in the proof of the main result. For $0<r_2<r_1$ and $\zeta \in\mathbb{C}$ we set $A_{\zeta,r_2 , r_1}=\left\lbrace z \mid r_2 < \left| z -\zeta \right| < r_1\right\rbrace$. Let $F:A_{\zeta,r_2 , r_1} \rightarrow F\left(A_{\zeta,r_2 , r_1} \right)$ be a quasiconformal mapping. Then setting $z=\zeta+re^{i\theta}, r_2<r<r_1$ we have
\begin{equation}\label{eq:22}
\Mod{F\left(A_{\zeta , r_2 , r_1}\right)}\leq \frac{1}{2\pi}\iint_{A_{\zeta , r_2 , r_1}}{\frac{1+\left| \mu_{F}(z)\right|}{1-\left| \mu_{F}(z)\right|}\cdot\frac{dxdy}{\left| z-\zeta\right|^2}},
\end{equation}
and
\begin{equation}\label{eq:23}
2\pi\int_{r_2}^{r_1}{\frac{1}{\int_{0}^{2\pi}{\frac{1+\left| \mu_{F}\left(z \right)\right|}{1-\left| \mu_{F}\left(z \right)\right|}d\theta}}\cdot\frac{dr}{r}}\leq \Mod{F\left(A_{\zeta , r_2 , r_1}\right)}.
\end{equation}
These estimates could be obtained following Teichm\"uller's approach based on the \emph{length-area method} in \cite[\S 6.3]{T13}, where he arrived at weaker versions of (\ref{eq:22}) and (\ref{eq:23}). Estimates equivalent to (\ref{eq:22}) and (\ref{eq:23})---some proved under more general assumptions and different methods---can be found in \cite{reich&walczak,gutlyanskii&martio,brakalova10} and others.
\subsection{Reduced module of a simply-connected domain} Let $\Omega$ be a simply-connected domain of the complex plane different from $\mathbb{C}$. Let $\zeta \in \Omega$. For $r>0$, let $D(\zeta,r)$ denote the disc of radius $r$ centered at $\zeta$ and let $0<r_2<r_1$ be small enough so that $D(\zeta,r_1)\subset \Omega$. From (\ref{eq:21}) it follows that
$$
\Mod{\Omega\setminus D(\zeta,r_1)} + \ln\left( \frac{r_1}{r_2}\right) \leq \Mod{\Omega\setminus D(\zeta,r_2)},
$$
and therefore
$$
\Mod{\Omega\setminus D(\zeta,r_1)} +\ln\left( r_1\right) \leq \Mod{\Omega\setminus D(\zeta,r_2)} +\ln\left( r_2\right).
$$
One defines the reduced module $\modred{\Omega}{\zeta}$ of $\Omega$ at $\zeta$ as $\lim_{r\rightarrow 0} \Mod{\Omega \setminus D(\zeta,r)}+\ln(r)$. Using, for example, the \emph{Koebe distortion theorem} one can show that this limit is finite and $\modred{\Omega}{\zeta} = \ln\left( \left| \Psi^{\prime}(0)\right|\right)$, where $\Psi : \dd \rightarrow \Omega$ is a biholomorphic function mapping $0$ onto $\zeta$. A detailed proof can be found in \cite[\S 1.6]{T13}. From here it follows directly that $\zeta\mapsto \modred{\Omega}{\zeta}$ is continuous.
Before concluding this subsection let us add one more property of the reduced module that we will use later.
If $F:\mathbb{C}\rightarrow \mathbb{C}$ is a homeomorphism then, for any $r>0,$ the function $\zeta\mapsto \modred{F\left( D(\zeta,r) \right)}{F(\zeta)}$ is continuous. Indeed, if $\zeta_n \underset{n\rightarrow \infty}{\longrightarrow}\zeta$, then by applying a sequence of biholomorphic functions $z\mapsto F(z+\zeta_n-\zeta)-F(\zeta_n)+F(\zeta), z\in D(\zeta,r),$ one obtains a sequence of domains $D_n,$ which are all images of $D(\zeta,r)$. Since $F(z)$ is a homeomorphism it follows that $D_n \underset{n\rightarrow \infty}{\longrightarrow} F\left( D(\zeta,r)\right)$ (with respect to the topology induced by the Hausdorff distance on the set of subsets of $\mathbb{C}$). Consider the sequence of biholomorphic functions $\Psi_n : \mathbb{D} \rightarrow D_n$ mapping $0$ onto $F(\zeta),$ normalized by $\Psi_n'(0)>0$. Then for any $n$, $\ln\left( \Psi_{n}^{\prime}(0)\right)=\modred{D_n}{F(\zeta)}=\modred{F\left( D(\zeta_n,r) \right)}{F(\zeta_n)}$ since a translation does not change the reduced module. Furthermore, the sequence of functions $\Psi_n$ forms a normal family and thus, up to a subsequence, $\Psi_n$ converges uniformly (on any compact subset of $\mathbb{D}$) to a biholomorphic function $\Psi_{\infty}: \mathbb{D}\rightarrow F\left( D(\zeta,r) \right)$ mapping $0$ onto $F(\zeta)$. This implies
\begin{align*}
\modred{F\left(D(\zeta,r)\right)}{F(\zeta)} & =\ln\left( \Psi_{\infty}^{\prime}(0)\right) \\ & = \lim_{n\rightarrow \infty}\ln\left( \Psi_{n}^{\prime}(0) \right) \\ & =\lim_{n\rightarrow \infty}\modred{F\left( D(\zeta_n,r) \right)}{F(\zeta_n)},
\end{align*}
and thus we have continuity.
\subsection{Teichm\"uller--Wittich--Bellinski\u{\i} theorem} First, let us recall that a mapping $F:\mathbb{C}\rightarrow\mathbb{C}$ is said to be \emph{conformal} at $z_0 $ if $\lim_{z\rightarrow z_0}{\tfrac{F(z)-F(z_0)}{z-z_0}}$ exists and is different from $0$. Following \cite[Chapter V, Theorem 6.1]{lehto&virtanen} the well-known Teichm\"uller--Wittich--Bellinski\u{\i} theorem can be stated as follows:
\begin{theorem}\label{theorem:1}
Let $D$ be a domain of the complex plane and let $z_0\in D$. Let $F:D\rightarrow F(D)$ be a quasiconformal mapping. If there exists a neighborhood $\mathcal{U}$ of $z_0$ contained in $D$ such that
$$
\iint_{\mathcal{U}}{\frac{\left|\mu_{F}(z)\right|}{\left| z-z_0 \right|^2}dxdy}<\infty;
$$
then $F$ is conformal at $z=z_0$.
\end{theorem}
The history of this theorem and its extensions is rather long and we may refer the curious reader to some of the following papers \cite{belinskii,reich&walczak,drasin,brakalova&jenkins94,gutlyanskii&martio,brakalova10,shishikura} and to \cite{alberge&brakalova&papadopoulos}.
\section{Proof of the main result}\label{sec:3}
Let $f\in \tei{1}$. By definition, there exists a quasiconformal extension $F$ of $f$ to the closed unit disc such that
\begin{equation}\label{eq:3}
\iint_{\dd}{\left|\mu_{F}(z)\right| d\sigma(z)}<\infty.
\end{equation}
Let $\widetilde{\mu}$ be a function defined on the extended complex plane which coincides with $\mu_F$ on $\dd$ and which is identically $0$ outside the disc. Let $F^{\widetilde{\mu}}$ be the unique quasiconformal mapping of the complex plane with Beltrami coefficient $\widetilde{\mu}$ that fixes $1$, $i$, and $-1$. Therefore, we have $F^{\widetilde{\mu}}_{\vert_{\dd}}=F$ and $F^{\widetilde{\mu}}_{\vert_{\mathbb{S}^1}}=f$.
\begin{claim}\label{claim1}
The quasiconformal mapping $F^{\widetilde{\mu}}$ is conformal at any point of $\mathbb{S}^1$. Therefore, $f$ is a diffeomorphism of $\mathbb{S}^1$.
\end{claim}
We apply Theorem \ref{theorem:1} to derive the conformality of $F^{\widetilde{\mu}}$.
\begin{proof}[Proof of Claim \ref{claim1}]
Let $\zeta_0\in\mathbb{S}^1$. Because of (\ref{eq:3}), one can find a compact subset $K$ of $\dd$ such that
\begin{equation}\label{eq:4}
\iint_{\dd\setminus K}{\left|\mu_{F}(z)\right| d\sigma(z)}<1.
\end{equation}
Let $r>0$ be such that $\dd \setminus D(\zeta_0, r)\subset \dd\setminus K$. One first observes that
\begin{align*}
\forall z \in D(\zeta_0, r)\cap \dd, \; \left(1-\left| z\right|^2 \right)^2& = \left( 1-\left|z\right| \right)^2 \cdot \left( 1 +\left| z\right|\right)^2 \\ & \leq \left| \zeta_0 -z \right|^2 \cdot \left(1+\left| z \right|\right)^2 \\
&< 4\cdot \left| \zeta_0 -z \right|^2,
\end{align*}
and therefore
\begin{equation}\label{eq:5}
\forall z \in D(\zeta_0, r)\cap \dd, \, \frac{1}{\left| z-\zeta_0\right|^2}< 4\cdot \frac{1}{\left( 1-\left|z\right|^2 \right)^2}.
\end{equation}
It follows
\begin{align*}
\iint_{D(\zeta_0,r)}{\frac{\left| \widetilde{\mu} (z)\right|}{\left| z-\zeta_0 \right|^2}dxdy}& = \iint_{D(\zeta_0,r)\cap{\dd}}{\frac{\left| \mu_{F} (z)\right|}{\left| z-\zeta_0 \right|^2}dxdy} \\ & \leq 4 \iint_{D(\zeta_0,r)\cap{\dd}}{\left| \mu_{F} (z)\right| d\sigma(z)}\\
& \leq 4.
\end{align*}
We deduce, by Theorem \ref{theorem:1}, that $F^{\widetilde{\mu}}$ is conformal at $z=\zeta_0$ which proves that $f$ is differentiable at $\zeta_0$ and $\left| f^{\prime}(\zeta_0)\right|>0$. Since this is true for any $\zeta_0 \in \mathbb{S}^1,$ we deduce that $f$ is a diffeomorphism of $\mathbb{S}^1$.
\end{proof}
The following two additional results will be needed in the proof of the continuity of $f^{\prime}$ on $\mathbb{S}^1$.
\begin{claim}\label{claim2}
Let $\epsilon >0$. Then, there exists $r_{\epsilon}>0$ such that
$$
\forall \zeta\in\mathbb{S}^1, \forall 0<\rho_2 <\rho_1\leq r_{\epsilon},\; \left| \Mod{F^{\widetilde{\mu}}\left( A_{\zeta,\rho_2 , \rho_1}\right)} - \ln\left(\frac{\rho_{1}}{\rho_2} \right)\right| < \epsilon.
$$
\end{claim}
\begin{claim}\label{claim3}
Let $\zeta\in\mathbb{S}^1$ and $r>0$. Then,
$$
\lim_{\rho\rightarrow 0}{\Mod{F^{\widetilde{\mu}}\left( A_{\zeta,\rho,r}\right)}+\ln\left( \left| f^{\prime}(\zeta)\right|\rho\right)} = \modred{F^{\widetilde{\mu}}\left( D\left( \zeta, r\right)\right)}{f(\zeta)}.
$$
\end{claim}
\begin{proof}[Proof of Claim \ref{claim2}]
Let $\zeta\in\mathbb{S}^1$ and $0<\rho_2 < \rho_1$. On the one hand, by applying (\ref{eq:22}) one gets
\begin{align}
\Mod{F^{\widetilde{\mu}}\left( A_{\zeta,\rho_2 , \rho_1}\right)} - \ln\left(\frac{\rho_{1}}{\rho_2} \right) & \leq \frac{1}{2\pi}\iint_{A_{\zeta , \rho_2 , \rho_1}}{\frac{1+\left| \widetilde{\mu}(z)\right|}{1-\left| \widetilde{\mu}(z)\right|}\cdot\frac{dxdy}{\left| z-\zeta\right|^2}} - \ln\left(\frac{\rho_{1}}{\rho_2} \right) \nonumber\\
& = \frac{1}{2\pi}\iint_{A_{\zeta , \rho_2 , \rho_1}}{\left(\frac{1+\left| \widetilde{\mu}(z)\right|}{1-\left| \widetilde{\mu}(z)\right|}-1\right)\cdot\frac{dxdy}{\left| z-\zeta\right|^2}} \nonumber\\
& \leq \frac{1}{\pi \left( 1-\left\Vert \mu_F\right\Vert_{\infty} \right)}\iint_{A_{\zeta , \rho_2 , \rho_1}\cap\dd}{\left| \mu_{F}(z)\right|\cdot \frac{dxdy}{\left|z-\zeta \right|^2}}.\label{eq:31}
\end{align}
On the other hand since
$$
\int_{0}^{2\pi}{\frac{1+\left| \widetilde{\mu}\left(z \right)\right|}{1-\left| \widetilde{\mu}\left(z \right)\right|}d\theta}\geq 2\pi ,
$$
by means of (\ref{eq:23}) one obtains
\begin{align}
\Mod{F^{\widetilde{\mu}}\left( A_{\zeta,\rho_2 , \rho_1}\right)} - \ln\left(\frac{\rho_{1}}{\rho_2} \right) & \geq 2\pi\int_{\rho_2}^{\rho_1}{\frac{1}{\int_{0}^{2\pi}{\frac{1+\left| \widetilde{\mu}\left(z \right)\right|}{1-\left| \widetilde{\mu}\left(z \right)\right|}d\theta}}\cdot\frac{dr}{r}} -\ln\left(\frac{\rho_{1}}{\rho_2} \right) \nonumber \\ & = \int_{\rho_2}^{\rho_1}{\frac{2\pi-\int_{0}^{2\pi}{\frac{1+\left| \widetilde{\mu}\left(z \right)\right|}{1-\left| \widetilde{\mu}\left(z \right)\right|}d\theta}}{\int_{0}^{2\pi}{\frac{1+\left| \widetilde{\mu}\left(z \right)\right|}{1-\left| \widetilde{\mu}\left(z \right)\right|}d\theta}}\cdot\frac{dr}{r}} \nonumber \\
& = \int_{\rho_2}^{\rho_1}{\frac{\int_{0}^{2\pi}{\frac{-2\left| \widetilde{\mu}\left(z \right)\right|}{1-\left| \widetilde{\mu}\left(z \right)\right|}d\theta}}{\int_{0}^{2\pi}{\frac{1+\left| \widetilde{\mu}\left(z \right)\right|}{1-\left| \widetilde{\mu}\left(z \right)\right|}d\theta}}\cdot\frac{dr}{r}} \nonumber\\
& \geq \frac{-1}{\pi} \iint_{A_{\zeta , \rho_2 , \rho_1}\cap\dd}{\frac{\left| \mu_{F}(z)\right|}{1-\left| \mu_{F}\left( z \right)\right|}\cdot \frac{dxdy}{\left|z-\zeta \right|^2}}\nonumber \\
& \geq -\frac{1}{\pi \left( 1-\left\Vert \mu_F\right\Vert_{\infty} \right)}\iint_{A_{\zeta , \rho_2 , \rho_1}\cap\dd}{\left| \mu_{F}(z)\right|\cdot \frac{dxdy}{\left|z-\zeta \right|^2}}.\label{eq:32}
\end{align}
Let $\epsilon>0$. Still because of (\ref{eq:3}) there exists a compact set $K_{\epsilon}$ of $\dd$ such that
\begin{equation}\label{eq:6}
\iint_{\dd\setminus K_{\epsilon}}{\left| \mu_{F}(z)\right| d\sigma(z)} <\frac{\pi(1-\left\Vert \mu_{F} \right\Vert_{\infty})}{4}\epsilon.
\end{equation}
Let $r_{\epsilon}>0$ be the distance between $\mathbb{S}^1$ and $K_{\epsilon}$. Thus, for any $0<\rho_2 < \rho_1\leq r_{\epsilon}$ one obtains by combining (\ref{eq:31}), (\ref{eq:32}), (\ref{eq:5}) and (\ref{eq:6})
$$
\forall \zeta\in\mathbb{S}^1,\; -\epsilon < \Mod{F^{\widetilde{\mu}}\left( A_{\zeta,\rho_2 , \rho_1}\right)} - \ln\left(\frac{\rho_{1}}{\rho_2} \right) < \epsilon,
$$
and therefore Claim \ref{claim2} follows.
\end{proof}
\begin{proof}[Proof of Claim \ref{claim3}]
Let $\zeta\in\mathbb{S}^1$ and let $r>0$. For any $0<\rho<r$, let
$$m(\rho)=\min_{\left| z -\zeta\right|=\rho}{\left| F^{\widetilde{\mu}}(z)-f(\zeta)\right|} \textrm{ and } M(\rho)=\max_{\left| z -\zeta \right|=\rho}{\left| F^{\widetilde{\mu}}(z)-f(\zeta)\right|}.
$$
Since $F^{\widetilde{\mu}}$ is conformal at $\zeta$ one has
\begin{equation}\label{eq7}
\lim_{\rho\rightarrow 0}\frac{\left| f^{\prime}(\zeta)\right|\rho}{M(\rho)}=\lim_{\rho\rightarrow 0}\frac{\left| f^{\prime}(\zeta)\right|\rho}{m(\rho)}= 1.
\end{equation}
Furthermore, it is evident that
\begin{align*}
\Mod{F^{\widetilde{\mu}}\left( D(\zeta,r )\right) \setminus D(f(\zeta), M(\rho))} & \leq \Mod{F^{\widetilde{\mu}}\left( A_{\zeta, \rho, r} \right)} \\ & \hphantom{dsds} \leq \Mod{F^{\widetilde{\mu}}\left( D(\zeta,r )\right)\setminus D(f(\zeta), m(\rho))}.
\end{align*}
Therefore, by adding $\ln\left(\left| f^{\prime}(\zeta)\right|\rho\right)$, using (\ref{eq7}), and letting $\rho \rightarrow 0$ it follows that
$$
\lim_{\rho\rightarrow 0} \Mod{F^{\widetilde{\mu}}\left( A_{\zeta, \rho, r} \right)}+\ln\left(\left| f^{\prime}(\zeta)\right|\rho\right) = \modred{F^{\widetilde{\mu}}\left( D\left( \zeta, r\right)\right)}{f(\zeta)},
$$
which proves Claim \ref{claim3}.
\end{proof}
We have now all the ingredients necessary to complete the proof of our main Theorem \ref{main-result}.
Let $\zeta_0 \in\mathbb{S}^1$. Let $\epsilon>0$. Let $r_{\frac{\epsilon}{5}}>0$ be as in Claim \ref{claim2}. By the continuity of the reduced module discussed earlier one can find a $\delta_{\frac{\epsilon}{5}}>0$ such that if $\zeta\in\mathbb{S}^1$ and $\left|\zeta -\zeta_0\right|<\delta_{\frac{\epsilon}{5}}$ then
\begin{equation}\label{eq8}
\left| \modred{F^{\widetilde{\mu}}\left( D(\zeta, r_{\frac{\epsilon}{5}})\right)}{f(\zeta)}-\modred{F^{\widetilde{\mu}}\left( D(\zeta_0, r_{\frac{\epsilon}{5}})\right)}{f(\zeta_0)}\right| <\frac{\epsilon}{5}.
\end{equation}
Let $\zeta \in \mathbb{S}^1$ be such that $\left|\zeta -\zeta_0\right|<\delta_{\frac{\epsilon}{5}}$. By Claim \ref{claim3} there exist $r_{\zeta_0,1}, r_{\zeta,1}<r_{\frac{\epsilon}{5}}$ such that for any $\rho\leq r_{\zeta_0,1}$
\begin{equation}\label{eq9}
\left| \Mod{F^{\widetilde{\mu}}\left( A_{\zeta_0, \rho, r_{\frac{\epsilon}{5}}} \right)}+\ln\left(\left| f^{\prime}(\zeta_0)\right|\rho\right) - \modred{F^{\widetilde{\mu}}\left( D\left( \zeta_0, r_{\frac{\epsilon}{5}}\right)\right)}{f(\zeta_0)} \right|<\frac{\epsilon}{5} ,
\end{equation}
and for any $\rho\leq r_{\zeta,1}$
\begin{equation}\label{eq10}
\left| \Mod{F^{\widetilde{\mu}}\left( A_{\zeta, \rho, r_{\frac{\epsilon}{5}}} \right)}+\ln\left(\left| f^{\prime}(\zeta)\right|\rho\right) - \modred{F^{\widetilde{\mu}}\left( D\left( \zeta, r_{\frac{\epsilon}{5}}\right)\right)}{f(\zeta)} \right|<\frac{\epsilon}{5} .
\end{equation}
Thus, from the triangle inequality, Claim \ref{claim2}, and Inequalities (\ref{eq8}), (\ref{eq9}), and (\ref{eq10}) we obtain
\begin{align*}
& \left| \ln\left( \left| f^{\prime}(\zeta) \right|\right)- \ln\left( \left| f^{\prime}(\zeta_0) \right|\right) \right| \\ &= \left| \ln\left( \left| f^{\prime}(\zeta) \right| r_{\zeta,1}\right)-\ln\left(r_{\zeta,1} \right)- \ln\left( \left| f^{\prime}(\zeta_0) \right|r_{\zeta_0,1}\right) +\ln\left( r_{\zeta_0,1}\right)\right| \\
& \leq \left| \ln\left( \left| f^{\prime}(\zeta) \right| r_{\zeta,1}\right) +\Mod{F^{\widetilde{\mu}}\left( A_{\zeta, r_{\zeta,1}, r_{\frac{\epsilon}{5}}} \right)}- \modred{F^{\widetilde{\mu}}\left( D\left( \zeta, r_{\frac{\epsilon}{5}}\right)\right)}{f(\zeta)}\right| \\
& \hphantom{g}+\left| \ln\left( \left| f^{\prime}(\zeta_0) \right| r_{\zeta_0,1}\right) +\Mod{F^{\widetilde{\mu}}\left( A_{\zeta_0, r_{\zeta_0,1}, r_{\frac{\epsilon}{5}}} \right)}- \modred{F^{\widetilde{\mu}}\left( D\left( \zeta_0, r_{\frac{\epsilon}{5}}\right)\right)}{f(\zeta_0)}\right| \\
& \hphantom{ghg}+\left| \modred{F^{\widetilde{\mu}}\left( D\left( \zeta, r_{\frac{\epsilon}{5}}\right)\right)}{f(\zeta)}-\modred{F^{\widetilde{\mu}}\left( D\left( \zeta_0, r_{\frac{\epsilon}{5}}\right)\right)}{f(\zeta_0)}\right| \\
& \hphantom{gh}+\left| -\Mod{F^{\widetilde{\mu}}\left( A_{\zeta, r_{\zeta,1}, r_{\frac{\epsilon}{5}}} \right)}-\ln\left( r_{\zeta,1}\right)+ \Mod{F^{\widetilde{\mu}}\left( A_{\zeta_0, r_{\zeta_0,1}, r_{\frac{\epsilon}{5}}} \right)}+\ln\left( r_{\zeta_0,1}\right)\right| \\
& \leq 3\frac{\epsilon}{5} +\left| -\Mod{F^{\widetilde{\mu}}\left( A_{\zeta, r_{\zeta,1}, r_{\frac{\epsilon}{5}}} \right)}+\ln\left( \frac{r_{\frac{\epsilon}{5}}}{r_{\zeta,1}}\right) \right| \\
& \hphantom{dsdsdsdd}+ \left|\Mod{F^{\widetilde{\mu}}\left( A_{\zeta_0, r_{\zeta_0,1}, r_{\frac{\epsilon}{5}}} \right)}-\ln\left( \frac{r_{\frac{\epsilon}{5}}}{r_{\zeta_0,1}}\right)\right| \\
& \leq \epsilon.
\end{align*}
This shows the continuity of $\left| f^{\prime} \right|$ at any $\zeta_0\in \mathbb{S}^1,$ thus $f$ is continuously differentiable on $\mathbb{S}^1$ and since the derivative is never $0$, any element $f\in \tei{1}$ is a $\mathcal{C}^1$-diffeomorphism on $\mathbb{S}^1$. Since $ \tei{p}\subset \tei{1}$ ($p\leq 1$) we have shown that Theorem \ref{main-result} holds.
Since every differentiable quasisymmetric function $f$ on $\mathbb{S}^1 $ is symmetric in the sense of (\ref{eq:0}), the following already known property follows from Theorem \ref{main-result}.
\begin{corollary}
Let $0<p\leq 1$. Then, $\tei{p}\subset \stei$.
\end{corollary}
Let us point out that although $\tei{1}\subset \stei$, the quasiconformal extension $F$ of $f$ we were working with may not necessarily be asymptotically conformal on $\mathbb{S}^1$ and Claim \ref{claim2} is not obvious. However, for $p\geq 2$, if one specifically employs the Douady--Earle extension, then Claim \ref{claim2} holds. It seems natural to ask:
\begin{question}
Let $f\in\tei{p}$ (with $0<p\leq 2$). Is there a quasiconformal asymptotically conformal extension $F$ of $f$ to the closed unit disc for which $\mu_{F} \in \inthyp{p}$?
\end{question}
Furthermore, since we obtain smoothness properties for the elements of $\tei{p}$ (for $p\leq 1$), we suggest that one can show higher and higher order of smoothness for $p<1,$ as $p$ gets smaller and smaller. If this is the case we would like to find sharp results on how the order of smoothness depends on $p$, a question that seems to be similar to finding a characterization of $\tei{p}$ using Sobolev spaces for $p\geq 2$. In addition, we pose the following question:
\begin{question}
What is $\bigcap_{p>0}\tei{p}$?
\end{question}
\textsc{Vincent Alberge, Fordham University, Department of Mathematics, 441 East Fordham Road, Bronx, NY 10458, USA}
\textit{E-mail address:} { \href{mailto:[email protected]}{\tt [email protected]} }
\textsc{Melkana Brakalova, Fordham University, Department of Mathematics, 441 East Fordham Road, Bronx, NY 10458, USA}
\textit{E-mail address:} { \href{mailto:[email protected]}{\tt [email protected]} }
\end{document} |
\begin{document}
\maketitle
\begin{abstract}
This article contains several results for $\lambda$-Robertson functions, i.e., analytic functions $f$ defined on the unit disk $\mathbb{D}$ satisfying $f(0) = f'(0)-1=0$ and $\mathbb{R}e e^{-i\lambda}\{1+zf''(z)/f'(z)\} > 0$ in $\mathbb{D}$, where $\lambda \in (-\pi/2,\pi/2)$.
We will discuss conditions for boundedness and quasiconformal extension of Robertson functions.
In the last section we provide another proof of univalence for Robertson functions by using the theory of L\"owner chains.
\end{abstract}
\section{Introduction}
\par Let $\mathcal{A}$ be the family of functions $f$ analytic in the unit disc $\mathbb{D}=\{z\in \mathbb{C}:\, |z|<1\}$ with the usual normalization $f(0)=f'(0)-1=0$, and $\mathcal{S}$ be the subclass of $\mathcal{A}$ consisting of functions univalent in $\mathbb{D}$.
Let $\lambda$ be a real constant between $-\pi/2$ and $\pi/2$.
The curve $\gamma_{\lambda}(t)=\exp(te^{i\lambda})$, $t\in \mathbb{R}$, and its rotations $e^{i\theta}\gamma_{\lambda}(t)$, $\theta\in \mathbb{R}$, are called \textit{$\lambda$-spirals}.
A domain $\Omega$ with $0\in \Omega$ is called \textit{$\lambda$-spirallike} (with respect to 0) if for every $w \in \Omega$, the $\lambda$-spiral which connects $w$ and 0 lies in $\Omega$.
A function $f\in \mathcal{A}$ is said to be a \textit{$\lambda$-spirallike function} if $f$ maps $\mathbb{D}$ univalently onto a $\lambda$-spirallike domain and the class of such functions is denoted by $\mathcal{SP}(\lambda)$.
Spirallike functions were introduced by \v Spa\v cek \cite{Spacek:1932} in 1933.
We note that $0$-spirallike functions are precisely starlike functions.
It is known that a necessary and sufficient condition for $f\in \mathcal{A}$ to be in $\mathcal{SP}(\lambda)$ is that
\[
\mathbb{R}e
\left\{
e^{-i\lambda}\frac{zf'(z)}{f(z)}
\right\}
>0
\]
for all $z \in \mathbb{D}$.
In \cite{KimSugawa:pre01}, Kim and Sugawa introduce the notion of \textit{$\lambda$-argument}.
Let us set $\theta=\arg_{\lambda} w$ if $w\in e^{i\theta}\gamma_{\lambda}(\mathbb{R})$.
We note that $\arg_{0}w=\arg w$. For some more properties of $\lambda$-argument, the reader may be referred to \cite{KimSugawa:pre01}.
By utilizing $\lambda$-argument, another equivalence can be obtained
\[
f\in \mathcal{SP}(\lambda) \Leftrightarrow \frac{\partial}{\partial\theta}\left(\arg_{\lambda}f(re^{i\theta})\right)>0 \quad (\theta \in \mathbb{R},\,0<r<1).
\]
For general references about spirallike functions, see e.g. \cite{Duren:1983} or \cite{AhujaSilverman:1991}.
A function $f\in \mathcal{A}$ is said to be a \textit{$\lambda$-Robertson function} \cite{Kulshrestha:1976} if $f$ satisfies
\[
\mathbb{R}e
\left\{
e^{-i\lambda}\left(1+\frac{zf''(z)}{f'(z)}\right)
\right\}
>0
\]
for all $z \in \mathbb{D}$.
Let $\mathcal{R}(\lambda)$ be the set of those functions.
The definition of $\lambda$-Robertson functions shows immediately that $\mathcal{R}(0)$ is precisely the class of convex functions which is usually denoted by $\mathcal{K}$.
Furthermore in view of the definitions of spirallike and Robertson functions, for a function $f\in \mathcal{A}$ the following relations are true;
\begin{eqnarray}
\label{relationship}
f\in \mathcal{R}(\lambda)
&\Leftrightarrow&
zf'(z)\in \mathcal{SP}(\lambda)\\[5pt]
&\Leftrightarrow&
\int_{0}^{z}f'(\zeta)^{\alpha}d\zeta\in \mathcal{K}\nonumber\\[5pt]
&\Leftrightarrow&
\frac{\partial}{\partial\theta}\left[\arg_{\lambda}\left(\frac{\partial}{\partial\theta}f(re^{i\theta})\right)\right]>0 \quad (\theta\in\mathbb{R},\,0<r<1),\nonumber
\end{eqnarray}
where $\alpha=e^{-i\lambda}/\cos\lambda$.
A distinguished member of $\mathcal{R}(\lambda)$ is
\begin{equation}\label{extremal}
f_{\lambda}(z)=\frac{(1-z)^{1-2e^{i\lambda}\cos\lambda}-1}{2e^{i\lambda}\cos\lambda -1}.
\end{equation}
The class $\mathcal{R}(\lambda)$ was first introduced by Robertson \cite{Robertson:1969}.
He showed that all functions in $\mathcal{R}(\lambda)$ are univalent if $0 < \cos \lambda \leq x_{0}$, where $x_{0} =0.2034\cdots$ is the unique positive root of the equation $16x^{3} + 16 x^{2} + x -1 = 0$ (in the original paper $x_{0}$ is evaluated as $0.2315\cdots$ which seems to be erroneous \cite{KimSugawa:2007}).
Later Libera and Ziegler \cite{LiberaZiegler:1972} and Chichra \cite{Chichra:1975} gave some improvements on the range of $\lambda$ for which $\mathcal{R}(\lambda) \subset \mathcal{S}$ by estimating the norm of the Schwarzian derivatives for the class $\mathcal{R}(\lambda)$.
Finally Pfaltzgraff \cite{Pfaltzgraff:1975} showed that $\mathcal{R}(\lambda) \subset \mathcal{S}$ if $0 < \cos \lambda \leq 1/2$ or $\cos \lambda = 1$.
This value is best possible.
Indeed, Robertson also presented in \cite{Robertson:1969} a non-univalent function which belongs to $\mathcal{R}(\lambda)$ for each $\lambda$ in the range $1/2 < \cos \lambda < 1$ by making use of Royster's example \cite{Royster:1965}
$f_{\mu}^{*}(z) = ((1-z)^{-\mu} - 1)/\mu$, where $\mu$ is a number which satisfies $\mu + 1 = |\mu + 1| e^{i\lambda}, |\mu| \leq 1, |\mu+1|>1$ and $|\mu-1|>1$.
The class of $\lambda$-Robertson functions has been investigated by various authors.
Recently the class $\mathcal{R}(\lambda)$ is still an interesting topic in geometric function theory (e.g. \cite{PonnuYanagihara:2008}).
Actually, under the relationship \eqref{relationship} many properties of Robertson functions follow from those of spirallike functions.
For instance the coefficient estimate of $\mathcal{R}(\lambda)$ is an easy consequence of a result of Zamorski \cite{Zamorski:1960} (see also \cite{BajpaiM:1973}).
For some more information about Robertson functions, the reader is referred to e.g. \cite[Section 8]{AhujaSilverman:1991}.
In the present paper we would like to give several new results for the $\lambda$-Robertson functions.
In section 2 we will show that $\lambda$-Robertson functions are bounded whenever $\cos\lambda<1/\sqrt{2}$ which improves a result of Kim and Sugawa in \cite{KimSugawa:2007}.
Quasiconformal extension criteria which are related with Robertson functions are shown in section 3.
One of the criteria is also obtained by using the technique of L\"owner's theory.
We will discuss this problem in the last section and give an explicit L\"owner chain for Robertson functions.
\section{Boundedness of $\mathcal{R}(\lambda)$}
\subsection{Result and auxiliary lemma}
The boundedness of $\lambda$-Robertson function is analyzed by Kim and Sugawa \cite{KimSugawa:2007}.
It can be stated as follows after being translated to our notations.
\begin{knownthm}[\cite{KimSugawa:2007}]
$\lambda$-Robertson functions are bounded by a constant depending only on $\lambda$ when $\cos\lambda<1/2$.
\end{knownthm}
They remark that the bound $1/2$ cannot be replaced by any number greater than $1/\sqrt{2}$ since the function given by ($\ref{extremal}$) is unbounded when $\cos\lambda>1/\sqrt{2}$. Our next result will verify that the bound $1/\sqrt{2}$ is best possible.
\begin{thm}
Let $f\in \mathcal{R}(\lambda)$ with $\cos\lambda<1/\sqrt{2}$, then $f$ is bounded.
\end{thm}
In order to prove the above result, the growth theorem of spirallike functions in \cite{Singh:1969} or \cite{AhujaSilverman:1991} is needed. Since those known forms are complicated there, we simplify them as follows.
\begin{lem}\label{lem2}
Let $f\in \mathcal{SP}(\lambda)$, then for $|z|=r<1$, we have
\[
\Psi_{1}(r)\leq |f(z)|\leq \Psi_{2}(r)
\]
where
\[
\Psi_{1}(r)=\left|P_{\lambda}(re^{i\theta_{1}}) \right|=\frac{r\exp\left(-\sin2\lambda\arcsin(r\sin\lambda)\right)}{(r\cos\lambda-\sqrt{1-r^2\sin^2\lambda})^{2\cos^2\lambda}}
\]
and
\[
\Psi_{2}(r)=\left|P_{\lambda}(re^{i\theta_{2}}) \right|=\frac{r\exp\left(\sin2\lambda\arcsin(r\sin\lambda)\right)}{(r\cos\lambda-\sqrt{1-r^2\sin^2\lambda})^{2\cos^2\lambda}}
\]
where
\[P_{\lambda}(z)=\frac{z}{(1-z)^{1+e^{2i\lambda}}}
\]
belongs to $\mathcal{SP}(\lambda)$ and $\theta_{j}$ ($j=1,2$) satisfy
\[
\sin(\lambda+\theta_{j})=r\sin\lambda \hspace{15pt}(j=1,2)
\]
and $\cos(\lambda+\theta_{1})<0$ and $\cos(\lambda+\theta_{2})>0$ respectively.
\end{lem}
\subsection{Proof of Theorem 1}
Equivalence \eqref{relationship} and Lemma \ref{lem2} show that
\begin{eqnarray*}
|f(z)| &=&\left|\int_{0}^{z}f'(\zeta)d\zeta\right|=\left| \int_{0}^{r} \frac{z}{r}f'(tz/r)dt\right|\\[5pt]
&\leq &\int_{0}^{r}|f'(tz/r)|dt\leq \int_{0}^{r}\frac{\exp(\sin(2\lambda)\arcsin(t\sin\lambda))}{(\sqrt{1-t^2\sin^2\lambda}-t\cos\lambda)^{2\cos^2\lambda}}dt
\end{eqnarray*}
where $0<|z|=r<1$.
Since the numerator in the above integrand is bounded over $[0,1]$, it is sufficient to estimate only the denominator.
Upon a change in the variable $s=1-t$, we obtain
\begin{eqnarray*}
\sqrt{1-t^2\sin^2\lambda}-t\cos\lambda
&=&
\sqrt{1-(1-s)^2\sin^2\lambda}-(1-s)\cos\lambda\\
&=&
\sqrt{\cos^2\lambda+2s\sin^2\lambda-s^2\sin^2\lambda}-(1-s)\cos\lambda\\
&=&
\cos\lambda \sqrt{1+2s\tan^2\lambda-s^2\tan^2\lambda}-(1-s)\cos\lambda\\
&=&
\cos\lambda[1+1/2(2s\tan^2\lambda-s^2\tan^2\lambda)+O(s^2)]-(1-s)\cos\lambda\\
&=&
\frac{s}{\cos\lambda}+O(s^2)
\end{eqnarray*}
when $s\to 0$.
Therefore $f(z)$ is bounded whenever $2\cos^2\lambda<1$, that is, $\cos\lambda<1/\sqrt{2}$. The example given by $(\ref{extremal})$ ensures the sharpness of the value $1/\sqrt{2}$.
$\square$
\begin{rem}
Note that our method is not applicable for the case when $\cos\lambda=1/\sqrt{2}$. Since the function $f_{\lambda}(z)$ given in ($\ref{extremal}$) is bounded when $\cos\lambda=1/\sqrt{2}$, we may expect that $\mathcal{R}(\lambda)$ consists of bounded functions as well in this case.
\end{rem}
\section{Quasiconformal Extension}
\subsection{Results}
In this section we would like to discuss the new quasiconformal extension criteria for Robertson functions.
Let us denote by $\mathcal{S}(k)$ the family of functions which lie in $\mathcal{S}$ and can be extended to quasiconformal automorphisms of $\mathbb{C}$ so that the complex dilatation $\mu_{f} = (\partial f / \partial \bar{z}) / (\partial f / \partial z)$ satisfies $|\mu_{f}(z)| \leq k < 1$ for almost every $z \in \mathbb{C}$.
We will show the following which is an extension of a result of Ruscheweyh \cite[Corollary 1]{Ruscheweyh:1976};
\begin{thm}\label{result}
Let $f \in \mathcal{A},\,k \in [0,1)$ and $\lambda\in(-\pi/2,\pi/2),\,q>-1$ be related by
\begin{equation}\label{eq02}
0 < \cos \lambda \leq \left\{
\begin{array}{llc}
k/2, & \textit{if}& -1 < q \leq 0 , \\[5pt]
k/(2+4q), & \textit{if}& 0 < q.
\end{array}
\right.
\end{equation}
If $f$ satisfies
\begin{equation}\label{eq01}
\mathbb{R}e
\left\{
e^{-i\lambda}
\left(
1 + \frac{zf''(z)}{f'(z)} + q \frac{zf'(z)}{f(z)}
\right)
\right\}
>0
\end{equation}
for all $z \in \mathbb{D}$, then $f \in \mathcal{S}(k)$.
If, in addition, $f''(0)=0$, $\eqref{eq02}$ can be replaced by
\begin{equation}\label{eq05}
0 < \cos \lambda \leq \left\{
\begin{array}{llc}
k, & \textit{if}& -1 < q \leq 0 , \\[5pt]
k/(1+2q), & \textit{if}& 0 < q.
\end{array}
\right.
\end{equation}
\end{thm}
We note that when $q=0$ Theorem 3 claims quasiconformal extension of $\lambda$-Robertson functions which can be stated explicitly as follows;
\begin{cor}\label{qccor}
Let $f \in \mathcal{R}(\lambda)$ with $\lambda\in (-\pi/2,\pi/2)$ satisfying
\begin{equation*}\label{eq06}
0 < \cos \lambda \leq k/2,
\end{equation*}
then $f\in\mathcal{S}(k)$.
If, in addition, $f''(0)=0$ and $\eqref{eq06}$ is replaced by
\begin{equation*}
0 < \cos \lambda \leq k,
\end{equation*}
then $f\in \mathcal{S}(k)$.
\end{cor}
\par We note here that the second case in Corollary 4 also implies that a function $f \in \mathcal{R}(\lambda)$ with $f''(0)=0$ for arbitrary $\lambda\in (-\pi/2,\pi/2)$ is univalent, which was proved by Singh and Chichra \cite{SinghChichra:1977b} by means of Ahlfors's criterion for univalence as well.
\subsection{Preliminaries}
The following several results will be used later in our arguments.
Here, set
$$
H_{s}(z) = s \left(1 + \frac{zf''(z)}{f'(z)}\right) + (1-s)\frac{zf'(z)}{f(z)}.
$$
\begin{knownthm}[\cite{Hotta:2010b}]\label{main}
Let $a>0,\,b \in \mathbb{R},\,s = a + i b,\,k \in [0,1)$ and $f \in \mathcal{A}$.
Assume that for a constant $c \in \mathbb{C}$ and all $z \in \mathbb{D}$
\begin{equation*}
\left|
c|z|^{2}+s-a(1-|z|^{2})H_{s}(z)
\right|
\leq M
\end{equation*}
with
$$
M =
\left\{
\begin{array}{ll}
a k |s| + (a-1)|s+c|, &\textit{if}\hspace{10pt} 0 < a \leq 1, \\[5pt]
k |s|, &\textit{if}\hspace{10pt} a>1,
\end{array}
\right.
$$
then $f \in \mathcal{S}(l)$, where
\begin{equation*}\label{mainl}
l=
\frac
{
2ka+(1-k^{2}) |b|
}
{
(1+k^{2})a+(1-k^{2}) |s|
}
<1.
\end{equation*}
\end{knownthm}
We note that in the above theorem $l=k$ if and only if $b=0$ (\cite{Hotta:2010b}).
\begin{knownlem}[e.g. \cite{Ruscheweyh:1976}]\label{lemB}
Let $p(z) = 1 + a_{n}z^{n} + \cdots$ be analytic and $\mathbb{R}e p(z)>0$ on $\mathbb{D}$.
Then
\begin{equation*}
\left|
p(z) -1-\frac{2|z|^{2n}}{1-|z|^{2n}}
\right|
\leq
\frac{2|z|^{2n}}{1-|z|^{2n}}
\end{equation*}
for all $z \in \mathbb{D}$.
\end{knownlem}
\subsection{Proof of Theorem \ref{result}}
Let $s = 1/(1+q)$ and $f(z)=z+\sum_{n=2}^{\infty}a_{n}z^n$, then for
\begin{eqnarray*}
p(z) &=& \frac{e^{-i\lambda} H_{s}(z) + i\sin \lambda}{\cos \lambda}\\
&=& 1 + \frac{e^{-i\lambda}}{\cos \lambda} (s+1)a_{2}z + \cdots.
\end{eqnarray*}
we have $p'(0)=0$ if and only if $f''(0)=0$.
Condition \eqref{eq01} implies that $p(z)$ is analytic in $\mathbb{D}$ and fulfills $\mathbb{R}e p(z) > 0$ for all $z \in \mathbb{D}$.
With $\displaystyle (c+s) = \frac 2 n se^{i\lambda}\cos\gamma,\,n=1, 2$, we obtain from Lemma $\ref{lemB}$ that
$$
\begin{tabular}{llllll}
\multicolumn{1}{l}
{$\displaystyle \left|\frac{(c+s)|z|^{2}}{1-|z|^{2}} - s(H_{s}(z) -1)\right|$} \\[12pt]
$\hspace{60pt}\displaystyle \leq
s|\cos \lambda|
\left\{
\left|\frac{2|z|^{2n}}{1-|z|^{2n}} - (p(z) -1)\right| +
\left|\frac{2|z|^{2n}}{1-|z|^{2n}} - \frac2n \frac{|z|^{2}}{1-|z|^{2}}\right|
\right\}$\\[12pt]
$\hspace{60pt}\displaystyle \leq \frac{2s}{n}\frac{|\cos \lambda|}{1-|z|^{2}}$.
\end{tabular}
$$
\noindent
Therefore by Theorem \ref{main} $f$ can be extended to a $k$-quasiconformal automorphism of $\mathbb{C}$ whenever
$$
\frac2n s|\cos \lambda| \leq \left\{
\begin{tabular}{llc}
$\displaystyle k s^{2} + \frac2n s |\cos \lambda| (s-1)$, & \textit{if}&$0 < s \leq 1$, \\[5pt]
$k s$, & \textit{if}& $1 < s$,
\end{tabular}
\right.
$$
which is equivalent to \eqref{eq02} if $n=1$ and to \eqref{eq05} if $n=2$.
$\square$
\section{L\"owner chain}
We can find another proof for univalency of Robertson functions by making use of the theory of L\"owner chains.
The following theorem is well known.
Here, we denote $\partial f / \partial t$ and $\partial f / \partial z$ by $\dot{f}$ and $f'$ respectively.
\begin{knownthm}[\cite{Pom:1965}, see also \cite{Hotta:2010a}]
Let $0 < r_{0} \leq 1$.
Let $f_{t}(z) = \sum_{n=1}^{\infty}a_{n}(t)z^{n}$, $a_{1}(t) \neq 0$,\, be analytic for each $t \in [0,\infty)$ in $\mathbb{D}_{r_{0}}$ and locally absolutely continuous in $[0,\infty)$, locally uniformly with respect to $\mathbb{D}_{r_{0}}$, where $a_{1}(t)$ is a complex-valued function on $[0,\infty)$.
For almost all $t \in [0,\infty)$ suppose
\begin{equation}\label{LDE}
\dot{f_{t}}(z) =z f_{t}'(z) p_{t}(z) \hspace{20pt} (z \in \mathbb{D}_{r_{0}})
\end{equation}
where $p_{t}(z)$ is analytic in $\mathbb{D}$ and satisfies $\mathbb{R}e p_{t}(z)>0,\,z \in \mathbb{D}$.
If $|a_{1}(t)| \to \infty$ for $t \to \infty$ and if $\{f_{t}(z)/a_{1}(t)\}$ forms a normal family in $\mathbb{D}_{r_{0}}$, then for each $t \in [0,\infty)$ $f_{t}(z)$ can be continued analytically to a univalent function on $\mathbb{D}$.
\end{knownthm}
The next lemma is needed for our discussion;
\begin{knownlem}[\cite{Wang:preprint}, Theorem 3]\label{lemma3}
Suppose that $\lambda \in (-\pi/2,\pi/2)$.
Let $p(z)$ be an analytic function defined on $\mathbb{D}$ which satisfies $p(0)=1$ and $\mathbb{R}e e^{-i\lambda}p(z) >0$ for all $z \in \mathbb{D}$.
Then we have
\begin{equation*}
\left|p(z)-\left(\frac{1}{1-r^2}+\frac{r^2}{1-r^2}e^{2i\lambda}\right)\right|\leq
\frac{2r}{1-r^2}\cos \lambda.
\end{equation*}
where $r=|z|<1$.
\end{knownlem}
Now we suppose that $|\lambda| \in [\pi/3, \pi/2)$ and $f$ is a $\lambda$-Robertson function.
Let us put
\begin{equation}\label{LC}
f_{t}(z) = f(e^{-t}z) - e^{-2i\lambda}(e^{2t}-1)e^{-t}zf'(e^{-t}z).
\end{equation}
Here we should note that a more general form of \eqref{LC} appears in \cite{Ruscheweyh:1976}.
It suffices to prove that $p_{t}(z)$ in \eqref{LDE} lies in the right-hand side of the complex plane $\mathbb{C}$
for all $z \in \mathbb{D}$ and a.e. $t \in [0,\infty)$.
This is equivalent to
$$
\left|
\frac{\dot{f_{t}}(z) - zf_{t}'(z)}{\dot{f_{t}}(z) +zf_{t}'(z)}
\right|
< 1.
$$
Then a calculation shows that
\begin{equation}\label{eq03}
\left|
e^{-2t} e^{2i\lambda} + 1 -
\left(
1 - e^{-2t}
\right)
\left(
1 + \frac{e^{-t}zf''(e^{-t}z)}{f'(e^{-t}z)}
\right)
\right|
<1
\end{equation}
implies univalency of $f$ and \eqref{eq03} follows from the maximum modulus principle and Lemma \ref{lemma3} when $\cos \lambda \leq 1/2$.
\begin{rem}
Applying Becker's theorem \cite{Becker:1976} with \eqref{LC}, we also obtain the quasiconformal extension criterion for $\mathcal{R}(\lambda)$ which is in Corollary \ref{qccor}.
\end{rem}
\end{document} |
\begin{document}
\title{\TheTitle}
\begin{abstract}
Recent works showed that pressure-robust modifications of mixed finite element methods for the Stokes equations outperform their standard versions in many cases. This is achieved by divergence-free reconstruction operators and results in pressure independent velocity error estimates which are robust with respect to small viscosities. In this paper we develop a posteriori error control which reflects this robustness.
The main difficulty lies in the volume contribution of the standard residual-based approach
that includes the \(L^2\)-norm of the right-hand side. However, the velocity is only steered by the divergence-free part of this source term. An efficient error estimator must approximate this divergence-free part in a proper manner, otherwise it can be dominated by the pressure error.
To overcome this difficulty a novel approach is suggested
that uses arguments from the stream function and vorticity formulation of the Navier--Stokes equations. The novel error estimators only take the $\mathrm{curl}$
of the right-hand side into account and so lead to provably reliable, efficient and pressure-independent upper bounds
in case of a pressure-robust method in particular in pressure-dominant situations. This is also confirmed by some numerical examples with the novel pressure-robust modifications of the Taylor--Hood and mini finite element methods.
\keywords{incompressible Navier--Stokes equations \and mixed finite elements \and pressure robustness \and a posteriori error estimators \and adaptive mesh refinement
}
\end{abstract}
\section{Introduction}
This paper studies a posteriori error estimators for the velocity of the Stokes equation with a special focus on pressure-robust finite element methods. Pressure-robustness
is closely related to the \(L^2\)-orthogonality of divergence-free functions to gradients of \(H^1\)-functions. In particular, the exact velocity
\(\vecb{u}\) of the Stokes equations (with zero boundary data),
\begin{align*}
-\nu \Delta \vecb{u} + \nabla p & = \vecb{f} \text{ in } \Omega \quad \text{and} \quad \vecb{u} \in \vecb{V}_0 := \lbrace \vecb{v} \in H^1_0(\Omega)^2 : \mathrm{div} \vecb{v} = 0 \rbrace,
\end{align*}
is orthogonal to any \(q \in L^2(\Omega)\) in the sense that \(\int_\Omega q \mathrm{div}(\vecb{u}) \dx = 0\).
Consequently, \(\vecb{u}\) also solves the Stokes equations
with \(\vecb{f}\) replaced by \(\vecb{f} + \nabla q\) for \(q \in H^1(\Omega)\).
This invariance property is in general not preserved for discretely divergence-free
testfunctions of most classical finite element methods that
relax the divergence constraint to attain inf-sup stability.
With an inf-sup-stable pair of a discrete velocity space \(\vecb{V}_h\)
and some discrete pressure space \(Q_h\) and the discretely divergence-free functions
\(\vecb{V}_{0,h} \subset \vecb{V}_h\),
the consistency error from the relaxed divergence constraint can be
expressed by the discrete dual norm, for any \(q \in L^2(\Omega)\),
\begin{align}\label{intro_discrete_orthogonality}
\| \nabla q \|_{\vecb{V}_{0,h}^\star}
:= \!\! \sup_{\vecb{v}_h \in V_{0,h} \setminus \lbrace \vecb{0} \rbrace} \!\! \frac{\int_\Omega q \mathrm{div} \vecb{v}_h \dx}{\| \nabla \vecb{v}_h \|_{L^2}}
\leq \begin{cases}
\min_{q_h \in Q_h} \| q - q_h \|_{L^2} & \text{if } \vecb{V}_{0,h} \not\subseteq \vecb{V}_0,\\
0 & \text{if } \vecb{V}_{0,h} \subseteq \vecb{V}_0.
\end{cases}
\end{align}
Besides some expensive or exotic divergence-free methods like the Scott-Vogelius finite element method \cite{scott:vogelius:conforming,Zhang05}, most classical inf-sup stable
mixed finite element methods, including the popular Taylor--Hood \cite{HT74} and mini finite element families \cite{ABF84} have \(V_{0,h} \not\subseteq \vecb{V}_0\) and so
the term from \eqref{intro_discrete_orthogonality} appears in their a priori velocity gradient error estimate \cite{MR3097958} scaled with \(1/\nu\), i.e.
\begin{align}\label{intro_apriori}
\| \nabla(\vecb{u} - \vecb{u}_h) \|^2_{L^2} \leq \! \inf_{\substack{\vecb{v}_h \in V_{0,h},\\ \vecb{u}_h
= \vecb{v}_h \text{ on } \partial \Omega}} \! \| \nabla(\vecb{u} - \vecb{v}_h) \|^2_{L^2} + \frac{1}{\nu^2} \| \nabla p \|^2_{V_{0,h}^\star}.
\end{align}
This factor \(1/\nu\) causes a locking phenomenon. Indeed, for \(\nu \rightarrow 0\) or very complicated pressures, the pressure
contribution may dominate and lead to a very bad solution for the discrete velocity \(\vecb{u}_h\) \cite{Lin14,LM2015,wias:preprint:2177,LM2016}.
By a trick of \cite{Lin14} one can introduce a reconstruction operator \(\Pi\), that maps discretely divergence-free functions onto exactly divergence-free ones,
into the right-hand side and so transform any classical finite element method into a pressure-robust one. This replaces the pressure-dependent term in \eqref{intro_apriori} by a small consistency error of optimal order \cite{Lin14,BLMS15,LMT15,LM2016,2016arXiv160903701L}
and independent of \(1/\nu\).
Although this fixes the locking phenomenon and leads to huge gains in many numerical examples, efficient a posteriori error control for these methods
is an open problem. Efficient error estimators for the velocity error not only have to cope with the variational crime but also, and more importantly,
have to mimic the pressure-independence.
Standard residual-based a posteriori error estimators \(\eta\) usually have the form
\begin{align*}
\| \nabla(\vecb{u} - \vecb{u}_h) \|_{L^2} \lesssim \eta := \eta_\text{vol} + \text{other terms}
\end{align*}
with a volume contribution \(\eta_\text{vol}\) and some other terms, like norms of the normal jumps of \(\vecb{u}_h\), data oscillations
or consistency errors.
In the standard residual-based error estimator for classical finite element methods \cite{MR2995179,MR3064266,MR993474,MR3425376}
the volume contribution takes the form (for any \(q \in H^1(\Omega)\) and piecewise Laplacian \(\Delta_\mathcal{T}\))
\begin{align}\label{intro_volume1}
\eta_\text{vol} & = \nu^{-1} \| \nabla q \|_{V_{0,h}^\star} + \nu^{-1} \| h_\mathcal{T}(\vecb{f} - \nabla q + \nu \Delta_\mathcal{T} \vecb{u}_h) \|_{L^2}\\
& \lesssim \| \nabla (\vecb{u} - \vecb{u}_h) \|_{L^2} + \nu^{-1} \left(\| p - q \|_{L^2} + \min_{q_h \in Q_h} \| q - q_h \|_{L^2} + \mathrm{osc_k}(\vecb{f} - \nabla q,\mathcal{T}) \right).\nonumber
\end{align}
The inequality above states efficiency, i.e.\ being also a lower bound of the real error,
and its dependence on the choice of \(q\).
Note, that for \(q \in Q_h\) the terms \(\| \nabla q \|_{V_{0,h}^\star} \leq \min_{q_h \in Q_h} \| q - q_h \|_{L^2} = 0\) vanish, but \(\| p - q \|_{L^2}\) remains, whereas
for \(q = p\) the term \(\| p - q \|_{L^2}\) vanishes, but the other two remain.
If the velocity error is at best as good as the error in the pressure (scaled by \(1/\nu\)), as it is the case for classical pressure-inrobust methods,
this estimate is fine (e.g.\ for \(q\) chosen as an \(H^1\)-interpolation of \(p_h\)).
As a result classical a posteriori error estimates, see e.g.~\cite{MR2995179,MR3064266,MR993474,MR3425376}, often perform the error analysis in a norm that
combines the velocity error and the pressure error. A pressure-robust method, however, allows for a decoupled error analysis of velocity error and pressure error
and so gives more control over both.
For a pressure-robust finite element method, the term \eqref{intro_volume1} can be replaced by
\begin{align}\label{intro_volume2}
\eta_\text{vol} &= \nu^{-1} \| h_\mathcal{T}(\vecb{f} - \nabla q + \nu \Delta_\mathcal{T} \vecb{u}_h) \|_{L^2} \\
&\lesssim \| \nabla (\vecb{u} - \vecb{u}_h) \|_{L^2} + \nu^{-1} \left(\| p - q \|_{L^2} + \mathrm{osc_k}(\vecb{f} - \nabla q,\mathcal{T}) \right). \nonumber
\end{align}
Here, the choice \(q = p\) leads to a pressure-independent efficient estimate. However, this
cannot be considered a posteriori, since \(p\) is unknown. Hence, an efficient error estimator of this form for pressure-robust methods
hinges on a good approximation of \(q \approx p\) as already investigated in \cite{MR1471083,MR3366087}.
The main result of this paper concerns a different approach to estimate the velocity error that yields an estimator with the volume contribution
\begin{align}\label{intro_volume3}
\eta_\text{curl} &= \nu^{-1} \| h_\mathcal{T}^2 \mathrm{curl}_\mathcal{T} (\vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h) \|_{L^2} \\
&\lesssim \| \nabla (\vecb{u} - \vecb{u}_h) \|_{L^2} + \nu^{-1} \mathrm{osc_k}( h_\mathcal{T} \mathrm{curl}_\mathcal{T}(\vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h),\mathcal{T}). \nonumber
\end{align}
The advantage of \(\eta_\text{curl}\) over \(\eta_\text{vol}\) is that the \(\mathrm{curl}\) operator automatically cancels any \(\nabla q\) from the Helmholtz decomposition of \(\vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h\)
and therefore no approximation of \(p\) as in \eqref{intro_volume2} is needed. Also note, that \(\eta_\text{curl}\) is similar to the volume contribution of a residual-based error
estimator for the Navier--Stokes equations in streamline and vorticity formulation \cite{MR1645033}.
However, the error estimator with this volume contribution is valid for any pressure-robust
finite element method like the Scott--Vogelius finite element method
\cite{scott:vogelius:conforming,Zhang05} or the novel family of pressure-robustly modified finite element methods of \cite{Lin14,BLMS15,LMT15,LM2016,2016arXiv160903701L} that allow for an interesting interplay between the Fortin interpolator \(I\)
and the reconstruction operator \(\Pi\) manifested in the required assumption
\begin{align}\label{intro_assumption}
\int_\Omega (1 - \Pi I) \vecb{v} \cdot \vecb{\theta} \dx
\lesssim \| \nabla \vecb{v} \|_{L^2} \| h_\mathcal{T}^{2} \mathrm{curl} \vecb{\theta} \|_{L^2} \quad \text{for all } \vecb{\theta} \in H(\mathrm{curl},\Omega) \text{ and } \vecb{v} \in \vecb{V}_0.
\end{align}
We prove this assumption for certain popular finite element methods including the Taylor--Hood and mini finite element methods, and some elements with discontinuous pressure approximations. However, we only focus on the two-dimensional case, since the proofs
for the three-dimensional case are much more involved and therefore discussed in a future
publication.
The rest of the paper is structured as follows. Section~\ref{sec:stokes} introduces the Stokes equations and preliminaries as well as notation used throughout the paper.
Section~\ref{sec:prmethods} focuses on classical finite element methods and their recently developed pressure-robust siblings that are based on a suitable reconstruction operator.
Section~\ref{sec:standard_estimates} is concerned with standard residual-based error estimates for classical and pressure-robust finite element methods and
the efficiency of its contributions, in particular \eqref{intro_volume1} and \eqref{intro_volume2}, especially in the pressure-dominated regime.
Section~\ref{novelestimates} derives some novel a posteriori error bounds with the volume contribution \eqref{intro_volume3}
that are efficient and easy to evaluate for the pressure-robust finite element methods that satisfy Assumption \eqref{intro_assumption}.
In Section~\ref{sec:assumptionproof} this assumption is verified for many popular finite element methods and
their pressure-robust siblings.
Section~\ref{sec:numerics} studies numerical examples and employs the local contributions of the a posteriori error estimates as refinement indicators for
adaptive mesh refinement. The numerical examples verify the theory and show that the pressure-robust finite element methods converge with the optimal order
also in non-smooth examples.
\section{Model problem and preliminaries}\label{sec:stokes}
This section states our model problem and the needed notation.
\subsection{Stokes equations and Helmholtz projector}
The Stokes model problem seeks a vector-valued velocity field \(\vecb{u}\) and a scalar-valued pressure field \(p\) on a bounded Lipschitz domain \(\Omega \subset \mathbb{R}^2\) with Dirichlet data \(\vecb{u} = \vecb{u}_D\) along \(\partial \Omega\) and
\begin{align*}
-\nu \Delta \vecb{u} + \nabla p & = \vecb{f} \quad \text{and} \quad \mathrm{div} \vecb{u} = 0 \quad \text{in } \Omega.
\end{align*}
The weak formulation characterises \(\vecb{u} \in H^1(\Omega)^2\) by \(\vecb{u} = \vecb{u}_D\) along \(\partial \Omega\) and
\begin{align*}
\nu (\nabla \vecb{u}, \nabla \vecb{v}) - (p, \mathrm{div} \vecb{v}) & = (\vecb{f}, \vecb{v}) && \text{for all } \vecb{v} \in V := H^1_0(\Omega)^2,\\
(q, \mathrm{div} \vecb{u}) & = 0 && \text{for all } q \in Q := L^2_0(\Omega).
\end{align*}
In the set of divergence-free functions \(\vecb{V}_0 := \lbrace \vecb{v} \in V\, \lvert \, \mathrm{div} \vecb{v} = 0\rbrace\), \(\vecb{u}\)
satisfies
\begin{align*}
\nu (\nabla \vecb{u}, \nabla \vecb{v}) = (\vecb{f}, \vecb{v}) \qquad \text{for all } \vecb{v} \in \vecb{V}_0.
\end{align*}
The Helmholtz decomposition decomposes every vector field into
\begin{align*}
\vecb{f} = \nabla \alpha + \beta =: \nabla \alpha + \mathbb{P} \vecb{f}
\end{align*}
with \(\alpha \in H^1(\Omega) / \mathbb{R}\) and
\(\beta =: \mathbb{P} \vecb{f} \in L^2_\sigma(\Omega) := \lbrace \vecb{w} \in H(\mathrm{div},\Omega) \, \vert \, \mathrm{div} \vecb{w} = 0, \vecb{w} \cdot \vecb{n} = 0 \text{ along } \partial \Omega\rbrace\)
\cite{GR86}.
Note in particular, that the continuous Helmholtz projector satisfies \(\mathbb{P}(\nabla q) = 0\) for all \(q \in H^1(\Omega)\) which implies
\begin{align*}
\nu (\nabla \vecb{u}, \nabla \vecb{v}) = (\mathbb{P} \vecb{f}, \vecb{v}) \qquad \text{for all } \vecb{v} \in \vecb{V}_0,
\end{align*}
hence \(\vecb{u}\) is steered only by the Helmholtz projector \(\mathbb{P} \vecb{f}\) of the right-hand side.
\subsection{Notation}
The set \(\mathcal{T}\) denotes a regular triangulation of \(\Omega\)
into two dimensional simplices with edges \(\mathcal{E}\) and nodes \(\mathcal{N}\).
The three edges of a simplex \(T \in \mathcal{T}\) are denoted by \(\mathcal{E}(T)\). Similarly,
\(\mathcal{N}(T)\) consists of the three nodes that belong to \(T \in \mathcal{T}\),
\(\mathcal{N}(E)\) consists of the two nodes that belong to \(E \in \mathcal{E}\)
and \(\mathcal{T}(z)\) for a vertex \(z \in \mathcal{N}\)
consists of all cells \(T \in \mathcal{T}\) with \(z \in \mathcal{N}(T)\). Finally, we define $\mathcal{E}^\circ$ as the set of all inner edges.
As usual \(L^2(\Omega)\), \(H^1(\Omega)\), \(H(\mathrm{div},\Omega)\) and \(H(\mathrm{curl},\Omega)\) denote the Sobolev spaces and
\(L^2(\Omega)^2\), \(H^1(\Omega)^2\) denote their vector-valued versions.
Moreover, several discrete function spaces are used throughout the paper. The set \(P_k(T)\)
denotes scalar-valued polynomials up to order \(k\) that live on the simplex \(T \in \mathcal{T}\)
and generate the global piecewise polynomials of order \(k\), i.e.
\begin{align*}
P_k(\mathcal{T}) := \lbrace q_h \in L^2(\Omega) \, | \, \forall T \in \mathcal{T} : v_h|_T \in P_k(T) \rbrace.
\end{align*}
The function \(\pi_{P_k(\omega)}\) denotes the $L^2$ best approximation into \(P_k(\omega)\)
for any subdomain \(\omega \subset \Omega\).
For approximation of functions in $H(\mathrm{div},\Omega)$ we use the set of Brezzi-Douglas-Marini functions of order \(k \geq 1\) denoted by $BDM_k(\mathcal{T}):= P_k(\mathcal{T})^2 \cap H(\mathrm{div},\Omega)$
and the subset of Raviart-Thomas functions of order $k \ge 0$ denoted by $RT_{k}(\mathcal{T})$, see \cite{RTelements}.
The functions \(I_{RT_k}\) and \(I_{BDM_k}\) denote the standard interpolators into \(RT_k(\mathcal{T})\) and \(BDM_k(\mathcal{T})\), respectively,
see e.g.\ \cite{MR3097958}. We are also using lowest order N\'ed\'elec (type I) functions $\mathcal{N}_{0}(\mathcal{T})$ defined as the 90 degree rotated lowest order Raviart-Thomas functions with the corresponding interpolator \(I_{\mathcal{N}_{0}}\), see \cite{MR592160}.
The diameter of a simplex \(T \in \mathcal{T}\) is denoted by \(h_T\) and \(h_\mathcal{T} \in P_0(\mathcal{T})\)
is the local mesh width function, i.e. \(h_\mathcal{T}|_T := h_T\) for all \(T \in \mathcal{T}\). Similarly, \(h_E\)
denotes the diameter of the side \(E \in \mathcal{E}\).
At some point certain bubble functions are used. The cell bubble function on a cell \(T \in \mathcal{T}\) is defined
by \(b_T = \prod_{z \in \mathcal{N}(T)} \varphi_z\) where \(\varphi_z\) is the nodal basis function of the node \(z \in \mathcal{N}\),
i.e.\ \(\varphi_z(z) = 1\) and \(\varphi_z(y) = 0\) for \(y \in \mathcal{N} \setminus \lbrace z \rbrace\). Similarly, the
face bubble \(b_E\) for some side \(E \in \mathcal{E}\) is defined by \(b_E = \prod_{z \in \mathcal{N}(E)} \varphi_z\).
The vector \(\vecb{n}_E\) denotes the unit normal vector of the side \(E \in \mathcal{E}\) with arbitrary but fixed orientation, such that
the normal jump \([ \vecb{v} \cdot \vecb{n}]\) of some function \(\vecb{v}\) has a well-defined sign. The vector \(\vecb{\tau}_E\) denotes a unit tangential vector of \(E\).
\section{Classical and pressure-robust finite element methods}\label{sec:prmethods}
This section recalls classical (usually not pressure-robust) inf-sup stable finite element methods and
a pressure-robust modification of these methods.
\subsection{Classical inf-sup stable finite element methods}
Classical inf-sup stable finite element methods choose ansatz spaces \(V_h \subseteq V = H^1_0(\Omega)^2\) and \(Q_h \subseteq Q = L^2_0(\Omega)\) with the inf-sup property
\begin{align}\label{eqn:infsupconstant}
0 < c_0 := \inf_{q_h \in Q_h \setminus \lbrace 0 \rbrace} \sup_{\vecb{v}_h \in V_h \setminus \lbrace \vecb{0} \rbrace}
\frac{\int_\Omega q_h \mathrm{div} \vecb{v}_h \dx}{\| \nabla \vecb{v}_h \|_{L^2} \| q_h \|_{L^2}}.
\end{align}
This guarantees surjectivity of the discrete divergence operator
\begin{align*}
\mathrm{div}_h \vecb{v}_h = \Pi_{Q_h} (\mathrm{div} \vecb{v}_h) := \argmin_{q_h \in Q_h} \| \mathrm{div} \vecb{v}_h - q_h \|_{L^2},
\end{align*}
but also leads to the set of only discretely divergence-free testfunctions
\begin{align*}
V_{0,h} = \lbrace \vecb{v}_h \in V_h \, \vert \, \mathrm{div}_h \vecb{v}_h = 0 \rbrace ,
\end{align*}
that in general is not a subset of the really divergence-free functions \(\vecb{V}_0\).
Table~\ref{tab:FEMtable} lists some classical finite element methods that are inf-sup stable and are considered in this paper. Besides
the Scott-Vogelius finite element method (on a barycentric refined mesh \(\mathrm{bary}(\mathcal{T})\) to ensure the inf-sup stability \cite{scott:vogelius:conforming,Zhang05}),
all of them are not divergence-free. The space \(P_k^+(\mathcal{T})\) in case of the P2-bubble \cite{crouzeix:raviart:1973} or the mini finite element methods \cite{ABF84}
indicates that the \(P_k(\mathcal{T})\) space is enriched with
the standard cell bubbles \(b_T\) for all \(T \in \mathcal{T}\).
For the Bernardi--Raugel finite element method
normal-weighted face bubbles are added \cite{BernardiRaugel} defining the space $ P_1^\text{BR}(\mathcal{T}) := P_1(\mathcal{T})^2 \cup \lbrace b_E \vecb{n}_E : E \in \mathcal{E} \rbrace. $
\begin{table}
\begin{center}
\caption{\label{tab:FEMtable}List of classical finite element methods that are considered in this paper and their expected velocity gradient error convergence order \(k\).}
\footnotesize
\begin{tabular}{l@{~~} @{~~}l @{~~} @{~~}l @{~~} @{~~}l @{~~} }
FEM name \& reference \& order & abbr. & $V_h$ & $Q_h$\\
\hline
Bernardi--Raugel FEM \cite{BernardiRaugel} (\(k=1\)) & $\mathrm{BR}$ & $P_1^\text{BR}(\mathcal{T}) \cap V$ & $P_{0}(\mathcal{T})$\\
Mini FEM \cite{ABF84} (\(k = 1\)) & $\mathrm{MINI}$ & $P_1^+(\mathcal{T})^2 \cap V$ & $P_{1}(\mathcal{T}) \cap H^1(\Omega)$\\
$P_{k+1} \times P_{k-1}$ FEM (\(k \geq 1\)) & $\mathrm{P2P0}$,... & $P_{k+1}(\mathcal{T})^2 \cap V$ & $P_{k-1}(\mathcal{T})$\\
P2-bubble FEM \cite{crouzeix:raviart:1973} (\(k=2\)) & $\mathrm{P2B}$ & $P_2^+(\mathcal{T})^2 \cap V$ & $P_{1}(\mathcal{T})$\\
Taylor--Hood FEM \cite{HT74} (\(k \geq 2\)) & $\mathrm{TH}_k$ & $P_k(\mathcal{T})^2 \cap V$ & $P_{k-1}(\mathcal{T}) \cap H^1(\Omega)$\\
Scott-Vogelius FEM \cite{scott:vogelius:conforming,Zhang05} (k=2) & $\mathrm{SV}$ & $P_{2}(\mathrm{bary}(\mathcal{T}))^2 \cap V$ & $P_{1}(\mathrm{bary}(\mathcal{T}))$\\
\hline
\end{tabular}
\end{center}
\end{table}
The relaxation of the divergence constraint leads to the usual best approximation error in the pressure ansatz space, i.e.
\begin{align}
\| \nabla p \|_{V_{0,h}^\star}
&:= \sup_{\vecb{v}_h \in V_{0,h} \setminus \lbrace \vecb{0} \rbrace} \frac{\int_\Omega p \mathrm{div} \vecb{v}_h \dx}{\| \nabla \vecb{v}_h \|_{L^2}} \label{eqn:divrelax_consistency_error}\\
& = \sup_{\vecb{v}_h \in V_{0,h} \setminus \lbrace \vecb{0} \rbrace} \frac{\int_\Omega (p - q_h) \mathrm{div} \vecb{v}_h \dx}{\| \nabla \vecb{v}_h \|_{L^2}}
\leq \min_{q_h \in Q_h} \| p - q_h \|_{L^2},\nonumber
\end{align}
and divergence-free methods are characterised by
\begin{align*}
V_{0,h} \subseteq \vecb{V}_0 \ \Leftrightarrow \ \| \nabla p \|_{V_{0,h}^\star} = 0 \text{ for all } p \in L^2(\Omega).
\end{align*}
For completeness, we shortly prove the classical a priori error estimate in the following theorem for the discrete solution \(\vecb{u}_h \in \vecb{u}_{D,h} + V_h\)
(where \(\vecb{u}_{D,h}\) is some suitable approximation of \(\vecb{u}_{D}\))
and \(p_h \in Q_h\) defined by
\begin{align}\label{eqn:classical_solution}
\nu ( \nabla \vecb{u}_h, \nabla \vecb{v}_h) - (p_h,\mathrm{div} \vecb{v}_h) & = (\vecb{f},\vecb{v}_h) && \text{for all } \vecb{v}_h \in V_h,\\
(q_h,\mathrm{div} \vecb{u}_h) & = 0 && \text{for all } q_h \in Q_h,\nonumber
\end{align}
or, equivalently,
\begin{align*}
\nu ( \nabla \vecb{u}_h, \nabla \vecb{v}_h) = (\vecb{f},\vecb{v}_h) \qquad \text{for all } \vecb{v}_h \in V_{0,h}.
\end{align*}
\begin{theorem}[A priori estimate for classical finite element methods]\label{thm:apriori_classical}
For the discrete velocity $\vecb{u}_h$ of \eqref{eqn:classical_solution}, it holds
\begin{align*}
\| \nabla(\vecb{u} - \vecb{u}_h) \|^2_{L^2} \leq \! \inf_{\substack{\vecb{v}_h \in V_{0,h},\\ \vecb{u}_h = \vecb{v}_h \text{ on } \partial \Omega}} \! \| \nabla(\vecb{u} - \vecb{v}_h) \|^2_{L^2} + \frac{1}{\nu^2} \| \nabla p \|^2_{V_{0,h}^\star}.
\end{align*}
\end{theorem}
\begin{proof}
The best approximation \(\vecb{w}_h \in V_{0,h}\) with boundary data \(\vecb{w}_h = \vecb{u}_h\) along \(\partial \Omega\) of \(\vecb{u}\) in the \(H^1\)-seminorm satisfies in particular
the orthogonality \( ( \nabla (\vecb{u} - \vecb{w}_h), \nabla (\vecb{u}_h - \vecb{w}_h) ) = 0\)
and therefore allows for the Pythagoras theorem
\begin{align}
\| \nabla(\vecb{u} - \vecb{u}_h) \|^2_{L^2}
&= \| \nabla(\vecb{u} - \vecb{w}_h) \|^2_{L^2} + \| \nabla(\vecb{u}_h - \vecb{w}_h) \|^2_{L^2} \nonumber\\
&= \!\!\! \inf_{\substack{\vecb{v}_h \in V_{0,h},\\ \vecb{u}_h = \vecb{v}_h \text{ on } \partial \Omega}} \!\!\! \| \nabla(\vecb{u} - \vecb{v}_h) \|^2_{L^2} + \| \nabla(\vecb{u}_h - \vecb{w}_h) \|^2_{L^2}.\label{eqn:Pythagoras_apriori}
\end{align}
The same orthogonality allows to estimate
\begin{align*}
\| \nabla(\vecb{u}_h - \vecb{w}_h) \|^2_{L^2}
& = ( \nabla (\vecb{u} - \vecb{u}_h), \nabla (\vecb{u}_h - \vecb{w}_h) )\\
& = \nu^{-1} ( p, \mathrm{div} (\vecb{u}_h - \vecb{w}_h))
\leq \nu^{-1} \| \nabla p \|_{V_{0,h}^\star} \| \nabla(\vecb{u}_h - \vecb{w}_h) \|_{L^2}.\
\end{align*}
\end{proof}
The malicious influence of the pressure-dependent error and the factor \(1/\nu\) in front of it
for classical finite element methods that are not divergence-free
was demonstrated and observed in many benchmark examples, see e.g.\ \cite{LM2015,LM2016,wias:preprint:2177,2016arXiv160903701L}.
\subsection{Pressure-robust finite element methods}
A method is called pressure-robust if its discrete velocity is pressure-independent, i.e.\ if the a priori error estimate for the velocity error
is independent of the pressure.
The key feature behind pressure-robustness for the Stokes problem
is that the testfunctions in the right-hand side are diver\-gence-free.
This can be achieved e.g.\ by fully divergence-free finite element methods (like the Scott-Vogelius finite element method)
or, focused on in this paper, by the application of some reconstruction
operator \(\Pi\) in the right-hand side of the equation (and in further terms in case of the stationary and transient Navier--Stokes equations \cite{LM2016,WIASPreprint2368}).
Hence, the modified pressure-robust finite element method (of any classical pair of inf-sup stable spaces
\(V_h\) and \(Q_h\)) searches
\(\vecb{u}_h \in \vecb{u}_{D,h} + V_h\) and \(p_h \in Q_h\) with
\begin{align}\label{eqn:modified_solution}
\nu ( \nabla \vecb{u}_h, \nabla \vecb{v}_h) - (p_h,\mathrm{div} \vecb{v}_h) & = (\vecb{f},\Pi \vecb{v}_h) = (\mathbb{P} \vecb{f},\Pi \vecb{v}_h) && \text{for all } \vecb{v}_h \in V_h,\\
(q_h,\mathrm{div} \vecb{u}_h) & = 0 && \text{for all } q_h \in Q_h.\nonumber
\end{align}
The operator \(\Pi\) maps discretely divergence-free functions onto exactly divergence-free ones, i.e.
\begin{align}\label{eqn:def_reconstoperator}
\Pi : V_h \rightarrow H(\mathrm{div},\Omega) \quad \text{with} \quad \mathrm{div} (\Pi\vecb{v}_h) = 0 \quad \text{for all } \vecb{v}_h \in V_{0,h}.
\end{align}
This implicitly defines a modified discrete Helmholtz projector
\begin{align*}
\mathbb{P}_h^\star \vecb{f} = \argmin_{\vecb{v}_h \in V_{0,h}} \| \vecb{f} - \Pi \vecb{v}_h \|_{L^2}
\end{align*}
with \(\mathbb{P}_h^\star(\nabla q) = 0\) for any \(q \in H^1(\Omega)\) or \(\| \nabla q \|^2_{(\Pi V_{0,h})^\star} = 0\) for all \(q \in L^2(\Omega)\) and so allows for a pressure-independent and locking-free a priori velocity error estimate.
\begin{theorem}[A priori estimate for pressure-robust finite element methods]\label{thm:apriori_probust}
For the solution \(\vecb{u}_h\) of \eqref{eqn:modified_solution} with a reconstruction operator \(\Pi\)
that satisfies \eqref{eqn:def_reconstoperator}, it holds
\begin{align*}
\| \nabla(\vecb{u} - \vecb{u}_h) \|^2_{L^2} \leq \! \inf_{\substack{\vecb{v}_h \in V_{0,h},\\ \vecb{u}_h = \vecb{v}_h \text{ on } \partial \Omega}} \! \| \nabla(\vecb{u} - \vecb{v}_h) \|^2_{L^2} + \| \Delta \vecb{u} \circ (1 - \Pi) \|^2_{V_{0,h}^\star}
\end{align*}
with the consistency error
\begin{align}\label{def:Pi_consistency_error}
\| \Delta \vecb{u} \circ (1 - \Pi) \|^2_{V_{0,h}^\star} :=
\sup_{\vecb{v}_h \in V_{0,h} \setminus \lbrace \vecb{0} \rbrace} \frac{\int_\Omega \Delta \vecb{u}
\cdot (1 - \Pi)\vecb{v}_h \dx}{\| \nabla \vecb{v}_h \|_{L^2}}.
\end{align}
Note, that divergence-free methods (like the Scott-Vogelius finite element method)
allow for \(\Pi = 1\) and so attain the same estimate as Theorem~\ref{thm:apriori_classical}.
\end{theorem}
\begin{proof}
Similar to the proof of Theorem~\ref{thm:apriori_classical}, it remains to estimate
the second term on the right-hand side of \eqref{eqn:Pythagoras_apriori}.
Using the orthogonality \( ( \nabla (\vecb{u} - \vecb{w}_h), \nabla (\vecb{u}_h - \vecb{w}_h) ) = 0\) we get $\| \nabla(\vecb{u}_h - \vecb{w}_h) \|^2_{L^2} =
( \nabla (\vecb{u} - \vecb{u}_h), \nabla (\vecb{u}_h - \vecb{w}_h) )$. The insertion of \(\vecb{f} = - \nu \Delta \vecb{u} + \nabla p\)
and \(\int_\Omega \nabla p \cdot \Pi (\vecb{u}_h - \vecb{w}_h) = 0\) (thanks to \eqref{eqn:def_reconstoperator}) then further shows
\begin{align*}
( \nabla (\vecb{u} - \vecb{u}_h), \nabla (\vecb{w}_h - \vecb{u}_h) )
& = ( \Delta \vecb{u} , \vecb{u}_h - \vecb{w}_h ) + \frac{1}{\nu} ( \vecb{f} , \Pi (\vecb{u}_h - \vecb{w}_h) ) \\
& = ( \Delta \vecb{u} , \vecb{u}_h - \vecb{w}_h ) - ( \Delta \vecb{u} , \Pi (\vecb{u}_h - \vecb{w}_h) ) \\
&\leq \| \Delta \vecb{u} \circ (1 - \Pi) \|_{V_{0,h}^\star} \| \nabla(\vecb{u}_h - \vecb{w}_h) \|_{L^2}.
\end{align*}
This concludes the proof.
\end{proof}
To gain optimal convergence behavior of \eqref{def:Pi_consistency_error}, the reconstruction operator additionally has to satisfy another important
property that concerns the consistency error of the modified method.
For a finite element method with optimal \(H^1\)-velocity convergence order \(k\)
and pressure \(L^2\)-convergence order \(q\) we require, for all \(\vecb{v}_h \in V_{0,h}\),
\begin{align}
(\vecb{g}, (1 - \Pi) \vecb{v}_h) \label{eq:orthogonality_on_polynomials}
& \lesssim \| h_\mathcal{T}^{q+1} D^{q-1}\vecb{g} \|_{L^2(\Omega)} \| \nabla \vecb{v}_h \|_{L^2} \quad && \text{for any } \vecb{g} \in H^{q-1}(\Omega)^2.
\end{align}
In particular, for \(\Delta \vecb{u} \in H^{q-1}(\Omega)^2\), this property directly implies
\begin{align}\label{eq:consistency_error}
\|\Delta \vecb{u} \circ (1-\Pi)\|_{V_{0,h}^\star}
\lesssim \| h_\mathcal{T}^{q+1} D^{q-1} \Delta \vecb{u} \|_{L^2(\Omega)}
\end{align}
and so ensures that the modified method still converges with the optimal order.
To be more precise, we require that the reconstruction operator satisfies some local splitting and orthogonality property that can
be formulated by
\begin{align}\label{eqn:reconstop_local_orthogonalities}
(1 - \Pi) \vecb{v}_h & = \sum_{K \in \mathcal{K}} \vecb{\sigma}_K|_{K} \ \text{with} \ \| \vecb{\sigma}_K \|_{L^2(K)}
\lesssim h_K \| \nabla \vecb{v}_h \|_{L^2(K)} \ \text{and}\\
\int_{K} \vecb{\sigma}_K \cdot \vecb{g}_h \dx & = 0 \quad \text{for all } \vecb{g}_h \in P_{q-1}(K),\nonumber
\end{align}
with $h_K := \mathrm{diam}(K)$.
Reconstruction operators \(\Pi\) with the properties \eqref{eqn:def_reconstoperator}-\eqref{eq:orthogonality_on_polynomials}
were already successfully designed for finite element methods with discontinuous pressure spaces, like the nonconforming Crouzeix-Raviart
finite element method \cite{Lin14,BLMS15}, or the Bernardi--Raugel \cite{LM2016} and \(P^2\)-bubble finite element methods \cite{LMT15,LM2016}.
In all these cases \(\Pi\) can be chosen as standard BDM interpolators with
elementwise-orthogonality with resepect to \(\mathcal{K} = \mathcal{T}\).
Recently, also for Taylor--Hood and mini finite element methods (with $k=q$) of arbitrary order such an operator was found \cite{2016arXiv160903701L}. For these
vertex-based constructions Property \eqref{eqn:reconstop_local_orthogonalities} holds with \(\mathcal{K} = \lbrace \omega_z : z \in \mathcal{N} \rbrace\).
Table~\ref{tab:RECtable} summarizes
suitable reconstruction operators, that satisfy the needed properties, for the methods from Table~\ref{tab:FEMtable}.
\begin{table}
\begin{center}
\caption{\label{tab:RECtable}Suitable reconstruction operators $\Pi$ for the classical FEMs of Table~\ref{tab:FEMtable}.}
\footnotesize
\begin{tabular}{l@{~~} @{~~}l @{~~} @{~~}l @{~~} }
FEM name & abbr. & $\Pi$ \& reference\\
\hline
Bernardi--Raugel FEM & $\mathrm{BR}$ & $I_{BDM_1}$, see \cite{LM2016}\\
Mini FEM & $\mathrm{MINI}$ & see \cite{2016arXiv160903701L} \\
$P_{k+1} \times P_{k-1}$ FEM (\(k \geq 1\))& $\mathrm{P2P0}$, $\mathrm{P3P1}$, ... & $I_{BDM_k}$ \\
P2-bubble FEM & $\mathrm{P2B}$ & $I_{BDM_2}$, see \cite{LMT15,LM2016}\\
Taylor--Hood FEM (\(k \geq 1\)) & $\mathrm{TH}_k$ & see \cite{2016arXiv160903701L}\\
Scott-Vogelius FEM & $\mathrm{SV}$ & $1$ (identity)\\
\hline
\end{tabular}
\end{center}
\end{table}
\section{(Limits of) Standard a posteriori residual-based error bounds}\label{sec:standard_estimates}
This section states and proves a posteriori error bounds for the classical and the pressure-robust finite element methods
by classical means. The resulting bounds reflect the pressure-robustness but are, in case of a
pressure-robust finite element method, rather unhandy as their efficiency relies on a good approximation of \(\mathbb{P} \vecb{f}\).
To stress this observation, the analysis is performed in some detail.
First, we define the residual for the Stokes equations by
\begin{align*}
r(\vecb{v}) &:= \int_\Omega \vecb{f} \cdot \vecb{v} \dx - \int_\Omega \nu \nabla \vecb{u}_h : \nabla \vecb{v}\dx \quad \text{for all } \vecb{v} \in \vecb{V}_0.
\end{align*}
The dual norm of the residual \(r\) with respect to \(\vecb{V}_0\) defined by
\begin{align*}
\| r \|_{\vecb{V}_0^\star} := \sup_{\vecb{v} \in \vecb{V}_{0} \setminus \lbrace \vecb{0} \rbrace} \frac{r(\vecb{v})}{\| \nabla \vecb{v} \|_{L^2}}
\end{align*}
enters the generalised error bound as the central object of a posteriori error estimation.
The error analysis also assumes the existence of a Fortin interpolation operator \(I\)
that maps from $\vecb{V}_0$ to $V_{0,h}$ and has first-order approximation properties and is \(H^1\)-stable, i.e.,
for all \(\vecb{v} \in \vecb{V}_0\), it holds
\begin{align}
\| (1- I) \vecb{v} \|_{L^2(T)} & \lesssim h_T \| \nabla \vecb{v} \|_{L^2(\omega_T)} \quad \text{for all } T\in \mathcal{T},\label{eqn:Fortinprops1} \\
\| \nabla I\vecb{v} \|_{L^2} & \lesssim \| \nabla \vecb{v} \|_{L^2} \label{eqn:Fortinprops2} .
\end{align}
For many classical finite element methods such an operator can be found in \cite{MR3097958}.
For its existence and design in the Taylor--Hood case we refer to \cite{Mardal2013,MR3272546}.
Some more details are given in Section~\ref{sec:assumptionproof} below.
The following theorem establishes a general estimate similar to \cite[Theorem 5.1]{MR2995179}
and can be extended to nonconforming methods in a similar fashion. However, our focus is
on the consistency errors \eqref{eqn:divrelax_consistency_error} and \eqref{def:Pi_consistency_error} and the dependency on \(\nu\).
\begin{theorem}\label{thm:errorbounds}
The following velocity error estimates hold:
\begin{itemize}
\item[(a)]
In general, the \(L^2\) gradient error can be estimated by
\begin{align*}
\| \nabla(\vecb{u} - \vecb{u} _h) \|_{L^2}^2 \leq \nu^{-2} \| r \|_{\vecb{V}_0^\star}^2 + 1/c_0^2 \| \mathrm{div} \vecb{u}_h \|_{L^2}^2.
\end{align*}
\item[(b)]
For the discrete solution \(\vecb{u}_h\) of the modified method \eqref{eqn:modified_solution}
(or of the classical method \eqref{eqn:classical_solution} with \(\Pi = 1\)),
the dual norm of the residual \(r\) can be bounded by
\begin{align*}
\| r \|_{\vecb{V}_0^\star} \lesssim \eta_\text{class}(\sigma,q) :=
\eta_\text{vol}(\sigma,q)
+ \eta_\text{avg}(\sigma)
+ \eta_\text{jump}(\sigma)
+ \eta_\text{cons,1}(\sigma)
+ \eta_\text{cons,2}(q)
\end{align*}
for arbitrary \(q \in H^1(\Omega)\) and \(\sigma \in H^1(\mathcal{T})^{2 \times 2}\). The
subterms read
\begin{align*}
\eta_\text{vol}(\sigma,q) & := \| h_{\mathcal{T}}(\vecb{f} - \nabla q + \nu \mathrm{div}_h(\sigma)) \|_{L^2}\\
\eta_\text{avg}(\sigma) & := \nu \| \nabla \vecb{u}_h - \sigma \|_{L^2}\\
\eta_\text{jump}(\sigma) & := \| h_\mathcal{E}^{1/2} [\nu \sigma \vecb{n}_E] \|_{L^2(\bigcup \mathcal{E}^\circ)}\\
\eta_\text{cons,1}(\sigma) & := \| \nu \mathrm{div}_h (\sigma) \circ (1 - \Pi) \|_{V_{0,h}^\star}\\
\eta_\text{cons,2}(q) & := \| \nabla q \|_{(\Pi V_{0,h})^\star}.
\end{align*}
Note that \(q\) acts as a conforming approximation of the pressure \(p\) and \(\sigma\) acts as an
approximation of \(\nabla \vecb{u}\) (in particular \(\sigma = \nabla \vecb{u}_h\) is allowed).
\end{itemize}
\end{theorem}
\begin{proof}
The proof of (a) can be found in \cite{MR2164088,ccmerdon:nonconforming2} and is based on the decomposition
$ \nu \nabla (\vecb{u} - \vecb{u} _h) = \nu \nabla \vecb{z} + y$
into some \(\vecb{z} \in \vecb{V}_0\) and some remainder
\begin{align*}
y \in Y := \left\lbrace y \in L^2(\Omega)^{d \times d} \, \vert \, \int_\Omega y : \nabla v \dx = 0 \text{ for all } v \in \vecb{V}_0 \right\rbrace.
\end{align*}
The orthogonality relations between \(\vecb{z}\) and \(y\) lead to
\begin{align*}
\| \nu^{1/2} \nabla(\vecb{u} - \vecb{u} _h) \|_{L^2}^2 = \|\nu^{1/2} \nabla \vecb{z} \|_{L^2}^2 + \| \nu^{-1/2} y \|_{L^2}^2.
\end{align*}
Since
\begin{align*}
\|\nu^{1/2} \nabla \vecb{z} \|_{L^2}^2 = \int_\Omega \nu \nabla(\vecb{u} - \vecb{u}_h) : \nabla \vecb{z} \dx = r(\vecb{z}) \leq \nu^{-1/2} \| r \|_{\vecb{V}_0^\star} \|\nu^{1/2} \nabla \vecb{z} \|_{L^2},
\end{align*}
one arrives at
$ \|\nu^{1/2} \nabla \vecb{z} \|_{L^2} \leq \nu^{-1/2} \| r \|_{\vecb{V}_0^\star}$.
This is in fact an identity, since
\begin{align*}
r(\vecb{v}) = \int_\Omega \nu \nabla \vecb{z} : \nabla \vecb{v} \dx \leq \nu^{1/2} \|\nu^{1/2} \nabla \vecb{z} \|_{L^2} \|\nabla \vecb{v} \|_{L^2}
\quad \text{for any } \vecb{v} \in \vecb{V}_0.
\end{align*}
Furthermore, there exists some \(w \in L^2(\Omega)\) such that (see \cite{MR2164088})
\begin{align*}
\| \nu^{-1/2} y \|_{L^2}^2
& = \int_\Omega \nabla (\vecb{u} - \vecb{u} _h) : y \dx
= \int_\Omega w \mathrm{div} (\vecb{u} - \vecb{u_h}) \dx \\
& \leq \| w \|_{L^2} \| \mathrm{div} (\vecb{u} - \vecb{u_h}) \|_{L^2}
\leq \nu^{1/2}/c_0 \| \nu^{-1/2} y \|_{L^2} \| \mathrm{div} \vecb{u}_h \|_{L^2}.
\end{align*}
Hence, \(\| \nu^{-1/2} y \|_{L^2} \leq \nu^{1/2}/c_0 \| \mathrm{div} \vecb{u}_h \|_{L^2}\).
This concludes the proof of (a) and it remains to prove (b).
Given any \(\vecb{v} \in \vecb{V}_0\), subtraction of its Fortin interpolation \(I \vecb{v} \in V_{0,h}\)
and \eqref{eqn:modified_solution} lead to
\begin{align*}
r(\vecb{v}) &= \int_\Omega \vecb{f} \cdot \vecb{v} \dx - \int_\Omega \nu \nabla \vecb{u}_h : \nabla \vecb{v} \dx \\
& = \int_\Omega \vecb{f} \cdot (1 - \Pi I) \vecb{v} \dx - \int_\Omega \nu \nabla \vecb{u}_h : \nabla (1 - I) \vecb{v} \dx \nonumber \\
& = \int_\Omega \vecb{f} \cdot (1 - \Pi I) \vecb{v} \dx
- \int_\Omega \nu \sigma : \nabla (1- I)\vecb{v} \dx - \int_\Omega \nu (\nabla \vecb{u}_h - \sigma): \nabla (1- I)\vecb{v}\dx \nonumber\\
& = \int_\Omega (\vecb{f} -\nabla q + \nu \mathrm{div}_h \sigma) \cdot (1 - \Pi I) \vecb{v} \dx + \sum_T \int_{\partial T} (\nu \sigma \vecb{n}) \cdot (1 - I) \vecb{v} \ds \nonumber\\
& - \int_\Omega \nu (\nabla \vecb{u}_h - \sigma): \nabla (1- I)\vecb{v} \dx + \int_\Omega \nu \mathrm{div}_h \sigma \cdot (1 - \Pi) I \vecb{v} \dx + \int_\Omega \nabla q \cdot \Pi I \vecb{v} \dx. \nonumber
\end{align*}
In the last step it was used that \(\int \nabla q \cdot \vecb{v} \dx = 0\) for any \(q \in H^1(\Omega)\), since \(\vecb{v} \in \vecb{V}_0\) is divergence-free.
The third integral is estimated by a Cauchy inequality and the \(H^1\)-stability of \(I\).
The last two integrals are estimated by discrete dual norms and the \(H^1\)-stability of \(I\).
Properties \eqref{eqn:Fortinprops1}-\eqref{eqn:Fortinprops2} of \(I\) and \eqref{eqn:reconstop_local_orthogonalities} of \(\Pi\) show
\begin{align*}
\| h_\mathcal{T}^{-1} (1 - \Pi I) \vecb{v} \|_{L^2(T)}
& \leq \| h_\mathcal{T}^{-1} (1 - I) \vecb{v} \|_{L^2(T)} + \| h_\mathcal{T}^{-1} (1 - \Pi) I \vecb{v} \|_{L^2(T)}\\
& \lesssim \| \nabla \vecb{v} \|_{L^2(\omega_T)} + \|h_\mathcal{T} \nabla (I \vecb{v}) \|_{L^2(\omega_T)}
\lesssim \|\nabla \vecb{v} \|_{L^2(\omega_T)}
\end{align*}
and hence together with some Cauchy inequalities
\begin{align*}
\int_\Omega (\vecb{f} -\nabla q + \nu \Delta_\mathcal{T} \vecb{u}_h) &\cdot (1 - \Pi I) \vecb{v} \dx \\
& \leq \sum_{T \in \mathcal{T}} \| h_\mathcal{T}(\vecb{f} -\nabla q + \nu \Delta_\mathcal{T} \vecb{u}_h) \|_{L^2(T)} \| h_\mathcal{T}^{-1} (1 - \Pi I) \vecb{v} \|_{L^2(T)}\\
& \lesssim \eta_\text{vol}(\sigma,q) \left(\sum_{T \in \mathcal{T}} \|\nabla \vecb{v} \|_{L^2(\omega_T)}^2\right)^{1/2} \lesssim \eta_\text{vol}(\sigma,q) \|\nabla \vecb{v} \|_{L^2}.
\end{align*}
Similar arguments hold for the edge-based integral using a trace inequality and Properties \eqref{eqn:Fortinprops1}-\eqref{eqn:Fortinprops2}, i.e.
\begin{align*}\sum_{E \in \mathcal{E}^\circ}
\int_E [\nu \sigma\vecb{n}] \cdot (\vecb{v} - I \vecb{v}) \ds
& \leq \sum_{E \in \mathcal{E}^\circ} \| [\nu \sigma\vecb{n}] \|_{L^2(E)} \|\vecb{v} - I \vecb{v}\|_{L^2(E)}\\
& \leq \sum_{E \in \mathcal{E}^\circ} h_E^{1/2} \| [\nu \sigma\vecb{n}] \|_{L^2(E)} \|\nabla \vecb{v}\|_{L^2(\omega_E)}\\
& \leq \|h_\mathcal{E}^{1/2} [\nu \sigma\vecb{n}] \|_{L^2( \mathcal{E}^\circ)} \| \nabla \vecb{v} \|_{L^2} = \eta_\text{jump}(\sigma) \| \nabla \vecb{v} \|_{L^2}.
\end{align*}
This concludes the proof of (b).
\end{proof}
\begin{remark}
Some remarks are in order:
\begin{itemize}[fullwidth]
\item
The existence of \(w\) in the last part of the proof of (a) needs \(\vecb{u} - \vecb{u}_h \in H^1_0(\Omega)^2\).
In case of inhomogeneous Dirichlet boundary data or nonconforming discretisations \(\vecb{u}_h \notin H^1(\Omega)^2\), one can introduce a function \(\vecb{w} \in H^1(\Omega)^2\)
(e.g.\ the harmonic extension of the boundary data error \(\vecb{u}_D - \vecb{u}_{D,h}\) \cite{MR2101782}
plus some \(H^1\)-conforming boundary-data preserving interpolation of \(\vecb{u}_h\) \cite{MR2164088,MR2995179,ccmerdon:nonconforming2})
with \(\vecb{w} = \vecb{u}_D\) along \(\partial \Omega\) such that \(\vecb{u} - \vecb{w} \in H^1_0(\Omega)^2\). Then, a modified estimation of the second term yields
\begin{align*}
\| \nu^{-1/2} y \|_{L^2} \leq \nu^{1/2}/c_0 \| \mathrm{div} \vecb{w} \|_{L^2} + \nu^{1/2} \| \nabla_h ( \vecb{u}_h - \vecb{w}) \|_{L^2}.
\end{align*}
\item
The term \(\eta_\text{cons,1}(\sigma) = \| \nu \mathrm{div}_h (\sigma) \circ (1 - \Pi) \|_{V_{0,h}^\star}\) only appears for \(\Pi \neq 1\)
as in the novel pressure-robust methods and equals the consistency error
\eqref{def:Pi_consistency_error} for \(\sigma = \nabla \vecb{u}_h\).
\item Recall that \(\eta_\text{cons,2}(q) = 0\) if \(\Pi\) satisfies \eqref{eqn:def_reconstoperator} or if \(q \in Q_h\) and \(\Pi = 1\).
\end{itemize}
\end{remark}
The following theorem studies the efficiency of the contributions of the standard residual error estimators from Theorem~\ref{thm:errorbounds}
for the explicit choice \(\sigma = \nabla \vecb{u}_h\).
\begin{theorem}[Efficiency for \(\sigma = \nabla \vecb{u}_h\)]\label{thm:efficiency}
For \(\sigma = \nabla \vecb{u}_h\) all terms of the residual-based error estimator of Theorem~\ref{thm:errorbounds} are efficient
possibly up to data oscillations
\begin{align*}
\mathrm{osc}_k(\bullet,\mathcal{T})^2 := \sum_{T \in \mathcal{T}} h_T^2 \| (1 - \pi_{P_k(T)}) \bullet \|_{L^2(T)}^2
\end{align*}
and up to pressure contributions (either from the lack of pressure-robustness or from the quality of the approximation of \(p\) by \(q\))
in the following sense.
\begin{itemize}
\item[(a)] For the divergence term it holds \(\| \mathrm{div} \vecb{u}_h \|_{L^2} \leq \| \nabla (\vecb{u} - \vecb{u}_h) \|_{L^2}.\)
\item[(b)] For the volume term \(\eta_\text{vol}(q,\nabla \vecb{u}_h)\), it holds
\begin{align*}
\nu^{-1} \| h_T(\vecb{f} - \nabla q + \nu \Delta_\mathcal{T} \vecb{u}_h) \|_{L^2}
&\lesssim \| \nabla (\vecb{u} - \vecb{u}_h) \|_{L^2} \\
& \quad + \nu^{-1} \left(\| p - q \|_{L^2} + \mathrm{osc_k}(\vecb{f} - \nabla q,\mathcal{T})\right).
\end{align*}
\item[(c)] For the jump term \(\eta_\text{jump}(\nabla \vecb{u}_h)\), it holds
\begin{align*}
\nu^{-1} \| h_\mathcal{E}^{1/2} [\nu \nabla \vecb{u}_h \vecb{n}_E] \|_{L^2(\bigcup \mathcal{E^\circ})} \lesssim \| \nabla (\vecb{u} - \vecb{u}_h) \|_{L^2} + \nu^{-1} \mathrm{osc_k}(\vecb{f} - \nabla p,\mathcal{T}).
\end{align*}
\item[(d)] If \(\Pi\) satisfies \eqref{eqn:reconstop_local_orthogonalities}, the consistency error \(\eta_\text{cons,1}(\nabla \vecb{u}_h)\) is efficient in the sense
\begin{align*}
\| \Delta_\mathcal{T} \vecb{u}_h \circ (1 - \Pi) \|_{V_{0,h}^\star}
\lesssim \| \nabla (&\vecb{u} - \vecb{u}_h) \|_{L^2} \\ &+ \nu^{-1} \left(\mathrm{osc}_{k}(\vecb{f} - \nabla p,\mathcal{T}) + \mathrm{osc}_{q-1}(\vecb{f} - \nabla p,\mathcal{K}) \right)
\end{align*}
\item[(e)] For the consistency error \(\eta_\text{cons,2}(q)\), it holds
\begin{align*}
\| \nabla q \|_{(\Pi V_{0,h})^\star} & \leq
\begin{cases}
0 & \text{if } \Pi \text{ satisfies } \eqref{eqn:def_reconstoperator},\\
\min_{q_h \in Q_h} \| q - q_h \|_{L^2} & \text{if } \Pi = 1 \text{ without } \eqref{eqn:def_reconstoperator}.
\end{cases}
\end{align*}
\end{itemize}
\end{theorem}
\begin{proof}
The proof of (a) simply uses \(\mathrm{div} \vecb{u} = 0\) to estimate
\begin{align*}
\| \mathrm{div} \vecb{u}_h \|_{L^2} = \| \mathrm{div} (\vecb{u} - \vecb{u}_h) \|_{L^2} \leq \| \nabla (\vecb{u} - \vecb{u}_h) \|_{L^2}.
\end{align*}
The last inequality follows from the identity \(\| \nabla \vecb{v} \|^2 = \| \mathrm{curl} \vecb{v} \|^2 + \| \mathrm{div} \vecb{v} \|^2\) for any \(\vecb{v} \in H^1_0(\Omega)^2\),
see e.g.\ \cite[Remark 2.6]{MR1626990}.
The proof of (b) and (c) is standard and employs the bubble-technique of Verf\"urth, see e.g.\ \cite{MR993474,MR1213837} or the proof of Theorem~\ref{thm:efficiency_new_estimate} below.
To show (d), observe that Property \eqref{eqn:reconstop_local_orthogonalities} leads to
\begin{align*}
&\int_\Omega \nu \Delta_\mathcal{T} \vecb{u}_h \cdot (1 - \Pi) \vecb{v}_h \dx = \sum_{K \in \mathcal{K}} \int_{K} \nu \Delta_\mathcal{T} \vecb{u}_h \cdot \sigma_K \dx\\
& = \sum_{K \in \mathcal{K}} \int_{K} (\vecb{f} - \nabla p + \nu \Delta_\mathcal{T} \vecb{u}_h) \cdot \sigma_K \dx - \int_{K} (1 - \pi_{P_{q-1}(K)})(\vecb{f} - \nabla p )\cdot \sigma_K \dx\\
& \lesssim \sum_{K \in \mathcal{K}} h_K \left( \| \vecb{f} - \nabla p+ \nu \Delta_\mathcal{T} \vecb{u}_h\|_{L^2(K)} \right. \\ &\qquad \qquad \qquad \qquad\qquad \qquad \left. + \|(1 - \pi_{P_{q-1}(K)}) (\vecb{f} - \nabla p) \|_{L^2(K)} \right)\| h_K^{-1} \sigma_K \|_{L^2(K)}\\
& \lesssim \left( \sum_{K \in \mathcal{K}} \| h_K ( \vecb{f} - \nabla p + \nu \Delta_\mathcal{T} \vecb{u}_h) \|_{L^2(K)}^2\right)^{1/2} \| \nabla \vecb{v}_h \|_{L^2}
+ \mathrm{osc}_{q-1}(\vecb{f} - \nabla p,\mathcal{K}) \| \nabla \vecb{v}_h \|_{L^2}\\
& = \left(\eta_\text{vol}(p,\nabla \vecb{u}_h) + \mathrm{osc}_{q-1}(\vecb{f} - \nabla p,\mathcal{K})\right) \| \nabla \vecb{v}_h \|_{L^2}.
\end{align*}
A division by \(\| \nabla \vecb{v}_h \|_{L^2}\) and the result from (b) conclude the proof of (d).
The proof of (e) is straightforward and employs integration by parts and the
orthogonality of \(\mathrm{div}(\vecb{v}_h)\) onto all \(q_h \in Q_h\) if \(\Pi = 1\) does not satisfy \eqref{eqn:def_reconstoperator}.
Otherwise, if \(\Pi\) satisfies \eqref{eqn:def_reconstoperator}, the assertion follows from \(\mathrm{div}(\Pi \vecb{v}_h) = 0\).
\end{proof}
\begin{remark}
Theorem~\ref{thm:efficiency}.(b) shows the pressure-dependence also in the efficiency estimate. The volume term \(\eta_\text{vol}(q,\nabla \vecb{u}_h)\) scales with
the term \(\nu^{-1} \| p - q\|_{L^2}\). Hence, a pressure-robust method is only efficient with a good approximation \(q \approx p\).
In the hydrostatic (worst) case with \(\vecb{u}_h = 0\) and \(\vecb{f} = \nabla p\), \(\eta_\text{vol}(q,\nabla \vecb{u}_h)\) is not zero
(hence inefficient with efficiency index infinity) as long as \(q \neq p\) is inserted.
To compute the correct pressure is in general impossible or expensive.
Some strategy to find an approximation that at least yields a higher-order term is discussed
in \cite{MR3366087}.
Note however, that \(\eta_\text{vol}(q,\nabla \vecb{u}_h)\) is efficient for a classical, non-pressure-robust method with \(q_h = p_h\) (or some suitable \(H^1\)-approximation),
since then the discrete velocity error itself
also depends on \(\nu^{-1} \|p - p_h\|_{L^2}\), see e.g.\ our numerical examples in Section~\ref{sec:numerics}.
\end{remark}
\section{Refined residual-based error bounds}\label{novelestimates}
This section offers an alternative a posteriori error estimator and is related to the stream function and vorticity formulation of the Navier--Stokes
equations. The analysis employs the two-dimensional curl operators for vector and scalar fields
\begin{align*}
\mathrm{curl} \vecb{\phi} := ( \partial \phi_2 / \partial x -\partial \phi_1 / \partial y)
\quad &\text{for } \vecb{\phi}=(\phi_1,\phi_2) \in H^1(\Omega)^2,\\
\mathrm{curl} \phi := \begin{pmatrix} -\partial \phi / \partial y \\ \partial \phi / \partial x \end{pmatrix} \quad &\text{for } \phi \in H^1(\Omega).
\end{align*}
The outcome of this alternative approach is a different volume term that only takes
\(\mathrm{curl}(\vecb{f})\) into account and so automatically cancels the gradient part of
the Helmholtz decomposition. Hence, no knowledge or good approximation of \(\mathbb{P} \vecb{f}\) is needed.
The resulting terms are related to the terms in \cite{MR1645033} where error indicators for
discretisations of the streamline and vorticity formulation were derived. However, our error estimator holds for pressure-robust finite element
methods for the velocity and pressure formulation of the Navier--Stokes equations.
Given a Fortin interpolator \(I\) and a reconstruction operator \(\Pi\) with
\eqref{eqn:def_reconstoperator} (possibly \(\Pi = 1\) for divergence-free finite element methods like
the Scott-Vogelius finite element method),
the novel approach exploits that \(\Pi I \vecb{v}\) for some divergence-free function \(\vecb{v} \in \vecb{V}_0\) is again a divergence-free function in \(L^2_\sigma(\Omega)\). Our analysis needs the following assumption on the two operators
additional to \eqref{eqn:def_reconstoperator} and \eqref{eqn:Fortinprops1}-\eqref{eqn:Fortinprops2}.
\begin{assumption} \label{assumption1}
For every \(\vecb{v} \in \vecb{V}_0\), the Fortin interpolator \(I\) and the reconstruction operator \(\Pi\) satisfy
\begin{align*}
\Pi I \vecb{v} \in L^2_\sigma(\Omega) \quad \text{and hence} \quad \int_\Omega (1 - \Pi I) \vecb{v} \cdot \nabla q \dx = 0 \quad \text{for all } q \in H^1(\Omega),
\end{align*}
and the estimate
\begin{align*}
\int_\Omega (1 - \Pi I) \vecb{v} \cdot \vecb{\theta} \dx
\lesssim \| \nabla \vecb{v} \|_{L^2} \| h_\mathcal{T}^{2} \mathrm{curl} \vecb{\theta} \|_{L^2} \quad \text{for all } \vecb{\theta} \in H(\mathrm{curl},\Omega).
\end{align*}
\end{assumption}
\begin{theorem}[Novel error estimator for pressure-robust methods]\label{thm:eta_averaging_pr}
For \(\vecb{u}_h\) of \eqref{eqn:modified_solution}
and any \(\sigma \in H^1(\mathcal{T})^{2 \times 2}\) (that approximates or equals \(\nabla \vecb{u}_h\)), the error estimator
\begin{align*}
\eta_\text{new}(\sigma) :=
\eta_\text{curl}(\sigma)
+ \eta_\text{jump}(\sigma)
+ \eta_\text{jump,2}(\sigma)
+ \eta_\text{avg}(\sigma)
+ \eta_\text{cons,1}(\sigma)
\end{align*}
with the subterms
\begin{align*}
\eta_\text{curl}(\sigma) & := \| h_\mathcal{T}^2 \mathrm{curl}_\mathcal{T} (\vecb{f} + \nu \mathrm{div}_h \sigma) \|_{L^2} \\
\eta_\text{jump}(\sigma) & := \| h_\mathcal{E}^{1/2} [\nu \sigma \vecb{n}_E] \|_{L^2(\mathcal{E}^\circ)}\\
\eta_\text{jump,2}(\sigma) & := \| h_\mathcal{E}^{3/2} [(\vecb{f} + \nu \mathrm{div}_h \sigma) \cdot \vecb{\tau_E}] \|_{L^2(\mathcal{E}^\circ)}\\
\eta_\text{avg}(\sigma) & := \nu \| \nabla \vecb{u}_h - \sigma \|_{L^2}\\
\eta_\text{cons,1}(\sigma) & := \| \nu \mathrm{div}_h (\sigma) \circ (1 - \Pi) \|_{V_{0,h}^\star}
\end{align*}
satisfies
\begin{align*}
\| r \|_{\vecb{V}_0^\star} \lesssim \eta_\text{new}(\sigma)
\quad \text{and hence} \quad \| \nabla(\vecb{u} - \vecb{u}_h) \|_{L^2}^2 \lesssim \nu^{-2} \eta_\text{new}(\sigma)^2 + \frac{1}{c_0^2} \| \mathrm{div} \vecb{u}_h \|^2_{L^2}.
\end{align*}
Note in particular, that the volume contribution \(\eta_\text{vol}(q,\sigma)\) from Theorem~\ref{thm:errorbounds} has been replaced by the quantity \(\eta_\text{curl}(\sigma)\)
that is pressure-independent (or \(q\)-independent).
\end{theorem}
\begin{proof}
As in the estimation of \(\| r \|_{\vecb{V}_0^\star}\) in the proof of Theorem~\ref{thm:errorbounds}.(b), we subtract the Fortin interpolation \(I\vecb{v}\)
of any testfunction \(\vecb{v}\) by employing \eqref{eqn:modified_solution}, i.e.
\begin{align*}
r(v) & = \int_\Omega \vecb{f} \cdot (\vecb{v} - \Pi I \vecb{v}) \dx
- \nu \int_\Omega \nabla \vecb{u}_h : \nabla(\vecb{v} - I \vecb{v}) \dx.
\end{align*}
Given any \(\sigma \in H^1(\mathcal{T})^{2 \times 2}\), an (element-wise) integration by parts shows
\begin{multline*}
r(v) = \int_\Omega (\vecb{f} + \nu \mathrm{div}_h \sigma) \cdot (\vecb{v} - \Pi I \vecb{v}) \dx
+ \nu \int_\Omega (\sigma - \nabla \vecb{u}_h) : \nabla (\vecb{v} - I \vecb{v}) \dx\\
+ \nu \sum_{E \in \mathcal{E}^\circ} \int_E [\sigma\vecb{n}] \cdot (\vecb{v} - I \vecb{v}) \ds
+ \nu \int_\Omega (\mathrm{div}_h \sigma) \cdot (\Pi I \vecb{v} - I\vecb{v}) \dx
=: A + B + C + D.
\end{multline*}
The terms \(B,C\) and \(D\) are estimated as in Theorem~\ref{thm:errorbounds}.(b) by
\begin{align*}
B & := \nu \sum_{T \in \mathcal{T}}
\int_T (\sigma - \nabla \vecb{u}_h): \nabla (\vecb{v} - I \vecb{v}) \dx
\leq \nu \| \sigma - \nabla \vecb{u}_h \|_{L^2} \| \nabla \vecb{v} \|_{L^2}\\
C & := \nu \sum_{E \in \mathcal{E}^\circ}
\int_E [\sigma\vecb{n}] \cdot (\vecb{v} - I \vecb{v}) \ds
\leq \nu \|h_\mathcal{E}^{1/2} [\sigma\vecb{n}] \|_{L^2( \mathcal{E}^\circ)} \| \nabla \vecb{v} \|_{L^2}\\
D &:= \nu \int_\Omega (\mathrm{div}_h \sigma) \cdot (\Pi I \vecb{v} - I\vecb{v}) \dx
\leq \nu \| (\mathrm{div}_h \sigma) \circ (1 - \Pi) \|_{V_{0,h}^\star} \| \nabla \vecb{v} \|_{L^2}.
\end{align*}
It remains to estimate term \(A\). As $\vecb{v} - \Pi I \vecb{v}$ is exactly divergence free and has a zero normal trace we can apply Theorem 3.1, chapter 1 in \cite{GR86} to find a scalar potential $\psi \in H^1_0(\Omega)$ with $\mathrm{curl} \psi = \vecb{v} - \Pi I \vecb{v}$.
In the following we bound the weighted $L^2$ norm of $\psi$. Note that from $ h_{\mathcal{T}}^{-4} \psi \in L^2(\Omega)$ follows $h_{\mathcal{T}}^{-2} \psi \in h_{\mathcal{T}}^{2} \mathrm{curl}( H(\mathrm{curl}, \Omega))$, due to the surjectivity of the $\mathrm{curl}$ operator (de Rham complex) and so
\begin{align*}
\| h_{\mathcal{T}}^{-2} \psi \|_{L^2(\Omega)} &= \frac{\int_{\Omega} h_{\mathcal{T}}^{-2} \psi h_{\mathcal{T}}^{-2} \psi \dx}{\| h_{\mathcal{T}}^{-2} \psi \|_{L^2(\Omega)}} \\
&\le \sup\limits_{\vecb{\theta} \in H(\mathrm{curl}, \Omega)} \frac{\int_{\Omega} h_{\mathcal{T}}^{-2} \psi h_{\mathcal{T}}^{2} \mathrm{curl} \vecb{\theta}\dx}{\| h_{\mathcal{T}}^{2} \mathrm{curl}\vecb{\theta} \|_{L^2(\Omega)} } = \sup\limits_{\vecb{\theta} \in H(\mathrm{curl}, \Omega)} \frac{\int_{\Omega} \psi \mathrm{curl} \vecb{\theta}\dx}{\| h_{\mathcal{T}}^{2} \mathrm{curl}\vecb{\theta} \|_{L^2(\Omega)} }.
\end{align*}
On the other hand one can bound the supremum by $\| h_{\mathcal{T}}^{-2} \psi \|_{L^2(\Omega)}$ with a simple Cauchy--Schwarz estimate. Using Assumption~\ref{assumption1} it follows by an integration by parts and $\psi \in H^1_0(\Omega)$ that
\begin{align} \label{eqn:psiltwonorm}
\| h_{\mathcal{T}}^{-2} \psi \|_{L^2(\Omega)} & = \sup\limits_{\vecb{\theta} \in H(\mathrm{curl}, \Omega)} \frac{\int_{\Omega} \psi \mathrm{curl} \vecb{\theta} \dx}{\| h_{\mathcal{T}}^{2} \mathrm{curl} \vecb{\theta} \|_{L^2(\Omega)} } \\
&= \sup\limits_{\vecb{\theta} \in H(\mathrm{curl}, \Omega)} \frac{\int_{\Omega} \mathrm{curl} \psi \cdot \vecb{\theta} \dx}{\|h_{\mathcal{T}}^{2} \mathrm{curl} \vecb{\theta} \|_{L^2(\Omega)} } \lesssim \| \nabla \vecb{v} \|_{L^2(\Omega)}. \nonumber
\end{align}
With $\vecb{\theta}_h := \vecb{f} + \nu \mathrm{div}_h \sigma$ and $\psi = 0 $ on $\partial \Omega$ a piecewise integration by parts yields
\begin{align*}
A &:= \int_\Omega \vecb{\theta}_h \cdot (\vecb{v} - \Pi I \vecb{v}) \dx = \int_\Omega \vecb{\theta}_h \cdot \mathrm{curl} \psi \dx\\
&= \sum_{T \in \mathcal{T}} \int_T \mathrm{curl} \vecb{\theta}_h \psi \dx + \sum_{E \in \mathcal{E}^\circ} \int_E [\vecb{\theta}_h \cdot \vecb{\tau_E}] \psi \ds\\
& \lesssim \sum_{T \in \mathcal{T}} \| h_T^2 \mathrm{curl} \vecb{\theta}_h \|_{L^2(T)} \| h_T^{-2} \psi \|_{L^2(T)} + \sum_{E \in \mathcal{E}^\circ} \| h_E^{3/2} [\vecb{\theta}_h \cdot \vecb{\tau_E}]\|_{L^2(E)} \| h_E^{- 3/2} \psi \|_{L^2(E)}\\
&\lesssim \left( \| h_\mathcal{T}^2 \mathrm{curl}_\mathcal{T} \vecb{\theta}_h \|_{L^2(\Omega)} + \| h^{3/2} [\vecb{\theta}_h \cdot \vecb{\tau_E}]\|_{L^2(\mathcal{E}^\circ)} \right) \left( \| h_\mathcal{T}^{-2} \psi \|_{L^2(\Omega)} + \| h_\mathcal{E}^{- 3/2} \psi \|_{L^2(\mathcal{E}^\circ)} \right).
\end{align*}
Using a standard scaling argument we get, for each edge $E \in \mathcal{E}^\circ$,
\begin{align*}
\| h_E^{- 3/2} \psi \|_{L^2(E)} \lesssim h_T^{- 2} \|\psi\|_{L^2(T)} + h_T^{-1} \| \nabla \psi \|_{L^2(T)}.
\end{align*}
For the second term in the previous estimate we have
\begin{align*}
h_T^{-1} \| \nabla \psi \|_{L^2(T)} = h_T^{-1} \| \mathrm{curl} \psi \|_{L^2(T)} = h_T^{-1} \| \vecb{v} - \Pi I \vecb{v} \|_{L^2(T)} \lesssim \| \nabla \vecb{v} \|_{L^2(\omega_T)}.
\end{align*}
Together with \eqref{eqn:psiltwonorm} and an overlap argument this leads to
\begin{align*}
\| h_\mathcal{E}^{- 3/2} \psi \|_{L^2(\mathcal{E}^\circ)} &\lesssim \|h_\mathcal{T}^{- 2} \psi \|_{L^2(\Omega)} + \|h_\mathcal{T}^{-1} \nabla \psi \|_{L^2(\Omega)} \lesssim \| \nabla \vecb{v} \|_{L^2(\Omega)}.
\end{align*}
This concludes the estimate for $A$, i.e.
\begin{align*}
A \lesssim \left( \eta_\text{curl}(\sigma) + \eta_\text{jump,2}(\sigma) \right) \| \nabla \vecb{v} \|_{L^2(\Omega)}.
\end{align*}
The collection of all separate estimates for \(A\) to \(D\) shows
\begin{align*}
r(\vecb{v}) \lesssim \eta_\text{new}(\sigma) \| \nabla \vecb{v} \|_{L^2}
\end{align*}
and a division by \(\| \nabla \vecb{v} \|_{L^2}\) concludes the proof.
\end{proof}
The same techniques also yield a novel error estimate for classical methods.
\begin{proposition}[Novel error estimator for classical methods]\label{thm:eta_averaging_cl}
For \(\vecb{u}_h\) of \eqref{eqn:classical_solution}
and any \(\sigma \in H^1(\mathcal{T})^{2 \times 2}\) (that approximates or equals \(\nabla \vecb{u}_h\)),
the error estimator
\begin{multline*}
\eta_\text{new}(\sigma) :=
\eta_\text{curl}(\sigma)
+ \eta_\text{jump}(\sigma)
+ \eta_\text{avg}(\sigma)
+ \| (\vecb{f} + \nu \mathrm{div}_h \sigma) \circ (1 - \Pi) \|_{V_{0,h}^\star}
\end{multline*}
satisfies
\begin{align*}
\| r \|_{\vecb{V}_0^\star} \lesssim \eta_\text{new}(\sigma)
\quad \text{and hence} \quad \| \nabla(\vecb{u} - \vecb{u}_h) \|_{L^2}^2 \lesssim \nu^{-2} \eta_\text{new}(\sigma)^2 + \frac{1}{c_0^2} \| \mathrm{div} \vecb{u}_h \|^2_{L^2}.
\end{align*}
Note, that \(\Pi\) is used only in the error estimator here, but not in the calculation of \(\vecb{u}_h\). It is not allowed to set \(\Pi = 1\)
if the classical method is not divergence-free, i.e. \(\Pi\) has to satisfy \eqref{eqn:def_reconstoperator}. The difference to the previous theorem lies in the
appearance of \(\vecb{f}\) in the consistency error \(\| (\vecb{f} + \nu \mathrm{div}_h \sigma) \circ (1 - \Pi) \|_{V_{0,h}^\star}\).
\end{proposition}
\begin{proof}
The proof follows the proof of Theorem~\ref{thm:eta_averaging_pr} but one has to add the
term \(\int_\Omega \vecb{f} \cdot (\Pi I \vecb{v} - I \vecb{v}) \dx\), which can be absorbed into the estimate of term \(D\).
\end{proof}
The next theorem establishes the efficiency of the novel terms \(\eta_\text{curl}(\sigma)\)
and \(\eta_\text{jump,2}(\sigma)\) for \(\sigma = \nabla \vecb{u}_h\). For the efficiency
of the other terms see Theorem~\ref{thm:efficiency}.
\begin{theorem}[Efficiency for \(\sigma = \nabla \vecb{u}_h\)]\label{thm:efficiency_new_estimate}
It holds
\begin{itemize}
\item[(a)] \( \nu^{-1} h_T^2 \| \mathrm{curl}_\mathcal{T}(\vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h) \|_{L^2(T)} \lesssim \|\nabla ( \vecb{u} - \vecb{u}_h) \|_{L^2(T)} \)\\
\hspace*{5cm} \( + \nu^{-1} h_T \mathrm{osc}_k(\mathrm{curl}(\vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h),T),\)
\item[(b)] \( \nu^{-1} h_E^{3/2} \| [(\vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h) \cdot \vecb{\tau_E}] \|_{L^2(E)} \lesssim \|\nabla ( \vecb{u} - \vecb{u}_h) \|_{L^2(\omega_E)}\) \\
\hspace*{5cm} \(+ \nu^{-1} h_E \mathrm{osc}_k(\mathrm{curl}_\mathcal{T}(\vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h),\mathcal{T}(E))\)\\
\hspace*{5cm} \(+ \nu^{-1} h_E \mathrm{osc}_k([\vecb{f} \cdot \vecb{\tau_E}],E) + \mathrm{osc_k}(\vecb{f} - \nabla p,\mathcal{T}(E)),\)
\end{itemize}
for all \(T \in \mathcal{T}\) and \(E \in \mathcal{E}^\circ\).
\end{theorem}
\begin{proof}
The proof employs the standard Verf\"urth bubble-technique. To shorten the notation in the proof
of (a), we define
\begin{align*}
Q_T := \mathrm{curl}(\vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h)|_T \quad \text{for any } T \in \mathcal{T}.
\end{align*}
Then, it holds (similarly to \cite{MR1213837})
\begin{align*}
\| &\pi_{P_k(T)} Q_{T} \|_{L^2(T)} \\
& \lesssim \sup_{\vecb{v}_T \in P_k(T)^2} \int_T \pi_{P_k(T)} Q_{T} \cdot (b_T^2 \vecb{v}_T) \dx / \| \vecb{v}_T \|_{L^2(T)}\\
& \leq \sup_{\vecb{v}_T \in P_k(T)^2} \frac{\int_T Q_{T} b_T^2 \vecb{v}_T \dx}{\| \vecb{v}_T \|_{L^2(T)}}
+ \sup_{\vecb{v}_T \in P_k(T)^2} \frac{\|Q_{T} - \pi_{P_k(T)} Q_{T}\|_{L^2(T)} \| b_T^2 \vecb{v}_T \|_{L^2(T)}}{ \| \vecb{v}_T \|_{L^2(T)}} .
\end{align*}
Testing the continuous system with the (divergence-free) testfunction \(\mathrm{curl} (b_T^2 \vecb{v}_T) \in H^2(T)^2 \cap H^1_0(\Omega)^2\) and an integration by parts leads to
\begin{align*}
\int_T Q_{T} b_T^2 \vecb{v}_T \dx
& = \int_T (\vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h) \cdot \mathrm{curl} (b_T^2 \vecb{v}_T) \dx\\
& = \int_T \nu \nabla ( \vecb{u} - \vecb{u}_h) : \nabla \mathrm{curl} (b_T^2 \vecb{v}_T) \dx\\
& \leq \nu \| \nabla ( \vecb{u} - \vecb{u}_h) \|_{L^2(T)} \| \nabla \mathrm{curl} (b_T^2 \vecb{v}_T) \|_{L^2(T)}.
\end{align*}
A discrete inverse inequality shows \(\| \nabla \mathrm{curl} (b_T^2 \vecb{v}_T) \|_{L^2(T)} \lesssim h_T^{-2} \| b_T^2 \vecb{v}_T \|_{L^2(T)}\).
This and the norm equivalence \(\| b_T^2 \vecb{v}_T \|_{L^2(T)} \approx \| \vecb{v}_T \|_{L^2(T)}\) lead to
\begin{align*}
h_T^2 \| \pi_{P_k(T)} Q_{T} \|_{L^2(T)} \lesssim \nu \|\nabla ( \vecb{u} - \vecb{u}_h) \|_{L^2(T)} + h_T^2 \|Q_{T} - \pi_{P_k(T)} Q_{T}\|_{L^2(T)}.
\end{align*}
This concludes the proof of (a).
In the proof of (b), we use the notation
\begin{align*}
Q_E := [\vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h] \cdot \vecb{\tau_E} \quad \text{for any } E \in \mathcal{E}
\end{align*}
and the face bubble \(b_E\) with support \(\omega_E\) for every face \(E \in \mathcal{E}\).
Then,
\begin{align*}
\| \pi_{P_k(E)} Q_E \|_{L^2(E)} \lesssim \sup_{\vecb{v}_E \in P_k(E)^2} \frac{\int_E Q_E \cdot (b_E^2 \vecb{v}_E) \ds}{ \| \vecb{v}_E \|_{L^2(E)}}
+ \| Q_E - \pi_{P_k(E)} Q_E \|_{L^2(E)}.
\end{align*}
Testing the continuous equation with the divergence-free testfunction \(\mathrm{curl} (b_E^2 \vecb{v}_E) \in H^1_0(\Omega)\) (where \(\vecb{v}_E\) is reasonably extended to \(\omega_E\))
and an integration by parts show
\begin{align*}
\int_E Q_E & \cdot (b_E^2 \vecb{v}_E) \ds\\
& = \int_E [(\vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h) \cdot \vecb{\tau_E}] \cdot (b_E^2 \vecb{v}_E) \ds\\
& = \int_{\omega_E} (\vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h) : \mathrm{curl}(b_E^2 \vecb{v}_E) \dx
- \int_{\omega_E} \mathrm{curl}(\vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h) : (b_E^2 \vecb{v}_E) \dx\\
& \leq \| \vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h \|_{L^2(\omega_E)} \| \mathrm{curl} (b_E^2 \vecb{v}_E) \|_{L^2(\omega_E)}
+ \| Q_T \|_{L^2(\omega_E)} \| b_E^2 \vecb{v}_E \|_{L^2(\omega_E)}.
\end{align*}
A discrete inverse inequality \(\| \mathrm{curl} (b_E^2 \vecb{v}_E) \|_{L^2(\omega_E)} \lesssim h_T^{-1} \| b_E^2 \vecb{v}_E \|_{L^2(\omega_E)}\)
and a scaling argument (see \cite{MR1213837}), that yields \(\| b_E^2 \vecb{v}_E \|_{L^2(\omega_E)} \lesssim h_T^{1/2} \| \vecb{v}_E \|_{L^2(E)}\), show
\begin{align*}
h_E^{3/2} \| \pi_{P_k(E)} Q_{E} \|_{L^2(E)} \lesssim & h_T \| \vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h \|_{L^2(\omega_E)} + h_T^{2}\| Q_T \|_{L^2(\omega_E)} \\
&+ h_E^{3/2} \| Q_E - \pi_{P_k(E)} Q_E \|_{L^2(E)}.
\end{align*}
The proof of Theorem~\ref{thm:efficiency}.(c) yields
\begin{align*}
\| \vecb{f} + \nu \Delta_\mathcal{T} \vecb{u}_h \|_{L^2(\omega_E)}
\lesssim \nu \| \nabla (\vecb{u} - \vecb{u}_h) \|_{L^2} + \mathrm{osc_k}(\vecb{f} - \nabla q,\mathcal{T}(E)).
\end{align*}
This and the already proven result from (a) conclude the proof.
\end{proof}
\section{Proof of Assumption~\ref{assumption1} for certain finite element methods}\label{sec:assumptionproof}
This section proves Assumption~\ref{assumption1} for certain finite element methods. For the analysis several standard interpolation operators that are related to the de
Rahm complex (see e.g.\ \cite{MR2373173}) are employed. These are a (projection based) nodal interpolation operator \( I_{\mathcal{L}} \), the lowest order Raviart-Thomas interpolation
operator \( I_{RT_0} \) and the lowest-order N\'ed\'elec interpolation operator \(I_{\mathcal{N}_0}\). These operators satisfy in particular the commuting diagram properties in two dimensions (see \cite{MR1746160})
\begin{align}\label{eqn:commuting_props}
\mathrm{curl} (I_{\mathcal{L}} \vecb{v}) = I_{RT_0} (\mathrm{curl} \vecb{v})
\quad \text{and} \quad
\nabla (I_{\mathcal{L}} \vecb{v}) = I_{\mathcal{N}_0} (\nabla \vecb{v})
\end{align}
for arbitrary sufficiently smooth functions \(\vecb{v}\). Furthermore we need a refined Helmholtz decomposition.
\begin{lemma}[\cite{MR2373173}]\label{lem:decomp_curlestimate}
There exists an operator \( \Pi_{\mathcal{N}_0} : H(\mathrm{curl},\Omega) \rightarrow \mathcal{N}_0(\mathcal{T})\) with the following property:
for every \(\vecb{\theta} \in H(\mathrm{curl},\Omega)\)
there exists a decomposition
\begin{align*}
\vecb{\theta} - \Pi_{\mathcal{N}_0} \vecb{\theta} = \nabla \phi + \vecb{y}
\end{align*}
with \(\phi \in H^1(\Omega)\), \(\vecb{y} \in H^1(\Omega)^2\), and
\begin{align*}
h_T^{-1} \| \vecb{y} \|_{L^2(T)} + \| \nabla \vecb{y} \|_{L^2(T)} \lesssim \| \mathrm{curl} \vecb{\theta} \|_{L^2(T)} \quad \text{for all } T \in \mathcal{T}.
\end{align*}
\end{lemma}
\begin{proof}
In \cite{MR2373173} a proof for three dimensions is given. The two dimensional case follows similarly.
\end{proof}
\begin{lemma}[Regular decomposition]\label{lem:reg_decomposition}
For each $\vecb{\theta} \in H(\mathrm{curl}, \omega)$ there exists a decomposition with $\alpha \in H^2(\omega)$ and $\vecb{\beta} \in H^1(\omega)^2$ such that
\begin{align*}
\vecb{\theta} = \nabla \alpha + \vecb{\beta},
\end{align*}
with
\begin{align*}
|| \nabla \vecb{\beta} ||_{L^2(\omega)} \lesssim || \mathrm{curl} \vecb{\theta}||_{L^2(\omega)} \quad \textrm{and} \quad \int_\omega \vecb{\beta} \dx = 0.
\end{align*}
\end{lemma}
\begin{proof}
Let $q := \mathrm{curl}\vecb{\theta}$ and $\tilde{\omega}$ be a convex domain such that $\omega \subset \tilde{\omega}$. We define $\tilde{q}$ as a trivial extension of $q$ by zero, i.e.\ $\tilde{q}|_\omega = q$ and $\tilde{q}|_{\tilde{\omega} \setminus \omega } = 0$. In the next step we seek the solution $w \in H^1(\tilde{\omega})$ of the Poisson problem $ \Delta w = \mathrm{curl} \mathrm{curl} w = \tilde{q}$ on $\tilde{\omega}$. Using a regularity estimate for the Poisson problem on the convex domain $\tilde{\omega}$, it follows for $\tilde{\vecb{\beta}}:= \mathrm{curl} w$ and $\vecb{\beta} := \tilde{\vecb{\beta}}|_\omega - \int_\omega \tilde{\vecb{\beta}} \dx / |\omega| $
that
\begin{align*}
|| \nabla \vecb{\beta} ||_{L^2(\omega)} \lesssim || \nabla \tilde{\vecb{\beta}} ||_{L^2(\tilde{\omega})} \lesssim || w ||_{H^2(\tilde{\omega})} \lesssim || \tilde{q} ||_{L^2(\tilde{\omega})} = ||q ||_{L^2(\omega)} = || \mathrm{curl} \vecb{\theta} ||_{L^2(\omega)}.
\end{align*}
Since $\mathrm{curl}(\vecb{\theta} - \vecb{\beta}) =0$ in \(\omega\), there exists a scalar potential $\alpha \in H^2(\omega)$ such that $\nabla \alpha =\vecb{\theta} - \vecb{\beta}$. This concludes the proof.
\end{proof}
\begin{theorem}[Proof of Assumption~\ref{assumption1} for finite element methods with \(P_0\) pressure space]
If the reconstruction operator \(\Pi\) and the Fortin operator \(I\) satisfy \eqref{eqn:reconstop_local_orthogonalities} and
\begin{align}\label{eq:P2P0_I2_property}
\int_E (1 - I) \vecb{v} \cdot \vec{n}_E \ds = \int_E (1 - \Pi I) \vecb{v} \cdot \vec{n}_E \ds = 0 \quad \text{for all } E \in \mathcal{E},
\end{align}
also Assumption~\ref{assumption1} is satisfied.
\end{theorem}
\begin{remark}
Condition \eqref{eq:P2P0_I2_property} is satisfied for the Fortin interpolators for the
\(P_2 \times P_0\), \(P_3 \times P_0\) and the Bernardi--Raugel finite element methods \cite[Section 8.4.3]{MR3097958}.
For these methods the reconstruction operator \(\Pi\) is the standard interpolation into the space \(BDM_{1}\) or \(RT_0\) \cite{LM2016}.
\end{remark}
\begin{proof}
Since every function \(\vecb{g} \in H^1(T)\) with \(\int_E \vecb{g} \cdot \vecb{n} \ds = 0\) along all edges \(E \in \mathcal{E}(T)\) of \(T\) satisfies a discrete Friedrichs inequality
\(\| \vecb{g} \|_{L^2(T)} \lesssim h_T \| \nabla \vecb{g} \|_{L^2(T)}\), see e.g.\ \cite{533fbff7}, it follows together with \eqref{eqn:reconstop_local_orthogonalities}
\begin{align*}
\| (1 - \Pi I) \vecb{v} \|_{L^2(T)}
& \leq \| (1 - I) \vecb{v}\|_{L^2(T)} + \| (1-\Pi)(I \vecb{v})\|_{L^2(T)}\\
& \lesssim h_T \| \nabla \vecb{v}\|_{L^2(T)} + h_T \| \nabla I \vecb{v} \|_{L^2(T)}
\lesssim h_T \| \nabla \vecb{v}\|_{L^2(T)}.
\end{align*}
Since \((1 - \Pi I) \vecb{v}\) is divergence-free, it holds \((1 - \Pi I) \vecb{v} = \mathrm{curl} \psi\) for some \(\psi \in H^1_0(\Omega) \cap H^2(\Omega)\), see Corollary 3.2 in \cite{GR86}.
Condition \eqref{eq:P2P0_I2_property} implies that the standard interpolator into \(RT_0\) vanishes, i.e.\ \(I_{RT_0} \mathrm{curl} \psi = 0\). Moreover, by the commuting properties
\eqref{eqn:commuting_props} of the de Rham complex, it also holds \(\mathrm{curl} (I_{\mathcal{L}} \psi) = I_{RT_0} \mathrm{curl} \psi = 0\).
An integration by parts and standard interpolation estimates yield
\begin{align*}
\int_\Omega \vecb{\theta} \cdot (1 - \Pi I) \vecb{v} \dx &= \int_\Omega \vecb{\theta} \cdot \mathrm{curl} (\psi - I_{\mathcal{L}} \psi) \dx = \int_\Omega\mathrm{curl} \vecb{\theta} \cdot (\psi - I_{\mathcal{L}} \psi) \dx \\
& \leq \| h_\mathcal{T}^{2} \mathrm{curl} \vecb{\theta} \|_{L^2} \| h_\mathcal{T}^{-2} (\psi - I_{\mathcal{L}} \psi) \|_{L^2} \\
& \leq \| h_\mathcal{T}^{2} \mathrm{curl} \vecb{\theta} \|_{L^2} \| h_\mathcal{T}^{-1} \nabla(\psi - I_{\mathcal{L}} \psi) \|_{L^2}\\
& = \| h_\mathcal{T}^{2} \mathrm{curl} \vecb{\theta} \|_{L^2} \| h_\mathcal{T}^{-1} \mathrm{curl}(\psi) \|_{L^2} \\
& = \| h_\mathcal{T}^{2} \mathrm{curl} \vecb{\theta} \|_{L^2} \| h_\mathcal{T}^{-1} (1 - \Pi I) \vecb{v} \|_{L^2} \leq \| h_\mathcal{T}^{2} \mathrm{curl} \vecb{\theta} \|_{L^2} \| \nabla \vecb{v} \|_{L^2},
\end{align*}
where we used that the curl is just the rotated gradient in two dimensions.
This concludes the proof.
\end{proof}
\begin{theorem}[Proof of Assumption~\ref{assumption1} for finite element methods with discontinuous \(P_1\) pressure space]\label{thm:proof_asssumption_P1dc}
If the reconstruction operator \(\Pi\) and the Fortin operator \(I\) satisfy
\begin{align}\label{eq:P2+P1_I2_property}
\int_T (1 - I) \vecb{v} \dx = \int_T (1 - \Pi I) \vecb{v} \dx = 0 \quad \text{for all } T \in \mathcal{T},
\end{align}
also Assumption~\ref{assumption1} is satisfied.
\end{theorem}
\begin{remark}
Condition \eqref{eq:P2+P1_I2_property} is satisfied by the \(P_2\)-bubble finite element method
and its Fortin interpolator \cite[Section 8.6.2]{MR3097958}.
A suitable reconstruction operator \(\Pi\) is the standard interpolation into the space \(BDM_{2}\) or \(RT_1\) \cite{LMT15,LM2016}.
Moreover, the result generalises to all \(P_k \times P_{k-2}\) finite element methods with \(k > 2\).
\end{remark}
\begin{proof}
A triangle inequality, interpolation properties of \(\Pi\), a Poincar\'e inequality, and the \(H^1\)-stability of \(I\) show
\begin{align*}
\| (1 - \Pi I) \vecb{v} \|_{L^2(T)} \leq \| (1 - \Pi) I\vecb{v} \|_{L^2(T)} + \| (1 - I) \vecb{v} \|_{L^2(T)} \lesssim h_T \| \nabla \vecb{v}\|_{L^2(T)}.
\end{align*}
To estimate the dual norm, Lemma~\ref{lem:decomp_curlestimate} yields \(\vecb{\theta} - \Pi_{\mathcal{N}_0} \vecb{\theta} = \nabla \phi + \vecb{y}\)
with
\begin{align*}
\| h_\mathcal{T} \vecb{y} \|_{L^2} \lesssim \|h^2_\mathcal{T} \mathrm{curl}\vecb{\theta} \|_{L^2}.
\end{align*}
Also note that due to $\Pi_{\mathcal{N}_0} \vecb{\theta} \in H(\mathrm{curl},T)$ we can use the regular decomposition from Lemma \ref{lem:reg_decomposition} to find
\begin{align*}
\Pi_{\mathcal{N}_0} \vecb{\theta}|_T = \nabla \alpha_T + \vecb{\beta}_T \quad \text{for all } T \in \mathcal{T}
\end{align*}
with some $\alpha_T \in H^2(T)$ and $\vecb{\beta}_T \in [H^1(T)]^2$ such that \(\int_{T} \vecb{\beta}_T \dx = 0\) and
$$\| \nabla \vecb{\beta}_T \|_{L^2(T)} \lesssim \| \mathrm{curl} (\Pi_{\mathcal{N}_0} \vecb{\theta}) \|_{L^2(T)} \lesssim \| \mathrm{curl} \vecb{\theta} \|_{L^2(T)}.$$
Together with the projection property of \(I_{\mathcal{N}_0}\), the commuting properties \eqref{eqn:commuting_props} of the de Rham complex and
the continuity of the nodal interpolation $I_{\mathcal{L}}$ for $H^2$ functions, the Helmholtz decomposition can be cast into the discrete version
\begin{align*}
\Pi_{\mathcal{N}_0} \vecb{\theta}|_T = I_{\mathcal{N}_0} (\nabla \alpha_T + \vecb{\beta}_T) = \nabla (I_{\mathcal{L}} \alpha_T) + I_{\mathcal{N}_0} \vecb{\beta}_T.
\end{align*}
The combination of all decompositions defines some function \(\alpha_\mathcal{T} \in P_1(\mathcal{T})\) and \(\vecb{\beta}_\mathcal{T} \in P_1(\mathcal{T})^2\)
with
$$
\| h_\mathcal{T}^2 \nabla_h \vecb{\beta}_\mathcal{T} \|_{L^2} \lesssim \| h_\mathcal{T}^2 \mathrm{curl} \vecb{\theta} \|_{L^2}.
$$
Since \(\vecb{z} := (1 - \Pi I) \vecb{v}\) is orthogonal to piecewise constants (by \eqref{eq:P2+P1_I2_property}), in particular
the piecewise constant function \(\nabla (I_{\mathcal{L}} \alpha_\mathcal{T}) \in P_0(\mathcal{T})^2\),
and gradients (because \(\vecb{z}\) is divergence-free and has zero boundary data), it follows
\begin{align*}
\int_\Omega \vecb{\theta} \cdot (1 - \Pi I) \vecb{v} \dx = \int_\Omega \vecb{z} \cdot \vecb{\theta} \dx &= \int_\Omega \vecb{z} \cdot (\vecb{\theta} - \Pi_{\mathcal{N}_0} \vecb{\theta}) \dx
+ \int_\Omega \vecb{z} \cdot \Pi_{\mathcal{N}_0} \vecb{\theta} \dx \\
&=\int_\Omega \vecb{z} \cdot \vecb{y} \dx + \int_\Omega \vecb{z} \cdot \vecb{\beta}_\mathcal{T} \dx \\
&= \int_\Omega h_\mathcal{T}^{-1} \vecb{z} \cdot h_\mathcal{T} \vecb{y} \dx + \int_\Omega h_\mathcal{T}^{-1} \vecb{z} \cdot h_\mathcal{T} \vecb{\beta}_\mathcal{T} \dx \\
& \lesssim \|h_\mathcal{T}^{-1} \vecb{z} \|_{L^2} (\| h_\mathcal{T} \vecb{y} \|_{L^2} + \| h_\mathcal{T}^2 \nabla_h \vecb{\beta}_\mathcal{T} \|_{L^2} ) \\
&\lesssim \|h_\mathcal{T}^{-1} \vecb{z} \|_{L^2}\| h_\mathcal{T}^{2} \mathrm{curl} \vecb{\theta} \|_{L^2} \lesssim \| \nabla \vecb{v} \|_{L^2}\| h_\mathcal{T}^{2} \mathrm{curl} \vecb{\theta} \|_{L^2}.
\end{align*}
Note that we used an elementwise Poincar\'e inequality for \(\vecb{\beta}_\mathcal{T}\) (which has piecewise integral mean zero). This concludes the proof.
\end{proof}
\begin{theorem}[Proof of Assumption~\ref{assumption1} for the mini finite element method]\label{thm:proof_asssumption_mini}
The mini finite element method family with the reconstruction operator from \cite{2016arXiv160903701L} and a Fortin operator \(I\) with
the property (see e.g.\ \cite[Section 8.4.2]{MR3097958})
\begin{align}\label{eq:mini_I2_property}
\int_T (1 - I) \vecb{v} \dx = 0 \quad \text{for all } T \in \mathcal{T}
\end{align}
satisfies Assumption~\ref{assumption1}.
\end{theorem}
\begin{proof}
For the mini finite element method, the reconstruction operator is given in \cite{2016arXiv160903701L}.
It in particular satisfies \eqref{eqn:reconstop_local_orthogonalities} in the sense
\begin{align}\label{def:MINIdecomp}
(1-\Pi) I \vecb{v} = \sum_{y \in \mathcal{N}} \vecb{\sigma}_y
\end{align}
where \(\vecb{\sigma}_y \in BDM_{2}(\mathcal{T}(\omega_y))\) satisfies \(\| \vecb{\sigma}_y \|_{L^2(\omega_y)} \lesssim h_y \|\nabla I \vecb{v} \|_{L^2(\omega_y)}\)
on the nodal patch \(\omega_y\) of the node \(y \in \mathcal{N}\) and
(at least) the local orthogonality
\begin{align*}
\int_{\omega_y} \vecb{\sigma}_y \dx = 0.
\end{align*}
Furthermore we have $\vecb{\sigma}_y \cdot \vecb{n} = 0$ on the boundary $\partial \omega_y$. This time, the operators \(I\) and \(\Pi\) do not share the same orthogonality on cell-wise constants as in Theorem~\ref{thm:proof_asssumption_P1dc}, but one can split up the
\(L^2\)-norm by a triangle inequality
\begin{align*}
\| (1 - \Pi I) \vecb{v} \|_{L^2} \leq \| (1- \Pi) I\vecb{v} \|_{L^2} + \| (1- I) \vecb{v} \|_{L^2}.
\end{align*}
Due to \eqref{eq:mini_I2_property} the norm \(\| (1- I) \vecb{v} \|_{L^2(T)}\) can be estimated
as in Theorem~\ref{thm:proof_asssumption_P1dc} and it remains to estimate \(\| (1- \Pi) I\vecb{v} \|_{L^2}\).
For the latter, it holds
\begin{align*}
\| (1- \Pi) I \vecb{v} \|_{L^2(T)}^2
= \sum_{z \in \mathcal{N}(T)} \int_T \vecb{\sigma} _z \cdot (1-\Pi) I \vecb{v} \dx\\
\leq \sum_{z \in \mathcal{N}(T)} \| \vecb{\sigma}_z \|_{L^2(\omega_z)} \| (1-\Pi) I \vecb{v} \|_{L^2(T)}\\
\leq h_T \|\nabla I \vecb{v} \|_{L^2(\omega_T)} \| (1-\Pi) I \vecb{v} \|_{L^2(T)}
\end{align*}
and hence
\begin{align*}
\| (1- \Pi) I \vecb{v} \|_{L^2(T)} \lesssim h_T \|\nabla I \vecb{v} \|_{L^2(\omega_T)} \lesssim h_T \| \nabla \vecb{v} \|_{L^2(\omega_T)}.
\end{align*}
For the estimate of the dual norm, inserting the decomposition from Lemma~\ref{lem:decomp_curlestimate} leads to
\begin{align*}
\int_\Omega \vecb{\theta} \cdot (1 - \Pi I) \vecb{v} \dx = \int_\Omega \vecb{z} \cdot \vecb{\theta} \dx = \int_\Omega \vecb{z} \cdot \vecb{y} \dx + \int_\Omega \vecb{z} \cdot \Pi_{\mathcal{N}_0} \vecb{\theta} \dx.
\end{align*}
The first integral can be estimated as in Theorem~\ref{thm:proof_asssumption_P1dc} and it remains to estimate the second integral where we employ
the decomposition \eqref{def:MINIdecomp} for \((1 - \Pi) I \vecb{v} = \sum_{y \in \mathcal{N}} \vecb{\sigma}_y \) and its orthogonality properties, i.e.
\begin{align}\label{eqn:intemediate_mini}
\int_\Omega \vecb{z} \cdot \Pi_{\mathcal{N}_0} \vecb{\theta} \dx
= \int_\Omega (1 - I) \vecb{v} \cdot \Pi_{\mathcal{N}_0} \vecb{\theta} \dx + \sum_{y \in \mathcal{N}} \int_{\omega_y} h_y^{-1} \vecb{\sigma}_y \cdot h_y \Pi_{\mathcal{N}_0} \vecb{\theta} \dx
\end{align}
and we bound both integrals separately.
The first integral of \eqref{eqn:intemediate_mini} can be estimated exactly as in Theorem~\ref{thm:proof_asssumption_P1dc} due to \eqref{eq:mini_I2_property} by an element-wise Helmholtz decomposition such that
\begin{align*}
\int_\Omega (1 - I) \vecb{v} \cdot \Pi_{\mathcal{N}_0} \vecb{\theta} \dx
\lesssim \| \nabla \vecb{v} \|_{L^2} \| h_\mathcal{T}^2 \mathrm{curl} \vecb{\theta} \|_{L^2}.
\end{align*}
For the second integral, first note that due to $\Pi_{\mathcal{N}_0} \vecb{\theta} \in H(\mathrm{curl},\omega_y)$ we can use the regular decomposition of Lemma \ref{lem:reg_decomposition} on each patch to get
\begin{align*}
\Pi_{\mathcal{N}_0} \vecb{\theta}|_{\omega_y} = \nabla \alpha_y + \vecb{\beta}_y \quad \text{for all } y \in \mathcal{N}
\end{align*}
with some $\alpha_y \in H^1(\omega_y)$ and $\vecb{\beta}_y \in [H^1(\omega_y)]^2$ such that \(\int_{\omega_y} \vecb{\beta}_y \dx = 0\) and
$$\| \vecb{\beta}_y \|_{H^1(\omega_y)} \lesssim \| \mathrm{curl} (\Pi_{ND_0} \vecb{\theta}) \|_{L^2(\omega_y)} \lesssim \| \mathrm{curl} \vecb{\theta} \|_{L^2(\omega_y)}.$$
Next note that on each element $T \subset \omega_y$ we have
$\Pi_{ND_0} \vecb{\theta}|_T \in [H^1(T)]^2$ and thus
\begin{align*}
\nabla \alpha_y|_T = \Pi_{ND_0} \vecb{\theta}|_T - \vecb{\beta_y}|_T \in [H^1(T)]^2 \quad \Rightarrow \quad \alpha_y|_T \in H^2(T).
\end{align*}
Together with the projection property of \(I_{\mathcal{N}_0}\), the commuting properties \eqref{eqn:commuting_props} of the de Rham complex and
the continuity of the nodal interpolation $I_{\mathcal{L}}$ for $H^2$ functions, the Helmholtz decomposition can be cast into the discrete version
\begin{align*}
\Pi_{ND_0} \vecb{\theta}|_{\omega_y} = I_{\mathcal{N}_0}(\nabla \alpha_y + \vecb{\beta}_y) = \nabla (I_{\mathcal{L}} \alpha_y) + I_{\mathcal{N}_0} \vecb{\beta}_y.
\end{align*}
Finally, a scaling argument and a Poincar\'e inequality shows
\begin{align*}
\| I_{\mathcal{N}_0} \vecb{\beta}_y \|_{L^2(\omega_y)} \lesssim \| \vecb{\beta}_y \|_{L^2(\omega_y)} + h_y \| \nabla \vecb{\beta}_y \|_{L^2(\omega_y)}
\lesssim h_y \| \nabla \vecb{\beta}_y \|_{L^2(\omega_y)} \lesssim h_y \| \mathrm{curl} \vecb{\theta} \|_{L^2(\omega_y)}.
\end{align*}
Furthermore, note that the reconstruction operator is orthogonal on gradients of continuous \(P_1\)-functions like \(\nabla (I_{\mathcal{L}} \alpha_y)\)
due to \cite[Proposition 16.ii]{2016arXiv160903701L}, i.e. \(\int_{\omega_y} \vecb{\sigma}_y \cdot \nabla (I_{\mathcal{L}} \alpha_y) \dx = 0\).
Now, the second integral of \eqref{eqn:intemediate_mini} is bounded by
\begin{align*}
\sum_{y \in \mathcal{N}} \int_{\omega_y} h_y^{-1} \vecb{\sigma}_y \cdot h_y \Pi_{\mathcal{N}_0} \vecb{\theta} \dx
& \lesssim \sum_{y \in \mathcal{N}} \| h_y^{-1} \vecb{\sigma}_y\|_{L^2(\omega_y)} \| h_y^2 \nabla \vecb{\beta}_y \|_{L^2(\omega_y)}\\
& \lesssim \sum_{y \in \mathcal{N}} \| \nabla \vecb{v} \|_{L^2} \| h_y^2 \mathrm{curl} \vecb{\theta} \|_{L^2(\omega_y)} \lesssim \| \nabla \vecb{v} \|_{L^2} \| h_\mathcal{T}^2 \mathrm{curl} \vecb{\theta} \|_{L^2}.
\end{align*}
The combination of all previous results concludes the proof.
\end{proof}
\begin{theorem}[Proof of Assumption~\ref{assumption1} for the Taylor--Hood finite element method]
The Taylor--Hood finite element method family with the reconstruction operator from \cite{2016arXiv160903701L} and the Fortin operator \(I\) from \cite{Mardal2013,MR3272546}
in two dimensions with the property
\begin{align}\label{eq:TH_I2_property}
\int_\Omega (1 - I) \vecb{v} \cdot \vecb{w} \dx = 0 \quad \text{for all } \vecb{w} \in \widetilde{\mathcal{N}_0}(\mathcal{T}),
\end{align}
where \(\widetilde{\mathcal{N}_0}(\mathcal{T})\) is a subset of \(\mathcal{N}_0(\mathcal{T})\) as defined in \cite{Mardal2013,MR3272546},
satisfies Assumption~\ref{assumption1}.
\end{theorem}
\begin{remark}
The proof requires some assumption on the mesh, i.e. we require
that each interior face \(E \in \mathcal{E}^0\)
has at most one node on the boundary \(\partial \Omega\).
This assumption was also needed in \cite{Mardal2013} for the construction of a stable Fortin interpolator and was later removed in \cite{MR3272546}. Maybe similar arguments can be used in our case.
\end{remark}
\begin{proof}
A triangle inequality, properties of \(\Pi\), and the \(H^1\)-stability of \(I\) show
\begin{align*}
\| (1 - \Pi I) \vecb{v} \|_{L^2(T)} & \leq \| (1 - \Pi) I\vecb{v} \|_{L^2(T)} + \| (1 - I) \vecb{v} \|_{L^2(T)} \lesssim h_T \| \nabla \vecb{v}\|_{L^2(T)}.
\end{align*}
Again using the decomposition from Lemma~\ref{lem:decomp_curlestimate} and the orthogonality
between gradients and \((1 - \Pi I) \vecb{v}\) leads to
\begin{align*}
\int_\Omega (1 - \Pi I) \vecb{v} \cdot \vecb{\theta} \dx &= \int_\Omega (1 - \Pi I) \vecb{v} \cdot \vecb{y} \dx + \int_\Omega (1 - \Pi I) \vecb{v} \cdot \Pi_{\mathcal{N}_0} \vecb{\theta} \dx.
\end{align*}
The first integral can be estimated similarly as in the proof of Theorem~\ref{thm:proof_asssumption_mini}. For the second integral we use \((1 - \Pi) I \vecb{v} = \sum_{y \in \mathcal{N}} \vecb{\sigma}_y \) to get
\begin{align*}
\int_\Omega (1 - \Pi I) \vecb{v} \cdot \Pi_{\mathcal{N}_0} \vecb{\theta} \dx = \int_\Omega (1 - I) \vecb{v} \cdot \Pi_{\mathcal{N}_0} \vecb{\theta} \dx + \sum_{y \in \mathcal{N}} \int_{\omega_y} h_y^{-1}\vecb{\sigma}_y \cdot h_y \Pi_{\mathcal{N}_0} \vecb{\theta} \dx.
\end{align*}
Similarly as in the proof of Theorem~\ref{thm:proof_asssumption_mini} we bound the first term. However the integral (using the orthogonality \eqref{eq:TH_I2_property})
\begin{align*}
\int_\Omega (1 - I) \vecb{v} \cdot \Pi_{\mathcal{N}_0} \vecb{\theta} \dx = \int_\Omega (1 - I) \vecb{v} \cdot (1-I_{\widetilde{\mathcal{N}_0}})\Pi_{\mathcal{N}_0} \vecb{\theta} \dx
\end{align*}
needs a different treatment. To estimate this integral we have to design a proper interpolation \(I_{\widetilde{\mathcal{N}_0}}(\Pi_{\mathcal{N}_0} \vecb{\theta})\)
of \(\Pi_{\mathcal{N}_0} \vecb{\theta}\) into the space \(\widetilde{\mathcal{N}_0}(\mathcal{T})\). To do so, we can write \(\Pi_{\mathcal{N}_0} \vecb{\theta}\) as a linear combination
\begin{align*}
\Pi_{\mathcal{N}_0} \vecb{\theta} = \sum_{E \in \mathcal{E}} \alpha_E N_E \quad \text{with coefficients } \alpha_E := \int_E \Pi_{\mathcal{N}_0} \vecb{\theta} \cdot \vecb{\tau}_E \ds
\end{align*}
and N\'ed\'elec basis functions $N_E$ with \(\int_F N_E \cdot \vecb{\tau}_F \ds = \delta_{EF}\) for \(E,F \in \mathcal{E}\).
Then, we choose \(I_{\widetilde{\mathcal{N}_0}}(\Pi_{\mathcal{N}_0} \vecb{\theta})\) as
\begin{align*}
I_{\widetilde{\mathcal{N}_0}}(\Pi_{\mathcal{N}_0} \vecb{\theta}) := \sum_{E \in \mathcal{E}^0} \alpha_E \widetilde{N}_E
\end{align*}
where \(\mathcal{E}^0\) are the interior edges and \(\widetilde{N}_E\) are the modified basis functions as in \cite{MR3272546}, i.e.
\(\widetilde{N}_E = N_E\) for all edges \(E\) with two interior endpoints and \(\widetilde{N}_E = N_E \pm N_F\) for interior edges \(E\) with
one boundary endpoint and \(F\) is a boundary edge with the same boundary endpoint and in the same triangle of \(E\). The sign depends on the orientation of the tangent vectors. Assume a boundary triangle \(T_E\) with
nodes \(1,2,3\), boundary edge \(E_3 = \mathrm{conv} \lbrace 1, 2 \rbrace\) and two adjacent interior edges \(E_1\) and \(E_2\) as depicted in Figure~\ref{fig:numbering}.
We further assume, that the tangential vectors are pointing from the lower to the larger node number. Then, according to \cite{MR3272546}, the modified basis functions read
\(\widetilde{N}_{E_2} = N_{E_2} + N_{E_3}\) and \(\widetilde{N}_{E_1} = N_{E_1} - N_{E_3}\). Hence, locally on \(T\), we have
\begin{align*}
\left((1-I_{\widetilde{\mathcal{N}_0}})\Pi_{\mathcal{N}_0} \vecb{\theta}\right)|_T
& = \alpha_{E_1} N_{E_1} + \alpha_{E_2} N_{E_2} + \alpha_{E_3} N_{E_3} - (\alpha_{E_1} \widetilde{N}_{E_1} + \alpha_{E_2} \widetilde{N}_{E_2})\\
& = N_{E_3} (\alpha_{E_3} + \alpha_{E_1} - \alpha_{E_2}).
\end{align*}
\begin{figure}
\caption{
Enumeration of the vertices and edges in a boundary triangle with boundary edge \(E_3\).}
\label{fig:numbering}
\end{figure}
The definition of \(\alpha_{E_j}\) and an easy calculation plus the Stokes theorem show
\begin{align*}
\alpha_{E_3} + \alpha_{E_1} - \alpha_{E_2} = \int_{\partial T} \Pi_{\mathcal{N}_0} \vecb{\theta} \cdot \vecb{\tau} \ds = \int_{T} \mathrm{curl} (\Pi_{\mathcal{N}_0} \vecb{\theta}) \dx
\end{align*}
and hence the estimate
\begin{align*}
\| (1-I_{\widetilde{\mathcal{N}_0}})\Pi_{\mathcal{N}_0} \vecb{\theta} \|_{L^2(T)}
\leq \left\lvert \int_{T} \mathrm{curl} (\Pi_{\mathcal{N}_0} \vecb{\theta}) \dx \right\rvert \| N_E \|_{L^2(T)}
\lesssim h_T \| \mathrm{curl} (\Pi_{\mathcal{N}_0} \vecb{\theta}) \|_{L^2(T)}.
\end{align*}
On interior triangles, it holds \(\Pi_{\mathcal{N}_0} \vecb{\theta} - I_{\widetilde{\mathcal{N}_0}}(\Pi_{\mathcal{N}_0} \vecb{\theta}) = 0\) and hence
\begin{align*}
\int_\Omega (1 - I) \vecb{v} \cdot & \Pi_{\mathcal{N}_0} \vecb{\theta} \dx
= \int_\Omega (1 - I) \vecb{v} \cdot (\Pi_{\mathcal{N}_0} \vecb{\theta} - I_{\widetilde{\mathcal{N}_0}}(\Pi_{\mathcal{N}_0} \vecb{\theta})) \dx\\
& \lesssim \sum_{T \in \mathcal{T}(\partial \Omega)} h_T^2 \| \mathrm{curl} (\Pi_{\mathcal{N}_0} \vecb{\theta}) \|_{L^2(T)} \| \nabla \vecb{v} \|_{L^2(\omega_T)}
\lesssim \| h_{\mathcal{T}}^2 \mathrm{curl} \vecb{\theta} \|_{L^2} \| \nabla \vecb{v} \|_{L^2}.
\end{align*}
This concludes the proof.
\end{proof}
\section{Numerical experiments}\label{sec:numerics}
In the following two numerical examples, the novel error estimator
\begin{align*}
\mu_\text{new}^2 := \nu^{-2} \eta_\text{new}(\nabla \vecb{u}_h)^2 + \| \mathrm{div} \vecb{u}_h \|^2_{L^2}
\end{align*}
from Theorem~\ref{thm:eta_averaging_pr} (for pressure-robust methods) or Proposition~\ref{thm:eta_averaging_cl} (for classical methods)
is compared to the classical error estimator
\begin{align*}
\mu_\text{class}^2 := \nu^{-2} \eta_\text{class}(\nabla u_h,p_h)^2
+ \| \mathrm{div} \vecb{u}_h \|^2_{L^2}
\end{align*}
from Theorem~\ref{thm:errorbounds}, with respect to the $H^1$-seminorm $\textrm{err}_{H^1}(\vecb{u}_h) := \| \nabla \vecb{u} - \nabla \vecb{u}_h \|_{L^2}$.
Our adaptive mesh refinement algorithm follows the loop
\begin{align*}
\mathrm{SOLVE} \rightarrow \mathrm{ESTIMATE} \rightarrow \mathrm{MARK} \rightarrow \mathrm{REFINE} \rightarrow \mathrm{SOLVE} \rightarrow \ldots
\end{align*}
and employs the local contributions to the error estimator as
element-wise refinement indicators.
In the marking step, an element \(T \in \mathcal{T}\) is marked for refinement if \(\mu(T) \geq \frac{1}{4} \max\limits_{K \in \mathcal{T}} \mu(K)\).
The refinement step refines all marked elements plus further elements in a closure step to guarantee a regular triangulation. The implementation and numerical examples were performed with NGSolve/Netgen \cite{ngsolve}, \cite{netgen}.
\begin{remark}
For reducing the costs of the estimator, we estimated the consistency error $\eta_\text{cons,1}(\nabla u_h) = \| \nu \mathrm{div}_h (\nabla u_h) \circ (1 - \Pi) \|_{V_{0,h}^\star}$ according to
\eqref{eqn:reconstop_local_orthogonalities} by
\begin{align*}
\eta_\text{cons,1}(\nabla u_h) \lesssim \nu \left( \sum_{K \in \mathcal{K}}
h_K^2 \| (1 - \pi_{P_{q-1}(K)}) \Delta_h u_h \|^2_{L^2(K)} \right)^{1/2}.
\end{align*}
\end{remark}
\subsection{Smooth example on unit square} \label{curltwodimexample}
This example concerns the Stokes problem for
\begin{align*}
\vecb{u}(x,y) := \mathrm{curl} \left(x^2(x-1)^2y^2(y-1)^2\right) \quad \text{and} \quad p(x,y) := x^5 + y^5 - 1/3
\end{align*}
on the unit square \(\Omega := (0,1)^2\) with matching right-hand side \(\vecb{f} := - \nu \Delta \vecb{u} + \nabla p\)
for variable viscosity \(\nu\).
\begin{table}
\begin{center}
\caption{\label{tab:CurlExampleFixedmesh}The $H^1$ error and the old and new error estimators including the efficiency for the example of section \ref{curltwodimexample} for varying $\nu$ using the classical Taylor--Hood element $\text{TH}_2$ and its pressure-robust modification.}
\footnotesize
\begin{tabular}{c@{~~} @{~~}c@{~~} @{~~}c@{~~} @{~~}c@{~~} @{~~}c@{~~} @{~~}c@{~~} @{~~}c@{~~} @{~~}c@{~~}}
\toprule
& \multicolumn{3}{c}{ (classical)} &\multicolumn{3}{c}{(p-robust)}\\
$\nu$ & $ \textrm{err}_{H^1}(u_h)$ & $\mu_\text{class}$ & $\frac{\mu_\text{class}}{\textrm{err}_{H^1}(u_h)} $
& $\textrm{err}_{H^1}(u_h)$ & $\mu_\text{new}$ & $\frac{\mu_\text{new}}{\textrm{err}_{H^1}(u_h)} $\\
\midrule
$10^{1}$& \numcoef{0.001265847525399444}& \numcoef{0.01994406784451481}& \numcoef{15.755505654775735}& \numcoef{0.001303824956633806}& \numcoef{0.05192592441325167}& \numcoef{39.82584023189215}\\
$10^{0}$& \numcoef{0.001297267918076333}& \numcoef{0.01416918684944461}& \numcoef{10.922328882113677}& \numcoef{0.0013038249566338068}& \numcoef{0.034652174672599206}& \numcoef{26.57732120887117}\\
$10^{-1}$& \numcoef{0.0031200912873200794}& \numcoef{0.11172402531285176}& \numcoef{35.80793477642578}& \numcoef{0.0013038249566338315}& \numcoef{0.032924996269452764}& \numcoef{25.252620071376253}\\
$10^{-2}$& \numcoef{0.028546945385180832}& \numcoef{1.1123820930363155}& \numcoef{38.96676432546687}& \numcoef{0.0013038249566340158}& \numcoef{0.03275228154554611}& \numcoef{25.12015234782754}\\
$10^{-3}$& \numcoef{0.28519134950247516}& \numcoef{11.121735407091114}& \numcoef{38.99745005061799}& \numcoef{0.0013038249566365867}& \numcoef{0.03273501010618053}& \numcoef{25.106905600752906}\\
$10^{-4}$& \numcoef{2.851885435299173}& \numcoef{111.21554661028321}& \numcoef{38.99719996943577}& \numcoef{0.0013038249566595386}& \numcoef{0.032733282963005116}& \numcoef{25.105580926192225}\\
$10^{-5}$& \numcoef{28.518851311410526}& \numcoef{1112.1536864539069}& \numcoef{38.997141726003846}& \numcoef{0.0013038249568637459}& \numcoef{0.03273311025353761}& \numcoef{25.105448458568148}\\
$10^{-6}$& \numcoef{285.1885125743553}& \numcoef{11121.535087671366}& \numcoef{38.997135569307765}& \numcoef{0.0013038249590302574}& \numcoef{0.03273309302043661}& \numcoef{25.105435199509003}\\
\bottomrule
\end{tabular}
\end{center}
\end{table}
Table~\ref{tab:CurlExampleFixedmesh} lists the error of the classical and pressure-robust Taylor--Hood finite element methods with their error estimators \(\mu_\text{class}\) and \(\mu_\text{new}\)
on a fixed mesh with 1139 degrees of freedom but varying viscosities \(\nu \in (10^{-6},10)\).
As expected from the a priori error estimates of Theorems~\ref{thm:apriori_classical} and \ref{thm:apriori_probust},
the error of the classical solution scales with \(\nu^{-1}\), while the error of the pressure-robust method is \(\nu\)-invariant.
Another observation is that both error estimators are efficient for their respective discrete solution.
Figure~\ref{curltwodexample} compares the errors and error estimators of the
Taylor--Hood finite element method of order $2$ and the MINI finite element method
with and without the pressure robust modification for uniform mesh refinement
as in the case \(\nu = 1\) and a pressure-dominant case with \(\nu = 10^{-3}\).
In the pressure dominant case $\nu = 10^{-3}$ the right hand side $\vecb{f}$ tends to have a large irrotational part.
The left plot of Figure~\ref{curltwodexample} confirms once again that the velocity error scales with
$1 / \nu$ and that pressure-robust methods result in much more accurate solutions.
For the classical methods both estimators $\mu_\text{new}$ and $\mu_\text{class}$ are efficient,
i.e.\ have comparable overestimation factors and the same optimal convergence order as the velocity error.
In case of the MINI finite element method, all quantities even converge quadratically.
This is due to the dominance of the pressure error and the higher approximation order of the pressure.
In this sense, we are in a pre-asymptotic range and the error will converge linearly as soon as the
\(\nu^{-3}\)-weighted pressure error is of same magnitude (as it is the case for $\nu=1$ from the very beginning).
Also for the classical MINI element $\mu_\text{new}$ and $\mu_\text{class}$ are efficient with a comparable overestimation factor.
For the pressure-robust methods we observe that for both elements the novel estimator $\mu_\text{new}$ is much smaller than $\mu_\text{class}$. To be more precise, the two estimators scale like $\mu_\text{class} \approx \nu^{-1} \mu_\text{new}$ in case of the Taylor--Hood method, as expected by the theory.
This is again due to the discrete pressure that is used in $\mu_\text{class}$ ($p_h$ replaced by some better approximation of $p$ would reduce the gap between $\mu_\text{new}$ and $\mu_\text{class}$).
Hence, $\mu_\text{new}$ is efficient and $\mu_\text{class}$ is not efficient for the pressure-robust Taylor--Hood finite element method.
In case of the pressure-robustly modified MINI method, the velocity error and the novel estimator $\mu_\text{new}$ now have the expected optimal linear order of the MINI finite element method.
Otherwise, the conclusions are similar to the ones for the Taylor--Hood method.
In the case \(\nu = 1\) the irrotational part and the rotational part of the right hand side $\vecb{f}$ have the same magnitude, thus the pressure error has not
such a big impact on the accuracy of the discrete velocity.
Accordingly, there is only little to no improvement by the application of the pressure-robust modification.
Thus, in the right plots of Figure~\ref{curltwodexample} we can see that the velocity error of both methods, the pressure-robust and the classical one, is of the same magnitude and order. Both estimators are efficient with slightly less overestimation by $\mu_\text{class}$.
\begin{figure}
\caption{The $H^1$-error, $\mu_\text{class}
\label{curltwodexample}
\end{figure}
\begin{figure}
\caption{Error for L-shape example of section \ref{lshapeexample}
\label{fig:lshape:dcontpres}
\end{figure}
\begin{figure}
\caption{Error for L-shape example of section \ref{lshapeexample}
\label{fig:lshape:contpres}
\end{figure}
\subsection{L-shape example} \label{lshapeexample}
This example studies a velocity \(\vecb{u}\) and a pressure \(p_0\) on the L-shaped domain
\(\Omega := (-1,1)^2 \setminus \left((0,1) \times (-1,0)\right)\) taken from \cite{MR993474}
that satisfy \(-\nu \Delta \vecb{u} + \nabla p_0 = 0\). The fields are defined in polar coordinates and read
\begin{align*}
\vecb{u}(r,\varphi)
& :=r^\alpha
\begin{pmatrix}
(\alpha+1)\sin(\varphi)\psi(\varphi) + \cos(\varphi)\psi^\prime(\varphi)
\\
-(\alpha+1)\cos(\varphi)\psi(\varphi) + \sin(\varphi)\psi^\prime(\varphi)
\end{pmatrix}^T,\\
p_0 &:= \nu^{-1} r^{(\alpha-1)}((1+\alpha)^2 \psi^\prime(\varphi)+\psi^{\prime\prime\prime}(\varphi))/(1-\alpha)
\end{align*}
where
\begin{multline*}
\psi(\varphi) :=
1/(\alpha+1) \, \sin((\alpha+1)\varphi)\cos(\alpha\omega) - \cos((\alpha+1)\varphi)\\
- 1/(\alpha-1) \, \sin((\alpha-1)\varphi)\cos(\alpha\omega) + \cos((\alpha-1)\varphi)
\end{multline*}
and \(\alpha = 856399/1572864 \approx 0.54\), \(\omega = 3\pi/2\).
To have a nonzero right-hand side we add the pressure \(p_+ := \sin(xy\pi)\), i.e. \(p := p_0 + p_+\) and \(f := \nabla(p_+)\).
We generate a pressure dominant case by using a small viscosity $\nu = 10^{-3}$.
In Figure~\ref{fig:lshape:dcontpres} and \ref{fig:lshape:contpres} the velocity error and the
novel estimator $\eta_\text{new}$ are plotted for the classical and modified version of
four different finite element methods and uniform and adaptive mesh refinement.
For this example an adaptive refinement is expected to refine the generic singularity
of the velocity in the corner $(0,0)$.
We first discuss the pressure-robust variants of the finite element methods. Looking at the left plots of Figure~\ref{fig:lshape:dcontpres} and \ref{fig:lshape:contpres} we can see that there is a major difference between adaptive and uniform mesh refinement. The adaptive algorithm results in optimal orders of the velocity error and the estimator, while uniform refinement only leads to suboptimal orders as the singularity is not resolved well enough. The only exception is the MINI finite element method which pre-asymptotically converges
with quadratic speed. This is again thanks to the better polynomial order in the pressure ansatz space and the smooth pressure \(p_+\). Asymptotically also the MINI finite element method shows
the suboptimal behaviour in case of uniform mesh refinement and first-order convergence in case of adaptive mesh refinement. In all cases, the new error estimator \(\mu_\text{new}\) is efficient
and gives reasonable refinement indicators.
\begin{figure}
\caption{
(a): according to \(\mu_\text{new}
\label{fig:adaptref}
\end{figure}
In case of the classical variants of the finite element methods, totally different observations can be made.
In the right pictures of Figure~\ref{fig:lshape:dcontpres} and \ref{fig:lshape:contpres} we first note that the error is much larger compared to the pressure-robust method.
Furthermore, similarly as before, only adaptive mesh refinement leads to optimal orders. However, it is important to note that the gap between the velocity error of the classical method and the
velocity error of the pressure-robust method stays as large as in the beginning also under adaptive mesh refinement. A possible explanation is given by Figure~\ref{fig:adaptref}
which shows that the classical method refines the mesh almost uniformly. This is reasonable in the sense that the pressure error of the smooth
pressure \(p_+\) dominates the (real and the estimated) discretisation error in the beginning.
The pressure-robust method on the other hand is not polluted by this influence and can concentrate immediately on the corner singularity. However, it is important that the
error estimator is also pressure-robust. If the refinement indicators are taken from \(\mu_\text{class}\), the corner singularity remains unrefined until the dominance of the pressure
error in the error bound is removed.
Hence, the main conclusion is that only a pressure-robust finite element method with a pressure-robust error estimator leads to optimal
meshes with the smallest velocity error.
\end{document} |
\begin{document}
\title{Simple proof of Chebotar\"ev's theorem on roots of
unity\footnote{Keywords and phrases: roots of unity, non-zero minors}
\footnote{2000 Mathematics Subject classification: 11T22 (primary),
42A99, 11C20 (secondary)}}
\author{P. E. Frenkel}
\date{}
\newcommand{\Q}{\mathbf Q}
\newcommand{\F}{\mathbf F}
\newcommand{\Z}{\mathbf Z}
\newcommand{\E}{\mathcal E}
\newcommand{\C}{\mathbf C}
\newcommand{\one}{\mathbf 1}
\newcommand{\nilp}{\mathrm{nilpotent}}
\maketitle
\begin{abstract} We give a simple proof of Chebotar\"ev's theorem:
Let $p$ be a prime and $\omega $ a primitive $p$th root of unity. Then all minors of the matrix
$\left(\omega^ {ij}\right)_{i,j=0}^{p-1}$ are non-zero.
\end{abstract}
Let $p$ be a prime and $\omega $ a primitive $p$th root of unity.
We write $\mathbf F_p$ for the field with $p$ elements.
In 1926, Chebotar\"ev proved the following theorem (see \cite{SL}):
\begin{theorem} For any sets $I,J\subseteq \mathbf F_p$
with equal cardinality, the matrix
$(\omega^{ij})_{i\in I,j\in J}$ has non-zero determinant.
\end{theorem}
Several independent proofs have been
given, including ones by
Dieudonn\'e \cite{D}, Evans and Isaacs \cite{EI}, and Terence Tao \cite{T}. Tao points out that the theorem is equivalent to
the inequality $|{\mathrm {supp}} f|+|{\mathrm {supp}} \hat f|\geq p+1$ holding for any function $0\not\equiv f:\mathbf F_p\to \mathbf C$
and its Fourier transform $\hat f$,
a fact also discovered independently by Andr\'as Bir\'o.
Bir\'o posed this as Problem 3 of the 1998 Schweitzer Competition. The proof I
gave in the competition
(the one in the present article) is published in Hungarian in
\cite[pp. 53--54.]{matlap}.
It was also discovered (as part of a more general investigation) by Daniel
Goldstein, Robert M. Guralnick and I. M. Isaacs \cite[Section 6]{GGI}.
The proof is based on the following two lemmas. Lemma~\ref{1}
is covered by \cite[Chapter 1]{W}, but we include a proof for the sake of completeness.
\begin{lemma}\label{1}
$\mathbf Z[\omega]/(1-\omega)=\mathbf F_p.$
\end{lemma}
\begin{proof}
Let $\Omega$ be an indeterminate and let
$\Phi_p(\Omega)=1+\Omega+\dots+\Omega^{p-1}$ be the minimal polynomial of
the algebraic integer $\omega$. Consider the surjective ring homomorphisms
$$\mathbf Z[\Omega]\to\mathbf Z[\Omega]/(\Phi_p(\Omega))=\mathbf Z[\omega], \qquad \Omega\mapsto \omega$$ and
$$\mathbf Z[\Omega]\to\mathbf Z[\Omega]/(1-\Omega,p)=\mathbf F_p, \qquad\Omega\mapsto 1.$$
The latter kernel
contains the former one
since $\Phi_p(\Omega)\equiv p \mod (1-\Omega)$.
Therefore, the latter homomorphism
factors through the former one
via a surjective homomorphism $\mathbf Z[\omega]\to\mathbf F_p$ whose kernel is
the ideal $$(1-\Omega, p)/(\Phi_p(\Omega))=(1-\omega, p)=(1-\omega),$$ the
last equality following from
$p\equiv\Phi_p(\omega)=0\mod (1-\omega)$.
\end{proof}
\begin{lemma}\label{2}
Let
$0\not\equiv g(x)\in \mathbf F_p[x]$ be a polynomial of degree $<p$. Then
the multiplicity of any element $0\neq a\in \mathbf F_p$ as a root of
$g(x)$ is strictly less than the number of non-zero coefficients of $g(x)$.
\end{lemma}
\begin{proof}
For $g(x)$ constant, the lemma is obviously true. Assume that
it is true for any
$g(x)$ of degree $<k$, with some fixed
$1\leq k<p$ , and take $g(x)$ of degree
$k$. If $g(0)=0$, then $g(x)$ has the same number of non-zero coefficients
and the same multiplicity of vanishing at $a$ as $g(x)/x$ does, so the lemma is true for $g(x)$. If $g(0)\neq 0$, then the number of non-zero coefficients exceeds the corresponding number for
the derivative $g'(x)$ by 1, and the multiplicity of vanishing at
$a$ exceeds that of $g'(x)$ by at most 1. Now $g'(x)\not\equiv 0$
since $g(x)$ is of positive degree $k<p$, so the inequality of the
lemma holds for $g'(x)$
and therefore also for $g(x)$.
\end{proof}
\begin{proofof}
The theorem is equivalent to saying that if numbers $a_j\in \mathbf Q(\omega)$
$(j\in J)$
satisfy
$\sum_{j\in J} a_j\omega^{ij}=0$ for all $i\in I$, then all $a_j$ must be zero.
In fact, we may clearly assume that $a_j\in \mathbf Z[\omega]$.
The above equalities mean that the polynomial $$g(x)=\sum_{j\in J} a_j x^j\in \mathbf Z[\omega][x]$$
vanishes at $\omega^i$ for all $i\in I$. So $g(x)$ is divisible by
$\prod_{i\in I}(x-\omega^i)$. Applying the homomorphism
$\mathbf Z[\omega]\to \mathbf Z[\omega]/(1-\omega)=\mathbf F_p$ to the coefficients of $g(x)$
we get a polynomial $\bar g(x)\in \mathbf F_p[x]$
that is divisible by $(x-1)^{|I|}$. On the other hand,
$\bar g(x)$ has at most $|J|$ non-zero coefficients.
As $|I|=|J|$, we deduce from Lemma~\ref{2} that
$\bar g(x)\equiv 0$. This means that all $a_j$ are divisible by $1-\omega$.
We may divide all of them by $1-\omega$ and iterate the argument. This leads to
{\it descente infinie} unless all $a_j$ are zero.
\end{proofof}
\noindent
{\bf Address.} Mathematics Institute,
Budapest University of Technology and Economics, Egry J. u. 1., Budapest,
1111 Hungary.
E-mail: [email protected]
\end{document} |
\begin{document}
\title{Classification of punctures on complete flat surfaces}
\gdef\@thefnmark{}\@footnotetext{\textup{2000} \textit{Mathematics Subject Classification}:
51F99, 57M50}
\begin{abstract}
We investigate the behavior of a complete flat metric on a surface near a puncture. We call a puncture on a flat surface regular if it has a neighborhood which is isometric to that of a point at infinity of a cone. We prove that there are punctures which are not regular if and only if the curvature at the puncture is $4\pi$.
\end{abstract}
\keywords{flat surface, regular puncture, irregular puncture}
\thanks{}
\section{Introduction}
\thanks{}
Flat surfaces are obtained by gluing Euclidean triangles along their edges appropriately. They appear in several areas of mathematics and physics. For example, they are studied in dynamics of billiard tables. It is known that each rational polygon can be covered by a flat surface with trivial holonomy group. Such surfaces are called translation surfaces and have been studied extensively \cite{dynamics}. Also, these surfaces are quite useful in Teichm\"{u}ller theory. Together with quadratic differentials, they are used in the proofs of Teichm\"{u}ller's theorems \cite{teichmuller}. They appear in quantum gravity and topological quantum field theory as well. See \cite{gravity} and \cite{quantum-field}.
These surfaces are interesting for their own sake. For example, Thurston obtained complex hyperbolic orbifolds from moduli spaces of certain flat spheres \cite{thurston}. Following Thurston, Bavard and Ghys obtained real hyperbolic orbifolds from the moduli spaces of certain polygons in the plane \cite{bavard-ghys}. Troyonov introduced certain geometric structures on Teichm\"{u}ller spaces by considering the moduli spaces of flat surfaces with prescribed curvature data \cite{Tro-handbook}.
Compact flat surfaces are examples of length spaces. There is a length minimizing geodesic between any two points of such a surface.
\cite{Gromov}, \cite{dimitri}. They can be triangulated with finitely many triangles. In addition, Gauss-Bonnet formula holds for these surfaces.
See \cite{Tro-enseign}, \cite{Tro-compact}.
Flat surfaces with \textit{regular} punctures have been studied in \cite{Tro-open}. By a regular puncture on a flat surface, we mean a puncture which has a neighborhood isometric to that of the point at infinity of a cone. Flat surfaces with possibly irregular punctures have been studied in \cite{saglam}. We now state the main results of \cite{saglam}. Let $\bar{S}$ be a complete flat surface.
\begin{enumerate}
\item
$\bar{S}$ can be triangulated with finitely many types of triangles.
\item
Gauss-Bonnet formula holds for $\bar{S}$.
\item
Each loop on $\bar{S}$ has a geodesic representative in its free homotopy class.
\end{enumerate}
Our objective is to understand the behavior of a complete flat metric near a puncture. More precisely, we classify complete flat metrics on a disk with a puncture up to modification equivalence, where two flat metrics on a disk are equivalent if they are ``the same'' on a neighborhood of the puncture. Now we state the main results of the present paper.
Let $\bar{S}$ be a flat surface.
\begin{itemize}
\item
If the curvature at a punctured interior point is equal to $4\pi$, then there are uncountably many
modification-equivalence classes of complete flat metrics near the puncture.
\item
If the curvature at a punctured interior point is not equal to $4\pi$, then any complete flat metric is modification-equivalent to a cone near the puncture.
\end{itemize}
\subsection{Doubly labeled surfaces}
\label{notation}
In this paper, we use the notation in \cite{saglam}. A doubly labeled surface is a compact surface together with labeled points.
\begin{definition}
Let $S$ be a connected compact topological surface perhaps with boundary $B$.
Let $\mathfrak{l}, \mathfrak{p},\mathfrak{l'},\mathfrak{p'}$ be finite disjoint subsets of $S$ so that
\begin{itemize}
\item
$\mathfrak{l}$ and $\mathfrak{p}$ are subsets of the \textit{interior} of $S$,
\item
$\mathfrak{p'}, \mathfrak{l'}$ are subsets of $B$.
\end{itemize}
An element in $\mathfrak{l}$ will be called a \textit{labeled interior} point. An element in $\mathfrak{p}
$ will be called a punctured interior point. Other points in the interior of $S$ are called \textit{ordinary interior points}. An element in $B$ will be called a boundary point. An element
in $\mathfrak{l'}$ will be called a \textit{labeled boundary} point. An element in $\mathfrak{p'}$
will be called \textit{punctured boundary} point.
Other points in the boundary will be called \textit{ordinary boundary points}.
A \textit{doubly labeled surface}, shortly DL surface, is the tuple
$$ (S,B, \mathfrak{l},\mathfrak{p}, \mathfrak{l'}, \mathfrak{p'} )$$
\end{definition}
Also we will use the following notation:
\begin{enumerate}
\item
$S_B=S-B$.
\item
$S_{\frak{l}}=S-\frak{l}$
\item
$S_{B,\frak{l}}=S-(B\cup \frak{l})$
\item
\dots
\end{enumerate}
We will denote a doubly labeled surface $ (S,B, \mathfrak{l},\mathfrak{p},\mathfrak{l'}, \mathfrak{p'} ) $ as $S^L$. Underlying compact surface of $S^L$ will simply be denoted by $S$.
Note that DL surfaces can be considered as punctured surfaces with puncture set $\frak{p} \cup \mathfrak{p'}$. Indeed, $S_{\frak{p,p'}}$ is the punctured surface that we consider.
We point out that the punctured and labeled points may lie in the boundary.
A cone having angle $\theta>0$, or equivalently curvature $\kappa =2\pi-\theta$, is the set
\begin{align}
\{(r, \psi): r \in {\mathbf R}^{\geq 0}, \psi \in {\mathbf R}/\theta \mathbb{Z}\}
\end{align}
with the metric
\begin{align}
\mu=dr^2+r^2d\psi^2.
\end{align}
We can consider a cone as a DL sphere with one punctured and one labeled interior point. The point $(0,0)$ is called the vertex or the origin of the cone.
We will denote it by $v_{0}$. Let $\theta(v_0)=\theta$ and $\kappa(v_0)=2\pi - \theta$. Note that we may talk about the \textit{point at infinity} or the punctured point.
We shall denote this point by $v_{\infty}$.
\begin{definition}
Consider a cone with angle $\theta >0$.
\begin{enumerate}
\item
$\kappa{(v_\infty)}=2\pi+\theta$ is called the curvature at $v_{\infty}$.
\item
$\theta{(v_\infty)}=-\theta$ is called the angle at $v_{\infty}$.
\end{enumerate}
\end{definition}
\noindent We will denote
a cone with angle $\theta$ by $C_{\theta}$.
\begin{definition}
A cylinder of width $r$, $C_{0r}$, is a metric space obtained by identifying edges of an infinite strip in the Euclidean plane having width $r$ through \textit{opposite} points.
\end{definition}
\noindent Observe that a cylinder can be considered as a DL sphere with two punctured points. By convention, the angles at these punctures are $0$. We also call a cylinder a cone of angle $0$. Also, again by convention, the curvature at each of the punctured points is $2\pi$.
\begin{definition}
A (flat) cone metric on a DL surface $S^L$ is a metric on $S_{\frak{p,p'}}$ so that each point $x$ in $S_{\frak{p,p'}}$ has a neighborhood isometric to a neighborhood of the apex of the cone $C_{\theta}=C_{\theta_x}$ or a section of a cone $V_{\theta}=V_{\theta_x}$, and
\begin{enumerate}
\item
$\frak{l}=\{y \in S_{\frak{p},B}: \ \theta_y \neq 2\pi \}$,
\item
$\frak{l'}= \{ y \in B-\frak{p'}: \ \theta_y \neq \pi \}$.
\end{enumerate}
Angle at $x$, $\theta(x)$, is defined to be $\theta_x$. If $x \in S_{\frak{p},B}$, then
the curvature at $x$, $\kappa(x)$, is defined as $2\pi - \theta(x) $. If $x \in B-\frak{p'}$, then the curvature is $\kappa(x)=\pi -\theta(x)$. $x$ is called
singular if $\kappa(x)\neq 0$. Otherwise it is called non-singular.
\end{definition}
\noindent Note that the conditions {\it 1.} and {\it 2.} assure that the set of singular points and $\frak{l} \cup \frak{l'}$ are same.
A flat surface with a flat cone metric is called a flat DL surface.
\begin{definition}
A punctured interior point on a flat DL surface is called regular if it has a neighborhood isometric to a neighborhood of the point at infinity of a cone. Otherwise, it is called irregular.
\end{definition}
An example of a flat DL surface with an irregular punctured interior point is given in \cite{saglam}. Also, the curvature at a punctured interior (or boundary) point of a flat DL surface was defined in \cite{saglam}. For a DL surface, the following theorem holds. See \cite{saglam} for a proof.
\begin{theorem}[Gauss-Bonnet formula]
Let $S^L$ be a complete flat DL surface. The following formula holds:
\begin{align}
\sum_{x\in S}\kappa(x)= 2\pi\chi(S),
\end{align}
where $\chi(S)$ is the Euler characteristics of $S$.
\end{theorem}
\subsection{Modification }
\label{modification1}
If we have a complete flat metric on a DL surface with boundary, then we can cut a triangle having one edge incident to the boundary to get another complete flat metric. Note that the behavior of the metric near the punctures remains unchanged after this operation. By a modification of flat DL surface, we mean the surface obtained by removing finitely many triangles which are incident to the boundary.
We denote a closed disk with one punctured interior point by $D^L$. We assume that $D^L$ has no labeled interior points and punctured boundary points. As usual, we denote the underlying closed disk by $D$. Let $\bar{D}^L$ be a closed disk with one punctured boundary point. We assume that $\bar{D}^L$ has no labeled interior points or punctured interior points. We denote the underlying closed disk by $\bar{D}$.
\begin{proposition}[\cite{saglam}]
\label{mod2}
Each complete cone metric on $D^L$ can be modified so that resulting disk
does not have any points with positive curvature on its boundary.
\end{proposition}
\section{ Flat metrics on a disk with one punctured point}
\label{modification}
\subsection{Modification Equivalence}
Recall that a modification
of a flat metric on $D^L$ is a flat metric obtained by successively cutting Euclidean triangles which are incident to its boundary. In this section, we will classify complete flat metrics on $D^L$ up to modification equivalence. Two flat complete metrics $\mu$ and $\eta$ are called the modification equivalent if they can be modified so that there is an orientation preserving isometry between the resulting complete flat disks. Lemma \ref{mod2} implies that any flat complete metric on $D^L$ contains a metric with non-positive curvature data in its equivalence class. Thus, from now on, by a complete flat metric on $D^L$, we mean a metric with non-positive curvature data. We will also study the case of the disc with one punctured boundary point. Note that one can define modification equivalence for flat complete metrics on $\bar{D}^L$ in a similar way.
\subsection{Reducing number of singular points}
Let $\mu$ be a complete cone metric on $D^L$ and $\frak{K}$ be the total curvature on its boundary. If $\mathfrak{K}=0$ then $D^L$ is modification isometric to a half-cylinder. Therefore we need to consider the case $\mathfrak{K}<0$.
\noindent We start with a simple fact.
\begin{lemma}
Let $a_1,\dots,a_k$ be real numbers so that not all of them are equal. Let
\begin{equation}
a=\frac{\sum_{i=1}^k a_i}{k}
\end{equation}
be their average. There exists $i$ so that
\begin{itemize}
\item
$a_i\geq a$ and $a_{i+1} < a$, or
\item
$a_i < a$ and $a_{i+1} \geq a$,
\end{itemize}
where $a_{k+1}=a_1$.
\end{lemma}
\begin{figure}
\caption{After the modification, curvature at
the $i$-th vertex $b_i$ becomes $\frac{\frak{K}}{k}$.}
\label{fig.cut1}
\end{figure}
\begin{lemma}
\label{homojen}
Assume that $D^L$ has $k$ labeled boundary points. $D^L$ can be modified
so that the resulting metric also has $k$ singular points and these points have the same curvature.
\begin{proof}
First label singular points as $b_1, \dots, b_k$ so that they are in a cyclic order on the boundary. Assume that not all of the curvatures are equal. By above lemma, there exists $i$ so that either
$$\kappa_i<\frac{\frak{K}}{k}\ \textup{and} \ \kappa_{i+1} \geq\frac{\frak{K}}{k}.$$
or,
$$\kappa_i \geq \frac{\frak{K}}{k}\ \textup{and} \ \kappa_{i+1} < \frac{\frak{K}}{k},$$
\noindent Let us consider the first case. Remove the triangle with vertices
$b_i,b_{i+1},b_{i+1}'$ having angles $\lvert \kappa_i \rvert - \frac{\lvert \frak{K} \rvert}{k}, \ \lvert \kappa_{i+1} \rvert, \ \pi + \frac{\lvert \frak{K} \rvert}{k}-\lvert \kappa_i \rvert - \lvert \kappa_{i+1}\rvert$ from $D^L$, respectively. See Figure \ref{fig.cut1}.
\begin{itemize}
\item
if $\kappa_{i+1}>\frac{\frak{K}}{k}$, then the resulting
metric has fewer singular points having curvature not equal to
$\frac{\frak{K}}{k}$,
\item
if $\kappa_{i+1}=\frac{\frak{K}}{k}$, then $i$-th vertex has curvature $\kappa_{i+1}=\frac{\frak{K}}{k}$ and $i+1$-th
vertex has curvature $\kappa_i$ in the resulting disc.
\end{itemize}
Observe that if $\kappa_i \geq \frac{\frak{K}}{k}\ \textup{and} \ \kappa_{i+1} < \frac{\frak{K}}{k}$, there is a similar cutting operation with the following properties:
\begin{itemize}
\item
if $\kappa_{i}>\frac{\frak{K}}{k}$ then the resulting metric has fewer singular points having curvature not equal to
$\frac{\frak{K}}{k}$.
\item
if $\kappa_{i}=\frac{\frak{K}}{k}$, then $i+1$-th vertex has curvature $\frac{\frak{K}}{k}$ and $i$-th
vertex has curvature $\kappa_{i+1}$ in the resulting
disc.
\end{itemize}
Apply the following algorithm to $D^L$ repeatedly.
\begin{enumerate}
\item
If there is an index $i$ so that either
$$\kappa_i > \frac{\frak{K}}{k}\ \textup{and} \ \kappa_{i+1} < \frac{\frak{K}}{k},$$
or,
$$\kappa_i<\frac{\frak{K}}{k}\ \textup{and} \ \kappa_{i+1} >\frac{\frak{K}}{k}.$$
apply cutting operation described above. Observe that the number of
singular points having curvature which is not equal to $\frac{\frak{K}}{k}$
decreases after this operation. Do this repeatedly so that
there is no index $i$ satisfying any of the properties above.
\item
After the first step, if all curvatures of the singular points of the resulting metric are equal, then we are done. If this is not the case, we can permute curvatures to get a modification for which
there exists $i$ so that
$$\kappa_i > \frac{\frak{K}}{k}\ \textup{and} \ \kappa_{i+1} < \frac{\frak{K}}{k},$$
or,
$$\kappa_i<\frac{\frak{K}}{k}\ \textup{and} \ \kappa_{i+1} >\frac{\frak{K}}{k}.$$
\item
Apply the first step to the resulting metric.
\end{enumerate}
Since the number of singular points having curvature not equal
to $\frac{\frak{K}}{k}$ decreases at each \textit{run} of the algorithm, we get a cone metric of desired type in finitely many steps.
\end{proof}
\end{lemma}
\begin{lemma}
\label{base-reduce}
Assume that $0\leq\lvert \frak{K} \rvert < \pi$. Let $x$ be a boundary point on $D^L$ and $L$ be a half-line originating from $x$ and directed toward its interior. This disc can be modified so that the resulting metric has at most \textit{one} singular point and no point except $x$ on $L$ is in the triangles removed during the modification.
\begin{proof}
The case $\frak{K}=0$ is trivial since there are no singular points for this case.
Desired modification for the cone metrics having singular points is described in Figure \ref{reduce-to-vertex}.
Draw two half lines $L_1,L_2$ originating from $x$ making an angle of $\pi$ with each other. Gauss-Bonnet formula implies that $L_1$ and $L_2$ intersect only at 2 points, one of which is $x$. These
lines, together with the boundary of $S^L$, bound a compact region with polygonal geodesic boundary. We can obtain desired modification by removing this region.
\end{proof}
\end{lemma}
\begin{figure}
\caption{Cut the annulus bounded by $L_1,L_2$ and the boundary of $D^L$ to obtain a cone metric with one singular point having curvature $\frak{K}$.}
\label{reduce-to-vertex}
\end{figure}
\begin{proposition}
\label{L-cut}
Assume that $n>0$ and $\frak{K} \leq 0$ so that
$$(n-1)\pi \leq \lvert \frak{K} \rvert < n \pi.$$
Also assume that the metric has $n+m, \ m\geq 0$ singular points. Let $x$ be one of them and $L$
be a half-line originating from $x$ and pointing interior of the disc.
The metric can be modified so that the resulting metric has $n$ singular points of negative curvature and the removed triangles contain no points on $L$ other than $x$.
\begin{proof}
We will prove the statement by induction on the number $n$. The base case $n=1$ is done by
the Lemma \ref{base-reduce}. Assume that $n\geq 1$ and
$$ n\pi \leq \lvert \frak{K} \rvert < (n+1)\pi.$$
Take a cone metric on the disc having $n+1+m$ singular points. Take a boundary segment with vertices $x,y$ and half-lines $L_1, L_2$ making angles $\pi$ with the segment. See Figure \ref{L1-L2}. Cut the half plane and glue the half-lines $L_1$ and $L_2$. In this way we get a new cone metric on the punctured disc so that the total curvature at its boundary is $\frak{K'}=\frak{K}+\pi$, thus satisfying the inequality below:
$$(n-1)\pi \leq \lvert \frak{K'} \rvert < n\pi.$$
Let $L_{12}$ the half line formed by gluing $L_1$ with $L_2$. We show the vertex obtained by $x$ and $y$ by $xy$. Observe that by induction hypothesis, we can modify this new metric so that the resulting metric has $n$ singular points and removed triangles
do not contain any point of $L_{12}$ except $x$. Now cut this new punctured disc together with the induced metric through $L_{12}$, and glue the half-plane
that we removed as in the figure. Resulting cone metric has $n+1$ singular points and is a modification of the metric we started with. Also observe that compact part removed during the modification does not intersect with half-line $L$ except at the vertex $x$.
\end{proof}
\end{proposition}
\begin{figure}
\caption{Remove half plane determined by $L_1, L_2 $ and boundary segment $[x,y]$ and glue
$L_1$ and $L_2$ to get a cone metric of total curvature $\frak{K'}=\frak{K}+\pi$.}
\label{L1-L2}
\end{figure}
\begin{theorem}
\label{best-reduction}
Every complete cone metric on $D^L$ with total boundary curvature
$$(n-1)\pi \leq \lvert \frak{K}\rvert < n\pi$$
\noindent can be modified so that resulting metric has $n$ singular points, and curvature of each of these singular points is $\frac{\frak{K}}{n}$.
\begin{proof}
First modify the metric as in Proposition \ref{L-cut} to get a metric with $n$ singular points of negative curvature. Modify this new metric as in Lemma \ref{homojen}, to get a metric of desired type.
\end{proof}
\end{theorem}
\subsection{The case $0 \leq \lvert \frak{K} \rvert < 2\pi$}
\begin{lemma}
\label{main-interior0}
If $0 \leq \lvert \frak{K} \rvert < 2\pi$, then the puncture
on any complete flat metric on $D^L$ is regular.
\begin{proof}
We know that if $\frak{K}=0$ then $D^L$ is isometric to a half-cylinder. Thus the puncture is regular.
First assume that $0<\lvert \frak{K} \rvert < \pi$. By Theorem \ref{best-reduction}, this flat metric can be modified so that there is only one singular point on its boundary. Let $l$ be the length of its boundary. Take an isosceles triangle with angles $-\frak{K}, \frac{\pi + \kappa}{2}, \frac{\pi+\kappa}{2}$ such that the length of the edge opposite to the vertex with angle $\frak{-K}$ is equal to $l$. If you glue the equal edges of this triangle and glue the resulting flat disk with $D^L$, you get the cone $C_{-\kappa}$. Hence the puncture is regular. See Figure \ref{cone1}.
Assume that $\pi \leq \lvert \frak{K} \rvert < 2\pi$. Then we can modify $D^L$ so that it has two singular points of curvature $\frac{\frak{K}}{2}$. Call the singular points $b_1$ and $b_2$. Observe that the boundary has two components and these components connect $b_1$ and $b_2$. Let $l_1$ and $l_2$ be the lengths of these components.
Then we can find two isosceles triangles with the following properties:
\begin{itemize}
\item
First triangle has edges of length $l_1, a, a$ and the angles at its vertices are $\alpha,\alpha, \gamma$. Also the vertex with angle $\gamma$ is opposite the edge having length $l_1$.
\item
Second triangle has edges of length $l_2, a, a$ and the angles at its vertices are $\beta,\beta, \gamma'$. Also the vertex with angle $\gamma'$ is opposite the edge having length $l_2$.
\item
$\gamma+\gamma'=-\frak{K}$.
\end{itemize}
Now glue the triangles along the edges having length $a$ to get a flat disk with one singular interior point and two singular boundary points. Note that the angle at the singular interior point is $-\kappa$. If you appropriately glue this disk with $D^L$ along their boundaries, you get
the cone $C_{-\kappa}$. See Figure \ref{cone2}. Hence the puncture is regular.
\end{proof}
\end{lemma}
\begin{figure}
\caption{Glue unlabeled edges of the isosceles triangle to get a flat disk. Then glue this disk with $D^L$ to get a cone.}
\label{cone1}
\end{figure}
\begin{figure}
\caption{Glue the isosceles triangles through the edges having length $a$ to get a flat disk with one singular interior point and two singular boundary points. Then glue this disk with $D^L$ to get a cone. }
\label{cone2}
\end{figure}
\subsection{Principal modifications }
Regarding Theorem \ref{best-reduction}, for each $\frak{K}<0$ so that $(n-1)\pi \leq \lvert \frak{K} \rvert < n \pi$, we will study modification-equivalence on the set
\begin{align*}
\frak{C}(\frak{K},n)
=\{\textup{Flat metrics on } D^L \textup{ with } n \textup{ singular boundary points of curvature }\frac{\frak{K}}{n}\}/\textup{isometry}
\end{align*}
Note that two elements $\mu$ and $\eta$ are equivalent, $\mu \sim \eta$, if there is an orientation preserving isometry between them which respects the labeling of the vertices.
Each element in $\frak{C}(\frak{K},n)$ is uniquely determined by the lengths of the boundary segments of the punctured disc. See \cite[Theorem 1]{saglam}. Let \\ $Ld(\mu)=(l[b_1,b_2],\dots ,l[b_n,b_{n+1}])$, where $l[b_i,b_{i+1}]$ is the length of the boundary segment joining $b_i$ and $b_{i+1}$. We call $Ld(\mu)$ the length data of $\mu$. Therefore the map below is a bijection:
$$ Ld: \frak{C}(\frak{K},n) \rightarrow \mathbf R_+^n$$
$$\mu \rightarrow Ld(\mu), $$
where $\mathbf R_+$ is the set of positive real numbers.
We will denote the set of equivalence classes of cone metrics on the disc by
$$\frak{M}(\frak{K})=\frak{M}(\frak{K},n):=\frak{C}(\frak{K},n)/\text{Modification}.$$
Now we define \textit{principal operations} which can be thought as maps
$\frak{C}(\frak{K},n) \rightarrow \frak{C}(\frak{K},n)$.
Let $n\geq 3$. Take an element in $\mu \in \frak{C}(\frak{K},n)$. Fix an index $j \in \{1,2 \dots n \}$ and
a non-negative real number $r$. From the punctured disc (together with the metric $\mu$), subtract a quadrangle having angles
$$(\lvert \frac{\frak{K}}{n} \rvert, \pi - \lvert \frac{\frak{K}}{n} \rvert , \pi - \lvert \frac{\frak{K}}{n} \rvert,\lvert \frac{\frak{K}}{n} \rvert ),$$
and edge lengths
$$(r,\ l[b_j,b_{j+1}]+ 2r \cos{(\pi - \frac{\frak{K}}{n})},\ r, \ l[b_j,b_{j+1}] ),$$
\noindent which has the segment $[b_j,b_{j+1}]$ as an edge. Since $\frac{\lvert \frak{K} \rvert}{n}+\frac{\lvert \frak{K} \rvert}{n}=2\frac{\lvert \frak{K} \rvert}{n}>\pi$, for each $r>0$ such a quadrangle exists. See Figure \ref{fig-principal-1}.
By this way, we obtain another element in $\frak{C}(\frak{K},n)$, denote this map by
$$\Theta_{j,r}: \frak{C}(\frak{K},n) \rightarrow \frak{C}(\frak{K},n).$$
\noindent From the Figure \ref{fig-principal-1}, description of this map in terms of length data is clear:
$$
\Theta_{j,r}: \mathbf R_+^n \rightarrow \mathbf R_+^n
$$
\begin{align}
\Theta_{j,r}([l_1,\dots l_n])=[l_1,\dots,l_{j-2},l_{j-1}+r ,l_j+ 2r \cos(\pi - \frac{\frak{K}}{n}),l_{j+1}+r, l_{j+2}, \dots] \\
=[l_1,\dots,l_n]+[0,\dots,0,\stackrel{j-1}{ \stackrel{\downarrow}{r}} ,2r \cos(\pi - \frac{\frak{K}}{n}),r, 0, \dots,0]
\end{align}
Observe that above formulas imply the following:
\begin{enumerate}
\item
$\Theta_{j,0}$ is identity map on $\frak{C}(\frak{K},n)$ (or on $\mathbf R^n$),
\item
$\Theta_{j,r}\circ \Theta_{j,r'}= \Theta_{j,r+r'}$,
\item
$\Theta_{j,r}\circ \Theta_{j',r'} = \Theta_{j',r'}\circ \Theta_{j,r}$.
\end{enumerate}
\begin{definition}
We call semi-group generated by $\Theta_{j,r}$'s, either as maps on $\frak{C}(\frak{K},n)$ or $\mathbf R_+^n$, the principal semigroup , and denote it as $\mathbb{T}=\mathbb{T}(\frak{K})$.
\end{definition}
\begin{remark}
\label{compact}
Let $C$ be a compact set of $D^L$ and $\mu \in \frak{C}(\frak{K},n)$. There exists an element $\frak{T}$ in $\mathbb{T}(\frak{K})$ so that $C$ is a subset of the removed part of the once punctured sphere after the modification with respect to
$\frak{T}$.
\end{remark}
\begin{figure}
\caption{An example of principal modifications: the new cone metric is obtained by removing the quadrilateral in light grey. Note that $l_j=l[b_j,b_{j+1}]$.}
\label{fig-principal-1}
\end{figure}
\label{modification-equivalence}
\subsubsection{Circulant matrices} We recall the basic properties of the circulant matrices.
\label{circulant-properties} A circulant matrix $\mathcal{C}$ is a $m\times m$ matrix obtained from
one column vector $\mathbf{c}=[c_0,\dots,c_{m-1}]^T$ so that columns of $\mathcal{C}$ are determined
by cyclic permutations of $\mathbf{c}$ as below:
\begin{align*}
\mathcal{C}=
\begin{bmatrix}
c_0 & c_{m-1} & \dots & c_{2} & c_{1} \\
c_{1} & c_0 & c_{m-1} & & c_{2} \\
\vdots & c_{1}& c_0 & \ddots & \vdots \\
c_{m-2} & & \ddots & \ddots & c_{m-1} \\
c_{m-1} & c_{m-2} & \dots & c_{1} & c_0 \\
\end{bmatrix}.
\end{align*}
See \cite{circulant} for the basic properties of circulant matrices. $f_{\mathcal{C}}(x)=c_0+c_1x+c_2x^2+\dots +c_{n-1}x^{n-1}$ is called associated polynomial of $\mathcal{C}$.
Let $\omega_j=\exp \left(\tfrac{2\pi \imath j}{n}\right)$ for each $j=0,\dots,n-1$, where $\imath=\sqrt{-1}$.
\begin{itemize}
\item
The set of eigenvalues of $\mathcal{C}$ is
\begin{align}
\{\lambda_j = c_0+c_{n-1} \omega_j + c_{n-2} \omega_j^2 + \ldots + c_{1} \omega_j^{n-1}: \ j=0, 1,\dots, n-1\}.
\end{align}
\item
Determinant of $\mathcal{C}$, $\mathrm{det}(\mathcal{C})$ , is
\begin{align}
\label{determinant}
\prod_{j=0}^{n-1} (c_0 + c_1 \omega_j + c_2 \omega_j^2 + \dots + c_{n-1}\omega_j^{n-1}).
\end{align}
\item
Rank of $\mathcal{C}$ is $n-d$ where $d$ is the degree of the greatest common divisor of the polynomials $f_{\mathcal{C}}(x)$ and $x^{n}-1$.
\item
Eigenvector with eigenvalue $\lambda_j$ is
$$v_j= [1,~ \omega_j,~ \omega_j^2,~ \ldots,~ \omega_j^{n-1}]^T,\quad j=0, 1,\ldots, n-1.$$
Observe that a circulant matrix is diagonalizable and the eigenvectors of such a matrix do not depend on the coefficients $c_0, c_1, \dots, c_{n-1}$.
\end{itemize}
\subsubsection{From the principal modifications to the circulant matrices}
Each $\Theta_{j,r}$ can be thought as a translation map $\mathbf R^n \to \mathbf R^n$. The vector space of translation maps can be identified with $\mathbf R^n$ and its canonical basis can be identified with the basis of $\mathbf R^n$ that consists of the vectors
$$e_1=(1,0,\dots,0)$$
$$e_2=(0,1, \dots,0)$$
$$\dots$$
$$e_n=(0,0,\dots,1)$$
\noindent With respect to this basis, $\Theta_{j,1}$ has coordinates
$$[0,\dots,0,\stackrel{j-1}{ \stackrel{\downarrow}{1}} ,2 \cos(\pi - \frac{\frak{K}}{n}),1, 0, \dots,0]^T$$
\noindent Therefore the matrix formed by the coordinates of $\Theta_{1,1},\Theta_{2,1},\dots, \Theta_{n,1}$ is a circulant matrix with $c_0=1,c_1=2 \cos(\pi - \frac{\frak{K}}{n}),c_2=1$ and $c_j=0$ if $j\neq 0,1,2$. Call this matrix $\mathcal{C}$.
\begin{lemma}
\label{determinant-key}
If $\frak{K}\neq -2\pi$, then $1+2\cos{(\pi-\frac{\frak{K}}{n})}\omega_j+\omega_j^2\neq 0$.
\begin{proof}
Let $\omega_j=\exp \left(\tfrac{2\pi \imath j}{n}\right)$. Assume that $(n-1)\pi \leq \lvert \frak{K}\rvert < n\pi$ and $n>3$. Then
$$(\frac{n-1}{n}+1)\pi \leq \pi-\frac{\frak{K}}{n}< 2\pi$$
$$\cos{\frac{\pi}{n}}\leq \cos{(\pi-\frac{\frak{K}}{n})} < 1.$$
In particular, $\frac{1}{2}<\cos{(\pi-\frac{\frak{K}}{n})}$. Now assume that $1+2\cos{(\pi-\frac{\frak{K}}{n})}\omega_j+\omega_j^2=0$. It follows that $\lvert 1+\omega_j^2 \rvert= 2\cos{(\pi-\frac{\frak{K}}{n})}>2$, which is impossible. Hence $1+2\cos{(\pi-\frac{\frak{K}}{n})}\omega_j+\omega_j^2\neq 0$ for all $j$. A similar argument shows that $1+2\cos{(\pi-\frac{\frak{K}}{n})}\omega_j+\omega_j^2\neq 0$ when $n=3$ and $\kappa\neq -2\pi$.
\end{proof}
\end{lemma}
\begin{proposition}
\label{main-interior1}
If $\kappa\neq - 2\pi$, then $\frak{M}(\kappa)$ consists of one single point, that
is, there are no irregular punctures on $D^L$ when the total curvature on the boundary is not equal to $-2\pi$.
\begin{proof}
We will show that $\frak{C}(\frak{K},n)/\mathbb{T}(\frak{K})$ consists of a single point. Clearly this implies that $\frak{M}(\kappa)$ consists of one point. Lemma \ref{determinant-key} implies that $\det(\mathcal{C})\neq 0$. Therefore the group generated by $\Theta_{1,1},\dots,\Theta_{n,1}$ is the full group of translations of $\mathbf R^n$. So this group has one orbit. It follows that $$\frak{C}(\frak{K},n)/\mathbb{T}(\frak{K})=\mathbf R_+^n/\mathbb{T}(\frak{K})$$
has only one point.
\end{proof}
\end{proposition}
\noindent Now we consider the case $\frak{K}=-2\pi$.
\begin{proposition}
\label{main-interior2}
There is a bijection between $\frak{M}(-2\pi)$ and $\mathbf R^2_+$.
\begin{proof}
Consider the map $\mathbf R^2_+\to \frak{M}(-2\pi)$ sending $(\alpha,\beta)$
to the modification-equivalence class of the metric in $\frak{C}(-2\pi,3)$ having boundary segments of length $1,\alpha, \beta$. Let us denote the modification-equivalence class of the metric on $D^L$ with boundary segments of length $a,b,c$ by $[a,b,c]$. Since $\cos (\pi-\frac{2\pi}{3})=\frac{1}{2}$, it follows that $\Theta_{1,r}$ sends $[a,b,c]$ to $[a+r,b+r,c+r]$. Therefore it is easy to see that this map is surjective. Now assume that $[1,b,c]$ and $[1,b',c']$ are modification equivalent. It follows that there is an orientation preserving isometry sending the flat disk $D^L$ whose boundary segments have length $1+r,b+r,c+r$ to the flat disk $D^L$ whose boundary segments have length $1+r',b'+r',c'+r'$, where $r,r' \geq 0$. It follows that $r=r'$, $b=b'$ and $c=c'$. So the map is injective.
\end{proof}
\end{proposition}
Now we collect the results in Lemma \ref{main-interior0}, Proposition \ref{main-interior1} and Proposition \ref{main-interior2} in a single theorem.
\begin{theorem}
\label{main-interior}
\begin{enumerate}
\item
If $\frak{K}\neq-2\pi$, then $\frak{M}(\frak{K})$ consists of one single point. In other words, there are no complete flat metrics on $D^L$ so that the puncture is irregular when the curvature at the puncture is not equal to $4\pi$.
\item
$\frak{M}(-2\pi)\equiv \mathbf R^2_+ $.
\end{enumerate}
\end{theorem}
\end{document} |
\begin{document}
\author[A. T. Bernardino \and D. Pellegrino \and J.B. Seoane-Sep\'{u}lveda \and M.L.V. Souza]{A.T. Bernardino \and D. Pellegrino\textsuperscript{*} \and J.B. Seoane-Sep\'{u}lveda\textsuperscript{**} \and M.L.V. Souza}
\address{Centro de Ensino Superior do Serid\'{o},\newline\indent Universidade Federal do Rio Grande do Norte, \newline\indent Rua Joaquim Greg\'{o}rio, S/N - Penedo, \newline\indent Caic\'{o}, 59300-000, Brazil.}
\email{[email protected]}
\address{Departamento de Matem\'{a}tica,\newline\indent Universidade Federal da Para\'{\i}ba,\newline\indent 58.051-900 - Jo\~{a}o Pessoa, Brazil.}
\email{[email protected]}
\thanks{\textsuperscript{*}Supported by CNPq Grant 301237/2009-3.}
\address{Departamento de An\'{a}lisis Matem\'{a}tico,\newline\indent Facultad de Ciencias Matem\'{a}ticas, \newline\indent Plaza de Ciencias 3, \newline\indent Universidad Complutense de Madrid,\newline\indent Madrid, 28040, Spain.}
\email{[email protected]}
\thanks{\textsuperscript{**}Supported by the Spanish Ministry of Science and Innovation, grant MTM2009-07848.}
\address{Departamento de Matem\'{a}tica/ICENE,\newline\indent UFTM - Universidade Federal do Tri\^angulo Mineiro,\newline\indent
Rua Get\'ulio Guarit\'a, 159, \newline\indent CEP 38.025-440 - Uberaba-MG, Brazil.}
\email{[email protected]}
\subjclass[2010]{46G25, 47H60, 47B10}
\keywords{Absolutely summing operators, coherent ideals, compatible ideals, Banach
polynomial ideals}
\title[Absolutely summing operators revisited]{Absolutely summing operators revisited: new directions in the nonlinear theory}
\begin{abstract}
In the last decades many authors have become interested in the study of multilinear and polynomial generalizations of families of operator ideals (such as, for instance, the ideal of absolutely summing operators). However, these generalizations must keep the essence of the given operator ideal and there seems not to be a universal method to achieve this. The main task of this paper is to discuss, study, and introduce multilinear and polynomial extensions of the aforementioned operator ideals taking into account the already existing methods of evaluating the adequacy of such generalizations. Besides this subject's intrinsic mathematical interest, the main motivation is our belief (based on facts that shall be presented) that some of the already existing approaches are not adequate.
\end{abstract}
\maketitle
\section{Introduction and historical background}
A well-known fact from an undergraduate Analysis course states that, in
$\mathbb{R}$, a series converges absolutely if and only if it is
unconditionally convergent; this result was proved by J.P.G.L. Dirichlet in
1829. For infinite-dimensional Banach spaces the situation is quite different:
on the one hand for $\ell_{p}$ spaces with $1<p<\infty$, for example, it is
quite easy to construct an unconditionally convergent series which fails to be
absolutely convergent. On the other hand, for $\ell_{1}$ and some other Banach
spaces the answer to this problem is far from being straightforward. The
special case of $\ell_{1}$ was solved in 1947 by M.S. Macphail \cite{Mac}
through a very elaborate construction.
The question of whether every infinite-dimensional Banach space has an
unconditionally convergent series which fails to be absolutely convergent was
raised by Banach \cite[p. 40]{Banach32} (see also Problem 122 in the Scottish
Book \cite{Mau}, proposed by S. Mazur and W. Orlicz). In 1950, A. Dvoretzky
and C.A. Rogers \cite{DR} solved this question in the positive:
\textbf{Theorem} (Dvoretzky-Rogers, 1950). The unconditionally convergent
series and the absolutely convergent series coincide in a Banach space $E
$ if and only if $\dim E<\infty.$
The above result encouraged the curiosity of the genius of A. Grothendieck,
who rapidly presented a different proof of this result in his Ph.D.
dissertation \cite{Gro1955}. Grothendieck's famous R\'{e}sum\'{e} \cite{gro}
(see also \cite{dies01} for a modern and thorough study) and \cite{Gro1955}
are, essentially, the beginning of the theory of absolutely summing operators.
More precisely, in view of Dvoretzky-Rogers' striking result, the idea of
investigating linear operators that transform unconditionally convergent
series into absolutely convergent series seemed natural and was the birth of
the notion of absolutely summing operators (a linear operator $u:E\rightarrow
F$ is absolutely summing if ${\textstyle\sum} u(x_{j})$ is absolutely
convergent whenever ${\textstyle\sum} x_{j}$ is unconditionally convergent).
Soon after, Grothendieck proved a quite surprising result asserting that every
continuous linear operator from $\ell_{1}$ to $\ell_{2}$ (or to any Hilbert
space) is absolutely summing (this kind of result is now called a
\emph{coincidence theorem}). This result is a consequence of an intriguing
inequality which Grothendieck himself called \textquotedblleft the fundamental
theorem of the metric theory of tensor products\textquotedblright.
Grothendieck's inequality has important applications (\cite{AAA, FFF}) and
still has some hidden mysteries such as the precise value of Grothendieck's
constant. For a recent work on the estimates for Grothendieck's constant we
refer to \cite{NN}.
The modern notion of absolutely $(p;q)$-summing operators was introduced in
the 1960's by A. Pietsch \cite{stu} and B. Mitiagin and A. Pe\l czy\'{n}ski
\cite{MPel}. Besides its intrinsic mathematical interest and deep mathematical
motivation, it has shown to be a very important tool in general Banach space
theory. For instance, and just to cite some, using the theory of absolutely
summing operators one can show that every normalized unconditional basis of
$\ell_{1}$ is equivalent to the unit vector basis of $\ell_{1}$ and also that,
for $1<p<\infty$, there is a normalized unconditional basis of $\ell_{p}$
which is not equivalent to the unit vector basis of $\ell_{p}$.
Throughout this paper $\mathbb{N}$ represents the set of all positive integers
and $\mathbb{N}_{m}:=\{1,...,m\}$. Also, $E,E_{1},\ldots,E_{n},F,G,G_{1}
,...,G_{n},H$ will stand for Banach spaces over $\mathbb{K}=\mathbb{R}$ or
$\mathbb{C}$, the topological dual of $E$ is represented by $E^{\ast}$ and
$B_{E^{\ast}}$ denotes its closed unit ball. The symbol $W\left( B_{E^{\ast}
}\right) $ represents the probability measures in the Borel sets of
$B_{E^{\ast}}$ with the weak-star topology. We will denote the space of all
continuous $n$-linear operators from $E_{1}\times\cdots\times E_{n}$ into $F$
by $\mathcal{L}(E_{1},\ldots,E_{n};F) $ or $\mathcal{L}_{n}(E_{1},\ldots
,E_{n};F).$ Also, we recall that an $n$-homogeneous polynomial $P:E\rightarrow
F$ is a map so that $P(x)=\check{P}(x,\ldots,x),$ where $\check{P}$ represents
the unique symmetric $n$-linear map associated to $P$. The corresponding space
(endowed with the sup norm) is represented by $\mathcal{P}(^{n}E;F)$. For the
theory of polynomials and multilinear operators acting on Banach spaces we
refer to \cite{Dineen, Mu}.
For $0<p<\infty$, the space of all sequences $\left( x_{j}\right)
_{j=1}^{\infty}$ in $E$ such that $\left( \varphi\left( x_{j}\right)
\right) _{j=1}^{\infty}\in\ell_{p}$, for every $\varphi\in E^{\ast}$ is
denoted by $\ell_{p}^{w}\left( E\right) .$ When endowed with the norm
($p$-norm if $0<p<1 $)
\[
\left\Vert \left( x_{j}\right) _{j=1}^{\infty}\right\Vert _{w,p}
{\small :=}\sup\{({\textstyle\sum\limits_{j=1}^{\infty}}\left\vert
\varphi\left( x_{j}\right) \right\vert ^{p})^{1/p}:\varphi\in B_{E^{\ast}
}\}{\small ,}
\]
the space $\ell_{p}^{w}\left( E\right) $ is complete. We recall that if
$0<q\leq p<\infty$ a continuous linear operator $u:E\rightarrow F$ is
absolutely $(p;q)$-summing if $\left( u(x_{j})\right) _{j=1}^{\infty}\in
\ell_{p}\left( F\right) $ whenever $\left( x_{j}\right) _{j=1}^{\infty}
\in\ell_{q}^{w}\left( E\right) .$ In this case we write $u\in\Pi
_{(p;q)}(E;F)$. For $p=q=1$ this notion coincides with the concept of
absolutely summing operator. For classical results on absolutely summing
operators we refer to \cite{dies0, pisier, T1} and references therein (recent
results can also be checked in \cite{PellZ, Lima, Ok}). The concept of
absolutely summing operators has some natural linear extensions such as the
notions of mixing $(p;q)$-summing operators (due to A. Pietsch and B. Maurey)
and $(p;q;r)$-summing operators (due to A. Pietsch). It is worth mentioning
that these concepts were not just constructed to simply generalize the notion
of absolutely $(p;q)$-summing operators; these notions have their particular
reasons to be investigated (see \cite[p. 359]{hist}).
In the 1980's, Pietsch \cite{Pie} suggested a multilinear approach to the
theory of absolutely summing operators and, more generally, to the theory of
operator ideals. Since then, several authors were attracted by the subject and
also non-multilinear approaches have appeared (see \cite{CDo, Chen, Junek,
Nach00, MST, adv}). The adequate way of lifting the notion of a given operator
ideal to the multilinear and polynomial settings is a delicate matter. For
example, in the case of the ideal of absolutely summing linear operators,
there are several different approaches to the polynomial and multilinear
contexts (see \cite{adv, ppaa} and references therein). The abstract notions
of (global) holomorphy types (see \cite{BBJMs, Nachbin}), coherent and
compatible ideals (see \cite{CDM09}) shed some light on what kind of approach
is more adequate.
Recently, in 2003, the notion of multiple summing multilinear operators (and
polynomials) was introduced (see \cite{Matos, pv}) but, as a matter of fact,
the origin of this notion dates back to \cite{bh, lit, Ram}. Several
indicators from the theory of summing operators and from the theory of
(multi-) ideals show that this is one of the most adequate approaches to the
nonlinear theory of absolutely summing operators. For results on multiple
summing multilinear operators we refer to \cite{Na, df, davidstudia, pv,
PopJM, Popa}.
Notwithstanding the quick success of the theory of multiple summing
multilinear operators, some recent papers related to multilinear summability
seem to have overlooked its advantages. More precisely, the multilinear
notions of mixing summing operators and absolutely $\left( p;q;r\right)
$-summing multilinear operators were introduced following a different
perspective (see \cite{achour, Carlos Alberto}). The point is that these
approaches do not carry out the essence of the respective linear concepts and
this lack is clearly corroborated by the notions of coherence, compatibility
and holomorphy types.
In this paper we present multilinear and polynomial notions of absolutely
$\left( p;q;r\right) $-summing operators and mixing summing operators which
follow the philosophy of the idea of multiple summability. Among other
results, the adequacy of our approach is evaluated by proving that our new
definitions provide coherent sequences, compatible and also (global)
holomorphy types.
Below we recall the notions of mixing summing operators and absolutely
$\left( p;q;r\right) $-summing operators.
\subsection{Mixing summing operators\label{xxzzz}}
Let $0<p\leq s\leq\infty$ and $r$ such that $\frac{1}{r}+\frac{1}{s}=\frac
{1}{p}.$ A sequence $(x_{i})_{i=1}^{\infty}$ in $E$ is $(s;p)$-mixed summable
if
\[
x_{i}=\tau_{i}y_{i}
\]
with $(\tau_{i})_{i=1}^{\infty}\in\ell_{r}$ and $(y_{i})_{i=1}^{\infty}\in
\ell_{s}^{w}(E)$.
In this case, consider
\[
\left\Vert \left( x_{i}\right) _{i=1}^{\infty}\right\Vert _{mx(s,p)}
:=\inf\left\{ \left\Vert \left( \tau_{i}\right) _{i=1}^{\infty}\right\Vert
_{r}\left\Vert \left( y_{i}\right) _{i=1}^{\infty}\right\Vert _{w,s}
\right\} ,
\]
where the infimum is taken over all possible representations of $\left(
x_{i}\right) _{i=1}^{\infty}$ in the above form. The space of all
$(s;p)$-mixed summable sequences in $E$ is represented by $\ell_{(s,p)}
^{mx}(E).$ It is not difficult to prove that $\ell_{(s,p)}^{mx}(E)$ is a
complete normed ($p$-normed if $0<p<1$) space.
It is immediate that, for $0<p\leq s\leq\infty,$ one always has
\begin{itemize}
\item $\ell_{p}(E)\subset\ell_{(s,p)}^{mx}(E)\subset\ell_{p}^{w}(E)$ with
\begin{equation}
\left\Vert \left( z_{j}\right) _{j=1}^{\infty}\right\Vert _{w,p}
\leq\left\Vert \left( z_{j}\right) _{j=1}^{\infty}\right\Vert _{mx(s,p)}
\leq\left\Vert \left( z_{j}\right) _{j=1}^{\infty}\right\Vert _{p},
\label{prop}
\end{equation}
\item $\ell_{p}^{w}(E)=\ell_{(p,p)}^{mx}(E)$ and $\ell_{p}(E)=\ell
_{(\infty,p)}^{mx}(E)$ isometrically.
\end{itemize}
Let us now recall the linear concept of mixing summing linear operators (see
\cite{pp1}):
Let $0<p\leq s\leq\infty.$ A continuous linear operator $u:E\rightarrow F$ is
mixing $(s,p)$-summing ($u\in\Pi_{mx(s,p)}(E;F)$) if there exists a constant
$\sigma\geq0$ such that
\begin{equation}
\left\Vert \left( u(x_{j})\right) _{j=1}^{m}\right\Vert _{mx(s,p)}\leq
\sigma\left\Vert (x_{j})_{j=1}^{m}\right\Vert _{w,p} \label{rst}
\end{equation}
for all $x_{1},\ldots,x_{m}\in E$ and $m\in\mathbb{N}.$ The infimum of all
such constants $\sigma$ is represented by $\pi_{mx(s,p)}(u).$
The terminology \textquotedblleft mixing\textquotedblright\ is motivated by
the fact that a continuous linear operator $u:E\rightarrow F$ is $\left(
s,p\right) $-mixing summing precisely when $u$ maps every weakly $p$-summable
sequence $\left( x_{i}\right) _{i=1}^{\infty}$ in $E$ into a sequence which
can be written as a product $\left( \tau_{i}y_{i}\right) _{i=1}^{\infty}$ of
an absolutely $r$-summable scalar sequence $\left( \tau_{i}\right)
_{i=1}^{\infty}$ and a weakly $s$-summable sequence $\left( y_{i}\right)
_{i=1}^{\infty}$ in $F$, where $\frac{1}{s}+\frac{1}{r}=\frac{1}{p}.$ Many of
the classical results of mixing summing operators are due to B. Maurey
\cite{Maurey} and the theory has shown to be sufficiently rich to be
investigated by its own (see \cite[Section 32]{Flore}).
\subsection{Absolutely $(p;q;r)$-summing operators\label{Sub2}}
The concept of absolutely $(p;q;r)$-summing linear operators is due to A.
Pietsch \cite{pp0, pp1}. If $0<p,q<\infty$ and $0<r\leq\infty$ and
\[
\frac{1}{p}\leq\frac{1}{q}+\frac{1}{r},
\]
a continuous linear operator $u:E\rightarrow F$ is absolutely $\left(
p;q;r\right) $-summing ($u\in\Pi_{as\left( p;q;r\right) }\left(
E;F\right) $) if there is a constant $C>0$ such that
\begin{equation}
\left( \sum_{j=1}^{m}\left\vert \varphi_{j}\left( u\left( x_{j}\right)
\right) \right\vert ^{p}\right) ^{\frac{1}{p}}\leq C\left\Vert \left(
x_{j}\right) _{j=1}^{m}\right\Vert _{w,q}\left\Vert \left( \varphi
_{j}\right) _{j=1}^{m}\right\Vert _{w,r} \label{1807}
\end{equation}
for all positive integer $m$, and all $x_{1},\ldots,x_{m}$ in $E$ and
$\varphi_{1},\ldots,\varphi_{m}$ in $F^{\ast}$. When $r=\infty$, we recover
the classical notion of absolutely $(p;q)$-summing operators.\ For details we
refer to \cite{Lap, pp1, hist}.
The space composed by all continuous linear operators from $E$ to $F$ that are
absolutely $\left( p;q;r\right) $-summing shall be represented by
$\Pi_{as\left( p;q;r\right) }\left( E;F\right) $. The infimum of the
constants $C$ satisfying the inequality (\ref{1807}) defines a norm ($p$-norm
if $0<p<1$) in $\Pi_{as\left( p;q;r\right) }\left( E;F\right) ,$ denoted
by $\pi_{\left( p;q;r\right) }(u).$ If $r=\infty$ we use the classical
notation of absolutely $\left( p;q\right) $-summing operators, $\Pi_{\left(
p;q\right) }\left( E;F\right) $ and $\pi_{\left( p;q\right) }$ for the norm.
If we allow $\frac{1}{p}>\frac{1}{q}+\frac{1}{r}$ we would have $\Pi
_{as\left( p;q;r\right) }\left( E;F\right) =\left\{ 0\right\} $ (see
\cite[p. 196]{djt}) and, for this reason, we ask for $\frac{1}{p}\leq\frac
{1}{q}+\frac{1}{r}$ in the definition above.
\subsection{Operator ideals, multi-ideals and polynomial ideals}
The theory of operator ideals goes back to J.W. Calkin \cite{cal}, H. Weyl
\cite{we} and further work of A.\ Grothendieck \cite{grote}. However, only in
the 70's, with A. Pietsch \cite{pp1}, the theory was organized in the modern
presentation (see also \cite{ddjj, HP}). For historical details we suggest
\cite{hist} and for applications we refer to \cite{ddjj}.
An operator ideal $\mathcal{I}$ is a subclass of the class $\mathcal{L}_{1}$
of all continuous linear operators between Banach spaces such that for all
Banach spaces $E$ and $F$ its components
\[
\mathcal{I}(E;F):=\mathcal{L}_{1}(E;F)\cap\mathcal{I}
\]
satisfy the following:
(Oa) $\mathcal{I}(E;F)$ is a linear subspace of $\mathcal{L}_{1}(E;F)$ which
contains the finite rank operators.
(Ob) If $u\in\mathcal{I}(E;F)$, $v\in\mathcal{L}_{1}(G;E)$ and $w\in
\mathcal{L}_{1}(F;H)$, then $w\circ u\circ v\in\mathcal{I}(G;H)$.
The operator ideal is called a normed operator ideal if there is a function
$\Vert\cdot\Vert_{\mathcal{I}}\colon\mathcal{I}\longrightarrow\lbrack
0,\infty)$ satisfying
(Ob1) $\Vert\cdot\Vert_{\mathcal{I}}$ restricted to $\mathcal{I}(E;F)$ is a
norm, for all Banach spaces $E$, $F$.
(Ob2) $\Vert P_{1}\colon\mathbb{K}\longrightarrow\mathbb{K}:P_{1}
(\lambda)=\lambda\Vert_{\mathcal{I}}=1.$
(Ob3) If $u\in\mathcal{I}(E;F)$, $v\in\mathcal{L}_{1}(G;E)$ and $w\in
\mathcal{L}_{1}(F;H)$, then
\[
\Vert w\circ u\circ v\Vert_{\mathcal{I}}\leq\Vert w\Vert\Vert u\Vert
_{\mathcal{I}}\Vert v\Vert.
\]
When $\mathcal{I}(E;F)$ with the norm above is always complete, $\mathcal{I}$
is called a Banach operator ideal.
Absolutely summing operators and the two related aforementioned concepts are
examples of operator ideals. Other examples include the compact, weakly
compact, strictly singular operators, etc.
The notion of multi-ideals is also due to Pietsch \cite{Pie}. For each
positive integer $n$, let $\mathcal{L}_{n}$ denote the class of all continuous
$n$-linear operators between Banach spaces. An ideal of multilinear mappings
(or multi-ideal) $\mathcal{M}$ is a subclass of the class $\mathcal{L}=
{\textstyle\bigcup\limits_{n=1}^{\infty}}
\mathcal{L}_{n}$ of all continuous multilinear operators between Banach spaces
such that for a positive integer $n$, Banach spaces $E_{1},\ldots,E_{n}$ and
$F$, the components
\[
\mathcal{M}_{n}(E_{1},\ldots,E_{n};F):=\mathcal{L}_{n}(E_{1},\ldots
,E_{n};F)\cap\mathcal{M}
\]
satisfy:
(Ma) $\mathcal{M}_{n}(E_{1},\ldots,E_{n};F)$ is a linear subspace of
$\mathcal{L}_{n}(E_{1},\ldots,E_{n};F)$ which contains the $n$-linear mappings
of finite type.
(Mb) If $T\in\mathcal{M}_{n}(E_{1},\ldots,E_{n};F)$, $u_{j}\in\mathcal{L}
_{1}(G_{j};E_{j})$ for $j=1,\ldots,n$ and $v\in\mathcal{L}_{1}(F;H)$, then
\[
v\circ T\circ(u_{1},\ldots,u_{n})\in\mathcal{M}_{n}(G_{1},\ldots,G_{n};H).
\]
Moreover, $\mathcal{M}$ is a (quasi-) normed multi-ideal if there is a
function $\Vert\cdot\Vert_{\mathcal{M}}\colon\mathcal{M}\longrightarrow
\lbrack0,\infty)$ satisfying
(Mb1) $\Vert\cdot\Vert_{\mathcal{M}}$ restricted to $\mathcal{M}_{n}
(E_{1},\ldots,E_{n};F)$ is a (quasi-) norm, for all Banach spaces
$E_{1},\ldots,E_{n}$ and $F.$
(Mb2) $\Vert T_{n}\colon\mathbb{K}^{n}\longrightarrow\mathbb{K}:T_{n}
(\lambda_{1},\ldots,\lambda_{n})=\lambda_{1}\cdots\lambda_{n}\Vert
_{\mathcal{M}}=1$ for all $n$,
(Mb3) If $T\in\mathcal{M}_{n}(E_{1},\ldots,E_{n};F)$, $u_{j}\in\mathcal{L}
_{1}(G_{j};E_{j})$ for $j=1,\ldots,n$ and $v\in\mathcal{L}_{1}(F;H)$, then
\[
\Vert v\circ T\circ(u_{1},\ldots,u_{n})\Vert_{\mathcal{M}}\leq\Vert
v\Vert\Vert T\Vert_{\mathcal{M}}\Vert u_{1}\Vert\cdots\Vert u_{n}\Vert.
\]
When all the components $\mathcal{M}_{n}(E_{1},\ldots,E_{n};F)$ are complete
under this (quasi-) norm, $\mathcal{M}$ is called a (quasi-) Banach
multi-ideal. For a fixed multi-ideal $\mathcal{M}$ and a positive integer $n$,
the class
\[
\mathcal{M}_{n}:=\cup_{E_{1},\ldots,E_{n},F}\mathcal{M}_{n}\left( E_{1}
,\ldots,E_{n};F\right)
\]
is called ideal of $n$-linear mappings.
Similarly, for each positive integer $n$, let $\mathcal{P}_{n}$ denote the
class of all continuous $n$-homogeneous polynomials between Banach spaces. A
polynomial ideal $\mathcal{Q}$ is a subclass of the class $\mathcal{P}=
{\textstyle\bigcup\limits_{n=1}^{\infty}} \mathcal{P}_{n}$ of all continuous
homogeneous polynomials between Banach spaces so that for all $n\in\mathbb{N}
$ and all Banach spaces $E$ and $F$, the components
\[
\mathcal{Q}_{n}\left( ^{n}E;F\right) :=\mathcal{P}_{n}\left( ^{n}
E;F\right) \cap\mathcal{Q}
\]
satisfy:
(Pa) $\mathcal{Q}_{n}\left( ^{n}E;F\right) $ is a linear subspace of
$\mathcal{P}_{n}\left( ^{n}E;F\right) $ which contains the finite-type polynomials.
(Pb) If $u\in\mathcal{L}_{1}\left( G;E\right) $, $P\in\mathcal{Q}_{n}\left(
^{n}E;F\right) $ and $w\in\mathcal{L}_{1}\left( F;H\right) $, then
\[
w\circ P\circ u\in\mathcal{Q}_{n}\left( ^{n}G;H\right) .
\]
If there exists a map $\left\Vert \cdot\right\Vert _{\mathcal{Q}}
:\mathcal{Q}\rightarrow\lbrack0,\infty\lbrack$ satisfying
(Pb1) $\left\Vert \cdot\right\Vert _{\mathcal{Q}}$ restricted to
$\mathcal{Q}_{n}(^{n}E;F)$ is a (quasi-) norm for all Banach spaces $E$ and
$F$ and all $n$;
(Pb2) $\left\Vert P_{n}:\mathbb{K}\rightarrow\mathbb{K};\text{ }P_{n}\left(
\lambda\right) =\lambda^{n}\right\Vert _{\mathcal{Q}}=1$ for all $n$;
(Pb3) If $u\in\mathcal{L}_{1}(G;E)$, $P\in\mathcal{Q}_{n}(^{n}E;F)$ and
$w\in\mathcal{L}_{1}(F;H),$ then
\[
\left\Vert w\circ P\circ u\right\Vert _{\mathcal{Q}}\leq\left\Vert
w\right\Vert \left\Vert P\right\Vert _{\mathcal{Q}}\left\Vert u\right\Vert
^{n},
\]
$\mathcal{Q}$ is called (quasi-) normed polynomial ideal. If all components
$\mathcal{Q}_{n}\left( ^{n}E;F\right) $ are complete, $\left(
\mathcal{Q},\left\Vert \cdot\right\Vert _{\mathcal{Q}}\right) $ is called a
(quasi-) Banach ideal of polynomials (or (quasi-) Banach polynomial ideal).
For a fixed ideal of polynomials $\mathcal{Q}$ and $n\in\mathbb{N}$, the
class
\[
\mathcal{Q}_{n}:=\cup_{E,F}\mathcal{Q}_{n}\left( ^{n}E;F\right)
\]
is called ideal of $n$-homogeneous polynomials.
A crucial question in the theory of Banach polynomial ideals (and
multi-ideals) is the following:
\begin{quote}
\textit{Given an operator ideal, is there a natural method to define a related
multi-ideal and polynomial ideal without losing its essence?}
\end{quote}
As mentioned before, in general a given operator ideal has several different
possible extensions to multi-ideals and polynomial ideals. In an attempt to
determine which approaches are better than others, the notions of coherence
and compatibility (and in some sense holomorphy types) are quite helpful.
In the last decades several authors have been interested in investigating
multilinear and polynomial generalizations of certain operator ideals, such as
the ideal of absolutely summing operators. But the search for the correct
approach is not an easy task. The generalizations must keep the essence of the
given operator ideal and there seems to be no universal recipe for it.
The main goal of this paper is to discuss and introduce multilinear and
polynomial extensions of the aforementioned operator ideals (from Subsections
\ref{xxzzz} and \ref{Sub2}) taking into account the existent methods of
evaluating the adequacy of such generalizations. Besides the intrinsic
mathematical interest of the subject, the main motivation of this paper is
that we believe (based on concrete facts) that the previous approaches were
not adequate.
\section{Coherence and compatibility}
The notions of coherent sequences of ideals of polynomials and compatible
ideals of polynomials, which we recall below, are important tools for
evaluating polynomial extensions of a given operator ideal. The essence of
these concepts rests in the search for harmony between the levels of
homogeneity ($n$-linearity) of a polynomial ideal and connections
(compatibility) with the case of linear operators ($n=1$). In the following if
$P\in\mathcal{P}\left( ^{n}E;F\right) $, then $P_{a^{k}}\in\mathcal{P}
\left( ^{n-k}E;F\right) $ is defined by
\[
P_{a^{k}}(x):=\check{P}(a,\ldots,a,x,\ldots,x).
\]
\begin{definition}
[Compatible ideals, \cite{CDM09}]\label{IdeaisCompativeis}Let $\mathcal{U}$ be
a normed ideal of linear operators. A normed ideal of $n$-homogeneous
polynomials $\mathcal{U}_{n}$ is compatible with $\mathcal{U}$ if there exist
positive constants $\alpha_{1}$ and $\alpha_{2}$ such that for every Banach
spaces $E$ and $F$, the following conditions hold:
$\left( i\right) $ For each $P\in\mathcal{U}_{n}\left( E;F\right) $ and
$a\in E$, $P_{a^{n-1}}$ belongs to $\mathcal{U}\left( E;F\right) $ and
\[
\left\Vert P_{a^{n-1}}\right\Vert _{\mathcal{U}\left( E;F\right) }\leq
\alpha_{1}\left\Vert P\right\Vert _{\mathcal{U}_{n}\left( E;F\right)
}\left\Vert a\right\Vert ^{n-1}.
\]
$\left( ii\right) $ For each $T\in\mathcal{U}\left( E;F\right) $ and
$\gamma\in E^{\ast}$, $\gamma^{n-1}T$ belongs to $\mathcal{U}_{n}\left(
E;F\right) $ and
\[
\left\Vert \gamma^{n-1}T\right\Vert _{\mathcal{U}_{n}\left( E;F\right) }
\leq\alpha_{2}\left\Vert \gamma\right\Vert ^{n-1}\left\Vert T\right\Vert
_{\mathcal{U}\left( E;F\right) }.
\]
\end{definition}
For the sake of simplicity, we will sometimes write \textquotedblleft the
sequence $\left( \mathcal{U}_{n}\right) _{n=1}^{\infty}$ is compatible with
$\mathcal{U}$\textquotedblright\ instead of writing \textquotedblleft
$\mathcal{U}_{n}$ is compatible with $\mathcal{U}$ for every $n$
\textquotedblright. Besides, when we write \textquotedblleft the sequence
$\left( \mathcal{U}_{n}\right) _{n=1}^{\infty}$ fails to be compatible with
$\mathcal{U}$\textquotedblright\ we are saying that at least for some $n$, the
ideal $\mathcal{U}_{n}$ is not compatible with $\mathcal{U}$.
\begin{definition}
[Coherent sequence of polynomial ideals \cite{CDM09}]\label{IdeaisCoerentes}
Consider the sequence $\left( \mathcal{U}_{k}\right) _{k=1}^{N}$, where for
each $k$, $\mathcal{U}_{k}$ is an ideal of $k$-homogeneous polynomials and $N$
is eventually infinite. The sequence $\left( \mathcal{U}_{k}\right)
_{k=1}^{N}$ is a coherent sequence of polynomial ideals if there exist
positive constants $\beta_{1}$ and $\beta_{2}$ such that for every Banach
spaces $E$ and $F$, the following conditions hold for $k\in\{1,\ldots,N-1\}$:
$\left( i\right) $ For each $P\in\mathcal{U}_{k+1}\left( E;F\right) $ and
$a\in E$, $P_{a}$ belongs to $\mathcal{U}_{k}\left( E;F\right) $ and
\[
\left\Vert P_{a}\right\Vert _{\mathcal{U}_{k}\left( E;F\right) }\leq
\beta_{1}\left\Vert P\right\Vert _{\mathcal{U}_{k+1}\left( E;F\right)
}\left\Vert a\right\Vert .
\]
$\left( ii\right) $ For each $P\in\mathcal{U}_{k}\left( E;F\right) $ and
$\gamma\in E^{\ast}$, $\gamma P$ belongs to $\mathcal{U}_{k+1}\left(
E;F\right) $ and
\[
\left\Vert \gamma P\right\Vert _{\mathcal{U}_{k+1}\left( E;F\right) }
\leq\beta_{2}\left\Vert \gamma\right\Vert \left\Vert P\right\Vert
_{\mathcal{U}_{k}\left( E;F\right) }.
\]
\end{definition}
\section{The first multilinear and polynomial approaches to summability}
In 1989, R. Alencar and M.C. Matos \cite{am} explored the following concept of
absolutely summing multilinear operators, which was essentially introduced by Pietsch:
\begin{definition}
\label{pro}Let $p,p_{1},\ldots,p_{n}\in(0,\infty),$ with $\frac{1}{p}\leq
\frac{1}{p_{1}}+\cdots+\frac{1}{p_{n}}.$ A mapping $T\in\mathcal{L}
(E_{1},\ldots,E_{n};F)$ is absolutely $(p;p_{1},\ldots,p_{n})$
-summing\textbf{\ (}or\textbf{\ }$(p;p_{1},\ldots,p_{n})$-summing\textbf{)} if
there exists a $C\geq0$ such that
\begin{equation}
\left( \overset{m}{\underset{i=1}{\sum}}\left\Vert T(x_{i}^{(1)},\ldots
,x_{i}^{(n)})\right\Vert ^{p}\right) ^{\frac{1}{p}}\leq C\overset
{n}{\underset{k=1}{\prod}}\left\Vert \left( x_{j}^{(k)}\right) _{j=1}
^{m}\right\Vert _{w,p_{k}} \label{llo}
\end{equation}
for every $m\in\mathbb{N}$ and $x_{i}^{(k)}\in E_{k},$ with $\left(
i,k\right) \in\left\{ 1,\ldots,m\right\} \times\left\{ 1,\ldots,n\right\}
$. Analogously an $n$-homogeneous polynomial $P\in\mathcal{P}(^{n}E;F)$
is\textbf{\ }absolutely\textbf{\ }$(p;q)$-summing if there exists a constant
$C\geq0$ such that
\[
\left( \sum\limits_{j=1}^{m}\left\Vert P\left( x_{j}\right) \right\Vert
^{p}\right) ^{\frac{1}{p}}\leq C\left\Vert \left( x_{j}\right) _{j=1}
^{m}\right\Vert _{w,q}^{n}
\]
for all $m\in\mathbb{N}$ and $x_{j}\in E,$ with $j=1,\ldots,m.$
\end{definition}
The space of all $n$-linear operators satisfying (\ref{llo}) will be denoted
by $\mathcal{L}_{as(p;p_{1},\ldots,p_{n})}(E_{1},\ldots,E_{n};F).$ When
$p_{1}=\cdots=p_{n}=q,$ we simply write $\mathcal{L}_{as(p;q)}(E_{1}
,\ldots,E_{n};F)$. For $n=1$ we use the classical notation $\Pi_{(p;q)}$
instead of $\mathcal{L}_{as(p;q)}.$ For polynomials we write $\mathcal{P}
_{as(p;q)}(^{n}E;F).$
For other approaches we mention \cite{port, Choi, df, Dimant} and references
therein. The successful notion of multiple summing multilinear operators will
be mentioned in Section \ref{MMMA}.
In the case of mixing summing operators, the multilinear/polynomial theory was
investigated by C.A. Soares in his Ph.D. dissertation \cite{Carlos Alberto}.
However, the definition considered in \cite{Carlos Alberto} is an extension of
Definition \ref{pro} and, as it happens to the concept of absolutely summing
multilinear operators, it inherits its weaknesses.
\begin{definition}
Let $0<q\leq s\leq\infty$ and $0<p_{1},\ldots,p_{n}\leq\infty.$ An $n$-linear
operator $T\in\mathcal{L}(E_{1},\ldots,E_{n};F)$ is\textbf{\ }$(s,q;p_{1}
,\ldots,p_{n})$-mixing summing if there exists a constant $\sigma\geq0$ such
that
\begin{equation}
\left\Vert \left( T(x_{j}^{(1)},\ldots,x_{j}^{(n)})\right) _{j=1}
^{m}\right\Vert _{mx(s,q)}\leq\sigma\prod_{k=1}^{n}\left\Vert (x_{j}
^{(k)})_{j=1}^{m}\right\Vert _{w,p_{k}} \label{xxc}
\end{equation}
for every $m\in\mathbb{N}$, $x_{1}^{(1)},\ldots,x_{m}^{(1)}\in E_{1}
,\ldots,x_{1}^{(n)},\ldots,x_{m}^{(n)}\in E_{n}.$ Analogously $P\in
\mathcal{P}(^{n}E;F)$ is\textbf{\ }mixing\textbf{\ }$(s,q;p)$-summing if there
exists a constant $C\geq0$ such that
\[
\left\Vert \left( P(x_{j})\right) _{j=1}^{m}\right\Vert _{mx(s,q)}\leq
C\left\Vert \left( x_{j}\right) _{j=1}^{m}\right\Vert _{w,p}^{n}
\]
for all $m\in\mathbb{N}$ and $x_{j}\in E,$ with $j=1,\ldots,m.$
\end{definition}
If $p_{1}=\cdots=p_{n}=p,$ the operator $T$ is said to be $(s,q;p)$-mixing summing.
The following multilinear generalization of $(p;q;r)$-summing operators was
recently introduced by D. Achour \cite{achour}:
\begin{definition}
\label{ach}Let $0<p,q_{1},\ldots,q_{n}<\infty$ and $0<r\leq\infty$ with
\[
\frac{1}{p}\leq\frac{1}{q_{1}}+\cdots+\frac{1}{q_{n}}+\frac{1}{r}.
\]
An $n$-linear map $T$ $\in\mathcal{L}{(E_{1},\ldots,E_{n};F)}$ is absolutely
$(p;q_{1},\ldots,q_{n};r)$-summing if there is a $C\geq0$ so that
\begin{equation}
\left( \sum\limits_{j=1}^{m}\left\vert \varphi_{j}\left( T\left(
x_{j}^{(1)},\ldots,x_{j}^{(n)}\right) \right) \right\vert ^{p}\right)
^{\frac{1}{p}}\leq C\left\Vert \left( \varphi_{j}\right) _{j=1}
^{m}\right\Vert _{w,r}\prod_{i=1}^{n}\left\Vert \left( x_{j}^{(i)}\right)
_{j=1}^{m}\right\Vert _{w,q_{i}} \label{des}
\end{equation}
for all $m\in\mathbb{N}$, $\varphi_{j}\in F^{\ast}$ and $x_{j}^{(i)}\in
E_{i},$ with $\left( i,j\right) \in\{1,\ldots,n\}\times\{1,\ldots,m\}.$
Analogously an $n$-homogeneous polynomial $P\in\mathcal{P}(^{n}E;F)$
is\textbf{\ }absolutely\textbf{\ }$(p;q;r)$-summing if there exists a constant
$C\geq0$ such that
\[
\left( \sum\limits_{j=1}^{m}\left\vert \varphi_{j}\left( P\left(
x_{j}\right) \right) \right\vert ^{p}\right) ^{\frac{1}{p}}\leq C\left\Vert
\left( \varphi_{j}\right) _{j=1}^{m}\right\Vert _{w,r}\left\Vert \left(
x_{j}\right) _{j=1}^{m}\right\Vert _{w,q}^{n}
\]
for all $m\in\mathbb{N}$, $\varphi_{j}\in F^{\ast}$ and $x_{j}\in E,$ with
$j=1,\ldots,m.$
\end{definition}
We denote{\ the space of all absolutely }$(p;q_{1},\ldots,q_{n};r)$-summing
$n$-linear operators by
\[
\mathcal{L}_{as(p;q_{1},\ldots,q_{n};r)}\left( {E_{1},\ldots,E_{n};F}\right)
.
\]
When $q_{1}=\cdots=q_{n}=q$ we just write $\mathcal{L}_{as(p;q;r)}\left(
{E_{1},\ldots,E_{n};F}\right) $. When $r=\infty$ we recover the notion of
absolutely $\left( p;q_{1}, \ldots,q_{n}\right) $-summing multilinear
mappings $\mathcal{L}_{as(p;q_{1},\ldots,q_{n})}$ due to Alencar and Matos
\cite{am}. More precisely,
\begin{equation}
\mathcal{L}_{as(p;q_{1},\ldots,q_{n};\infty)}=\mathcal{L}_{as(p;q_{1}
,\ldots,q_{n})}. \label{s32}
\end{equation}
If $\frac{1}{p}>\frac{1}{q_{1}}+\cdots+\frac{1}{q_{n}}+\frac{1}{r}$ and $T$ is
absolutely $(p;q_{1},\ldots,q_{n};r)$-summing, then $T=0$. It is not
difficult to prove that
\begin{equation}
\mathcal{L}_{as\left( p;q_{1},\ldots,q_{n}\right) }\left( E_{1}
,\ldots,E_{n};F\right) \subset\mathcal{L}_{as\left( p;q_{1},\ldots
,q_{n};r\right) }\left( E_{1},\ldots,E_{n};F\right) \label{mov}
\end{equation}
for all Banach spaces $E_{1},\ldots,E_{n},F$ and $r>0$.
\subsection{The lack of coherence and compatibility}
The class of absolutely $\left( p;q\right) $-summing $n$-homogeneous
polynomials will be denoted by $\mathcal{P}_{as(p;q)}^{n}.$ As before, the
space of all $n$-homogeneous polynomials $P:E\rightarrow F$ in $\mathcal{P}
_{as(p;q)}^{n}$ is represented by $\mathcal{P}_{as(p;q)}\left( ^{n}
E;F\right) .$ The notions of absolutely $\left( p;q;r\right) $-summing
polynomials and mixing summing polynomials are denoted in a similar way.
It can be easily seen that $\left( \mathcal{P}_{as(p;q)}^{n}\right)
_{n=1}^{\infty}$ in general fails to be coherent and compatible with
$\Pi_{as(p;q)}$. In fact for any positive integer $n\geq2$ and any real number
$1\leq p\leq2$ we know that
\[
\mathcal{P}_{as(1;1)}\left( ^{n}\ell_{p};F\right) =\mathcal{P}\left(
^{n}\ell_{p};F\right)
\]
for all Banach spaces $F$. This result is an obvious deviation from the spirit
of the linear ideal of absolutely summing operators since
\[
\Pi_{as(1;1)}\left( \ell_{p};F\right) =\mathcal{L}\left( \ell_{p};F\right)
\]
if and only if $p=1$ and $F$ is a Hilbert space (see \cite{LP}). This
situation also proves that $\left( \mathcal{P}_{as(1;1)}^{n}\right)
_{n=1}^{\infty}$ is not coherent or compatible with $\Pi_{as(1;1)}.$ We also
know that $\left( \mathcal{P}_{as(p;q)}^{n}\right) _{n=1}^{\infty}$ in
general is not a (global) holomorphy type.
Since $\mathcal{P}_{as\left( p;q;\infty\right) }^{n}=\mathcal{P}_{as\left(
p;q\right) }^{n}$ and $\mathcal{P}_{mxs\left( \infty;p\right) }
^{n}=\mathcal{P}_{as\left( p;p\right) }^{n}$ these deficiencies of $\left(
\mathcal{P}_{as(1;1)}^{n}\right) _{n=1}^{\infty}$ are inherited by the
polynomial analogues of the concepts of Subsections \ref{xxzzz} and
\ref{Sub2}. These deficiencies shall be fixed by the alternative concepts
introduced in the next sections.
\section{Multiple summing multilinear operators: the ``nice
prototype''\label{MMMA}}
Multiple $(p;q)$-summing multilinear operators were introduced in 2003 \cite{Matos, pv}.
The origins of this notion date back to the 1930's with Littlewood's $4/3$
inequality \cite{lit} which asserts that
\[
\left( \sum\limits_{i,j=1}^{N}\left\vert T(e_{i},e_{j})\right\vert ^{\frac
{4}{3}}\right) ^{\frac{3}{4}}\leq\sqrt{2}\left\Vert T\right\Vert
\]
for every bilinear form $T:\ell_{\infty}^{N}\times\ell_{\infty}^{N}
\rightarrow\mathbb{K}$ and every positive integer $N.$ In 1931 H.F.
Bohnenblust and E. Hille \cite{bh} provided a deep generalization of this
result to multilinear mappings: for every positive integer $n$ there is a
$C_{n}>0$ so that
\[
\left( \sum\limits_{i_{1},\ldots,i_{n}=1}^{N}\left\vert T(e_{i_{1}}
,\ldots,e_{i_{n}})\right\vert ^{\frac{2n}{n+1}}\right) ^{\frac{n+1}{2n}}\leq
C_{n}\left\Vert T\right\Vert
\]
for every $n$-linear mapping $T:\ell_{\infty}^{N}\times\cdots\times
\ell_{\infty}^{N}\rightarrow\mathbb{C}$ and every positive integer $N$. This
result has important applications in operator theory in Banach spaces,
harmonic analysis, complex analysis and analytic number theory. For recent
advances related to the Bohnenblust-Hille inequality we refer to \cite{an,
df,DPell, Mun, Mu2}.
In his Ph.D. dissertation, D. P\'{e}rez-Garc\'{\i}a \cite{Da} remarked that
the Bohnenblust-Hille inequality can be viewed as a result of the theory of
multiple summing operators.
\begin{theorem}
[Bohnenblust-Hille]\label{ytr}If $E_{1},\ldots,E_{n}$ are Banach spaces and
$T\in\mathcal{L}(E_{1},\ldots,E_{n};\mathbb{K}),$ then there exists a constant
$C_{n}\geq0$ such that
\begin{equation}
\left( \sum_{j_{1},\ldots,j_{n}=1}^{N}\left\vert T(x_{j_{1}}^{(1)}
,\ldots,x_{j_{n}}^{(n)})\right\vert ^{\frac{2n}{n+1}}\right) ^{\frac{n+1}
{2n}}\leq C_{n}\prod_{k=1}^{n}\left\Vert (x_{j}^{(k)})_{j=1}^{N}\right\Vert
_{w,1} \label{juo}
\end{equation}
for every positive integer $N$ and $x_{j}^{(k)}\in E_{k}$, $k=1,\ldots,n$ and
$j=1,\ldots,N.$
\end{theorem}
The inequality above can be regarded as a result in the theory of multiple
summing multilinear operators. Recall that for $1\leq q_{1},\ldots,q_{n}\leq
p<\infty,$ an $n$-linear operator $T:E_{1}\times\cdots\times E_{n}\rightarrow
F$ is multiple\emph{\ }$(p;q_{1},\ldots,q_{n})$-summing ($T\in\mathcal{L}
_{mas(p;q_{1},\ldots,q_{n})}(E_{1},\ldots,E_{n};F)$) if there exists $C>0$
such that
\begin{equation}
\left( \sum_{j_{1},\ldots,j_{n}=1}^{\infty}\Vert T(x_{j_{1}}^{(1)}
,\ldots,x_{j_{n}}^{(n)})\Vert^{p}\right) ^{1/p}\leq C\prod\limits_{k=1}
^{n}\Vert(x_{j}^{(k)})_{j=1}^{\infty}\Vert_{w,q_{k}}\text{ } \label{jup2}
\end{equation}
for every $(x_{j}^{(k)})_{j=1}^{\infty}\in\ell_{q_{k}}^{w}(E_{k})$,
$k=1,\ldots,n$.
The infimum of all $C$'s satisfying (\ref{jup2}), denoted by $\left\Vert
T\right\Vert _{(r;r_{1},\ldots,r_{n})},$ defines a complete norm if $r\geq1$
($r$-norm, if $r\in(0,1)$) in $\mathcal{L}_{mas(r;r_{1},\ldots,r_{n})}
(E_{1},\ldots,E_{n};F).$ If $r_{1}=\cdots=r_{n}=s$ we just write $(r;s),$ and
when $r=s$ we replace $\left( r;r\right) $ by $r$. For $n=1$ this concept
also coincides with the classical notion of absolutely summing linear
operators and, for this reason, we keep the usual notation $\pi_{(r;s)}\left(
T\right) $ instead of $\left\Vert T\right\Vert _{(r;s)}$ for the norm of $T.$
The essence of the notion of multiple summing multilinear operators, for
bilinear operators, can also be traced back to \cite{Ram}. For recent results
in the theory of multiple summing operators we refer to \cite{BBJP, se,
davidstudia, PopJM} and references therein.
\section{Multiple $\left( p;q_{1},\ldots,q_{n};r\right) $-summing
multilinear operators\label{ss33}}
In this section we introduce the notion of multiple $\left( p;q_{1}
,\ldots,q_{n};r\right) $-summing multilinear operators and, as we shall see
in the next sections, the polynomial version of this concept is coherent and
compatible with the (linear) operator ideal of $(p;q;r)$-summing operators.
\begin{definition}
Let $m\in\mathbb{N},p,r,q_{1},\ldots,q_{n}\geq1$ and $E_{1},\ldots,E_{n},F$ be
Banach spaces. A continuous multilinear operator $T:E_{1}\times\cdots\times
E_{n}\rightarrow F$ is multiple $\left( p;q_{1},\ldots,q_{n};r\right)
$-summing when
\[
\left( \varphi_{j_{1}\ldots j_{n}}\left( T\left( x_{j_{1}}^{\left(
1\right) },\ldots,x_{j_{n}}^{\left( n\right) }\right) \right) \right)
_{j_{1},\ldots,j_{n}\in\mathbb{N}}\in\ell_{p}\left( \mathbb{N}^{n}\right)
\]
whenever $\left( x_{j}^{\left( i\right) }\right) _{j=1}^{\infty}\in
\ell_{q_{i}}^{w}\left( E_{i}\right) ,i=1,\ldots,n$ and $\left(
\varphi_{j_{1}\ldots j_{n}}\right) _{j_{1},\ldots,j_{n}\in\mathbb{N}}\in
\ell_{r}^{w}\left( F^{\ast},\mathbb{N}^{n}\right) .$
\end{definition}
Sometimes we shall simply write $j\in\mathbb{N}^{n}$ to denote $j=(j_{1}
,\ldots,j_{n})\in\mathbb{N}^{n}.$ The vector space formed by the multiple
$\left( p;q_{1},\ldots,q_{n};r\right) $-summing multilinear operators from
$E_{1}\times\cdots\times E_{n}$ to $F$ shall be represented by $\mathcal{L}
_{mas\left( p;q_{1},\ldots,q_{n};r\right) }\left( E_{1},\ldots
,E_{n};F\right) $. When $q_{1}=\cdots=q_{n}=q$, we simply write
$\mathcal{L}_{mas\left( p;q;r\right) }\left( E_{1},\ldots,E_{n};F\right) $.
As it happens in other similar classes, the class $\mathcal{L}_{mas\left(
p;q_{1},\ldots,q_{n};r\right) }\left( E_{1},\ldots,E_{n};F\right) $ has a
characterization by means of inequalities:
\begin{theorem}
\label{1.2}The following assertions are equivalent for $T\in\mathcal{L}\left(
E_{1},\ldots,E_{n};F\right) $:
\begin{itemize}
\item[(i)] $T\in\mathcal{L}_{mas\left( p;q_{1},\ldots,q_{n};r\right)
}\left( E_{1},\ldots,E_{n};F\right) ;$
\item[(ii)] There is a $C\geq0$ such that
\begin{align}
& \left( \sum_{j_{1},\ldots,j_{n}=1}^{\infty}\left\vert \varphi_{j_{1}\ldots
j_{n}}\left( T\left( x_{j_{1}}^{\left( 1\right) },\ldots,x_{j_{n}
}^{\left( n\right) }\right) \right) \right\vert ^{p}\right) ^{\frac{1}
{p}}\label{11:06}\\
& \leq C\left\Vert \left( \varphi_{j_{1}\ldots j_{n}}\right) _{j_{1}
,...,j_{n}\in\mathbb{N}}\right\Vert _{w,r}\prod_{i=1}^{n}\left\Vert \left(
x_{j}^{\left( i\right) }\right) _{j=1}^{\infty}\right\Vert _{w,q_{i}
}\nonumber
\end{align}
whenever $\left( x_{j}^{\left( i\right) }\right) _{j=1}^{\infty}\in
\ell_{q_{i}}^{w}\left( E_{i}\right) ,i=1,\ldots,n$ and $\left(
\varphi_{j_{1}\ldots j_{n}}\right) _{j\in\mathbb{N}^{n}}\in\ell_{r}
^{w}\left( F^{\ast},\mathbb{N}^{n}\right) ;$
\item[(iii)] There is a $C\geq0$ such that
\begin{align*}
& \left( \sum_{j_{1},\ldots,j_{n}=1}^{m}\left\vert \varphi_{j_{1}\ldots
j_{n}}\left( T\left( x_{j_{1}}^{\left( 1\right) },\ldots,x_{j_{n}
}^{\left( n\right) }\right) \right) \right\vert ^{p}\right) ^{\frac{1}
{p}}\\
& \leq C\left\Vert \left( \varphi_{j_{1}\ldots j_{n}}\right) _{j_{1}
,...,j_{n}\in\mathbb{N}_{m}}\right\Vert _{w,r}\prod_{i=1}^{n}\left\Vert
\left( x_{j}^{\left( i\right) }\right) _{j=1}^{m}\right\Vert _{w,q_{i}}
\end{align*}
for all $m\in\mathbb{N},$ $x_{1}^{\left( i\right) },\ldots,x_{m}^{\left(
i\right) }\in E_{i},i=1,\ldots,n$ and $\left( \varphi_{j_{1}\ldots j_{n}
}\right) _{j\in\mathbb{N}_{m}^{n}}\in\ell_{r}^{w}\left( F^{\ast}
,\mathbb{N}_{m}^{n}\right) .$
\end{itemize}
The infimum of all $C$ satisfying (\ref{11:06}) defines a norm in
$\mathcal{L}_{mas\left( p;q_{1},\ldots,q_{n};r\right) }\left( E_{1}
,\ldots,E_{n};F\right) .$
\end{theorem}
Similarly to (\ref{mov}) it can also be proved that
\begin{equation}
\mathcal{L}_{mas\left( p;q_{1},\ldots,q_{n}\right) }\subset\mathcal{L}
_{mas\left( p;q_{1},\ldots,q_{n};r\right) } \label{inc222}
\end{equation}
for all $r>0.$ From Theorem \ref{1.2} we can conclude that if
\[
\frac{1}{p}>\frac{1}{q_{i}}+\frac{1}{r}
\]
for some $i$, then $\mathcal{L}_{mas\left( p;q_{1},\ldots,q_{n};r\right)
}\left( E_{1},\ldots,E_{n};F\right) =\left\{ 0\right\} $. In fact, we
first prove that if $T\in\mathcal{L}_{mas\left( p;q_{1},\ldots,q_{n}
;r\right) }\left( E_{1},\ldots,E_{n};F\right) ,$ then, for any $a\in E_{1}
$, the map
\begin{equation}
T_{a}:E_{2}\times\cdots\times E_{n}\longrightarrow F:T_{a}\left( x_{2}
,\ldots,x_{n}\right) =T\left( a,x_{2},\ldots,x_{n}\right) \label{wsq}
\end{equation}
is multiple $\left( p;q_{2},\ldots,q_{n};r\right) $-summing and
\begin{equation}
\left\Vert T_{a}\right\Vert _{mas\left( p;q_{2},\ldots,q_{n};r\right) }
\leq\left\Vert a\right\Vert \left\Vert T\right\Vert _{mas\left(
p;q_{1},\ldots,q_{n};r\right) }. \label{wsa}
\end{equation}
So, if $\frac{1}{p}>\frac{1}{q_{i}}+\frac{1}{r}$ for some $i$, then
$\mathcal{L}_{mas\left( p;q_{1},\ldots,q_{n};r\right) }\left( E_{1}
,\ldots,E_{n};F\right) =\left\{ 0\right\} $. In fact, suppose that
$\frac{1}{p}>\frac{1}{q_{1}}+\frac{1}{r}.$ So, using (\ref{wsq}), we know
that if $T\in\mathcal{L}_{mas\left( p;q_{1},\ldots,q_{n};r\right) }\left(
E_{1},\ldots,E_{n};F\right) $ then $T_{a_{2},\ldots,a_{n}}\in\mathcal{L}
_{as\left( p;q_{1};r\right) }\left( E_{1};F\right) $ for all $a_{2}\in
E_{2},\ldots,a_{n}\in E_{n}$. It follows that $T_{a_{2},\ldots,a_{n}}=0$ and
hence $T=0.$ So, in order to avoid trivialities we shall suppose $\frac{1}
{p}\leq\frac{1}{q_{i}}+\frac{1}{r}$ for all $i.$
\subsection{Coherence and compatibility \label{ss44}}
Standard calculations show that
\[
\left( \mathcal{L}_{mas\left( p;q_{1},\ldots,q_{n};r\right) },\left\Vert
\cdot\right\Vert _{mas\left( p;q_{1},\ldots,q_{n};r\right) }\right)
\]
is a Banach multi-ideal. If $\mathcal{M}$ is a (quasi-) normed ideal of
multilinear mappings, the class
\[
\mathcal{P}_{\mathcal{M}}=\left\{ P\in\mathcal{P}^{n};\check{P}\in
\mathcal{M},n\in\mathbb{N}\right\} \text{,}
\]
with $\left\Vert P\right\Vert _{\mathcal{P}_{\mathcal{M}}}:=\left\Vert
\check{P}\right\Vert _{\mathcal{M}},$ is a (quasi-) normed ideal of
polynomials, called polynomial ideal generated by $\mathcal{M}$. If
$\mathcal{M}$ is (quasi-) Banach, then $\mathcal{P}_{\mathcal{M}}$ is (quasi-)
Banach (see \cite[p. 46]{BBJMs}).
Thus, the class
\[
\mathcal{P}_{mas\left( p;q;r\right) }^{n}=\left\{ P\in\mathcal{P}
^{n};\check{P}\in\mathcal{L}_{mas\left( p;q;r\right) }^{n}\right\} ,
\]
with
\[
\left\Vert P\right\Vert _{\mathcal{P}_{mas\left( p;q;r\right) }^{n}
}:=\left\Vert \check{P}\right\Vert _{mas\left( p;q;r\right) },
\]
is a Banach polynomial ideal.
\begin{theorem}
$\left( \mathcal{P}_{mas\left( p;q;r\right) }^{n},\left\Vert .\right\Vert
_{\mathcal{P}_{mas\left( p;q;r\right) }^{n}}\right) _{n=1}^{\infty}$ is
coherent and, for each fixed $n$, compatible with $\mathcal{L}_{mas\left(
p;q;r\right) }$.
\end{theorem}
\begin{proof}
If $P\in\mathcal{P}_{mas\left( p;q;r\right) }^{n}\left( ^{n}E;F\right) $
and $a\in E$, then $\check{P}\in\mathcal{L}_{mas\left( p;q;r\right) }
^{n}\left( ^{n}E;F\right) $ and, from (\ref{wsq}) and (\ref{wsa}),
$\check{P}_{a}\in\mathcal{L}_{mas\left( p;q;r\right) }^{n-1}\left(
^{n-1}E;F\right) .$ Hence $P_{a}\in\mathcal{P}_{mas\left( p;q;r\right)
}^{n-1}\left( ^{n-1}E;F\right) $ with
\[
\left\Vert P_{a}\right\Vert _{\mathcal{P}_{mas\left( p;q;r\right) }^{n-1}
}\leq\left\Vert a\right\Vert \left\Vert P\right\Vert _{\mathcal{P}_{mas\left(
p;q;r\right) }^{n}}.
\]
Let $\gamma\in E^{\ast}.$ Note that
\[
\left( \gamma P\right) ^{\vee}\left( x_{1},\ldots,x_{n+1}\right) =\frac
{1}{n+1}\sum_{k=1}^{n+1}\gamma\left( x_{k}\right) \check{P}\left(
x_{1},\overset{\left[ k\right] }{\ldots},x_{n+1}\right) ,
\]
where $\overset{\left[ k\right] }{\ldots}$ means that the $k$-th coordinate
is missing.
Let $m\in\mathbb{N}$, $x_{j}^{(k)}\in E$, with $j=1,\ldots,m$ and
$k=1,\ldots,n+1;$ let $\varphi_{j_{1}\ldots j_{n+1}}\in F^{\ast}$ with
$j_{1},\ldots,j_{n+1}=1,\ldots,m.$ Using the triangle inequality we have
\begin{align*}
& \left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left\vert \varphi_{j_{1}\ldots
j_{n+1}}\left( \left( \gamma P\right) ^{\vee}\left( x_{j_{1}}^{\left(
1\right) },\ldots,x_{j_{n+1}}^{\left( n+1\right) }\right) \right)
\right\vert ^{p}\right) ^{\frac{1}{p}}\\
& =\left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left\vert \varphi_{j_{1}\ldots
j_{n+1}}\left( \frac{1}{n+1}\sum_{k=1}^{n+1}\gamma\left( x_{j_{k}}^{\left(
k\right) }\right) \check{P}\left( x_{j_{1}}^{\left( 1\right) }
,\overset{\left[ k\right] }{\ldots},x_{j_{n+1}}^{\left( n+1\right)
}\right) \right) \right\vert ^{p}\right) ^{\frac{1}{p}}\\
& =\frac{1}{n+1}\left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left\vert
\varphi_{j_{1}\ldots j_{n+1}}\left( \sum_{k=1}^{n+1}\gamma\left( x_{j_{k}
}^{\left( k\right) }\right) \check{P}\left( x_{j_{1}}^{\left( 1\right)
},\overset{\left[ k\right] }{\ldots},x_{j_{n+1}}^{\left( n+1\right)
}\right) \right) \right\vert ^{p}\right) ^{\frac{1}{p}}\\
& =\frac{1}{n+1}\left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left\vert
\sum_{k=1}^{n+1}\varphi_{j_{1}\ldots j_{n+1}}\left( \gamma\left( x_{j_{k}
}^{\left( k\right) }\right) \check{P}\left( x_{j_{1}}^{\left( 1\right)
},\overset{\left[ k\right] }{\ldots},x_{j_{n+1}}^{\left( n+1\right)
}\right) \right) \right\vert ^{p}\right) ^{\frac{1}{p}}\\
& \leq\frac{1}{n+1}\left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left(
\sum_{k=1}^{n+1}\left\vert \varphi_{j_{1}\ldots j_{n+1}}\left( \gamma\left(
x_{j_{k}}^{\left( k\right) }\right) \check{P}\left( x_{j_{1}}^{\left(
1\right) },\overset{\left[ k\right] }{\ldots},x_{j_{n+1}}^{\left(
n+1\right) }\right) \right) \right\vert \right) ^{p}\right) ^{\frac{1}
{p}}\\
& =\frac{1}{n+1}\left\Vert \left( \sum_{k=1}^{n+1}\left\vert \varphi
_{j_{1}\ldots j_{n+1}}\left( \gamma\left( x_{j_{k}}^{\left( k\right)
}\right) \check{P}\left( x_{j_{1}}^{\left( 1\right) },\overset{\left[
k\right] }{\ldots},x_{j_{n+1}}^{\left( n+1\right) }\right) \right)
\right\vert \right) _{j_{1},\ldots,j_{n+1}=1}^{m}\right\Vert _{p}\\
& =(\ast).
\end{align*}
Thus, from the Minkowski inequality we have
\begin{align}
(\ast) & =\nonumber\\
& =\frac{1}{n+1}\left\Vert \sum_{k=1}^{n+1}\left( \left\vert \varphi
_{j_{1}\ldots j_{n+1}}\left( \gamma\left( x_{j_{k}}^{\left( k\right)
}\right) \check{P}\left( x_{j_{1}}^{\left( 1\right) },\overset{\left[
k\right] }{\ldots},x_{j_{n+1}}^{\left( n+1\right) }\right) \right)
\right\vert \right) _{j_{1},\ldots,j_{n+1}=1}^{m}\right\Vert _{p}\\
& \leq\frac{1}{n+1}\sum_{k=1}^{n+1}\left\Vert \left( \left\vert
\varphi_{j_{1}\ldots j_{n+1}}\left( \gamma\left( x_{j_{k}}^{\left(
k\right) }\right) \check{P}\left( x_{j_{1}}^{\left( 1\right) }
,\overset{\left[ k\right] }{\ldots},x_{j_{n+1}}^{\left( n+1\right)
}\right) \right) \right\vert \right) _{j_{1},\ldots,j_{n+1}=1}
^{m}\right\Vert _{p}\nonumber\\
& =\frac{1}{n+1}\sum_{k=1}^{n+1}\left( \sum_{j_{1},\ldots,j_{n+1}=1}
^{m}\left\vert \varphi_{j_{1}\ldots j_{n+1}}\left( \gamma\left( x_{j_{k}
}^{\left( k\right) }\right) \check{P}\left( x_{j_{1}}^{\left( 1\right)
},\overset{\left[ k\right] }{\ldots},x_{j_{n+1}}^{\left( n+1\right)
}\right) \right) \right\vert ^{p}\right) ^{\frac{1}{p}}\nonumber\\
& =\frac{1}{n+1}\left[ \left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left\vert
\varphi_{j_{1}\ldots j_{n+1}}\left( \check{P}\left( \gamma\left( x_{j_{1}
}^{\left( 1\right) }\right) x_{j_{2}}^{\left( 2\right) },\ldots
,x_{j_{n+1}}^{\left( n+1\right) }\right) \right) \right\vert ^{p}\right)
^{\frac{1}{p}}+\cdots\right. \nonumber\\
& \left. \cdots+\left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left\vert
\varphi_{j_{1}\ldots j_{n+1}}\left( \check{P}\left( \gamma\left(
x_{j_{n+1}}^{\left( n+1\right) }\right) x_{j_{1}}^{\left( 1\right)
},\ldots,x_{j_{n}}^{\left( n\right) }\right) \right) \right\vert
^{p}\right) ^{\frac{1}{p}}\right] .\nonumber
\end{align}
Hence
\begin{align}
& \left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left\vert \varphi_{j_{1}\ldots
j_{n+1}}\left( \left( \gamma P\right) ^{\vee}\left( x_{j_{1}}^{\left(
1\right) },\ldots,x_{j_{n+1}}^{\left( n+1\right) }\right) \right)
\right\vert ^{p}\right) ^{\frac{1}{p}}\label{estta}\\
& \leq\frac{1}{n+1}\left[ \left( \sum_{j_{1},\ldots,j_{n+1}=1}
^{m}\left\vert \varphi_{j_{1}\ldots j_{n+1}}\left( \check{P}\left(
\gamma\left( x_{j_{1}}^{\left( 1\right) }\right) x_{j_{2}}^{\left(
2\right) },\ldots,x_{j_{n+1}}^{\left( n+1\right) }\right) \right)
\right\vert ^{p}\right) ^{\frac{1}{p}}+\cdots\right. \nonumber\\
& \left. \cdots+\left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left\vert
\varphi_{j_{1}\ldots j_{n+1}}\left( \check{P}\left( \gamma\left(
x_{j_{n+1}}^{\left( n+1\right) }\right) x_{j_{1}}^{\left( 1\right)
},\ldots,x_{j_{n}}^{\left( n\right) }\right) \right) \right\vert
^{p}\right) ^{\frac{1}{p}}\right] .\nonumber
\end{align}
Note that each one of the $n+1$ terms of (\ref{estta}) can be re-written as
\[
\left( \sum_{j_{2}=1}^{m^{2}}\sum_{j_{3},\ldots,j_{n+1}=1}^{m}\left\vert
\widetilde{\varphi}_{j_{2}\ldots j_{n+1}}\left( \check{P}\left( z_{j_{2}
}^{(2)},\ldots,z_{j_{n+1}}^{\left( n+1\right) }\right) \right) \right\vert
^{p}\right) ^{\frac{1}{p}}
\]
for adequate choices of $\widetilde{\varphi}_{j_{2}\ldots j_{n+1}}$ and
$z_{j_{k}}^{(k)}$, with $k=2,\ldots,n+1.$
In fact, for
\[
\left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left\vert \varphi_{j_{1}\ldots
j_{n+1}}\left( \check{P}\left( \gamma\left( x_{j_{1}}^{\left( 1\right)
}\right) x_{j_{2}}^{\left( 2\right) },\ldots,x_{j_{n+1}}^{\left(
n+1\right) }\right) \right) \right\vert ^{p}\right) ^{\frac{1}{p}},
\]
we choose
\[
\left\{
\begin{array}
[c]{c}
z_{j_{2}}^{\left( 2\right) }=\gamma\left( x_{1}^{\left( 1\right)
}\right) x_{j_{2}}^{\left( 2\right) }\text{ for all }j_{2}=1,\ldots.,m,\\
z_{m+j_{2}}^{\left( 2\right) }=\gamma\left( x_{2}^{\left( 1\right)
}\right) x_{j_{2}}^{\left( 2\right) }\text{ for all }j_{2}=1,\ldots.,m,\\
\vdots\\
z_{\left( m-1\right) m+j_{2}}^{\left( 2\right) }=\gamma\left(
x_{m}^{\left( 1\right) }\right) x_{j_{2}}^{\left( 2\right) }\text{ for
all }j_{2}=1,\ldots.,m,\\
z_{j_{i}}^{(i)}=x_{j_{i}}^{\left( i\right) }\text{ for all }j_{i}
=1,\ldots,m,i=3,\ldots,n+1
\end{array}
\right.
\]
and
\[
\left\{
\begin{array}
[c]{c}
\widetilde{\varphi}_{j_{2},\ldots.j_{n+1}}=\varphi_{1j_{2}\ldots j_{n+1}
}\text{ for all }j_{2}=1,\ldots.,m,\\
\widetilde{\varphi}_{m+j_{2},\ldots.j_{n+1}}=\varphi_{2j_{2}\ldots j_{n+1}
}\text{ for all }j_{2}=1,\ldots.,m,\\
\vdots\\
\widetilde{\varphi}_{(m-1)m+j_{2},\ldots.j_{n+1}}=\varphi_{mj_{2}\ldots
j_{n+1}}\text{ for all }j_{2}=1,\ldots.,m.
\end{array}
\right.
\]
For these choices one can check that
\begin{align*}
& \left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left\vert \varphi_{j_{1}\ldots
j_{n+1}}\left( \check{P}\left( \gamma\left( x_{j_{1}}^{\left( 1\right)
}\right) x_{j_{2}}^{\left( 2\right) },\ldots,x_{j_{n+1}}^{\left(
n+1\right) }\right) \right) \right\vert ^{p}\right) ^{\frac{1}{p}}\\
& =\left( \sum_{j_{2}=1}^{m^{2}}\sum_{j_{3},\ldots,j_{n+1}=1}^{m}\left\vert
\widetilde{\varphi}_{j_{2}\ldots j_{n+1}}\left( \check{P}\left( z_{j_{2}
}^{(2)},\ldots,z_{j_{n+1}}^{\left( n+1\right) }\right) \right) \right\vert
^{p}\right) ^{\frac{1}{p}}
\end{align*}
and the other cases are similar. Then
\begin{align*}
& \left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left\vert \varphi_{j_{1}\ldots
j_{n+1}}\left( \check{P}\left( \gamma\left( x_{j_{1}}^{\left( 1\right)
}\right) x_{j_{2}}^{\left( 2\right) },\ldots,x_{j_{n+1}}^{\left(
n+1\right) }\right) \right) \right\vert ^{p}\right) ^{\frac{1}{p}}\\
& =\left( \sum_{j_{2},\ldots,j_{n+1}=1}^{m^{2},m,\ldots,m}\left\vert
\widetilde{\varphi}_{j_{2}\ldots j_{n+1}}\left( \check{P}\left( z_{j_{2}
}^{(2)},\ldots,z_{j_{n+1}}^{\left( n+1\right) }\right) \right) \right\vert
^{p}\right) ^{\frac{1}{p}}\\
& \leq\left\Vert \check{P}\right\Vert _{mas\left( p;q;r\right) }\left\Vert
\left( \widetilde{\varphi}_{j_{2}\ldots j_{n+1}}\right) _{j_{2}
,\ldots,j_{n+1}}^{m^{2},m,\ldots,m}\right\Vert _{w,r}\left\Vert \left(
z_{j_{2}}^{\left( 2\right) }\right) _{j_{2}=1}^{m^{2}}\right\Vert
_{w,q}\prod_{i=3}^{n+1}\left\Vert \left( z_{j_{i}}^{\left( i\right)
}\right) _{j_{i}=1}^{m}\right\Vert _{w,q}\\
& =\left\Vert \check{P}\right\Vert _{mas\left( p;q;r\right) }\left\Vert
\left( \varphi_{j_{1}\ldots j_{n+1}}\right) _{j\in\mathbb{N}_{m}^{n+1}
}\right\Vert _{w,r}\left\Vert \left( \gamma\left( x_{j_{1}}^{\left(
1\right) }\right) x_{j_{2}}^{\left( 2\right) }\right) _{j_{1},j_{2}
=1}^{m}\right\Vert _{w,q}\prod_{i=3}^{n+1}\left\Vert \left( x_{j}^{\left(
i\right) }\right) _{j=1}^{m}\right\Vert _{w,q}.
\end{align*}
Since
\begin{align*}
& \left\Vert \left( \gamma\left( x_{j_{1}}^{\left( 1\right) }\right)
x_{j_{2}}^{\left( 2\right) }\right) _{j_{1},j_{2}=1}^{m}\right\Vert
_{w,q}\\
& \leq\left\Vert \left( \gamma\left( x_{j_{1}}^{\left( 1\right) }\right)
\right) _{j_{1}=1}^{m}\right\Vert _{\infty}\sup_{\left\Vert \varphi
\right\Vert \leq1}\left( \sum_{j=1}^{m}\left\vert \varphi\left( x_{j_{2}
}^{\left( 2\right) }\right) \right\vert ^{q}\right) ^{\frac{1}{q}}\\
& \leq\left\Vert \left( \gamma\left( x_{j_{1}}^{\left( 1\right) }\right)
\right) _{j_{1}=1}^{m}\right\Vert _{q}\left\Vert \left( x_{j_{2}}^{\left(
2\right) }\right) _{j_{2}=1}^{m}\right\Vert _{w,q}\\
& \leq\left\Vert \gamma\right\Vert \left\Vert \left( x_{j_{1}}^{\left(
1\right) }\right) _{j_{1}=1}^{m}\right\Vert _{w,q}\left\Vert \left(
x_{j_{2}}^{\left( 2\right) }\right) _{j_{2}=1}^{m}\right\Vert _{w,q},
\end{align*}
we have
\begin{align*}
& \left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left\vert \varphi_{j_{1}\ldots
j_{n+1}}\left( \check{P}\left( \gamma\left( x_{j_{1}}^{\left( 1\right)
}\right) x_{j_{2}}^{\left( 2\right) },\ldots,x_{j_{n+1}}^{\left(
n+1\right) }\right) \right) \right\vert ^{p}\right) ^{\frac{1}{p}}\\
& \leq\left\Vert \gamma\right\Vert \left\Vert \check{P}\right\Vert
_{mas\left( p;q;r\right) }\left\Vert \left( \varphi_{j_{1}\ldots j_{n+1}
}\right) _{j\in\mathbb{N}_{m}^{n+1}}\right\Vert _{w,r}\prod_{i=1}
^{n+1}\left\Vert \left( x_{j}^{\left( i\right) }\right) _{j=1}
^{m}\right\Vert _{w,q}.
\end{align*}
Using the same idea for the other $n$ terms of (\ref{estta}), we obtain
\begin{align*}
& \left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left\vert \varphi_{j_{1}\ldots
j_{n+1}}\left( \check{P}\left( \gamma\left( x_{j_{2}}^{\left( 2\right)
}\right) x_{j_{1}}^{\left( 1\right) },x_{j_{3}}^{\left( 3\right) }
,\ldots,x_{j_{n+1}}^{\left( n+1\right) }\right) \right) \right\vert
^{p}\right) ^{\frac{1}{p}}\\
& \leq\left\Vert \gamma\right\Vert \left\Vert \check{P}\right\Vert
_{mas\left( p;q;r\right) }\left\Vert \left( \varphi_{j_{1}\ldots j_{n+1}
}\right) _{j\in\mathbb{N}_{m}^{n+1}}\right\Vert _{w,r}\prod_{i=1}
^{n+1}\left\Vert \left( x_{j}^{\left( i\right) }\right) _{j=1}
^{m}\right\Vert _{w,q},
\end{align*}
\[
\vdots
\]
\begin{align*}
& \left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left\vert \varphi_{j_{1}\ldots
j_{n+1}}\left( \check{P}\left( \gamma\left( x_{j_{n+1}}^{\left(
n+1\right) }\right) x_{j_{1}}^{\left( 1\right) },x_{j_{2}}^{\left(
2\right) },\ldots,x_{j_{n}}^{\left( n\right) }\right) \right) \right\vert ^{p}\right)
^{\frac{1}{p}}\\
& \leq\left\Vert \gamma\right\Vert \left\Vert \check{P}\right\Vert
_{mas\left( p;q;r\right) }\left\Vert \left( \varphi_{j_{1}\ldots j_{n+1}
}\right) _{j\in\mathbb{N}_{m}^{n+1}}\right\Vert _{w,r}\prod_{i=1}
^{n+1}\left\Vert \left( x_{j}^{\left( i\right) }\right) _{j=1}
^{m}\right\Vert _{w,q}.
\end{align*}
Therefore
\begin{align*}
& \left( \sum_{j_{1},\ldots,j_{n+1}=1}^{m}\left\vert \varphi_{j_{1}\ldots
j_{n+1}}\left( \left( \gamma P\right) ^{\vee}\left( x_{j_{1}}^{\left(
1\right) },\ldots,x_{j_{n+1}}^{\left( n+1\right) }\right) \right)
\right\vert ^{p}\right) ^{\frac{1}{p}}\\
& \leq\frac{1}{n+1}\left[ \left\Vert \gamma\right\Vert \left\Vert \check
{P}\right\Vert _{mas\left( p;q;r\right) }\left\Vert \left( \varphi
_{j_{1}\ldots j_{n+1}}\right) _{j\in\mathbb{N}_{m}^{n+1}}\right\Vert
_{w,r}\prod_{i=1}^{n+1}\left\Vert \left( x_{j}^{\left( i\right) }\right)
_{j=1}^{m}\right\Vert _{w,q}+\cdots\right. \\
& \left. \cdots+\left\Vert \gamma\right\Vert \left\Vert \check{P}\right\Vert
_{mas\left( p;q;r\right) }\left\Vert \left( \varphi_{j_{1}\ldots j_{n+1}
}\right) _{j\in\mathbb{N}_{m}^{n+1}}\right\Vert _{w,r}\prod_{i=1}
^{n+1}\left\Vert \left( x_{j}^{\left( i\right) }\right) _{j=1}
^{m}\right\Vert _{w,q}\right] \\
& =\left\Vert \gamma\right\Vert \left\Vert \check{P}\right\Vert _{mas\left(
p;q;r\right) }\left\Vert \left( \varphi_{j_{1}\ldots j_{n+1}}\right)
_{j\in\mathbb{N}_{m}^{n+1}}\right\Vert _{w,r}\prod_{i=1}^{n+1}\left\Vert
\left( x_{j}^{\left( i\right) }\right) _{j=1}^{m}\right\Vert _{w,q}.
\end{align*}
Finally we conclude that $\gamma P$ is multiple $\left( p;q;r\right)
$-summing and
\begin{align*}
\left\Vert \gamma P\right\Vert _{\mathcal{P}_{mas\left( p;q;r\right) }
^{n+1}} & \leq\left\Vert \gamma\right\Vert \left\Vert \check{P}\right\Vert
_{mas\left( p;q;r\right) }\\
& =\left\Vert \gamma\right\Vert \left\Vert P\right\Vert _{\mathcal{P}
_{mas\left( p;q;r\right) }^{n}}.
\end{align*}
The items (i) and (ii) from Definition \ref{IdeaisCompativeis} are obtained in
a similar way.
\end{proof}
\section{Multiple mixing summing operators}
In this section we introduce the notion of multiple mixing summing multilinear
operators (and polynomials) which is coherent and compatible with the
respective operator ideal. As another indicator that this is a correct
approach to nonlinear mixing summability, we prove a quotient theorem for
multilinear operators similar to the one for mixing summing linear operators.
\begin{definition}
Let $0<p_{1},\ldots,p_{n}\leq q\leq s<\infty$. An $n$-linear operator
$A\in\mathcal{L}(E_{1},\ldots,E_{n};F)$ is multiple $(s,q;p_{1},\ldots,p_{n}
)$-mixing summing if there exists a constant $\sigma\geq0$ such that
\begin{equation}
\left\Vert \left( A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)})\right)
_{j_{1},\ldots,j_{n}=1}^{m}\right\Vert _{mx(s,q)}\leq\sigma\prod_{k=1}
^{n}\left\Vert (x_{j}^{(k)})_{j=1}^{m}\right\Vert _{w,p_{k}} \label{III}
\end{equation}
for every $m\in\mathbb{N}$, $x_{1}^{(1)},\ldots,x_{m}^{(1)}\in E_{1}
,\ldots,x_{1}^{(n)},\ldots,x_{m}^{(n)}\in E_{n}.$
\end{definition}
In this case we define
\[
\left\Vert A\right\Vert _{mx(s,q;p_{1},\ldots,p_{n})}=\inf\sigma.
\]
If $p_{1}=\cdots=p_{n}=p,$ we say that $A$ is multiple $(s,q;p)$-mixing
summing. The space of all multiple $(s,q;p_{1},\ldots,p_{n})$-mixing summing
operators is represented by $\Pi_{mx(s,q;p_{1},\ldots,p_{n})}.$
In order to avoid trivialities in the definition of multiple $(s,q;p_{1}
,\ldots,p_{n})$-mixing summing operators, we assume that $p_{k}\leq q$, for
all $k=1,\ldots,n.$ In fact, one can check that if $T\in\mathcal{L}(E_{1}
,\ldots,E_{n};F)$ is multiple $(s,q;p_{1},\ldots,p_{n})$-mixing summing and
$q<p_{k},$ for some $k,$ then $T=0.$
The following result, whose proof is standard and we omit, characterizes
multiple $(s,q;p_{1},\ldots,p_{n})$-mixing summing operators as those which
take adequate weakly summable sequences into adequate mixed summable sequences:
\begin{proposition}
\label{Primeira Prop}Let $0<p_{1},\ldots,p_{n}\leq q\leq s<\infty.$ An
operator $A\in\mathcal{L}(E_{1},\ldots,E_{n};F)$ is multiple $(s,q;p_{1}
,\ldots,p_{n} )$-mixing summing if, and only if,
\[
\left( A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)})\right) _{j_{1},\ldots,j_{n}
=1}^{\infty}\in\ell_{(s,q)}^{mx}\left( F,\mathbb{N}^{n}\right)
\]
regardless of the choice of $(x_{i}^{(1)})_{i=1}^{\infty}\in\ell_{p_{1}}
^{w}(E_{1}),\ldots,$ $(x_{i}^{(n)})_{i=1}^{\infty}$ $\in\ell_{p_{n}}^{w}
(E_{n}).$
\end{proposition}
In fact the proof of the previous proposition also shows that $A$ is multiple
$(s,q;p_{1},\ldots,p_{n})$-mixing summing if, and only if, the $n$-linear
operator
\[
\tilde{A}\left( (x_{i}^{(1)})_{i=1}^{\infty},\ldots,(x_{i}^{(n)}
)_{i=1}^{\infty}\right) =\left( A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}
^{(n)})\right) _{j_{1},...,j_{n}=1}^{\infty}
\]
belongs to $\mathcal{L}(\ell_{p_{1}}^{w}(E_{1}),\ldots,\ell_{p_{n}}^{w}
(E_{n});\ell_{(s,q)}^{mx}\left( F,\mathbb{N}^{n}\right) )$. Moreover
\[
\left\Vert A\right\Vert _{mx(s,q;p_{1},\ldots,p_{n})}=\left\Vert \tilde
{A}\right\Vert .
\]
The main result of this section (Theorem \ref{criterio}) is a consequence of
the following powerful characterization of mixed summable sequences due to
Maurey \cite{Maurey} (see also \cite[16.4.3]{pp1}):
\begin{theorem}
[Maurey]\label{caracter} Let $0<q<s<\infty.$ A sequence $\left( z_{j}\right)
_{j=1}^{\infty}$ in $E$ is mixed $(s,q)$-summable if, and only if,
\[
\left( \left( \int_{B_{E^{\ast}}}\left\vert \left\langle \varphi
,z_{j}\right\rangle \right\vert ^{s}d\mu(\varphi)\right) ^{\frac{1}{s}
}\right) _{j=1}^{\infty}\in\ell_{q}\text{ whenever }\mu\in W(B_{E^{\ast}}).
\]
Besides
\[
\left\Vert \left( z_{j}\right) _{j=1}^{\infty}\right\Vert _{mx(s,q)}
=\sup_{\mu\in W(B_{E^{\ast}})}\left( \sum_{j=1}^{\infty}\left(
\int_{B_{E^{\ast}}}\left\vert \left\langle \varphi,z_{j}\right\rangle
\right\vert ^{s}d\mu(\varphi)\right) ^{\frac{q}{s}}\right) ^{\frac{1}{q}}.
\]
\end{theorem}
The next theorem shows that our concept has a characterization similar to the
linear case (see \cite{Flore}):
\begin{theorem}
\label{criterio}Let $0<p_{1},\ldots,p_{n}\leq q\leq s<\infty.$ An operator
$A\in\mathcal{L}(E_{1},\ldots,E_{n};F)$ is multiple $(s,q;p_{1},\ldots,p_{n})$
mixing summing if, and only if, there is a constant $\sigma\geq0$ such that
\begin{align}
& \left( \sum_{j_{1},\ldots,j_{n}=1}^{m}\left( \sum_{j=1}^{k}\left\vert
\left\langle \varphi_{j},A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)}
)\right\rangle \right\vert ^{s}\right) ^{\frac{q}{s}}\right) ^{\frac{1}{q}
}\label{jj}\\
& \leq\sigma\prod_{l=1}^{n}\left\Vert (x_{i}^{(l)})_{i=1}^{m}\right\Vert
_{w,p_{l}}\left\Vert (\varphi_{j})_{j=1}^{k}\right\Vert _{s}\nonumber
\end{align}
for all $k,m\in\mathbb{N}$, $x_{i}^{(l)}\in E_{l};$ $i=1,\ldots,m,$
$l=1,\ldots,n$ and $\varphi_{j}\in F^{\ast}$ with $j=1,\ldots,k.$ Furthermore,
\[
\left\Vert A\right\Vert _{mx(s,q;p_{1},\ldots,p_{n})}=\inf\sigma.
\]
\end{theorem}
\begin{proof}
We split the proof into two cases.
(i) Case $s=q.$
From (\ref{jj}) we conclude that
\[
\left( \sum_{j_{1},\ldots,j_{n}=1}^{m}\left\vert \left\langle \varphi
,A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)})\right\rangle \right\vert
^{q}\right) ^{\frac{1}{q}}\leq\sigma\prod_{l=1}^{n}\left\Vert (x_{i}
^{(l)})_{i=1}^{m}\right\Vert _{w,p_{l}}
\]
for all $\varphi\in B_{F^{\ast}}.$ Thus
\begin{equation}
\left\Vert \left( A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)})\right)
_{j_{1},...,j_{n}\in\mathbb{N}_{m}}\right\Vert _{w,q}\leq\sigma\prod_{l=1}
^{n}\left\Vert (x_{i}^{(l)})_{i=1}^{m}\right\Vert _{w,p_{l}} \label{I}
\end{equation}
and so by Theorem \ref{caracter} and by (\ref{I}) we obtain
\begin{align*}
& \left\Vert \left( A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)})\right)
_{j_{1},...,j_{n}\in\mathbb{N}_{m}}\right\Vert _{mx(q,q)}\\
& =\sup_{\mu\in W(B_{F^{\ast}})}\left( \sum_{j_{1},\ldots,j_{n}=1}
^{m}\left( \int_{B_{F^{\ast}}}\left\vert \left\langle \varphi,A(x_{j_{1}
}^{(1)},\ldots,x_{j_{n}}^{(n)})\right\rangle \right\vert ^{q}d\mu
(\varphi)\right) ^{\frac{q}{q}}\right) ^{\frac{1}{q}}\\
& \leq\sup_{\mu\in W(B_{F^{\ast}})}\left( \int_{B_{F^{\ast}}}\sup_{\psi\in
B_{F^{\ast}}}\sum_{j_{1},\ldots,j_{n}=1}^{m}\left\vert \left\langle
\psi,A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)})\right\rangle \right\vert
^{q}d\mu(\varphi)\right) ^{\frac{1}{q}}\\
& =\sup_{\mu\in W(B_{F^{\ast}})}\left( \int_{B_{F^{\ast}}}\left\Vert \left(
A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)})\right) _{j_{1},...,j_{n}
\in\mathbb{N}_{m}}\right\Vert _{w,q}^{q}d\mu(\varphi)\right) ^{\frac{1}{q}}\\
& \leq\left\Vert \left( A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)})\right)
_{j_{1},...,j_{n}\in\mathbb{N}_{m}}\right\Vert _{w,q}\\
& \leq\sigma\prod_{l=1}^{n}\left\Vert (x_{i}^{(l)})_{i=1}^{m}\right\Vert
_{w,p_{l}}.
\end{align*}
Hence, $A\in\Pi_{mx(q,q;p_{1},\ldots,p_{n})}(E_{1},\ldots,E_{n};F)$ and
$\left\Vert A\right\Vert _{mx(q,q;p_{1},\ldots,p_{n})}\leq\sigma$.
Conversely, suppose that $A\in\Pi_{mx(q,q;p_{1},\ldots,p_{n})}(E_{1}
,\ldots,E_{n};F)$. Given
\[
x_{1}^{(1)},\ldots,x_{m}^{(1)}\in E_{1},\ldots,x_{1}^{(n)},\ldots,x_{m}
^{(n)}\in E_{n}
\]
and $\varphi_{1},\ldots,\varphi_{k}\in F^{\ast}$, if
\[
A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)})=\tau_{j_{1},\ldots,j_{n}}
y_{j_{1},\ldots,j_{n}},
\]
where $(\tau_{j_{1},\ldots,j_{n}})_{j_{1},...,j_{n}\in\mathbb{N}}\in
\ell_{\infty}$ and $(y_{j_{1},\ldots,j_{n}})_{j_{1},...,j_{n}\in\mathbb{N}}
\in\ell_{q}^{w}(F;\mathbb{N}^{n})$ we have
\begin{align*}
& \left( \sum_{j_{1},\ldots,j_{n}=1}^{m}\left( \sum_{j=1}^{k}\left\vert
\left\langle \varphi_{j},A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)}
)\right\rangle \right\vert ^{q}\right) ^{\frac{q}{q}}\right) ^{\frac{1}{q}
}\\
& =\left( \sum_{j=1}^{k}\left( \left\Vert \varphi_{j}\right\Vert ^{q}
\sum_{j_{1},\ldots,j_{n}=1}^{m}\left\vert \left\langle \frac{\varphi_{j}
}{\left\Vert \varphi_{j}\right\Vert },\tau_{j_{1},\ldots,j_{n}}y_{j_{1}
,\ldots,j_{n}}\right\rangle \right\vert ^{q}\right) \right) ^{\frac{1}{q}}\\
& =\left( \sum_{j=1}^{k}\left\Vert \varphi_{j}\right\Vert ^{q}\right)
^{\frac{1}{q}}\left( \sum_{j_{1},\ldots,j_{n}=1}^{m}\left\vert \tau
_{j_{1},\ldots,j_{n}}\right\vert ^{q}\left\vert \left\langle \frac{\varphi
_{j}}{\left\Vert \varphi_{j}\right\Vert },y_{j_{1},\ldots,j_{n}}\right\rangle
\right\vert ^{q}\right) ^{\frac{1}{q}}\\
& \leq\left\Vert (\varphi_{j})_{j=1}^{k}\right\Vert _{q}\left\Vert
(\tau_{j_{1},\ldots,j_{n}})_{j\in\mathbb{N}^{n}}\right\Vert _{\infty
}\left\Vert (y_{j_{1},\ldots,j_{n}})_{j\in\mathbb{N}^{n}}\right\Vert _{w,q}.
\end{align*}
Taking the infimum in both sides, we obtain
\begin{align*}
& \left( \sum_{j_{1},\ldots,j_{n}=1}^{m}\left( \sum_{j=1}^{k}\left\vert
\left\langle \varphi_{j},A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)}
)\right\rangle \right\vert ^{q}\right) ^{\frac{q}{q}}\right) ^{\frac{1}{q}
}\\
& \leq\left\Vert (\varphi_{j})_{j=1}^{k}\right\Vert _{q}\left\Vert \left(
A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)})\right) _{j\in\mathbb{N}_{m}^{n}
}\right\Vert _{mx(q,q)}\\
& \leq\left\Vert (\varphi_{j})_{j=1}^{k}\right\Vert _{q}\left\Vert
A\right\Vert _{mx(q,q;p_{1},\ldots,p_{n})}\prod_{l=1}^{n}\left\Vert
(x_{i}^{(l)})_{i=1}^{m}\right\Vert _{w,p_{l}}.
\end{align*}
Therefore $\inf\sigma\leq\left\Vert A\right\Vert _{mx(q,q;p_{1},\ldots,p_{n}
)}$ and with the last inequality we obtain
\[
\left\Vert A\right\Vert _{mx(q,q;p_{1},\ldots,p_{n})}=\inf\sigma.
\]
(ii) Case $s>q.$
Let $A\in\Pi_{mx(s,q;p_{1},\ldots,p_{n})}(E_{1},\ldots,E_{n};F).$ Given
$0\neq\varphi_{1},\ldots,\varphi_{k}\in F^{\ast}$ we define the probability
measure
\[
\nu=\sum_{j=1}^{k}\nu_{j}\delta_{j},\text{ where }\nu_{j}=\frac{\left\Vert
\varphi_{j}\right\Vert ^{s}}{\sum_{j=1}^{k}\left\Vert \varphi_{j}\right\Vert
^{s}}
\]
and $\delta_{j}$ is the Dirac measure at the point $\tilde{\varphi}_{j}
=\frac{\varphi_{j}}{\left\Vert \varphi_{j}\right\Vert }.$
For $x_{1}^{(1)},\ldots,x_{m}^{(1)}\in E_{1},\ldots$, $x_{1}^{(n)}
,\ldots,x_{m}^{(n)}\in E_{n}$, note that
\begin{align*}
& \int_{B_{F^{\ast}}}\left\vert \left\langle \varphi,A(x_{j_{1}}^{(1)}
,\ldots,x_{j_{n}}^{(n)})\right\rangle \right\vert ^{s}d\nu(\varphi)\\
& =\sum_{j=1}^{k}\left\vert \left\langle \tilde{\varphi}_{j},A(x_{j_{1}
}^{(1)},\ldots,x_{j_{n}}^{(n)})\right\rangle \right\vert ^{s}\nu
(\tilde{\varphi}_{j})\\
& =\sum_{j=1}^{k}\left\vert \left\langle \frac{\varphi_{j}}{\left\Vert
\varphi_{j}\right\Vert },A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)}
)\right\rangle \right\vert ^{s}\,\nu_{j}\,\delta_{j}(\tilde{\varphi}_{j})\\
& =\sum_{j=1}^{k}\left\vert \left\langle \frac{\varphi_{j}}{\left\Vert
\varphi_{j}\right\Vert },A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)}
)\right\rangle \right\vert ^{s}\,\frac{\left\Vert \varphi_{j}\right\Vert ^{s}
}{\sum_{j=1}^{k}\left\Vert \varphi_{j}\right\Vert ^{s}}\\
& =\frac{1}{\left\Vert (\varphi_{j})_{j=1}^{k}\right\Vert _{s}^{s}}\sum
_{j=1}^{k}\left\vert \left\langle \varphi_{j},A(x_{j_{1}}^{(1)},\ldots
,x_{j_{n}}^{(n)})\right\rangle \right\vert ^{s}.
\end{align*}
From the previous equalities and from Theorem \ref{caracter} we have
\begin{align*}
& \left( \sum_{j_{1},\ldots,j_{n}=1}^{m}\left( \sum_{j=1}^{k}\left\vert
\left\langle \varphi_{j},A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)}
)\right\rangle \right\vert ^{s}\right) ^{\frac{q}{s}}\right) ^{\frac{1}{q}
}\\
& =\left( \sum_{j_{1},\ldots,j_{n}=1}^{m}\left( \int_{B_{F^{\ast}}
}\left\vert \left\langle \varphi,A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}
^{(n)})\right\rangle \right\vert ^{s}d\nu(\varphi)\right) ^{\frac{q}{s}
}\right) ^{\frac{1}{q}}\left\Vert (\varphi_{j})_{j=1}^{k}\right\Vert _{s}\\
& \leq\left\Vert \left( A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)})\right)
_{j\in\mathbb{N}_{m}^{n}}\right\Vert _{mx(s,q)}\left\Vert (\varphi_{j}
)_{j=1}^{k}\right\Vert _{s}\\
& \leq\left\Vert A\right\Vert _{mx(s,q;p_{1},\ldots,p_{n})}\prod_{l=1}
^{n}\left\Vert (x_{i}^{(l)})_{i=1}^{m}\right\Vert _{w,p_{l}}\left\Vert
(\varphi_{j})_{j=1}^{k}\right\Vert _{s}.
\end{align*}
Hence we obtain (\ref{jj}) with $\inf\sigma\leq\left\Vert A\right\Vert
_{mx(s,q;p_{1},\ldots,p_{n})}.$
Conversely, with the same idea and using (\ref{jj}), given a discrete
probability measure $\nu=\sum_{i=1}^{k}\nu_{i}\delta_{i}$ on $B_{F^{\ast}}$
we obtain
\begin{align*}
& \left( \sum_{j_{1},\ldots,j_{n}=1}^{m}\left( \int_{B_{F^{\ast}}
}\left\vert \left\langle \varphi,A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}
^{(n)})\right\rangle \right\vert ^{s}d\nu(\varphi)\right) ^{\frac{q}{s}
}\right) ^{\frac{1}{q}}\\
& =\left( \sum_{j_{1},\ldots,j_{n}=1}^{m}\left( \sum_{j=1}^{k}\left\vert
\left\langle \nu_{j}^{\frac{1}{s}}\varphi_{j},A(x_{j_{1}}^{(1)},\ldots
,x_{j_{n}}^{(n)})\right\rangle \right\vert ^{s}\right) ^{\frac{q}{s}}\right)
^{\frac{1}{q}}\\
& \leq\sigma\prod_{l=1}^{n}\left\Vert (x_{i}^{(l)})_{i=1}^{m}\right\Vert
_{w,p_{l}}\left\Vert (\nu_{j}^{\frac{1}{s}}\varphi_{j})_{j=1}^{k}\right\Vert
_{s}\\
& \leq\sigma\prod_{l=1}^{n}\left\Vert (x_{i}^{(l)})_{i=1}^{m}\right\Vert
_{w,p_{l}}.
\end{align*}
The previous inequality holds for every $\nu\in W(B_{F^{\ast}})$, since the
discrete probability measures are dense in $W(B_{F^{\ast}})$. Therefore, from
Theorem \ref{caracter} we obtain
\[
\left\Vert \left( A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)})\right)
_{j\in\mathbb{N}_{m}^{n}}\right\Vert _{mx(s,q)}\leq\sigma\prod_{l=1}
^{n}\left\Vert (x_{i}^{(l)})_{i=1}^{m}\right\Vert _{w,p_{l}},
\]
for all $m\in\mathbb{N}$ and
\[
\left\Vert A\right\Vert _{mx(s,q;p_{1},\ldots,p_{n})}=\inf\sigma.
\]
\end{proof}
\subsection{A quotient theorem}
For linear operators, $S\in\mathcal{L}\left( E;F\right) $ is $\left(
s,p\right) $-mixing summing if and only if $TS$ is absolutely $p$-summing for
all $T\in\Pi_{s}\left( F;G\right) $. In other words
\[
\Pi_{mx\left( s,p\right) }\left( E;F\right) =\left( \Pi_{s}\left(
F;G\right) \right) ^{-1}\circ\Pi_{p}\left( E;G\right) .
\]
For details we refer to \cite[Section 32]{Flore} and \cite{pp1}. In this
section we show that our approach provides a perfect multilinear extension of
this result. We show that the following assertions are equivalent:
\begin{itemize}
\item $T\in\mathcal{L}(E_{1},\ldots,E_{n};F)$ is multiple $\left( s,q;p_{1}
,\ldots,p_{n}\right) $-mixing summing.
\item $u\circ T\in\mathcal{L}_{mas(q;p_{1},\ldots,p_{n})}(E_{1},\ldots
,E_{n};G)$ for all $u\in\Pi_{s}\left( F;G\right) $ and $T\in\mathcal{L}
(E_{1},\ldots,E_{n};F).$
\end{itemize}
Using a different notation, we will show the following quotient theorem:
\begin{equation}
\Pi_{mx\left( s,q;p_{1},\ldots,p_{n}\right) }\left( E_{1},\ldots
,E_{n};F\right) =\left( \Pi_{s}\left( F;G\right) \right) ^{-1}
\circ\mathcal{L}_{mas\left( q;p_{1},\ldots,p_{n}\right) }\left(
E_{1},\ldots,E_{n};G\right) \label{dtt}
\end{equation}
for all $E_{1},\ldots,E_{n},F$ and $G.$
The quotient theorem (\ref{dtt}) is a direct consequence of the forthcoming
Propositions \ref{pp9} and \ref{pp99}. First we need the following lemma:
\begin{lemma}
\label{jlk}Let $A\in\mathcal{L}(E_{1},\ldots,E_{n};F)$ be so that
\[
u\circ A\in\mathcal{L}_{mas(p;p_{1},\ldots,p_{n})}(E_{1},\ldots,E_{n};G)
\]
for every Banach space $G$ and all $u\in\Pi_{r}(F;G).$ Then, there is a $C\geq0$
such that
\begin{equation}
\left\Vert u\circ A\right\Vert _{(p;p_{1},\ldots,p_{n})}\leq C\pi
_{r}(u).\label{gtr}
\end{equation}
\end{lemma}
\begin{proof}
Suppose that (\ref{gtr}) is not true. So, for all positive integer $k$ there
exist Banach spaces $F_{k}$ and $u_{k}\in\Pi_{r}(F;F_{k})$ so that
\[
\pi_{r}(u_{k})\leq\frac{1}{2^{k}}\text{ and }\left\Vert u_{k}\circ
A\right\Vert _{(p;p_{1},\ldots,p_{n})}\geq k.
\]
Let $J_{k}:F_{k}\rightarrow\ell_{2}\left( \left( F_{k}\right)
_{k=1}^{\infty}\right) $ and $Q_{j}:\ell_{2}\left( \left( F_{k}\right)
_{k=1}^{\infty}\right) \rightarrow F_{j}$ be the canonical maps for all
positive integers $j,k$. Since
\[
\pi_{r}\left( \sum\limits_{k=n_{1}}^{n_{2}}J_{k}\circ u_{k}\right) \leq
\sum\limits_{k=n_{1}}^{n_{2}}\pi_{r}\left( J_{k}\circ u_{k}\right) \leq
\sum\limits_{k=n_{1}}^{n_{2}}\pi_{r}\left( u_{k}\right) \leq\sum
\limits_{k=n_{1}}^{n_{2}}\frac{1}{2^{k}}
\]
it follows that
\[
u:=\sum\limits_{j=1}^{\infty}J_{j}\circ u_{j}\in\Pi_{r}(F;\ell_{2}\left(
\left( F_{k}\right) _{k=1}^{\infty}\right) ).
\]
Since $u_{k}=Q_{k}\circ u$, we thus have
\[
k\leq\left\Vert u_{k}\circ A\right\Vert _{(p;p_{1},\ldots,p_{n})}=\left\Vert
Q_{k}\circ u\circ A\right\Vert _{(p;p_{1},\ldots,p_{n})}\leq\left\Vert u\circ
A\right\Vert _{(p;p_{1},\ldots,p_{n})},
\]
a contradiction.
\end{proof}
\begin{proposition}
\label{pp9}If $A\in\mathcal{L}(E_{1},\ldots,E_{n};F)$ is such that $u\circ
A\in\mathcal{L}_{mas(q;p_{1},\ldots,p_{n})}(E_{1},\ldots,E_{n};G)$ for all
$u\in\Pi_{s}\left( F;G\right) ,$ then
\[
A\in\Pi_{mx(s,q;p_{1},\ldots,p_{n})}(E_{1},\ldots,E_{n};F).
\]
\end{proposition}
\begin{proof}
Let $x_{i}^{(j)}\in E_{j}$ with $\left( i,j\right) \in\left\{
1,\ldots,m\right\} \times\left\{ 1,\ldots,n\right\} .$ Consider
$S:F\rightarrow\ell_{s}^{k}$ defined by
\[
S(y)=\left( \varphi_{j}\left( y\right) \right) _{j=1}^{k}.
\]
It is not difficult to show that
\[
\pi_{s}\left( S\right) \leq\left\Vert \left( \varphi_{j}\right) _{j=1}
^{k}\right\Vert _{s}.
\]
Since $S\circ A\in\mathcal{L}_{mas(q;p_{1},\ldots,p_{n})}(E_{1},\ldots
,E_{n};\ell_{s}^{k})$ and invoking Lemma \ref{jlk}, there is a constant $C>0$
so that
\begin{align*}
& \left( \sum_{j_{1},\ldots,j_{n}=1}^{m}\left( \sum_{j=1}^{k}\left\vert
\left\langle \varphi_{j},A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)}
)\right\rangle \right\vert ^{s}\right) ^{\frac{q}{s}}\right) ^{\frac{1}{q}
}\\
& =\left( \sum_{j_{1},\ldots,j_{n}=1}^{m}\left\Vert S\circ A\left(
x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)}\right) \right\Vert _{s}^{q}\right)
^{\frac{1}{q}}\\
& \leq\left\Vert S\circ A\right\Vert _{(q;p_{1},\ldots,p_{n})}
\prod\limits_{j=1}^{n}\left\Vert \left( x_{i}^{(j)}\right) _{i=1}
^{m}\right\Vert _{w,p_{j}}\\
& \leq C\pi_{s}\left( S\right) \prod\limits_{j=1}^{n}\left\Vert \left(
x_{i}^{(j)}\right) _{i=1}^{m}\right\Vert _{w,p_{j}}\\
& \leq C\left\Vert \left( \varphi_{j}\right) _{j=1}^{k}\right\Vert
_{s}\prod\limits_{j=1}^{n}\left\Vert \left( x_{i}^{(j)}\right) _{i=1}
^{m}\right\Vert _{w,p_{j}}.
\end{align*}
\end{proof}
\begin{proposition}
\label{pp99}If $A\in\Pi_{mx(s,q;p_{1},\ldots,p_{n})}(E_{1},\ldots,E_{n};F),$
then
\begin{equation}
u\circ A\in\Pi_{(q;p_{1},\ldots,p_{n})}(E_{1},\ldots,E_{n};G) \label{ggr}
\end{equation}
and
\begin{equation}
\left\Vert u\circ A\right\Vert _{(q;p_{1},\ldots,p_{n})}\leq\pi_{s}\left(
u\right) \left\Vert A\right\Vert _{mx(s,q;p_{1},\ldots,p_{n})} \label{ggs}
\end{equation}
for all $u\in\Pi_{s}\left( F;G\right) .$
\end{proposition}
\begin{proof}
Let $x_{i}^{(j)}\in E_{j}$ with $\left( i,j\right) \in\left\{
1,\ldots,m\right\} \times\left\{ 1,\ldots,n\right\} .$ Given $\varepsilon
>0$ there are $\tau_{j_{1},\ldots,j_{n}}\in K$ and $y_{j_{1},\ldots,j_{n}}\in
F$ so that
\[
A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)})=\tau_{j_{1},\ldots,j_{n}}
y_{j_{1},\ldots,j_{n}}
\]
and
\begin{align*}
& \left\Vert \left( \tau_{j_{1},\ldots,j_{n}}\right) _{j_{1},\ldots
,j_{n}=1}^{m}\right\Vert _{r}\left\Vert \left( y_{j_{1},\ldots,j_{n}}\right)
_{j_{1},\ldots,j_{n}=1}^{m}\right\Vert _{w,s}\\
& <\left( 1+\varepsilon\right) \left\Vert \left( A(x_{j_{1}}^{(1)}
,\ldots,x_{j_{n}}^{(n)})\right) _{j_{1},\ldots,j_{n}=1}^{m}\right\Vert
_{mx(s,q)}\\
& \leq\left( 1+\varepsilon\right) \left\Vert A\right\Vert _{mx(s,q;p_{1}
,\ldots,p_{n})}\prod\limits_{j=1}^{n}\left\Vert \left( x_{i}^{(j)}\right)
_{i=1}^{m}\right\Vert _{w,p_{j}}.
\end{align*}
Hence, using H\"{o}lder's Inequality we obtain
\begin{align*}
& \left\Vert \left( u\circ A(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}^{(n)})\right)
_{j_{1},\ldots,j_{n}=1}^{m}\right\Vert _{q}\\
& \leq\left\Vert \left( \tau_{j_{1},\ldots,j_{n}}\right) _{j_{1}
,\ldots,j_{n}=1}^{m}\right\Vert _{r}\left\Vert \left( u\left( y_{j_{1}
,\ldots,j_{n}}\right) \right) _{j_{1},\ldots,j_{n}=1}^{m}\right\Vert _{s}\\
& \leq\left( 1+\varepsilon\right) \pi_{s}\left( u\right) \left\Vert
A\right\Vert _{mx(s,q;p_{1},\ldots,p_{n})}\prod\limits_{j=1}^{n}\left\Vert
\left( x_{i}^{(j)}\right) _{i=1}^{m}\right\Vert _{w,p_{j}}
\end{align*}
and making $\varepsilon\rightarrow0$ we get (\ref{ggr}) and (\ref{ggs}).
\end{proof}
\subsection{Coherence and compatibility}
The polynomial version of multiple mixing summing operators can be stated by
using the symmetric multilinear operator associated to the polynomials:
\begin{definition}
Let $0<p\leq s<\infty.$ A polynomial $P\in\mathcal{P}(^{n}E;F)$ is multiple
$(s,p)$-mixing summing if $\check{P}$ is multiple $(s,p;p)$-mixing summing.
Besides,
\[
\left\Vert P\right\Vert _{mx(s,p)}:=\left\Vert \check{P}\right\Vert
_{mx(s,p;p)}.
\]
\end{definition}
The following proposition, whose proof is standard, shows that, as happens
for multiple summing multilinear operators, coincidence results for multiple
mixing summing multilinear operators imply coincidence results for smaller degrees:
\begin{proposition}
If $\mathcal{L}(E_{1},\ldots,E_{n};F)=\Pi_{mx(s,q;p_{1},\ldots,p_{n})}
(E_{1},\ldots,E_{n};F)$ then
\[
\mathcal{L}(E_{k_{1}},\ldots,E_{k_{j}};F)=\Pi_{mx(s,q;p_{k_{1}},\ldots
,p_{k_{j}})}(E_{k_{1}},\ldots,E_{k_{j}};F)
\]
whenever $1\leq j<n$ and $\left\{ k_{1}<\cdots<k_{j}\right\} \subset\left\{
1,\ldots,n\right\} $.
\end{proposition}
Similarly to the previous section one can show that $\left( \mathcal{P}
_{mx(s,p)}^{n},\left\Vert \cdot\right\Vert _{mx(s,p)}\right) _{n=1}^{\infty}$
is coherent and for each $n$ it is compatible with the operator ideal $\left(
\Pi_{mx(s,p)},\pi_{mx(s,p)}\right) $. For example we prove (i) of Definition
\ref{IdeaisCoerentes}:
\begin{proposition}
If $P\in\mathcal{P}_{mx(s,p)}(^{n}E;F)$ and $a\in E$, then $P_{a}
\in\mathcal{P}_{mx(s,p)}(^{n-1}E;F)$ and
\[
\left\Vert P_{a}\right\Vert _{mx(s,p)}\leq\left\Vert P\right\Vert
_{mx(s,p)}\left\Vert a\right\Vert .
\]
\end{proposition}
\begin{proof}
Since $\check{P}\in\Pi_{mx(s,p)}(^{n}E;F)$ we have
\[
\left( \sum_{j_{1},\ldots,j_{n}=1}^{m}\left( \sum_{j=1}^{k}\left\vert
\left\langle \varphi_{j},\check{P}(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}
^{(n)})\right\rangle \right\vert ^{s}\right) ^{\frac{p}{s}}\right)
^{\frac{1}{p}}\leq\sigma\prod_{l=1}^{n}\left\Vert (x_{i}^{(l)})_{i=1}
^{m}\right\Vert _{w,p}\left\Vert (\varphi_{j})_{j=1}^{k}\right\Vert _{s}
\]
and by choosing $x_{1}^{(n)}=a$ and $x_{j}^{(n)}=0$ for $j>1$ we have
\begin{align*}
& \left( \sum_{j_{1},\ldots,j_{n-1}=1}^{m}\left( \sum_{j=1}^{k}\left\vert
\left\langle \varphi_{j},\check{P}_{a}(x_{j_{1}}^{(1)},\ldots,x_{j_{n-1}
}^{(n-1)})\right\rangle \right\vert ^{s}\right) ^{\frac{p}{s}}\right)
^{\frac{1}{p}}\\
& =\left( \sum_{j_{1},\ldots,j_{n-1}=1}^{m}\left( \sum_{j=1}^{k}\left\vert
\left\langle \varphi_{j},\check{P}(x_{j_{1}}^{(1)},\ldots,x_{j_{n-1}}
^{(n-1)},a)\right\rangle \right\vert ^{s}\right) ^{\frac{p}{s}}\right)
^{\frac{1}{p}}\\
& =\left( \sum_{j_{1},\ldots,j_{n}=1}^{m}\left( \sum_{j=1}^{k}\left\vert
\left\langle \varphi_{j},\check{P}(x_{j_{1}}^{(1)},\ldots,x_{j_{n}}
^{(n)})\right\rangle \right\vert ^{s}\right) ^{\frac{p}{s}}\right)
^{\frac{1}{p}}\\
& \leq\left\Vert P\right\Vert _{mx(s,p)}\left\Vert a\right\Vert \prod
_{l=1}^{n-1}\left\Vert (x_{i}^{(l)})_{i=1}^{m}\right\Vert _{w,p}\left\Vert
(\varphi_{j})_{j=1}^{k}\right\Vert _{s}.
\end{align*}
\end{proof}
\section{Final comments and directions for further research}
The concepts of multiple mixing summing and multiple $\left( p;q;r_{1}
,\ldots,r_{n}\right) $-summing polynomials/multilinear operators, as natural
extensions of the notion of multiple summing multilinear operators, can be
further investigated following different directions: coincidence theorems,
generalizations to holomorphic mappings, or inclusion theorems, among others.
The study of coincidence theorems may follow the lines of \cite{Na} combined
with the results from the respective linear theories; the study of holomorphic
mappings may follow \cite{Junek} and for inclusion theorems \cite{davidstudia}
is certainly a good source of inspiration.
We encourage the interested reader to investigate other variants of mixing
summability and $\left( p;q;r_{1},\ldots,r_{n}\right) $-summability
following the lines given in \cite{jgdd, Nach00, MST, parc}.
\end{document} |
\begin{document}
\title{Optimal Volume-Sensitive Bounds for Polytope Approximation}
\author{
Sunil Arya\thanks{Research supported by the Research Grants Council of Hong Kong, China under projects number 16213219 and 16214721.}\\
Department of Computer Science and Engineering \\
The Hong Kong University of Science and Technology, Hong Kong\\
[email protected] \\
\and
David M. Mount\\
Department of Computer Science and
Institute for Advanced Computer Studies \\
University of Maryland, College Park, Maryland \\
[email protected] \\
}
\date{}
\maketitle
\begin{abstract}
Approximating convex bodies is a fundamental question in geometry and has a wide variety of applications. Consider a convex body $K$ of diameter $\Delta$ in $\mathbb{R}^d$ for fixed $d$. The objective is to minimize the number of vertices (alternatively, the number of facets) of an approximating polytope for a given Hausdorff error $\varepsilon$. It is known from classical results of Dudley (1974) and Bronshteyn and Ivanov (1976) that $\Theta((\Delta/\varepsilon)^{(d-1)/2})$ vertices (alternatively, facets) are both necessary and sufficient. While this bound is tight in the worst case, that of Euclidean balls, it is far from optimal for skinny convex bodies.
A natural way to characterize a convex object's skinniness is in terms of its relationship to the Euclidean ball. Given a convex body $K$, define its \emph{volume diameter} $\Delta_d$ to be the diameter of a Euclidean ball of the same volume as $K$, and define its \emph{surface diameter} $\Delta_{d-1}$ analogously for surface area. It follows from generalizations of the isoperimetric inequality that $\Delta \geq \Delta_{d-1} \geq \Delta_d$.
Arya, da Fonseca, and Mount (SoCG 2012) demonstrated that the diameter-based bound could be made surface-area sensitive, improving the above bound to $O((\Delta_{d-1}/\varepsilon)^{(d-1)/2})$. In this paper, we strengthen this by proving the existence of an approximation with $O((\Delta_d/\varepsilon)^{(d-1)/2})$ facets.
This improvement is a result of the combination of a number of new ideas. As in prior work, we exploit properties of the original body and its polar dual. In order to obtain a volume-sensitive bound, we explore the following more general problem. Given two convex bodies, one nested within the other, find a low-complexity convex polytope that is sandwiched between them. We show that this problem can be reduced to a covering problem involving a natural intermediate body based on the harmonic mean. Our proof relies on a geometric analysis of a relative notion of fatness involving these bodies.
\end{abstract}
\section{Introduction} \label{sec:intro}
Approximating convex bodies by polytopes is a fundamental problem, which has been extensively studied in the literature (see, e.g., Bronstein~\cite{Bro08}). We are given a convex body $K$ in Euclidean $d$-dimensional space and an error parameter $\varepsilon > 0$. The problem is to determine the minimum combinatorial complexity of a polytope that is $\varepsilon$-close to $K$ according to some measure of similarity. In this paper, we define similarity in terms of the Hausdorff distance~\cite{Bro08}, and we define combinatorial complexity in terms of the number of facets. Throughout, we assume that the dimension $d$ is a constant.
Approximation bounds presented in the literature are of two common types. In both cases, it is shown that there exists $\varepsilon_0 > 0$ such that the bounds hold for all $\varepsilon \leq \varepsilon_0$. The first of these are \emph{nonuniform bounds}, where the value of $\varepsilon_0$ may depend on properties of $K$, for example, bounds on its maximum curvature~\cite{Bor00, Cla06, Gru93a, McV75, Sch87, Tot48}. This is in contrast to \emph{uniform bounds}, where the value of $\varepsilon_0$ is independent of $K$ (but may depend on $d$).
Examples of uniform bounds include the classical work of Dudley~\cite{Dud74} and Bronshteyn and Ivanov~\cite{BrI76}. Dudley showed that, for $\varepsilon \leq 1$, any convex body $K$ can be $\varepsilon$-approximated by a polytope $P$ with $O((\Delta/\varepsilon)^{(d-1)/2})$ facets, where $\Delta$ is $K$'s diameter. Bronshteyn and Ivanov showed the same bound holds for the number of vertices. Constants hidden in the $O$-notation depend only on $d$. These results have numerous applications in computational geometry, for example the construction of coresets~\cite{AHV05,ArC14,AFM17b}.
The approximation bounds of both Dudley and Bronshteyn-Ivanov are tight in the worst case up to constant factors (specifically when $K$ is a Euclidean ball)~\cite{Bro08}. However, these bounds may be significantly suboptimal if $K$ is ``skinny''. A natural way to characterize a convex object's skinniness is in terms of its relationship to the Euclidean ball. Given a convex body $K$, define its \emph{volume diameter} $\Delta_d$ to be the diameter of a Euclidean ball of the same volume as $K$, and define its \emph{surface diameter} $\Delta_{d-1}$ analogously for surface area. These quantities are closely related (up to constant factors) to the classical concepts of \emph{quermassintegrals} and of \emph{intrinsic volumes} of the convex body \cite{McM75,McM91}. It follows from generalizations of the isoperimetric inequality that $\Delta \geq \Delta_{d-1} \geq \Delta_d$~\cite{McM91}.
Arya, da Fonseca, and Mount~\cite{AFM12b} proved that the diameter-based bound could be made surface-area sensitive, improving the above bound to $O((\Delta_{d-1}/\varepsilon)^{(d-1)/2})$. In this paper, we strengthen this to the following volume-sensitive bound.
\begin{theorem} \label{thm:main}
Consider real $d$-space, $\mathbb{R}^d$. There exists a constant $c_d$ (depending on $d$) such that for any convex body $K \subseteq \mathbb{R}^d$ and any $\varepsilon > 0$, if the width of $K$ in any direction is at least $\varepsilon$, then there exists an $\varepsilon$-approximating polytope $P$ whose number of facets is at most
\[
\left(\frac{c_d \Delta_d}{\varepsilon} \right)^{\kern-2pt\frac{d-1}{2}}.
\]
\end{theorem}
This bound is the strongest to date. For example, in $\mathbb{R}^3$, the area-sensitive bound yields better bounds for pencil-like objects, which are thin along two dimensions, while the volume-sensitive bound also yields better bounds for pancake-like objects, which are thin in just one dimension.
The minimum-width assumption seems to be a technical necessity, since it is not difficult to construct counterexamples where this condition does not hold. But this is not a fundamental impediment. If the body's width is less than $\varepsilon$ in some direction, then by projecting the body onto a hyperplane orthogonal to this direction, it is possible to reduce the problem to a convex approximation problem in one lower dimension. This can be repeated until the body's width is sufficiently large in all remaining dimensions, and the stated bound can be applied in this lower dimensional subspace, albeit with volume measured appropriate to this dimension.
While our uniform bound trivially holds in the nonuniform setting, we present a separate (and much shorter) proof that the same bounds hold in the nonuniform setting, assuming that $K$'s boundary is $C^2$ continuous.
\confonly{This is presented in the full version~\cite{SoCG23arxiv}.}
\arxivonly{This is presented in Section~\ref{s:nonuniform}.}
\begin{restatable}{theorem}{RLnonunifbound}
\label{thm:nonunif-bound}
Consider real $d$-space, $\mathbb{R}^d$. There exists a constant $c_d$ (depending on $d$) such that for any convex body $K \subseteq \mathbb{R}^d$ of $C^2$ boundary, as $\varepsilon$ approaches zero, there exists an $\varepsilon$-approximating polytope $P$ whose number of facets is at most
\[
\left(\frac{c_d \Delta_d}{\varepsilon} \right)^{\kern-2pt\frac{d-1}{2}}.
\]
\end{restatable}
\section{Overview of Techniques} \label{s:techniques}
Broadly speaking, the problem of approximating a convex body by a polytope involves ``sandwiching'' a polytope between two nested convex bodies, call them $K_0$ and $K_1$. For example, $K_0$ may be the original body to be approximated and $K_1$ is an expansion based on the allowed error bound. Most of the prior work in this area has focused on the specific manner in which $K_1$ is defined relative to $K_0$, which is typically confined to Euclidean space (for Hausdorff distance) or affine space (for the Banach-Mazur distance).
Recent approaches to convex approximation have been based on covering the body to be approximated with convex objects that respect the local shape of the body being approximated~\cite{AAFM22,AFM23}. Macbeath regions have been a key tool in this regard. Given a convex body $K$ and a point $x$ in $K$'s interior, the Macbeath region at $x$, $M_K(x)$, is the largest centrally symmetric body nested within $K$ and centered at $x$ (see Figure~\ref{f:macbeath-cover}(a)). A Macbeath region that has been shrunken by some constant factor $\lambda$ is denoted by $M_K^{\lambda}(x)$. Shrunken Macbeath regions have nice packing and covering properties, and they behave much like metric balls.
\begin{figure}
\centering
\caption{(a) The Macbeath region $M_K(x)$ and (b) a cover by shrunken Macbeath regions.}
\label{f:macbeath-cover}
\end{figure}
A natural way to construct a sandwiching polytope between two nested bodies $K_0$ and $K_1$ is to construct a collection of shrunken Macbeath regions that cover $K_0$ but lie entirely within $K_1$ (see Figure~\ref{f:macbeath-cover}(b)). If done properly, a sandwiching polytope can be constructed by sampling a constant number of points from each of these Macbeath regions, and taking the convex hull of their union. Thus, the number of Macbeath regions provides an upper bound on the number of vertices in the sandwiched polytope.
The ``sandwiching'' perspective described above yields additional new challenges. Consider the two bodies $K_0$ and $K_1$ shown in Figure~\ref{f:rel-fat}, where $K_0$ is a diamond shape nested within the square $K_1$. Consider $1/2$-scaled Macbeath region centered at a point $x$ that lies at the top vertex of $K_0$. Observe that almost all of its volume lies outside of $K_0$. This is problematic because our analysis is based on the number of Macbeath regions needed to cover the boundary of a body, in this case $\partial K_0$. We want a significant amount of the volume of each Macbeath region to lie within $K_0$. In cases like that shown in Figure~\ref{f:rel-fat}, only a tiny fraction of the volume can be charged in this manner against $K_0$.
\begin{figure}
\caption{The diamond $K_0$ nested within the square $K_1$; almost all of the volume of the shrunken Macbeath region at the top vertex of $K_0$ lies outside of $K_0$.}
\label{f:rel-fat}
\end{figure}
Intuitively, while the body $K_0$ is ``fat'' in a standard sense
\footnote{That is, the largest ball enclosed in $K_0$ and the smallest ball containing $K_0$ differ in size by a constant factor.},
it is not fat ``relative'' to the enclosing body $K_1$. To deal with this inconvenience, we will replace $K_1$ with an intermediate body between $K_0$ and $K_1$ that satisfies this property. In Section~\ref{s:am-hm} we formally define this notion of relative fatness, and we present an intermediate body, called the \emph{harmonic-mean body}, that satisfies this notion of fatness. We will see that this body can be used as a proxy for the sake of approximation.
\section{Preliminaries} \label{s:prelim}
In this section, we introduce terminology and notation, which will be used throughout the paper. This section can be skipped on first reading (moving directly to Section~\ref{s:hm-fat}).
Let us first recall some standard notation. Given vectors $u, v \in \mathbb{R}^d$, let $\ang{u,v}$ denote their dot product, and let $\|v\| = \sqrt{\ang{v,v}}$ denote $v$'s Euclidean length. Throughout, we will use the terms \emph{point} and \emph{vector} interchangeably. Given points $p,q \in \mathbb{R}^d$, let $\|p q\| = \|p - q\|$ denote the Euclidean distance between them. Let $\vol(\cdot)$ and $\area(\cdot)$ denote the $d$-dimensional and $(d-1)$-dimensional Lebesgue measures, respectively.
\subsection{Polarity and Centrality Properties} \label{s:centrality}
Given a bounded convex body $K \subseteq \mathbb{R}^d$ that contains the origin $O$ in its interior, define its \emph{polar}, denoted $K^*$, to be the convex set
\[
K^*
~ = ~ \{ u \,:\, \ang{u,v} \le 1, \hbox{~for all $v \in K$} \}.
\]
The polar enjoys many useful properties (see, e.g., Eggleston~\cite{Egg58}). For example, it is well known that $K^*$ is bounded and $(K^*)^* = K$. Further, if $K_1$ and $K_2$ are two convex bodies both containing the origin such that $K_1 \subseteq K_2$, then $K_2^* \subseteq K_1^*$.
Given a nonzero vector $v \in \mathbb{R}^d$, we define its ``polar'' $v^*$ to be the hyperplane that is orthogonal to $v$ and at distance $1/\|v\|$ from the origin, on the same side of the origin as $v$. The polar of a hyperplane is defined as the inverse of this mapping. We may equivalently define $K^*$ as the intersection of the closed halfspaces that contain the origin, bounded by the hyperplanes $v^*$, for all $v \in K$.
Given a convex body $K \subseteq \mathbb{R}^d$ and $x \in \interior(K)$, there are many ways to characterize the property that $x$ is ``central'' within $K$~\cite{Gru63, Tot15}. For our purposes, we will make it precise using the concept of Mahler volume. Define $K$'s \emph{Mahler volume}, denoted $\mu(K)$, to be the product $\vol(K) \cdot \vol(K^*)$. The Mahler volume is well studied (see, e.g.~\cite{San49,MeP90,Sch93}). It is invariant under linear transformations, and it depends on the location of the origin within $K$. We say that $K$ is \emph{well-centered} with respect to a point $x \in \interior(K)$ if the Mahler volume $\mu(K-x)$ is at most $O(1)$. When $x$ is not specified, it is understood to be the origin. We have the following lemma~\cite{AFM23,MiP00}.
\begin{lemma}
\label{lem:centroid}
Any convex body $K$ is well-centered with respect to its centroid.
\end{lemma}
Lower bounds on the Mahler volume have also been extensively studied and it is known that the following bound holds irrespective of the location of the origin~\cite{BoM87,Kup08,Naz12}.
\begin{lemma}
\label{lem:mahler-bounds}
Given a convex body $K \subseteq \mathbb{R}^d$ whose interior contains the origin, $\mu(K) = \Omega(1)$.
\end{lemma}
\subsection{Caps, Rays, and Relative Measures} \label{s:cap-prop}
Consider a compact convex body $K$ in $d$-dimensional space $\mathbb{R}^d$ with the origin $O$ in its interior. A \emph{cap} $C$ of $K$ is defined to be the nonempty intersection of $K$ with a halfspace. Letting $h_1$ denote a hyperplane that does not pass through the origin, let $\pcap{K}{h_1}$ denote the cap resulting by intersecting $K$ with the halfspace bounded by $h_1$ that does not contain the origin (see Figure~\ref{f:widray}(a)). Define the \emph{base} of $C$, denoted $\base(C)$, to be $h_1 \cap K$. Letting $h_0$ denote a supporting hyperplane for $K$ and $C$ parallel to $h_1$, define an \emph{apex} of $C$ to be any point of $h_0 \cap K$.
\begin{figure}
\caption{(a) Caps, widths, and ray distances; (b) extending widths and ray distances through the polar body $K^*$.}
\label{f:widray}
\end{figure}
We define the \emph{absolute width} of cap $C$ to be $\dist(h_1,h_0)$. When a cap does not contain the origin, it will be convenient to define distances in relative terms. Define the \emph{relative width} of such a cap $C$, denoted $\width_K(C)$, to be the ratio $\dist(h_1,h_0) / \dist(O,h_0)$ and, to simplify notation, define $\width_K(h_1) = \width_K(\pcap{K}{h_1})$. Observe that as a hyperplane is translated from a supporting hyperplane to the origin, the relative width of its cap ranges from $0$ to a limiting value of $1$.
We also characterize the closeness of a point to the boundary in both absolute and relative terms. Given a point $p_1 \in K$, let $p_0$ denote the point of intersection of the ray $O p_1$ with the boundary of $K$. Define the \emph{absolute ray distance} of $p_1$ to be $\|p_1 p_0\|$, and define the \emph{relative ray distance} of $p_1$, denoted $\ray_K(p_1)$, to be the ratio $\|p_1 p_0\| / \|O p_0\|$. Relative widths and relative ray distances are both affine invariants, and unless otherwise specified, references to widths and ray distances will be understood to be in the relative sense.
We can also define volumes in a manner that is affine invariant. Recall that $\vol(\cdot)$ denotes the standard Lebesgue volume measure. For any region $\Lambda \subseteq K$, define the \emph{relative volume} of $\Lambda$ with respect to $K$, denoted $\vol_K(\Lambda)$, to be $\vol(\Lambda)/\vol(K)$.
With the aid of the polar transformation we can extend the concepts of width and ray distance to objects lying outside of $K$. Consider a hyperplane $h_2$ parallel to $h_1$ that lies beyond the supporting hyperplane $h_0$ (see Figure~\ref{f:widray}(a)). It follows that $h_2^* \in K^*$, and we define $\width_K(h_2) = \ray_{K^*}(h_2^*)$ (see Figure~\ref{f:widray}(b)). Similarly, for a point $p_2 \notin K$ that lies along the ray $O p_1$, it follows that the hyperplane $p_2^*$ intersects $K^*$, and we define $\ray_K(p_2) = \width_{K^*}(p_2^*)$. By properties of the polar transformation, it is easy to see that $\width_K(h_2) = \dist(h_0,h_2) / \dist(O,h_2)$. Similarly, $\ray_K(p_2) = \|p_0 p_2\| / \|O p_2\|$. Henceforth, we will omit references to $K$ when it is clear from context.
Some of our results apply only when we are sufficiently close to the boundary of $K$. Given $\alpha \le \frac{1}{2}$, we say that a cap $C$ is \emph{$\alpha$-shallow} if $\width(C) \le \alpha$, and we say that a point $p$ is \emph{$\alpha$-shallow} if $\ray(p) \le \alpha$. We will simply say \emph{shallow} to mean $\alpha$-shallow, where $\alpha \le \frac{1}{2}$ is a sufficiently small constant.
\arxivonly{
We state some useful technical results on ray distances and cap widths. The missing proofs can be found in~\cite[Section~2.3]{AFM23arxiv}.
\begin{lemma} \label{lem:raydist-width}
Let $C$ be a cap of $K$ that does not contain the origin and let $p$ be a point in $C$. Then $\ray(p) \leq \width(C)$.
\end{lemma}
There are two natural ways to associate a cap with any point $p \in K$. The first is the \emph{minimum volume cap}, which is any cap whose base passes through $p$ of minimum volume among all such caps. For the second, assume that $p \neq O$, and let $p_0$ denote the point of intersection of the ray $O p$ with the boundary of $K$. Let $h_0$ be any supporting hyperplane of $K$ at $p_0$. Take the cap $C$ induced by a hyperplane parallel to $h_0$ passing through $p$. As shown in the following lemma this is the cap of minimum width containing $p$.
\begin{lemma}
\label{lem:min-width-cap}
For any $p \in K \setminus \{O\}$, consider the cap $C$ defined above. Then $\width(C) = \ray(p)$ and further, $C$ has the minimum width over all caps that contain $p$.
\end{lemma}
The next lemma shows that cap widths behave nicely under containment.
\begin{lemma} \label{lem:cap-containment-width}
Let $C_1$ and $C_2$ be two caps not containing the origin such that $C_1 \subseteq C_2$. Then $\width(C_1) \le \width(C_2)$.
\end{lemma}
\begin{proof}
Consider the point of intersection $p$ of the base of $C_1$ with the ray joining $O$ to $C_1$'s apex. By Lemma~\ref{lem:min-width-cap} and the remarks preceding it, $\ray(p) = \width(C_1)$. Since $C_1 \subseteq C_2$, it follows that $p \in C_2$, and so by Lemma~\ref{lem:raydist-width}, $\ray(p) \le \width(C_2)$. The lemma follows.
\end{proof}
Given any cap $C$ and a real $\lambda > 0$, we define its $\lambda$-expansion, denoted $C^{\lambda}$, to be the cap of $K$ cut by a hyperplane parallel to the base of $C$ such that the absolute width of $C^{\lambda}$ is $\lambda$ times the absolute width of $C$. (Notice that the expansion of a cap may contain the origin, and indeed, if the expansion is large enough, it may be the same as $K$.) An easy consequence of convexity is that, for $\lambda \ge 1$, $C^{\lambda}$ is a subset of the region obtained by scaling $C$ by a factor of $\lambda$ about its apex. This implies the following lemma.
\begin{lemma} \label{lem:cap-exp}
Given any cap $C$ and a real $\lambda \ge 1$, $\vol(C^{\lambda}) \leq \lambda^d \vol(C)$.
\end{lemma}
}
\subsection{Macbeath Regions and MNets} \label{s:mac-prop}
Given a convex body $K$ and a point $x \in K$, and a scaling factor $\lambda > 0$, the \emph{Macbeath region} $M_K^\lambda(x)$ is defined as
\[
M_K^\lambda(x) ~= ~ x + \lambda ((K - x) \cap (x - K)).
\]
It is easy to see that $M_K^1(x)$ is the intersection of $K$ with the reflection of $K$ around $x$, and so $M_K^1(x)$ is centrally symmetric about $x$. Indeed, it is the largest centrally symmetric body centered at $x$ and contained in $K$. Furthermore, $M_K^\lambda(x)$ is a copy of $M_K^1(x)$ scaled by the factor $\lambda$ about the center $x$ (see Figure~\ref{f:macbeath-cover}(a)). We will omit the subscript $K$ when the convex body is clear from the context. As a convenience, we define $M(x) = M^1(x)$.
\arxivonly{
We summarize important properties of Macbeath regions and MNets. The missing proofs can be found in~\cite[Section~2.5]{AFM23arxiv} unless indicated otherwise. The first lemma implies that a (shrunken) Macbeath region can act as a proxy for any other (shrunken) Macbeath region overlapping it~\cite{BCP93,ELR70,AFM17a}.
\begin{lemma} \label{lem:mac-mac}
Let $K$ be a convex body and let $\lambda \le \frac{1}{5}$ be any real. If $x, y \in K$ such that $M^{\lambda}(x) \cap M^{\lambda}(y) \neq \emptyset$, then $M^{\lambda}(y) \subseteq M^{4\lambda}(x)$.
\end{lemma}
The following lemmas are useful in situations when we know that a Macbeath region overlaps a cap of $K$, and allow us to conclude that a constant factor expansion of the cap will fully contain the Macbeath region. The first applies to shrunken Macbeath regions and the second to Macbeath regions with any scaling factor. The proof of the first appears in~\cite{AFM17c} (Lemma~2.5), and the second is an immediate consequence of the definition of Macbeath regions.
\begin{lemma} \label{lem:mac-cap}
Let $K$ be a convex body. Let $C$ be a cap of $K$ and $x$ be a point in $K$ such that $C \cap M^{1/5}(x) \neq \emptyset$. Then $M^{1/5}(x) \subseteq C^2$.
\end{lemma}
\begin{lemma} \label{lem:mac-cap-var}
Let $K$ be a convex body and $\lambda > 0$. If $x$ is a point in a cap $C$ of $K$, then $M^\lambda(x) \cap K \subseteq C^{1+\lambda}$.
\end{lemma}
The next three lemmas relate the volume of caps and associated Macbeath regions.
\begin{lemma}[B{\'a}r{\'a}ny~\cite{Bar07}] \label{lem:min-vol-cap1}
Given a convex body $K \subseteq \mathbb{R}^d$, let $C$ be a $\frac{1}{3}$-shallow cap of $K$, and let $p$ be the centroid of $\base(C)$. Then $C \subseteq M^{2d}(p)$.
\end{lemma}
\begin{lemma}
\label{lem:wide-cap}
Let $0 < \beta < 1$ be any constant. Let $K \subseteq \mathbb{R}^d$ be a well-centered convex body, $p \in K$, and $C$ be the minimum volume cap associated with $p$. If $C$ contains the origin or $\width(C) \ge \beta$, then $\vol_K(M(p)) = \Omega(1)$.
\end{lemma}
\begin{lemma} \label{lem:min-vol-cap2}
Given a convex body $K \subseteq \mathbb{R}^d$, let $C$ be a $\frac{1}{3}$-shallow cap of $K$, and let $p$ be the centroid of $\base(C)$. Then $\vol(M(p)) = \Theta(\vol(C))$.
\end{lemma}
In the next lemma, we show that the width of the minimum volume cap for $p$ is within a constant factor of the ray distance of $p$.
\begin{lemma} \label{lem:min-vol-cap3}
Let $K$ be a convex body, $p \in K$, and $C$ be the minimum volume cap associated with $p$. If $C$ is $\frac{1}{3}$-shallow, then $\width(C) \le (2d+1) \cdot \ray(p)$.
\end{lemma}
\begin{proof}
We may assume that $\ray(p) \leq 1/(3(2d+1))$, since otherwise the lemma holds trivially. By a well-known property of minimum volume caps, $p$ is the centroid of the base of $C$~\cite{ELR70}. By Lemma~\ref{lem:min-vol-cap1}, we have $C \subseteq M^{2d}(p)$. By definition, $C \subseteq K$, and so $C \subseteq M^{2d}(p) \cap K$. Applying Lemma~\ref{lem:mac-cap-var} to point $p$ and the minimum width cap $W$ for $p$, we have $M^{2d}(p) \cap K \subseteq W^{2d+1}$. Thus $C \subseteq W^{2d+1}$. By Lemma~\ref{lem:min-width-cap}, $\width(W) = \ray(p)$, and so $\width(W^{2d+1}) = (2d+1) \ray(p)\leq 1/3$. Since $C$ and $W^{2d+1}$ are both $(1/3)$-shallow, and $C \subseteq W^{2d+1}$, it follows from Lemma~\ref{lem:cap-containment-width} that $\width(C) \le \width(W^{2d+1})$. Thus $\width(C) \le (2d+1) \ray(p)$, as desired.
\end{proof}
The next lemma states lower and upper bounds on the relative volume of a Macbeath region based on the width of the associated cap or the ray distance of its center.
\begin{lemma} \label{lem:vol-mac-bounds}
Let $\varepsilon > 0$ be sufficiently small and let $K \subseteq \mathbb{R}^d$ be a well-centered convex body. Then:
\begin{enumerate}
\item[$(i)$] Let $M$ be a Macbeath region centered at the centroid of the base of a cap $C \subseteq K$ of width $\varepsilon$. Then $\vol_K(M) = O(\varepsilon)$ and $\vol_K(M) = \Omega(\varepsilon^d)$.
\item[$(ii)$] Let $M$ be a Macbeath region centered at a point $x \in K$ whose ray distance is $\varepsilon$. Then $\vol_K(M) = O(\varepsilon)$ and $\vol_K(M) = \Omega(\varepsilon^d)$.
\end{enumerate}
\end{lemma}
\begin{proof}
By Lemma~\ref{lem:mac-cap-var}, $M \subseteq C^2$. Also, $C^2 \subseteq S_K$, where $S_K = K \setminus (1-2\varepsilon)K$. Thus
\[
\vol_K(M) \leq \vol_K(C^2) \le \vol_K(S_K) = 1 - (1-2\varepsilon)^d = O(\varepsilon).
\]
Similarly, in part (ii), by considering the cap defined by the supporting hyperplane of $(1-\varepsilon)K$ at $x$, we can show that $\vol_K(M) = O(\varepsilon)$.
Next we show the lower bound on $\vol_K(M)$ in part (i). Let $y$ denote the point $\psi(C) \in K^*$ and let $M'$ denote the Macbeath region $M^{1/5}_{K^*}(y)$. Note that $\ray_{K^*}(y) = \varepsilon$. As the cap $C$ and Macbeath region $M'$ satisfy the conditions of Lemma~\ref{lem:mahler-mac}, we have $\vol_K(C) \cdot \vol_{K^*}(M') = \Omega(\varepsilon^{d+1})$. By the upper bound in (ii), we have $\vol_{K^*}(M') = O(\varepsilon)$ and thus $\vol_K(C) = \Omega(\varepsilon^d)$. Also, by Lemma~\ref{lem:min-vol-cap2}, we have $\vol_K(M) = \Omega(\vol_K(C))$. Thus $\vol_K(M) = \Omega(\varepsilon^d)$, as desired. Similarly, we can establish the lower bound in part (ii).
\end{proof}
}
The following lemma states that points in a shrunken Macbeath region all have similar ray distances. \confonly{The proof appears in~\cite[Section~2.5]{AFM23arxiv}.}
\begin{lemma} \label{lem:core-ray}
Let $K$ be a convex body. If $x$ is a $\frac{1}{2}$-shallow point in $K$ and $y \in M^{1/5}(x)$, then $\ray(x)/2 \leq \ray(y) \leq 2 \ray(x)$.
\end{lemma}
The next lemma shows that translated copies of a Macbeath region act as proxies for Macbeath regions in the vicinity.
\confonly{The proof appears in the full version~\cite[Section~3.3]{SoCG23arxiv}.}
\begin{lemma} \label{lem:mac-trans}
Let $\lambda \le 1/2$ and $\gamma \le 1/10$. Let $x$ be a point in a convex body $K$. Let $R = M(x)-x$. Let $y$ be a point in $x + \lambda R$. Then $y + \gamma R \subseteq M^{2\gamma}(y)$.
\end{lemma}
\arxivonly{
\begin{proof}
Treating $x$ as the origin, we have $R \subseteq K$ and $y \in \lambda R$. It follows that $y + (1-\lambda)R \subseteq K$. Recall that the Macbeath region $M(y)$ is the maximal centrally symmetric convex body centered at $y$ and contained within $K$. Thus $y + (1-\lambda) R \subseteq M(y)$. This implies that $y + \gamma R \subseteq M^{\frac{\gamma}{1-\lambda}}(y) \subseteq M^{2\gamma}(y)$.
\end{proof}
}
We employ Macbeath region-based coverings in our polytope approximation scheme. In particular, we employ the concept of MNets, as defined in~\cite{AFM23}. Let $K \subseteq \mathbb{R}^d$ be a convex body, let $\Lambda$ be an arbitrary subset of $\interior(K)$, and let $c \geq 5$ be any constant. Given $X \subseteq K$, define $\mathscr{M}_K^{\lambda}(X) = \{ M_K^{\lambda}(x) : x \in X\}$. Define a \emph{$(K, \Lambda,c)$-MNet} to be any maximal set of points $X \subseteq \Lambda$ such that the shrunken Macbeath regions $\mathscr{M}_K^{1/4c}(X)$ are pairwise disjoint. We refer to $c$ as the expansion factor of the MNet. The following lemma, proved in~\cite{AFM23}, summarizes the key properties of MNets.
\begin{lemma}[\cite{AFM23}] \label{lem:delone}
Given a convex body $K \subseteq \mathbb{R}^d$, $\Lambda \subset \interior(K)$, and $c \ge 5$, a $(K,\Lambda,c)$-MNet $X$ satisfies the following properties:
\begin{itemize}
\item (Packing) The elements of $\mathscr{M}_K^{1/4c}(X)$ are pairwise disjoint.
\item (Covering) The union of $\mathscr{M}_K^{1/c}(X)$ covers $\Lambda$.
\item (Buffering) The union of $\mathscr{M}_K(X)$ is contained within $K$.
\end{itemize}
\end{lemma}
For the purposes of this paper, $c$ will be any sufficiently large constant, specifically $c \geq 5$. To simplify notation, we use $(K,\Lambda)$-MNet to refer to such an MNet.
As mentioned before, we reduce our polytope approximation problem to that of finding a polytope which is sandwiched between two convex bodies. In turn we tackle this problem using MNets as indicated in the next lemma.
\confonly{The proof appears in the full version~\cite[Section~3.3]{SoCG23arxiv}.}
\begin{lemma} \label{lem:MNet-approx}
Let $K_0 \subset K_1$ be two convex bodies. Let $X$ be a $(K_1,\partial K_0)$-MNet. Then there exists a polytope $P$ with $O(|X|)$ vertices such that $K_0 \subseteq P \subseteq K_1$.
\end{lemma}
\arxivonly{
\begin{proof}
Define a \emph{half-ellipsoid} to be the intersection of an ellipsoid with a halfspace whose bounding hyperplane passes through its center. Let $c$ be the expansion factor of the MNet $X$. For each Macbeath region $M \in \mathscr{M}^{1/c}(X)$, choose a net~\cite{Mus22} so that, for a suitable constant $c'$, any half-ellipsoid contained within $M$ of volume at least $c' \vol(M)$ contains at least one point of the net. It follows from standard results~\cite{BEHM89,Har11} that half-ellipsoids have constant VC-dimension, and so the size of the resulting net is $O(1)$. The polytope $P$ is defined to be the convex hull of the points of the nets associated with all the Macbeath regions of $\mathscr{M}^{1/c}(X)$.
We claim that $K_0 \subset P \subset K_1$. The second containment follows from the fact that the Macbeath regions of $\mathscr{M}^{1/c}(X)$ are contained within $K_1$. To prove that $K_0 \subset P$, we will show that our construction chooses a point in every cap of $K_1$ induced by a supporting hyperplane of $K_0$. Towards this end, let $h$ be a supporting hyperplane at point $p \in \partial K_0$, let $H$ be the halfspace bounded by $h$ and not containing $K_0$, and let $C$ be the cap $K_1 \cap H$. Let $M_p = M^{1/4c}(p)$. By the packing property of MNets, there is a point $x \in X$ such that $M_p \cap M_x \neq \emptyset$, where $M_x = M^{1/4c}(x)$. Letting $M'_x = M^{1/c}(x)$ and applying Lemma~\ref{lem:mac-mac}, we have $M_p \subseteq M'_x$ and $M_x \subseteq M'_p$. Thus, $\vol(M_p) = \Omega(\vol(M'_p)) = \Omega(\vol(M_x))$. By John's Theorem~\cite{Joh48}, $M_p$ contains an ellipsoid $E$ centered at $p$, such that $\vol(E) = \Omega(\vol(M_p))$. Putting it together, it follows that the half-ellipsoid $E' = E \cap H$ has volume $\Omega(\vol(M_x))$. Since $\vol(M'_x) = O(\vol(M_x))$, it follows that a point of the net constructed for $M'_x$ is contained in $E'$ (for sufficiently small constant $c'$). Noting that $E' \subseteq C$ completes the proof.
\end{proof}
}
\confonly{
The following lemma bounds the sizes of MNets in important special cases involving points at roughly the same ray distance. These bounds will be useful in obtaining our volume-sensitive bounds. The proof appears in the full version~\cite[Section~4]{SoCG23arxiv}.
\begin{lemma} \label{lem:fixed-ray}
Let $0 < \varepsilon \leq 1/2$ be sufficiently small and let $K \subseteq \mathbb{R}^d$ be a well-centered convex body. Let $\Lambda$ be the points of $K$ at ray distances between $\varepsilon$ and $2\varepsilon$, and let $X$ be a $(K,\Lambda)$-MNet. Then:
\begin{enumerate}
\item[$(i)$] $|X| = O(1/\varepsilon^{(d-1)/2})$.
\item[$(ii)$] For any positive real $f \le 1$, let $X_f \subseteq K$ be such that the total relative volume of the Macbeath regions of $\mathscr{M}^{1/4c}(X_f)$ is $O(f\varepsilon)$. Then $|X_f| = O(\sqrt{f}/\varepsilon^{(d-1)/2})$.
\end{enumerate}
\end{lemma}
}
\subsection{Concepts from Projective Geometry} \label{s:projective}
In this section we present some relevant standard concepts from projective geometry. For further details see any standard reference (e.g., \cite{Ric11}). Given four collinear points, $a,b,c,d$ (not necessarily in this order), the \emph{cross ratio} $(a,b;c,d)$ is defined to be $(\|ac\| / \|ad\|) / (\|bc\| / \|bd\|)$, where these are understood to be signed distances determined by the orientations of the segments along the line. We follow the convention of using symbols $a,b,c,d,\ldots$ for points, and the distinction from other uses (such as $d$ for the dimension) should be clear from the context.
It is well known that cross ratios are preserved under projective transformations. If the cross ratio $(a,b;c,d)$ is $-1$, we say that this quadruple of points forms a \emph{harmonic bundle} (see Figure~\ref{f:harmonic-bundle}). This is an important special case which occurs frequently in constructions. In this case, the points lie on the line in the order of $a, d, b, c$ and the ratio in which $a$ divides $c$ and $d$ externally (i.e., $\|ac\| / \|ad\|$) is the same as the ratio in which $b$ divides $c$ and $d$ internally (i.e., $\|bc\| / \|bd\|$). The sign is negative since $bc$ and $bd$ have opposite directions. If the point $a$ is at infinity, the cross ratio degenerates to $\|bd\|/\|bc\|$, implying that $b$ is midway between $c$ and $d$.
\begin{figure}
\caption{Harmonic bundle (from the quadrilateral construction~\cite{Ric11}).}
\label{f:harmonic-bundle}
\end{figure}
\arxivonly{
Given a convex body $K$, it induces a cross ratio and a distance metric, called the \emph{Hilbert distance}, for any two points in its interior. This notion was introduced by David Hilbert as a generalization of Cayley's formula for distance in the Cayley-Klein model of hyperbolic geometry~\cite{Hil95}. Given two points $x,y$ in the interior of $K$, suppose that the line passing through $x$ and $y$ intersects $\partial K$ at points $a$ and $b$ such that $a,x,y,b$ appear in this order on the line. Then the Hilbert distance between $x$ and $y$, induced by $K$, is defined as $d_K(x,y) = \inv{2} \ln (x,y;b,a)$. As a convenience, we define the cross ratio between $x$ and $y$, induced by $K$, to be $r_K(x,y) = -(a,y;b,x)$. It is easy to verify that $(x,y;b,a) \geq 1$. By standard results, $(a,y;b,x) = 1 - (x,y;b,a)$, which implies that $r_K(x,y) = (x,y;b,a) - 1$. Thus $r_K(x,y) \geq 0$. Note also that the Hilbert distance can be equivalently expressed as $d_K(x,y) = \frac{1}{2} \ln(1+r_K(x,y))$.
Let $B_H(x,r)$ denote a Hilbert ball of radius $r$ centered at $x$. An important property of Macbeath regions is that they can act as proxies to Hilbert balls~\cite{AbM18,VeW16}.
\begin{lemma} \label{lem:nesting}
Let $K$ be a convex body and let $x$ be a point in $K$. For any $0 < \lambda < 1$,
\[
B_H\left(x, \frac{1}{2} \ln(1+\lambda)\right)
~\subseteq~ M^{\lambda}(x)
~\subseteq~ B_H\left(x, \frac{1}{2} \ln\frac{1+\lambda}{1-\lambda}\right).
\]
\end{lemma}
Recalling that the Hilbert distance $d_K(x,y) = \frac{1}{2} \ln(1+r_K(x,y))$, we have the following corollary from the first containment in the statement of the lemma.
\begin{corollary} \label{cor:nesting}
Let $K$ be a convex body and let $x$ be a point in $K$. For any $0 < \lambda < 1$, any point $y \in K$ that is not contained in the interior of $M^{\lambda}(x)$ satisfies $r_K(x,y) \ge \lambda$.
\end{corollary}
}
\subsection{Intermediate Bodies}\label{s:am-hm}
In this section we explore the concept of relative fatness, which was introduced in Section~\ref{s:techniques}. Given two convex bodies $K_0$ and $K_1$ such that $K_0 \subset K_1$ and $0 < \gamma < 1$, we say that $K_0$ is \emph{relatively $\gamma$-fat} with respect to $K_1$ if, for any point $p \in \partial K_0$, and any scaling factor $0 < \lambda \leq 1$, at least a constant fraction $\gamma$ of the volume of the Macbeath region $M = M_{K_1}^{\lambda}(p)$ lies within $K_0$, that is, $\vol(M \cap K_0)/\vol(M) \geq \gamma$.
We say that $K_0$ is \emph{relatively fat} with respect to $K_1$ if it is relatively $\gamma$-fat for some constant $\gamma$. Relative fatness will play an important role in our analyses. Since an arbitrary nested pair $K_0 \subset K_1$ may not necessarily satisfy this property, it will be useful to define an intermediate body sandwiched between $K_0$ and $K_1$ that does.
There are a few natural ways to define such an intermediate body. Given two convex bodies $K_0$ and $K_1$, where $K_0 \subseteq K_1$, the \emph{arithmetic-mean body}, $K_A(K_0, K_1)$, is defined to be the convex body $\frac{1}{2}(K_0 \oplus K_1)$, where ``$\oplus$'' denotes Minkowski sum. Equivalently, for any unit vector $u$ consider the two supporting halfspaces of $K_0$ and $K_1$ orthogonal to $u$, and take the halfspace that is midway between the two. The arithmetic-mean body is obtained by intersecting such halfspaces for all unit vectors $u$.
\begin{figure}
\caption{(a) The arithmetic-mean body $K_A(K_0,K_1)$; (b) the harmonic-mean body $K_H(K_0,K_1)$.}
\label{f:arihar}
\end{figure}
Another natural choice arises from a polar viewpoint. Assume that $K_0 \subset K_1$ and the origin $O \in \interior(K_0)$. The \emph{harmonic-mean body}, $K_H(K_0, K_1)$, was introduced by Firey~\cite{Fir61} and is defined as follows. For any ray $r$ from the origin $O$, let $b_r$ and $d_r$ denote the points of intersection of $r$ with $\partial K_0$ and $\partial K_1$, respectively (see Figure~\ref{f:arihar}(b)). Let $c_r$ be the point on the ray such that $1/\|O c_r\| = (1/\|O b_r\| + 1/\|O d_r\|)/2$. Equivalently, the cross ratio $(O, c_r; d_r, b_r)$ equals $-1$, that is, this quadruple forms a harmonic bundle. Clearly, $c_r$ lies between $b_r$ and $d_r$, and hence the union of these points over all rays $r$ defines the boundary of a body that is sandwiched between $K_0$ and $K_1$. This body is the harmonic-mean body. By considering the supporting hyperplanes orthogonal to the ray $r$, it is easy to see that the arithmetic-mean body of $K_0$ and $K_1$ is mapped to the harmonic-mean body of $K_0^*$ and $K_1^*$ under polarity, that is, $(K_A(K_0, K_1))^* = K_H(K_0^*, K_1^*)$. Therefore, $K_H(K_0, K_1)$ is convex. When $K_0$ and $K_1$ are clear from context, we will just write $K_A$ and $K_H$, omitting references to their arguments.
In order to understand why these intermediate bodies are useful to us, recall the diamond and square bodies $K_0$ and $K_1$ from Figure~\ref{f:rel-fat} (see Figure~\ref{f:rel-fat-har}(a)). Recall the issue that a large fraction of the volume of the Macbeath region $M^{1/2}_{K_1}(x)$ lies outside of $K_0$. If we replace $K_1$ with $K_H = K_H(K_0, K_1)$ and compute the Macbeath region with respect to $K_H$ instead (see Figure~\ref{f:rel-fat-har}(b) and (c)), we see that a constant fraction of the volume of the Macbeath region lies within $K_0$ and so relative fatness is satisfied.
\begin{figure}
\caption{(a) The bodies $K_0$ and $K_1$ from Figure~\ref{f:rel-fat}; (b), (c) computing Macbeath regions with respect to the harmonic-mean body $K_H$ places a constant fraction of their volume within $K_0$.}
\label{f:rel-fat-har}
\end{figure}
In Section~\ref{s:hm-fat}, we will present an important result by showing that the inner body $K_0$ is relatively fat with respect to the harmonic-mean body $K_H(K_0, K_1)$. The proof makes heavy use of concepts from projective geometry, such as the harmonic bundle. This fact will be critical to establishing the volume-sensitive bounds given in this paper.
\arxivonly{
\section{Bounding MNet Sizes}
\label{a:mnet-sizes}
In this section, we bound the sizes of MNets in important special cases involving points at roughly the same ray distance. These bounds will be useful in obtaining our volume-sensitive bounds. We begin by recalling definitions and technical tools from~\cite{AFM23arxiv}. We say that two caps $C_1$ and $C_2$ are \emph{$\lambda$-similar} for $\lambda \ge 1$, if $C_1 \subseteq C_2^{\lambda}$ and $C_2 \subseteq C_1^{\lambda}$. If two caps are $\lambda$-similar for constant $\lambda$, we say that the caps are \emph{similar}. Note that this is an affine-invariant notion of closeness between caps.
Arya {\textit{et al.}}~\cite[Section~2.6 and Section~3.2]{AFM23arxiv} showed certain important relationships between caps in $K$ and associated Macbeath regions in $K^*$. In order to state their result, consider the following mapping. Consider a point $z \in K^*$. Let $\hat{z} \not\in K^*$ be the point on the ray $Oz$ such that $\ray(\hat{z}) = \varepsilon$. The dual hyperplane $\hat{z}^*$ intersects $K$, and so induces a cap, which we call $z$'s \emph{$\varepsilon$-representative cap}. They showed that points lying within the same shrunken Macbeath regions have similar representative caps, which implies Lemma~\ref{lem:mahler-mac}(i). Further, by extending and generalizing results in \cite{AAFM22,AFM12b,NNR20}, they established a Mahler-type reciprocal relationship between the volume of caps in $K$ and the associated Macbeath regions in $K^*$. This is stated in Lemma~\ref{lem:mahler-mac}(ii).
\begin{lemma}[\cite{AFM23arxiv}] \label{lem:mahler-mac}
Let $0 < \varepsilon \leq \frac{1}{16}$ and let $K \subseteq \mathbb{R}^d$ be a well-centered convex body. Let $C$ be a cap of $K$ such that $\varepsilon/2 \leq \width(C) \leq 2\varepsilon$. Suppose that the ray shot from the origin orthogonal to the base of $C$ intersects a Macbeath region $M = M^{1/5}(y)$ of $K^*$, where $\ray(y) = \varepsilon$ (see Figure~\ref{f:sandwich}(b)). Then:
\begin{enumerate}
\item[$(i)$] The cap $C$ and the $\varepsilon$-representative cap of any point $z \in M$ are 16-similar.
\item[$(ii)$] $\vol_K(C) \cdot \vol_{K^*}(M) = \Omega(\varepsilon^{d+1})$.
\end{enumerate}
\end{lemma}
\begin{figure}
\caption{The correspondence between caps of $K$ and Macbeath regions of the polar body $K^*$ (see Lemma~\ref{lem:mahler-mac}).}
\label{f:sandwich}
\end{figure}
Next we present a general tool which will be useful in bounding the sizes of the MNets of interest to us. Let $K \subseteq \mathbb{R}^d$ be a well-centered convex body. For any shallow cap $C$ of $K$, define a point $\psi(C)$ in $K^*$ as follows. In the polar space, consider the ray shot from $O$ orthogonal to the base of $C$. We let $\psi(C) \in K^*$ be the point on this ray with ray distance $\width(C)$.
Let $\mathscr{C}$ be a set of shallow caps of $K$, let $\Lambda \subseteq K$ denote the set of centroids of the bases of the caps of $\mathscr{C}$, and let $\Lambda' = \{\psi(C) : C \in \mathscr{C}\}$. Let $X$ be a $(K,\Lambda)$-MNet, and let $Y$ be a $(K^*,\Lambda')$-MNet. For each $x \in \Lambda$, let $C_x$ denote a cap of $\mathscr{C}$ such that $x$ is the centroid of its base. (Clearly, such a cap exists. If there is more than one, then we choose one arbitrarily.) Also, for each $x \in X$, define $M_x = M_K^{1/4c}(x)$, where $c$ is the expansion factor of the MNets. Similarly, for $y \in Y$, define $M_y = M_{K^*}^{1/4c}(y)$. The following lemma shows that it is possible to construct a bipartite graph $(X,Y)$ with certain properties.
\begin{lemma} \label{lem:bipartite}
Given a well-centered convex body $K \subseteq \mathbb{R}^d$, and the entities $\mathscr{C}, \Lambda, \Lambda', X, Y$ as defined above, there is a bipartite graph $(X,Y)$ such that there is exactly one edge incident to each vertex of $X$ and the degree of each vertex of $Y$ is $O(1)$. Furthermore, for any $x \in X$ and $y \in Y$, if there is an edge $(x,y)$, then $\vol_K(M_x) \cdot \vol_{K^*}(M_y) = \Omega(\delta^{d+1})$, where $\delta = \width(C_x)$.
\end{lemma}
\begin{proof}
First we show how to construct the bipartite graph $(X,Y)$. Let $x$ be any point of $X$ and let $y' = \psi(C_x)$. By the covering property of MNets, there exists $y \in Y$ such that $M^{1/c}(y)$ contains $y'$. We add an edge in the bipartite graph between $x$ and $y$. It follows from our construction that there is exactly one edge incident to each vertex of $X$.
Next we show that if there is an edge $(x,y)$, then $\vol_K(M_x) \cdot \vol_{K^*}(M_y) = \Omega(\delta^{d+1})$. By definition, $\ray(y') = \width(C_x) = \delta$. Letting $\varepsilon = \ray(y)$ and applying Lemma~\ref{lem:core-ray}, we have $\varepsilon/2 \leq \ray(y') \leq 2 \varepsilon$. Thus $\varepsilon/2 \leq \width(C_x) \leq 2 \varepsilon$. Observe that the cap $C_x$ and the Macbeath region $M^{1/5}(y)$ satisfy the conditions of Lemma~\ref{lem:mahler-mac}. Recalling that $c$ is a constant $\geq 5$, and $M_y$ and $M^{1/c}(y)$ differ by a constant scaling factor, by part (ii) of this lemma, we have $\vol_K(C_x) \cdot \vol_{K^*}(M_y) = \Omega(\varepsilon^{d+1})$. Also, by Lemma~\ref{lem:min-vol-cap2}, $\vol(M_x) = \Omega(\vol(C_x))$. Thus $\vol_K(M_x) \cdot \vol_{K^*}(M_y) = \Omega(\varepsilon^{d+1})$.
It remains to prove that the degree of each vertex of $Y$ is $O(1)$. Let $y$ be any vertex of $Y$ and let $\varepsilon = \ray(y)$. For any edge $(x,y)$, we showed above that the cap $C_x$ and the Macbeath region $M^{1/5}(y)$ satisfy the conditions of Lemma~\ref{lem:mahler-mac}. By part (i) of this lemma, it follows that the cap $C_x$ and the $\varepsilon$-representative cap of $y$ are $16$-similar.
Letting $C_y$ denote the $\varepsilon$-representative cap of $y$, we have $C_x \subseteq C_y^{16}$ and $C_y \subseteq C_x^{16}$. Applying Lemma~\ref{lem:cap-exp}, we have $\vol(C_x) = \Omega(\vol(C_x^{16})) = \Omega(\vol(C_y))$, and by Lemma~\ref{lem:min-vol-cap2}, we have $\vol(M_x) = \Omega(\vol(C_x))$. Thus, $\vol(M_x) = \Omega(\vol(C_y))$. Recall that half of the Macbeath region $M_x$ lies inside $C_x$, and hence inside $C_y^{16}$. By Lemma~\ref{lem:cap-exp}, $\vol(C_y^{16}) = O(\vol(C_y))$. Since the Macbeath regions of $\mathscr{M}^{1/4c}(X)$ are disjoint, a straightforward packing argument implies that $y$ has $O(1)$ neighbors.
\end{proof}
Expressing the total number of edges in the graph as the sum of the degrees of the vertices of $Y$, we see this quantity is $O(|Y|)$. The following corollary is immediate.
\begin{corollary}
\label{cor:bipartite}
Given a well-centered convex body $K \subseteq \mathbb{R}^d$, and the entities $\mathscr{C}, \Lambda, \Lambda', X, Y$ as defined above, we have $|X| = O(|Y|)$.
\end{corollary}
We are now ready to bound the sizes of MNets in the important special case involving caps of roughly the same width, which map in the polar to points at roughly the same ray distance. Lemmas~\ref{lem:fixed-width} and \ref{lem:fixed-ray} bound the sizes of MNets in these cases. We also bound the cardinality of important subsets that arise in our applications.
\begin{lemma} \label{lem:fixed-width}
Let $0 < \varepsilon \leq 1/2$ and let $K \subseteq \mathbb{R}^d$ be a well-centered convex body. Let $\mathscr{C}$ be the set of caps of $K$ of width between $\varepsilon$ and $2\varepsilon$, let $\Lambda \subseteq K$ denote the set of centroids of the bases of the caps of $\mathscr{C}$, and let $X$ be a $(K,\Lambda)$-MNet. Then:
\begin{enumerate}
\item[$(i)$] $|X| = O(1/\varepsilon^{(d-1)/2})$.
\item[$(ii)$] For any positive real $f \le 1$, let $X_f \subseteq X$ be such that the total relative volume of the Macbeath regions of $\mathscr{M}^{1/4c}(X_f)$ is $O(f \varepsilon)$. Then $|X_f|$ is $O(\sqrt{f} / \varepsilon^{(d-1)/2})$.
\end{enumerate}
\end{lemma}
Note that if $f$ is $o(\varepsilon^{d-1})$, then $\sqrt{f}/\varepsilon^{(d-1)/2}$ is $o(1)$ and so $X_f = \emptyset$.
\begin{proof}
If $\varepsilon > \varepsilon_0$, where $\varepsilon_0$ is any constant, we can show that $|X| = O(1)$ as follows. Associate a minimum volume cap $C_x$ with each point $x \in X$. Recall that $x$ is the centroid of the base of $C_x$. Let $M_x = M^{1/4c}(x)$. By Lemma~\ref{lem:wide-cap}, if the width of $C_x$ exceeds a constant, then $\vol_K(M_x) = \Omega(1)$. Thus, the number of points $x \in X$ such that $\width(C_x) > \varepsilon_0$ is at most $O(1)$ and the lemma follows trivially.
In the remainder of the proof, we will assume that $\varepsilon \leq \varepsilon_0$, where $\varepsilon_0$ is a sufficiently small constant. Let $\Lambda'$ be the points in $K^*$ at ray distances between $\varepsilon$ and $2\varepsilon$, and let $Y$ be a $(K^*,\Lambda')$-MNet. Note that $\Lambda' = \{\psi(C) : C \in \mathscr{C}\}$, where $\psi$ is as defined above. Thus the entities $\mathscr{C}, \Lambda, \Lambda', X, Y$ satisfy the preconditions of Lemma~\ref{lem:bipartite}.
Arguing as in Lemma~\ref{lem:vol-mac-bounds}, we can show that all the Macbeath regions of $\mathscr{M}^{1/4c}(X)$ lie in the shell $S_K = K \setminus (1-4\varepsilon)K$, all the Macbeath regions of $\mathscr{M}^{1/4c}(Y)$ lie in the shell $S_{K^*} = K^* \setminus (1-4\varepsilon)K^*$, $\vol_K(S_K) = O(\varepsilon)$ and $\vol_{K^*}(S_{K^*}) = O(\varepsilon)$.
Define the \emph{fractional volume} of a Macbeath region $M \in \mathscr{M}^{1/4c}(X)$, denoted $\vol_f(M)$, to be $\vol(M) / \vol(S_K)$. Similarly, for $M \in \mathscr{M}^{1/4c}(Y)$, define $\vol_f(M) = \vol(M) / \vol(S_{K^*})$. Consider the bipartite graph with vertex sets $X$ and $Y$ described in Lemma~\ref{lem:bipartite}. Recall that there is exactly one edge incident to each vertex of $X$ and the degree of each vertex of $Y$ is $O(1)$. Further, if there is an edge $(x,y)$, then $\vol_K(M_x) \cdot \vol_{K^*}(M_y) = \Omega(\varepsilon^{d+1})$. Thus
\[
\vol_f(M_x) \cdot \vol_f(M_y) =
\Omega\left(\frac{\vol_K(M_x)}{\vol_K(S_K)} \cdot \frac{\vol_{K^*}(M_y)}{\vol_{K^*}(S_{K^*})} \right) =
\Omega\left(\frac{\varepsilon^{d+1}}{\varepsilon \cdot \varepsilon} \right) =
\Omega\left(\varepsilon^{d-1}\right).
\]
It follows that the quantity $\vol_f(M_x) + \vol_f(M_y)$ is $\Omega(\varepsilon^{(d-1)/2})$ for any edge $(x,y)$. Summing this quantity over all the edges in the graph, we obtain a lower bound of $\Omega(|X| \, \varepsilon^{(d-1)/2})$. To upper bound this quantity, note that by disjointness, $\sum_{x \in X} \vol_f(M_x) = O(1)$, $\sum_{y \in Y} \vol_f(M_y) = O(1)$, and the degree of each vertex is $O(1)$. Thus, the sum of this quantity over all the edges is $O(1)$. The lower and upper bounds together imply that $|X| = O(1/\varepsilon^{(d-1)/2})$.
The proof of (ii) is similar to (i). (In fact, (i) is a special case of (ii) for $f=1$.) By Lemma~\ref{lem:vol-mac-bounds}(i), the relative volume of any Macbeath region of $\mathscr{M}^{1/4c}(X)$ is $\Omega(\varepsilon^d)$. It follows that if $f = o(\varepsilon^{d-1})$ then $X_f = \emptyset$ and so (ii) holds. We may therefore assume that $f = \Omega(\varepsilon^{d-1})$. Letting $S'_K \subseteq S_K$ denote the union of the Macbeath regions of $\mathscr{M}^{1/4c}(X_f)$, we are given that $\vol_K(S'_K) = O(f \varepsilon)$. We modify the definition of fractional volume of a Macbeath region $M \in \mathscr{M}^{1/4c}(X_f)$, denoted $\vol_f(M)$, to be $\vol(M) / \vol(S'_K)$. Note that we keep the same definition of fractional volume for the Macbeath regions of $\mathscr{M}^{1/4c}(Y)$, that is, for $M \in \mathscr{M}^{1/4c}(Y)$, $\vol_f(M) = \vol(M) / \vol(S_{K^*})$. Arguing as in (i), but using the bound $\vol_K(S'_K) = O(f \varepsilon)$ in place of $\vol_K(S_K) = O(\varepsilon)$, it follows that for any edge $(x,y)$ such that $x \in X_f$ and $y \in Y$, we have
\[
\vol_f(M_x) \cdot \vol_f(M_y) = \Omega\left(\frac{\varepsilon^{d-1}}{f} \right).
\]
Thus $\vol_f(M_x) + \vol_f(M_y) = \Omega(\varepsilon^{(d-1)/2}/\sqrt{f})$ for any such edge $(x,y)$. As in (i), summing this quantity over all the edges incident to the vertices of $X_f$, we obtain a lower bound of $\Omega(|X_f| \, \varepsilon^{(d-1)/2}/\sqrt{f})$, and an upper bound of $O(1)$. The lower and upper bounds together imply that $|X_f| = O(\sqrt{f}/\varepsilon^{(d-1)/2})$, as desired.
\end{proof}
The following lemma is analogous to Lemma~\ref{lem:fixed-width}, but for points at similar ray distances. We will use this lemma in Section~\ref{s:hausdorff} together with the relative fatness properties of the harmonic-mean body to establish our volume-sensitive bound.
\begin{lemma} \label{lem:fixed-ray}
Let $0 < \varepsilon \leq 1/2$ be sufficiently small and let $K \subseteq \mathbb{R}^d$ be a well-centered convex body. Let $\Lambda$ be the points of $K$ at ray distances between $\varepsilon$ and $2\varepsilon$, and let $X$ be a $(K,\Lambda)$-MNet. Then:
\begin{enumerate}
\item[$(i)$] $|X| = O(1/\varepsilon^{(d-1)/2})$.
\item[$(ii)$] For any positive real $f \le 1$, let $X_f \subseteq X$ be such that the total relative volume of the Macbeath regions of $\mathscr{M}^{1/4c}(X_f)$ is $O(f\varepsilon)$. Then $|X_f| = O(\sqrt{f}/\varepsilon^{(d-1)/2})$.
\end{enumerate}
\end{lemma}
Note that if $f$ is $o(\varepsilon^{d-1})$, then $\sqrt{f}/\varepsilon^{(d-1)/2}$ is $o(1)$ and so $X_f = \emptyset$.
\begin{proof}
We associate a minimum volume cap $C_x$ with each point $x \in X$. Recall that $x$ is the centroid of the base of $C_x$. Let $M_x = M^{1/4c}(x)$. By Lemma~\ref{lem:wide-cap}, if the width of $C_x$ exceeds a constant, say $1/3$, then $\vol_K(M_x) = \Omega(1)$. Thus, the number of points $x \in X$ such that $\width(C_x) > 1/3$ is at most $O(1)$. Next we bound the remaining points of $X$.
Since $\varepsilon \leq \ray(x) \leq 2\varepsilon$, it follows from Lemmas~\ref{lem:raydist-width} and \ref{lem:min-vol-cap3} that $\varepsilon \leq \width(C_x) \leq 2(2d+1)\varepsilon$. Let $\varepsilon_i = 2^i \varepsilon$. We partition the remaining points of $X$ into $O(\log d)$ groups, where the points in group $i$ have associated minimum volume caps whose widths lie between $\varepsilon_i$ and $2 \varepsilon_i$. By Lemma~\ref{lem:fixed-width}(i), the number of points in group $i$ is $O(1/\varepsilon_i^{(d-1)/2})$. Summing over all groups $i$, it follows that $|X| = O(1/\varepsilon^{(d-1)/2})$, which proves (i).
The proof of (ii) is similar. By Lemma~\ref{lem:vol-mac-bounds}(ii), the relative volume of any Macbeath region of $\mathscr{M}^{1/4c}(X)$ is $\Omega(\varepsilon^d)$. It follows that if $f = o(\varepsilon^{d-1})$ then $X_f = \emptyset$ and so (ii) holds. We may therefore assume that $f = \Omega(\varepsilon^{d-1})$. Arguing as in (i), we can show that the number of points $x \in X_f$ such that $\width(C_x) > 1/3$ is $O(1)$. We partition the remaining points into $O(\log d)$ groups as before. Applying Lemma~\ref{lem:fixed-width}(ii) to each group, and summing the result proves (ii).
\end{proof}
}
\section{Relative Fatness and the Harmonic-Mean Body} \label{s:hm-fat}
In this section, we establish properties of the harmonic-mean body that are critical to the main results of this paper. In particular, given two bodies $K_0 \subset K_1$, we show that $K_0$ is relatively fat with respect to $K_H$. In fact, we present a stronger result in Lemma~\ref{lem:HM-fat-main}, which implies relative fatness as an immediate consequence. We will employ this stronger result in Section~\ref{s:hausdorff} to obtain our volume-sensitive bounds for polytope approximation.
The proof of Lemma~\ref{lem:HM-fat-main} is based on the following technical lemma. For constant $\lambda$, it implies that for any point $b \in K_0$ that is not too close to the boundary of $K_0$, the Macbeath regions centered at $b$ with respect to $K_0$ and $K_H$, respectively, are roughly similar up to a constant scaling factor. This is formally stated in the corollary following the lemma.
\begin{lemma}
\label{lem:HM-fat-aux}
Let $0 < \lambda < 1$ be a parameter. Let $K_0 \subset K_1$ be two convex bodies, where the origin $O$ lies in the interior of $K_0$. Let $K_H$ denote the harmonic-mean body of $K_0$ and $K_1$. Consider any ray emanating from the origin $O$. Let $c$ and $d$ denote the points of intersection of this ray with $\partial K_0$ and $\partial K_1$, respectively (see figure). Let $b \in K_0$ be a point on this ray such that the cross ratio $(O,c;d,b) \leq -\lambda$. Consider any line passing through $b$. Let $c'$ and $c''$ denote the points of intersection of this line with $\partial K_H$. Then
\[
\min(\|b c' \cap K_0\| , \|b c'' \cap K_0\|)
~ \geq ~ s(\lambda) \cdot \min(\|b c'\|, \|b c''\|), \qquad\text{where $s(\lambda) = \lambda / 6$}.
\]
\end{lemma}
\begin{figure}
\centering
\caption{Illustration of the proof of Lemma~\ref{lem:HM-fat-aux}: (a) the original configuration and (b) the configuration after the projective transformation.}
\label{f:harmonic-mean-aux}
\end{figure}
\begin{proof}
\confonly{We sketch the key ideas and present the full proof in~\cite{SoCG23arxiv}.}
Consider the two dimensional flat that contains the origin and the line $\ell$ that passes through the points $c'$, $b$, and $c''$. Henceforth, let $K_0, K_1, K_H$ refer to the two dimensional convex bodies obtained by intersecting the respective bodies with this flat. Let $b'$ and $d'$ denote the points of intersection of the ray $O c'$ with $\partial K_0$ and $\partial K_1$, respectively, and define $b''$ and $d''$ analogously for $O c''$. All these points lie on the flat, and it follows from the definition of the harmonic-mean body that $(O,c';d',b') = (O,c'';d'',b'') = -1$ (see Figure~\ref{f:harmonic-mean-aux}(a)).
By rotating space, we may assume that $\ell$ is horizontal and above the origin. Through an infinitesimal perturbation, we may assume that there is a supporting line for $K_1$ at $d$ that is not parallel to $\ell$. Without loss of generality, we may assume that it intersects $\ell$ to the left of $b$. Since $c'$ and $c''$ are symmetrical in the statement of the lemma, we may assume that $c'$ lies to the left of $b$ and $c''$ lies to its right. Let $f$ denote the intersection point of the line $d d'$ with $\ell$ (see Figure~\ref{f:harmonic-mean-aux}(a)). Clearly, the left-to-right order of points along $\ell$ is $\ang{f, c', b, c''}$. Observe that the points $c$, $d$, $d'$, and $d''$ all lie strictly above $\ell$, and the points $b'$ and $b''$ lie strictly below.
Let $e'$ denote the point of intersection of the segment $c b'$ with segment $b c'$, and define $e''$ analogously for segment $c b''$. Since $c$, $b'$ and $b''$ all lie on $\partial K_0$, by convexity, $e'$ and $e''$ are contained in $K_0$. Thus, to prove the lemma, it suffices to show that
\begin{equation} \label{eq:hm-fat-1}
\min(\|b e'\|, \|b e''\|)
~ \geq ~ s(\lambda) \cdot \min(\|b c'\|, \|b c''\|).
\end{equation}
We begin by proving bounds on two cross ratios:
\begin{enumerate}
\item [$(i)$] $-(f,e'; c',b) \geq \lambda/2$, and
\item[$(ii)$] $-(f,e''; c'',b) \geq \lambda/2$.
\end{enumerate}
Because projective transformations preserve cross ratios, it will be convenient to prove these bounds after first applying a projective transformation. In particular, this transformation maps $O$ and $f$ to infinity so that lines through $O$ map to vertical lines and lines through $f$ map to horizontal lines (see Figure~\ref{f:harmonic-mean-aux}(b)).
After this transformation, $O c'$, $O c$, and $O c''$ are vertical and directed upwards and $d' d$ and $c' b$ are horizontal and directed to the right. Clearly, $\|c' d'\| = \|b d\|$. Since $d''$ lies above $\ell$ and below the line $d' d$ we have $\|c'' d''\| \leq \|b d\|$. By definition of $b$, we have $(O,c; d,b) = -1/(\|c d\| / \|c b\|) \leq -\lambda$. Since $\|c b\| + \|c d\| = \|b d\|$, we have $\|c b\| \geq \|b d\| \lambda/(1+\lambda)$.
Given that $f$ is at infinity, the above cross ratios reduce to simple ratios. Thus, it suffices to show:
\begin{enumerate}
\item [$(i)$] $\|e' b\|/\|e' c'\| \geq \lambda/2$, and
\item[$(ii)$] $\|e'' b\|/\|e'' c''\| \geq \lambda/2$.
\end{enumerate}
To show~(i), observe that since $(O, c'; d', b') = -1$ and since $O$ is at infinity and $c'$ lies between $b'$ and $d'$, this is equivalent to $1/(\|c' d'\| / \|c' b'\|) = 1$, that is, $\|c' b'\| = \|c' d'\|$. By similar triangles $\triangle e'b c$ and $\triangle e'c'b'$, the fact that $\|c'b'\| = \|c'd'\| = \|b d\|$, and our bounds on $\lambda$, we have
\begin{equation} \label{eq:hm-fat-2}
\frac{\|e' c'\|}{\|e' b\|}
~ = ~ \frac{\|c' b'\|}{\|c b\|}
~ \leq ~ \frac{\|b d\|}{\|b d\|\lambda/(1+\lambda)}
~ = ~ \frac{1+\lambda}{\lambda}
~ \leq ~ \frac{2}{\lambda},
\end{equation}
which implies~(i).
The analysis for~(ii) is essentially the same as above. Since $(O, c''; d'', b'') = -1$ we have $\|c'' b''\| = \|c'' d''\|$. By similar triangles $\triangle e''b c$ and $\triangle e''c''b''$ and the fact that $\|c''b''\| = \|c''d''\| \leq \|b d\|$, the inequalities of Eq.~\eqref{eq:hm-fat-2} (with double primes for single primes) show that
\[
\frac{\|e'' c''\|}{\|e'' b\|}
~ \leq ~ \frac{2}{\lambda},
\]
which implies~(ii).
These inequalities hold only in the transformed configuration, but since projective transformations preserve cross ratios, the cross-ratio bounds of~(i) and~(ii) hold unconditionally.
\confonly{Returning to the original configuration and using (i), we can show that $\|be'\| / \|bc'\| \geq \lambda/3$ and from (ii), we can show that either $\|be''\| / \|bf\| \geq \lambda/6$ or $\|be''\| / \|e''c''\| \ge \lambda/5$. We omit the details of this calculation, which can be found in the full version~\cite{SoCG23arxiv}. In both cases, we are able to establish Eq.~\eqref{eq:hm-fat-1}, as desired.
}
\arxivonly{Let's return to the original configuration. Since $\|f c'\|/\|f b\| < 1$, observation~(i) implies that $\|e' c'\|/\|e' b\| \leq 2/\lambda$. Thus, we have
\begin{equation} \label{eq:hm-fat-3}
\frac{\|e' b\|}{\|c' b\|}
~ = ~ \frac{\|e' b\|}{\|e' c'\| + \|e' b\|}
~ \geq ~ \frac{\|e' b\|}{\|e' b\|(2/\lambda) + \|e' b\|}
~ = ~ \frac{\lambda}{\lambda + 2}
~ \geq ~ \frac{\lambda}{3}.
\end{equation}
Next, we claim that~(ii) implies that either
\begin{equation} \label{eq:hm-fat-5}
\frac{\|b e''\|}{\|b f\|}
~ \geq ~ \frac{\lambda}{6} \qquad\text{or}\qquad
\frac{\|b e''\|}{\|e'' c''\|}
~ \geq ~ \frac{\lambda}{5}.
\end{equation}
To see why, suppose to the contrary that both inequalities fail to hold. Then we would have
\begin{align*}
-(f, e''; c'', b)
& ~ = ~ \frac{\|f c''\| / \|f b\|}{\|e'' c''\| / \|e'' b\|}
~ = ~ \frac{\|f b\| + \|b e''\| + \|e'' c''\|}{\|f b\|} \cdot \frac{\|e'' b\|}{\|e'' c''\|} \\[2pt]
& ~ = ~ \frac{\|e'' b\|}{\|e'' c''\|} + \left( \frac{\|b e''\|}{\|b f\|} \cdot \frac{\|b e''\|}{\|e'' c''\|} \right)
+ \frac{\|b e''\|}{\|b f\|} \\[2pt]
& ~ < ~ \frac{\lambda}{5} + \left( \frac{\lambda}{6} \cdot \frac{\lambda}{5} \right) + \frac{\lambda}{6}
~ = ~ \frac{2}{5} \lambda
~ < ~ \frac{\lambda}{2},
\end{align*}
which contradicts~(ii).
To complete the proof, we consider two cases depending on which inequality of Eq.~\eqref{eq:hm-fat-5} holds. First, if $\|b e''\|/\|b f\| \geq \lambda/6$, then clearly $\|b e''\|/\|b c'\| \geq \lambda/6$, and so using Eq.~\eqref{eq:hm-fat-3} we have
\[
\min(\|b e'\|, \|b e''\|)
~ \geq ~ \min \left( \frac{\lambda}{3} \|b c'\|, ~ \frac{\lambda}{6} \|b c'\| \right)
~ = ~ \frac{\lambda}{6} \|b c'\|
~ \geq ~ \frac{\lambda}{6} \min(\|b c'\|, \|b c''\|),
\]
as desired.
Otherwise, $\|b e''\|/\|e'' c''\| \geq \lambda/5$, which implies that
\[
\frac{\|b e''\|}{\|b c''\|}
~ = ~ \frac{\|b e''\|}{\|b e''\| + \|e'' c''\|}
~ \geq ~ \frac{\|b e''\|}{\|b e''\| + \|b e''\|(5/\lambda)}
~ = ~ \frac{\lambda}{\lambda+5}
~ \geq ~ \frac{\lambda}{6}.
\]
Therefore,
\[
\min(\|b e'\|, \|b e''\|)
~ \geq ~ \min \left( \frac{\lambda}{3} \|b c'\|, ~ \frac{\lambda}{6} \|b c''\| \right)
~ \geq ~ \frac{\lambda}{6} \min(\|b c'\|, \|b c''\|),
\]
again, as desired.
}
\end{proof}
The following corollary is immediate from the definition of Macbeath regions.
\begin{corollary} \label{cor:HM-fat-aux}
Assume all entities to be as defined in the statement of Lemma~\ref{lem:HM-fat-aux}. Then $M_{K_H}^{s(\lambda)}(b) \subseteq M_{K_0}(b)$, where $s(\lambda) = \lambda / 6$.
\end{corollary}
We have the following lemma which in conjunction with Corollary~\ref{cor:HM-fat-aux} will be useful in proving Lemma~\ref{lem:HM-fat-main}.
\confonly{The proof is presented in the full version~\cite{SoCG23arxiv}.}
\begin{lemma} \label{lem:cr-lb}
Let $\lambda, K_0, K_1, K_H$, the origin $O$, and points $c$ and $d$ be as in Lemma~\ref{lem:HM-fat-aux}. Let $h$ denote the point of intersection of the ray $Oc$ with the boundary of $K_H$. Then:
\begin{enumerate}
\item[$(i)$] $\|Oc\| \geq \|hc\|$.
\item[$(ii)$] Let $b$ be a point on segment $Oc$, which is not contained in the interior of $M_{K_H}^{\lambda}(c)$. Then $(O,c;d,b) \leq -\lambda / 2$.
\end{enumerate}
\end{lemma}
\arxivonly{
\begin{proof}
Let $h$ and $h'$ denote the points of intersection of the line $Oc$ with the boundary of $K_H$ such that $h', b, c,$ and $h$ appear in this order on the line. Since $(O,h;d,c)$ forms a harmonic bundle, we have $\|Od\|/\|Oc\| = \|hd\|/\|hc\|$. Since $\|Od\| \geq \|hd\|$, it follows that $\|Oc\| \geq \|hc\|$, which proves (i).
To prove (ii), observe that since $b$ is not contained in the interior of $M_{K_H}^{\lambda}(c)$, by Corollary~\ref{cor:nesting}, $r_{K_H}(b,c) = -(h',c;h,b) \geq \lambda$. Thus $(h',c;h,b) \leq -\lambda$. It is straightforward from the definition that this cross ratio decreases on replacing $h'$ by $O$. Thus $(O,c;h,b) \leq -\lambda$. Next we will show that $(O,c;d,b) = (O,c;h,b) / 2$, which will prove the lemma.
To see this, we apply a projective transformation that maps $O$ to a point at infinity. (Recall that collinearity of points and cross ratios are preserved under a projective transformation.) We have $(O,c;d,b) = -\|cb\| / \|cd\|$, $(O,c;h,b) = -\|cb\| / \|ch\|$, and $(O,h;d,c) = -\|ch\| / \|hd\|$. Also, by the definition of the harmonic-mean body, $(O,h;d,c)$ is a harmonic bundle and so $\|ch\| = \|hd\|$. Thus
\[
(O,c;d,b) = -\frac{\|cb\|}{\|cd\|} = -\frac{\|cb\|}{\|ch\| + \|hd\|} = -\frac{\|cb\|}{2\|ch\|} = \frac{1}{2} \, (O,c;h,b),
\]
as desired.
\end{proof}
}
We now have all the key ingredients to present the main result of this section. The relative fatness of $K_0$ with respect to $K_H$ is an immediate consequence of parts (i) and (ii) of this lemma. In order to state part (iii), we need a definition. Given a convex body $K$ with the origin $O$ in its interior and a region $R \subseteq K$, define the \emph{shadow} of $R$ with respect to $K$, denoted \emph{$\shadow_K(R)$}, to be the set of points $x \in K$ such that the segment $Ox$ intersects $R$.
\begin{lemma} \label{lem:HM-fat-main}
Let $0 < \beta \le 1$ be a real parameter. Let $K_0 \subset K_1$ be two convex bodies, let the origin $O$ lie in the interior of $K_0$, and let $K_H$ denote the harmonic-mean body of $K_0$ and $K_1$. Let $c$ be any point on the boundary of $K_0$ and let $M = M_{K_H}^{\beta}(c)$. Then there exists a convex body $M'$ such that
\begin{enumerate}
\item[$(i)$] $\vol(M') = \Omega(\vol(M))$,
\item[$(ii)$] $M' \subseteq M \cap K_0$, and
\item[$(iii)$] $\shadow_{K_0}(M') \subseteq M$.
\end{enumerate}
\end{lemma}
\begin{proof}
\confonly{We sketch the proof of (i) and (ii) here, and present the full proof in~\cite{SoCG23arxiv}.}
For the sake of convenience, assume that the ray $Oc$ is directed vertically upwards. Let $h$ be the point of intersection of the ray $Oc$ with $\partial K_H$. Let $R = M_{K_H}(c)-c$ be the recentering of $M_{K_H}(c)$ about the origin. By definition, $M = M_{K_H}^{\beta}(c) = c + \beta R$. Let $b$ be the point of intersection of the segment $Oc$ with the boundary of $M_{K_H}^{\lambda}(c) = c + \lambda R$, where $\lambda = \beta/\kappa$ for a suitably large constant $\kappa \geq 2$ (independent of dimension). Recalling from Lemma~\ref{lem:cr-lb}(i) that $\|ch\| \leq \|Oc\|$, it follows that $b$ is vertically below $c$ at a distance of $\lambda \|ch\|$. Recalling $s(\lambda)$ from Corollary~\ref{cor:HM-fat-aux}, let $M' = b + \gamma R$ for
\[
\gamma
~ = ~ \frac{s(\lambda/2)}{10}
~ = ~ \frac{s(\beta/2\kappa)}{10}
~ = ~ \frac{\beta}{120\kappa}
\]
(see Figure~\ref{f:hm-fat-main}(a)). Since $M'$ and $M$ are translated copies of $R$ scaled by a factor of $\gamma$ and $\beta$, respectively, we have $\vol(M') = (\gamma/\beta)^d \vol(M) = (1/120\kappa)^d \vol(M)$. This proves (i).
\begin{figure}
\centering
\caption{Illustration of the proof of Lemma~\ref{lem:HM-fat-main}: (a) the construction of $M'$ and (b) the construction of $M''$.}
\label{f:hm-fat-main}
\end{figure}
To prove (ii), we will show that $M' \subseteq M$ and $M' \subseteq K_0$. Since $b \in c + \lambda R$ and $M' = b + \gamma R$, it follows that $M' \subseteq c + (\lambda + \gamma) R$. For large $\kappa$, we have $\lambda + \gamma \le \beta$, and thus $M' \subseteq c + \beta R = M$.
Next we show that $M' \subseteq K_0$. Let $d$ denote the point of intersection of the ray $Oc$ with $\partial K_1$. Applying Lemma~\ref{lem:cr-lb}(ii), it follows that the cross ratio $(O,c;d,b) \leq -\lambda / 2$. Applying Corollary~\ref{cor:HM-fat-aux} with $\lambda/2$ in place of $\lambda$ and recalling that $s(\lambda/2) = 10\gamma$, we have $M_{K_H}^{10 \gamma}(b) \subseteq M_{K_0}(b)$. Also, by Lemma~\ref{lem:mac-trans}, we have $M' = b + \gamma R \subseteq M_{K_H}^{2\gamma}(b)$. Thus $M' \subseteq M_{K_0}^{1/5}(b)$. By definition of Macbeath regions, $M_{K_0}(b) \subseteq K_0$, and so $M' \subseteq K_0$, as desired.
\arxivonly{
To prove (iii), let $S = \shadow_{K_0}(M')$, and let $M''$ be the convex body obtained by scaling $M'$ by the factor
\[
f
~ = ~ 1 + 4 \lambda \frac{\|ch\|}{\|Oc\|}
\]
about $O$ (see Figure~\ref{f:hm-fat-main}(b)). Letting $b''$ denote the center of $M''$, we have $M'' = b'' + f \gamma R$. We claim that
\begin{enumerate}
\item[(a)] $S$ is contained in the convex hull of $M' \cup M''$, and
\item[(b)] the convex hull of $M' \cup M''$ is contained in $M$.
\end{enumerate}
Together, this would imply that $S$ is contained in $M$, and complete the proof.
To prove (a), let $c'$ be any point in $S \cap \partial K_0$ and let $b'$ be any point in the intersection of segment $Oc'$ with $M'$. Since $b' \in M'$ and $M' \subseteq M_{K_0}^{1/5}(b)$, we have $b' \in M_{K_0}^{1/5}(b)$. By Lemma~\ref{lem:core-ray}, we have $\ray_{K_0}(b') \le 2 \ray_{K_0}(b)$, that is, $\|b'c'\| / \|Oc'\| \le 2 \|bc\| / \|Oc\|$. Recalling that $\|bc\| = \lambda \|ch\|$, it follows that
\[
\frac{\|Oc'\|}{\|Ob'\|}
~ = ~ \frac{\|Oc'\|}{\|Oc'\| - \|b'c'\|}
~ = ~ \frac{1}{1 - \frac{\|b'c'\|}{\|Oc'\|}}
~ \leq ~ \frac{1}{1 - 2 \lambda \frac{\|ch\|}{\|Oc\|}}
~ \leq ~ 1 + 4 \lambda \frac{\|ch\|}{\|Oc\|}.
\]
Recall that we defined the quantity on the right hand side to be the scaling factor $f$ and $M''$ to be the $f$-factor expansion of $M'$ about $O$. Since $\|Oc'\| \le f \|Ob'\|$, it follows that for any ray passing through $M'$, the points of $S$ on this ray lie between the lowest point on the ray contained in $M'$ and the highest point on the ray contained in $M''$. It follows that $S$ is contained in the convex hull of $M' \cup M''$.
It remains to prove (b). By convexity of $M$, it suffices to show that both $M'$ and $M''$ are contained in $M$. We have already shown in part (ii) that $M' \subseteq M$. To complete the proof, it suffices to show that $M'' \subseteq M$. Note that the point corresponding to $c$, obtained by scaling by a factor of $f$ about the origin, is at distance $4 \lambda \|ch\|$ vertically above $c$. Clearly $b''$ lies on the segment $bc''$, and so $b'' \in c + 4 \lambda R$. Recalling that $M''= b'' + f \gamma R$, it follows that $M'' \subseteq c + (4 \lambda + f \gamma) R$. Since $\lambda = \beta/\kappa$, and $\gamma = \beta/120\kappa$, and $f = 1 + 4\lambda \|ch\|/\|Oc\| \le 1 + 4 \lambda$, we have $4\lambda + f\gamma \le \beta$, for large $\kappa$. Thus $M'' \subseteq c + \beta R = M$, which completes the proof.
}
\end{proof}
The following corollary is immediate from parts (i) and (ii) of the above lemma.
\begin{corollary}
Let $K_0 \subset K_1$ be two convex bodies, let the origin $O$ lie in the interior of $K_0$, and let $K_H$ denote the harmonic-mean body of $K_0$ and $K_1$. Then $K_0$ is relatively fat with respect to $K_H$.
\end{corollary}
\section{Uniform Volume-Sensitive Bounds}
\label{s:hausdorff}
In this section, we present the proof of Theorem~\ref{thm:main}. Let $\varepsilon > 0$ and let $K_0$ denote the convex body $K$ described in this theorem. Let $K_1 = K_0 \oplus \varepsilon$ denote the Minkowski sum of $K_0$ with a ball of radius $\varepsilon$. Also recall that $\Delta_d(K_0)$ denotes the \emph{volume diameter} of $K_0$. Let $C(K_0,\varepsilon)$ be a shorthand for $(\Delta_d(K_0)/\varepsilon)^{(d-1)/2}$, the desired number of facets.
We will show that there exists a polytope with $O(C(K_0,\varepsilon))$ facets sandwiched between $K_0$ and $K_1$. As mentioned above, we will transform the problem by mapping to the polar. Through an appropriate translation, we may assume that the origin $O$ coincides with the centroid of $K_0$. Note that the arithmetic-mean body $K_A$ of $K_0$ and $K_1$ is given by $K_0 \oplus \frac{\varepsilon}{2}$, and recall from Section~\ref{s:am-hm} that $K_H = K_A^*$ is the harmonic-mean body of $K_1^*$ and $K_0^*$.
Our construction is based on Lemma~\ref{lem:MNet-size-hausdorff}, which shows that there is a $(K_H,\partial K_1^*)$-MNet $X$ of size $O(C(K_0,\varepsilon))$. Applying Lemma~\ref{lem:MNet-approx}, it follows that there exists a polytope $P$ sandwiched between $K_1^*$ and $K_H$ with $O(|X|)$ vertices. By polarity, this implies that $P^*$ is a polytope sandwiched between $K_A$ and $K_1$ having $O(|X|)$ facets. Since $K_0 \subseteq K_A$, this polytope is also sandwiched between $K_0$ and $K_1$, which proves Theorem~\ref{thm:main}.
All that remains is showing that $|X| = O(C(K_0,\varepsilon))$. For this purpose, we will utilize the tools for bounding the sizes of MNets in conjunction with the relative fatness of the harmonic-mean body (established in Section~\ref{s:hm-fat}).
\begin{lemma} \label{lem:MNet-size-hausdorff}
Let $\varepsilon > 0$ and let $K_0, K_1, K_A, K_H$ be convex bodies as defined above. Let $X$ be a $(K_H,\partial K_1^*)$-MNet. Then $|X| = O(C(K_0,\varepsilon))$.
\end{lemma}
\begin{proof}
We begin by showing that $\vol(K_H) = \Omega(1/\vol(K_0))$, and its Mahler volume $\mu(K_H)$ is at most $O(1)$ (implying that $K_H$ is well-centered). To see this, recall that the width of $K_0$ in any direction is at least $\varepsilon$ and $K_A = K_0 \oplus \frac{\varepsilon}{2}$. It is well-known that the ratio of the distances of the centroid from any pair of supporting hyperplanes is at most $d$~\cite{Gru63,Min1897,Rad1916}. It follows that a ball of radius $\varepsilon/(d+1)$ centered at the origin lies within $K_0$. Thus, a constant-factor expansion of $K_0$ contains $K_A$, implying that $\vol(K_A) = O(\vol(K_0))$. Also, because $K_H = K_A^*$, by Lemma~\ref{lem:mahler-bounds}, $\vol(K_A) \cdot \vol(K_H) = \Omega(1)$. Thus, $\vol(K_H) = \Omega(1/\vol(K_0))$. To upper bound $\mu(K_H)$, note that by polarity, $K_H \subseteq K_0^*$, and thus
\[
\mu(K_H)
~ = ~ \vol(K_A) \cdot \vol(K_H)
~ = ~ O(\vol(K_0) \cdot \vol(K_0^*))
~ = ~ O(\mu(K_0))
~ = ~ O(1),
\]
where in the last step, we have used Lemma~\ref{lem:centroid} and our assumption that the origin coincides with the centroid of $K_0$.
To simplify notation, for the remainder of the proof we assume that ray distances, Macbeath regions, and volumes are defined relative to $K_H$, that is, $\ray \equiv \ray_{K_H}$, $M \equiv M_{K_H}$, and $\vol \equiv \vol_{K_H}$.
For any point $p \in \partial K_1^*$, let $p'$ denote the point of intersection of the ray $O p$ with $\partial K_H$. We first establish a bound on the relative ray distance $\ray(p)$. Observe that since $p$ and $p'$ lie on $\partial K_1^*$ and $\partial K_H$, respectively, their polar hyperplanes, $p^*$ and ${p'}^*$, are supporting hyperplanes for $K_1$ and $K_H^* = K_A$, respectively. Letting $r$ denote the distance between ${p'}^*$ and the origin, it follows from the definition of $K_A$ that the distance between $p^*$ and the origin is $r + \frac{\varepsilon}{2}$. The distance of $p'$ and $p$ from the origin are the reciprocals of these. Therefore, we have
\[
\ray(p)
~ = ~ \frac{\|p p'\|}{\|O p'\|}
~ = ~ \frac{\|O p'\| - \|O p\|}{\|O p'\|}
~ = ~ \frac{\frac{1}{r} - \frac{1}{r + (\varepsilon/2)}}{\frac{1}{r}}
~ = ~ 1 - \frac{r}{r + (\varepsilon/2)}
~ = ~ \frac{\varepsilon/2}{r + (\varepsilon/2)}.
\]
Since $\frac{1}{\|O p'\|} = r = \Omega(\varepsilon)$, we have $\ray(p) = \Theta(\varepsilon/r) = \Theta(\varepsilon \|O p'\|)$. (It is noteworthy and somewhat surprising that this relative ray distance is not a dimensionless quantity, since it depends linearly on $\|O p'\|$.)
To analyze $|X|$, we partition it into groups based on $\|O x'\|$ for each $x \in X$. Define $R_0 = (\vol(K_H))^{1/d}$. By our earlier remarks, $\vol(K_H) = \Omega(1/\vol(K_0))$, and so $R_0 = \Omega(1/\Delta_d(K_0))$. For any integer $i$ (possibly negative), define $R_i = 2^i R_0$ and $\varepsilon_i = \varepsilon R_i$. We can express $X$ as the disjoint union of sets $X_i$, where $X_i$ consists of points $x$ such that $R_i \leq \|Ox'\| < 2 R_i$. Recall that for any $x \in X_i$, we have $\ray(x) = \Theta(\varepsilon \|O x'\|) = \Theta(\varepsilon R_i) = \Theta(\varepsilon_i)$.
We will bound the contributions of the $|X_i|$ to $|X|$ based on the sign of $i$. Let us first consider the nonnegative values of $i$. We remark that $|X_i| = 0$ for large $i$ (specifically, for $i = \omega(\log(1/\varepsilon R_0))$) because a ball of radius $\Omega(\varepsilon)$ centered at the origin is contained within $K_0$, and so by polarity $K_0^*$, and hence $K_1^*$, is contained within a ball of radius $O(1/\varepsilon)$. Recalling that $K_H$ is well-centered and applying Lemma~\ref{lem:fixed-ray}(i), we have (up to constant factors)
\begin{align*}
\sum_{i \geq 0} |X_i|
& ~ \leq ~ \sum_{i \geq 0} \left( \frac{1}{\varepsilon_i}\right)^{\kern-2pt\frac{d-1}{2}}
~ = ~ \sum_{i \geq 0} \left( \frac{1}{\varepsilon 2^i R_0}\right)^{\kern-2pt\frac{d-1}{2}}
~ \leq ~ \sum_{i \geq 0} \left( \frac{\Delta_d(K_0)}{\varepsilon 2^i}\right)^{\kern-2pt\frac{d-1}{2}} \\
& ~ = ~ \left( \frac{\Delta_d(K_0)}{\varepsilon} \right)^{\kern-2pt\frac{d-1}{2}} \sum_{i \geq 0} \left(\frac{1}{2}\right)^{\kern-2pt\frac{i(d-1)}{2}}
~ \leq ~ \left( \frac{\Delta_d(K_0)}{\varepsilon} \right)^{\kern-2pt\frac{d-1}{2}}
~ = ~ O(C(K_0,\varepsilon)).
\end{align*}
In order to bound the contributions to $|X|$ for negative values of $i$, we need a more sophisticated strategy. Our approach is to first bound the total relative volume of the Macbeath regions of $\mathscr{M}^{1/4c}(X_i)$, which we assert to be $O(\varepsilon_i 2^{id})$. Assuming this assertion for now, we complete the proof as follows. By applying Lemma~\ref{lem:fixed-ray}(ii) with $f = O(2^{id})$ and recalling that $\varepsilon_i = \varepsilon R_i = 2^i \varepsilon R_0$, we have (up to constant factors)
\begin{align*}
\sum_{i < 0} |X_i|
& ~ \leq ~ \sum_{i < 0} \frac{\sqrt{f}}{\varepsilon_i^{(d-1)/2}}
~ = ~ \sum_{i < 0} \frac{2^{i d/2}}{(2^i \varepsilon R_0)^{(d-1)/2}}
~ = ~ \sum_{i < 0} \frac{2^{i(d-(d-1))/2}}{(\varepsilon R_0)^{(d-1)/2}}
~ = ~ \sum_{i < 0} \frac{2^{i/2}}{(\varepsilon R_0)^{(d-1)/2}} \\
& ~ = ~ \sum_{i < 0} 2^{i/2} C(K_0,\varepsilon)
~ = ~ C(K_0,\varepsilon) \sum_{i > 0} \left( \frac{1}{2} \right)^{\kern-1pt\frac{i}{2}}
~ = ~ O(C(K_0, \varepsilon)).
\end{align*}
It remains only to prove the assertion on the total relative volume of $\mathscr{M}^{1/4c}(X_i)$. Let $x \in X_i$ and let $M_x = M^{1/4c}(x)$. By Lemma~\ref{lem:HM-fat-main} (with $x$, $K_1^*$, and $K_H$ playing the roles of $c$, $K_0$, and $K_H$, respectively), there is an associated convex body $M'_x$ such that
\begin{center}
(i) $\vol(M'_x) = \Omega(\vol(M_x))$, ~~
(ii) $M'_x \subseteq M_x \cap K_1^*$, ~~and~~
(iii) $\shadow_{K_1^*}(M'_x) \subseteq M_x$.
\end{center}
We will use $S_x$ as a shorthand for $\shadow_{K_1^*}(M'_x)$. Since $\vol(M_x) = O(\vol(M'_x)) = O(\vol(S_x))$, it suffices to show that the total relative volume of the shadows $\{S_x : x \in X_i\}$ is $O(\varepsilon_i 2^{id})$.
For $x \in X_i$, we define cone $\Psi_x$ to be the intersection of $K_H$ with the infinite cone consisting of rays emanating from the origin that contain a point of $S_x$ (see Figure~\ref{f:mnet-size-hausdorff}). Since the Macbeath regions of $\mathscr{M}^{1/4c}(X_i)$ are disjoint, it follows from (iii) that the associated shadows intersect $\partial K_1^*$ in patches that are also disjoint. Thus the set of cones $\Psi = \{\Psi_x : x \in X_i\}$ are disjoint.
\begin{figure}
\caption{Illustration of the cones $\Psi_x$ and shadows $S_x$ used to bound the total relative volume.}
\label{f:mnet-size-hausdorff}
\end{figure}
Consider a ray emanating from the origin that is contained in any cone $\Psi_x$. Let $q$ and $q'$ be the points of intersection of this ray with $\partial K_1^*$ and $\partial K_H$, respectively. Let $q''$ be any point on this ray that lies inside shadow $S_x$. Since $q'' \in M_x$, by Lemma~\ref{lem:core-ray}, we have $\ray(q'') = \Theta(\ray(x)) = \Theta(\varepsilon_i)$. By the same reasoning, $\ray(q) = \Theta(\varepsilon_i) = \Theta(\varepsilon R_i)$. Also, recalling our earlier bounds on the relative ray distance of points on $\partial K_1^*$, we have $\ray(q) = \Theta(\varepsilon \|Oq'\|)$. Equating the two expressions for $\ray(q)$, we obtain $\|Oq'\| = \Theta(R_i)$.
Since the cones of $\Psi$ are disjoint and any ray emanating from the origin and contained in a cone of $\Psi$ has length $\Theta(R_i)$, it follows that the total volume of these cones is $O(R_i^d)$. Further, since only a fraction $\varepsilon_i$ of any such ray is contained in the associated shadow, it follows that the total volume of all the shadows $\{S_x : x \in X_i\}$ is $O(\varepsilon_i R_i^d)$. Recalling that $\vol(K_H) = R_0^d$ and $R_i = 2^i R_0$, it follows that the total relative volume of these shadows is $O(\varepsilon_i R_i^d / R_0^d) = O(\varepsilon_i 2^{id})$. This establishes the assertion on the total relative volume of $\mathscr{M}^{1/4c}(X_i)$ and completes the proof.
\end{proof}
\arxivonly{
\section{Nonuniform Volume-Sensitive Bounds} \label{s:nonuniform}
A nonuniform bound very similar to ours can be derived from a result due to Gruber \cite{Gru93a}, who showed that if $K$ is a strictly convex body and $\partial K$ is twice differentiable ($C^2$ continuous), then as $\varepsilon$ approaches zero, the number of bounding halfspaces needed to achieve an $\varepsilon$-approximation of $K$ is
\begin{equation} \label{eq:gruber}
O\left( \left( \frac{1}{\varepsilon} \right)^{\kern-2pt(d-1)/2} \int_{\partial K} \kappa(x)^{1/2} d\sigma(x) \right),
\end{equation}
where $\kappa$ and $\sigma$ denote the Gaussian curvature of $K$ and ordinary surface area measure, respectively. (B{\" o}r{\" o}czky showed that the requirement that $K$ be ``strictly'' convex can be eliminated \cite{Bor00}.)
Assume that the origin coincides with the centroid of $K$. Let $S^{d-1}$ denote the unit Euclidean sphere in $\mathbb{R}^d$. For $u \in S^{d-1}$, let $h(u) = \max \, \{ \ang{x,u} : x \in K\}$ denote the support function of $K$ and let $\rho(u) = \max \, \{\lambda > 0: \lambda u \in K^*\}$ denote the radial function of $K^*$.
Letting $n$ denote the exterior normal unit vector of $K$ and applying the Cauchy-Schwarz inequality, we obtain
\[
\left(\int_{\partial K} \kappa(x)^{1/2} \, d\sigma(x) \right)^2
~ \leq ~ \left( \int_{\partial K} \frac{\kappa(x)}{h(n(x))} \, d\sigma(x) \right) \cdot
\left(\int_{\partial K} h(n(x)) \, d\sigma(x)\right).
\]
The second integral on the right hand side is easily seen to be $d \cdot \vol(K)$. To bound the first integral on the right hand side, we express it as an integral over the unit sphere $S^{d-1}$.
\[
\int_{\partial K} \frac{\kappa(x)}{h(n(x))} \, d\sigma(x)
~ = ~ \int_{S^{d-1}} \frac{1}{h(u)} \, d\sigma(u).
\]
Letting $\varsigma_{d-1} = \area(S^{d-1})$ and applying Jensen's inequality, we have
\[
\frac{1}{\varsigma_{d-1}} \int_{S^{d-1}} \frac{1}{h(u)} \, d\sigma(u)
~ \leq ~ \left(\frac{1}{\varsigma_{d-1}} \int_{S^{d-1}} \frac{1}{h(u)^d} \, d\sigma(u)\right)^{1/d}
~ = ~ \left(\frac{1}{\varsigma_{d-1}} \int_{S^{d-1}} \rho(u)^d \, d\sigma(u)\right)^{1/d},
\]
where we have used the polar relationship $\rho(u) = 1/h(u)$ in the last step. It is easy to see that this integral is $d \cdot \vol(K^*)$. Neglecting constant factors depending on $d$, we have thus shown that the first integral on the right hand side of the Cauchy-Schwarz bound above is $O(\vol(K^*)^{1/d})$. Thus
\[
\left(\int_{\partial K} \kappa(x)^{1/2} \, d\sigma(x) \right)^2
~ = ~ O(\vol(K^*)^{1/d} \cdot \vol(K))
~ = ~ O(\vol(K)^{1-1/d}),
\]
where we have used the Blaschke--Santal{\'o} inequality, which implies that the product $\vol(K) \cdot \vol(K^*) = O(1)$, when the origin coincides with $K$'s centroid (see Lemma~\ref{lem:centroid}). Substituting in Eq.~\eqref{eq:gruber} and recalling that the volume diameter of $K$, $\Delta_d(K) = \Theta((\vol(K))^{1/d})$, we obtain the following theorem.
{\RLnonunifbound*}
Note that the bound in this theorem matches the uniform bound of Theorem~\ref{thm:main}. However, this approach does not produce a uniform bound, since Eq.~\eqref{eq:gruber} only holds in the limit as $\varepsilon$ approaches zero. (See~\cite{AFM12b} for a counterexample showing that this equation could be violated otherwise.)
}
\end{document} |
\begin{document}
\title{Discussion of ``Geodesic Monte Carlo on Embedded Manifolds''}
\section*{Comment: Connections and Extensions}
\subsection*{Persi Diaconis, Christof Seiler and Susan
Holmes\footnote{Statistics Department, Stanford University, CA 94305}}
\subsubsection*{Historical Context}
We welcome this paper of Byrne and Girolami [BG]; it breathes even more life
into the emerging area of hybrid Monte Carlo Markov chains by introducing
original tools for dealing with Monte Carlo simulations on constrained spaces
such as manifolds. We begin our comment with a bit of history. Using
geodesics to sample from the uniform distribution on the Stiefel manifold was
proposed by \citet{Asimov} in his work on the Grand Tour for exploratory data
analysis. For data $x_1,x_2,\ldots,x_n$ in ${\mathbb R}^p$, it is natural to
inspect low dimensional projections $\gamma x_1, \gamma x_2,\ldots, \gamma
x_n$ for $\gamma :{\mathbb R}^p\longrightarrow {\mathbb R}^k $. In the [BG]
paper the authors have a space of k-frames in ${\mathbb R}^{p}$, called
$V_{k,p}$. If one chooses $\gamma$ at random from this space, the views would
be too `disconnected' or `jerky' for human observers. A better tactic turned
out to be to choose a few $\gamma_i, 1\leq i\leq L$, at random and then moving
smoothly from $\gamma_i$ to $\gamma_j$ by available closed form
geodesics. While in a historical mode, we point to the little known papers of
\citet{McLachlan2003} and more recent papers by Betancourt on hybrid
Monte Carlo \citep{betancourt2013}.
\subsubsection*{Discrete Hamiltonian Dynamics}
The paper of [BG] uses Hamiltonian dynamics to move around on a manifold in an intelligent way to get proposals for the Metropolis algorithm. There are also many problems where samples are needed for constrained discrete spaces. These
include sampling contingency tables with given row and column sums as in
\citet{DiaconisSturmfels}. We recently encountered the following problem in
a quantum physics context \citep{ChatterjeeDiaconis}. Consider boxes labeled ($1,2,3,\ldots)$. Drop $N$ balls into these boxes according to Bose-Einstein
allocation resulting in $N_i$ balls in box labeled $i$. Interest is on samples conditional on $\sum_{i} N_{i} i^{2} = E$. This is a discrete
version of the authors' sampling from simplices and spheres. We do not currently have discrete versions of Hamiltonian dynamics apart from numerical schemes (leapfrog) that are used to solve the resulting differential equations as proposed by \citet{neal2011}. In contrast, [BG] compute the dynamics by splitting up the Hamiltonian into two analytically solvable parts.
We wonder whether the authors can suggest adaptations of their ideas to the discrete framework.
\subsubsection*{Non-Smooth Manifolds}
[BG] start with the Hausdorff measure from \textit{geometric measure theory} \citep{federer1969,morgan2009,diaconis2012} as a general way to define surface areas for non-smooth manifolds in arbitrary dimensions. We wonder if this is a bit misleading, since all subsequent developments and examples in the paper focus on homogeneous smooth manifolds.
One example for which the methodology presented runs into difficulties is the barbell \citep{Grayson1989}, parametrized as:
\begin{equation*}
B\begin{pmatrix}x \\ \theta \end{pmatrix} = \begin{pmatrix} x \\ f(x) \cos(\theta) \\ f(x) \sin(\theta) \end{pmatrix}, \hspace{0.2cm} 0 \le \theta < 2\pi,
\end{equation*}
with changing radius:
\begin{equation*}
f(x) =
\begin{cases}
r \cosh\left(\frac{|x|-l}{r}\right) & \text{if } |x| > l \\
r & \text{otherwise.}
\end{cases}
\end{equation*}
The difficulties arise at the corner of the transition from the bar to the bell section in the first coordinate of $B$ at position $|x| = l$. The derivative at these points is not defined.
In contrast, the geometric measure theory approach handles such difficulties by realizing that sets of area 0 do not influence the integral over a manifold. The intuition is that the line dividing the bar and the bell is a line which is negligible for computing two-dimensional integrals.
Following this approach as described in \citet{diaconis2012}, we sample $x$ from the unnormalized surface measure:
\begin{equation*}
\sqrt{\operatorname{det} \left[ \operatorname{\bold{D}}B\begin{pmatrix}x \\ \theta \end{pmatrix} \right]^{T} \left[ \operatorname{\bold{D}}B\begin{pmatrix}x \\ \theta \end{pmatrix} \right] } =
\begin{cases}
r \cosh^{2}\left(\frac{|x|-l}{r}\right) & \text{if } |x| > l \\
r & \text{otherwise.}
\end{cases}
\end{equation*}
The R code snippet (Code \ref{Code:RejectionSampling}) generates samples using rejection sampling for parameter $x$.
\begin{code}[b]
\begin{verbatim}
n = 5e3; r = 1; l = 2; L = 4
xprop = runif(n, min = -L, max = L)
eta = runif(n, min = 0, max = (r * cosh((abs(L) - l)/r)^2))
x = c()
for (i in 1:length(xprop)) {
if (abs(xprop[i]) > l) {
if (eta[i] < (r * cosh((abs(xprop[i]) - l)/r)^2)) {
x = c(x, xprop[i])
}
} else {
if (eta[i] < r) {
x = c(x, xprop[i]) }}}
\end{verbatim}
\caption{Rejection sampling yielding $x$.}
\label{Code:RejectionSampling}
\end{code}
From these samples, and $\theta$ drawn uniformly between $0$ and $2\pi$, we can plot the barbell with points uniformly distributed with respect to its surface area (Figure \ref{fig:BarbellUniformSufraceArea}). If we sampled points uniformly from the parameters $x$, we would obtain higher point density on the bar than on the bell section due to higher curvatures.
\begin{figure}
\caption{The barbell is an example of a non-smooth manifold.}
\label{fig:BarbellUniformSufraceArea}
\end{figure}
We are curious to know why [BG] decided to include the geometric measure theory part in the introduction rather than simply focusing on Riemannian manifolds and the Riemannian volume form.
\subsubsection*{Consistency of Bayes Estimates on Manifolds.}
Two different philosophical view points are crucial to study the consistency of Bayes estimates, namely ``classical'' and ``subjectivistic''.
The classical view point studies the consistency of Bayes estimates assuming the existence of a fixed underlying parameter. In this context, we consider the posterior Bayes estimate to be consistent w.r.t. a prior if it converges to the underlying parameter as the number of imaginary observations tends to infinity.
On the other hand, the subjectivistic view point rejects the notion of a fixed underlying parameter. In this context, we rather evaluate whether two different priors created by two different imaginary statisticians converge to the same posterior estimate as the number of imaginary observations tends to infinity.
We can analyze the derivative of the map that sends the prior to the posterior measure. This helps to evaluate how the posterior reacts to small changes in the prior. In this fashion, we can study an infinite amount of imaginary statisticians and how their beliefs affect the outcome of Bayesian analysis.
We introduced these concepts in \citet{DiaconisFreedman1986} for Euclidean spaces, and we are interested in how these results translate to the case of smooth and non-smooth manifolds. Some initial work towards addressing these questions can be found in \citet{Bhattacharya2012}.
\subsubsection*{Manifold and Metric Learning}
In the absence of a manifold parametrization we might want to estimate it from
data. Recent advances by \citet{PerraulJoncas2013} on unifying manifold
learning methods into a consistent framework by learning the Riemannian metric
in addition to the manifold and its embedding are promising but build upon the
assumption of uniform sampling density on the manifold
\citep{VonLuxburg2008,Belkin2007}. But what if the sampling of the data is not
related to the geometry of the manifold? In this case, we want to find the
manifold that is consistent for a family of distributions for a given set of
data points. From a Bayesian perspective, we could study non-uniform density
distributions on the manifold through the derivative of the map from prior to
posterior measure, analogous to consistency evaluations of Bayes estimates.
\subsubsection*{Applications in Computational Anatomy}
Among many potential fields of application, we would like to highlight \textit{computational anatomy} \citep{Miller2004,Younes2010,Marsland2012}.
The main goal of computational anatomy is to compare shapes of organs (e.g. brain, heart and spine) observed from computed tomography (CT) and magnetic resonance imaging (MRI). Statistical analysis of shape differences can be useful to understand disease related changes of anatomical structures. The key idea is to estimate transformations between a template and patient anatomies. These transformations encode the structural differences in a population of patients. There is a wide range of groups of transformations that have been studied, ranging from rigid rotations to infinite dimensional groups of diffeomorphisms. What the elements across these groups have in common is that they do not live in Euclidean space but on more general manifolds. Currently, most transformation estimators are based on optimization of a cost function. In the future, we envision Bayesian approaches along the line of \cite{SeilerGSI2013} with the help of methodologies proposed in this paper.
\subsubsection*{Future Directions}
The paper suggests new research questions: how long should the new algorithms be run to ensure that the resulting distributions are usefully close to their stationary distribution? We haven't seen any careful analysis of Hybrid Monte Carlo in continuous
problems (we mean quantitative, non-asymptotic bounds as in \citet{HorbertJones}).
A first effort was made in a toy problem in \citet{DiaconisHolmesNeal}.
The authors work with `nice manifolds', often manifolds are only given implicitly,
with local coordinate patches. Our work \citep{diaconis2012} did not deal with this problem, we would love to have help from the authors to make progress in these types of applications.
\section*{Comment}
\subsection*{Ian L.~Dryden\footnote{School of Mathematical Sciences, University of Nottingham}}
The authors have introduced an interesting and mathematically intricate method for
Markov chain Monte Carlo simulation on an embedded manifold. The geodesic Monte Carlo (MC) method provides large
proposals as part of the scheme, which are devised by careful study of the
Riemannian geometry of the space and the geodesics in
particular. The aim of the resulting algorithm is to produce a chain with low
autocorrelation and high acceptance probabilities. As displayed by the authors,
the method is well geared up for simulating from unimodal distributions on a manifold
via the gradient of the log-density and the geodesic flow. They also demonstrate its effective
use in multimodal scenarios via parallel tempering. Given that there are always many choices of embedding,
should one choose an embedding of as low a dimension as possible?
There are various levels of approximation in the algorithm and so it is worth exploring in
any specific application if simpler algorithms can end up providing more efficient or more
accurate simulations. Consider the Fisher-Bingham example, and recall that the
Fisher-Bingham $(c,A)$ distribution can be defined as
\[ \{ X | \|X\|=1 \} \; \; \; {\rm where} \; \; X \sim N_p( \mu , \Sigma ) , \]
with $\mu = -\frac{1}{2}(A+aI_p)^{-1}c, \Sigma = -\frac{1}{2}(A + aI_p)^{-1}$,
$a$ is chosen such that $(A+aI_p)$ is negative definite \citep[see][p.175]{mardia2000} and $I_p$ is the $p \times p$ identity matrix.
Since the Fisher-Bingham density is unchanged by adding $aI_p$ to $A$, we can, for example,
choose $a$ such that ${\rm trace}(\Sigma) = 1$.
The integrating constant of the Fisher-Bingham
can be expressed in terms of the density of a linear combination of noncentral
$\chi^2_1$ random variables \citep{Kumewood05}, which can be evaluated using a saddlepoint approximation.
Hence simulation via rejection methods is feasible.
An even simpler approach when $c$ is small could be to simulate from
$Y \sim N_p( \mu , \Sigma )$, and then keep only the observations that fall within
$| \|Y \|-1 | < \nu$, for small $\nu > 0$. This naive conditioning method
might appear rather inefficient, but the accepted
observations are independent draws. Note that if the dimension
$p$ is large and $X$ Bingham distributed with ${\rm trace}(\Sigma) = 1, {\rm trace}(\Sigma^2) \approx 1$,
$c=0$ then from \citet{Dryden05} we have the approximation
$X \approx N_p( 0 , \Sigma)$. Hence, even for large $p$ this can still be a practical
method for certain $\Sigma$.
In Figure \ref{FIG1} we show the results of this algorithm in the example from
Section 5.1 of the paper, with $c=0$ and with 2 billion proposals and $\nu = 2 \times 10^{-6}$. Here
$a=-23.06176$
and the acceptance rate is $0.00033\%$.
\begin{figure}
\caption{Simulated values of $x_5$ for the Fisher-Bingham example with $c=0$. There are 6588
simulated values from 2 billion proposals.}
\label{FIG1}
\end{figure}
There is always a trade-off with any simulation method, and one needs to compromise
between the level of approximation (through $\nu$ here), the efficiency in run time, the independence
of observations and the amount of coding involved in the implementation.
For this Bingham example the naive conditional method
seems reasonable here, giving independent, near exact realisations and very minimal effort in coding.
However, the beauty of the geodesic MC method of the paper is that the algorithm is quite general, and so
can be tried out in a range of scenarios where there may be no reasonable alternative.
\section*{Comment}
\subsection*{John T. Kent\footnote{Department of Statistics, University of Leeds, Leeds LS2 9JT, UK}}
Statistical distributions on manifolds have become an increasingly
important component of geometrically-motivated high-dimensional
sophisticated statistical models in recent years. For example,
\citet{green2006} used the matrix Fisher distribution for random
$3 \times 3$ rotation matrices as part of a high-dimensional Bayesian
model to align two unlabelled configurations of points in
$\mathbb{R}^3$, with an application to a problem of protein alignment
in bioinformatics. MCMC simulations often form the standard
methodology for fitting such high-dimensional models. Hence there is
a growing interest in developing efficient and general methods for
simulating distributions on manifolds in their own right. The paper
makes a very valuable contribution in this area.
However, although MCMC is a very general and very powerful
methodology, it is inherently potentially slow and cumbersome to use
in practice, due to the formal need to run a Markov chain to
convergence. Hence when quicker alternatives (such as acceptance
rejection algorithms) are available, it is important to be aware of
them.
Recent developments in acceptance rejection algorithms on spheres and
related manifolds have greatly increased the scope of acceptance
rejection methods for distributions such as Fisher, Bingham and
Fisher-Bingham. The underlying idea is to use the angular central
Gaussian distribution (which is easy to simulate) as an envelope for a
Bingham distribution. In turn the Bingham distribution can be used as
an envelope for the Fisher and Fisher-Bingham distributions. The
basic idea works in all dimensions. Further the efficiencies can
often be guaranteed to be very reasonable.
As an elegant application of this general methodology, consider the
matrix Fisher distribution on SO(3), the special orthogonal group of
$3 \times 3$ rotation matrices. This distribution is often used to
model unimodal behavior about a preferred rotation matrix. There is
an elegant mathematical identity between $SO(3)$ and $S_3$, the unit
sphere in 4 dimensions, and it also follows that the matrix Fisher
distribution on $SO(3)$ can be identified with the Bingham
distribution on $S_3$. Hence the new method for the Bingham
distribution can be used directly for the matrix Fisher in this
setting. It can be shown that the efficiency of this new acceptance
rejection simulation method is very respectable; it is bounded below
by 45\% for all values of the parameters. More details can be found
in \citet{kent2013}.
It must be conceded that this new acceptance rejection methodology is
not a panacea. In particular, for product manifolds there is often
currently no alternative to MCMC. But for the simpler cases, the
acceptance rejection methods can be very effective.
\section*{Comment}
\subsection*{Marcelo Pereyra\footnote{Department of Mathematics, University of Bristol}}
I congratulate the authors for an interesting paper and an important methodological contribution to the problem of sampling from probability distributions on manifolds. As an image processing researcher I shall restrict my comments to the potential of the proposed methodology for statistical signal and image processing. There are numerous new and exciting signal and image processing applications that require performing statistical inference on parameter spaces constrained to submanifolds of $\mathbb{R}^n$ and for which the proposed HMC algorithm is potentially interesting. For example, there are many \emph{unmixing} or \emph{source separation} problems that require estimating parameters that, because of physical considerations, are subject to positivity and sum-to-one constraints (i.e. constrained to a simplex) \citep{Golbabaee_2012a}. For instance, the estimation of abundances (or proportions) of different materials and substances within the pixels of a satellite hyperspectral image \citep{Bioucas_IEEE_JSTARS_2012}. These images are increasingly used in environmental sciences to monitor the evolution of vegetation in rainforests and in agriculture to forecast crop yield. Similar spectral imaging technologies are now used in material science and chemical analysis \citep{Dobigeon_Ultramicroscopy_2012}. Moreover, another important example of signal processing on manifolds is dictionary learning for sparse signal representation and compressed sensing, which involves estimating a set of orthonormal vectors constrained to a Stiefel manifold \citep{Dobigeon_IEEE_Trans_SP_2010}. Similar models arise in compressed sensing of low-rank matrices, which find applications in sensor networks and sparse principal component analysis \citep{Golbabaee_2012b}. The methodology presented in this paper is potentially very interesting for these and many other modern applications. 
However, in order for the proposed HMC algorithm to be widely adopted in signal processing it is fundamental to introduce efficient adaptation mechanisms to tune the HMC parameters automatically. I wonder whether the authors have considered an adaptive version of their algorithm, possibly by using an approach similar to the one recently presented in \citet{wang2013} for other HMC algorithms. The publication of an open-source MATLAB toolbox would also contribute greatly to its dissemination in the statistical signal and image processing communities.
Modern signal processing and machine learning applications have motivated the development of powerful new methods to perform statistical inference on high-dimensional manifolds. Most effort has been devoted to the development of new optimization methods that give access to maximum a posteriori estimates \citep{Combettes2011,Alfonso2012}. Sampling methods in general and the proposed HMC algorithm in particular can allow performing a significantly richer Bayesian analysis (i.e. they allow approximating expectations such as posterior moments, posterior probabilities or quantiles, and Bayesian factors useful for hypothesis testing and model choice). Therefore the methodology presented in this paper has the potential to not only impact the specific applications mentioned above, but to sustain and promote the adoption of Bayesian methods in general in signal and image processing.
Finally, it would be interesting to explore connections between the proposed HMC algorithm and state-of-the-art optimisation methods for parameters constrained to manifolds \citep{Combettes2011,Alfonso2012}. A first step in this direction could be the paper I authored \citep{Pereyra2013} which highlights the great potential for synergy between MCMC and modern convex optimisation.
\section*{Comment}
\subsection*{Babak Shahbaba\footnote{Department of Statistics and Department
of Computer Science, University of California, Irvine, USA.}, Shiwei
Lan\footnote{Department of Statistics, University of California, Irvine,
USA.} and Jeffrey Streets\footnote{Department of Mathematics, University of California, Irvine, USA.} }
We would like to start by congratulating Byrne and Girolami for writing such a thoughtful and extremely interesting paper. This is in fact a worthy addition to other high impact papers recently published by Professor Girolami's lab in this field. The common theme of these papers is to use geometrically motivated methods to improve efficiency of sampling algorithms. In their seminal paper, \citet{girolami2011} propose a novel HMC method, called Riemannian Manifold Hamiltonian Monte Carlo (RMHMC), that adapts to the local geometry of the parameter space. While this is a natural and beautiful idea, there are significant computational difficulties which arise in effectively implementing this algorithm. In contrast, in this current contribution, Byrne and Girolami focus on special probability distributions which give rise to particularly nice Riemannian geometries. In particular, the examples under consideration described in section 4 allow for closed-form
solutions to the geodesic equation, which can be used to reduce computational cost of geometrically motivated Monte Carlo methods.
While the proposed splitting algorithm is quite interesting, we initially doubted its impact since Riemannian metrics with closed-form geodesics are extremely rare. However, we are now convinced that this approach will likely see application beyond what is outlined herein. For example, we believe that this approach can be used to improve computational efficiency of sampling algorithms when the parameter space is constrained. The standard HMC algorithm needs to evaluate each proposal to ensure it is within the boundaries imposed by the constraints. Alternatively, as discussed by \citet{neal2011}, one could modify standard HMC so the sampler bounces back after hitting the boundaries. In Appendix A, Byrne and Girolami discuss this approach for geodesic updates on the simplex.
In many cases, a constrained parameter space can be bijectively mapped to a unit ball, ${\bf B}_0^D(1):=\{\theta\in\mathbb R^D: \Vert \theta\Vert_2 =\sqrt{\sum_{i=1}^D \theta_i^2}\leq 1\}$. Augmenting the parameter space with an extra auxiliary variable
$\theta_{D+1} = \sqrt{1-\Vert \theta\Vert_2^2}$, we could form an extended parameter space, $\tilde \theta = (\theta, \theta_{D+1})$ so that the domain of the target distribution
changes from unit ball ${\bf B}_0^D(1)$ to \emph{$D$-Sphere} ${\bf S}^D =\{\tilde \theta\in
\mathbb R^{D+1}: \Vert \tilde\theta\Vert_2=1\}$,
\begin{equation}\label{b2s}
T_{{\bf B}\to {\bf S}}: {\bf B}_0^D(1)\longrightarrow {\bf S}^D, \quad \theta \mapsto \tilde\theta = (\theta, \pm\sqrt{1-\Vert \theta\Vert_2^2})
\end{equation}
Sampling from the distribution of $\tilde\theta$ on ${\bf S}^D$ can be done efficiently using the Geodesic Monte Carlo approach, which allows the sampler to move freely on ${\bf S}^D$, while its projection onto the original space always remains within the boundary. This way, passing across the equator from one hemisphere to the other will be equivalent to reflecting off the boundaries as shown in Figure \ref{fig:B2S}.
\begin{figure}
\caption{Transforming the unit ball ${\bf B}_0^D(1)$ to the sphere ${\bf S}^D$.}
\label{fig:B2S}
\end{figure}
Our last comment is related to the embedding procedure discussed in Section 3.2. We wonder if such embedding and the resulting extra step for projection could be avoided by writing the dynamics in terms of $(q, v)$ in the first place and splitting it as follows:
\begin{eqnarray}
\left\{\begin{array}{lcl}
\dot q & = & 0\\
\dot v & = & G^{-1} \nabla \log \pi_{\mathcal H}(q)
\end{array}\right.\label{LD:U}
\qquad
\left\{\begin{array}{lcl}
\dot q & = & v\\
\dot v & = & -v^T \Gamma v
\end{array}\right.\label{LD:K}
\end{eqnarray}
where $\Gamma$ is the Christoffel symbol of second kind. The second dynamics in \eqref{LD:K} is regarded as the general geodesic equation:
\begin{equation}
\ddot q + \dot q^T \Gamma \dot q = 0
\end{equation}
The first dynamics in \eqref{LD:U} is solved in terms of $(q,v)$ in a more natural way:
\begin{equation}
q(t) = q(0) \quad\textrm{and}\quad v(t) = v(0) + tG(q)^{-1}\nabla_{q} \log \pi_{\mathcal H}(q) \big|_{q=q(0)}
\end{equation}
This way, we avoid the additional projection step and have $v(t)\in T_{q(t)}\mathcal M$ as long as $v(0)\in T_{q(0)}\mathcal M$. This also serves to isolate what seems to be the key point in this work, which is not that the dynamics are taking place on an embedded manifold, but that they are taking place on a manifold \emph{whose geodesics are known explicitly}. With this viewpoint the applicability of the ideas of this paper should be further expanded.
\section*{Comment}
\subsection*{Daniel Simpson\footnote{Department of Mathematical Sciences,
Norwegian University of Science and Technology, N-7491 Trondheim, Norway. Email: \texttt{[email protected]}}}
The basic idea of simulation-based inference is that we can approximately
calculate anything we like about a probability distribution if we can draw
independent samples from it. This means that we can use sampling to explore
the posterior distribution and it turns out that the quantities we compute
will usually have an error of $\mathcal{O}(N^{-1/2})$ if they are calculated
from $N$ samples. Unfortunately, in almost any realistic situation, we cannot
directly simulate from the posterior, however the remarkable (and their
ubiquity really shouldn't detract from just how remarkable MCMC methods are)
Markov Chain Monte Carlo idea says that it's enough to take a chain of
dependent simulations that are heading towards the posterior distribution and
use these simulations to calculate any quantities of interest. The variance in
the estimators still decays like $\mathcal{O}(N^{-1/2})$ and they pretty much
always work eventually. (There is, of course, an entire world of details
being suppressed within the world `eventually'.)
The problem with vanilla (Metropolis Hastings) MCMC methods is that they are
slow. It's fairly easy to see why this is true: whereas perfect Monte Carlo
methods `know' enough about the posterior to produce perfect samples,
Metropolis Hastings algorithms only require the ability to calculate ratios of
the posterior density. For simple models, this may not be a problem, but as
the posterior distribution becomes more complicated, it's fairly
straightforward to imagine that the efficiency of schemes based on simple
proposals will plummet. Byrne and Girolami consider the even more complicated
situation where the natural parameters of the model have a non-linear
structure. These type of models arise frequently in ecology. A simple example
occurs when modelling community structure in ecology, in which case the
association between the occurrence of different species is modelled as a
symmetric positive definite matrix \citep{ovaskainen2011making}. A more
complicated example occurs in paleoclimate reconstruction, where one is often
required to model `compositional data', that is proportions (rather than
counts) of different types of pollen in a sample \citep{salter2006modelling}.
A simple model for proportions is the Dirichlet distribution, however this is
frequently unsuitable due to real compositional data having a large number of
zero proportions. More complicated distributions for proportions can be
written as distributions on simplexes, which are considered by
Byrne and Girolami. In these situations, it is often not even obvious how to
construct \emph{bad} proposals, let alone efficient ones!
Typically, however, we know a lot about the model that we are trying to infer.
In this case, it makes sense to include all of the information that we have in
order to make the MCMC algorithm explore the posterior in a more efficient
manner. In particular, people working within well understood statistical
frameworks, such as modelling with latent Gaussian models, have been able to
use analytical results to design MCMC schemes \citep{art192,art412} or other
approximate inference methods \citep{art451}. For more general statistical
models, \citet{girolami2011} developed a general framework for
constructing efficient MCMC schemes based on the classical links between
statistical modelling and differential geometry.
The innovation of \citet{girolami2011} is to provide an essentially
automatic way to improve MCMC performance by using standard concepts from
statistical asymptotics. The idea is that, even if we don't know everything we
would like to know about the posterior distribution, we can approximate what
it's like ``on average''. Specifically, this means that we can, for each
point in the parameter space, find a Gaussian distribution that locally looks
like an average posterior (where the average is taken over the data). We can
then construct a proposal distribution based on this approximation and it is
reasonable to expect it to perform better than a naive choice.
\citeauthor{girolami2011} proposed two basic types of algorithm: The
first was a version of the Metropolis-adjusted Langevin algorithm (MALA) that uses this
approximation to propose a new value that's nearby the current point, while
the second algorithm is a version of Hamiltonian Monte Carlo (HMC) that chains
together a number of these local approximations to try to make a proposal in a
distant part of the parameter space. As such, one expects HMC to be more
statistically efficient (that is, the samples are less dependent), while the
MALA proposals are more computationally efficient (that is, they take less
time to compute).
The method described by \citet{girolami2011} is more general than the one described above. Their framework, which is described in the language of differential geometry, allows for almost any type of local second-order structure. For common problems, where the parameter space is $\mathbb{R}^d$, the only requirement is that each point in the parameter space is associated in a smooth way with a symmetric positive definite matrix. In this case, it makes sense for these matrices to be built from local approximations to the posterior distribution and the whole scheme can be easily described without ever appealing to the slightly intimidating notion of a manifold.
The case considered by Byrne and Girolami is different. Here the parameter space isn't flat and the notion of a manifold becomes essential to defining good inference schemes. The methods considered by Byrne and Girolami are different from the geometrically simpler models considered by \citet{girolami2011}. Rather than introducing a geometric structure in order to better explore a distribution on $\mathbb{R}^n$, Byrne and Girolami use the \emph{natural} geometry of the parameter space to construct a proposal. It is unsurprising that this strategy results in efficient MCMC schemes: it is almost universally true that numerical methods that are consistent with the underlying structure of the problem are more efficient than those that aren't!
That is not to say that the extra efficiency from using the problem's natural manifold structure comes for free. Hamiltonian Monte Carlo methods are based on the approximate integration of Hamilton's equations, which are symplectic ordinary differential equations in position and momentum space. Integrating symplectic ODEs is an active field of research and actually implementing these integrators can be quite challenging. In particular, the HMC method proposed by \citet{girolami2011} requires, at each step, the solution of a non-linear system of equations, which can cause the manifold HMC proposal to catastrophically fail if it is programmed incorrectly. Fortunately, Byrne and Girolami show that when the parameter space is an embedded manifold, it is possible to use a much simpler integrator. In order for their splitting technique to be applicable, it is necessary to have an explicit expression for geodesic flow on the parameter manifold and, in the cases considered in the paper, this exists. Given an explicit form of the geodesic, one only has two choices left: the step size $\epsilon$ and the number of steps $N$ in each proposal. The performance of HMC methods is known to be very sensitive to these parameters, however recent advances in (non-manifold) HMC suggest that it is possible to adaptively select these in an efficient manner \citep{hoffman2013}.
As Byrne and Girolami have focused on building HMC methods on embedded manifolds, it is instructive to examine the barriers to similarly generalising the manifold MALA schemes. Recall that MALA-type methods on $\mathbb{R}^n$ are biased random walks that propose a new value $\theta^*$ as $$\theta^* - \theta^{(k)} \sim N(\mu( \theta^{(k)}), H( \theta^{(k)})^{-1}),$$ where the specific forms of $\mu(\cdot)$ and $H(\cdot)$ are irrelevant to this discussion. The problem with generalising this type of proposal to a manifold is obvious: the subtraction operation does not make sense. One way around this problem is to take a lesson from the optimisation literature and note that we can make sense of this proposal using tangent spaces and exponential mappings (or, more generally, retractions)~\citep{absil2009optimization}. In this case, we propose $$
\theta^* = R_{\theta^{(k)}}(p^{(k)}),
$$ where $R_{\theta^{(k)}}(\cdot) : T_{\theta^{(k)}}\mathcal{M} \rightarrow \mathcal{M}$ is a retraction map and $p^{(k)} \sim N(\mu( \theta^{(k)}), H( \theta^{(k)})^{-1})$ is a random vector in the tangent space $T_{\theta^{(k)}}\mathcal{M}$ \citep{absil2009optimization}. The problem with this proposal mechanism is that it is not obvious how to compute the proposal density, which is required when computing the acceptance probability. Hence, there is no clear way to design a MALA-type scheme that respects the non-linear structure of the parameter space.
\section*{Rejoinder}
\subsection*{Simon Byrne and Mark Girolami\footnote{Department of Statistical
Science, University College London}}
We would like to thank all respondents for their interesting comments, which
clearly identify exciting areas for further investigation.
Both Kent and Dryden highlight recent developments in rejection sampling
methods for obtaining independent samples from distributions on
manifolds. Such methods are obviously preferable when available, however as
mentioned in Section~5.1, the danger is that rejection-based techniques
can have exponentially low acceptance rates, particularly in
higher-dimensional problems. Indeed the impressive results of Kent, Ganeiber,
and Mardia in avoiding this problem by obtaining constant lower-bounds of the
acceptance rates highlights the importance of considering the underlying
geometry of the manifold.
Pereyra and Simpson point out the many links with optimisation: indeed
optimisation over manifolds has a rich history, and there is a wealth of
literature with many interesting algorithms. However, as Simpson points out,
many of these algorithms are based on projection operators, and thus we face
what could be described as the ``Curse of Detailed Balance'': the difficulty of
computing of the reverse proposal, which is required for the evaluation of the
acceptance ratio to ensure we are targeting the correct invariant
density. Hamiltonian-based methods are able to exploit symplectic geometric
structure---namely reversibility and volume preservation---in a manner that
makes this almost trivial.
We are very excited to see that Shahbaba, Lan and Streets have had success
with these methods. We agree entirely with their point that it is the explicit
geodesics, and not the embedding, which makes this method successful: our
reason for using the embeddings is that in all cases we identified, the
embeddings proved convenient to work with. Our reason for using the projection
is that this is typically of lower computational cost than inversion of $G$.
As several commenters point out, despite its long history, remarkably little
is known about the theoretical properties of the HMC algorithm, especially
when compared to say Gibbs sampling and Metropolis--Hastings algorithms based
on random-walks and Langevin diffusions. In particular one open question is
the optimal tuning of the step-size and integration length
parameters. Unfortunately HMC is not readily amenable to the usual
probabilistic tools, such as links to diffusions, due to the precise property
that makes it so powerful: the ability to simulate long trajectories and make
distant proposals. This is an open question, attracting interest from numerous
researchers.
The paper by \citet{wang2013} proposes an empirical Bayesian optimisation
approach, but this comes with significant overhead in obtaining sufficient
samples on which to base the objective function, and provides little insight
into theoretical behaviour. We think that future advances will perhaps require
a larger set of tools, such as exploiting the rich geometric structure and
elegant numerical properties of Hamiltonian
methods \citep[e.g.][]{hairer2006}. This is already an area of active
research, for instance the recent work of \citet{beskos2013} utilises the
tools of backward error analysis to obtain an asymptotic-in-dimension bound of
the optimal acceptance rate. Other recent advances are the ``no U-turn''
approach of \citet{hoffman2013}, which seeks to truncate the integration
path based on a geometric criterion, and the general-purpose SoftAbs metric
of \citet{betancourt2013} for RMHMC. Nevertheless, there are many
interesting open questions in this area, which we intend to pursue further.
As Pereyra points out, the development of software toolboxes will greatly
lower the barrier to implementation, enhancing the utility of these
methods. Indeed, the venerable BUGS software and its descendants have
revolutionised applied Bayesian statistics over the past twenty years. The
rapidly-developing STAN library \citep{stan}, aims to do the same using
HMC, incorporating tools such as automatic differentiation to simplify the interface,
and its impressive early results seem set to make it the heir-apparent to
BUGS. As we mention in the section on product manifolds, our methods dovetail
elegantly within a larger HMC scheme, and so would be a natural fit for such
software.
Although we derived Geodesic Monte Carlo in terms of smooth manifolds, it can
be easily extended to manifolds made of smooth patches, such as the barbell
example proposed by Diaconis, Seiler and Holmes, by appropriately modifying
the direction of the particle whenever it passes the boundary, in a similar
manner to the reflections used to constrain the particle to the simplex. Of
course this requires an explicit form of the geodesic of each patch, as well
as computing the point at which the particle crossed the boundary. A more
desirable approach would be to transform the space to a smooth manifold,
ideally preserving the topology, for instance the barbell could be transformed
into a cylinder.
One great challenge is extending HMC beyond Euclidean spaces. As mentioned by
Diaconis et al., there is not an obvious analogue of HMC for discrete
spaces. In certain circumstances, it can be possible to augment the space with
additional continuous variables, which can allow the discrete variables to be
easily marginalised out, for example \citet{zhang2012} use a
Hubbard--Stratonovich transformation to apply HMC to the Ising model. Diaconis
et al.\ also mention infinite-dimensional spaces such as diffeomorphism
groups: \citet{beskos2011} has demonstrated that HMC can be defined and
implemented for Hilbert spaces, and it would be exciting, both from a
theoretical and a numerical perspective, to extend it to yet more general
spaces.
These many open research questions will no doubt be developed in the coming
years, including theoretical analysis, methodological development and applications
to significant new and exciting areas.
\printbibliography
\end{document} |
\begin{document}
\title{A Linear-Quadratic Stackelberg Differential Game with Mixed Deterministic and Stochastic Controls%
\thanks{Shi acknowledges the financial support by the National Key R\&D Program of China under Grant No. 2018YFB1305400, and by the NSFC under Grant Nos. 11971266, 11571205, 11831010. Wang acknowledges the financial support by the NSFC for Distinguished Young Scholars under Grant No. 61925306.}}
\noindent{\bf Abstract:}\quad This paper is concerned with a linear-quadratic (LQ) Stackelberg differential game with mixed deterministic and stochastic controls. Here in the game, the follower is a random controller which means that the follower can choose adapted random processes, while the leader is a deterministic controller which means that the leader can choose only deterministic time functions. An open-loop Stackelberg equilibrium solution is considered. First, an optimal control process of the follower is obtained by maximum principle of controlled stochastic differential equation (SDE), which is a linear functional of optimal state variable and control variable of the leader, via a classical Riccati equation. Then an optimal control function of the leader is got via a direct calculation of derivative of cost functional, by the solution to a system of mean-field forward-backward stochastic differential equations (MF-FBSDEs). And it is represented as a functional of expectation of optimal state variable, together with solutions to a two-point boundary value problem of ordinary differential equation (ODE), by a system consisting of two coupled Riccati equations. The solvability of this new system of Riccati equation is discussed.
\noindent{\bf Keywords:}\quad Stackelberg differential game, mixed deterministic and stochastic controls, linear-quadratic control, feedback representation of optimal control, mean-field forward-backward stochastic differential equation
\noindent{\bf Mathematics Subject Classification:}\quad 93E20, 49K45, 49N10, 49N70, 60H10
\section{Introduction}
In this paper, we use $\mathbb{R}^n$ to denote the Euclidean space of $n$-dimensional vectors, $\mathbb{R}^{n\times d}$ to denote the space of $n\times d$ matrices, $\mathbb{S}^n$ to denote the space of $n\times n$ symmetric matrices. For a matrix-valued function $R:[0,T]\rightarrow\mathbb{S}^n$, we denote by $R\geqslant0$ that $R_t$ is uniformly positive semi-definite for any $t\in[0,T]$. For a matrix-valued function $R:[0,T]\rightarrow\mathbb{S}^n$, we denote by $R\gg0$ that $R_t$ is uniformly positive definite, i.e., there is a positive real number $\alpha$ such that $R_t\geq\alpha I$ for any $t\in[0,T]$. $\langle\cdot,\cdot\rangle$ and $|\cdot|$ are used to denote the scalar product and norm in some Euclidean space, respectively. $A^\top$ appearing in the superscript of a matrix, denotes its transpose. $\mbox{trace}[A]$ denotes the trace of a square matrix $A$. $f_x,f_{xx}$ denote the first- and second-order partial derivatives with respect to $x$ for a differentiable function $f$, respectively.
Let $(\Omega,\mathcal{F},\mathbb{P})$ be a complete probability space, on which an $\mathbb{R}^d$-valued standard Brownian motion $\{W_t\}_{t\geq0}=\{W^1_t,W^2_t,\cdots,W^d_t\}_{t\geq0}$ is defined. $\{\mathcal{F}_t\}_{t\geq0}$ is the natural filtration generated by $W(\cdot)$ which is augmented by all $\mathbb{P}$-null sets, and $T>0$ is a fixed finite time duration. $\mathbb{E}$ denotes the expectation with respect to the probability measure $\mathbb{P}$.
We will use the following notations. $L_{\mathcal{F}_T}^2(\Omega;\mathbb{R}^n)$ denotes the set of $\mathbb{R}^n$-valued, $\mathcal{F}_T$-measurable random vectors $\xi$ with $\mathbb{E}\big[|\xi|^2\big]<\infty$, $L^2_\mathcal{F}(0,T;\mathbb{R}^n)$ denotes the set of $\mathbb{R}^n$-valued, $\mathcal{F}_t$-adapted processes $f$ on $[0,T]$ with $\mathbb{E}\big[\int_0^T|f(t)|^2dt\big]<\infty$, $L^2_\mathcal{F}(0,T;\mathbb{R}^{n\times d})$ denotes the set of $n\times d$-matrix-valued, $\mathcal{F}_t$-adapted processes $\Phi$ on $[0,T]$ with $\mathbb{E}\big[\int_0^T|\Phi(t)|^2dt\big]=\mathbb{E}\big[\int_0^T\mbox{trace}[\Phi(t)^\top\Phi(t)]dt\big]<\infty$, and $L^2(0,T;\mathbb{R}^n)$ denotes the set of $\mathbb{R}^n$-valued functions $f$ on $[0,T]$ with $\int_0^T|f(t)|^2dt<\infty$.
We consider the state process $x^{u,w}:\Omega\times[0,T]\rightarrow\mathbb{R}^n$ satisfies a linear SDE
\begin{equation}\label{state equation}
\left\{
\begin{aligned}
dx^{u,w}_t&=\big(A_tx^{u,w}_t+B^1_tu_t+B^2_tw_t\big)dt+\big(C_tx^{u,w}_t+D^1_tu_t+D^2_tw_t\big)dW_t,\ t\in[0,T],\\
x^{u,w}_0&=x.
\end{aligned}
\right.
\end{equation}
Here for simplicity, we denote $\big(C_tx^{u,w}_t+D^1_tu_t+D^2_tw_t\big)dW_t=\sum\limits_{j=1}^d\big(C_t^jx^{u,w}_t+D^{1j}_tu_t+D^{2j}_tw_t\big)dW_t^j$ with $A,B^1,B^2,C^j,D^{1j}$ and $D^{2j}$ being all bounded Borel measurable functions from $[0,T]$ to $\mathbb{R}^{n\times n},\mathbb{R}^{n\times k_1},\mathbb{R}^{n\times k_2},\mathbb{R}^{n\times n},\mathbb{R}^{n\times k_1}$ and $\mathbb{R}^{n\times k_2}$, respectively. Similar notations are used in the rest of this paper. In the above, $u:\Omega\times[0,T]\rightarrow\mathbb{R}^{k_1}$ is the follower's control process and $w:[0,T]\rightarrow\mathbb{R}^{k_2}$ is the leader's control function. Let $\mathcal{U}^1_{ad}=L^2_\mathcal{F}(0,T;\mathbb{R}^{k_1})$ and $\mathcal{U}^2_{ad}=L^2(0,T;\mathbb{R}^{k_2})$ be the {\it admissible control} sets of the follower and the leader, respectively. That is to say, the control process $u$ of the follower is taken from $\mathcal{U}^1_{ad}$ and the control function $w$ of the leader is taken from $\mathcal{U}^2_{ad}$.
For given initial value $x\in\mathbb{R}^n$ and $(u,w)\in\mathcal{U}^1_{ad}\times\mathcal{U}^2_{ad}$, it is classical that there exists a unique solution $x^{u,w}\in L^2_\mathcal{F}(0,T;\mathbb{R}^n)$ to (\ref{state equation}). Thus, we could define the cost functionals of the players as follows:
\begin{equation}\label{cost functional-follower}
\begin{aligned}
J_1(x;u,w)=\frac{1}{2}\mathbb{E}\left[\int_0^T\Big(\big\langle Q^1_tx^{u,w}_t,x^{u,w}_t\big\rangle+2\big\langle S^1_tx^{u,w}_t,u_t\big\rangle+\big\langle R^1_tu_t,u_t\big\rangle\Big)dt+\big\langle G^1x^{u,w}_T,x^{u,w}_T\big\rangle\right],
\end{aligned}
\end{equation}
\begin{equation}\label{cost functional-leader}
\begin{aligned}
J_2(x;u,w)=\frac{1}{2}\mathbb{E}\left[\int_0^T\Big(\big\langle Q^2_tx^{u,w}_t,x^{u,w}_t\big\rangle+2\big\langle S^2_tx^{u,w}_t,w_t\big\rangle+\big\langle R^2_tw_t,w_t\big\rangle\Big)dt+\big\langle G^2x^{u,w}_T,x^{u,w}_T\big\rangle\right],
\end{aligned}
\end{equation}
where $Q^1,Q^2,S^1,S^2,R^1,R^2$ are bounded Borel measurable functions from $[0,T]$ to $\mathbb{S}^n,\mathbb{S}^n,\mathbb{R}^{k_1\times n},\\\mathbb{R}^{k_2\times n},\mathbb{S}^{k_1},\mathbb{S}^{k_2}$, respectively, and $G^i$ are $\mathbb{S}^n$-valued matrices for $i=1,2$.
We formulate the Stackelberg game by two steps. In the first step, for any chosen $w\in\mathcal{U}^2_{ad}$ and a fixed initial state $x\in\mathbb{R}^n$, the follower would like to choose a $u^*\in\mathcal{U}^1_{ad}$ such that $J_1(x;u^*,w)$ is the minimum of the cost functional $J_1(x;u,w)$ over $\mathcal{U}^1_{ad}$. In a more rigorous way, the follower wants to find a map $\alpha^*:\mathcal{U}^2_{ad}\times\mathbb{R}^n\rightarrow\mathcal{U}^1_{ad}$, such that
\begin{equation}\label{follower}
\begin{aligned}
J_1(x;\alpha^*[w,x],w)=\min\limits_{u\in\mathcal{U}^1_{ad}}J_1(x;u,w),\mbox{\ for all }w\in\mathcal{U}^2_{ad}.
\end{aligned}
\end{equation}
In the second step, knowing that the follower would take $u^*\equiv\alpha^*[w,x]$, the leader wishes to choose some $w^*$ to minimize $J_2(x;\alpha^*[w,x],w)$ over $\mathcal{U}^2_{ad}$. That is to say, the leader wants to find a control function $w^*$ such that
\begin{equation}\label{leader}
\begin{aligned}
J_2(x;\alpha^*[w^*,x],w^*)=\min\limits_{w\in\mathcal{U}^2_{ad}}J_2(x;\alpha^*[w,x],w).
\end{aligned}
\end{equation}
If $(\alpha^*[\cdot],w^*)$ exists, we refer to it as an {\it open-loop Stackelberg equilibrium solution} to the above {\it LQ Stackelberg differential game with mixed deterministic and stochastic controls}. In this paper, we will make a great effort to find a state feedback representation for the open-loop Stackelberg equilibrium solution.
The Stackelberg differential game is also known as leader-follower differential game, which attracts more and more research attention recently, since it has wide practical backgrounds, especially in economics and finance. The earliest work about the game can be traced back to Stackelberg \cite{S52}, where the concept of Stackelberg equilibrium solution was defined for economic markets when some firms have power of domination over others. Bagchi and Ba\c{s}ar \cite{BB81} discussed an LQ stochastic Stackelberg differential game, where state and control variables do not enter diffusion coefficient in state equation. Yong \cite{Yong02} considered an LQ Stackelberg differential game in a rather general framework, with random coefficient, control dependent diffusion and weight matrix for controls in cost functional being not necessarily nonnegative definite. \O ksendal et al. \cite{OSU13} obtained a maximum principle for Stackelberg differential game in the jump-diffusion case, and applied the result to a newsvendor problem. Bensoussan et al. \cite{BCS15} investigated several information structures for stochastic Stackelberg differential game, whereas diffusion coefficient does not contain control variable. Shi et al. \cite{SWX16} introduced a new explanation for the asymmetric information feature of Stackelberg differential game, and an LQ stochastic Stackelberg differential game with noisy observation was solved, where not all the diffusion coefficients contain control variables. Shi et al. \cite{SWX17} studied an LQ stochastic Stackelberg differential game with asymmetric information, where control variables enter both diffusion coefficients of state equation. Xu and Zhang \cite{XZ16} and Xu et al. \cite{XSZ18} addressed a Stackelberg differential game with time-delay. Li and Yu \cite{LY18} applied FBSDE with a multilevel self-similar domination-monotonicity structure, to characterize the unique equilibrium of an LQ generalized Stackelberg game with hierarchy. 
Moon and Ba\c{s}ar \cite{MB18} investigated an LQ mean field Stackelberg differential game with adapted open-loop information structure of the leader where there are only one leader but arbitrarily large number of followers. See also Lin et al. \cite{LJZ19}, Wang et al. \cite{WWZ20} for recent developments on open-loop LQ Stackelberg game of mean-field type stochastic systems.
Recently, an interesting paper by Hu and Tang \cite{HT19}, considered a mixed deterministic and random optimal control problem of linear stochastic system with quadratic cost functional, with two controllers---one can choose only deterministic time functions which is called the deterministic controller, while the other can choose adapted random processes which is called the random controller. The optimal control is characterized via a system of fully coupled FBSDEs of mean-field type, whose solvability is proved by solutions to two (not coupled) Riccati equations. Inspired by \cite{HT19}, here in this paper we consider an LQ Stackelberg differential game with mixed deterministic and random controls, where the follower is a random controller and the leader is a deterministic controller. In practical applications such as in Stackelberg's type financial market, some securities investor is the follower and the government who makes macro policies is the leader. The novelty and contribution of this paper can be summarized as follows.
\begin{itemize}
\item The game problem is new. To the best of our knowledge, it is the first paper to consider the mixed deterministic and random controls in the study of Stackelberg games. So this paper can be regarded as a continuation of \cite{HT19}, from control to game problems.
\item The problem of the leader is related with a system of MF-FBSDEs, via a direct calculation of derivative of cost functional. This interesting feature is different from \cite{Yong02}.
\item A feedback representation of optimal control function of the leader with respect to the expectation of optimal state variable, is obtained by solutions to a system of two coupled Riccati equations and a two-point value problem of ODEs. This is also different from \cite{Yong02}, where a dimensional-expansion technique is applied.
\end{itemize}
The rest of this paper is organized as follows. In Section 2, the game problem is solved in two subsections. The problem of the follower is discussed in Subsection 2.1, and that of the leader is studied in Subsection 2.2. First, an optimal control process of the follower is obtained by maximum principle of controlled SDE, which is a linear functional of optimal state variable and control variable of the leader, via a classical Riccati equation. Then an optimal control function of the leader is got via a direct calculation of derivative of cost functional, via the solution to a system of MF-FBSDEs. And it is represented as a functional of expectation of optimal state variable, together with solutions to a two-point boundary value problem of ODEs, by a system consisting of two coupled Riccati equations. The solvability of this new system of Riccati equation is discussed. Finally, Section 3 gives some concluding remarks.
\section{Main Result}
We split this section into two subsections, to deal with the problems of the follower and the leader, respectively.
\subsection{Problem of the Follower}
For given control function $w\in\mathcal{U}^2_{ad}$, assume that $u^*$ is an optimal control process of the follower and the corresponding optimal state is $x^{u^*,w}$. Define the Hamiltonian function $H_1:[0,T]\times\mathbb{R}^n\times\mathbb{R}^{k_1}\times\mathbb{R}^{k_2}\times\mathbb{R}^n\times\mathbb{R}^{n\times d}\rightarrow\mathbb{R}$ of the follower as
\begin{equation}\label{Hamiltonian-follower}
\begin{aligned}
H_1\big(t,x,u,w,q,k\big)&=\langle q,Ax+B^1u+B^2w\rangle+\langle k,Cx+D^1u+D^2w\rangle\\
&\quad-\frac{1}{2}\langle Q^1x,x\rangle-\langle S^1x,u\rangle-\frac{1}{2}\langle R^1u,u\rangle.
\end{aligned}
\end{equation}
By the maximum principle (see, e.g., Chapter 6 of Yong and Zhou \cite{YZ99}), there exists a unique pair of processes $(q,k\equiv(k^1,k^2,\cdots,k^d))\in L^2_\mathcal{F}(0,T;\mathbb{R}^n)\times (L^2_\mathcal{F}(0,T;\mathbb{R}^n))^d$ satisfying the {\it backward SDE} (BSDE)
\begin{equation}\label{adjoint equation-follower}
\left\{
\begin{aligned}
-dq_t=&\big[A_t^\top q_t+C_t^\top k_t-(S^1_t)^\top u_t-Q^1_tx^{u^*,w}_t\big]dt-k_tdW_t,\ t\in[0,T],\\
q_T=&-G^1x^{u^*,w}_T,
\end{aligned}
\right.
\end{equation}
and the optimality condition holds true
\begin{equation}\label{optimality condition-follower}
\begin{aligned}
0=R^1_tu^*_t+S^1_tx^{u^*,w}_t-(B^1_t)^\top q_t-(D^1_t)^\top k_t,\ t\in[0,T].
\end{aligned}
\end{equation}
We wish to obtain a state feedback representation of $u^*$. Noticing the terminal condition of (\ref{adjoint equation-follower}) and the appearance of the control function $w$, we set
\begin{equation}\label{supposed form of q}
q_t=-P_tx^{u^*,w}_t-\varphi_t,\ t\in[0,T],
\end{equation}
for some differentiable function $P$ and $\varphi$ from $[0,T]$ to $\mathbb{S}^n$ and $\mathbb{R}^n$, respectively, satisfying $P_T=G^1$ and $\varphi_T=0$.
Applying It\^{o}'s formula to (\ref{supposed form of q}), we have
\begin{equation}\label{applying Ito's formula to q}
\begin{aligned}
-dq_t&=\big(\dot{P}_tx^{u^*,w}_t+P_tA_tx^{u^*,w}_t+\dot{\varphi}_t+P_tB^1_tu^*_t+P_tB^2_tw_t\big)dt\\
&\quad+P_t\big(C_tx^{u^*,w}_t+D^1_tu^*_t+D^2_tw_t\big)dW_t.
\end{aligned}
\end{equation}
Comparing the $dW_t$ term in (\ref{applying Ito's formula to q}) with that in (\ref{adjoint equation-follower}), we arrive at
\begin{equation}\label{comparing dW}
\begin{aligned}
k_t=-P_t\big(C_tx^{u^*,w}_t+D^1_tu^*_t+D^2_tw_t\big),\ t\in[0,T].
\end{aligned}
\end{equation}
Plugging (\ref{supposed form of q}) and (\ref{comparing dW}) into optimality condition (\ref{optimality condition-follower}), and supposing that
\noindent{\bf (A2.1)}\quad{\it $R^1_t+(D^1_t)^\top P_tD^1_t$ is invertible, for all $t\in[0,T]$,}
\noindent we immediately arrive at
\begin{equation}\label{optimal control-follower}
\begin{aligned}
u^*_t&=-\big(R^1_t+(D^1_t)^\top P_tD^1_t\big)^{-1}\Big\{\big[(B^1_t)^\top P_t+(D^1_t)^\top P_tC_t+S^1_t\big]x^{u^*,w}_t\\
&\qquad+(D^1_t)^\top P_tD^2_tw_t+(B^1_t)^\top\varphi_t\Big\},\ t\in[0,T].
\end{aligned}
\end{equation}
Comparing the $dt$ term in (\ref{applying Ito's formula to q}) with that in (\ref{adjoint equation-follower}), noting (\ref{supposed form of q}), (\ref{comparing dW}) and (\ref{optimal control-follower}), we can obtain that if
\begin{equation}\label{Riccati equation-P}
\left\{
\begin{aligned}
&\dot{P}_t+A_t^\top P_t+P_tA_t+C_t^\top P_tC_t+Q^1_t-\big[P_tB^1_t+C_t^\top P_tD^1_t+(S^1_t)^\top\big]\\
&\quad\times\big(R^1_t+(D^1_t)^\top P_tD^1_t\big)^{-1}\big[(B^1_t)^\top P_t+(D^1_t)^\top P_tC_t+S^1_t\big]=0,\ t\in[0,T],\\
&P_T=G^1,
\end{aligned}
\right.
\end{equation}
admits a unique differentiable solution $P\in\mathbb{S}^n$, then
\begin{equation}\label{varphi-equation}
\left\{\begin{aligned}
&\dot{\varphi}_t+\big[A^\top_t-(P_tB^1_t+C_t^\top P_tD^1_t+(S^1_t)^\top)\big(R^1_t+(D^1_t)^\top P_tD^1_t\big)^{-1}(B^1_t)^\top\big]\varphi_t\\
&+\big[P_tB^2_t+C_t^\top P_tD^2_t-\big(P_tB^1_t+C_t^\top P_tD^1_t+(S^1_t)^\top\big)\\
&\quad\times\big(R^1_t+(D^1_t)^\top P_tD^1_t\big)^{-1}(D^1_t)^\top P_tD^2_t\big]w_t=0,\ t\in[0,T],\\
&\varphi_T=0.
\end{aligned}\right.
\end{equation}
For the solvability of Riccati equation (\ref{Riccati equation-P}), under the following standard assumption that
\noindent{\bf (A2.2)}\quad{\it $R^1\gg0,\ G^1\geqslant0,\ Q^1-S^1(R^1)^{-1}(S^1)^\top\geqslant0$,}
\noindent(\ref{Riccati equation-P}) admits a unique differentiable solution $P\geqslant0$ by Theorem 7.2, Chapter 6 of \cite{YZ99}. For given $w\in\mathcal{U}^2_{ad}$, the solvability of ODE (\ref{varphi-equation}) is obvious.
Under {\bf (A2.2)}, the map $u\mapsto J_1(x;u,w)$ is uniformly convex, thus (\ref{optimal control-follower}) is also sufficient for $(u^*,x^{u^*,w})$ being a unique optimal pair of the follower.
Now, inserting (\ref{optimal control-follower}) into the state equation of (\ref{state equation}), we have
\begin{equation}\label{optimal state equation-follower}
\left\{
\begin{aligned}
dx^{u^*,w}_t&=\Big\{\big[A_t-B^1_t\big(R^1_t+(D^1_t)^\top P_tD^1_t\big)^{-1}\big((B^1_t)^\top P_t+(D^1_t)^\top P_tC_t\big)+S^1_t\big]x^{u^*,w}_t\\
&\qquad+\big[B^2_t-B^1_t\big(R^1_t+(D^1_t)^\top P_tD^1_t\big)^{-1}(D^1_t)^\top P_tD^2_t\big]w_t\\
&\qquad-B^1_t\big(R^1_t+(D^1_t)^\top P_tD^1_t\big)^{-1}(B^1_t)^\top\varphi_t\Big\}dt\\
&\quad+\Big\{\big[C_t-D^1_t\big(R^1_t+(D^1_t)^\top P_tD^1_t\big)^{-1}\big((B^1_t)^\top P_t+(D^1_t)^\top P_tC_t\big)+S^1_t\big]x^{u^*,w}_t\\
&\qquad+\big[D^2_t-D^1_t\big(R^1_t+(D^1_t)^\top P_tD^1_t\big)^{-1}(D^1_t)^\top P_tD^2_t\big]w_t\\
&\qquad-D^1_t\big(R^1_t+(D^1_t)^\top P_tD^1_t\big)^{-1}(B^1_t)^\top\varphi_t\Big\}dW_t,\ t\in[0,T],\\
x^{u^*,w}_0&=x,
\end{aligned}
\right.
\end{equation}
which admits a unique solution $x^{u^*,w}\in L^2_\mathcal{F}(0,T;\mathbb{R}^n)$, for given $w\in\mathcal{U}^2_{ad}$.
Moreover, we have the result.
\noindent{\bf Theorem 2.1}\quad{\it Let {\bf (A2.1), (A2.2)} hold, $P\geqslant0$ satisfy (\ref{Riccati equation-P}). For a chosen control function $w\in\mathcal{U}^2_{ad}$ of the leader, there is a unique optimal control process $u^*\in\mathcal{U}^1_{ad}$ of the follower, whose state feedback representation is given by (\ref{optimal control-follower}), where $x^{u^*,w}\in L^2_\mathcal{F}(0,T;\mathbb{R}^n)$ is the optimal state satisfying (\ref{optimal state equation-follower}) and the differentiable function $\varphi$ satisfies (\ref{varphi-equation}). The optimal value is given by
\begin{equation}\label{optimal value-follower}
\begin{aligned}
J_1(x;u^*,w)&=\frac{1}{2}\langle P_0x,x\rangle+\langle\varphi_0,x\rangle
+\int_0^T\Big(\big\langle(B^2_t)^\top\varphi_t,w_t\big\rangle+\big\langle(D^2_t)^\top P_tD^2_tw_t,w_t\big\rangle\\
&\quad-\big|\big(R^1_t+(D^1_t)^\top P_tD^1_t\big)^{-\frac{1}{2}}\big[(B^2_t)^\top\varphi_t+(D^2_t)^\top P_tD^2_tw_t\big]\big|^2\Big)dt.
\end{aligned}
\end{equation}}
\begin{proof}
We only need to prove (\ref{optimal value-follower}). However, it can be easily obtained by applying It\^{o}'s formula to $\langle Px^{u^*,w},x^{u^*,w}\rangle+\langle\varphi,x^{u^*,w}\rangle$, together with the completion of squares technique. We omit the detail.
\end{proof}
The results in this subsection are a special case of those in Section 2 of Yong \cite{Yong02}, but with the cross term. We display them here with some refined derivations to make this paper self-contained.
\subsection{Problem of the Leader}
Since the leader knows that the follower will take his optimal control process $u^*\in\mathcal{U}^1_{ad}$ by (\ref{optimal control-follower}), the state equation of the leader now writes
\begin{equation}\label{state equation-leader}
\left\{
\begin{aligned}
dx^w_t&=\big(\widetilde{A}_tx^w_t+\widetilde{B}^1_t\varphi_t+\widetilde{B}^2_tw_t\big)dt
+\big(\widetilde{C}_tx^w_t+\widetilde{D}^1_t\varphi_t+\widetilde{D}^2_tw_t\big)dW_t,\\
d\varphi_t&=-(\widetilde{A}_t^\top\varphi_t+\Gamma_tw_t)dt,\ t\in[0,T],\\
x^w_0&=x,\ \varphi_T=0,
\end{aligned}
\right.
\end{equation}
where we have denoted $x^w\equiv x^{u^*,w}$ and
\begin{equation*}
\left\{
\begin{aligned}
\widetilde{R}^1&:=\widetilde{R}^1(P):=R^1+(D^1)^\top PD^1,\\
\widetilde{A}&:=\widetilde{A}(P):=A-B^1(\widetilde{R}^1)^{-1}\big[(B^1)^\top P+(D^1)^\top PC+S^1\big],\\
\widetilde{B}^1&:=\widetilde{B}^1(P):=-B^1(\widetilde{R}^1)^{-1}(B^1)^\top,\\
\widetilde{B}^2&:=\widetilde{B}^2(P):=B^2-B^1(\widetilde{R}^1)^{-1}(D^1)^\top PD^2,\\
\widetilde{C}&:=\widetilde{C}(P):=C-D^1(\widetilde{R}^1)^{-1}\big[(B^1)^\top P+(D^1)^\top PC+S^1\big],\\
\widetilde{D}^1&:=\widetilde{B}^1(P):=-D^1(\widetilde{R}^1)^{-1}(B^1)^\top,\\
\widetilde{D}^2&:=\widetilde{D}^2(P):=D^2-D^1(\widetilde{R}^1)^{-1}(D^1)^\top PD^2,\\
\Gamma&:=\Gamma(P):=PB^2+C^\top PD^2-\big[PB^1+C^\top PD^1+(S^1)^\top\big](\widetilde{R}^1)^{-1}(D^1)^\top PD^2.
\end{aligned}
\right.
\end{equation*}
The problem of the leader is to choose an optimal control function $w^*\in\mathcal{U}^2_{ad}$ such that
$$
J^2(x;u^*,w^*)=\min\limits_{w\in\mathcal{U}^2_{ad}}J_2(x;u^*,w).
$$
We first have the following result.
\noindent{\bf Theorem 2.2}\quad{\it Suppose that $w^*$ is an optimal control function of the leader, and the corresponding optimal state is $x^*\equiv x^{w^*}$ together with $\varphi^*$ being solution to (\ref{state equation-leader}). Then we have
\begin{equation}\label{optimal control-leader}
\begin{aligned}
0=R^2_tw^*_t+\big(\widetilde{B}^2_t\big)^\top\mathbb{E}y_t+\big(\widetilde{D}^2_t\big)^\top\mathbb{E}z_t
+S^2_t\mathbb{E}x^*_t+\Gamma_t^\top\mathbb{E}p_t,\ t\in[0,T],
\end{aligned}
\end{equation}
where the triple of processes $(y,z,p)\in\mathbb{R}^n\times\mathbb{R}^{n\times d}\times\mathbb{R}^n$ satisfies the FBSDE
\begin{equation}\label{adjoint equation-leader}
\left\{
\begin{aligned}
dp_t&=\big[\widetilde{A}^\top_tp_t+(\widetilde{B}^1_t)^\top y_t+(\widetilde{D}^1_t)^\top z_t\big]dt,\\
-dy_t&=\big[\widetilde{A}_t^\top y_t+\widetilde{C}_t^\top z_t+(S^2_t)^\top w^*_t+Q^2_tx^*_t\big]dt-z_tdW_t,\ t\in[0,T],\\
p_0&=0,\ y_T=G^2x^*_T.
\end{aligned}
\right.
\end{equation}
Moreover, if we assume that
\noindent{\bf (A2.3)}\quad{\it $G^2\geqslant0,\ Q^2-S^2(R^2)^{-1}(S^2)^\top\geqslant0,\ R^2\gg0$,}
\noindent then the above optimality condition becomes sufficient for the unique existence of the optimal control function $w^*$ of the leader.}
\begin{proof}
Without loss of generality, let $x\equiv0$, and set the perturbed optimal control function $w^*+\lambda w$ for $\lambda>0$ sufficiently small, with $w\in\mathbb{R}^{k_2}$. Then it is easy to see from the linearity of (\ref{state equation-leader}), that the solution to (\ref{state equation-leader}) is $x^*+\lambda x^w$. We first have
\begin{equation*}
\begin{aligned}
\widetilde{J}(\lambda)&:=J_2(0;u^*,w^*+\lambda w)\\
&=\frac{1}{2}\mathbb{E}\int_0^T\big[\big\langle Q^2_t(x^*_t+\lambda x^w_t),x^*_t+\lambda x^w_t\big\rangle+2\big\langle S^2_t(x^*_t+\lambda x^w_t),w^*_t+\lambda w_t\big\rangle\\
&\qquad+\big\langle R^2_t(w^*_t+\lambda w_t),w^*_t+\lambda w_t\big\rangle\big]dt+\frac{1}{2}\mathbb{E}\big\langle G^2(x^*_T+\lambda x^w_T),x^*_T+\lambda x^w_T\big\rangle.
\end{aligned}
\end{equation*}
Hence
\begin{equation*}
\begin{aligned}
0=\frac{\partial\widetilde{J}(\lambda)}{\partial\lambda}\bigg|_{\lambda=0}
&=\mathbb{E}\int_0^T\big[\big\langle Q^2_tx^*_t,x^w_t\big\rangle+\big\langle S^2_tx^*_t,w_t\big\rangle+\big\langle S^2_tx^w_t,w^*_t\big\rangle\\
&\qquad+\big\langle R^2_tw^*_t,w_t\big\rangle\big]dt+\mathbb{E}\big\langle G^2x^*_T,x^w_T\big\rangle.
\end{aligned}
\end{equation*}
Let the triple $(p,y,z)$ satisfy (\ref{adjoint equation-leader}). Then we have
\begin{equation*}
\begin{aligned}
0=\mathbb{E}\int_0^T\big[\langle Q^2_tx^*_t,x^w_t\rangle+\big\langle S^2_tx^*_t,w_t\big\rangle+\big\langle S^2_tx^w_t,w^*_t\big\rangle+\langle R^2_tw^*_t,w_t\rangle\big]dt+\mathbb{E}\langle y_T,x^w_T\rangle.
\end{aligned}
\end{equation*}
Applying It\^{o}'s formula to $\langle x^w_t,y_t\rangle-\langle\varphi_t,p_t\rangle$, noticing (\ref{state equation-leader}) and (\ref{adjoint equation-leader}), we derive
\begin{equation*}
\begin{aligned}
0&=\mathbb{E}\int_0^T\big\langle R^2_tw^*_t+\big(\widetilde{B}^2_t\big)^\top y_t+\big(\widetilde{D}^2_t\big)^\top z_t+S^2_t x^*_t+\Gamma_t^\top p_t,w_t\big\rangle dt\\
&=\int_0^T\big\langle R^2_tw^*_t+\big(\widetilde{B}^2_t\big)^\top\mathbb{E}y_t+\big(\widetilde{D}^2_t\big)^\top\mathbb{E}z_t
+S^2_t\mathbb{E}x^*_t+\Gamma_t^\top\mathbb{E}p_t,w_t\big\rangle dt.
\end{aligned}
\end{equation*}
This implies (\ref{optimal control-leader}). Further, if {\bf (A2.3)} holds, then the functional $w\mapsto J_2(x;u^*,w)$ is uniformly convex. Thus the necessary condition becomes sufficient for the unique existence of $w^*$. See the remark of Theorem 2.2 in Yong \cite{Yong02} for more details. The proof is complete.
\end{proof}
Next, putting (\ref{state equation-leader}), (\ref{optimal control-leader}) and (\ref{adjoint equation-leader}) together, corresponding with the optimal triple $(w^*,x^*,\varphi^*)$, we get
\begin{equation}\label{system of MF-FBSDE}
\left\{
\begin{aligned}
dx^*_t&=\big(\widetilde{A}_tx^*_t+\widetilde{B}^1_t\varphi_t^*+\widetilde{B}^2_tw_t^*\big)dt
+\big(\widetilde{C}_tx^*_t+\widetilde{D}^1_t\varphi_t^*+\widetilde{D}^2_tw_t^*\big)dW_t,\\
d{\varphi}_t^*&=-(\widetilde{A}_t^\top\varphi_t^*+\Gamma_tw_t^*)dt,\\
dp_t&=\big[\widetilde{A}^\top_tp_t+(\widetilde{B}^1_t)^\top y_t+(\widetilde{D}^1_t)^\top z_t\big]dt,\\
-dy_t&=\big[\widetilde{A}_t^\top y_t+\widetilde{C}_t^\top z_t+(S^2_t)^\top w^*_t+Q^2_tx^*_t\big]dt-z_tdW_t,\\
x^*_0&=x,\ \varphi_T^*=0, \ p_0=0,\ y_T=G^2x^*_T,\\
0&=R^2_tw^*_t+\big(\widetilde{B}^2_t\big)^\top\mathbb{E}y_t+\big(\widetilde{D}^2_t\big)^\top\mathbb{E}z_t
+S^2_t\mathbb{E}x^*_t+\Gamma_t^\top\mathbb{E}p_t,\ t\in[0,T],
\end{aligned}
\right.
\end{equation}
which is a system of coupled MF-FBSDEs. Note that it is different from that in Yong \cite{Yong02}. We need to decouple (\ref{system of MF-FBSDE}), and to study the solvability of it via some Riccati equations. For this target, for the optimal control function $w^*$ of (\ref{optimal control-leader}), we expect a state feedback representation of the form
\begin{equation}\label{supposed form of y}
y_t=P^1_tx^*_t+P^2_t(x^*_t-\mathbb{E}x^*_t)+\phi_t,
\end{equation}
for some differentiable functions $P^1,P^2$ and $\phi$ from $[0,T]$ to $\mathbb{S}^n,\mathbb{R}^{n\times n}$ and $\mathbb{R}^n$, respectively, satisfying $P^1_T=G^2,P^2_T=0$ and $\phi_T=0$.
Noticing that
\begin{equation}\label{dEx}
\left\{
\begin{aligned}
d\mathbb{E}x^*_t&=\big(\widetilde{A}_t\mathbb{E}x^*_t+\widetilde{B}^1_t\varphi_t^*+\widetilde{B}^2_tw_t^*\big)dt,\ t\in[0,T],\\
\mathbb{E}x^*_0&=x,
\end{aligned}
\right.
\end{equation}
and applying It\^{o}'s formula to (\ref{supposed form of y}), we obtain
\begin{equation}\label{Ito's formula}
\begin{aligned}
dy_t=&\big[\dot{\phi}_t+\big(\dot{P}^1_t+P^1_t\widetilde{A}_t\big)x^*_t+\big(\dot{P}^2_t+P^2_t\widetilde{A}_t\big)(x^*_t-\mathbb{E}x^*_t)
+P^1_t\widetilde{B}^1_t\varphi_t^*+P^1_t\widetilde{B}^2_tw_t^*\big]dt\\
&+\big[(P^1_t+P^2_t)\widetilde{C}_tx^*_t+(P^1_t+P^2_t)\widetilde{D}^1_t\varphi^*_t+(P^1_t+P^2_t)\widetilde{D}^2_tw^*_t\big]dW_t\\
=&-\big[\widetilde{A}_t^\top P^1_tx^*_t+\widetilde{A}_t^\top P^2_t(x^*_t-\mathbb{E}x^*_t)+\widetilde{A}_t^\top\phi_t+\widetilde{C}_t^\top z_t
+(S^2_t)^\top w^*_t+Q^2_tx^*_t\big]dt+z_tdW_t.
\end{aligned}
\end{equation}
Thus
\begin{equation}\label{z}
\begin{aligned}
z_t=(P^1_t+P^2_t)\widetilde{C}_tx^*_t+(P^1_t+P^2_t)\widetilde{D}^1_t\varphi^*_t+(P^1_t+P^2_t)\widetilde{D}^2_tw^*_t,\ t\in[0, T].
\end{aligned}
\end{equation}
Plugging (\ref{supposed form of y}), (\ref{z}) into (\ref{optimal control-leader}), and supposing that
\noindent {\bf (A2.4)}\quad $\widetilde{R}^2_t:=\widetilde{R}^2_t(P_t,P^1_t,P^2_t):=R^2_t+(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)\widetilde{D}^2_t$ is invertible, for all $t\in[0,T]$,
\noindent we get
\begin{equation}\label{optimal control-leader-feedback}
\begin{aligned}
w^*_t&=-(\widetilde{R}^2_t)^{-1}\Big\{\big[(\widetilde{B}^2_t)^\top P^1_t+(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)\widetilde{C}_t+S^2_t\big]\mathbb{E}x^*_t\\
&\qquad\qquad\qquad+(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)\widetilde{D}^1_t\varphi^*_t+\Gamma_t^\top\mathbb{E}p_t+(\widetilde{B}^2_t)^\top\phi_t\Big\}.
\end{aligned}
\end{equation}
Inserting (\ref{optimal control-leader-feedback}) into (\ref{z}), we have
\begin{equation}\label{zz}
\begin{aligned}
z_t&=(P^1_t+P^2_t)\widetilde{C}_tx^*_t
-(P^1_t+P^2_t)\widetilde{D}^2_t(\widetilde{R}^2_t)^{-1}\big[(\widetilde{B}^2_t)^\top P^1_t+(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)\widetilde{C}_t+S^2_t\big]\mathbb{E}x^*_t\\
&\quad+\big[(P^1_t+P^2_t)\widetilde{D}^1_t-(P^1_t+P^2_t)\widetilde{D}^2_t(\widetilde{R}^2_t)^{-1}(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)\widetilde{D}^1_t\big]\varphi^*_t\\
&\quad-(P^1_t+P^2_t)\widetilde{D}^2_t(\widetilde{R}^2_t)^{-1}\Gamma_t^\top\mathbb{E}p_t-(P^1_t+P^2_t)\widetilde{D}^2_t(\widetilde{R}^2_t)^{-1}(\widetilde{B}^2_t)^\top\phi_t.
\end{aligned}
\end{equation}
Comparing $dt$ terms in the fourth equation in (\ref{system of MF-FBSDE}) and (\ref{Ito's formula}) and substituting (\ref{optimal control-leader-feedback}), (\ref{zz}) into them, we obtain
\begin{equation}\label{system of Riccati equations}
\left\{
\begin{aligned}
&0=\dot{P}^1_t+P^1_t\widetilde{A}_t+\widetilde{A}_t^\top P^1_t+\widetilde{C}_t^\top(P^1_t+P^2_t)\widetilde{C}_t-\big[P^1_t\widetilde{B}^2_t+\widetilde{C}_t^\top(P^1_t+P^2_t)\widetilde{D}^2_t+(S^2_t)^\top\big]\\
&\qquad\times(\widetilde{R}^2_t)^{-1}\big[(\widetilde{B}^2_t)^\top P^1_t+(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)\widetilde{C}_t+S^2_t\big]+Q^2_t,\ P^1_T=G^2,\\
&0=\dot{P}^2_t+P^2_t\widetilde{A}_t+\widetilde{A}_t^\top P^2_t+\big[P^1_t\widetilde{B}^2_t+\widetilde{C}_t^\top(P^1_t+P^2_t)\widetilde{D}^2_t+(S^2_t)^\top\big]\\
&\qquad\times(\widetilde{R}^2_t)^{-1}\big[(\widetilde{B}^2_t)^\top P^1_t+(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)\widetilde{C}_t+S^2_t\big],\ P^2_T=0,
\end{aligned}
\right.
\end{equation}
and
\begin{equation}\label{phi}
\left\{
\begin{aligned}
&0=\dot{\phi}_t+\Big\{\widetilde{A}_t^\top-\big[P^1_t\widetilde{B}^2_t+\widetilde{C}_t^\top(P^1_t+P^2_t)\widetilde{D}^2_t+(S^2_t)^\top\big]
(\widetilde{R}^2_t)^{-1}(\widetilde{B}^2_t)^\top\Big\}\phi_t+\Big\{P^1_t\widetilde{B}^1_t\\
&\qquad+\widetilde{C}_t^\top(P^1_t+P^2_t)\widetilde{D}^1_t
-\big[P^1_t\widetilde{B}^2_t+\widetilde{C}_t^\top(P^1_t+P^2_t)\widetilde{D}^2_t+(S^2_t)^\top\big](\widetilde{R}^2_t)^{-1}(\widetilde{D}^2_t)^\top\\
&\qquad\times(P^1_t+P^2_t)\widetilde{D}^1_t\Big\}\varphi^*_t-\big[P^1_t\widetilde{B}^2_t+\widetilde{C}_t^\top(P^1_t+P^2_t)\widetilde{D}^2_t+(S^2_t)^\top\big]
(\widetilde{R}^2_t)^{-1}\Gamma_t^\top\mathbb{E}p_t,\\
&\phi_T=0.
\end{aligned}
\right.
\end{equation}
Note that system (\ref{system of Riccati equations}) consists of two coupled Riccati equations, which is entirely new and whose solvability is interesting. In fact, adding the two equations in (\ref{system of Riccati equations}), it is obvious that $P^1+P^2\in\mathbb{R}^{n\times n}$ uniquely satisfies the ODE
\begin{equation}\label{P1+P2}
\begin{aligned}
0=\dot{\mathcal{P}}_t+\mathcal{P}_t\widetilde{A}_t+\widetilde{A}_t^\top\mathcal{P}_t+\widetilde{C}_t^\top\mathcal{P}_t\widetilde{C}_t+Q^2_t,\ \mathcal{P}_T=G^2.
\end{aligned}
\end{equation}
Thus (\ref{system of Riccati equations}) becomes
\begin{equation}\label{system of Riccati equations-new}
\left\{
\begin{aligned}
&0=\dot{P}^1_t+P^1_t\widetilde{A}_t+\widetilde{A}_t^\top P^1_t+\widetilde{C}_t^\top\mathcal{P}_t\widetilde{C}_t
-\big[P^1_t\widetilde{B}^2_t+\widetilde{C}_t^\top\mathcal{P}_t\widetilde{D}^2_t+(S^2_t)^\top\big]\\
&\qquad\times(R^2_t+(\widetilde{D}^2_t)^\top\mathcal{P}_t\widetilde{D}^2_t)^{-1}
\big[(\widetilde{B}^2_t)^\top P^1_t+(\widetilde{D}^2_t)^\top\mathcal{P}_t\widetilde{C}_t+S^2_t\big]+Q^2_t,\ P^1_T=G^2,\\
&0=\dot{P}^2_t+P^2_t\widetilde{A}_t+\widetilde{A}_t^\top P^2_t+\big[P^1_t\widetilde{B}^2_t+\widetilde{C}_t^\top\mathcal{P}_t\widetilde{D}^2_t+(S^2_t)^\top\big]\\
&\qquad\times(R^2_t+(\widetilde{D}^2_t)^\top\mathcal{P}_t\widetilde{D}^2_t)^{-1}
\big[(\widetilde{B}^2_t)^\top P^1_t+(\widetilde{D}^2_t)^\top\mathcal{P}_t\widetilde{C}_t+S^2_t\big],\ P^2_T=0,
\end{aligned}
\right.
\end{equation}
and it is a decoupled one now. Let
$$
\widetilde{Q}^2_t:=Q^2_t+\widetilde{C}_t^\top\mathcal{P}_t\widetilde{C}_t,\quad \widetilde{S}^2_t:=S^2_t+(\widetilde{D}^2_t)^\top\mathcal{P}_t\widetilde{C}_t,\quad \forall t\in[0,T].
$$
Then the Riccati equation of $P^1$ can be written as
\begin{equation}\label{P1}
\left\{
\begin{aligned}
&0=\dot{P}^1_t+P^1_t\widetilde{A}_t+\widetilde{A}_t^\top P^1_t-\big[P^1_t\widetilde{B}^2_t+(\widetilde{S}^2_t)^\top\big](\widetilde{R}^2_t)^{-1}
\big[(\widetilde{B}^2_t)^\top P^1_t+\widetilde{S}^2_t\big]+\widetilde{Q}^2_t,\\
&P^1_T=G^2,\\
\end{aligned}
\right.
\end{equation}
If we assume that
\noindent {\bf (A2.5)}\quad $\widetilde{Q}^2-\widetilde{S}^2(\widetilde{R}^2)^{-1}(\widetilde{S}^2)^\top\geqslant0$,
\noindent by {\bf (A2.3), (A2.4)} and {\bf (A2.5)}, there is a unique solution $P^1\geqslant0$. Then there also exists a unique solution $P^2=\mathcal{P}-P^1\in\mathbb{R}^{n\times n}$.
We discuss the solvability of equation (\ref{phi}) for the function $\phi$. In fact, with some computation, we can obtain a two-point
boundary value problem for coupled linear ODE for $(\mathbb{E}x^*,\mathbb{E}p,\varphi^*,\phi)$:
\begin{equation}\label{Ex,Ep,varphi,phi}
\left\{
\begin{aligned}
\frac{d\mathbb{E}x^*_t}{dt}&=\big[\widetilde{A}_t-\widetilde{B}^2_t(\widetilde{R}^2_t)^{-1}\overline{S}^2_t\big]\mathbb{E}x^*_t
-\widetilde{B}^2_t(\widetilde{R}^2_t)^{-1}\Gamma_t^\top\mathbb{E}p_t-\widetilde{B}^2_t(\widetilde{R}^2_t)^{-1}(\widetilde{B}^2_t)^\top\phi_t
+\overline{B}^2_t\varphi^*_t,\\
\frac{d\mathbb{E}p_t}{dt}&=\big(\widetilde{A}_t^\top-\overline{\Gamma}_t^\top\big)\mathbb{E}p_t+(\overline{B}^1_t)^\top\mathbb{E}x^*_t+(\overline{B}^2_t)^\top\phi_t
+\overline{D}^1_t\varphi^*_t,\\
\frac{d\varphi^*_t}{dt}&=\big(\overline{\Gamma}_t-\widetilde{A}_t^\top\big)\varphi^*_t
+\Gamma_t(\widetilde{R}^2_t)^{-1}\Gamma_t^\top\mathbb{E}p_t+\Gamma_t(\widetilde{R}^2_t)^{-1}(\widetilde{B}^2_t)^\top\phi_t
+\Gamma_t(\widetilde{R}^2_t)^{-1}\overline{S}^2_t\mathbb{E}x^*_t,\\
\frac{d\phi_t}{dt}&=-\big[\widetilde{A}_t^\top-(\overline{S}^2_t)^\top(\widetilde{R}^2_t)^{-1}(\widetilde{B}^2_t)^\top\big]\phi_t
-\overline{B}^1_t\varphi^*_t+(\overline{S}^2_t)^\top(\widetilde{R}^2_t)^{-1}\Gamma_t^\top\mathbb{E}p_t,\quad t\in[0,T],\\
\mathbb{E}x^*_0&=x,\ \mathbb{E}p_0=0,\ \varphi^*_T=0,\ \phi_T=0,
\end{aligned}
\right.
\end{equation}
where for simplicity, we denote
\begin{equation*}
\left\{
\begin{aligned}
\overline{S}^2_t&:=(\widetilde{B}^2_t)^\top P^1_t+(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)\widetilde{C}_t+S^2_t,\\
\overline{\Gamma}_t&:=\Gamma_t(\widetilde{R}^2_t)^{-1}(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)\widetilde{D}^1_t,\\
\overline{B}^1_t&:=P^1_t\widetilde{B}^1_t+\widetilde{C}_t^\top(P^1_t+P^2_t)\widetilde{D}^1_t
-(\overline{S}^2_t)^\top(\widetilde{R}^2_t)^{-1}(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)\widetilde{D}^1_t,\\
\overline{B}^2_t&:=\widetilde{B}^1_t-\widetilde{B}^2_t(\widetilde{R}^2_t)^{-1}(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)\widetilde{D}^1_t,\\
\overline{D}^1_t&:=(\widetilde{D}^1_t)^\top\big[(P^1_t+P^2_t)\widetilde{D}^1_t-(P^1_t+P^2_t)\widetilde{D}^2_t(\widetilde{R}^2_t)^{-1}(\widetilde{D}^2_t)^\top
(P^1_t+P^2_t)\widetilde{D}^1_t\big].
\end{aligned}
\right.
\end{equation*}
We define
\begin{equation*}
X:=\begin{pmatrix}\mathbb{E}x^*\\\mathbb{E}p\end{pmatrix},\quad Y:=\begin{pmatrix}\varphi^*\\\phi\end{pmatrix},
\end{equation*}
\begin{equation*}
\begin{aligned}
&\mathbf{A}_t:=\begin{pmatrix}\widetilde{A}_t-\widetilde{B}^2_t(\widetilde{R}^2_t)^{-1}\overline{S}^2_t&-\widetilde{B}^2_t(\widetilde{R}^2_t)^{-1}\Gamma_t^\top\\
(\overline{B}^1_t)^\top&\widetilde{A}_t^\top-\overline{\Gamma}_t^\top\end{pmatrix},\quad
\mathbf{B}_t:=\begin{pmatrix}\overline{B}^2_t&-\widetilde{B}^2_t(\widetilde{R}^2_t)^{-1}(\widetilde{B}^2_t)^\top\\\overline{D}^1_t&(\overline{B}^2_t)^\top\end{pmatrix},\\
&\widehat{\mathbf{A}}_t:=\begin{pmatrix}\Gamma_t(\widetilde{R}^2_t)^{-1}\overline{S}^2_t&\Gamma_t(\widetilde{R}^2_t)^{-1}\Gamma_t^\top\\
0&(\overline{S}^2_t)^\top(\widetilde{R}^2_t)^{-1}\Gamma_t^\top\end{pmatrix},\quad
\widehat{\mathbf{B}}_t:=\begin{pmatrix}\overline{\Gamma}_t-\widetilde{A}_t^\top&\Gamma_t(\widetilde{R}^2_t)^{-1}(\widetilde{B}^2_t)^\top\\
-\overline{B}^1_t&-\widetilde{A}_t^\top+(\overline{S}^2_t)^\top(\widetilde{R}^2_t)^{-1}(\widetilde{B}^2_t)^\top\end{pmatrix},
\end{aligned}
\end{equation*}
and denote
\begin{equation*}
\mathcal{A}_t:=\begin{pmatrix}\mathbf{A}_t&\mathbf{B}_t\\\widehat{\mathbf{A}}_t&\widehat{\mathbf{B}}_t\end{pmatrix},
\end{equation*}
thus (\ref{Ex,Ep,varphi,phi}) can be written as
\begin{equation}\label{X,Y}
\left\{
\begin{aligned}
&d\begin{pmatrix}X_t\\Y_t\end{pmatrix}=\mathcal{A}_t\begin{pmatrix}X_t\\Y_t\end{pmatrix}dt,\quad t\in[0,T],\\
&X_0=(x^\top\quad 0)^\top,\ Y_T=(0\quad 0)^\top.
\end{aligned}
\right.
\end{equation}
From the theory by Yong \cite{Yong99}, we know that (\ref{X,Y}) admits a unique solution $(X,Y)\in L^2(0,T;\mathbb{R}^{2n})\\\times L^2(0,T;\mathbb{R}^{2n})$ if and only if
\begin{equation}\label{assumption of Yong 1999}
\mbox{det}\left\{(0\quad I)e^{\mathcal{A}_tt}\begin{pmatrix}0\\I\end{pmatrix}\right\}>0,\quad \forall t\in[0,T].
\end{equation}
In this case, (\ref{Ex,Ep,varphi,phi}) admits a unique solution $(\mathbb{E}x^*,\mathbb{E}p,\varphi^*,\phi)\in L^2(0,T;\mathbb{R}^n)\times L^2(0,T;\mathbb{R}^n)\times L^2(0,T;\mathbb{R}^n)\times L^2(0,T;\mathbb{R}^n)$. For some recent progress on two-point boundary value problems associated with ODEs, we refer to Liu and Wu \cite{LW18}.
We summarize the above process in the following theorem.
\noindent{\bf Theorem 2.3}\quad{\it Let {\bf (A2.1)$\sim$(A2.5)} and (\ref{assumption of Yong 1999}) hold, $(P^1,P^2)$ satisfy (\ref{system of Riccati equations}), and $(\mathbb{E}x^*,\mathbb{E}p,\varphi^*,\phi)$ satisfy (\ref{Ex,Ep,varphi,phi}). Then $w^*$ given by (\ref{optimal control-leader-feedback}) is the state feedback representation of the unique optimal control of the leader. Let $x^*$ satisfy
\begin{equation}\label{x}
\left\{
\begin{aligned}
dx^*_t&=\Big\{\widetilde{A}_tx^*_t-\widetilde{B}^2_t(\widetilde{R}^2_t)^{-1}\overline{S}^2_t\mathbb{E}x^*_t-\widetilde{B}^2_t(\widetilde{R}^2_t)^{-1}\Gamma_t^\top\mathbb{E}p_t\\
&\qquad+\big[\widetilde{B}^1_t-\widetilde{B}^2_t(\widetilde{R}^2_t)^{-1}(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)
\widetilde{D}^1_t\big]\varphi^*_t-\widetilde{B}^2_t(\widetilde{R}^2_t)^{-1}(\widetilde{B}^2_t)^\top\phi_t\Big\}dt\\
&\quad+\Big\{\widetilde{C}_tx^*_t-\widetilde{D}^2_t(\widetilde{R}^2_t)^{-1}\overline{S}^2_t\mathbb{E}x^*_t-\widetilde{D}^2_t(\widetilde{R}^2_t)^{-1}\Gamma_t^\top\mathbb{E}p_t\\
&\qquad+\big[\widetilde{D}^1_t-\widetilde{D}^2_t(\widetilde{R}^2_t)^{-1}(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)
\widetilde{D}^1_t\big]\varphi^*_t-\widetilde{D}^2_t(\widetilde{R}^2_t)^{-1}(\widetilde{B}^2_t)^\top\phi_t\Big\}dW_t,\\
x^*_0&=x,
\end{aligned}
\right.
\end{equation}
$p$ satisfy
\begin{equation}\label{p}
\left\{
\begin{aligned}
dp_t&=\Big\{\widetilde{A}_t^\top p_t-(\widetilde{D}^1_t)^\top(P^1_t+P^2_t)\widetilde{D}^2_t(\widetilde{R}^2_t)^{-1}\Gamma_t^\top\mathbb{E}p_t\\
&\qquad+\big[(\widetilde{B}^1_t)^\top(P^1_t+P^2_t)+(\widetilde{D}^1_t)^\top(P^1_t+P^2_t)\widetilde{C}_t\big]x^*_t\\
&\qquad-\big[(\widetilde{B}^1_t)^\top P^2_t+(\widetilde{D}^1_t)^\top(P^1_t+P^2_t)\widetilde{D}^2_t(\widetilde{R}^2_t)^{-1}\overline{S}^2_t\big]\mathbb{E}x^*_t\\
&\qquad+\big[(\widetilde{D}^1_t)^\top(P^1_t+P^2_t)\widetilde{D}^1_t-(\widetilde{D}^1_t)^\top(P^1_t+P^2_t)\widetilde{D}^2_t(\widetilde{R}^2_t)^{-1}
(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)\widetilde{D}^1_t\big]\varphi^*_t\\
&\qquad+\big[\widetilde{B}^1_t-(\widetilde{D}^1_t)^\top(P^1_t+P^2_t)\widetilde{D}^2_t(\widetilde{R}^2_t)^{-1}(\widetilde{B}^2_t)^\top\big]\phi_t\Big\}dt,\ t\in[0,T],\\
p_0&=0,
\end{aligned}
\right.
\end{equation}
and define $y^*$ and $z^*$ by (\ref{supposed form of y}) and (\ref{zz}), respectively; then $(x^*,y^*,z^*,p,\varphi^*)$ is the solution to the system of MF-FBSDEs (\ref{system of MF-FBSDE}).}
Finally, from (\ref{optimal control-follower}) and (\ref{optimal control-leader-feedback}), we obtain
\begin{equation}\label{optimal control-follower-feedback}
\begin{aligned}
u^*_t&=-(\widetilde{R}^1_t)^{-1}\big[(B^1_t)^\top P_t+(D^1_t)^\top P_tC_t+S^1_t\big]x^*_t\\
&\quad+(\widetilde{R}^1_t)^{-1}(D^1_t)^\top P_tD^2_t(\widetilde{R}^2_t)^{-1}\big[(\widetilde{B}^2_t)^\top P^1_t
+(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)\widetilde{C}_t+S^2_t\big]\mathbb{E}x^*_t\\
&\quad+(\widetilde{R}^1_t)^{-1}\big[(D^1_t)^\top P_tD^2_t(\widetilde{R}^2_t)^{-1}(\widetilde{D}^2_t)^\top(P^1_t+P^2_t)\widetilde{D}^1_t-(B^1_t)^\top\big]\varphi^*_t\\
&\quad+(\widetilde{R}^1_t)^{-1}(D^1_t)^\top P_tD^2_t(\widetilde{R}^2_t)^{-1}\Gamma_t^\top\mathbb{E}p_t
+(\widetilde{R}^1_t)^{-1}(D^1_t)^\top P_tD^2_t(\widetilde{R}^2_t)^{-1}(\widetilde{B}^2_t)^\top\phi_t,\ t\in[0,T],
\end{aligned}
\end{equation}
where $x^*$ is given by the MF-SDE (\ref{x}). Up to now, we obtain the state feedback representation for the open-loop Stackelberg equilibrium solution $(u^*,w^*)$.
\section{Concluding Remarks}
To conclude this paper, let us give some remarks. In this paper, we have considered a new kind of LQ Stackelberg differential game with mixed deterministic and stochastic controls. The open-loop Stackelberg equilibrium solution is represented as a feedback form of the state variable and its expectation, via solutions to some new Riccati equations. Though the framework is a special case of Yong \cite{Yong02}, some new ideas and interesting phenomena come out. We point out that it is possible for us to relax the assumptions in Section 2 of this paper. A possible extension of the results to those in an infinite time horizon with constant coefficients is an interesting topic. In this case, some stabilizability problems need to be investigated first, and differential Riccati equations will become algebraic Riccati equations. The practical application of the theoretical results to financial markets of Stackelberg type is another challenging problem. We will consider these problems in the near future.
\end{document} |
\begin{document}
\title{A single potential governing convergence of conjugate gradient, accelerated
gradient and geometric descent\thanks{Supported
in part by a grant from the U.~S.~Air Force Office of Scientific Research and in part
by a Discovery Grant from the Natural Sciences and Engineering Research Council
(NSERC) of Canada.}}
\begin{abstract}
Nesterov's accelerated gradient (AG)
method for minimizing a smooth strongly convex function $f$ is
known to reduce $f(\x_k)-f(\x^*)$ by a factor
of $\eps\in(0,1)$ after $k=O(\sqrt{L/\ell}\log(1/\eps))$ iterations, where
$\ell,L$ are the two parameters of smooth strong convexity. Furthermore,
it is known that this is the best possible complexity in the function-gradient oracle
model of computation. Modulo a line search, the geometric descent (GD)
method of Bubeck, Lee and Singh has the same bound for this class of functions.
The method of linear conjugate gradients (CG)
also satisfies the same
complexity bound in the special case of strongly convex quadratic functions,
but in this special case it can be faster than the AG and GD methods.
Despite similarities in the algorithms and their
asymptotic convergence rates, the conventional analysis of the
running time of CG is mostly disjoint
from that of AG and GD. The analyses of
the AG and GD methods are also rather distinct.
Our main result is analyses of the three methods that share several
common threads: all three analyses show
a relationship to a certain ``idealized algorithm'', all three
establish the convergence rate
through the use of the Bubeck-Lee-Singh geometric lemma, and all three
have the same potential
that is computable at run-time and exhibits decrease
by a factor of $1-\sqrt{\ell/L}$ or better
per iteration.
One application of these analyses is that they open the possibility of hybrid
or intermediate algorithms. One such algorithm is proposed herein
and is shown to perform well in computational tests.
\end{abstract}
\section{First-order methods for strongly convex functions}
Three methods for minimizing smooth, strongly
convex functions are considered in this work, conjugate gradient,
accelerated gradient, and geometric descent.
CG is the oldest and perhaps best known of the methods. It
was introduced by Hestenes and Stiefel \cite{hestenesstiefel}
for
minimizing strongly convex quadratic functions
of the form $f(\x)=\x^TA\x/2-\b^T\x$, where $A$ is
a symmetric positive definite matrix.
There is a significant body of work on
gradient methods for more general
smooth, strongly convex functions. We say that a
differentiable convex function $f:\R^n\rightarrow\R$ is
{\em smooth, strongly convex}
\cite{hiriarturrutylemarechal}
if there exist two scalars $L\ge\ell>0$ such that
for all $\x,\y\in\R^n$,
\begin{equation}
\ell\Vert\x-\y\Vert^2/2\le f(\y)-f(\x)-\nabla f(\x)^T(\y-\x)\le L\Vert\x-\y\Vert^2/2.
\label{eq:strconvdef}
\end{equation}
This is equivalent to assuming convexity and lower and upper
Lipschitz constants on the gradient:
$$\ell\Vert\x-\y\Vert \le \Vert\nabla f(\x)-\nabla f(\y)\Vert \le
L\Vert\x-\y\Vert.$$
Nemirovsky and Yudin \cite{NemYud83}
proposed
a method for minimizing smooth strongly convex
functions requiring
$k=O(\sqrt{L/\ell}\log(1/\eps))$ iterations to produce an iterate
$\x_k$ such that $f(\x_k)-f(\x^*)\le\eps(f(\x_0)-f(\x^*))$, where
$\x^*$ is the optimizer (necessarily unique under the assumptions made).
A drawback of their method is that it requires a two-dimensional
optimization on each iteration that can
be cumbersome to implement (to the best of our knowledge,
the algorithm was never widely adopted).
Nesterov \cite{Nesterov:k2} proposed
another method, nowadays
known as the ``accelerated gradient'' (AG) method,
which achieves the same optimal complexity while requiring only a single
function and gradient evaluation on each iteration.
In the special case of strongly convex quadratic functions,
the parameters $\ell$ and $L$ appearing in \eref{eq:strconvdef}
correspond to $\lambda_{\min}(A)$ and $\lambda_{\max}(A)$, the extremal
eigenvalues of $A$.
The conjugate
gradient method has already been known to satisfy the asymptotic
iteration bound $k=O(\sqrt{L/\ell}\log(1/\eps))$ since the work
of Daniel (1967) described below.
Although the two methods satisfy the same asymptotic bound,
the analyses of the two methods are completely
different. In the case of AG, there are
two analyses by Nesterov \cite{Nesterov:k2, Nesterov:book}.
In our own previous work \cite{KarimiVavasis2016}, we provided a third
analysis based on another potential.
In the case of linear
conjugate gradient, we are aware of no direct analysis of the algorithm
prior to our own previous work \cite{KarimiVavasis2016}.
By ``direct,'' we mean an analysis of $f(\x_{k})-f(\x^*)$ using the
recurrence inherent in CG. Instead, the standard
analysis introduced by Daniel, whose theorem is
stated precisely below, proves that another iterative method, for
example Chebyshev iteration \cite{GVL} or the heavy-ball iteration
\cite{Polyak,bertsekas} achieves reduction of
$\left(1-O(\sqrt{\ell/L})\right)$ per iteration. Then one appeals
to the optimality of the CG iterate in the Krylov space generated
by all of these methods to claim that the CG iterate must be at
least as good as the others.
Recently, Bubeck, Lee and Singh \cite{bubeck} proposed
a variant of accelerated
gradient derived from a geometric analysis,
called ``geometric descent'' (GD).
As presented by the authors, the algorithm requires an exact
line-search on each iteration, although it is possible that similar theoretical
guarantees could be established for an approximate
line search. Under the assumption
that the line-search requires a constant number of function and
gradient evaluations, then GD also requires
$k=O(\sqrt{L/\ell}\log(1/\eps))$ iterations.
We propose analyses of these three algorithms that share several
common features.
First, all three algorithms
can be analyzed using the geometric lemma
of Bubeck, Lee and Singh, which is presented in Section~\ref{sec:BLSlemma}.
Second, all three are related to an ``idealized'' unimplementable
algorithm which is described and analyzed in Section~\ref{sec:idealized}.
Finally, the convergence
behavior for all three of them is governed by a potential $\tilde\sigma_k$,
which has the following three properties:
\begin{enumerate}
\item
There exists an auxiliary sequence of vectors $\y_0,\y_1,\ldots$ such that
$$\tilde\sigma_k^2 \ge \Vert\y_k-\x^*\Vert^2 +\frac{2(f(\x_k)-f(\x^*))}{\ell},$$
for $k=0,1,2,\ldots,$
\item
$\displaystyle
\tilde\sigma_{k+1}^2 \le \left(1-\sqrt{\frac{\ell}{L}}\right)\tilde\sigma_k^2$, and
\item
$\tilde\sigma_k$ is computable on each iteration (assuming prior knowledge of
$\ell,L$) in $O(n)$ operations. Our definition of ``computable'' is
explained in more detail in Section~\ref{sec:computable}.
\end{enumerate}
These results are established for the GD algorithm in Section~\ref{sec:GDanalysis},
for the CG algorithm in
Section~\ref{sec:CGanalysis2},
and for the
AG algorithm in Section~\ref{sec:AGanalysis}.
The relationship between IA and the three algorithms is explained in detail
as follows.
Section~\ref{sec:CGanalysis} shows that CG exactly implements IA
for quadratic objective functions even though IA is in general unimplementable.
On the other hand, GD (analyzed in Section~\ref{sec:GDanalysis2}) and AG
(analyzed in Section~\ref{sec:AGanalysis2})
both simulate IA in the sense
that they produce optimal iterates given partial information
about the objective in the current iterate.
Because the three algorithms each compute a scalar $\tilde \sigma_k$ satisfying
the above properties, it becomes straightforward to create hybrids. In other
words, the above analysis treats all three algorithms as essentially 1-step
processes as opposed to long inductive chains. In Section~\ref{sec:hybrid} we
propose a hybrid CG algorithm that performs well in computational tests, which
are described in Section~\ref{sec:comp}.
The reason for making a hybrid CG algorithm is that the performance
of linear conjugate gradient on specific instances can be much better than the
worst-case bound given by Daniel's theorem; the performance on specific
instances is highly governed by the eigenvalues of $A$. Therefore, using
a conjugate-gradient-like algorithm for a nonlinear problem may also perform
better than the $(1-\sqrt{\ell/L})^k$ worst-case convergence bound. This is
also the motivation for traditional nonlinear conjugate gradient, as we
discuss below.
We conclude this introductory section with a few remarks about our
previous related manuscript \cite{KarimiVavasis2016}. In that work, we established
that a potential defined by
$$\Psi_k=\Vert\y_k-\x^*\Vert^2 +\frac{2(f(\x_k)-f(\x^*))}{\ell}$$
decreases by a factor $(1-\sqrt{\ell/L})$ per iteration for both CG
and AG. This $\Psi_k$ is not computable since $\x^*$ is not known
{\em a priori}, and therefore our previous result does not have any
immediate algorithmic application. Our notion of ``computability''
is defined in more detail in Section~\ref{sec:computable}.
The potential $\tilde\sigma_k$ developed
herein is computable on every step and therefore may be used to guide
a hybrid algorithm. In addition, the current work also applies to
the GD method, which was not addressed in our previous manuscript.
\section{Notation}
Define $B(\x,r)=\{\y\in\R^n:\Vert \x-\y\Vert\le r\}$, i.e., the
closed ball centered at $\x\in\R^n$ of radius $r$.
An {\em affine set}
is a set of the form $\mathcal{M}=\{\c+\w:\w\in\mathcal{W}\}$ where $\c\in\R^n$ is fixed
and $\mathcal{W}\subset \R^n$ is a linear subspace.
We write this as $\mathcal{M}=\c+\mathcal{W}$, a special case of a Minkowski sum.
Another notation for an affine set is
$\mathop{\rm aff}\{\x_1,\ldots,\x_k\}$, which is defined as
$\{\alpha_1\x_1+\cdots+\alpha_k\x_k:\alpha_1+\cdots+\alpha_k=1\}$.
If $\w_1,\ldots,\w_k$ span $\mathcal{W}$, then
it is clear that
$\c+\mathcal{W}=\mathop{\rm aff}\{\c,\c+\w_1,\ldots,\c+\w_k\}$.
Suppose $\mathcal{U}$ is an affine subset of $\R^n$.
The set ${\bf T}\mathcal{U}=\{\x-\y:\x,\y\in\mathcal{U}\}$
is called the {\em tangent space}
of $\mathcal{U}$ and is a linear subspace. If $\mathcal{U}$ is presented as
$\mathcal{U}=\c+\mathcal{W}$, where $\mathcal{W}$ is a linear subspace, then
it follows that ${\bf T}\mathcal{U}=\mathcal{W}$.
\section{Preliminary lemmas}
\label{sec:BLSlemma}
We start with a special case of
a lemma from Drusvyatskiy et al.\ \cite{Drusvyatskiy}, which
is an extension of work by Bubeck et al.\ \cite{bubeck}:
\begin{lemma}
Suppose $\x,\y\in\R^n$. Let
$\delta,\rho,\sigma$ be three nonnegative scalars
such that $\delta\le \Vert\x-\y\Vert$.
Suppose $\lambda\in[0,1]$ and
\begin{equation}
\z=(1-\lambda)\x+\lambda\y.
\label{eq:lemmaz}
\end{equation}
Then
$$B(\x,\rho)\cap B(\y,\sigma)\subset B(\z,\xi),$$
where
\begin{equation}
\xi = \sqrt{(1-\lambda)\rho^2+\lambda\sigma^2-\lambda(1-\lambda)\delta^2}.
\label{eq:xidef}
\end{equation}
The
argument of the square-root in \eref{eq:xidef} is guaranteed to be nonnegative
whenever $B(\x,\rho)\cap B(\y,\sigma)\ne \emptyset$,
or equivalently, whenever $\rho+\sigma\ge \Vert\x-\y\Vert$.
\label{lem:ballintersect1}
\end{lemma}
\begin{proof}
We prove the second claim first.
The quantity appearing in the square root of \eref{eq:xidef} is
nonnegative as the following inequalities show:
\begin{align*}
(1-\lambda)\rho^2 + \lambda\sigma^2 - (1-\lambda)\lambda\delta^2
&=(1-\lambda)\lambda(\rho^2 + \sigma^2 - \delta^2) +
(1-\lambda)^2\rho^2+\lambda^2\sigma^2 \\
&\ge (1-\lambda)\lambda(\rho^2 + \sigma^2 - \delta^2) +
2(1-\lambda)\lambda\rho\sigma \\
&= (1-\lambda)\lambda((\rho + \sigma)^2 - \delta^2) \\
&\ge 0,
\end{align*}
where the last line uses the assumptions $\rho+\sigma\ge\Vert\x-\y\Vert\ge \delta$.
Now for the first part of the lemma,
the proof that $B(\x,\rho)\cap B(\y,\sigma)\subset B(\z,\xi)$
for $\xi$ given by \eref{eq:xidef} follows from more general
analysis in Drusvyatskiy et al.~\cite{Drusvyatskiy}.
Assume that $\p\in B(\x,\rho)\cap B(\y,\sigma)$ so
\begin{align}
(\p-\x)^T(\p-\x) -\rho^2 &\le 0, \label{eq:px1} \\
(\p-\y)^T(\p-\y) -\sigma^2 &\le 0. \label{eq:py1}
\end{align}
For $\lambda\in[0,1]$, add $(1-\lambda)$ times \eref{eq:px1} to
$\lambda$ times \eref{eq:py1} and rearrange to obtain a new inequality satisfied
by $\p$:
$$(\p-\z)^T(\p-\z)+(1-\lambda)\lambda\Vert\x-\y\Vert^2 -(1-\lambda)\rho^2-
\lambda\sigma^2\le 0,$$
i.e.
$$\Vert\p-\z\Vert\le \left((1-\lambda)\rho^2+\lambda\sigma^2-(1-\lambda)\lambda\Vert\x-\y\Vert^2\right)^{1/2},$$
where $\z$ is defined by \eref{eq:lemmaz}. Using the assumption
$\delta\le\Vert\x-\y\Vert$, we observe that $\p\in B(\z,\xi)$, where $\xi$ is defined
by \eref{eq:xidef}.
\end{proof}
This leads to the following, which is
a more precise statement of the geometric lemma from
Bubeck et al.\ \cite{bubeck}:
\begin{lemma}
Let $\x,\y,\rho,\sigma,\delta$ be as in the preceding lemma.
Under the assumption
$\rho+\sigma\ge \delta$ and the
additional assumption
$\delta \ge \sqrt{|\rho^2-\sigma^2|}$,
\eref{eq:xidef} is minimized
over possible choices of $\lambda\in[0,1]$ by:
\begin{equation}
\lambda^* = \frac{\delta^2+\rho^2-\sigma^2}{2\delta^2},
\label{eq:lambdastar}
\end{equation}
yielding
\begin{equation}
\z^*=(1-\lambda^*)\x+\lambda^*\y,
\label{eq:zdef}
\end{equation}
in which case the minimum value of \eref{eq:xidef} is,
\begin{equation}
\xi^*=\frac{1}{2}\sqrt{2\rho^2+2\sigma^2-\delta^2-
\frac{(\rho^2-\sigma^2)^2}{\delta^2}}.
\label{eq:xistardef}
\end{equation}
\label{lem:ballintersect2}
\end{lemma}
\begin{proof}
First, note that
$|\rho^2-\sigma^2|/\delta^2\le 1$ by the assumption made, thus
ensuring that $\lambda^*\in[0,1]$.
Therefore, it follows from the preceding lemma that
the quantity appearing in the square root of \eref{eq:xidef} is nonnegative.
The previous lemma establishes that for any $\p\in B(\x,\rho)\cap B(\y,\sigma)$,
and for an arbitrary $\lambda\in[0,1]$,
$$
\Vert\p-\z\Vert^2 \le (1-\lambda)\rho^2 + \lambda\sigma^2 - \lambda(1-\lambda)\delta^2.$$
We observe that the right-hand side is a convex quadratic in $\lambda$ and
hence is minimized when the derivative with respect to $\lambda$ is zero, and one
checks that this value is precisely \eref{eq:lambdastar}. Substituting
$\lambda=\lambda^*$ into \eref{eq:xidef} yields \eref{eq:xistardef}.
\end{proof}
\section{Idealized algorithm}
\label{sec:idealized}
We consider the following idealized algorithm for minimizing $f(\x)$, where
$f:\R^n\rightarrow\R$ is smooth, strongly convex. As in the introduction,
let $\ell,L$ denote the two parameters of strong convexity.
\begin{align}
& \mbox{\bf Idealized Algorithm (IA)} \notag\\
& \x_0:=\mbox{arbitrary} \notag \\
& \mathcal{M}_1 := \x_0+\Span\{\nabla f(\x_0)\} \notag \\
&\mbox{for }k:=1,2,\ldots \notag \\
&\hphantom{\mbox{for }} \x_{k}:=\argmin\{f(\x):\x\in \mathcal{M}_{k}\}\label{eq:ia2.xupd} \\
& \hphantom{\mbox{for }} \y_k := \argmin\{\Vert\y-\x^*\Vert:\y\in\mathcal{M}_{k}\}
\label{eq:ia2.yupd}\\
&\hphantom{\mbox{for }} \mathcal{M}_{k+1}:=\x_{k}+\Span\{\y_k-\x_k,\nabla f(\x_{k})\} \label{eq:ia2.mupd}\\
& \mbox{end} \notag
\end{align}
This algorithm is called ``idealized'' because it is not implementable
in the general case;
it requires prior knowledge of $\x^*$ in
\eref{eq:ia2.yupd}. Nonetheless, we will argue
that CG, accelerated gradient, and geometric descent are related
to the idealized algorithm in different ways.
Notice that $\mathcal{M}_k$ is an affine set that is two-dimensional
on most iterations.
Alternate notation for this set, also used herein, is
$\mathcal{M}_{k}=\mathop{\rm aff}\{\x_{k-1},\y_{k-1},\x_{k-1}-\nabla f(\x_{k-1})\}$.
Note also that by \eref{eq:ia2.xupd} and \eref{eq:ia2.yupd},
$\x_k,\y_k\in\mathcal{M}_k$, and by \eref{eq:ia2.mupd},
$\x_k,\y_k\in\mathcal{M}_{k+1}$, and therefore $\mathcal{M}_k$,
$\mathcal{M}_{k+1}$ have a common 1-dimensional affine subspace.
We start with the main theorem about
IA. For iteration $k$, define
a potential $\Psi_k$ as follows:
\begin{equation}
\Psi_k= \Vert \y_{k}-\x^*\Vert^2+\frac{2(f(\x_k)-f(\x^*))}{\ell}.
\label{eq:psidef}
\end{equation}
\begin{theorem}
For Algorithm IA, for each $k=1,2,\ldots$,
$$\Psi_{k+1}\le \left(1-\sqrt{\frac{\ell}{L}}\right)\Psi_k.$$
\label{thm:iacvg}
\end{theorem}
\begin{proof}
The proof follows closely from the analysis in \cite{bubeck}. Define
\begin{align*}
\obx_k&=\x_k-\nabla f(\x_k)/L, \\
\oobx_k&=\x_k-\nabla f(\x_k)/\ell.
\end{align*}
The point $\obx_k$
satisfies $f(\obx_k)\le f(\x_k)-\Vert\nabla f(\x_k)\Vert^2/(2L)$.
Observe that
$\obx_k\in\mathcal{M}_{k+1}$,
so $\obx_k$ is a candidate
for the optimizer in \eref{eq:ia2.xupd} on iteration $k+1$, and hence
\begin{equation}
f(\x_{k+1})\le f(\x_k)-\Vert\nabla f(\x_k)\Vert^2/(2L),
\label{eq:fdesc0}
\end{equation}
which is equivalent to
\begin{equation}
\frac{2(f(\x_{k+1})-f(\x^*))}{\ell}\le \frac{2(f(\x_k)-f(\x^*))}{\ell}-
\frac{\Vert\nabla f(\x_k)\Vert^2}{L\ell}.
\label{eq:fdesc}
\end{equation}
Next, observe that a rearrangement of the definition of strong convexity yields:
\begin{equation}
\frac{-2\nabla f(\x_k)^T(\x_k-\x^*)}{\ell}
+\Vert\x_k-\x^*\Vert^2
\le \frac{-2(f(\x_k)-f(\x^*))}{\ell}.
\label{eq:strcvx1}
\end{equation}
We use this result in the following:
\begin{align}
\Vert\oobx_k-\x^*\Vert^2 & = \Vert\oobx_k-\x_k+\x_k-\x^*\Vert^2 \notag \\
&= \Vert\oobx_k-\x_k\Vert^2 + 2(\oobx_k-\x_k)^T(\x_k-\x^*) + \Vert\x_k-\x^*\Vert^2 \notag\\
&=\frac{\Vert\nabla f(\x_k)\Vert^2}{\ell^2} -
\frac{2\nabla f(\x_k)^T(\x_k-\x^*)}{\ell} +\Vert\x_k-\x^*\Vert^2 \notag \\
&\le
\frac{\Vert\nabla f(\x_k)\Vert^2}{\ell^2}
-\frac{2(f(\x_k)-f(\x^*))}{\ell}\mbox{ (by \eref{eq:strcvx1})}\label{eq:rdef0} \\
&\equiv \rho_k^2,\label{eq:rdef}
\end{align}
where we introduced $\rho_k$ for the square root of the quantity in \eref{eq:rdef0}.
Thus, $\x^*\in B(\oobx_k,\rho_k)$.
Next, define
\begin{equation}
\sigma_k=\Vert\y_k-\x^*\Vert,
\label{eq:sdef}
\end{equation}
so that $\x^*\in B(\y_k,\sigma_k)$.
By the minimality
property of $\x_k$, we know that $\nabla f(\x_k)$ is orthogonal to
${\bf T}\mathcal{M}_{k}$,
which contains $\x_k-\y_k$, i.e.,
\begin{equation}
\nabla f(\x_k)^T(\y_k-\x_k)=0.
\label{eq:gradorth}
\end{equation}
Thus,
\begin{align}
\Vert\y_k-\oobx_k\Vert
&= \Vert(\y_k-\x_k)+(\x_k-\oobx_k)\Vert \notag \\
&= \Vert(\y_k-\x_k)+\nabla f(\x_k)/\ell\Vert \notag \\
&= \sqrt{\Vert\y_k-\x_k\Vert^2+\Vert\nabla f(\x_k)/\ell\Vert^2} \mbox{ (by Pythagoras's theorem)}\notag \\
&\ge \Vert\nabla f(\x_k)\Vert/\ell, \label{eq:ykoobx}
\end{align}
so define
\begin{equation}
\delta_k = \Vert\nabla f(\x_k)\Vert/\ell,
\label{eq:ddef}
\end{equation}
to conclude that $\Vert\y_k-\oobx_k\Vert\ge \delta_k$.
We have defined $\delta_k,\rho_k,\sigma_k$ as in
Lemma~\ref{lem:ballintersect2}.
We need to confirm the inequality
$\rho_k+\sigma_k\ge \delta_k$:
\begin{align*}
\rho_k+\sigma_k&\ge \Vert\oobx_k-\x^*\Vert + \Vert \y_k-\x^*\Vert \\
& \ge \Vert\oobx_k-\y_k\Vert \\
&\ge \delta_k.
\end{align*}
The other inequality is derived as follows. First,
$\rho_k\le \delta_k$ since $\delta_k^2$ is the first term in \eref{eq:rdef0}.
Also, $\sigma_k\le \delta_k$ since
\begin{align*}
\sigma_k^2 &= \Vert \y_k-\x^*\Vert^2 \\
&\le \Vert \x_k-\x^*\Vert^2 && \mbox{(by the optimality of $\y_k$)} \\
&\le \frac{2(f(\x_k)-f(\x^*))}{\ell} && \mbox{(by strong convexity)} \\
&\le \delta_k^2 && \mbox{(since \eref{eq:rdef0} is nonnegative)}.
\end{align*}
Thus, $\delta_k\ge\max(\rho_k,\sigma_k)$ so $\delta_k^2\ge |\rho_k^2-\sigma_k^2|$.
Therefore, we can conclude from Lemma~\ref{lem:ballintersect2} that
there exists a $\z_k^*\in\mathop{\rm aff}\{\oobx_k,\y_k\}$ (and hence in
$\mathcal{M}_{k+1}$)
such that
\begin{equation}
\Vert\z_k^*-\x^*\Vert\le \xi_k^*,
\label{eq:zbound}
\end{equation}
where $\xi_k^*$ is defined by \eref{eq:xistardef} for $\rho_k,\sigma_k,\delta_k$
given by \eref{eq:rdef},
\eref{eq:sdef} and \eref{eq:ddef} respectively. After some simplification
and cancellation of \eref{eq:xistardef}, one arrives at:
\begin{equation}
(\xi_k^*)^2=\Vert\y_k-\x^*\Vert^2 - \left(\frac{f(\x_k)-f(\x^*)+\Vert\y_k-\x^*\Vert^2\cdot\ell/2}
{\Vert\nabla f(\x_k)\Vert}\right)^2.
\label{eq:pkdef}
\end{equation}
Since $\y_{k+1}$ is the optimizer of \eref{eq:ia2.yupd},
$\y_{k+1}$ is
at least as close to $\x^*$ as $\z_k^*$, and hence,
$$\Vert\y_{k+1}-\x^*\Vert^2\le
\Vert\y_k-\x^*\Vert^2 - \left(\frac{f(\x_k)-f(\x^*)+\Vert\y_k-\x^*\Vert^2\cdot\ell/2}
{\Vert\nabla f(\x_k)\Vert}\right)^2.$$
Adding this inequality to \eref{eq:fdesc} yields:
\begin{align*}
\Psi_{k+1}&=\Vert\y_{k+1}-\x^*\Vert^2 + \frac{2(f(\x_{k+1})-f(\x^*))}{\ell} \\
&\le \Vert\y_k-\x^*\Vert^2 - \left(\frac{f(\x_k)-f(\x^*)+\Vert\y_k-\x^*\Vert^2\cdot\ell/2}
{\Vert\nabla f(\x_k)\Vert}\right)^2 + \frac{2(f(\x_k)-f(\x^*))}{\ell}-
\frac{\Vert\nabla f(\x_k)\Vert^2}{L\ell} \\
& \le
\Vert\y_k-\x^*\Vert^2 -\frac{2\left[f(\x_k)-f(\x^*)+\Vert\y_k-\x^*\Vert^2\cdot\ell/2\right]}{\sqrt{L\ell}}
+ \frac{2(f(\x_k)-f(\x^*))}{\ell} \\
&=
\left[\Vert\y_k-\x^*\Vert^2+\frac{2(f(\x_k)-f(\x^*))}{\ell}\right]
\cdot\left(1-\sqrt{\frac{\ell}{L}}\right) \\
&=
\Psi_k
\cdot\left(1-\sqrt{\frac{\ell}{L}}\right).
\end{align*}
The third line was obtained by applying the inequality
$a^2+b^2\ge 2ab$ to the second and fourth terms of the second line.
\end{proof}
\section{Analysis of the geometric descent algorithm}
\label{sec:GDanalysis}
In this section we present the
geometric descent (GD) algorithm due to
\cite{bubeck} and
an analysis of it.
Our analysis varies slightly from the proof
due to \cite{bubeck}; in their proof, the
potential involves the term $2(f(\obx_k)-f(\x^*))/\ell$
rather than $2(f(\x_k)-f(\x^*))/\ell$.
The reason for
the change is to unify the analysis with the other algorithms
considered in order for the NCG construction in Section~\ref{sec:hybrid}
to be applicable.
\begin{align}
& \mbox{\bf Geometric Descent} \notag\\
& \x_0:=\mbox{arbitrary} \notag \\
& \y_0:=\x_0 \notag\\
&\mbox{for }k:=1,2,\ldots \notag \\
& \hphantom{\mbox{for }} {\obx}_{k-1} : =\x_{k-1}-\nabla f(\x_{k-1})/L \label{eq:gd.steep} \\
& \hphantom{\mbox{for }} {\oobx}_{k-1} : =\x_{k-1}-\nabla f(\x_{k-1})/\ell \notag \\
&\hphantom{\mbox{for }} \mbox{Determine $\lambda_k$ according to
\eref{eq:gd.lambda1} or \eref{eq:gd.lambda2} below.} \notag \\
&\hphantom{\mbox{for }} \y_k:=(1-\lambda_k)\oobx_{k-1} + \lambda_k\y_{k-1} \label{eq:gd.yupd} \\
&\hphantom{\mbox{for }} \x_k:=\argmin\{f(\x):\x\in \mathop{\rm aff}\{\obx_{k-1},\y_k\}\}\label{eq:gd.xupd} \\
& \mbox{end} \notag
\end{align}
Note: The operation in \eref{eq:gd.xupd} is a line search and
requires an inner iteration to find the optimal $\x$ in the specified line.
This algorithm (which is one of several variants
of GD presented by the authors) is derived in \cite{bubeck}.
It can be regarded as
extracting the essential properties
of $\x_k$ and $\y_k$ used in the proof of Theorem~\ref{thm:iacvg} to
obtain an implementable algorithm.
Indeed, the authors use the proof that we presented
in Section~\ref{sec:idealized} to analyze GD rather
than IA. A more precise statement of the relationship between
GD and IA is provided in the next section.
Intuitively, the proof in the previous
section shows that $\x_k$ need not be the minimizer
in \eref{eq:ia2.xupd}; it suffices for $\x_k$ to satisfy the two
properties \eref{eq:fdesc0} and \eref{eq:gradorth}. The Geometric
Descent algorithm satisfies these two properties with a ``dogleg'' step
in \eref{eq:gd.xupd}
that combines a gradient step with a step toward $\y_k$.
Property \eref{eq:fdesc0} is satisfied because $f(\x_k)\le f(\obx_{k-1})$,
and property \eref{eq:gradorth} is satisfied because of the minimality of
$\x_k$ with respect to $\mathop{\rm aff}\{\obx_{k-1},\y_k\}.$
The proof also shows that it suffices to take a $\y_{k+1}$ that
satisfies the inequality for $\z_k^*$ in
\eref{eq:zbound} rather than solving \eref{eq:ia2.yupd}.
We now turn to the computation of $\lambda_k$ and the
associated issues with the radii $\rho_k,\sigma_k$.
Recall that $\rho_k^{\rm IA}$ from \eref{eq:rdef0}
and $\sigma_k^{\rm IA}$ from \eref{eq:sdef} both involve
$\x^*$ and hence are unimplementable. The difficulty with
$\sigma_k$ is straightforward to resolve: define $\sigma_k$
to be an upper bound on $\Vert\y_k-\x^*\Vert$ rather than its exact value,
and ensure inductively that $\sigma_{k+1}$ is an upper
bound on $\Vert\y_{k+1}-\x^*\Vert$.
The difficulty with \eref{eq:rdef0} is resolved using
offsets denoted by $\gamma_k$,
a clever device from \cite{bubeck}. As in \eref{eq:rdef0} and \eref{eq:rdef},
\begin{align}
\Vert\oobx_k-\x^*\Vert^2
&\le
\frac{\Vert\nabla f(\x_k)\Vert^2}{\ell^2}
-\frac{2(f(\x_k)-f(\x^*))}{\ell}\mbox{ (by \eref{eq:strcvx1})}\label{eq:gd.rdef0} \\
&\equiv \rho_k^2,\label{eq:gd.rdef} \\
& \equiv \tilde\rho_k^2 - \gamma_k \label{eq:gd.rdef2}
\end{align}
where
\begin{align}
\tilde\rho_k &= \frac{\Vert\nabla f(\x_k)\Vert}{\ell}, \label{eq:rpdef} \\
\gamma_k &= \frac{2(f(\x_k)-f(\x^*))}{\ell}. \label{eq:gammakdef}
\end{align}
Let $\sigma_0,\sigma_1,\ldots,$ be a sequence of positive scalars
such that $\sigma_k\ge \Vert\y_k-\x^*\Vert$ for all $k=0,1,\ldots$,
and suppose that
\begin{equation}
\tilde\sigma_k = (\sigma_k^2 + \gamma_k)^{1/2}.
\label{eq:sigmakprime}
\end{equation}
Thus, we have the relationships:
\begin{align*}
\tilde\sigma_k^2 &= \sigma_k^2 + \gamma_k, \\
\tilde\rho_k^2 &= \rho_k^2 + \gamma_k.
\end{align*}
Note that $\tilde\rho_k$ is easily computable on the $k$th iteration, while
$\tilde\sigma_k$ can be updated recursively. The rationale of these definitions is
as follows. From \eref{eq:lambdastar}, one sees that
if $\sigma^2$ and $\rho^2$ are both incremented by the same constant
additive term $\gamma_k$,
then $\lambda^*$ is unaffected. Also, it follows from this observation
and from \eref{eq:xistardef} that if $\sigma^2$ and $\rho^2$ are both incremented
by $\gamma_k$, then $(\xi^*)^2$ is also incremented by $\gamma_k$. Thus,
the GD algorithm works throughout with radii whose squares are incremented by $\gamma_k$.
This increment $\gamma_k$ changes from one iteration to the next and hence must
be adjusted at the start of each iteration (see \eref{eq:sigmadef} below).
In more detail, the sequence of computations is as follows.
We initialize the algorithm by:
\begin{equation}
\sigma_0:=\sqrt{2}\Vert\nabla f(\x_0)\Vert/\ell. \label{eq:sigma0}
\end{equation}
This initialization is carried out prior to the main loop of GD.
The rationale for this formula is provided in the proof of
\eref{eq:sigma-ub} below.
Assuming inductively that $\tilde\sigma_{k-1}$ is already known,
compute as follows:
\begin{align}
& \tilde\rho_{k-1} := \frac{\Vert\nabla f(\x_{k-1})\Vert}{\ell}
\label{eq:gd.rpdef} \\
&\mbox{if $\tilde\sigma_{k-1}^2\le 2\tilde\rho_{k-1}^2$} \label{eq:gd.ifstmt} \\
& \hphantom{\mbox{if }}\delta_{k-1} := \Vert\y_{k-1}-\oobx_{k-1}\Vert, \label{eq:gd.deltadef} \\
& \hphantom{\mbox{if }}\lambda_k :=
\frac{\delta_{k-1}^2+\tilde\rho_{k-1}^2-\tilde\sigma_{k-1}^2}{2\delta_{k-1}^2},
&&\mbox{(as in \eref{eq:lambdastar})}
\label{eq:gd.lambda1}\\
& \hphantom{\mbox{if }}\tilde\xi^*_k :=
\frac{1}{2}\sqrt{2\tilde\rho_{k-1}^2+2\tilde\sigma_{k-1}^2-\delta_{k-1}^2-
\frac{(\tilde\rho_{k-1}^2-\tilde\sigma_{k-1}^2)^2}{\delta_{k-1}^2}},
&&\mbox{(as in \eref{eq:xistardef})} \label{eq:gd.xik} \\
&\hphantom{\mbox{if }} \tilde\sigma_k := \sqrt{(\tilde\xi^*_k)^2-\gamma_{k-1}+\gamma_{k}} \label{eq:sigmadef} \\
& \mbox{else} \notag \\
& \hphantom{\mbox{if }}\lambda_k := 0 \label{eq:gd.lambda2}\\
& \hphantom{\mbox{if }}\tilde\sigma_k := \sqrt{\tilde\rho_{k-1}^2 - \gamma_{k-1}+\gamma_{k}}.
\label{eq:sigmadef2}
\end{align}
Although computation of $\gamma_k$ alone requires prior knowledge of $\x^*$,
the difference $\gamma_{k}-\gamma_{k-1}$ appearing
in \eref{eq:sigmadef} and \eref{eq:sigmadef2} does not, as is
evident from \eref{eq:gammakdef}. This is the motivation for
using $\tilde\sigma_k$ and $\tilde\rho_k$ in the computation instead
of $\sigma_k,\rho_k$.
In the theorems below
it is confirmed that the square
roots in \eref{eq:sigmadef} and \eref{eq:sigmadef2} take nonnegative
arguments.
The convergence of the GD algorithm is proved via
two theorems, which are both variants of
theorems due to \cite{bubeck}.
Before stating and proving the two theorems, we establish two
inequalities.
As noted earlier, \eref{eq:gradorth} holds for GD, and
hence so does \eref{eq:ykoobx}.
Then it follows from \eref{eq:ykoobx}
combined with \eref{eq:gd.rpdef}, \eref{eq:gd.deltadef} that
\begin{equation}
\tilde\rho_{k-1}= \frac{\Vert \nabla f(\x_{k-1})\Vert}{\ell} \le \delta_{k-1}.
\label{eq:gd.deltarhobd}
\end{equation}
Next, regarding $\gamma_{k}-\gamma_{k-1}$ appearing in
\eref{eq:sigmadef} and \eref{eq:sigmadef2}, observe
\begin{align}
\gamma_{k}-\gamma_{k-1} & = \frac{2(f(\x_{k})-f(\x_{k-1}))}{\ell} \notag\\
& \le \frac{2(f(\obx_{k-1})-f(\x_{k-1}))}{\ell}\notag\\
&\le -\frac{\Vert\nabla f(\x_{k-1})\Vert^2}{L\ell} \label{eq:gammadiff}
\end{align}
where the second line follows because $f(\x_{k})\le f(\obx_{k-1})$ by
\eref{eq:gd.xupd} while the third follows by \eref{eq:fdesc0}.
\begin{theorem}
For all $k=1,\ldots,$
\begin{equation}
\tilde\sigma_k^2 \ge \Vert\y_{k}-\x^*\Vert^2 + \frac{2(f(\x_k)-f(\x^*))}{\ell}.
\label{eq:sigma-ub}
\end{equation}
\end{theorem}
\begin{proof}
Note that the statement of the theorem may be equivalently written,
$$\tilde\sigma_k^2 \ge \Vert\y_{k}-\x^*\Vert^2 + \gamma_{k}.$$
The proof is by induction.
The base case is that $\tilde\sigma_0^2\ge \Vert\y_0-\x^*\Vert^2+\gamma_0$.
Both terms of the right-hand side may be bounded by noting that strong convexity applied
to the two points $\x_0,\x^*$ and rearranged may be written:
$$-\frac{2}{\ell}\nabla f(\x_0)^T(\x^*-\x_0)\ge \Vert \x_0-\x^*\Vert^2 +\gamma_0.$$
Again by strong convexity, $\Vert\x_0-\x^*\Vert\le \Vert \nabla f(\x_0)\Vert/\ell$, so
we can apply this inequality and the Cauchy-Schwarz inequality on the left-hand
side to obtain
$$\frac{2\Vert\nabla f(\x_0)\Vert^2}{\ell^2}\ge \Vert\x_0-\x^*\Vert^2 +\gamma_0.$$
Thus, the bound
$\tilde\sigma_0^2\ge\Vert\y_0-\x^*\Vert^2+\gamma_0$ is assured
by \eref{eq:sigma0}.
For the induction case, assume $k\ge 1$ and
the induction hypothesis
$$\tilde\sigma_{k-1}^2 \ge \Vert\y_{k-1}-\x^*\Vert^2 + \gamma_{k-1}.$$
There are two possibilities depending on the ``if''-statement
\eref{eq:gd.ifstmt}. First, suppose the condition
of \eref{eq:gd.ifstmt} holds, which may
be rewritten as
$\tilde\sigma_{k-1}^2-\tilde\rho_{k-1}^2 \le \tilde\rho_{k-1}^2$.
By \eref{eq:gd.deltarhobd}, this implies $\delta_{k-1}^2\ge \tilde\sigma_{k-1}^2-\tilde\rho_{k-1}^2$,
and we already know from \eref{eq:gd.deltarhobd} that
$\delta_{k-1}^2\ge\tilde\rho_{k-1}^2\ge \tilde\rho_{k-1}^2-\tilde\sigma_{k-1}^2$.
The conclusion from all these inequalities is
\begin{equation}
\delta_{k-1}^2\ge\tilde\rho_{k-1}^2\ge |\tilde\rho_{k-1}^2-\tilde\sigma_{k-1}^2|.
\label{eq:gd.chain1}
\end{equation}
Referring now to Lemma~\ref{lem:ballintersect2}, make the following identifications:
\begin{align*}
\x &= \oobx_{k-1},\\
\y &= \y_{k-1}, \\
\rho^2 & = \tilde\rho_{k-1}^2 - \gamma_{k-1}, \\
\sigma^2 &= \tilde\sigma_{k-1}^2 - \gamma_{k-1}, \\
\delta &= \delta_{k-1},
\end{align*}
in order to apply the lemma. The condition $\rho+\sigma\ge \delta$
follows immediately since we already have assumed by induction
that $\x^*\in B(\y,\sigma)$ and it follows from \eref{eq:gd.rdef}
that $\x^*\in B(\x,\rho)$, thus implying that
$B(\x,\rho)\cap B(\y,\sigma)\ne\emptyset$. The condition
$\delta^2\ge\rho^2-\sigma^2$ follows because $\delta_{k-1}\ge\tilde\rho_{k-1}$
as in \eref{eq:gd.chain1}. The condition
$\delta^2\ge \sigma^2-\rho^2$ follows because
$\delta_{k-1}^2\ge \tilde\sigma_{k-1}^2-\tilde\rho_{k-1}^2$ (as established in
\eref{eq:gd.chain1}).
Therefore, by the lemma,
if we define $\lambda^*$ by
$$\lambda^*=\frac{\delta^2+\rho^2-\sigma^2}{2\delta^2}
=\frac{\delta_{k-1}^2 +\tilde\rho_{k-1}^2
-\tilde\sigma_{k-1}^2}{2\delta_{k-1}^2},$$
i.e., the formula for $\lambda_k$ in \eref{eq:gd.lambda1}
(notice that the two terms $\gamma_{k-1}$ cancel),
and we define $\y_k$ as in \eref{eq:gd.yupd},
then
\begin{equation}
\x^*\in B(\y_k,\xi)
\label{eq:gd.xi1}
\end{equation}
where
\begin{align*}
\xi^2 &= \frac{1}{4}\left(2\rho^2 + 2\sigma^2 - \delta^2 -\frac{1}{\delta^2}
(\rho^2-\sigma^2)^2\right) \\
& =
\frac{1}{4}
\left(
2\tilde\rho_{k-1}^2
+2\tilde\sigma_{k-1}^2 - 4\gamma_{k-1} - \delta_{k-1}^2
-\frac{1}{\delta_{k-1}^2}
\left(
\tilde\rho_{k-1}^2 - \tilde\sigma_{k-1}^2\right)^2\right) \\
&=
(\tilde\xi_k^*)^2 - \gamma_{k-1} &&\mbox{(by \eref{eq:gd.xik})} \\
& =
\tilde\sigma_k^2 - \gamma_k, &&\mbox{(by \eref{eq:sigmadef})}.
\end{align*}
This establishes
the theorem in the first case.
If the condition in \eref{eq:gd.ifstmt} fails, then
$\lambda_k=0$ as in \eref{eq:gd.lambda2}, implying from
\eref{eq:gd.yupd} that $\y_k=\oobx_{k-1}$.
Then $\Vert\y_k-\x^*\Vert^2\le \tilde\rho_{k-1}^2-\gamma_{k-1}$
by
\eref{eq:gd.rdef2}, implying by \eref{eq:sigmadef2}
that $\Vert\y_k-\x^*\Vert^2 \le \tilde\sigma_k^2-\gamma_k$,
thus establishing the theorem in the second case.
\end{proof}
\begin{theorem}
For each $k=1,2,\ldots,$
\begin{equation}
\tilde\sigma_k^2 \le \left(1-\sqrt{\frac{\ell}{L}}\right)\tilde\sigma_{k-1}^2.
\label{eq:sigma-decr}
\end{equation}
\end{theorem}
\begin{proof}
We again take two cases depending on whether
the condition in \eref{eq:gd.ifstmt} holds.
If it holds, then
\eref{eq:gd.chain1} in the preceding proof
holds.
Observe that the function $x\mapsto x+C/x$ for $C>0$ is unimodal on
$(0,\infty)$ with a minimizer at $\sqrt{C}$, which means that if
all the other parameters are fixed, the maximizing choice for $\delta_{k-1}^2$
in \eref{eq:gd.xik} is $\delta_*^2=|\tilde\rho_{k-1}^2-\tilde\sigma_{k-1}^2|$.
Therefore, by unimodality combined with the ordering
$\delta_{k-1}\ge\tilde\rho_{k-1}\ge\delta_*$
(which is \eref{eq:gd.chain1}),
the right-hand side of \eref{eq:gd.xik} can only
increase if we replace $\delta_{k-1}$ by $\tilde\rho_{k-1}$, thus obtaining,
\begin{align*}
(\tilde\xi^*_k)^2
&=
\frac{1}{4}\left(2\tilde\rho_{k-1}^2+2\tilde\sigma_{k-1}^2-\delta_{k-1}^2-
\frac{(\tilde\rho_{k-1}^2-\tilde\sigma_{k-1}^2)^2}{\delta_{k-1}^2}\right) \\
&\le
\frac{1}{4}\left(2\tilde\rho_{k-1}^2+2\tilde\sigma_{k-1}^2-\tilde\rho_{k-1}^2-
\frac{(\tilde\rho_{k-1}^2-\tilde\sigma_{k-1}^2)^2}{\tilde\rho_{k-1}^2}\right) \\
&= \tilde\sigma_{k-1}^2 -\frac{\tilde\sigma_{k-1}^4}{4\tilde\rho_{k-1}^2}.
\end{align*}
Therefore,
\begin{align*}
\tilde\sigma_k^2 & = (\tilde\xi_k^*)^2 -\gamma_{k-1}+\gamma_k \\
&\le
\tilde\sigma_{k-1}^2 -\frac{\tilde\sigma_{k-1}^4}{4\tilde\rho_{k-1}^2}
-\gamma_{k-1}+\gamma_k \\
& =
\tilde\sigma_{k-1}^2 -\frac{\tilde\sigma_{k-1}^4}{4\Vert\nabla f(\x_{k-1})\Vert^2/\ell^2}
-\gamma_{k-1}+\gamma_k \\
& \le
\tilde\sigma_{k-1}^2 -\frac{\tilde\sigma_{k-1}^4}{4\Vert\nabla f(\x_{k-1})\Vert^2/\ell^2}
-\frac{\Vert\nabla f(\x_{k-1})\Vert^2}{L\ell} && \mbox{(by \eref{eq:gammadiff})} \\
& \le
\tilde\sigma_{k-1}^2 -2\cdot\frac{\tilde\sigma_{k-1}^2}{2\Vert\nabla f(\x_{k-1})\Vert/\ell}
\cdot \frac{\Vert\nabla f(\x_{k-1})\Vert}{\sqrt{L\ell}} && \mbox{(since $a^2+b^2\ge 2ab$)} \\
& =
\tilde\sigma_{k-1}^2 \left(1-\sqrt{\frac{\ell}{L}}\right).
\end{align*}
In the other case, $\tilde\sigma_{k-1}^2/2\ge \tilde\rho_{k-1}^2$
so we obtain
\begin{align*}
\tilde\sigma_k^2 & = \tilde\rho_{k-1}^2 -\gamma_{k-1}+\gamma_k
&& \mbox{(by \eref{eq:sigmadef2})} \\
&= \frac{\Vert \nabla f(\x_{k-1})\Vert^2}{\ell^2} -\gamma_{k-1}+\gamma_k\\
&\le
\frac{\Vert \nabla f(\x_{k-1})\Vert^2}{\ell^2}-
\frac{\Vert \nabla f(\x_{k-1})\Vert^2}{L\ell} &&\mbox {(by \eref{eq:gammadiff})} \\
& =
\frac{\Vert \nabla f(\x_{k-1})\Vert^2}{\ell^2}(1-\ell/L) \\
& =
\tilde\rho_{k-1}^2(1-\ell/L) \\
&\le
\tilde\sigma_{k-1}^2(1-\ell/L)/2 && \mbox{(by the hypothesis of the case).}
\end{align*}
It is a simple matter to confirm that $(1-\eps)/2\le (1-\sqrt{\eps})$
for any $\eps\in[0,1]$, thus establishing the theorem in this case.
\end{proof}
\section{Relationship between GD and IA}
\label{sec:GDanalysis2}
We already observed that GD and IA both work in the same
affine subspace $\mathcal{M}_k$ on each iteration. In this section,
we develop further insight into their connection.
On each step IA solves two optimization
problems exactly, \eref{eq:ia2.xupd} and \eref{eq:ia2.yupd},
to obtain $\x_k$ and $\y_k$. We argue
that GD computes optimal
solutions to these two problems not for the actual objective function $f$
but for some other objective function that agrees
with its partial information
about $f$.
This is stated precisely in the following two theorems, the
first about $\x_k$ and the second about $\y_k$.
Regarding the computation of $\x_k$, let us recall that
GD takes a steepest descent step from $\x_{k-1}$ in
line \eref{eq:gd.steep} followed by a line search in
\eref{eq:gd.xupd}. The simplest line-search method for
minimizing a convex function
is bisection based on the sign of the directional derivative
of the objective along the line. Evaluation of signs of derivatives
does not give any information other than the location of the minimizer
since univariate convex functions are unimodal. (In other words,
bisection to find the minimizer of two convex functions $f_1,f_2$
with a common minimizer will evaluate the same sequence
of points for $f_1$ as for $f_2$.) Therefore, the information
about $f$ used in GD to find $\x_k$
is the value of $\nabla f(\x_{k-1})$, the line
$\mathop{\rm aff}\{\obx_{k-1},\y_{k-1}\}$, and the location of the minimizer on that
line. The following theorem says that GD chooses the optimal
$\x_k\in\mathcal{M}_k$
given this partial information about the objective function
$f$. (In contrast, IA chooses the optimal $\x_k$ for the
true objective $f$.)
\begin{theorem}
Given scalar $L>0$, a point $\x_{k-1}\in\R^n$, $n\ge 2$,
a nonzero vector $\g\in\R^n$, define
$\obx_{k-1}=\x_{k-1}-\g/L$. Also, assume we are
given
a line $\Lambda \subset \R^n$ containing
$\obx_{k-1}$ but not $\x_{k-1}$, and point $\x_{k}\in\Lambda$.
Let $\mathcal{M}_k=\mathop{\rm aff}\{\Lambda,\x_{k-1}\}$.
Assume that
\begin{equation}
\g^T(\x_k-\obx_{k-1})<0.
\label{eq:angassum}
\end{equation}
(Assumption \eref{eq:angassum} will be explained later.)
Define
$\mathcal{F}$ to be:
\begin{align*}
\mathcal{F}=\{f:\R^n\rightarrow\R: &\mbox{$f$ is convex}, \\
&\argmin\{f(\x):\x\in \Lambda\}=\x_{k}, \\
&\mbox{$\nabla f$ is $L$-Lipschitz}, \\
& \nabla f(\x_{k-1})=\g\}.
\end{align*}
Define $q(\x)=\sup\{f(\x)-f(\x_{k-1}):f\in\mathcal{F}\}$.
Then
\begin{equation}
q(\x_k)=\min\{q(\x):\x\in\mathcal{M}_k\}.
\label{eq:s0argmin}
\end{equation}
\end{theorem}
The class $\mathcal{F}$ is meant to capture the set of all functions whose
partial information known to the GD algorithm agrees with the partial information
of the actual objective function, that is, $\nabla f(\x_{k-1})=\g$ and
$\x_k$ is the minimizer of $f$ on the line $\Lambda$. The conclusion
of the theorem is that for the worst $f$ in this class (the sup appearing
in the definition of $q$), $\x_k$ chosen by GD is optimal over $\mathcal{M}_k$.
The hypotheses on the given data correspond to the induction hypotheses of GD
except for \eref{eq:angassum}, which we discuss later.
\begin{proof}
Since the algorithm is invariant under translation and rotation
of space, without loss of generality we can
transform coordinates to identify $\mathcal{M}_k$ with
the $(x_1,x_2)$-plane.
Assume that the transformation places
the point $\obx_{k-1}$ at the origin $(0,0)$,
the line $\Lambda$ on the $x_1$-axis, the point
$\x_k$ at $(s,0)$.
Denote the point $\x_{k-1}$ with $(a,b)$ in this plane.
Because of the identification of $(0,0)$ with $\obx_{k-1}$,
we know that for $f\in\mathcal{F}$, $\g=\nabla f(\x_{k-1})=\nabla f(a,b)=(aL,bL)$
in this rotated and translated coordinate system.
Since the length of $\g$ is unchanged by translation and rotation,
$(a,b)$ must satisfy the restriction that $((aL)^2+(bL)^2)^{1/2}$ is the
original length $\Vert\g\Vert$. Observe that
$f(0,0)-f(a,b)\le -\Vert\g\Vert^2/(2L)=-(L/2)(a^2+b^2)$
by \eref{eq:fdesc0}.
For the remainder of the proof, let $k_0=-(L/2)(a^2+b^2)$.
Thus, $f(s,0)-f(a,b)\le k_0$
since $(s,0)$ is the minimizer of $f\in\mathcal{F}$ over $\Lambda$
(hence achieves a value
less than $f(0,0)$). This inequality holds for all $f\in\mathcal{F}$,
thus showing that $q(\x_k)\le k_0$.
In these transformed coordinates, assumption \eref{eq:angassum}
is rewritten as the inequality $as<0$.
For the remainder of this proof, assume $a>0$ hence $s<0$; the
other case is obtained by reflection of the $x_1$-coordinate.
Consider the function,
$$\hat f_0(x,y) = \left\{
\begin{array}{ll}
Lx^2/2 + Ly^2/2,& x\ge 0, \\
Ly^2/2, & x\le 0,
\end{array}
\right.
$$
The level curves of this function are semicircles in the right half-plane
and parallel rays in the left half-plane. It satisfies all the conditions
for membership in $\mathcal{F}$ except that $(s,0)$ is not the unique minimizer
over $\Lambda$;
all points of the ray $\{(x,0):x\le 0\}$ are minimizers. Therefore,
we perturb this function slightly.
Fix a $\delta>0$ small.
Define scalars $m=-\delta (s+a)/(L-2\delta)$ and $p=\delta m - \delta s$.
Note that
$m,p$
tend to 0 as $\delta\rightarrow 0$, so assume that $\delta$ is
sufficiently small that $2\delta<L$, $|m|<|s|$, $|m|<|a|$.
Consider the following function $\hat f_\delta$:
$$\hat f_\delta(x,y) = \left\{
\begin{array}{ll}
q_1(x-m)^2/2 + p(x-m)+Ly^2/2,& x\ge m, \\
q_2(x-m)^2/2+p(x-m) +Ly^2/2, & x\le m,
\end{array}
\right.
$$
where $q_1=L-\delta$, $q_2=\delta$.
It is straightforward to check that $\hat f_\delta \in\mathcal{F}$.
In particular, $\partial \hat f_\delta(s,0)/\partial x=\delta(s-m)+p=0$ since
$p=\delta(m-s)$.
The choice of $m$ ensures that $\partial \hat f_\delta(a,b)/\partial x=aL$.
Observe that $\hat f_\delta(s,0)-\hat f_\delta(a,b)\rightarrow k_0$ as $\delta\rightarrow 0$,
and $(s,0)$ is the minimizer of $\hat f_\delta$. Therefore, for all
$\x\in\mathcal{M}_k$, $\hat f_\delta(\x)-\hat f_\delta(\x_{k-1})$ is bounded
below by $k_0$ plus a residual that tends to 0 as $\delta\rightarrow 0$,
which means
that for all $\x\in\mathcal{M}_k$, $q(\x)\ge k_0$ (since $\hat f_\delta$ is a candidate
for the supremum in the definition of $q(\cdot)$).
Since we already established that $q(\x_k)\le k_0$, this proves the theorem.
\end{proof}
Let us now examine assumption \eref{eq:angassum}, which
in transformed coordinates is written $as<0$. If
$as>0$, then the minimizer over $\mathcal{M}_k$
cannot lie on the $x$-axis, i.e.,
$\mathcal{F}$ is empty as the following argument shows.
(The case of $as=0$ needs separate
treatment, which we omit). Consider an arbitrary $f\in \mathcal{F}$.
Select $t$ to solve the following equation:
\begin{equation}
((s+at,bt) - (a,b))^T(a,b)=0;
\label{eq:saborth}
\end{equation}
one easily determines that
\begin{equation}
t=\frac{a^2+b^2-as}{a^2+b^2}.
\label{eq:tformula}
\end{equation}
It is impossible that $a^2+b^2-as<0$, or equivalently, that $t<0$ because then
\begin{align*}
f(s,0)&\ge f(a,b)+\nabla f(a,b)^T((s,0)-(a,b)) && \mbox{(by the subgradient inequality)}
\\
& = f(a,b)+(aL,bL)^T((s,0)-(a,b)) \\
&= f(a,b)+L(as-a^2-b^2) \\
& > f(a,b),
\end{align*}
contradicting the minimality of $(s,0)$.
But there is also a contradiction when $t\ge 0$.
Consider the following chain of inequalities:
\begin{align*}
f(s,0)+\frac{Lt^2\Vert(a,b)\Vert^2}{2}&\ge f(s+at,bt) &&\mbox{(by $L$-smoothness} \\
&&& \mbox{since $\nabla f(s,0)=(0,0)$)} \\
&\ge f(a,b)+\nabla f(a,b)^T((s+at,bt)-(a,b))\\
&&& \mbox{(the subgradient inequality)} \\
&=f(a,b) && \mbox{(using \eref{eq:saborth})} \\
&\ge f(0,0)+L/2\Vert(a,b)\Vert^2 && \mbox{(by \eref{eq:fdesc0})} \\
& \ge f(s,0)+L/2\Vert(a,b)\Vert^2 && \mbox{(since $(s,0)$ is the minimizer)}.
\end{align*}
This chain of inequalities starts and ends at the same quantity except for the
presence of $t^2$; thus, the inequalities can hold only if $t\ge 1$. But
if $as>0$ and $a^2+b^2-as\ge 0$,
then it follows from \eref{eq:tformula} that $0\le t <1$. So we conclude
that $as>0$ contradicts the optimality of $(s,0)$.
More comments on this matter appear at the end of this section.
The next theorem covers $\y_k$ and requires a different construction.
Its format and interpretation are analogous to the previous
theorem.
\begin{theorem}
Suppose the following data is given: two points $\x_{k-1},\y_{k-1}$ in
$\R^n$, $n\ge 3$,
a nonzero vector $\g\in\R^n$, four positive scalars $\rho,\sigma,\delta,\ell$
that satisfy the following conditions. For
the remainder of the theorem, let $\oobx_{k-1}=\x_{k-1}-\g/\ell$.
The conditions are:
\begin{itemize}
\item
$\delta=\Vert\oobx_{k-1}-\y_{k-1}\Vert$,
\item
$\rho<\Vert\g\Vert/\ell$
\item
$\rho+\sigma\ge \delta$,
\item
$\delta\ge \sqrt{|\rho^2-\sigma^2|}$, and
\item
\begin{equation}
(\y_{k-1}-\oobx_{k-1})^T\g > \rho^2\ell/\lambda^*,
\label{eq:yxbbcond}
\end{equation}
where $\lambda^*$ is given by \eref{eq:lambdastar}.
(Assumption \eref{eq:yxbbcond} will be explained later.)
\end{itemize}
Let $\mathcal{F}$ be the following set of functions:
\begin{align}
\mathcal{F}=\{f:\R^n\rightarrow\R:
& \mbox{$f$ is strongly convex with modulus $\ell$}, \notag \\
& \nabla f(\x_{k-1})=\g, \notag \\
& \frac{\Vert\g\Vert^2}{\ell^2}-\frac{2(f(\x_{k-1})-\min\{f\})}{\ell}=\rho^2,
\mbox{ and} \label{eq:rhocond2}\\
& \argmin\{f\}\in B(\y_{k-1},\sigma)\}. \notag
\end{align}
Here, $\min\{f\}$ is shorthand for $\min\{f(\x):\x\in\R^n\}$ and
similarly for $\argmin\{f\}$.
Let $\mathcal{M}_k$ denote $\mathop{\rm aff}\{\x_{k-1},\y_{k-1},\oobx_{k-1}\}$.
Define
$$q(\y)=\sup\{\Vert\y-\argmin\{f\}\Vert: f\in\mathcal{F}\}.$$
Let $\y_k$ be the point computed by the GD algorithm for this data
using \eref{eq:gd.yupd}.
Then
\begin{equation}
\y_k=\argmin\{q(\y):\y\in \mathcal{M}_k\}.\label{eq:yksolve}
\end{equation}
\end{theorem}
\begin{proof}
By translating and rigidly rotating space, we can identify $\mathcal{M}_k$ with
the $(x_1,x_2)$ plane.
After this coordinate transformation,
we may assume $\oobx_{k-1}$ coincides with the origin $(0,0)$.
For the remainder of the discussion, the remaining coordinates $x_3,x_4,\ldots,x_n$
are
not written and are assumed to be 0's.
Let us write $\x_{k-1}=(a,b)$.
Since $\oobx_{k-1}$ is at the origin, this implies $\g=(a\ell,b\ell)$.
Choose the transformation to make
$\y_{k-1}$ lie on the positive $x_1$-axis at position $(\delta,0)$.
(Recall that $\delta=\Vert\y_{k-1}-\oobx_{k-1}\Vert$.)
Let $\lambda^*$ be as in \eref{eq:lambdastar}. As in \eref{eq:gd.yupd},
let $\y_{k}=(1-\lambda^*)\oobx_{k-1}+\lambda^*\y_{k-1}$, which in
this coordinate system is
$\y_k=(\delta\lambda^*, 0).$
Let $f$ be an arbitrary member of $\mathcal{F}$. The hypotheses
on $\mathcal{F}$ imply that
$\argmin\{f\}\in B(\oobx_{k-1},\rho)\cap B(\y_{k-1},\sigma)$, and therefore,
by Lemmas \ref{lem:ballintersect1}
and \ref{lem:ballintersect2}, $\Vert\argmin\{f\}-\y_k\Vert\le \xi^*$,
where $\xi^*$ is given by \eref{eq:xistardef}. This shows that
$q(\y_{k})\le \xi^*$.
The remainder of the proof shows that
$q(\y)>\xi^*$ for $\y\in \mathcal{M}_k-\{\y_k\}$, which will
establish \eref{eq:yksolve}. Let
$\kappa$ stand for either $+1$ or $-1$. Embed
$\mathcal{M}_k$ in one higher dimension and write 3-tuples of
coordinates (so that $\x_{k-1}=(a,b,0)$,
$\g=(a\ell,b\ell,0)$, $\y_{k-1}=(\delta,0,0)$ and
so forth). Define
\begin{equation}
\x^*=\left(\begin{array}{c}
\delta\lambda^* \\
0 \\
\kappa \xi^*
\end{array}
\right).
\label{eq:xoptdef1}
\end{equation}
Define
$$f(\x)=(\ell/2)\Vert\x-\x^*\Vert^2 +\ell\left|(\x^*)^T(\x-\x^*)\right|.$$
First, observe the obvious consequences of this
formula that $\x^*=\argmin\{f\}$ and that $f(\x^*)=0$.
We claim that $f\in\mathcal{F}$. The fact that $f$ is
$\ell$-strongly convex follows from the presence of the
first term. The second term is convex but not strongly convex.
In order to establish the remaining conditions for
membership in $\mathcal{F}$, we
first determine which branch of the absolute value
holds when evaluating $f(\x_{k-1})$; in particular,
we establish the inequality that $(\x^*)^T(\x_{k-1}-\x^*)> 0$,
i.e., that $(\x^*)^T\x_{k-1}>\Vert\x^*\Vert^2$. The left-hand side
evaluates to $a\delta\lambda^*$, while the right-hand side
evaluates to $(\delta\lambda^*)^2+(\xi^*)^2$
which simplifies to $\rho^2$ according to
\eref{eq:lambdastar} and \eref{eq:xistardef}.
Thus, we must establish $a\delta\lambda^*>\rho^2$; this follows
from \eref{eq:yxbbcond} which states that
$\delta a\ell>\rho^2\ell/\lambda^*$ in the transformed coordinates.
This inequality implies that in the neighborhood of
$\x_{k-1}$, the absolute value sign appearing in the definition
of $f$ may be dropped.
We now establish
the remaining conditions for membership of $f$ in $\mathcal{F}$.
We have $\nabla f(\x_{k-1})=\ell(\x_{k-1}-\x^*)+\ell\x^*=\ell\x_{k-1}=\g$,
so the second condition is established.
For the third condition, recalling that $(\x^*)^T\x_{k-1}=a\delta\lambda^*$
while $\Vert\x^*\Vert^2=\rho^2$, we compute,
\begin{align*}
f(\x_{k-1}) &=(\ell/2)((a-\delta\lambda^*)^2+b^2+(\xi^*)^2) + \ell(a\delta\lambda^*-\rho^2) \\
&=(\ell/2)(a^2 + b^2 -\rho^2),
\end{align*}
where, to obtain the second line, we combined like terms and used
the already established identity $(\delta\lambda^*)^2+(\xi^*)^2=\rho^2$.
Therefore,
\begin{align*}
\frac{\Vert\g\Vert^2}{\ell^2} - \frac{2}{\ell}(f(\x_{k-1})-f(\x^*))
&= (a^2+b^2) - (a^2+b^2-\rho^2) \\
&=\rho^2,
\end{align*}
thus establishing \eref{eq:rhocond2}. For the last condition,
\begin{align*}
\Vert\y_{k-1}-\x^*\Vert^2 &= (\delta-\delta\lambda^*)^2+(\xi^*)^2 \\
&= (\sigma^*)^2.
\end{align*}
Thus, membership of $f\in\mathcal{F}$ is established. Note that
the two choices for $\kappa$, namely, $\pm 1$
lead to two different optimizers $\x^*$ in \eref{eq:xoptdef1}
whose distance apart is
$2\xi^*$. Therefore, the midpoint of the two optimizers is the
only point in $\R^n$ whose distance from both of them is
bounded above by $\xi^*$. This midpoint is exactly $\y_k$. This
proves that $q(\y_k)\ge \xi^*$ (we already showed that $q(\y_k)\le \xi^*$),
and also that for any $\y\in\mathcal{M}_k-\{\y_k\}$, $q(\y)>\xi^*$.
This establishes \eref{eq:yksolve}.
\end{proof}
We now discuss the conditions imposed on the given data.
The condition $\delta=\Vert\oobx_{k-1}-\y_{k-1}\Vert$ is
simply part of the construction, and the conditions
$\rho<\Vert\g\Vert/\ell$ and
$\rho+\sigma\ge \delta$ ensure that $\mathcal{F}$ is
nonempty. The condition
$\delta\ge \sqrt{|\rho^2-\sigma^2|}$ is the main case of
the two cases arising in the main loop of GD.
In particular, the condition \eref{eq:gd.ifstmt} is used to establish
\eref{eq:gd.chain1}. We omit
the treatment of the minor case.
Finally, we discuss \eref{eq:yxbbcond}. If a strengthened version of this
condition fails to hold, then
$\mathcal{F}$ is empty.
In particular, strong convexity implies that the optimizer lies in a ball
of radius $\Vert\g\Vert/\ell$ about $\x_{k-1}$, i.e.,
$$\Vert\x_{k-1}-\x^*\Vert^2 \le \Vert\g\Vert^2/\ell^2.$$
In the
transformed coordinates, this is written
$$(a-\delta\lambda^*)^2+b^2+(\xi^*)^2\le a^2+b^2,$$
which simplifies to
$$2a\delta\lambda^*\ge\rho^2,$$
while \eref{eq:yxbbcond} is written as $a\delta\lambda^*>\rho^2$ in the
transformed coordinates, so the
two bounds differ by a factor of 2. In the case that
$\rho^2\in [a\delta\lambda^*,2a\delta\lambda^*]$, the two possibilities are (1)
$\mathcal{F}=\emptyset$, or (2) there is an $f\in\mathcal{F}$, but
the construction of $f$ used in the proof
does not work. We do not know
which possibility is correct.
We now summarize the results in this section with some observations.
We have shown that the computations of $\x_k,\y_k$ in the GD algorithm
solve minimization problems akin to those in IA except that
the minimizer pertains to the worst case $f$ that agrees with
the partial information that GD has about $f$ rather than the true $f$.
In both cases, the theorems had some apparently extraneous assumptions,
namely \eref{eq:angassum} in the first theorem, and \eref{eq:yxbbcond} in the second.
However, in both these cases, the extraneous assumptions indicate
that GD may not be using the
information about $f$ entirely. In the case of the first theorem,
if $\g^T(\x_{k}-\obx_{k-1})>0$
then the minimizer of $f$ over $\mathcal{M}_k$ cannot be on the
line searched by GD, yet GD does not use this information.
In the case of the second theorem, GD does not
use the fact that $\x^*\in B(\x_{k-1},\Vert \nabla f(\x_{k-1})\Vert/\ell)$.
The main point of this section is to clarify the relationship between IA
and GD, but the preceding paragraph reveals a second point. By constructing
these example functions to show that the GD algorithm is the best possible
in some cases but not others
given the limited information that it uses,
we also show that it may be possible to improve
on GD by making better use of the information
(such as the third ball mentioned in the
previous paragraph) in certain cases.
We do not pursue this idea here, but
see, e.g., \cite{Drusvyatskiy} for results in this direction.
\section{Computability of the potential}
\label{sec:computable}
A point to make about the GD algorithm and its
analysis is that the potential $\tilde\sigma_k^2$ is
computable on every iteration of the algorithm, i.e.,
it does not require prior knowledge of $\x^*$. (It does,
however, require prior knowledge of $\ell$.)
This fact
can also be deduced from the original
presentation of \cite{bubeck}, although the computability
is not further used therein.
In this section we explain in more detail what we
mean by ``computable''. In particular, a potential
$\tilde\sigma_k$ that is an upper bound to \eref{eq:psidef}
is computable if it has the
following properties.
\begin{enumerate}
\item
It must be possible to compute the potential without
prior knowledge of $\x^*$ or $f(\x^*)$.
This is the reason that we regard the potential
$\Psi_k$ itself defined in \eref{eq:psidef} as
noncomputable.
\item
It must have an {\em a posteriori} dependence on the actual convergence
of the algorithm, in the sense that if
$\x_k,\y_k$ are very close to $\x^*$, then it
should be the case that $\tilde\sigma_k$ is
very close to zero. In particular, this
rules out using a completely {\em a priori} potential like
$$\tilde\sigma_k := C\Vert\nabla f(\x_0)\Vert^2\left(1-\sqrt{\frac{\ell}{L}}\right)^k$$
for a fixed constant $C$. Although this potential indeed
is an upper bound on \eref{eq:psidef} and is computable, it has no relationship
to the current iterate and therefore has no algorithmic use.
\item
The potential should decrease by the factor $(1-\sqrt{\ell/{L}})$
per iteration (or perhaps $(1-\mbox{const}\cdot\sqrt{\ell/{L}})$, since a constant
factor improvement may be possible). Thus, although
$C\Vert \nabla f(\x_k)\Vert^2$ for a correctly chosen $C$ is
an upper bound on \eref{eq:psidef}, it does not satisfy
our requirement of steady decrease and in fact can be
oscillatory.
The significance of guaranteed decrease in the potential is twofold.
First, the guaranteed decrease is useful for theoretical analysis
to establish a linear convergence rate. Indeed, it is used
herein for this
purpose to establish previously known convergence rates for AG
and CG in a new manner.
Second, steady decrease in the potential can be used in an
algorithm to ensure that progress is being made. To give
one example not pursued herein, consider the problem of detecting stagnation
due to imprecise arithmetic in linear conjugate gradient.
Although
$\Vert \nabla f(\x_k)\Vert^2$ is commonly used as a termination
criterion for linear conjugate gradient, it is not suitable
for use as a stagnation test because it can be highly oscillatory
and therefore cannot be used to check whether a single
iteration was successful.
A steadily decreasing potential, however, could be used
in a CG stagnation test. (We have preliminary results
on this matter that will be the subject of future work.)
Our present algorithmic use of the potential as a measure of steady
decrease is reported in Sections~\ref{sec:hybrid}--\ref{sec:comp}.
We present an experiment to illustrate why
$\Vert \nabla f(\x_k)\Vert^2$ is not a suitable substitute for
$\tilde\sigma_k$ in Section~\ref{sec:comp}.
\end{enumerate}
The potential for GD is given by
\eref{eq:sigmadef} and \eref{eq:sigmadef2}. As mentioned
earlier, it is a slight variant of the potential defined
by the authors of GD; theirs also has these properties.
It is somewhat surprising that the same potential also
applies to conjugate gradient and accelerated
gradient, as developed in the next few sections.
We do not know of any other computable potential with these
properties.
\section{Analysis of linear conjugate gradient}
\label{sec:CGanalysis}
The linear conjugate gradient (CG) algorithm for
minimizing $f(\x)=\x^TA\x/2 -\b^T\x$,
where
$A$ is a symmetric positive definite matrix,
is due to Hestenes and Stiefel
\cite{hestenesstiefel}
and is as follows.
\begin{align}
& \mbox{\bf Linear Conjugate Gradient} \notag \\
& \x_0:=\mbox{arbitrary} \notag \\
& \r_0:=\b-A\x_0 \notag \\
& \p_1:=\r_0 \notag \\
& \mbox{for } k:=1,2,\ldots, \notag \\
&\displaystyle \hphantom{\mbox{for }}
\alpha_{k}:= \frac{\r_{k-1}^T\r_{k-1}}{\p_{k}^TA\p_{k}} \label{eq:alphadef} \\
&\hphantom{\mbox{for }}
\x_{k} := \x_{k-1}+\alpha_{k}\p_{k} \label{eq:cg.xupd} \\
&\hphantom{\mbox{for }}
\r_{k} := \r_{k-1}-\alpha_{k} A\p_{k} \label{eq:rupd} \\
&\displaystyle \hphantom{\mbox{for }}
\beta_{k+1}:= \frac{\r_{k}^T\r_{k}}{\r_{k-1}^T\r_{k-1}} \label{eq:betadef}\\
&\hphantom{\mbox{for }}
\p_{k+1} := \beta_{k+1}\p_{k}+\r_{k} \label{eq:pupd} \\
&\mbox{end} \notag
\end{align}
We now show that linear CG exactly implements
Algorithm IA, and therefore also satisfies the bound of Theorem~\ref{thm:iacvg}.
This is perhaps surprising because CG does not have prior
information about $\x^*$. The following key results about CG are from the
original paper:
\begin{theorem}(\cite{hestenesstiefel})
Let $\mathcal{V}_k=\x_0+\Span\{\r_0,\ldots,\r_{k-1}\}$ in CG.
Then
(a) An equivalent
formula is $\mathcal{V}_k=\x_0+\Span\{\p_1,\ldots,\p_{k}\}$,
(b) $\x_k$ is the minimizer of $f(\x)$ over $\mathcal{V}_k$,
(c) $\r_k=-\nabla f(\x_k)$, and
(d) $\x_k+\tau_k\p_k$ is the minimizer of $\Vert \x-\x^*\Vert$ over
$\mathcal{V}_k$, where
\begin{equation}
\tau_k = \frac{2(f(\x_k)-f(\x^*))}{\Vert\r_{k-1}\Vert^2}.
\label{eq:tau_k}
\end{equation}
\label{thm:hest}
\end{theorem}
Part (b) appears as Theorem 4:3 of \cite{hestenesstiefel},
while parts (a) and (c) are not stated explicitly. All of (a)--(c)
are covered
by most textbook treatments of CG. Part (d) appears as Theorem 6:5 and
is less well known.
The following theorem establishes the claim that Algorithm CG implements
IA. The principal result is part (b). The remaining parts
are necessary to support the induction proof.
\begin{theorem}
Suppose IA and CG are applied to the same quadratic function with
the same $\x_0$. Let the sequences of iterates be denoted
$\x_k^{\rm IA}$ and $\x_k^{\rm CG}$ respectively.
Then for each $k=1,2,\ldots$,
(a) $\mathcal{M}_{k}\subset \mathcal{V}_k$,
(b) $\x_k^{\rm CG}=\x_k^{\rm IA}$, and
(c) $\y_k=\x_{k}^{\rm CG}+\tau_{k}\p_{k}$.
\end{theorem}
\begin{proof}
For the $k=1$ case, observe that $\p_1=-\nabla f(\x_0)$ so
$\mathcal{M}_1=\mathcal{V}_1$. Since $\x_1^{\rm CG}$ minimizes
$f(\x)$ over $\mathcal{V}_1$ while $\x_1^{\rm IA}$ minimizes
$f(\x)$ over $\mathcal{M}_1$, we conclude $\x_1^{\rm IA}=\x_1^{\rm CG}$.
For (c), observe that $\y_1$ minimizes $\Vert\y-\x^*\Vert$
over $\mathcal{M}_1$ by \eref{eq:ia2.yupd}, while $\x_1^{\rm CG}+\tau_1\p_1$
minimizes the same function over the same affine space, so (c)
is established.
Now assuming (a)--(c) hold for some $k\ge 1$, we establish them for $k+1$.
We will write $\x_k$ for both $\x_k^{\rm CG}$ and $\x_k^{\rm IA}$
since these
are equal by induction. For (a), we start with
$\mathcal{M}_{k+1}=\x_k+\Span\{\x_k-\y_k,\nabla f(\x_k)\}$.
We already know from (c) that $\y_k-\x_k=\tau_k\p_k\in{\bf T}\mathcal{V}_k
\subset {\bf T}\mathcal{V}_{k+1}.$ Also,
$\nabla f(\x_k)=-\r_k=\beta_{k+1}\p_k-\p_{k+1}$ (by \eref{eq:pupd}), hence
$\nabla f(\x_k)\in {\bf T}\mathcal{V}_{k+1}$.
Thus, ${\bf T}\mathcal{M}_{k+1}\subset{\bf T}\mathcal{V}_{k+1}$,
so showing $\mathcal{M}_{k+1}\subset\mathcal{V}_{k+1}$ is reduced to finding
a single common point, and we may take $\x_k$
to be this point.
For (b), $\x_{k+1}^{\rm CG}$ minimizes $f(\x)$ over $\mathcal{V}_{k+1}$
by Theorem~\ref{thm:hest}(b).
We also know that $\x_{k+1}^{\rm CG}\in \mathcal{M}_{k+1}$
because
\begin{align*}
\x_{k+1}^{\rm CG}&=\x_k+\alpha_{k+1}\p_{k+1} && \mbox{(by \eref{eq:cg.xupd})}\\
&=\x_k+\alpha_{k+1}(\r_k+\beta_{k+1}\p_k) && \mbox{(by \eref{eq:pupd})}\\
&=\x_k+\alpha_{k+1}(-\nabla f(\x_k) + \beta_{k+1}\p_k)
&& \mbox{(by Theorem~\ref{thm:hest}(c))}\\
&=\x_k+\alpha_{k+1}(-\nabla f(\x_k) + \beta_{k+1}(\y_k-\x_k)/\tau_k) &&
\mbox{(by induction, part (c))} \\
&\in \x_k+\Span\{\nabla f(\x_k), \y_k-\x_k\}.
\end{align*}
Since $\mathcal{M}_{k+1}\subset\mathcal{V}_{k+1}$ according to part (a), the
optimality of $\x_{k+1}^{\rm CG}$ with respect to $\mathcal{V}_{k+1}$ implies
that it is also optimal for $f(\x)$ with respect to $\mathcal{M}_{k+1}$,
hence $\x_{k+1}^{\rm IA}=\x_{k+1}^{\rm CG}.$ Thus, write $\x_{k+1}$ for
both vectors for the remainder of the argument.
A similar argument establishes (c). First, we observe
that $\x_{k+1}+\tau_{k+1}\p_{k+1}$ lies in $\mathcal{M}_{k+1}$
because $\x_{k+1}+\tau_{k+1}\p_{k+1}=\x_k+(\alpha_{k+1}+\tau_{k+1})\p_{k+1}$,
and then we can proceed as in the last paragraph except with
$(\alpha_{k+1}+\tau_{k+1})$ playing the role of $\alpha_{k+1}$. Next,
$\x_{k+1}+\tau_{k+1}\p_{k+1}$ minimizes $\Vert\x-\x^*\Vert$ over $\mathcal{V}_{k+1}$
by Theorem~\ref{thm:hest}(d). Thus, $\x_{k+1}+\tau_{k+1}\p_{k+1}$ must
be the same as $\y_{k+1}$. This concludes the induction.
\end{proof}
The surprising aspect of this analysis is that CG exactly
identifies $\mathcal{M}_k$ that appears in Algorithm IA despite
not ever computing $\y_k^{\rm IA}$. The reason is that the line
$\mathop{\rm aff}\{\x_k,\y_k^{\rm IA}\}$ agrees with the
line $\x_k+\Span\{\p_k\}$, which is computed by CG.
\section{A computable potential for linear conjugate gradient}
\label{sec:CGanalysis2}
The analysis in the preceding section shows that CG implements
the idealized algorithm. However, the decrease in the potential
cannot be measured during the algorithm because \eref{eq:tau_k} requires
prior knowledge of the optimizer. In this section, we observe
that the GD
potential can also be used for CG, yielding
a computable potential.
An application of
this potential will be presented in Section~\ref{sec:hybrid}.
We define an auxiliary sequence of vectors $\y_k$
using the formulas in GD. This sequence is not the
true minimizer that occurs in IA
\eref{eq:ia2.yupd} and in
Theorem~\ref{thm:hest}, part (d). But nonetheless,
$\Vert \y_k-\x^*\Vert$ shrinks sufficiently fast
to establish the necessary decrease in the potential.
In particular, we exactly mimic the equations that
define the quantities
$\tilde\sigma_k$, $\tilde\rho_k$, $\lambda_k$, $\delta_k$, $\y_k$
in GD, and modify only $\x_k$ so that it is computed using
the CG algorithm instead of the GD algorithm.
The same two theorems that held for GD also hold for CG:
\begin{theorem}
For each $k=0,1,2,\ldots$,
$$\Vert\x^*- \y_k\Vert^2 + \frac{2(f(\x_k)-f(\x^*))}{\ell}\le \tilde\sigma_k^2.$$
\end{theorem}
\begin{theorem}
For each $k=1,2,\ldots$,
$$\tilde\sigma_{k}^2\le \left(1-\sqrt{\frac{\ell}{L}}\right)\tilde\sigma_{k-1}^2.$$
\end{theorem}
The following observations about $\x_k$
computed in CG show that the same proofs of the
previous theorems work for CG.
First, $\y_{k}\in\mathcal{V}_{k}$, whereas $\r_{k}$ is orthogonal
to ${\bf T}\mathcal{V}_{k}$ (a well known property of CG), and thus
$\nabla f(\x_{k})^T(\y_{k}-\x_{k})=0$.
This is \eref{eq:gradorth}, which was a necessary ingredient
in the proof of GD.
Second, \eref{eq:gammadiff} still holds because
the CG step from $\x_{k-1}$ to $\x_k$ improves
$f$ at least as much as the step from $\x_{k-1}$
to $\obx_{k-1}$, since $\obx_{k-1}$ lies in the Krylov space
$\mathcal{V}_k$,
where $\x_k$ is optimal for $f$ over this space.
\section{Accelerated gradient}
\label{sec:AGanalysis}
The Accelerated Gradient (AG) algorithm of Nesterov is an even looser approximation
to IA than GD in the sense that there is no optimization subproblem per iteration;
instead, all step lengths are fixed.
For this section, let us define
\begin{equation}
\kappa = \frac{L}{\ell},
\label{eq:kappadef}
\end{equation}
because this ratio, sometimes called the
{\em condition number} of $f$, is used often throughout the algorithm and
analysis.
The algorithm is as follows.
\begin{align}
& \mbox{\bf Accelerated Gradient} \notag \\
& \x_0:=\mbox{arbitrary} \notag \\
& \w_0:=\x_0 \notag \\
& \mbox{for } k:=1,2,\ldots, \notag \\
& \hphantom{\mbox{for }} \x_k := \w_{k-1}-\nabla f(\w_{k-1})/L \label{eq:wupd} \\
& \hphantom{\mbox{for }} \w_k := \x_k + \theta(\x_k-\x_{k-1})\mbox { where } \label{eq:ag.xupd} \\
& \hphantom{\mbox{forforforfor}} \displaystyle \theta=
\frac{\sqrt{\kappa}-1}{\sqrt{\kappa}+1}. \label{eq:thetadef} \\
&\mbox{end} \notag
\end{align}
Note that some versions of AG in the literature
vary the choice of $\theta$
(e.g., see \cite{Nesterov:book}).
For the purpose of analysis, let us define the following auxiliary sequences
of vectors and scalars:
\begin{align}
\oobw_k &= \w_k-\nabla f(\w_k)/\ell, && (k=0,1,\ldots)
\label{eq:ag.oobw} \\
\y_0 &=\x_0, \notag \\
\y_k &= \x_k+\tau(\x_k-\x_{k-1}),
&& (k=1,2, \ldots) \label{eq:ag.ydef} \\
\tilde\sigma_0 &= \sqrt{2}\Vert\nabla f(\x_0)\Vert /\ell, \label{eq:ag.tsigma0}\\
\tilde\sigma_{k+1} &=
\bigg[(1-\kappa^{-1/2})\tilde\sigma_{k}^2+ \frac{2(f(\x_{k+1})-f(\w_{k}))}{\ell}
\notag\\
&\hphantom{=}
\quad\mbox{}+\frac{\Vert\nabla f(\w_{k})\Vert^2}{L\ell}-(\kappa^{1/2}-\kappa^{-1/2})\Vert\w_{k}-\x_{k}\Vert^2\bigg]^{1/2}
&& (k=0,1,2,\ldots) \label{eq:ag.tsigma}
\end{align}
where
\begin{equation}
\tau= \sqrt{\kappa}-1.
\label{eq:taudef}
\end{equation}
We prove two main results about these
scalars. The first shows that
$\tilde\sigma_k$ is decreasing at the appropriate
rate, while the second shows that it is
an upper bound on the distance to the optimizer.
\begin{theorem}
For each $k=0,1,2,\ldots,$
\begin{equation}
\tilde\sigma_{k+1}^2 \le (1-\kappa^{-1/2})\tilde\sigma_{k}^2.
\label{eq:agthm1}
\end{equation}
\end{theorem}
\begin{proof}
By squaring both sides of \eref{eq:ag.tsigma}, it is apparent that
\eref{eq:agthm1} reduces to showing:
$$\frac{2(f(\x_{k+1})-f(\w_{k}))}{\ell} +
\frac{\Vert\nabla f(\w_{k})\Vert^2}{L\ell}-(\kappa^{1/2}-\kappa^{-1/2})\Vert\w_{k}-\x_{k}\Vert^2\le 0.$$
Clearly it suffices to show
$$\frac{2(f(\x_{k+1})-f(\w_{k}))}{\ell} +
\frac{\Vert\nabla f(\w_{k})\Vert^2}{L\ell}\le 0.$$
This follows immediately from \eref{eq:wupd}, which implies
that $f(\x_{k+1})\le f(\w_k)-\Vert\nabla f(\w_k)\Vert^2/(2L)$.
\end{proof}
\begin{theorem}
For each $k=0,1,2\ldots$,
\begin{equation}
\Vert\y_k-\x^*\Vert^2 +\frac{2(f(\x_k)-f(\x^*))}{\ell}\le
\tilde\sigma_k^2.
\label{eq:ag.tsigmabd1}
\end{equation}
\end{theorem}
\begin{proof}
The proof of \eref{eq:ag.tsigmabd1} is by induction on $k$.
We start by deriving some preliminary relationships.
It is clear from \eref{eq:ag.xupd} and \eref{eq:ag.ydef} that $\w_k,\x_k,\y_k$
are collinear and the tangent to their common line is $\x_k-\x_{k-1}$, hence
we easily obtain from these equations:
\begin{align}
\y_k&=\x_k+\frac{\tau}{\theta}(\w_k-\x_{k}) &&\mbox{(by \eref{eq:ag.xupd} and \eref{eq:ag.ydef})} \label{eq:wxy0} \\
&=\x_k+(\sqrt{\kappa}+1)(\w_k-\x_k) &&\mbox{(by \eref{eq:thetadef} and \eref{eq:taudef})}\notag \\
&=(\sqrt{\kappa}+1)\w_k - \sqrt{\kappa}\x_k. \label{eq:wxy}
\end{align}
For the $k=0$ case,
\eref{eq:ag.tsigmabd1} follows from the initialization in
\eref{eq:ag.tsigma0} and strong convexity.
We now assume the result \eref{eq:ag.tsigmabd1} holds for
$k$ and establish it for $k+1$.
The proof relies on Lemma~\ref{lem:ballintersect1}, so first we must
argue that $\y_{k+1}$ lies on the
line segment between $\y_k$ and $\oobw_k.$ This is the content
of the following derivation:
\begin{align}
\y_{k+1} &= \x_{k+1}+(\sqrt{\kappa}-1)(\x_{k+1}-\x_k) &&\mbox{(by \eref{eq:ag.ydef}
and \eref{eq:taudef})} \notag \\
&=\sqrt{\kappa}\x_{k+1}-(\sqrt{\kappa}-1)\x_k \notag \\
&=\sqrt{\kappa}\w_k -(\sqrt{\kappa}-1)\x_k- \sqrt{\kappa}\nabla f(\w_k)/L
&& \mbox{(by \eref{eq:wupd})} \notag \\
&=\sqrt{\kappa}\w_k -(\sqrt{\kappa}-1)\x_k- \kappa^{-1/2}\nabla f(\w_k)/\ell
&& \mbox{(by \eref{eq:kappadef})} \notag \\
&=\kappa^{-1/2}\w_k+(1-\kappa^{-1/2})((\sqrt{\kappa}+1)\w_k
-\sqrt{\kappa}\x_k) \notag \\
&\hphantom{=}\quad\mbox{}
- \kappa^{-1/2}\nabla f(\w_k)/\ell \notag \\
&=\kappa^{-1/2}\w_k+(1-\kappa^{-1/2})\y_k- \kappa^{-1/2}\nabla f(\w_k)/\ell &&\mbox{(by \eref{eq:wxy})} \notag\\
& = \kappa^{-1/2}\oobw_k + (1-\kappa^{-1/2})\y_k. &&\mbox{(by \eref{eq:ag.oobw})}
\label{eq:ag.lambdadef}
\end{align}
We take
$\x$, $\y$ appearing in Lemma~\ref{lem:ballintersect1} to be
$\oobw_k$, $\y_k$ respectively.
Next we need to define $\delta,\rho,\sigma$ to be used
in the lemma.
In the case of $\rho$,
we copy the definitions used in the analysis of IA:
\begin{align}
\Vert\oobw_k-\x^*\Vert^2 &\le
\frac{\Vert\nabla f(\w_k)\Vert^2}{\ell^2} - \frac{2(f(\w_k)-f(\x^*))}
{\ell} && \mbox{(by \eref{eq:rdef0})} \notag\\
&\equiv \rho^2. \label{eq:ag.rhodef}
\end{align}
For $\sigma$, we use the induction hypothesis:
\begin{align}
\Vert\y_k-\x^*\Vert^2&\le \tilde\sigma_k^2 - \frac{2 (f(\x_k)-f(\x^*))}{\ell} \notag\\
&\equiv \sigma^2. \label{eq:ag.sigmadef}
\end{align}
In the case of $\delta$, we have:
\begin{align}
\Vert \oobw_k-\y_k\Vert^2 &=
\Vert \w_k-\nabla f(\w_k)/\ell -
(\sqrt{\kappa}+1)\w_k+\sqrt{\kappa}\x_k\Vert^2 &&
\mbox{(by \eref{eq:ag.oobw} and \eref{eq:wxy})} \notag \\
& = \Vert \sqrt{\kappa}(\x_k-\w_k)-\nabla f(\w_k)/\ell\Vert^2 \notag\\
&=\kappa\Vert\w_k-\x_k\Vert^2 + \frac{2\sqrt{\kappa}(\w_k-\x_k)^T\nabla f(\w_k)}
{\ell} \notag \\
&\hphantom{=}\quad\mbox{}
+ \frac{\Vert\nabla f(\w_k)\Vert^2}{\ell^2} \notag\\
&\ge
\kappa\Vert\w_k-\x_k\Vert^2 + \frac{2\sqrt{\kappa}(f(\w_k)-f(\x_k))}{\ell} \notag\\
&\hphantom{=}\quad\mbox{}
+\sqrt{\kappa}\Vert\w_k-\x_k\Vert^2
+ \frac{\Vert\nabla f(\w_k)\Vert^2}{\ell^2} && \mbox{(by strong convexity)} \notag\\
&=(\kappa + \sqrt{\kappa})
\Vert\w_k-\x_k\Vert^2 + \frac{2\sqrt{\kappa}(f(\w_k)-f(\x_k))}{\ell} \notag\\
&\hphantom{=}\quad\mbox{}
+ \frac{\Vert\nabla f(\w_k)\Vert^2}{\ell^2} \notag\\
&\equiv \delta^2. \label{eq:ag.deltadef}
\end{align}
From \eref{eq:ag.lambdadef}, $\lambda = 1-\kappa^{-1/2}$
(and hence $\lambda(1-\lambda)=\kappa^{-1/2}-\kappa^{-1}$).
Finally, by Lemma~\ref{lem:ballintersect1},
\begin{align*}
\Vert\y_{k+1}-\x^*\Vert^2 &\le \kappa^{-1/2}\rho^2+(1-\kappa^{-1/2})\sigma^2
-(\kappa^{-1/2}-\kappa^{-1})\delta^2 \\
&=
\frac{\Vert\nabla f(\w_k)\Vert^2}{L\ell}-\frac{2(f(\w_k)-f(\x^*))}{\ell}
+(1-\kappa^{-1/2})\tilde\sigma_k^2-(\kappa^{1/2}-\kappa^{-1/2})\Vert\w_k-\x_k\Vert^2 \\
&=
\tilde\sigma_{k+1}^2-\frac{2(f(\x_{k+1})-f(\x^*))}{\ell},
\end{align*}
thus completing the induction. The second line was obtained
by substituting \eref{eq:ag.rhodef}, \eref{eq:ag.sigmadef}, \eref{eq:ag.deltadef}
in the first line
followed by cancellation of like terms. The third line was obtained
from \eref{eq:ag.tsigma}.
\end{proof}
\section{Relationship between IA and AG}
\label{sec:AGanalysis2}
The relationship between the idealized algorithm and AG is weaker than
that between IA and either GD or CG because AG does not solve any
optimization subproblems and instead takes fixed stepsizes.
Thus, at best it is an approximation to IA. Furthermore,
the computations of $\x_k$ and $\y_k$ are more closely tied together,
making it unclear whether any kind of induction hypothesis can be
applied to either in isolation. For these reasons, we propose the following
theorem characterizing the AG--IA relationship.
\begin{theorem}
Suppose one is given points $\x_{k-1},\y_{k-1}\in\R^n$,
a nonzero vector $\g\in\R^n$, $n\ge 3$, and
scalars $\ell,L$ such that $L>\ell>0$.
For the remainder of this discussion, define $\kappa=L/\ell$
and
\begin{equation}
\w_{k-1}=\frac{\sqrt{\kappa}}{\sqrt{\kappa}+1} \x_{k-1}+\frac{1}{\sqrt{\kappa}+1}\y_{k-1},
\label{eq:wxy3}
\end{equation}
as in \eref{eq:wxy0}.
Assume further (these assumptions will be explained later) that
\begin{equation}
\Vert \x_{k-1}-\w_{k-1}\Vert \le \Vert\g\Vert/L,
\label{eq:assum5}
\end{equation}
and
\begin{equation}
(\x_{k-1}-\w_{k-1})^T\g=0.
\label{eq:assum6}
\end{equation}
Let
\begin{align*}
\mathcal{F} = \{f:\R^n\rightarrow\R: & \mbox{$\nabla f$ is $L$-Lipschitz}, \\
& \mbox{$f$ is strongly convex with modulus $\ell$}, \\
&\nabla f(\w_{k-1})=\g\}.
\end{align*}
Define
\begin{equation}
q(\x,\y)=\sup_{f\in\mathcal{F}}\frac{(2/\ell)(f(\x)-\min\{f\})+\Vert\y-\argmin\{f\}\Vert^2}
{(2/\ell)(f(\x_{k-1})-\min\{f\})+\Vert\y_{k-1}-\argmin\{f\}\Vert^2}.
\label{eq:qdef}
\end{equation}
Here $\min\{f\}$ and $\argmin\{f\}$ are short-hand for
$\min\{f(\x):\x\in\R^n\}$ and $\argmin\{f(\x):\x\in\R^n\}$ respectively.
Define $\mathcal{M}_k=\w_{k-1}+\Span\{\g,\x_{k-1}-\y_{k-1}\}$.
Let $\x_k,\y_k$ be the vectors computed by the AG
algorithm for this data (which lie in $\mathcal{M}_k$). Then
\begin{equation}
q(\x_k,\y_k)\le 1-\kappa^{-1/2},
\label{eq:agbound1}
\end{equation}
and
\begin{equation}
\min\{q(\x,\y):\x,\y\in\mathcal{M}_k\}\ge 1-\kappa^{-1/2}-O(\kappa^{-1}).
\label{eq:agbound2}
\end{equation}
\end{theorem}
Note that \eref{eq:agbound1} and \eref{eq:agbound2} imply that
the choice of new iterate $(\x_k,\y_k)$ made by AG is optimal up to a lower
order remainder term given the partial information used by AG. In
contrast, IA is optimal (separately) for both terms in the numerator
of \eref{eq:qdef} for the specific $f$ and with no remainder term.
\begin{proof}
The proof of \eref{eq:agbound1} appears in \cite{KarimiVavasis2016}
(see (21), (22) and (43) in that paper, which use different notation
for AG). Therefore, this proof establishes only \eref{eq:agbound2},
which involves constructing
a certain $f\in\mathcal{F}$ to attain this bound.
It suffices to prove the result for the $n=3$
case, since we can extend $f:\R^3\rightarrow\R$ to higher dimensions by adding
terms $\ell x_4^2+\cdots+\ell x_n^2$ and appending 0's to $\x_{k-1}$,
$\y_{k-1}$ and $\g$.
The theorem is invariant under rigid motions of $\R^3$, so we
can place $\w_{k-1}$ at an arbitrary point. In addition,
we can rotate the two
vectors $\g$ and $\x_{k-1}-\w_{k-1}$ to arbitrary positions as long
as their lengths and their orthogonality are preserved. Starting with
$\g$, let $g_0$ denote $\Vert\g\Vert$.
Rotate $\g$ to
$$\g=\gamma\left(
\begin{array}{c}
-L\kappa^{-3/4}(=-\ell \kappa^{1/4}) \\
0 \\
\frac{\ell}{1-\kappa^{-1}} \\
\end{array}
\right),$$
where $\gamma$ is chosen to preserve the length of $\g$, in other words,
$$\gamma=(g_0/\ell) \cdot (\kappa^{1/2}+(1-\kappa^{-1})^{-2})^{-1/2}.$$
Translate $\w_{k-1}$ as follows:
$$\w_{k-1}=
\gamma\left(
\begin{array}{c}
-\kappa^{-3/4} \\
0 \\
\frac{1}{1-\kappa^{-1}}
\end{array}
\right).$$
Finally, we rotate $\x_{k-1}-\w_{k-1}$ as follows:
$$\x_{k-1}-\w_{k-1}=
\gamma\left(
\begin{array}{c}
\frac{\kappa^{-5/4}v^2(1-\kappa^{-1})}{(1-\kappa^{-1})^2+\kappa^{-1/2}} \\
v\kappa^{-3/4}\\
v^2\kappa^{-1}-\frac{\kappa^{-3/2}v^2}{(1-\kappa^{-1})^2+\kappa^{-1/2}}
\end{array}
\right)
$$
where $v$ is a scalar parameter that controls the length
of $\x_{k-1}-\w_{k-1}$. In more detail, observe that
$\Vert\x_{k-1}-\w_{k-1}\Vert=\gamma\kappa^{-3/4}|v|+O(\kappa^{-1})$,
while $\Vert\g\Vert/L=\gamma\kappa^{-3/4}+O(\kappa^{-1})$. Therefore,
to assure \eref{eq:assum5}, we restrict $|v|\le 1$.
It is also straightforward
to check that \eref{eq:assum6} is satisfied.
With these two definitions in hand, we can now write:
$$\x_{k-1}=\w_{k-1}+(\x_{k-1}-\w_{k-1})=
\gamma\left(
\begin{array}{c}
-\kappa^{-3/4}+\frac{\kappa^{-5/4}v^2(1-\kappa^{-1})}{(1-\kappa^{-1})^2+\kappa^{-1/2}} \\
v\kappa^{-3/4}\\
\frac{1}{1-\kappa^{-1}} +v^2\kappa^{-1}-\frac{\kappa^{-3/2}v^2}{(1-\kappa^{-1})^2+\kappa^{-1/2}}
\end{array}
\right),
$$
and
$$\y_{k-1} = \w_{k-1}-\sqrt{\kappa}(\x_{k-1}-\w_{k-1})=
\gamma\left(
\begin{array}{c}
-\kappa^{-3/4}-\frac{\kappa^{-3/4}v^2(1-\kappa^{-1})}{(1-\kappa^{-1})^2+\kappa^{-1/2}} \\
-v\kappa^{-1/4}\\
\frac{1}{1-\kappa^{-1}} -v^2\kappa^{-1/2}+\frac{\kappa^{-1}v^2}{(1-\kappa^{-1})^2+\kappa^{-1/2}}
\end{array}
\right).$$
Next, we define:
$$f(\x)=\frac{Lx_1^2+\sqrt{L\ell}x_2^2+\ell x_3^2}{2}.$$
It is straightforward to verify that $\nabla f$ is $L$-Lipschitz,
that $f$ is $\ell$-strongly convex, and that
$\nabla f(\w_{k-1})=\g$; thus $f\in\mathcal{F}$. Also,
it is obvious that $\argmin\{f\}=\bz$ and $\min\{f\}=0$.
We evaluate the two terms in the denominator of \eref{eq:qdef}:
\begin{align*}
f(\x_{k-1}) - \min\{f\} &= f(\x_{k-1}) \\
&=\frac{\gamma^2}{2}
\left(L(\kappa^{-3/4}+O(\kappa^{-5/4}))^2+
\sqrt{L\ell}(v\kappa^{-3/4})^2+
\ell(1+O(\kappa^{-1}))^2\right) \\
&= \frac{\gamma^2\ell}{2}(1+\kappa^{-1/2}+O(\kappa^{-1})).
\end{align*}
Also,
\begin{align*}
\Vert\y_{k-1}-\argmin\{f\}\Vert^2 &= \Vert\y_{k-1}\Vert^2 \\
&=\gamma^2\left(O(\kappa^{-3/4})^2+
v^2\kappa^{-1/2}+
(1-v^2\kappa^{-1/2}+O(\kappa^{-1}))^2\right) \\
&=\gamma^2(1-v^2\kappa^{-1/2}+O(\kappa^{-1})).
\end{align*}
Thus, the denominator of \eref{eq:qdef} simplifies to
$\gamma^2(2+(1-v^2)\kappa^{-1/2}+O(\kappa^{-1}))$.
Next, we need to solve two constrained
quadratic optimization problems to obtain a lower bound on
the numerator of \eref{eq:qdef}. The constraint is
$\x\in\mathcal{M}_{k}$ for the first and
$\y\in\mathcal{M}_{k}$ for the second.
Imposing the constraint is simpler if it is written
as an inhomogenous linear equation;
one checks that
\begin{align*}
\mathcal{M}_{k}&=\w_{k-1}+\Span\{\x_{k-1}-\w_{k-1},\g\} \\
&=\left\{\x\in\R^3:\frac{\kappa^{-1/4}x_1}{1-\kappa^{-1}}-v\kappa^{-1/4}x_2+x_3=\gamma\right\},
\end{align*}
by substituting $\w_{k-1}$, $\x_{k-1}-\w_{k-1}$ and $\g$ into the
left-hand side of the given equation and confirming that the values are $\gamma, 0, 0$
respectively.
It is also straightforward to check using a Lagrange-multiplier
argument that for any positive $A,B,C$ and any $(a,b,c)\ne (0,0,0)$,
$$\min\{Ax_1^2+Bx_2^2+Cx_3^2:ax_1+bx_2+cx_3=\gamma\}=\frac{\gamma^2}{a^2/A+b^2/B+c^2/C}.$$
Using this result for the first term of the numerator of \eref{eq:qdef}
yields that for any $\x\in\mathcal{M}_k$,
\begin{align*}
f(\x)-\min\{f\}&\ge \frac{1}{2}\cdot\frac{\gamma^2}{
\frac{\kappa^{-1/2}}{L(1-\kappa^{-1})^2} +
\frac{v^2\kappa^{-1/2}}{\sqrt{L\ell}} +
\frac{1}{\ell}} \\
&= \frac{\gamma^2\ell}{2}(1+O(\kappa^{-1})).
\end{align*}
As for the second term of the numerator, for any $\y\in\mathcal{M}_k$,
\begin{align*}
\Vert\y-\argmin\{f\}\Vert^2 &\ge
\frac{\gamma^2}{
\frac{\kappa^{-1/2}}{(1-\kappa^{-1})^2} +
v^2\kappa^{-1/2} +
1} \\
&= \gamma^2(1-(1+v^2)\kappa^{-1/2}+O(\kappa^{-1})).
\end{align*}
Therefore, a lower bound on the numerator of \eref{eq:qdef}
is $\gamma^2(2-(1+v^2)\kappa^{-1/2}+O(\kappa^{-1}))$.
Finally, for any $\x,\y\in \mathcal{M}_k$,
\begin{align*}
q(\x,\y)&\ge \frac{\gamma^2(2-(1+v^2)\kappa^{-1/2}+O(\kappa^{-1}))}
{\gamma^2(2+(1-v^2)\kappa^{-1/2}+O(\kappa^{-1}))}\\
&=1-\kappa^{-1/2}+O(\kappa^{-1}).
\end{align*}
\end{proof}
We now turn to the assumptions of the preceding theorem.
An assumption like \eref{eq:assum5} is necessary because,
in the situation that $\Vert\g\Vert\ll \Vert \x_{k-1}-\w_{k-1}\Vert$,
strong convexity (see \eref{eq:rdef0})
implies that the true minimizer $\x^*$ is
close to $\w_{k-1}-\g/\ell$, meaning that the update
to $\y_k$ implicit in AG is suboptimal. Thus,
an assumption along the lines of
\eref{eq:assum5}
is necessary to establish the optimality of AG.
Orthogonality assumption \eref{eq:assum6} appears to be unnecessary
and rather is a limitation of our construction, which uses
a quadratic function $f$. Intuitively,
we need to construct a function
that varies more rapidly in one direction than another. We
used a quadratic function, whose level curves have fixed orthogonal axes,
which creates a requirement of orthogonality in the two directions.
However, a more general convex function may have level curves
whose axes of elongation vary from one level curve to the next.
As in the concluding remarks
of Section~\ref{sec:GDanalysis2},
the proof of the optimality of the algorithm combined with
consideration of the assumptions uncovers situations when the algorithm
may be making suboptimal choices. In the case of AG, this occurs
on iterations when $\Vert\nabla f(\w_{k-1})\Vert$ is unexpectedly small.
\section{A hybrid nonlinear conjugate gradient}
\label{sec:hybrid}
In this section, we propose a hybrid nonlinear conjugate gradient algorithm with
a convergence guarantee for smooth, strongly convex functions which is
related to an algorithm from the PhD thesis of the first author
\cite{karimi:thesis}. Classical
nonlinear conjugate gradient (NCG) methods such as the Fletcher-Reeves and
Polak-Ribi\`ere methods are known to have poor worst-case performance
for this class of functions---worse even than steepest descent. See
\cite{NemYud83} for more information. The method developed in
this section guarantees $O(\log(1/\epsilon)\sqrt{L/\ell})$ convergence,
the best possible, and reduces to the optimal CG
algorithm in the case of a quadratic function.
The algorithm proposed below uses classical nonlinear conjugate
gradient steps mixed with geometric descent steps.
The rationale for developing this algorithm is as follows. Classical NCG, although
it has no global convergence bound even for strongly convex functions,
behaves well on ``nearly quadratic'' functions. For typical objective
functions occurring in practice, nearly quadratic behavior is expected close
to the solution. Therefore, a method that can switch between steps with
a guaranteed complexity and NCG steps has the possibility of outperforming
both methods.
A summary of the algorithm is as follows. At the beginning of
iteration $k$, the algorithm has a quadruple
$(\x_{k-1},\y_{k-1},\p_{k-1},\tilde\sigma_{k-1})$. From this
quadruple, a step of nonlinear conjugate gradient can be
applied. For the line search, the line-search function of $\alpha$,
namely, $f(\x_{k-1}+\alpha\p_{k-1})$, is approximated by a univariate
quadratic, whose quadratic coefficient is obtained by computing
$\p_{k-1}^T\nabla^2 f(\x_{k-1})\p_{k-1}$ using reverse-mode automatic
differentiation. This approximation is exact in the case that $f$
itself is a quadratic function, in which case the hybrid algorithm
reproduces the steps of linear conjugate gradient.
The hybrid algorithm then computes $\x_k=\x_{k-1}+\alpha\p_{k-1}$ and
computes $\y_k$
as in the GD algorithm. It checks whether $f$ has decreased and
whether $\tilde\sigma_{k}^2\le (1-\sqrt{\ell/L})\tilde\sigma_{k-1}^2$.
If so, the iteration is over, and the nonlinear CG step is accepted.
If not, then a GD step is taken instead.
The detailed specification of the algorithm is as follows.
\begin{align}
& \mbox{\bf Hybrid Nonlinear Conjugate Gradient} \notag\\
& \x_0:=\mbox{arbitrary};\quad\y_0:=\x_0;\quad\p_0:=\bz\notag \\
& \g_{-1}:=\bz;
\tilde\sigma_0 := \sqrt{2}\Vert\nabla f(\x_0)\Vert/\ell \notag\\
& \mbox{for }k = 1,2,\ldots, \notag\\
& \hphantom{\mbox{for }} \g_{k-1}:=\nabla f(\x_{k-1})\notag\\
& \hphantom{\mbox{for }} (\x_k^{\rm CG}, \p_k) :=
{\it CGSTEP}(\x_{k-1},\p_{k-1},\g_{k-2},\g_{k-1},k) \notag\\
& \hphantom{\mbox{for }} (\y_k,\tilde\xi^*_k) :=
{\it YCOMPUTE}(\x_{k-1},\g_{k-1},\y_{k-1},\tilde\sigma_{k-1}) \notag\\
& \hphantom{\mbox{for }} \hat\gamma_k^{\rm CG} := 2(f(\x_k^{\rm CG})-f(\x_{k-1}))/\ell
\label{eq:hatgamma1}\\
& \hphantom{\mbox{for }} \tilde\sigma_k^{\rm CG} :=
\sqrt{(\tilde\xi^*_k)^2+
\hat\gamma_k^{\rm CG}}\notag \\
& \hphantom{\mbox{for }}\mbox{if } \hat\gamma_k^{\rm CG} \le 0 \mbox{ and }
(\tilde\sigma_k^{\rm CG})^2\le \left(1-\sqrt{\ell/L}\right)\tilde\sigma_{k-1}^2
\notag\\
&\hphantom{\mbox{for if }} \x_{k}:=\x_{k}^{\rm CG} \notag\\
&\hphantom{\mbox{for if }} \tilde\sigma_{k}:=\tilde\sigma_k^{\rm CG} \notag\\
& \hphantom{\mbox{for }}\mbox{else}\notag \\
& \hphantom{\mbox{for if }} \obx_{k-1}:=\x_{k-1}-\g_{k-1}/L \notag\\
&\hphantom{\mbox{for if }} \x_k:=\argmin\{f(\x):\x\in\mathop{\rm aff}\{\obx_{k-1},\y_k\}\}
\label{eq:linesearch}\\
& \hphantom{\mbox{for if }} \hat\gamma_k := 2(f(\x_k)-f(\x_{k-1}))/\ell \label{eq:hatgamma2}\\
& \hphantom{\mbox{for if }} \tilde\sigma_k :=
\sqrt{(\tilde\xi^*_k)^2+
\hat\gamma_k}\notag \\
& \hphantom{\mbox{for if }} \p_k := \x_k-\x_{k-1} \label{eq:newp} \\
& \hphantom{\mbox{for }} \mbox{end} \notag \\
& \mbox{end} \notag
\end{align}
\begin{align}
&\mbox{{\bf Function }
${\it CGSTEP}(\x_{k-1},\p_{k-1},\g_{k-2},\g_{k-1},k)$} \notag\\
&\mbox{if }k == 1 \notag \\
& \hphantom{\mbox{if }}\p_k := -\g_{k-1} \notag \\
& \mbox{else} \notag\\
& \hphantom{\mbox{if }} \z := \g_{k-1}-\g_{k-2} \notag \\
& \hphantom{\mbox{if }}\beta_{k} :=
\frac{1}{\z^T\p_{k-1}}\left(
\z - \frac{2\p_{k-1}\Vert\z\Vert^2}{\z^T\p_{k-1}}
\right)^T\g_{k-1} \label{eq:hzbeta} \\
& \hphantom{\mbox{if }}\p_k:= \beta_k\p_{k-1}-\g_{k-1} \notag\\
& \mbox{end} \notag\\
& \alpha_k := -\frac{\p_k^T\g_{k-1}}{\p_k^T\nabla^2 f(\x_{k-1})\p_k} \label{eq:qstep} \\
& \x_k := \x_{k-1}+\alpha_k\p_k \notag \\
& \mbox{return } (\x_k,\p_k) \notag
\end{align}
\begin{align}
&\mbox{{\bf Function }
${\it YCOMPUTE}(\x_{k-1},\g_{k-1},\y_{k-1},\tilde\sigma_{k-1})$} \notag\\
& \oobx_{k-1} := \x_{k-1}-\g_{k-1}/\ell\notag\\
& \tilde\rho_{k-1} := \Vert \g_{k-1}\Vert /\ell \notag \\
& \mbox{if } \tilde\sigma_{k-1}^2 \le 2\tilde\rho_{k-1}^2 \notag \\
& \hphantom{\mbox{if }} \delta_{k-1} := \Vert\y_{k-1}-\oobx_{k-1}\Vert \label{eq:compdelta}\\
& \hphantom{\mbox{if }}\mbox{if }
\delta_{k-1} > \tilde\rho_{k-1} \mbox{ and }
\tilde\rho_{k-1}>|\tilde\rho_{k-1}^2-\tilde\sigma_{k-1}^2|^{1/2}
\label{eq:if2} \\
& \hphantom{\mbox{if if }}
\lambda_k :=
\frac{\delta_{k-1}^2+\tilde\rho_{k-1}^2-\tilde\sigma_{k-1}^2}{2\delta_{k-1}^2} \notag \\
& \hphantom{\mbox{if if }}\tilde\xi^*_k :=
\frac{1}{2}\sqrt{2\tilde\rho_{k-1}^2+2\tilde\sigma_{k-1}^2-\delta_{k-1}^2-
\frac{(\tilde\rho_{k-1}^2-\tilde\sigma_{k-1}^2)^2}{\delta_{k-1}^2}} \notag \\
& \hphantom{\mbox{if }}\mbox{else} \notag\\
& \hphantom{\mbox{if if }}\lambda_k := 1 \notag \\
& \hphantom{\mbox{if if }}\tilde\xi_k^*:= \tilde\sigma_{k-1} \notag \\
& \hphantom{\mbox{if }} \mbox{end} \notag \\
& \mbox{else} \notag \\
& \hphantom{\mbox{if }} \lambda_k := 0 \notag \\
& \hphantom{\mbox{if }} \tilde\xi_k^* := \tilde\rho_{k-1} \notag \\
& \mbox{end} \notag \\
& \y_k := (1-\lambda_k)\oobx_{k-1} + \lambda_k\y_{k-1} \notag \\
& \mbox{return } (\y_k,\tilde\xi_k^*) \notag
\end{align}
Some remarks on this procedure are as follows. The variable
$\hat\gamma_k$ in \eref{eq:hatgamma1}
and \eref{eq:hatgamma2} stands for $\gamma_k-\gamma_{k-1}$, where
$\gamma_k$ is defined as in \eref{eq:gammakdef}.
The line-search implicit in \eref{eq:linesearch} is carried
out with a univariate Newton method. Because we have not made
sufficient assumptions about $f$ to guarantee convergence of
Newton's method, the Newton method is safeguarded with a bisection method.
The univariate second derivative of $f$ needed for the Newton method
can be computed using reverse-mode automatic differentiation in time
proportional to the cost of evaluating $f(\x)$ (refer to \cite{NocedalWright}). This
univariate second derivative is also needed in \eref{eq:qstep}.
The formula for $\beta_k$ in \eref{eq:hzbeta} is from the
CG-Descent algorithm of Hager and Zhang \cite{HagerZhang}. As mentioned
earlier, the formula for $\alpha_k$ appearing in \eref{eq:qstep} is
based on a univariate quadratic Taylor expansion of the line-search function
at $\x_k$ in the direction $\p_k$. This formula is exact if $f$ itself
is quadratic, but in all other cases it is speculative. However, if it
yields a poor answer, the overall algorithm is still robust because
when the CG step gives a poor answer, the GD algorithm serves as a backup.
The main theorem about this method, which follows from the material
presented so far, is as follows.
\begin{theorem}
Assuming exact line-search in \eref{eq:linesearch},
the Hybrid NCG algorithm produces a sequence of iterates
$(\x_k,\y_k,\sigma_k)$ satisfying \eref{eq:sigma-ub} and
\eref{eq:sigma-decr}. Furthermore, if $f(\x)$ is a quadratic function,
then Hybrid NCG produces the same sequence of iterates as linear
conjugate gradient.
\end{theorem}
We now turn to three important numerical issues with this method.
The first issue to note is that function {\it YCOMPUTE} has an ``if''
statement \eref{eq:if2} not present in \eref{eq:gd.rpdef}--\eref{eq:sigmadef2}
when the GD algorithm was described.
In the case of the ``exact'' GD algorithm, the condition of the
if-statement is guaranteed to hold as established in Section~\ref{sec:GDanalysis}.
However, because the line-search is only approximate, \eref{eq:gradorth} does
not hold exactly, and therefore the condition of the \eref{eq:if2} may occasionally
fail. In this case, we safeguard its failure by defining $\y_k:=\y_{k-1}$
and $\sigma_k:=\sigma_{k-1}$ (so that $\tilde\sigma_k$ is updated only due
to the decrease in the objective), i.e., we keep the same containing sphere for
the optimizer as on the previous step.
The second numerical issue concerns the computation of $\delta_{k-1}$ in
\eref{eq:compdelta}. This formula is prone to roundoff error as the algorithm
converges because $\y_{k-1}$ and $\oobx_{k-1}$ both tend to $\x^*$ in the limit.
In our implementation,
we addressed this issue by maintaining a separate program variable storing
the vector $\y_k-\x_k$. This vector is updated using a recurrent formula
that is straightforward to derive; the recurrence updates $\y_k-\x_k$ using
vectors that also tend to $\bz$. Given an accurate representation
of $\y_k-\x_k$ it is clear that
\eref{eq:compdelta} can be computed without significant roundoff issues.
A similar issue and similar workaround is applied to \eref{eq:newp}.
The third numerical issue concerns the subtractions in
\eref{eq:hatgamma1}
and \eref{eq:hatgamma2}, which are also prone to roundoff error
as $\x_k$ converges. These errors could upset
the computation of $\tilde\sigma_k$.
Our implementation addressed this using ``computational divided
differences''; see, e.g., \cite{divdiff}.
\section{Computational Results}
\label{sec:comp}
We implemented four methods: Geometric Descent (GD), Accelerated Gradient (AG),
Hybrid Nonlinear Conjugate Gradient (HyNCG, described in the preceding section),
and NCG. NCG stands for nonlinear conjugate gradient using the Hager-Zhang
formula for $\beta_k$ given by \eref{eq:hzbeta}. (The entirety of their
NCG method is called ``CG-Descent''; however, we did not implement other aspects
of CG-Descent such as the line search.)
The line search used by
GD, HyNCG and NCG is based on Newton's method and
is safeguarded with a bisection. The techniques to address numerical
issues described in the
preceding section were applied in GD, HyNCG and NCG. (The line
search and the numerical techniques are not needed for AG).
We applied these four methods to two problem classes:
approximate BPDN and hinge-loss halfspace classification.
BPDN (basis pursuit denoising) refers to the
unconstrained convex optimization
problem:
$$\min \Vert A\x-\b\Vert^2 +\lambda\Vert\x\Vert_1$$
in which $\lambda > 0$ and $A\in\R^{m\times n}$
has fewer rows than columns, so that the problem
is neither strongly convex nor smooth. However, the following
approximation (called APBDN)
is both smooth and strongly convex on any bounded domain:
$$\min \Vert A\x-\b\Vert^2 +\lambda\sum_{i=1}^n\sqrt{x_i^2+\delta}$$
where $\delta>0$ is a fixed scalar. It is easy to see that as
$\delta\rightarrow 0$, the original problem is recovered. As
$\delta\rightarrow 0$, $\ell\rightarrow 0$ and $L\rightarrow\infty$,
where $\ell,L$ are the moduli of strong convexity and smoothness, respectively.
In our tests of ABPDN
we took $A$ to be a subset of $\sqrt{n}$ rows
of the discrete-cosine transform matrix of size $n\times n$, where
$n$ is an even power of 2. (This matrix and its transpose, although
dense, can be applied in $O(n\log n)$ operations.) The subset of
rows was selected to be those numbered by the first $m=\sqrt{n}$ prime
integers
in order to get reproducible
pseudorandomness in the choices. Similarly, in
order to obtain a pseudorandom $\b$, we selected $\b\in\R^m$
according to the formula $b_i=\sin(i^2)$. The value of
$\lambda$ was fixed at $10^{-3}$ in all tests; the convergence
criterion was $\Vert\nabla f(\x_k)\Vert\le 10^{-8}$.
Finally,
we varied $\delta=10^{-2},10^{-3},10^{-4}$ and we tried both
$n=65536$ and $n=262144$.
The second test-case is the hinge-loss (HL) function for half-space
identification taken from \cite{bubeck}, which is as follows:
$f(\x)=H(\b\circ(A\x))+\lambda\Vert\x\Vert^2/2$, where
$A$ is a given $m\times n$
matrix,
$\b$ is a given $m$-vector of $\pm 1$, `$\circ$' denotes Hadamard product (i.e., the entrywise
product of two vectors), $\lambda>0$ is a regularization parameter, and
$H(\v)=\sum_{i=1}^mh(v_i)$ where
$$h(v)=\left\{
\begin{array}{ll}
0.5-v, & v\le 0, \\
(1-v)^2/2, & v\in[0,1], \\
0, & v\ge 1.
\end{array}
\right.$$
Minimizing $f(\cdot)$ corresponds to finding a hyperplane determined
by $\x$ of the form $U=\{\z\in\R^n:\x^T\z=0\}$ such that rows $i$ of $A$,
$i=1,\ldots,m$, for which
$b_i=1$ lie on one side of $U$ (i.e., $A(i,:)\x>0$)
while rows $i$ of $A$ for which
$b_i=-1$ lie on the opposite side (i.e., $A(i,:)\x<0$).
The objective function penalizes misclassified
points as well as penalizing a large value of $\x$.
This function is smooth and strongly convex. As $\lambda\rightarrow 0$, the
strong convexity parameter $\ell$ vanishes.
Unlike \cite{bubeck}, who test GD applied
to this function on data sets available
on the web, we have tested the four algorithms on synthetic data for the
purpose of better control over experimental conditions.
In our tests, $m=200000$, $n=447$ (so that $n\approx\sqrt{m}$),
$\lambda=3\cdot 10^{-1}, 3\cdot 10^{-2}, 3\cdot 10^{-3}$.
For each $i$, $i=1,\ldots, m$, $b(i)=\pm 1$ chosen at random with
probability $0.5$. If $b(i)=1$, then $A(i,:)=[1,\ldots,1]/\sqrt{n}+\w_i^T$,
where $\w_i$ is a noise vector chosen as a spherical Gaussian with
covariance matrix ${\rm diag}(\sigma^2,\ldots,\sigma^2)$, where $\sigma=0.4$.
If $b(i)=-1$, then $A(i,:)=-[1,\ldots,1]/\sqrt{n}+\w_i^T$.
For these tests, the convergence test was $\Vert \nabla f(\x_k)\Vert \le 10^{-6}$.
The results of all tests are shown in Table~\ref{tab:compresults}.
For NCG and GD, the numbers in this table are the number of inner iterations (line
search steps), which is the dominant cost in these algorithms.
In the case of HyNCG, we have reported the sum of the number of CG steps
(which do not require a line-search) plus the number of inner line-search
iterations. For AG we have
reported the number of outer iterations.
The notation DNC indicates that the algorithm did not achieve the
requisite tolerance after
$10^5$ outer iterations.
\begin{table}
\begin{center}
\begin{tabular}{|l|rrrr|}
\hline
& GD & AG & NCG & HyNCG \\
\hline
ABPDN, $n=65536$, $\delta=10^{-2}$ & 58,510 & DNC & 12,345 & 757 \\
ABPDN, $n=65536$, $\delta=10^{-3}$ & 314,367 & DNC & DNC & 9,510 \\
ABPDN, $n=65536$, $\delta=10^{-4}$ & 585,362& DNC & DNC & 28,395 \\
ABPDN, $n=262144$, $\delta=10^{-2}$ & 7,734 & 34,758 & 488 & 123 \\
ABPDN, $n=262144$, $\delta=10^{-3}$ & 782,223 & DNC & DNC & 17,195 \\
ABPDN, $n=262144$, $\delta=10^{-4}$ & DNC & DNC & DNC & 40,328 \\
HL, $m=200000$, $\lambda =0.3$ & 154 & 13,170 & 112 & 37 \\
HL, $m=200000$, $\lambda =0.03$ & 151 & 29,218 & 110 & 37 \\
HL, $m=200000$, $\lambda =0.003$ & 151 & 58,793 & 113 & 44 \\
\hline
\end{tabular}
\end{center}
\caption{Number of iterations (see text for details) of four algorithms
on nine synthetic test cases.}
\label{tab:compresults}
\end{table}
One sees from the table that HyNCG was superior in every test case, sometimes by
a wide margin. An unexpected feature of the table, for which we currently do not have
an explanation, is that in the case of the HL suite of problems, the
number of iterations was nearly invariant with respect to variation in $\lambda$,
except for AG, whose running time grows steadily with decreasing $\lambda$.
To conclude this section, we also consider two hybrid algorithms
that do not use the potential. They are as follows:
compute a step of both GD and CG, and then select the step that
decreases either $\Vert \nabla f(\x_k)\Vert$ (denoted HyNCG/gr) or
$f(\x_k)$ (denoted HyNCG/f) by the greatest amount. The results of
this experiment are presented in Table~\ref{tab:compresults2}.
\begin{table}
\begin{center}
\begin{tabular}{|l|rrr|}
\hline
& HyNCG & HyNCG/gr & HyNCG/f \\
\hline
ABPDN, $n=65536$, $\delta=10^{-2}$ & 757 & 1,346 & 36,708 \\
ABPDN, $n=65536$, $\delta=10^{-3}$ & 9,510& 24,509 & 252,050 \\
ABPDN, $n=65536$, $\delta=10^{-4}$ & 28,395 & 67,354 & 105,947 \\
ABPDN, $n=262144$, $\delta=10^{-2}$ & 123 & 184 & 394 \\
ABPDN, $n=262144$, $\delta=10^{-3}$ & 17,195 & 37,435 & 652,435 \\
ABPDN, $n=262144$, $\delta=10^{-4}$ & 40,328 & 86,445 & 213,226 \\
HL, $m=200000$, $\lambda =0.3$ & 37 & 57 & 45 \\
HL, $m=200000$, $\lambda =0.03$ & 37 & 57 & 45 \\
HL, $m=200000$, $\lambda =0.003$ & 44 & 64 & 61 \\
\hline
\end{tabular}
\end{center}
\caption{Number of iterations (see text for details) of three
different hybrids on nine synthetic test cases.}
\label{tab:compresults2}
\end{table}
The table shows that the hybrid
based on the potential outperforms the other two methods, often
by a factor of 2 and sometimes
by a large factor.
The reason for this follows from
the discussion in Section~\ref{sec:computable}.
Although the norm of the gradient can be used (and in fact, {\em was} used
for all tests in this section) as a termination criterion, it is not
helpful for measuring progress step by step.
The two methods HyNCG/gr and HyNCG/f must carry out two evaluations
per iteration to decide which step is preferable.
In contrast, the hybrid HyNCG based on the
potential can select the CG step without trying an alternative
provided the potential shows sufficient decrease.
\section{Conclusions}
We have demonstrated that a single computable potential bounds the
convergence of three algorithms, conjugate gradient, accelerated gradient
and geometric descent. We have also pointed out other connections between
the algorithms, namely, their relationship to an idealized algorithm and
their relationship to the Bubeck-Lee-Singh lemma.
The existence of this potential enables the formulation
of a hybrid method for convex optimization that duplicates the steps of conjugate
gradient in the case of conjugate gradient but nonetheless achieves the
optimal complexity for general smooth, strongly convex problems.
Directions for future work include the following.
\begin{itemize}
\item
The hybrid algorithm requires prior knowledge of $\ell,L$; it would be interesting
to develop an algorithm with the same guarantees that does not need prior knowledge
of them. Note that linear conjugate gradient does not need any such prior knowledge
of the coefficient matrix $A$.
\item
It would be interesting to establish a theoretical result about the improved
performance of the hybrid algorithm in the case of ``nearly quadratic'' functions.
\item
Although accelerated gradient has been extended well beyond the realm
of unconstrained smooth, strongly convex functions, none of the other
algorithms has been. It would be interesting to extend the conjugate
gradient ideas outside this space.
Also interesting is the extension to constrained or composite convex
minimization. See, for example, \cite{KarimiVavasis}.
\end{itemize}
\end{document} |
\begin{document}
\maketitle
\footnote{partially supported by Grant of JSPS}
\begin{abstract}
This paper shows some criteria for a scheme of finite type over an
algebraically closed field to be non-singular in terms of jet
schemes.
For the base field of characteristic zero,
the scheme is non-singular if and only if one of the truncation morphisms
of its jet schemes
is flat.
For the positive characteristic case,
we obtain a similar characterization under the reducedness condition on the
scheme.
We also obtain by a simple discussion that the scheme is non-singular if and only if one of its jet schemes is
non-singular.
\end{abstract}
\section{Introduction}
\noindent
In 1968 John F. Nash introduced the jet schemes and the arc space of
an algebraic and an analytic variety and
posed the Nash problem (\cite{nash}).
The jet schemes and the arc space are considered to be something to reflect
the nature of the
singularities of a variety.
(The Nash problem itself concerns a connection between the arc
space and the singularities.)
By looking at the jet schemes over a variety,
we can see some properties of the
singularities of the variety (see \cite{ein}, \cite{e-Mus}, \cite{must01},
\cite{must02}) : for example, if \( X \) is
locally a complete intersection variety, the singularities of \( X
\) are canonical (resp.
terminal) if and only
if the jet scheme \( X_{m} \) is irreducible (resp. normal) for every
\( m\in {\Bbb N} \).
For a non-singular variety \( X \), the jet
schemes are distinguished: the \( m \)-jet scheme \( X_{m} \) is
non-singular for every \( m\in {\Bbb N} \) and every truncation morphism \(
\psi_{m',m}:
X_{m'}\longrightarrow X_{m}\) is smooth with the fiber \( {\Bbb A}_{k}^{(m'-m)\dim X} \)
for \( m'>m\geq 0\).
Then, it is natural to ask whether these properties
characterize the smoothness of the variety \( X \).
Our results are rather stronger, i.e., only one jet scheme or one
truncation morphism is sufficient to characterize the smoothness of the variety \( X \).
In this paper we prove the following:
\begin{prop}
\label{sm}
Let \( k \) be a field of arbitrary
characteristic and \( f:X\longrightarrow Y \) a morphism of \( k \)-schemes.
Then the following are equivalent:
\begin{enumerate}
\item[(i)] \( f \) is smooth (resp. unramified, \'etale);
\item[(ii)] For every \( m\in {\Bbb N} \), the morphism \( f_{m}:X_{m}\longrightarrow
Y_{m} \) induced from $f$ is smooth (resp. unramified, \'etale);
\item[(iii)] There is an integer \( m\in {\Bbb N} \) such that
the morphism \( f_{m}:X_{m}\longrightarrow
Y_{m} \) is smooth (resp. unramified, \'etale).
\end{enumerate}
\end{prop}
As a corollary of this proposition, we obtain the following:
\begin{cor}
\label{smooth}
Let \( k \) be a field of arbitrary
characteristic.
A scheme \( X \) of finite type over \( k \) is
smooth if and only if there is \( m\in {\Bbb Z}_{\geq 0} \) such that \( X_{m}
\) is smooth.
\end{cor}
\begin{thm}
\label{flat}
Let \( k \) be an algebraically closed field of characteristic zero.
A scheme \( X \) of finite type over \( k \)
is non-singular if and only if there is a pair of
integers \( 0\leq m<m' \) such that the truncation morphism \( \psi_{m', m}: X_{m'}\longrightarrow
X_{m} \) is a flat morphism.
\end{thm}
Here, we note that the assumption of the characteristic of the base
field in Theorem \ref{flat} is necessary.
We will see a counterexample of this statement in positive
characteristic (Example \ref{ex}).
If we assume that the scheme $X$ is reduced,
then we have a similar criterion as Theorem \ref{flat} also for the positive characteristic case.
\begin{thm}
\label{positive}
Let \( k \) be an algebraically closed field of arbitrary characteristic.
Assume the scheme \( X \) of finite type over \( k \) is reduced.
Then $X$ is non-singular if and only if there is a pair of
integers \( 0< m<m' \) such that
the
truncation morphism \(
\psi_{m',m}:X_{m'}\longrightarrow X_{m} \) is flat.
\end{thm}
This paper is motivated by Kei-ichi Watanabe's question.
The author expresses her hearty thanks to him.
The author is also grateful to Mircea Musta\c{t}\u{a} for his
helpful comments and stimulating discussions.
\section{Preliminaries on jet schemes}
In this paper, a $k$-scheme is always a separated scheme over a field $k$.
\begin{defn}
Let \( X \) be a scheme of finite type over \( k \)
and $K\supset k$ a field extension.
A morphism \( \operatorname{Spec} K[t]/(t^{m+1})\longrightarrow X \) is called an \( m \)-jet
of \( X \).
\end{defn}
\begin{say}
\label{field}
Let \( X \) be a scheme of finite type over \( k \).
Let \( {\cal S}ch/k \) be the category of \( k \)-schemes
and \( {\cal S}et \) the category of sets.
Define a contravariant functor \( {\cal F}_{m}^X: {\cal S}ch/k \longrightarrow{\cal S}et \)
by
$$
{\cal F}_{m}^X(Y)=\operatorname{Hom} _{k}(Y\times_{\operatorname{Spec} k}\operatorname{Spec} k[t]/(t^{m+1}), X).
$$
Then, \( {\cal F}_{m}^X \) is representable by a scheme \( X_{m} \) of finite
type over \( k \), that is
$$
\operatorname{Hom} _{k}(Y, X_{m})\simeq\operatorname{Hom} _{k}(Y\times_{\operatorname{Spec} k}
\operatorname{Spec} k[t]/(t^{m+1}), X).
$$
This \( X_{m} \) is called the {\it scheme of \( m \)-jets} of \( X
\) or the {\it \( m \)-jet scheme} of \( X \).
For \( m<m' \) the canonical surjection \( k[t]/(t^{m'+1})\longrightarrow k[t]/(t^{m+1}) \)
induces a morphism \( \psi^X_{m',m}:X_{m'}\longrightarrow X_{m} \),
which we call a truncation morphism.
In particular, for \( m=0 \) \( \psi^X_{m,0}:
X_{m}\longrightarrow X \) is denoted by \( \pi^X_{m} \).
We denote \( \psi^X_{m',m} \) and \( \pi^X_{m} \) by \(
\psi_{m',m} \) and \( \pi_{m} \), respectively, if there is no risk
of confusion.
By \ref{field}, a point \( z \in X_{m} \) gives an \( m \)-jet \(
\alpha_{z}:
\operatorname{Spec} K[t]/(t^{m+1})
\longrightarrow X \) and \( \pi^X_{m}(z)=\alpha_{z}(0) \),
where \( K \) is the residue field at \( z \) and \( 0 \) is
the point of \( \spec K[t]/(t^{m+1}) \).
From now on we denote a point \( z \) of \( X_{m} \) and the
corresponding \( m \)-jet \( \alpha_{z} \) by the common symbol \(
\alpha \).
\end{say}
\begin{say}
The canonical inclusion \( k\longrightarrow k[t]/(t^{m+1}) \) induces a
section \( \sigma^X_{m}:X \hookrightarrow X_{m} \) of \( \pi^X_{m} \).
The image \( \sigma^X_{m}(x) \) of a point \( x\in X \) is the
trivial \( m \)-jet at \( x \) and is denoted by \( x_{m} \).
\end{say}
\begin{say}
Let \( f:X\longrightarrow Y \) be a morphism of \( k \)-schemes.
Then the canonical morphism \( f_{m}:X_{m}\longrightarrow Y_{m} \)
is induced for every \( m\in {\Bbb N} \) such that the
following diagram is commutative:
\[ \begin{array}{ccc}
X_{m}& \stackrel{f_{m}}\longrightarrow & Y_{m}\\
\pi^X_{m} \downarrow\ \ \ \ \ & & \ \ \ \ \downarrow \pi^Y_{m}\\
X & \stackrel{f}\longrightarrow & Y\\
\end{array}. \]
Pointwise, for \( \alpha\in X_{m} \) , \( f_{m}(\alpha)\) is the $m$-jet \[ f\circ \alpha:
\spec K[t]/(t^{m+1})\stackrel{\alpha}\longrightarrow X \stackrel{f}\longrightarrow Y. \]
\end{say}
\section{Proof of Proposition \ref{sm}}
\noindent
[{\it Proof of Proposition \ref{sm}}]
(i)\( \Rightarrow \) (ii): This implication for smooth and \'etale cases is already mentioned in \cite{BL} and \cite{EM}.
For the reader's convenience, the proof is included here.
Assume for an integer \( m\geq 0 \), a commutative diagram of \( k \)-schemes:
\[ \begin{array}{ccc}
X_{m}& \stackrel{f_{m}}\longrightarrow & Y_{m}\\
\uparrow& & \uparrow\\
Z' &\hookrightarrow& Z\\
\end{array}
\]
is given, where \( Z'\hookrightarrow Z \) is a closed immersion of affine schemes
whose defining ideal is nilpotent.
This diagram is equivalent to the following commutative diagram:
\[ \begin{array}{ccc}
X& \stackrel{f}\longrightarrow &Y \\
\uparrow& & \uparrow\\
Z'\times {\Spec k[t]/(t^{m+1})} &\hookrightarrow& Z\times {\Spec k[t]/(t^{m+1})}\\
\end{array}. \]
Here, we note that \( Z'\times {\Spec k[t]/(t^{m+1})} \hookrightarrow Z\times {\Spec k[t]/(t^{m+1})} \)
is a closed subscheme with the nilpotent defining ideal.
If \( f \) is smooth (resp. unramified, \'etale),
there exists a (resp. there exists at most one, there exists a unique)
morphism \( Z\times {\Spec k[t]/(t^{m+1})} \longrightarrow X \) which makes the two triangles
commutative.
This is equivalent to the fact that there exists a (resp. there exists at most one,
there exists a unique)
morphism \( Z \longrightarrow X_{m} \) which makes the two triangles in the first
diagram
commutative.
\noindent
(ii)\( \Rightarrow \) (iii): trivial.
\noindent
(iii)\( \Rightarrow \) (i):
Assume a commutative diagram,
\begin{equation}\label{d1}
\begin{array}{ccc}
X& \stackrel{f}\longrightarrow &Y \\
\varphi\uparrow\ \ \ & & \ \ \ \uparrow\psi\\
Z' &\hookrightarrow& Z \\
\end{array}
\end{equation}
is given, where \( Z'\hookrightarrow Z \) is a closed immersion of affine schemes whose defining ideal is nilpotent.
For an integer \( m\geq 0 \), by composing with the sections
\(\sigma_{m}^X: X\hookrightarrow X_{m} \), \( \sigma_{m}^Y:
Y\hookrightarrow Y_{m} \), we obtain the commutative diagram:
\begin{equation}\label{d2}
\begin{array}{ccc}
X_{m}&\stackrel{f_{m}}\longrightarrow& Y_{m}\\
\cup& & \cup\\
X& \stackrel{f}\longrightarrow &Y \\
\varphi\uparrow\ \ \ & & \ \ \ \uparrow\psi\\
Z' &\hookrightarrow& Z \\
\end{array}. \end{equation}
Now, if \( f_{m} \) is smooth (resp. unramified, \'etale),
there exists a (resp. exists at most one, exists a unique )
morphism \( Z\longrightarrow X_{m} \) such that the two triangles are commutative
in the diagram (\ref{d2}).
By composing this morphism $Z\longrightarrow X_m$ with $\pi_m^X:X_m\longrightarrow X$,
we obtain that there exists a (resp. exists at most one, exists a unique )
morphism \( Z\longrightarrow X \) such that the two triangles in the lower rectangle are commutative.
\( \Box \)
\vskip.5truecm
\noindent
[{\it Proof of Corollary \ref{smooth}}]
In Proposition \ref{sm}, let \( Y=\spec k \). \( \Box \)
\section{Jet schemes of a local analytic scheme}
For the proofs of the theorems, here we set up the jet schemes for local analytic schemes.
Let $k$ be an algebraically closed field of arbitrary characteristic.
The representability of the following functor follows from \cite{voj}.
Here, we show the concrete form of the scheme representing the functor.
\begin{prop}
Let ${\widehat{{\Bbb A}_{k}^N}}$ be the affine scheme $\operatorname{Spec} \widehat{\o_{{\Bbb A}^N,0}}$, where $\o_{{\Bbb A}^N,0}$ is the local ring of the origin $0\in {\Bbb A}_k^N$ and $\widehat{\o_{{\Bbb A}^N,0}}$ is the completion of $\o_{{\Bbb A}^N,0}$ at the maximal ideal.
Let ${\cal F}_m^{{\widehat{{\Bbb A}_{k}^N}}}: Sch/k \longrightarrow Set$ be the functor from the category of $k$-schemes to the category of sets defined as follows:
$${\cal F}_m^{{\widehat{{\Bbb A}_{k}^N}}}(Y):=\operatorname{Hom}_k(Y\times_{\spec k} {\Spec k[t]/(t^{m+1})}, {\widehat{{\Bbb A}_{k}^N}}).$$
For a morphism $u:Y\longrightarrow Z$ in $Sch/k$,
$${\cal F}_m^{{\widehat{{\Bbb A}_{k}^N}}}(u):\operatorname{Hom}_k(Z\times{\Spec k[t]/(t^{m+1})},{\widehat{{\Bbb A}_{k}^N}})\longrightarrow\operatorname{Hom}_k(Y\times{\Spec k[t]/(t^{m+1})},{\widehat{{\Bbb A}_{k}^N}}) $$
is defined by $f\mapsto f\circ (u\times id)$.
Then, ${\cal F}_m^{{\widehat{{\Bbb A}_{k}^N}}}$ is representable by the scheme
$$({\widehat{{\Bbb A}_{k}^N}})_m:=\operatorname{Spec} k[[x_{0,1}, x_{0,2},\ldots,x_{0,N}]][x_{1,1},\ldots,x_{1,N},\ldots,x_{m,
1},\ldots,x_{m,N}]$$
$$ =\spec k[[{\bf x}_{0}]][{\bf x}_{1},\ldots,{\bf x}_{m}], $$
where we denote the multivariables \( (
x_{i,1},x_{i,2},\ldots,x_{i,N})\) by \( {\bf x}_{i} \) for the
simplicity of notation.
\end{prop}
\begin{pf} We may assume that $Y$ is an affine scheme $\spec R$ over $k$.
Then,
$$\operatorname{Hom}_k(Y\times{\Spec k[t]/(t^{m+1})},{\widehat{{\Bbb A}_{k}^N}}) \simeq \operatorname{Hom}_k(k[[{\bf x}_0]], R[t]/(t^{m+1}))$$
Here we have a bijection:
$$\operatorname{Hom}_k(k[[{\bf x}_0]], R[t]/(t^{m+1}))\simeq \operatorname{Hom}_k(k[[{\bf x}_0]],R)\times R^{mN}$$
by $\varphi\mapsto (\pi_0\circ \varphi, \pi_1\varphi(x_{0,1}),...,\pi_1\varphi(x_{0,N}),..., \pi_m\varphi(x_{0,1}),...,\pi_m\varphi(x_{0,N}))$,
where $$\pi_i: R[t]/(t^{m+1})\longrightarrow R\ \ \ \ (i=0,1,...,m) $$ is the projection of $R[t]/(t^{m+1})
=R\oplus Rt \oplus\cdots \oplus Rt^m \simeq R^{m+1}$
to the $i$-th factor.
Indeed it gives a bijection, since we have the inverse map
$$\operatorname{Hom}_k(k[[{\bf x}_0]],R)\times R^{mN}\longrightarrow\operatorname{Hom}_k(k[[{\bf x}_0]], R[t]/(t^{m+1}))$$
by
$$(\varphi_0, a_{1,1},..,a_{1,N},...,a_{m,1},..., a_{m,N})\mapsto \varphi$$
where $\varphi\in \operatorname{Hom}_k(k[[{\bf x}_0]], R[t]/(t^{m+1}))$ is defined as follows:
For $\gamma(x_{0,1}, x_{0,2},\ldots,x_{0,N})\in k[[{\bf x}_0]]$,
substituting $\sum_{i=0}^m x_{i,j}t^i$ into $x_{0,j}$ $(j=1,...,N)$ in $\gamma$,
we obtain
$$\gamma(\sum {\bf x}_i t^i)=\sum_{i=0}^{\infty}\left(\sum_{\sum_{\ell} i_{\ell}=i, 1\leq j_{\ell}\leq N}
\gamma_{i_1,j_1,...,i_s, j_s}x_{i_1,j_1}\cdots x_{i_s,j_s}\right)t^i$$
in $k[[{\bf x}_{0}, {\bf x}_{1},\ldots,{\bf x}_{m}, t]]$, where $\gamma_{i_1,j_1,...,i_s, j_s}\in k[[{\bf x}_0]]$.
Define $\varphi(\gamma)\in R[t]/(t^{m+1})$ by
$$\varphi(\gamma)=\sum_{i=0}^m\left(\sum_{\sum_{\ell} i_{\ell}=i, 1\leq j_{\ell}\leq N}
\varphi_0(\gamma_{i_1,j_1,...,i_s, j_s})a_{i_1,j_1}\cdots a_{i_s,j_s}\right)t^i.$$
On the other hand,
it is clear that there is a bijection
$$\operatorname{Hom}_k(k[[{\bf x}_0]][{\bf x}_1,...,{\bf x}_m], R)\simeq \operatorname{Hom}_k(k[[{\bf x}_0]],R)\times R^{mN}$$
by $\varphi\mapsto (\varphi|_{k[[{\bf x}_0]]}, \varphi(x_{1,1}),...,\varphi(x_{1,N}),...,
\varphi(x_{m, 1}),...,\varphi(x_{m,N}))$.
By this, we have
$$\operatorname{Hom}_k(k[[{\bf x}_0]], R[t]/(t^{m+1}))\simeq\operatorname{Hom}_k(k[[{\bf x}_0]][{\bf x}_1,...,{\bf x}_m], R),$$
which implies
$$\operatorname{Hom}_k(Y\times \spec k[t]/(t^{m+1}), {\widehat{{\Bbb A}_{k}^N}}) \simeq \operatorname{Hom}_k(Y, \spec k[[{\bf x}_0]][{\bf x}_1,...,{\bf x}_m]).$$
This completes the proof.
\end{pf}
By this proposition, we have the following:
\begin{cor}
Let $X\subset {\widehat{{\Bbb A}_{k}^N}}$ be a closed subscheme.
Let $I$ be the defining ideal of $X$ in ${\widehat{{\Bbb A}_{k}^N}}$.
Define a functor ${\cal F}_m^X: Sch/k\longrightarrow Set$ for this $X$ in the same way
as in the previous proposition.
For a power series \( f\in k[[{\bf x}_{0}]]\) we define an element
\( F_{m}\in k[[{\bf x}_{0}]][{\bf x}_{1},\ldots,{\bf x}_{m}] \) as follows:
\[ f(\sum_{i= 0}^m {\bf x}_{i}t^i)=F_{0}+F_{1}t+F_{2}t^2+\cdots+
F_{m}t^m+\cdots . \]
Then, the functor ${\cal F}_m^X$ is represented by a scheme \( X_{m} \) defined in
\(( {\widehat{{\Bbb A}_{k}^N}})_{m}= \spec k[[{\bf x}_{0}]][{\bf x}_{1},\ldots,{\bf x}_{m}]\) by
the ideal generated by \( F_{i} \)'s \( (i\leq m) \) for all \( f\in I \).
(It is sufficient to take \( F_{i} \)'s \( (i\leq m) \) for all generators $f\in I$.)
\end{cor}
\begin{pf}
We use the notation in the proof of the previous proposition.
There, we obtained bijections :
$$\operatorname{Hom}_k(k[[{\bf x}_0]], R[t]/(t^{m+1}))\stackrel{\Phi}\simeq
\operatorname{Hom}_k(k[[{\bf x}_0]],R)\times R^{mN}$$
$$\stackrel{\Psi}\simeq
\operatorname{Hom}_k(k[[{\bf x}_0]][{\bf x}_1,...,{\bf x}_m], R).$$
Here, for $Y=\spec R$, we have the fact that
$${\cal F}_m^X(Y)=\operatorname{Hom}_k(k[[{\bf x}_0]]/I, R[t]/(t^{m+1}))$$
is the subset
$$\{\varphi:k[[{\bf x}_0]]\longrightarrow R[t]/(t^{m+1})\mid \varphi(\gamma)=0 \ \ \mbox{for\ generators}\ \gamma\in I\}$$
of $\operatorname{Hom}_k(k[[{\bf x}_0]], R[t]/(t^{m+1}))$.
The condition $\varphi(\gamma)=0$ is equivalent to the conditions $\pi_i\circ\varphi(\gamma)=0$
$(i=0,1,...,m)$.
Therefore, this subset is mapped by $\Psi\circ \Phi$ to the subset
$$\big\{\varphi:k[[{\bf x}_0]][{\bf x}_1,...,{\bf x}_m]\longrightarrow R\mid \ \varphi(x_{i,j})=a_{i,j}, \ \mbox{for\ generators}\ \gamma\in I, $$
$$\sum_{\sum_{\ell} i_{\ell}=i, 1\leq j_{\ell}\leq N}
\varphi_0(\gamma_{i_1,j_1,...,i_s, j_s})a_{i_1,j_1}\cdots a_{i_s,j_s}=0 \ (i=0,1,...,m)\big\}.$$
Let the ideal $J\subset k[[{\bf x}_0]][{\bf x}_1,...,{\bf x}_m]$ be generated by
$$\sum_{\sum_{\ell} i_{\ell}=i, 1\leq j_{\ell}\leq N}
\gamma_{i_1,j_1,...,i_s, j_s}x_{i_1,j_1}\cdots x_{i_s,j_s}$$ for generators $\gamma\in I,$
then it follows that our subset is equal to
$$\operatorname{Hom}_k(k[[{\bf x}_0]][{\bf x}_1,...,{\bf x}_m]/J,R).$$
\end{pf}
\begin{rem}
\label{algan}
Let $X\subset {\Bbb A}_k^N$ be a closed subscheme containing the origin $0$, $I_X$ the defining ideal
and $\widehat X$ the affine scheme $\spec \widehat {\o_{X,0}}$.
Note that the defining ideal $I$ of $\widehat{X}$ in ${{\widehat{{\Bbb A}_{k}^N}}}$
is generated by $I_X$. For a polynomial \( f\in k[{\bf x}_{0}] \) we define an element
\( F_{m}\in k[{\bf x}_{0}, {\bf x}_{1},\ldots,{\bf x}_{m}] \) in the same way as in the previous corollary.
Then \( \widehat X_{m} \) is defined in
\(( {\widehat{{\Bbb A}_{k}^N}})_{m}= \spec k[[{\bf x}_{0}]][{\bf x}_{1},\ldots,{\bf x}_{m}]\) by
the ideal generated by \( F_{i} \)'s \( (i\leq m) \) for generators \( f\in I_{X} \).
\end{rem}
\begin{cor}
Under the notation of Remark~\ref{algan}, it follows that
$$\widehat X_m=\widehat X\times_X X_m.$$
\end{cor}
\begin{pf}
Note that $F_i\in k[{\bf x}_0, {\bf x}_1,..., {\bf x}_m]$ for a generator $f$ of $I_X$ and $I$ is generated by $I_X$.
Now the expressions
$$X_{m}= \spec k[{\bf x}_{0},{\bf x}_{1},\ldots,{\bf x}_{m}]/ (F_i)_{f\in I_X}$$
$$\widehat X_{m}= \spec k[[{\bf x}_{0}]][{\bf x}_{1},\ldots,{\bf x}_{m}]/ (F_i)_{f\in I_X}$$
give the required equality.
\end{pf}
\begin{cor}
\label{equal}
Under the notation of Remark~\ref{algan},
let $\pi_m^X$ and $\pi_m^{\widehat X}$ be the canonical projections
$X_m\longrightarrow X$ and $\widehat X_m\longrightarrow\widehat X$, respectively.
Then, we obtain the isomorphism of schemes:
$$(\pi_m^X)^{-1}(0)\simeq (\pi_m^{\widehat X})^{-1}(0).$$
\end{cor}
\begin{cor}
\label{reduction}
Under the notation of Remark~\ref{algan},
replacing $X$ by a sufficiently small neighborhood of $0$, we obtain the equivalence that
the truncation morphism $X_{m'}\longrightarrow X_m$ is flat
if and only if
the truncation morphism $\widehat X_{m'}\longrightarrow\widehat X_m$ is flat.
\end{cor}
\begin{pf}
``Only if'' part follows from the base change property for flatness. ``If'' part follows from the fact that the homomorphism
$\o_{X,0}\longrightarrow\widehat{\o_{X,0}}$ is faithfully flat.
\end{pf}
\begin{defn}
\label{weight}
A monomial \( {\bf x}=\prod_{\ell=1}^d x_{i_{\ell},j_{\ell}}\in k[[{\bf x}_{0}]][{\bf x}_{1},\ldots,{\bf x}_{m}] \)
is called a monomial of {\it weight} \( w \) if \( w=\sum_{\ell=1}^d
i_{\ell}\).
For an element \( F\in k[[{\bf x}_{0}]][{\bf x}_{1},\ldots,{\bf x}_{m}] \) the
order \( \operatorname{ord} F \) is defined as the lowest degree of the monomials
in \( {\bf x}_{0},\ldots,{\bf x}_{m} \) that appear in \( F \).
\end{defn}
Note that every monomial in \( F_{m} \) has weight \(m \) for $f\in k[[{\bf x}_0]]$.
The next lemma follows from the definition of \( F_{m} \):
\begin{lem}
\label{appear}
\label{lem} Let \( f \) be a non-zero power series in \( k[[{\bf x}_{0}]] \) of order \(
\geq 1 \).
\begin{enumerate}
\item[(i)]
When char \( k \)= 0,
a monomial \( \prod_{\ell=1}^r x_{0,j_{\ell}} \) appears in \( f \) if and only if
for every \( i_{\ell}\geq 0 \),
the monomial \[ \prod_{\ell=1}^r x_{i_{\ell},j_{\ell}} \] appears in \(
F_{m} \), where \( \sum_{\ell}i_{\ell}=m \).
Hence, \( \operatorname{ord} F_{m}=\operatorname{ord} f \), and in particular \( F_{m}\neq 0 \)
for every \( m \).
\item[(ii)]
For any characteristic, a monomial $\displaystyle\prod_{j=1}^N x_{0,j}^{e_j}$ appears in $f$ if and only if
for every \( i_{\ell}\geq 0 \),
the monomial
$$\prod_{j=1}^N x_{i_j,j}^{e_j}$$
appears in $F_m$, where $m=\sum_j e_j i_j$.
\end{enumerate}
\end{lem}
\begin{pf}
The statement of ``if'' part follows immediately from the
definition of \( F_{m} \) for both (i) and (ii).
Now assume that
\( g= \prod_{\ell=1}^r x_{0,j_{\ell}} \) is a monomial in \( f \).
By substituting \( \sum_{i\geq 0}x_{i, j}t^i \)
into $x_{0,j}$ in this monomial, we obtain
\[ g(\sum_{i\geq 0}{\bf x}_{i}t^i)=G_{0}+G_{1}t+G_{2}t^2+\cdots. \]
Therefore, \( G_{m} \) is the sum of the monomials of the form \( \prod_{\ell=1}^r x_{i_{\ell},j_{\ell}} \)
with \( i_{\ell}\geq 0 \) and \( \sum_{\ell}i_{\ell}=m \).
If the characteristic of \( k \) is zero, the coefficients of each such
monomial is nonzero.
And each monomial \( \prod_{\ell=1}^r x_{i_{\ell},j_{\ell}} \) in
\(G_{m}\) is not canceled by the contribution from the other monomials of \( f
\),
because the collection \( (j_{1},..,j_{\ell},..,j_{r}) \)
assigns the source monomial \( \prod_{\ell=1}^r x_{0,j_{\ell}} \).
This shows the statement of ``only if'' part of (i).
For the proof of only if part of (ii), let $g=\prod_j x_{0,j}^{e_j}$ and define $G_i$ in the same way as in the previous discussion.
Then, the monomial $\prod_j x_{i_j,j}^{e_j}$ appears with coefficient 1 in $G_m$ for $m=\sum_j e_j i_j$.
Therefore, the coefficient of $\prod_j x_{i_j,j}^{e_j}$ in $F_m$ is the same as the coefficient of
$\prod_j x_{0,j}^{e_j}$ in $f$.
\end{pf}
\begin{rem} The statement (i) of Lemma \ref{appear} does not hold for positive characteristic case. For example, let $p>0$ be the characteristic of the base field $k$ and
\( f=x_{0,1}^p \in k[[x_{0,1}]]\).
Then \( F_{m}=x_{i,1}^p \) for \(
m=pi \) and \( F_{m}=0 \) for \( m\not\equiv 0 \) (mod \( p \)).
\end{rem}
As we saw in the previous section,
Corollary \ref{smooth} follows immediately from Proposition \ref{sm}.
But here we give another proof of Corollary \ref{smooth} for an algebraically closed base field, since we think that it gives some useful insight into jet schemes.
\vskip.5truecm
\noindent
[{\it Proof of Corollary \ref{smooth}}]
We may assume that \( (X,0)\subset ({\widehat{{\Bbb A}_{k}^N}}, 0) \) is a closed
subscheme with a singularity at $0$, where \( N \) is the embedding dimension of \( (X,0) \).
Then every element \( f \in I_{X} \) has order greater than 1.
By this, every element \( F_{i}\) of the defining ideal \( I_{X_{m}} \)
of \( X_{m} \) in \( ({\widehat{{\Bbb A}_{k}^N}})_{m} \)
has order greater than 1.
Here, note that \( I_{X_{m}}\neq 0 \), since \( I_{X}\neq 0 \) and \(
F_{0}=f \) for \( f\in I_{X} \).
Therefore the Jacobian matrix of \( I_{X_{m}} \) is
the zero matrix at the trivial \( m
\)-jet \( 0_{m} \in X_{m} \) at \( 0 \), which shows that \( 0_{m} \) is a singular
point in \( X_{m} \) for every \( m \). \( \Box \)
\section{Proofs of theorems \ref{flat}, \ref{positive}}
\begin{say}
\label{note}
For the proof of the theorems,
we fix the notation as follows:
Let \( (X,0)\subset ({\widehat{{\Bbb A}_{k}^N}}, 0) \) be a singularity of
embedding dimension \( N \).
Let \( 0\leq m<m' \),
\( R_{m}=k[[{\bf x}_{0}]][{\bf x}_{1},\ldots,{\bf x}_{m}] \), \( I\subset R_{m} \)
the defining ideal of \( X_{m} \) in \( ({\widehat{{\Bbb A}_{k}^N}})_{m} \), \(
R_{m'}=k[[{\bf x}_{0}]][{\bf x}_{1},\ldots,{\bf x}_{m},\ldots,{\bf x}_{m'}] \) and \( I'\subset R_{m'} \)
the defining ideal of \( X_{m'} \) in \( ({\widehat{{\Bbb A}_{k}^N}})_{m'} \).
Let \( M \) be the maximal ideal of \( R_{m}\) generated by \(
{\bf x}_{0},\ldots,{\bf x}_{m} \).
\end{say}
\begin{lem}
\label{notation}
Under the notation as in \ref{note}, if there is an element \( F\in I'
\cap MR_{m'} \) such that
\( F\not\in MI'+IR_{m'} \),
then the truncation morphism \(\psi_{m',m}: X_{m'}\longrightarrow X_{m} \)
is not flat.
\end{lem}
\begin{pf}
The truncation morphism \(\psi_{m',m}: X_{m'}\longrightarrow X_{m} \) corresponds to
the canonical
ring homomorphism \( R_{m}/I\longrightarrow R_{m'}/I' \).
The non-flatness follows from the non-injectivity of the
canonical homomorphism:
\[ M/I\otimes_{R_{m}/I}R_{m'}/I'\longrightarrow R_{m'}/I'. \]
Since we have an isomorphism of the first module
\[
M/I\otimes_{R_{m}/I}R_{m'}/I'\simeq MR_{m'} /(MI'+IR_{m'}),\]
the existence of an element
\( F\in I'\cap MR_{m'} \) such that \( F\not\in MI'+IR_{m'} \) gives
the non-injectivity.
\end{pf}
[{\it Proof of Theorem \ref{flat}}]
Assume that the base field \( k \) is algebraically closed and of characteristic zero and \( (X, 0) \) is
a singular point of a scheme $X$ of finite type over $k$.
Then we will deduce that every truncation morphism \(\psi_{m',m}: X_{m'}\longrightarrow X_{m}
\)
\( (m'>m\geq 0) \) is not flat.
For this, it is sufficient to prove that \(\psi_{m',m}: \widehat{X_{m'}}\longrightarrow\widehat{X_{m}}
\)
\( (m'>m\geq 0) \) is not flat by Corollary \ref{reduction}.
So we may assume that $X$ is a closed subscheme of ${\widehat{{\Bbb A}_{k}^N}}$ with the embedding dimension $N$.
Let $I_X$ be the defining ideal of $X$ in ${\widehat{{\Bbb A}_{k}^N}}$.
We use the notation of \ref{note}.
Let \( f \) be an element in \( I_{X} \) with the minimal order \( d
\).
Note that \( d\geq 2 \), as \( N \) is the embedding dimension.
Then, by Lemma \ref{lem}, (i), \( F_{m+1} \) is not zero and
presented as
\[ F_{m+1}=g_{1}({\bf x}_{0})x_{m+1, 1}+\cdots+g_{N}({\bf x}_{0})x_{m+1,N}+
g'({\bf x}_{0},\ldots,{\bf x}_{m}), \]
where \( \operatorname{ord}
F_{m+1}=d \) and
some of \( g_{i} \)'s are not zero.
We should note that \( \operatorname{ord} g_{i}=d-1 \) for all non-zero \( g_{i} \)'s.
As \( \operatorname{ord} g_{i}\geq 1\), for every \( i \) and \( \operatorname{ord} g'\geq 1 \), the element \( F_{m+1} \) is in \( MR_{m'} \).
It is clear that \( F_{m+1}\in I' \).
On the other hand,
as \( \operatorname{ord} I=\operatorname{ord} I'=d \), it follows that $\operatorname{ord} MI'\geq d+1$ and the initial term of an element
$IR_{m'}$ of order $d$ is the initial term of an element of $I$.
Hence,
the initial term of an element in
\( MI'+IR_{m'} \) of order \( d \) should be the initial term of an element of $I$,
therefore it should be a polynomial in \( {\bf x}_{0},\ldots, {\bf x}_{m} \).
However, the initial term of \( F_{m+1} \) is not of this form,
which implies
\( F_{m+1}\not\in MI'+IR_{m'}\).
By Lemma \ref{notation}, the non-flatness of \(
\psi_{m',m}:X_{m'}\longrightarrow X_{m} \) follows for every pair \( (m, m') \)
with \( 0\leq m<m' \).
\( \Box \)
\begin{exmp}
\label{ex}
The condition char\( k \)=0 is necessary for Theorem \ref{flat}.
Indeed,
there are counter examples for Theorem \ref{flat} in case of
positive characteristic.
For example,
let \( X \) be a scheme defined by \( x_{0,1}^p \) in \(
{\Bbb A}_{k}^1=\spec k[x_{0,1}] \) over a field \( k \) of characteristic p.
Let $r$ be an integer with $0<r < p$.
Then, for any positive integer $q$, we have
\[ X_{pq+r}=\spec k[x_{0,1}, x_{1,1},..., x_{pq+r, 1}]/(x_{0,1}^p,..., x_{q,1}^p) \] and
\[ X_{pq}=\spec k[x_{0,1}, x_{1,1},.., x_{pq, 1}] / (x_{0,1}^p,..., x_{q,1}^p). \]
It is clear that \( X_{pq+r} \) is flat over \( X_{pq} \), while \( X \) is
singular.
\end{exmp}
\vskip.5truecm
\noindent
[{\it Proof of Theorem \ref{positive}}]
As in the proof of the previous theorem, we will show the non-flatness of the truncation morphisms,
if $(X,0) $ is singular.
As $X$ is reduced, some fiber of the truncation morphism $\psi_{m',m}:X_{m'}\longrightarrow X_m$ has
dimension $\leq (m'-m)\dim (X,0)$ for a small affine neighborhood $X$ of $0$, if $\psi_{m',m}$ is flat.
(If $X$ is equi-dimensional, then the fiber has dimension $\dim (X,0)$.)
Hence, if $\psi_{m',m}$ is flat, by Corollaries \ref{equal}, \ref{reduction}, the dimension of the fiber over
a closed point in $(\pi_{m}^{\widehat X})^{-1}(0)$ by the morphism
$\widehat{\psi_{m',m}}:\widehat{X_{m'}}\longrightarrow\widehat{X_m}$ is $\leq (m'-m)\dim (X,0)$.
Keeping this fact and Corollary~\ref{reduction} in mind,
we may assume that $X$ is a singular closed subscheme of
${\widehat{{\Bbb A}_{k}^N}}$ for the embedding dimension $N$ of $(X,0)$.
First assume \( m'<d(m+1) \).
Note that for every \( g\in I_{X} \),
\[ \overline{G}_{i}=G_{i}({\bf 0},\ldots,{\bf 0},{\bf x}_{m+1},\ldots,{\bf x}_{i})=0 \]
for \( i<d(m+1) \).
This is because every monomial in $G_i$ has a factor $x_{\ell, j}$ with $\ell\leq m$,
since the weight of \( G_{i} \) is \( i\) \( (<d(m+1))\) and
\(\operatorname{ord} G_{i}\geq d \).
Let \( 0_{m} \) be the trivial \( m \)-jet at \( 0 \).
As \( \psi_{m', m}^{-1}(0_{m}) \) is defined in \( {\Bbb A}^{(m'-m)N} \)
by the ideal generated by \( \overline{G}_{i} \)'s with \( i\leq m' \) for $g\in I_X$,
it follows that \[ \psi_{m', m}^{-1}(0_{m})\simeq {\Bbb A}^{N(m'-m)}, \]
which is a fiber of dimension \( N(m'-m)> (m'-m)\dim (X,0) \).
Therefore, $\psi_{m',m}$ is not flat,
because otherwise the fiber dimension would be $(m'-m)\dim(X,0)$ as we saw before.
Therefore, we may assume that \( m'\geq d(m+1) \), where $d=\operatorname{ord} I_X$.
Let $f\in I_X$ have the order $ d$.
Let $\prod_j x_{0,j}^{e_j}$ be a monomial with the minimal degree in $f$.
Then, $\sum_j e_j=d$ and therefore $e_j\leq d$ for every $j$.
Let $e$ be one of non-zero $e_j$'s.
By the assumption $m'\geq d(m+1)$, there is a positive integer $i$ such that $m\leq ie <m'$.
Let $s$ be minimal among such $i$'s.
Then \( F_{se}\in I' \) is clear and also we have \( F_{se}\in MR_{m'} \) under the notation of \ref{note}.
Indeed, if a monomial \( \prod_{\ell=1}^u x_{i_{\ell}, j_{\ell}}
\) of \( F_{se} \) has a factor \( x_{i_{\ell}, j_{\ell}} \) with \( i_{\ell}\geq m+1
\), let this \( i_{\ell} \) be \( i_{1} \).
Then \( i_{1}\geq m+1 > (s-1)e \).
By this,
\[ \sum_{\ell\neq 1}i_{\ell}< se-(s-1)e=e\leq d\leq u. \]
Therefore, there is at least one \( \ell \) such that \( i_{\ell}\leq 1\leq m \).
Hence every monomial of \( F_{se} \) is contained in \( MR_{m'} \).
Now let $e=e_1$.
As
\[ \prod_j x_{0,j}^{e_j} \]
is a monomial of \( f \) of the minimal order \( d \), by Lemma \ref{appear},
\[ x_{1,s}^e\prod_{j\neq 1}x_{0, j}^{e_j} \]
is a monomial of \( F_{se} \).
Therefore, \( \operatorname{ord} F_{se}=d \).
This monomial does not appear in any element of \( MI'+IR_{m'} \).
Indeed, \( \operatorname{ord} MI'\geq d+1 \) and the initial term of
an element of \( IR_{m'} \) of order \( d \)
must be the initial term of an element of \( I \), because of \(
\operatorname{ord} I = d \).
Therefore, every initial monomial of an element of \( IR_{m'} \)
of order \( d \) is of the
form
\[ \prod_{\ell}x_{i_{\ell},j_{\ell}}, \ \ \ (\sum_{\ell}i_{\ell}\leq m),\]
since \( I \) is generated by \( F_{i} \)'s with \( i\leq m \) for \(
f\in I_{X} \).
As \( x_{1,s}^e\prod_{j\neq 1}x_{0, j}^{e_j} \) is not of
this form, we obtain \( F_{se}\not\in IR_{m'}+MI' \).
By this and Lemma \ref{notation}, it follows that
\( X_{m'}\longrightarrow X_{m} \) is not flat for \( m'>m> 0 \). \( \Box \)
\begin{rem}
In the proof of Theorem \ref{positive}, we used the condition $m\geq 1$.
It is not clear if the same statement as in Theorem \ref{positive} holds for $m=0$
in the positive characteristic case,
i.e., if the base field is of positive characteristic, $X$ is reduced and
$\pi_{m'}=\psi_{m',0}:X_{m'}\longrightarrow X$ is flat for some $m'>0$, then is $X$ non-singular?
But in particular, if $m'=1$, it holds true.
This is seen as follows:
For an affine scheme $X$ of finite type over $k$, the fiber of a point $x\in X$ by the projection $\pi_1:X_1\longrightarrow X$ is the Zariski tangent space
of the point.
Therefore $\dim \pi_1^{-1}(x)={\operatorname{embdim}} (X,x)$.
If $(X, 0)$ is singular and reduced, $\dim \pi_1^{-1}(x)> \dim(X,0)$, while there are points in a small neighborhood of $0$ such that the fiber dimension is $\dim (X,0)$.
Hence, $\pi_1$ is not flat.
\end{rem}
\makeatletter \renewcommand{\@biblabel}[1]{
#1.}\makeatother
\end{document} |
\begin{document}
\begin{frontmatter}
\titledata{The Erd\H{o}s--Faber--Lov\'{a}sz Conjecture revisited}{}
\authordatatwo{John Baptist Gauci}{[email protected]}{}{Jean Paul Zerafa}{[email protected]}{The research work disclosed in this publication is funded by the ENDEAVOUR Scholarship Scheme (Malta). The scholarship may be part-financed by the European Union -- European Social Fund (ESF) under Operational Programme II -- Cohesion Policy 2014--2020, ``Investing in human capital to create more opportunities and promote the well being of society''.}{Department of Mathematics, University of Malta, Malta}
\keywords{Erd\H{o}s--Faber--Lov\'{a}sz Conjecture, chromatic number, clique-decomposition, edge-colouring}
\msc{05C15}
\begin{abstract}
The Erd\H{o}s--Faber--Lov\'{a}sz Conjecture, posed in 1972, states that if a graph $G$ is the union of $n$ cliques of order $n$ (referred to as defining $n$-cliques) such that two cliques can share at most one vertex, then the vertices of $G$ can be properly coloured using $n$ colours. Although still open after almost 50 years, it can be easily shown that the conjecture is true when every shared vertex belongs to exactly two defining $n$-cliques. We here provide a quick and easy algorithm to colour the vertices of $G$ in this case, and discuss connections with clique-decompositions and edge-colourings of graphs.
\end{abstract}
\end{frontmatter}
\section{Introduction}\label{section intro}
For any graph $G$, the \emph{chromatic number} $\chi(G)$ is the least number of colours that are required to colour all the vertices of $G$ such that no two adjacent vertices receive the same colour. In 1972, Erd\H{o}s, Faber, and Lov\'{a}sz posed the following conjecture, whilst at a tea party in Boulder, Colorado.
\begin{conjecture}[EFL Conjecture] \cite{Erdos1981} \label{EFLConjecture}
If a graph $G$ is the union of $n$ cliques of order $n$, no two of which share more than one vertex, then $\chi(G)=n$.
\end{conjecture}
For ``a proof or disproof'' of the conjecture, Paul Erd\H{o}s initially offered $50$USD, but then, having seen that the problem is not as trivial and simple as it seems, he increased his offer to $500$USD. To this day, no complete solution of this conjecture exists. We refer the reader to \cite{efl0,efl1,efl2,efl3,efl4,efl5} for a more thorough introduction to the conjecture and recent results. In particular, we remark that the fractional version of the EFL Conjecture was solved by Kahn and Seymour \cite{KahnSeymour1992} in 1992. Moreover, in January 2021, it was announced \cite{kuhn} that the conjecture is true for sufficiently large values of $n$, which to our knowledge, is the best result so far in trying to attack the EFL Conjecture.
For every positive integer $n$, let $\mathbb{EFL}_{n}$ denote the class of graphs that are constructed as the union of $n$ cliques $Q_{1}, \ldots, Q_{n}$ each of order $n$, such that any two of these $n$ cliques intersect in at most one vertex. The $n$-cliques $Q_{1}, \ldots, Q_{n}$ are referred to as the \emph{defining} $n$-cliques, and a vertex which belongs to more than one defining $n$-clique is said to be \emph{shared}. Conjecture \ref{EFLConjecture} is equivalent to saying that if $G \in \mathbb{EFL}_{n}$, then $\chi(G)=n$. To avoid the trivial case when $n=1$, we tacitly assume that $n\geq 2$. We also remark that since a graph $G$ belonging to $\mathbb{EFL}_{n}$ contains $n$-cliques, $\chi(G)\geq n$, and so in order to prove that $\chi(G)=n$ it suffices to provide a proper $n$-colouring of the vertices of $G$.
Let $\mathcal{G}$ be in $\mathbb{EFL}_{n}$ such that every shared vertex belongs to \textbf{exactly} two defining $n$-cliques of $\mathcal{G}$.
It is known that, in this case, $\mathcal{G}$ admits a proper $n$-vertex-colouring by using clique-decompositions and edge-colourings as described in the following (for a more detailed discussion, the reader is referred to Section \ref{section last}). In fact, consider the complete graph $K_{n}$ on $n$ vertices, with each vertex of $K_{n}$ corresponding to a defining $n$-clique of $\mathcal{G}$ and with every shared vertex of $\mathcal{G}$ corresponding to an edge of $K_{n}$. By Vizing's Theorem, the edges of $K_{n}$ can be properly coloured using at most $n$ colours, and consequently, the shared vertices of $\mathcal{G}$ can also be properly coloured using at most $n$ colours. This colouring is then extended to a proper $n$-colouring of all the vertices of $\mathcal{G}$, as follows. Every defining $n$-clique of $\mathcal{G}$ contains at most $n-1$ shared vertices, which by the above are all coloured differently. Let $\mathcal{C}_i$ denote the set of colours of the shared vertices belonging to the defining $n$-clique $Q_{i}$. Since the number of unshared vertices of $Q_{i}$ is equal to $n-|\mathcal{C}_i|$, the unshared vertices of $Q_{i}$ can be assigned mutually different colours from $\{\,1,\ldots, n\,\}-\mathcal{C}_i$, yielding a proper $n$-vertex-colouring of $\mathcal{G}$. Consequently, $\chi(\mathcal{G})=n$, and hence, the EFL Conjecture holds for this particular instance.
The aim of this note is to present a very simple and straightforward algorithm that enables the construction of a proper colouring of the vertices of $\mathcal{G}$ with $n$ colours, when every shared vertex belongs to exactly two defining $n$-cliques, without having the need to first colour the edges of the corresponding complete graph $K_n$ and then transferring this colouring to $\mathcal{G}$ (as described above).
We first consider the special case when the number of shared vertices in $\mathcal{G}$ achieves the maximum value ${n \choose 2}$, that is, when every two defining $n$-cliques share a vertex and every shared vertex belongs to exactly two defining $n$-cliques. Lemma \ref{lemma main} gives the algorithm for the case when $n$ is even, and then with the help of Example \ref{Example EFL mod}, this is generalised to the case when $n$ is odd (see Proposition \ref{prop EFL algorithm1}). Finally, in Theorem \ref{Theorem EFL algorithm2}, we discuss how this algorithm can be utilised in the case when the number of shared vertices is less than $\binom{n}{2}$. In fact, our algorithm properly colours the shared vertices of a graph $\mathcal{G}\in\mathbb{EFL}_{n}$ (in which every shared vertex belongs to exactly two defining $n$-cliques) using at most $n$ colours. One can then extend this colouring to a proper $n$-colouring of all the vertices of $\mathcal{G}$, as explained above. In Section \ref{section last} we discuss a very natural reformulation of the EFL Conjecture, suggested in \cite{efl0}, in terms of clique-decompositions and edge-colourings. We end this note by suggesting Problem \ref{problem} which although we believe is captivating in itself, we think it can shed some further light on an eventual complete solution of the EFL Conjecture.
Although most of our terminology is standard, we refer the reader to \cite{BM} for further definitions and notation not explicitly stated.
\section{Main result}\label{section main}
For $i$ and $j$ in $\{\,1,\ldots,n\,\}$ and $i<j$, if the defining $n$-cliques $Q_{i}$ and $Q_{j}$ have a shared vertex, then this shared vertex is denoted by the ordered pair $(i,j)$. Let $\mathcal{G}_{n}$ be the unique graph in $\mathbb{EFL}_{n}$ having $\binom{n}{2}$ shared vertices, and let $\mathcal{V}_{n}\subset V(\mathcal{G}_{n})$ be the set of shared vertices of $\mathcal{G}_{n}$, that is, $\vert \mathcal{V}_{n}\vert=\binom{n}{2}$. In this case every shared vertex belongs to exactly two defining $n$-cliques. We remark that in what follows the complete residue system used when taking operations modulo $t$ is $\{\,1,\ldots, t\,\}$. We first consider the case when $n$ is an even integer.
\begin{lemma}\label{lemma main}
Let $n\geq 2$ be an even integer. The function $c:\mathcal{V}_{n}\rightarrow \{\,1,\ldots, n-1\,\}$, such that
\begin{linenomath}
$$c\big((i,j)\big)\equiv\left\{
\begin{array}{rl}
i+j\pmod{n-1} & \text{if } j<n,\\
2i\pmod{n-1} & \text{if } j=n,
\end{array}\right.
$$
\end{linenomath}
is a proper $(n-1)$-colouring of the vertices in $\mathcal{V}_{n}$.
\end{lemma}
\begin{proof}
Let $(i,j)$ and $(k,\ell)$ be two adjacent vertices. It suffices to show that $c\big((i,j)\big)\neq c\big((k,\ell)\big)$. For contradiction, we suppose that $c\big((i,j)\big)= c\big((k,\ell)\big)$. We first note that exactly one of $i$ and $j$ has to be equal to exactly one of $k$ and $\ell$. There are five cases that need to be considered.
\noindent\textbf{Case 1.} $i=k$ and either $j<\ell<n$ or $\ell<j<n$.
Notwithstanding whether $j<\ell<n$ or $\ell<j<n$, we have $i+j\equiv k+\ell\equiv i+\ell\pmod{n-1}$, implying that $j\equiv \ell\pmod{n-1}$, a contradiction.\,\,\,{\tiny$\blacksquare$}
\noindent\textbf{Case 2.} $j=\ell<n$ and either $i<k$ or $k<i$.
Notwithstanding whether $i<k$ or $k<i$, we have $i+j\equiv k+\ell \equiv k+j\pmod{n-1}$, implying that $i\equiv k\pmod{n-1}$, a contradiction.\,\,\,{\tiny$\blacksquare$}
\noindent\textbf{Case 3.} Either $j=k$ or $i=\ell$.
Without loss of generality, let $j=k$. Then, $i<\ell\leq n$ and $j<n$. Assuming first that $\ell<n$, we get that $i+j\equiv k+\ell\equiv j+\ell\pmod{n-1}$, implying that $i\equiv j\pmod{n-1}$, a contradiction. Thus, $\ell=n$, and $i+j\equiv 2k\equiv 2j\pmod{n-1}$, implying that $i\equiv j\pmod{n-1}$, a contradiction once again.\,\,\,{\tiny$\blacksquare$}
\noindent\textbf{Case 4.} $j=\ell=n$ and either $i<k$ or $k<i$.
Notwithstanding whether $i<k$ or $k<i$, we have $2i\equiv 2k\pmod{n-1}$, and since $n$ is even, this implies that $i\equiv k\pmod{n-1}$, a contradiction.\,\,\,{\tiny$\blacksquare$}
\noindent\textbf{Case 5.} $i=k$ and either $j<\ell=n$ or $\ell<j=n$.
Without loss of generality, let $j<\ell=n$. Then, $i+j\equiv 2k\equiv 2i\pmod{n-1}$, implying that $j\equiv i\pmod{n-1}$, a contradiction.\,\,\,{\tiny$\blacksquare$}
Hence $c$ is a proper $(n-1)$-colouring of the shared vertices of $\mathcal{G}_{n}$.
\end{proof}
Since every defining $n$-clique of $\mathcal{G}_{n}$ contains $n-1$ shared vertices, it can be easily seen that the proper $(n-1)$-vertex-colouring given in Lemma \ref{lemma main} can be extended to a proper $n$-vertex-colouring of $\mathcal{G}_{n}$, by assigning a unique new colour to all the vertices in $V(\mathcal{G}_{n})-\mathcal{V}_{n}$, implying that $\chi(\mathcal{G}_{n})=n$.
\begin{example}\label{Example EFL mod}
Here, we use the colouring explained in Lemma \ref{lemma main} to obtain a proper $9$-colouring of the shared vertices of $\mathcal{G}_{10}$. Note that addition is taken modulo $9$.
\begin{table}[H]
\centering
\begin{tabular}{ccccccccc}
\addlinespace[-\aboverulesep]
\cmidrule[\heavyrulewidth]{1-9}
\multicolumn{9}{c}{\emph{Colours}} \\
1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 &9 \\
\cmidrule{1-9}
(1,9) & (5,6) & (1,2) & (1,3) & (1,4) & (1,5) & (1,6) & (1,7) & (1,8) \\
(2,8) & (2,9) & (3,9) & (4,9) & (2,3) & (2,4) & (2,5) & (2,6) &(2,7) \\
(3,7) & (3,8) & (4,8) & (5,8) & (5,9) & (6,9) & (3,4) & (3,5) & (3,6) \\
(4,6) & (4,7) & (5,7) & (6,7) & (6,8) & (7,8) & (7,9) & (8,9) &(4,5) \\
\textbf{(5,10)} & \textbf{(1,10)} & \textbf{(6,10)} & \textbf{(2,10)} & \textbf{(7,10)} & \textbf{(3,10)} & \textbf{(8,10)} & \textbf{(4,10)}&\textbf{(9,10)} \\ \cmidrule[\heavyrulewidth]{1-9}
\end{tabular}
\caption{A proper $9$-colouring of the shared vertices in $\mathcal{G}_{10}$}
\end{table}
Colour $10$ is then given to the vertices in $V(\mathcal{G}_{10})-\mathcal{V}_{10}$.
\end{example}
Note that if we remove the shared vertices marked bold, the above table reduces to a proper $9$-colouring of the shared vertices of $\mathcal{G}_{9}$. In fact, let $n\geq 3$ be an odd integer. The above idea can be used for a proper $n$-colouring of the shared vertices of $\mathcal{G}_{n}$. More precisely, by the algorithm given in Lemma \ref{lemma main}, we know that the colouring
\begin{linenomath}
$$c\big((i,j)\big)\equiv\left\{
\begin{array}{rl}
(i+j)\pmod{n} & \text{if } j<n+1,\\
2i\pmod{n} & \text{if } j=n+1,
\end{array}\right.
$$
\end{linenomath}
provides a proper $n$-colouring of the shared vertices of $\mathcal{G}_{n+1}$, since $n+1$ is even. Restricting the above colouring to those shared vertices $(i,j)$ for which $j\leq n$, we get that $c\big((i,j)\big)\equiv i+j\pmod{n}$ is a proper $n$-colouring of the shared vertices of $\mathcal{G}_{n}$. As before, this gives a proper $n$-colouring of the vertices in $\mathcal{G}_{n}$, implying that $\chi(\mathcal{G}_{n})=n$. The next proposition follows immediately from the above arguments.
\begin{proposition}\label{prop EFL algorithm1} If $n$ is even, then
\begin{linenomath}
$$c\big((i,j)\big)\equiv\left\{
\begin{array}{rl}
i+j\pmod{n-1} & \text{if } j<n,\\
2i\pmod{n-1} & \text{if } j=n,
\end{array}\right.
$$
\end{linenomath}
is a proper $(n-1)$-colouring of the shared vertices of $\mathcal{G}_{n}$. Otherwise, if $n$ is odd, $c\big((i,j)\big)\equiv i+j\pmod{n}$ is a proper $n$-colouring of the shared vertices of $\mathcal{G}_{n}$. \qed
\end{proposition}
Now, let $\mathcal{G}\in\mathbb{EFL}_{n}$ for some integer $n$, such that every shared vertex belongs to exactly two defining $n$-cliques. Let $\mathcal{V}$ be the set of all the shared vertices of $\mathcal{G}$. Then, $\mathcal{V}\subseteq \mathcal{V}_{n}$, and so $\vert \mathcal{V}\vert \leq \binom{n}{2}$. Consider the case when $\vert \mathcal{V}\vert < \binom{n}{2}$. The colouring $c$ given in Proposition \ref{prop EFL algorithm1} is also a proper colouring of the shared vertices of $\mathcal{G}$ using at most $n$ colours, since every shared vertex of $\mathcal{G}$ corresponds to a shared vertex of $\mathcal{G}_{n}$. Hence, the algorithm described in Proposition \ref{prop EFL algorithm1} can be used to provide a proper colouring of the shared vertices of $\mathcal{G}$ using at most $n$ colours. As before, this can then be extended to a proper $n$-colouring of all the vertices of $\mathcal{G}$. We can thus summarise the above results in the following theorem.
\begin{theorem}\label{Theorem EFL algorithm2} Let $\mathcal{G}\in\mathbb{EFL}_{n}$ such that every shared vertex of $\mathcal{G}$ belongs to exactly two defining $n$-cliques of $\mathcal{G}$.
\begin{enumerate}[(i)]
\item If $n$ is even, then
\begin{linenomath}
$$c\big((i,j)\big)\equiv\left\{
\begin{array}{rl}
i+j\pmod{n-1} & \text{if } j<n,\\
2i\pmod{n-1} & \text{if } j=n,
\end{array}\right.
$$
\end{linenomath}
is a proper $(n-1)$-colouring of the shared vertices of $\mathcal{G}$.
\item If $n$ is odd, then $c\big((i,j)\big)\equiv i+j\pmod{n}$ is a proper $n$-colouring of the shared vertices of $\mathcal{G}$.
\item $\chi(\mathcal{G})=n$.
\end{enumerate}
\end{theorem}
\section{Clique-decompositions and edge-colourings}\label{section last}
We would like to end this note by recalling that, as also indicated above in Section \ref{section intro}, the EFL Conjecture can be restated in a very simple and intuitive way in terms of clique-decompositions and edge-colourings of the complete graph. As far as we know, this re-statement of the EFL Conjecture was first suggested in \cite{efl0}. Let $H$ be a simple graph on $n$ vertices. A \emph{clique-decomposition} of $H$ is a collection $\mathcal{D}=\{\,D_{1},\ldots, D_{k}\,\}$ of subgraphs of $H$, such that each $D_{i}$ is a clique, and each edge of $H$ belongs to exactly one clique from $\mathcal{D}$. We denote a clique-decomposition $\mathcal{D}$ of a graph $H$ as $(H,\mathcal{D})$. An \emph{$n$-colouring of $(H,\mathcal{D})$} is an assignment of $n$ colours to the elements of $\mathcal{D}$ such that if $V(D_{i})\cap V(D_{j})\neq\emptyset$, for some $i\neq j$, then the colours of $D_{i}$ and $D_{j}$ are distinct. One can easily visualise this as an edge-colouring (not necessarily proper) of $H$ in which the edges in each $D_{i}$ are monochromatic, and if for some $i\neq j$, $V(D_{i})\cap V(D_{j})\neq\emptyset$, then the edges of $D_{i}$ have a different colour than the edges of $D_{j}$.
Let $G\in \mathbb{EFL}_{n}$ (not necessarily with every shared vertex belonging to exactly two defining $n$-cliques), and let $H$ be the graph on $n$ vertices, say $v_{1},\ldots, v_{n}$, with edge-set $\{\,v_{i}v_{j}\mid V(Q_{i})\cap V(Q_{j})\neq\emptyset, \textrm{ for } i\neq j\,\}$. We consider the following clique-decomposition of $H$. Let $\{\,u_{1}, \ldots, u_{k}\,\}$ be the set of shared vertices of $G$, and, for every $t\in\{\,1,\ldots, k\,\}$, let $\mathcal{I}_{t}$ be the set of indices of all the defining $n$-cliques containing $u_{t}$. Furthermore, for each $t\in\{\,1,\ldots, k\,\}$, we let $D_{t}$ be the subgraph of $H$ induced by the vertices $\{\,v_{i}\mid i\in\mathcal{I}_{t}\,\}$. Consequently, the set $\mathcal{D}=\{\,D_{1}, \ldots, D_{k}\,\}$ is a clique-decomposition of $H$. If there exists an $n$-colouring of $(H,\mathcal{D})$, then there exists a vertex colouring of the shared vertices of $G$ using $n$ colours, implying that $\chi(G)=n$.
In general, one can deduce that every graph in $\mathbb{EFL}_{n}$ gives rise to a simple graph on $n$ vertices with a clique-decomposition, and by a similar argument, every simple graph on $n$ vertices with a clique-decomposition corresponds to some graph in $\mathbb{EFL}_{n}$. The case considered in Section \ref{section main} corresponds to the case when every clique in $\mathcal{D}$ is a $2$-clique. Moreover, as in the previous section, if one can show that for every possible clique-decomposition $\mathcal{D}$ of $K_{n}$, $(K_{n},\mathcal{D})$ admits an $n$-colouring, then the EFL Conjecture would be true. Although a proof of the conjecture for all sufficiently large values of $n$ was recently announced \cite{kuhn}, we still believe that such a problem deserves to be studied further and solved for the other instances as well, as this could give insights into related areas such as clique-decompositions and edge-colourings of graphs, which have been already studied such as in \cite{clique1,clique2}. In this sense, we suggest the following problem which we think could be a possible way forward.
\begin{problem}\label{problem}
Let $\mathcal{D}$ be a clique-decomposition of $K_{n}$, such that every clique in $\mathcal{D}$ is either a 2-clique or a $r$-clique, for some fixed $r\in\{\,3,\ldots, n-1\,\}$. Determine whether $(K_{n},\mathcal{D})$ has an $n$-colouring, and, if in the affirmative, whether an efficient algorithm to find such an $n$-colouring exists.
\end{problem}
\end{document} |
\begin{document}
\title{scRAE: Deterministic Regularized Autoencoders with Flexible Priors for Clustering Single-cell Gene Expression Data}
\author{Arnab~Kumar~Mondal\textsuperscript{\textasteriskcentered}, Himanshu~Asnani\textsuperscript{\textdagger}, Parag~Singla\textsuperscript{\textasteriskcentered}, and Prathosh~AP\textsuperscript{\textasteriskcentered}
\thanks{Email id: [email protected], [email protected],
[email protected], [email protected]}
\thanks{\textasteriskcentered~indicated authors are affiliated with IIT Delhi. \textdagger~indicated author is affiliated with TIFR, Mumbai.}
}
\markboth{IEEE/ACM Transactions on Computational Biology and Bioinformatics}
{Mondal \MakeLowercase{\textit{et al.}}: scRAE: Deterministic RAEs with Flexible Priors for Clustering Single-cell Gene Expression Data}
\IEEEtitleabstractindextext{
\begin{abstract}
Clustering single-cell RNA sequence (scRNA-seq) data poses statistical and computational challenges due to their high-dimensionality and data-sparsity, also known as `dropout' events. Recently, Regularized Auto-Encoder (RAE) based deep neural network models have achieved remarkable success in learning robust low-dimensional representations. The basic idea in RAEs is to learn a non-linear mapping from the high-dimensional data space to a low-dimensional latent space and vice-versa, simultaneously imposing a distributional prior on the latent space, which brings in a regularization effect. This paper argues that RAEs suffer from the infamous problem of bias-variance trade-off in their naive formulation. While a simple AE without a latent regularization results in data over-fitting, a very strong prior leads to under-representation and thus bad clustering. To address the above issues, we propose a modified RAE framework (called the scRAE) for effective clustering of the single-cell RNA sequencing data. scRAE consists of deterministic AE with a flexibly learnable prior generator network, which is jointly trained with the AE. This facilitates scRAE to trade-off better between the bias and variance in the latent space. We demonstrate the efficacy of the proposed method through extensive experimentation on several real-world single-cell Gene expression datasets. The code for our work is available at \url{https://github.com/arnabkmondal/scRAE}.
\end{abstract}
\begin{IEEEkeywords}
Dimensionality Reduction of scRNA-seq data, Clustering of scRNA-seq data, Regularized Auto-Encoder, scRAE
\end{IEEEkeywords}}
\maketitle
\IEEEraisesectionheading{\section{Introduction}}
\IEEEPARstart{S}{ingle} cell RNA sequencing (scRNA-seq) is an emerging technology that facilitates analysis of the genome or transcriptome information from an individual cell.
Availability of large-scale scRNA-seq datasets has opened up new avenues in cancer research \cite{dalerba2011single, navin2011tumour}, embryonic development \cite{marks2010, Tang2010} and many more.
To be able to accomplish a downstream machine learning task on the scRNA-seq data, a critical first step is to learn a `compact' representation of it or reduction of the data dimensionality. However, learning such compact representations is not straightforward due to the highly-sparse nature of scRNA-seq. This arises from a phenomenon called `dropout', where a gene is expressed at a low or moderate level in one cell but remains undetected in another cell of the same cell type \cite{Kharchenko2014}. Dropout events occur because of the stochasticity of mRNA expression, low amounts of mRNA in individual cells and shallow sequencing depth per cell of the sequencing technologies\cite{Angerer2017} used. The excessive zero counts due to dropout cause the data to be zero-inflated and only a small fraction of the transcriptome of each cell is captured effectively. Consequently, the traditional dimensionality reduction methods fail because of excessive zero expression measurement and high variation in gene expression levels among the same type of cell. Motivated by the aforementioned challenges we make the following contributions:
\begin{enumerate}
\item We propose a novel regularized Auto Encoder (AE)-based architecture for dimensionality reduction of scRNA-seq data.
\item Our method introduces an additional state space in the objective of regularized AEs so that the latent prior becomes learnable.
\item The proposed architecture flexibly trades-off between bias (prior imposition) and variance (prior learning) and facilitates operation at different points of the bias-variance curve.
\item We demonstrate the efficacy of the proposed method in learning a compact representation via clustering and visualization task through extensive experimentation on several scRNA-seq datasets.
\end{enumerate}
\section{Related Work}
\subsection{Classical Methods on Dimensionality reduction}
Clustering through Imputation and Dimensionality Reduction (CIDR) \cite{Lin2017CIDR} is a fast algorithm that uses an implicit imputation approach to reduce the effect of dropouts in scRNA-seq data. CIDR computes a dissimilarity matrix between the imputed gene expression profiles for every pair of single cells and performs principal coordinate analysis (PCoA). Finally, clustering is performed using the first few principal coordinates. Single-cell Interpretation via Multi-kernel Learning (SIMLR) \cite{Wang2017SIMLR} performs dimensionality reduction by learning a cell-to-cell similarity matrix from the input single-cell data. To learn the similarity matrix, SIMLR learns weights for multiple kernel learning. SIMLR addresses dropout events by employing a rank constraint in the learned cell-to-cell similarity and graph diffusion. For clustering, affinity propagation (AP) can be applied to the learned similarity matrix, or k-means clustering can be used in the latent space after applying SIMLR for dimension reduction. SEURAT \cite{Satija2015SEURAT} is a sequential process involving multiple steps of normalisation, transformation, decomposition, embedding, and clustering of the scRNA-seq data. Single-cell consensus clustering (SC3) \cite{Kiselev2017SC3} continually integrates different clustering solutions through a consensus approach. SC3 first applies Euclidean, Pearson and Spearman metrics to construct distance matrices between the cells. Next, the distance matrices are transformed using either PCA \cite{Jolliffe2011} or by computing the eigen-vectors of the associated graph Laplacian, followed by k-means clustering. Finally, SC3 deploys the cluster-based similarity partitioning algorithm (CSPA) to compute the consensus matrix, which is clustered using hierarchical clustering with complete agglomeration. However, SC3 is computationally heavy as it uses ensemble clustering.
Single-cell Aggregated From Ensemble (SAFE) \cite{SAFE} is another consensus clustering method that takes the cluster outputs of the four algorithms: SC3 \cite{Kiselev2017SC3}, CIDR \cite{Lin2017CIDR}, Seurat \cite{Satija2015SEURAT} and t-SNE \cite{tsne} + k-means as input and ensembles them using three hypergraph-based partitioning algorithms: hypergraph partitioning algorithm (HGPA), meta-cluster algorithm (MCLA) and cluster-based similarity partitioning algorithm (CSPA). SAMEClustering \cite{SAMEClustering}, Single-cell RNA-seq Aggregated clustering via Mixture model Ensemble, proposes an ensemble framework that uses SC3, CIDR, Seurat, t-SNE + k-means and SIMLR to obtain individual cluster solutions. Finally, it chooses a maximally diverse subset of four, according to variation in pairwise Adjusted Rand Index (ARI), and solves for an ensemble cluster solution using EM algorithms. RAFSIN \cite{rafsin} utilizes random forest graphs to cluster scRNA-seq data. scPathwayRF \cite{pathway} proposes a pathway-based random forest framework for clustering single-cell expression data. LRSEC \cite{LRSEC} assumes the scRNA-seq data exist in multiple subspaces and develops an ensemble clustering framework by using a low-rank model as the basic learner to find the lowest rank representation of the data in the subspace.
\subsection{Deep-Learning based Dimensionality Reduction}
Deep learning based methods perform better as compared to the traditional methods. Autoencoders \cite{hinton2006reducing} are deep neural networks consisting of an encoder network and a decoder network. The encoder network projects high dimensional data to a low dimensional latent space and the decoder network reconstructs the original data from the compressed latent code. The encoder-decoder pair is trained to minimize reconstruction error using stochastic gradient descent. Consequently, autoencoders provide an unsupervised methodology of learning compressed representations of high dimensional data. It has been shown that minimizing a regularized reconstruction error yields an encoder-decoder pair that locally characterizes the shape of the data-generating density \cite{rae_data_dist}. Single-cell Variational Inference (scVI) \cite{Lopez2018SCVI} is an autoencoder based fully probabilistic approach to normalize and analyze scRNA-seq data. It adapts a hierarchical Bayesian model and parameterizes conditional distributions using deep neural networks. Sparse Autoencoder for Unsupervised Clustering,
Imputation, and Embedding (SAUCIE) \cite{amodio2019exploring} is a sparse autoencoder for unsupervised clustering, imputation and embedding. DR-A \cite{Lin2020} implements a deep adversarial variational autoencoder based framework. They propose a novel architecture Adversarial Variational AutoEncoder with Dual Matching (AVAE-DM). An encoder-decoder pair learns to project scRNA-seq data to a low dimensional manifold and reconstructs it. Two discriminators are trained in the data-space and latent-space, respectively, to discriminate between real and fake samples. scVAE \cite{scVAE} adapts the framework of the Variational AutoEncoder (VAE) \cite{kingma2013autoencoding} for analysing scRNA-seq data. scVAE makes use of a likelihood function based on a zero-inflated Poisson distribution and a zero-inflated negative binomial distribution to model `dropout' events in scRNA-seq data. scVAE assumes either a Gaussian or a mixture of Gaussians prior.
scDeepCluster \cite{Tian2019scDeepCluster} combines Deep Embedding clustering \cite{dec, improvedDEC} with denoising Deep Count Autoencoder (DCA) \cite{Eraslan2019DCA} to analyze and cluster scRNA-seq data. scziDesk \cite{scziDesk} extends the idea of scDeepCluster \cite{Tian2019scDeepCluster} by introducing weighted soft K-means clustering with inflation operation. GraphSCC \cite{zeng2020accurately} exploits graph convolutional network \cite{GCN} to cluster cells based on scRNA-seq data by accounting structural relations between cells. scGNN\cite{scGNN} is another framework that exploits graph neural networks to formulate and aggregate inter-cell relationships and provide a hypothesis-free deep learning framework for scRNA-Seq analyses. Contrastive-sc \cite{ciortan2021contrastive} is a self-supervised algorithm for clustering scRNA sequence.
scDCC \cite{tian2021model} incorporates domain knowledge as prior information into the modeling process to guide the deep neural framework to simultaneously learn more informative latent representations and biologically meaningful clusters.
\subsection{Bias-Variance Trade-off in RAEs}
Deep-learning based representation learning techniques discover useful low-dimensional features of high-dimensional data. Auto-encoder based generative models such as VAE \cite{kingma2013autoencoding} and its variants \cite{Lopez2018SCVI, scVAE} or AAE \cite{AAE} and its variants \cite{Lin2020, MaskAAE, FlexAE} are a few examples of deep learning based models that are used for dimensionality reduction. They do so by learning a projection from high-dimensional data-space to a low-dimensional latent space (encoder) and an inverse projection from the low dimensional representation space to the original data-space (decoder). The encoded latent space is also regularized to conform to some known primitive distribution. However, this regularization increases bias in the network and makes learning good representations difficult. On the other hand, if the latent space is completely unregularized, the model might memorize unique codes per sample in the finite data regime. In other words, the learnt latent space becomes non-smooth, resulting in increased variance (over-fitting) of the model. This is the infamous bias-variance trade-off that warrants a flexible prior which could facilitate the operation of an AE-model at different points of the bias-variance curve, which forms the basis for our proposed work.
\section{Proposed Method}
\subsection{Regularized Generative Autoencoders}
An AE based generative model combines the task of inference and generation by modelling the joint distribution over high-dimensional data space, $\mathcal{X}$ and low-dimensional latent space, $\mathcal{Z}$. The joint inference distribution is defined as $Q_\phi({\bm{x}}, {\bm{z}})=P_d({\bm{x}})Q_\phi({\bm{z}}|{\bm{x}})$ and the joint generative distribution is defined as $P_\theta({\bm{x}}, {\bm{z}})=Q_\psi({\bm{z}})P_\theta({\bm{x}}|{\bm{z}})$,
where, $P_d({\bm{x}})$ represents the true data distribution over $\mathcal{X}$. $Q_\phi({\bm{z}}\lvert{\bm{x}})$ is the posterior distribution of the latent code given data point. $Q_\phi({\bm{z}})=\int P_d({\bm{x}})Q_\phi({\bm{z}}\lvert{\bm{x}})d{\bm{x}}$, is the aggregated posterior. $Q_\phi({\bm{z}}\lvert{\bm{x}})$ is parameterized by a neural network called Encoder, $E_\phi$, that projects the input data ${\bm{x}} \in \mathcal{X}$ to a latent code ${\bm{z}} \in \mathcal{Z}$. $P_\theta({\bm{x}}|{\bm{z}})$ is parameterized by another neural network called Decoder, $D_\theta$, which learns the inverse mapping from ${\bm{z}} \in \mathcal{Z}$ to ${\bm{x}} \in \mathcal{X}$. $P_Z({\bm{z}})$ denotes the prior distribution over the latent space, $\mathcal{Z}$. $\phi \in \Phi$, and $\theta \in \Theta$ are vectors of learnable parameters. Mathematically, a RAE solves the following optimization objective:
\begin{equation}
\begin{gathered}
\mathop{\inf}_{\phi, \theta}\Bigg(\mathop{\mathbb{E}}_{P_d({\bm{x}})}\mathop{\mathbb{E}}_{Q_\phi({\bm{z}}|{\bm{x}})}\bigg[c\Big({\bm{x}}, D_\theta\big(E_\phi({\bm{x}})\big)\Big)\bigg]\Bigg)
\\\text{such that } Q_\phi({\bm{z}}) = P_Z({\bm{z}})
\label{eqn:rae_constrained_obj}
\end{gathered}
\end{equation}
where $c:\mathcal{X} \times \mathcal{X} \to \mathbb{R}^+$ denotes any measurable cost function (such as Mean Square Error (MSE) and Mean Absolute Error (MAE)). The remaining notations have their usual meaning as defined above.
The above constrained optimization objective can equivalently be written as an unconstrained optimization problem by introducing a Lagrangian:
\begin{gather}
\begin{split}
D_{RAE} &= \mathop{\inf}_{\phi, \theta}\Bigg(\underbrace{\mathop{\mathbb{E}}_{P_d({\bm{x}})}\mathop{\mathbb{E}}_{Q_\phi({\bm{z}}|{\bm{x}})}\Big[c\Big({\bm{x}}, D_\theta\big(E_\phi({\bm{x}})\big)\Big)\Big]}_{\text{a}} +\\ &\qquad\lambda \cdot \underbrace{\phantom{\mathop{\mathbb{E}}_{Q_\phi({\bm{z}}|{\bm{x}})}}D_{Z}\big(Q_\phi({\bm{z}}),P_Z({\bm{z}})\big) }_\text{b}\Bigg)
\end{split}
\label{eqn:rae_relaxed_obj}
\end{gather}
where $D_{Z}(\cdot)$ denotes any divergence measure, such as the Kullback-Leibler divergence, the Jensen-Shannon divergence, or the Wasserstein distance, between two distributions, $\lambda$ is the Lagrange multiplier, and the rest of the symbols have their usual meaning as defined before.
\subsection{scRAE}
\begin{figure*}
\caption{The novel architecture of scRAE consists of a reconstruction pipeline (the autoencoder) and a P-GEN network. The high-dimensional sparse single cell RNA sequence is mapped to a dense low dimensional latent representation using the encoder network, $E_\phi$ and the original sequence is reconstructed using a decoder network, $D_\theta$. The encoded latent space is constrained by the P-GEN network which consists of a latent generator network, $G_\psi$ and a critic, $C_\kappa$. $G_\psi$ learns the prior flexibly based on feedback from $C_\kappa$. The learnt latent representations are clustered corresponding to different cell types in the dataset. The entire network is trained in an end-to-end fashion. Solid yellow arrows in the left-right direction illustrates the forward path and the color coded dashed arrows in the right-left direction illustrates the backward flow of gradients due to different terms in the optimization objective.}
\label{fig:bd}
\end{figure*}
In this work, we argue that when the prior is extremely simple, such as a Gaussian, it increases the bias in the network and prevents the autoencoder from discovering the true structure. On the other hand, when the latent space is not restricted, due to increased variance the network tends to memorize the training samples, leading to overfitting. As a remedial measure we propose to learn the prior jointly and flexibly along with AE training.\par
As described in Figure \ref{fig:bd}, scRAE consists of four neural nets. The Encoder network, $E_\phi$ maps a high dimensional sparse scRNA sequence to a low dimensional dense latent code, ${\bm{z}}\sim Q_\phi$ and the Decoder network, $D_\theta$ learns an inverse mapping. These two networks together constitute the reconstruction pipeline and are trained to minimize a reconstruction loss. The Generator network, $G_\psi$ and the critic network, $C_\kappa$ form the P-GEN network, which simultaneously learns to bring $P_\psi$ closer to $Q_\phi$ and regularizes the encoded latent space.\par
scRAE is trained in an end-to-end fashion. The encoded aggregated posterior distribution, $Q_\phi({\bm{z}}) = \int Q_\phi ({\bm{z}}|{\bm{x}}) P_d({\bm{x}}) d{\bm{x}}$ acts as the target prior for the generator network, $G_\psi$. On the other hand, the learnt prior distribution, $P_\psi({\bm{z}})$ regularizes the learnt latent space. The objective function of scRAE is:
\begin{equation}
\begin{split}
D_{scRAE}&=\mathop{\inf}_{\psi, \phi, \theta}\Bigg(\underbrace{\mathop{\mathbb{E}}_{P_d({\bm{x}})}\mathop{\mathbb{E}}_{Q_\phi({\bm{z}}|{\bm{x}})}\Big[c\big({\bm{x}}, D_\theta({\bm{z}})\big)\Big]}_{\text{a}} + \\
&\qquad\lambda \cdot \underbrace{ \phantom{\mathop{\mathbb{E}}_{P({\bm{x}})}} D_{Z}(Q_\phi({\bm{z}})||P_\psi({\bm{z}})) }_\text{b}\Bigg)
\end{split}
\label{eqn:scRAE_obj}
\end{equation}, where $\lambda$ denotes the Lagrange multiplier\footnote{Theoretically, the objective should be optimized w.r.t. the Lagrange multiplier $\lambda$. However, in practical implementations \cite{WAE} it is considered to be a hyper-parameter.}, and the remaining notations are as defined before. \par
One way to visualize Equation \ref{eqn:scRAE_obj} is that the objective is to minimize a reconstruction error (term a) regularized by a distributional divergence (term b).
Note that, AAE \cite{AAE} and WAE \cite{WAE} are special cases of scRAE when the generator network, $G_\psi$ is an identity function.\par
\subsection{Realization of scRAE}
For implementation, we use a Zero-inflated negative binomial (ZINB) based negative log-likelihood objective for $c$ in term (a) of Eq. \ref{eqn:scRAE_obj}. $D_Z$, in principle, can be chosen to be any distributional divergence such as the Kullback-Leibler divergence (KLD), the Jensen–Shannon divergence (JSD), the Wasserstein distance and so on. In this work, we propose to use the Wasserstein distance and utilize the principles laid out in \cite{arjovsky2017wasserstein, gulrajani2017improved} to optimize the divergence (term (b) in Equation \ref{eqn:scRAE_obj}). The loss functions used for the different blocks of scRAE are as follows:
\begin{enumerate}
\item {Likelihood Loss - Realization of Term a in Eq. \ref{eqn:scRAE_obj}: The Zero-inflated negative binomial (ZINB) model is suitable for modeling count variables with excessive zeros and it is usually used for overdispersed count variables. In order to handle the `dropout' event in scRNA-seq data, $P_\theta({\bm{x}}|{\bm{z}})$ is modelled as a zero inflated negative binomial (ZINB) distribution in scRAE. ZINB is defined as:
\begin{equation}
f_{ZINB}(x) = \begin{cases}
\pi + (1 - \pi)f_{NB}(x) ~if~x=0\\
(1 - \pi)f_{NB}(x) ~ if~ x > 0
\end{cases}
\end{equation}
\begin{equation}
f_{NB}(x) = \frac{\Gamma (x + \alpha^{-1})}{\Gamma(\alpha^{-1})\Gamma(x+1)}\bigg(\frac{1}{1+\alpha\mu}\bigg)^{\frac{1}{\alpha}}\bigg(\frac{\alpha\mu}{1+\alpha\mu}\bigg)^{x}
\end{equation}, where $\pi$ denotes the probability of excessive zeros, $\alpha$ is the dispersion parameter, and $\mu$ denotes the mean. With the logit $l = \log \frac{\pi}{1 - \pi}$, the log-likelihood of the ZINB distribution is defined as follows:
\begin{equation}
\mathcal{L}_{ZINB} = \begin{cases}
\sum -Softplus(-l) + Softplus\Bigg(\\\qquad-l + \frac{1}{\alpha}\log\bigg(\frac{1}{1+\alpha\mu}\bigg)\Bigg)~if~x=0 \\
\sum -Softplus(-l)-l+\\\qquad\log\big(f_{NB}(x)\big) ~~~~~~~~~~~if~ x > 0
\end{cases}
\end{equation}
The encoder-decoder pair is trained to minimize the negative log-likelihood, $-\mathcal{L}_{ZINB}$ defined under ZINB distribution as reconstruction loss.
\begin{equation}
L_{Reconstruction} = -\mathcal{L}_{ZINB}
\label{eqn:nll}
\end{equation}}
From an implementation point of view, $\mu$, $\frac{1}{\alpha}$ and $l$ are learnable parameters. Specifically, if $h_\theta$ denotes the final hidden layer of the decoder network, two fully connected dense layers are used to learn the logit, $l$, and the mean, $\mu$. The dispersion parameter, $\frac{1}{\alpha}$, is learnt as a standalone vector.
\begin{equation}
\mu = \exp(s) \times Softmax(W_\mu h_\theta)
\end{equation}, where $s \sim \mathcal{N}(\mu_s, \sigma_s)$. $\mu_s$ and $\sigma_s$ are one-dimensional outputs of the encoder network. The network is trained to match $\mathcal{N}(\mu_s, \sigma_s)$ to $\mathcal{N}(\mu_G, \sigma_G)$ by minimizing the KL divergence between the two Gaussian distributions. $\mu_G$ and $\sigma^2_G$ are respectively the mean and the variance of the log library size. Note that the choice of activation ensures the mean, $\mu$, is always non-negative.
\begin{equation}
\alpha^{-1} = \exp({\bm{v}})
\end{equation}, where ${\bm{v}}$ is a randomly initialized independent learnable vector. Like $\mu$, the dispersion parameter is also non-negative by design choice.\\
Finally, the logit, $l$ is the final output of decoder
\begin{equation}
l = W_l h_\theta
\end{equation}
The loss function for the autoencoder can now be written as:
\begin{equation}
L_{AE} = \mathcal{L}_{ZINB} + \lambda D_{KL}\big(\mathcal{N}(\mu_s, \sigma_s)||\mathcal{N}(\mu_G, \sigma_G)\big)
\label{eqn:ae_loss}
\end{equation}
\item {Wasserstein Loss - We use the Wasserstein distance \cite{arjovsky2017wasserstein} for $D_Z$ (Term b in Eq. \ref{eqn:scRAE_obj}):}
\begin{equation}
\begin{split}
L_{Critic} &= \frac{1}{s}\sum_{i=1}^{s}C_\kappa(\hat{{\bm{z}}}^{(i)}) - \frac{1}{s}\sum_{i=1}^{s}C_\kappa ({\bm{z}}^{(i)}) +\\&\qquad \frac{\beta}{s}\sum_{i=1}^{s}\big(\big\lVert\nabla_{{\bm{z}}_{avg}^{(i)}}C_\kappa ({\bm{z}}_{avg}^{(i)})\big\rVert - 1\big)^2
\end{split}
\label{eqn:critic_loss}
\end{equation}
\begin{equation}
L_{Gen} = -\frac{1}{s}\sum_{i=1}^{s}C_\kappa(\hat{{\bm{z}}}^{(i)})
\label{eqn:gen_loss}
\end{equation}
\begin{equation}
L_{Enc} = \frac{1}{s}\sum_{i=1}^{s}C_\kappa({\bm{z}}^{(i)})
\label{eqn:enc_loss}
\end{equation}
\end{enumerate}
Where, ${\bm{z}}^{(i)} = E_{\phi}({\bm{x}}^{(i)})$, $\hat{{\bm{z}}}^{(i)} = G_{\psi}({\bm{n}}^{(i)})$ and ${\bm{n}}^{(i)} \sim \mathcal{N}(0, I)$. ${\bm{z}}_{avg}^{(i)} = \alpha{\bm{z}}^{(i)} + (1-\alpha)\hat{{\bm{z}}}^{(i)}$, $\alpha,\beta$ are hyper parameters, with $\alpha \sim \mathcal{U}[0, 1]$, and $\beta$ as in \cite{gulrajani2017improved}. $E_{\phi}, D_\theta, G_\psi$, and $C_\kappa$ denote the encoder, decoder, latent generator and critic respectively.\par
As mentioned above, the auto-encoder is required to be optimized jointly with the P-GEN to ensure regularization in the AE latent space. This regularization effectively enforces smoothness in the learnt latent space and prevents the AE from overfitting on the training examples. In order to be able to satisfy the above requirement in practice, we optimize each of the four losses (Equations \ref{eqn:ae_loss}--\ref{eqn:enc_loss}) specified above in every training iteration. Specifically, in each learning loop, we optimize $L_{AE}$, $L_{Gen}$, $L_{Enc}$, and $L_{Critic}$ in that order using a learning schedule. We use the Adam optimizer for our optimization. To stabilize the adversarial training, we utilize the principle outlined in WGAN-GP~\cite{gulrajani2017improved}.
\section{Experiments and Results}
\begin{table}[!t]
\caption{Summarized description of datasets used.}
\begin{center}
\resizebox{\columnwidth}{!}{
\begin{tabular}{c c c c c c c}
\hline
Dataset & Protocol & Sample Size & \# of Genes & \# Cell Types\\
\hline\hline
Klein-$2k$ \cite{klein2015droplet} & inDrop & $2,717$ & $24,175$ & $4$ \\
Han-$2k$ \cite{HAN2018MicorwellSeq} & Microwell-seq & $2,746$ & $19,079$ & $16$ \\
Zeisel-$3k$ \cite{Zeisel1138} & STRT-Seq UMI & $3,005$ & $19,972$ & $9$ \\
Segerstolpe-$3k$ \cite{segerstolpe2016single} & Smart-Seq2 & $3,514$ & $25,525$ & $13$ \\
Cao-$4k$ \cite{Cao2017} & sci-RNA-seq & $4,186$ & $11,955$ & $10$ \\
Baron-$8k$ \cite{baron2016single} & inDrop & $8,569$ & $20,125$ & $14$ \\
Macosko-$44k$ \cite{Macosko20151202} & Drop-seq & $44,808$ & $23,288$ & $39$\\
Zheng-$68k$ \cite{Zheng2017} & 10X & $68,579$ & $32,738$ & $10$\\
Zheng-$73k$ \cite{Zheng2017} & 10X & $73,233$ & $32,738$ & $8$\\
Rosenberg-$156k$ \cite{Rosenberg176} & SPLiT-Seq & $156,049$ & $26,894$ & $73$\\
\hline
\end{tabular}
}
\end{center}
\label{tab:dataset_summary}
\end{table}
To evaluate the efficacy of the proposed method, we have conducted two types of experiments using the lower dimensional embedding of high dimensional scRNA data: 1. Clustering (Refer Section \ref{sec:clustering_expt}) and 2. Visualization (Refer Section \ref{sec:data_vis_expt}). In both the experiments, the training is completely unsupervised as no label information is used. However, for performance evaluation and visualization the test labels are used. Further, to illustrate the scalability of the proposed method, we use ten real datasets of varying sample size as described in Section \ref{sec:datasets}.
\subsection{Datasets}\label{sec:datasets}
In this paper, the following ten datasets are considered for the evaluation.
\begin{enumerate}
\item \textbf{Klein-$2k$} \cite{klein2015droplet}: This dataset has $2,717$ single-cell transcriptomes with $24,175$ features from mouse embryonic stem cells. The cell labels for this dataset can only be considered `silver standard' as labels were assigned using computational methods and the authors' knowledge of the underlying biology.
\item \textbf{Han-$2k$} \cite{HAN2018MicorwellSeq}: The authors in \cite{HAN2018MicorwellSeq} constructed a `mouse cell atlas' with more than $400,000$ single cells covering all of the major mouse organs. In this paper, we consider $2,746$ samples from mouse bladder cells having $16$ distinct classes.
\item \textbf{Zeisel-$3k$} \cite{Zeisel1138}: This dataset consists of $3,005$ single cell transcriptomes from the primary somatosensory cortex (S1) and the hippocampal CA1 region of the juvenile mouse brain. As reported in \cite{Zeisel1138}, the dataset has nine major classes and forty seven distinct sub-classes comprising all known major cell types in the cortex region.
\item \textbf{Segerstolpe-$3k$} \cite{segerstolpe2016single}: The authors of \cite{segerstolpe2016single} sequenced the transcriptomes of human pancreatic islet cells from healthy and type 2 diabetic donors. The dataset consists of $3,514$ samples with $25,525$ features.
\item \textbf{Cao-$4k$} \cite{Cao2017}: This dataset has $4,186$ samples with $11,955$ features or genes from worm neuron cells. The expressions are categorized into $10$ distinct classes.
\item \textbf{Baron-$8k$} \cite{baron2016single}: This dataset has $8,569$ samples consisting of $20,125$ genes from human pancreatic islet. Cells can be divided into 14 clusters viz. `acinar', `activated stellate', `alpha', `beta', `delta', `ductal',`endothelial', `epsilon', `gamma', `macrophage', `mast', `quiescent stellate', `Schwann', and `t-cell'.
\item \textbf{Macosko-$44k$} \cite{Macosko20151202}: This dataset contains single-cell gene expression counts from $39$ types of $44,808$ mouse retinal cells.
\item \textbf{Zheng-$68k$} \cite{Zheng2017}: This dataset is comprised of $68,579$ single-cell transcriptomes of fresh peripheral blood mono-nuclear cells (PBMC) in a healthy human. The dataset has $10$ different types of cells.
\item \textbf{Zheng-$73k$} \cite{Zheng2017}: This dataset is created by combining $8$ separate datasets of different purified cell types. In this dataset, there are $73,233$ sequences from $8$ distinct cell types.
\item \textbf{Rosenberg-$156k$} \cite{Rosenberg176}: This dataset contains $156,049$ single-cell transcriptomes from postnatal day $2$ and $11$ mouse brains and spinal cords. $73$ distinct cell types were identified and reported in \cite{Rosenberg176}.
\end{enumerate}
Each dataset has been preprocessed and randomly partitioned into $80\%$ training and $20\%$ test samples. The following section describes the preprocessing pipeline.\par
\subsection{Preprocessing and Gene Selection}\label{sec:preprocessing}
Data preprocessing is the first crucial step in preparing the raw count data to suit the proposed framework scRAE. As outlined in \cite{amezquita2020orchestrating}, preprocessing of scRNA sequence includes the following three steps:
\begin{table*}[!ht]
\begin{center}
\caption{Comparison of the average normalized mutual information across $10$ real datasets achieved by different algorithms.}
\label{tab:average_nmi}
\begin{tabular}{cccccccc}
\hline
$n_z$ & scRAE & scziDesk \cite{scziDesk} & scVAE \cite{scVAE} & scDeepCluster \cite{Tian2019scDeepCluster} & DR-A \cite{Lin2020} & SAUCIE \cite{amodio2019exploring} & scVI \cite{Lopez2018SCVI} \\
\hline\hline
2 & \textbf{0.6975} & 0.6259 & 0.5755 & 0.5808 & 0.6441 & 0.4450 & 0.6164 \\
10 & \textbf{0.7093} & 0.6427 & 0.5887 & 0.6139 & 0.6744 & 0.4826 & 0.6271 \\
20 & \textbf{0.7126} & 0.6489 & 0.6132 & 0.6277 & 0.6823 & 0.4787 & 0.6405\\
\hline
\end{tabular}
\end{center}
\vspace{-3mm}
\end{table*}
\begin{enumerate}
\item \textbf{Selection and filtration of cells and genes for quality control:}
We eliminate each cell without at least one expressed gene and each gene expressed in less than ten cells to reduce the effect of low-quality cells and noisy expression in downstream analyses.
\item \textbf{Data normalization and scaling:}
We exclude very highly expressed genes from the computation of the normalization factor for individual cell; because these highly expressed genes would strongly influence the resulting normalized values for all other genes \cite{SPRING}. Further, the normalized count data is log-transformed to adjust for the mean-variance relationship.
\item \textbf{Feature/Gene selection:} Finally, a subset of high-variance attributes is selected for downstream analysis by modeling the variance across the cells for every gene and retaining the highly variable genes. This feature selection step reduces computational burden and noise from uninformative genes. Following \cite{Lin2020}, we select $720$ genes (features) that exhibit the highest inter-cell variance. Refer to Section \ref{sec:feature_dim_vs_nmi} for an ablation study to understand the impact of feature dimension. We have used the dispersion-based method outlined in \cite{Satija2015SEURAT} termed as the Highly Variable Gene (HVG) selection method. However, we have also experimented with two other popular methods, SCMarker \cite{SCMarker} and M3Drop \cite{M3Drop}, as detailed in Section \ref{gene_sel}.
\end{enumerate}
\begin{figure*}
\caption{Clustering performance of scRAE is compared against $6$ baseline methods. As can be seen from the figure, scRAE achieves highest NMI score for all of the ten datasets irrespective of the latent dimension. Higher NMI score indicates better cluster purity and better performance w.r.t. ground truth labels.}
\label{fig:nmi_comparison}
\end{figure*}
\subsection{Clustering}\label{sec:clustering_expt}
In order to benchmark the proposed algorithm scRAE\footnote{\url{https://github.com/arnabkmondal/scRAE}}, we compare its performance against $6$ state-of-the-art deep learning based methods as listed below:
scVI \cite{Lopez2018SCVI}, SAUCIE \cite{amodio2019exploring}, scDeepCluster \cite{Tian2019scDeepCluster}, scVAE \cite{scVAE}, DR-A \cite{Lin2020}, scziDesk \cite{scziDesk}.
We conduct experiments using ten real-world datasets as described in Section \ref{sec:datasets}. To assess the effectiveness of the proposed method, we evaluate the impact of the learnt representations on the performance of the K-means clustering algorithm. First, the gene expression data are compressed using the proposed method and the baseline methods. Next, the K-means clustering algorithm is used to compute cluster assignment on the compressed representation. To quantitatively evaluate the quality of clustering, we compute and report the normalized mutual information (NMI) scores \cite{NMI} in Figure \ref{fig:nmi_comparison} as in previous works such as DR-A \cite{Lin2020} and scDeepCluster \cite{Tian2019scDeepCluster}. NMI is defined as follows:
\begin{equation}
\text{NMI} = \frac{I(Y_{true}; Y_{pred})}{\sqrt{H(Y_{true}) H(Y_{pred})}}
\end{equation}, where $I(Y_{true}; Y_{pred})$ denotes the mutual information between the true labels and predicted labels. $H(Y_{true})$ denotes the entropy of the true labels and $H(Y_{pred})$ denotes the entropy of the predicted labels. A higher NMI score indicates better clustering quality. The ground truth label for NMI computation is obtained from the cell type information provided by the authors who published the datasets. Since the reported cell types are the outcome of practical biological experiments, they can be considered as the noise-free true labels. We have performed experiments with the embedding layer dimensionality set at $2$, $10$, and $20$. As we can see from Figure \ref{fig:nmi_comparison}, the proposed method, scRAE, outperforms the current state-of-the-art methods irrespective of embedding dimension as measured by NMI score. The performance boost might be ascribed to the fact that scRAE is able to automatically operate at the optimal point on the bias-variance curve, whereas the bias is high in methods such as scVI \cite{Lopez2018SCVI}, scVAE \cite{scVAE} and DR-A \cite{Lin2020}. scDeepCluster \cite{Tian2019scDeepCluster} and scziDesk \cite{scziDesk} adapt the methodologies laid out in deep count autoencoder (DCA) \cite{Eraslan2019DCA} and deep embedding for clustering (DEC) \cite{dec}. These methods, although they avoid bias imposition through a prior distribution, might over-fit the training data due to increased variance. Furthermore, the clustering layer in these two algorithms requires the number of clusters as an input, which is not known \textit{a priori} in an unsupervised setting. Table \ref{tab:average_nmi} provides the average performance measured using NMI over all datasets to summarise the observations in the bar plot of Figure \ref{fig:nmi_comparison}. It is seen that on average scRAE performs better than the baselines with respect to the Normalized Mutual Information (NMI) score.\par
Normalized Mutual Information, however, is not adjusted for chance. Generally, for two clustering assignments with a larger number of clusters, mutual information is higher, even when there is actually less shared information. Adjusted Mutual Information (AMI) takes the above fact into consideration and adjusts the Mutual Information (MI) score to account for chance. AMI is defined as follows:
\begin{equation}
\text{AMI} = \frac{I(Y_{true}, Y_{pred}) - \mathbb{E}I(Y_{true}, Y_{pred})}{\frac{1}{2}(H(Y_{true}) + H(Y_{pred})) - \mathbb{E}I(Y_{true}, Y_{pred})}
\end{equation}
Figure \ref{fig:ami_comparison} compares the AMI scores achieved by different deep learning based methods when the embedding layer dimensionality is $10$. As before, scRAE outperforms the current state-of-the-art methods.\par
\begin{figure*}
\caption{Clustering performance of scRAE is compared against $6$ deep learning based baseline methods. As can be seen from the figure, scRAE achieves highest AMI score for all of the ten datasets. Higher AMI score indicates better cluster purity and better performance w.r.t. ground truth labels.}
\label{fig:ami_comparison}
\end{figure*}
We have used APIs provided by the Scanpy \cite{Wolf2018} toolkit\footnote{\url{https://github.com/theislab/scanpy}} for data preprocessing.\par
Next we present the performance of the proposed scRAE for the two downstream tasks of clustering and visualization. We compare its performance against several state-of-the-art baseline methods in Sections \ref{sec:clustering_expt} and \ref{sec:data_vis_expt}.
\begin{figure*}
\caption{Clustering performance of scRAE is compared against $6$ deep learning based baseline methods. As can be seen from the figure, scRAE achieves highest Homogeneity Score (HS) for all of the ten datasets. Higher score is better.}
\label{fig:hs_comparison}
\end{figure*}
\begin{figure*}
\caption{Clustering performance of scRAE is compared against $6$ deep learning based baseline methods. As can be seen from the figure, scRAE achieves highest Completeness Score (CS) for all of the ten datasets. Higher score is better.}
\label{fig:cs_comparison}
\end{figure*}
\begin{figure*}
\caption{Two-dimensional visualization for the dataset Zheng-$73k$ \cite{Zheng2017}.}
\label{fig:zheng73k_visualization}
\end{figure*}
\begin{figure*}
\caption{Visualization of the latent space of the proposed method, scRAE, when the latent dimension is $10$ for the dataset Zheng-$73k$ \cite{Zheng2017}.}
\label{fig:Zheng73k_zDim10_visualization}
\end{figure*}
Figures \ref{fig:hs_comparison} and \ref{fig:cs_comparison} further qualitatively compare the proposed method against SOTA deep learning methods in terms of homogeneity score and completeness score. The higher homogeneity score achieved by scRAE indicates all of its clusters contain more data points which are members of a single class. The higher completeness score achieved by scRAE on the other hand indicates more data points that are members of a given class are elements of the same cluster.
\begin{figure*}
\caption{Impact of different gene selection methods on clustering of Klein-$2k$ \cite{klein2015droplet}.}
\label{fig:klein_gene_selection}
\caption{Impact of different gene selection methods on clustering of Baron-$8k$ \cite{baron2016single}.}
\label{fig:baron_gene_selection}
\caption{Impact of gene selection method on clustering performance of different methods for two real-world datasets. The highest NMI score is achieved by scRAE when M3Drop \cite{M3Drop} is used for gene selection.}
\label{fig:gene_selection}
\end{figure*}
\begin{table*}[!ht]
\begin{center}
\caption{Impact of gene selection method on clustering performance as measured by Normalized Mutual Information (NMI) score averaged over two real datasets Klein-$2k$ \cite{klein2015droplet} and Baron-$8k$ \cite{baron2016single}. In most of the cases, the clustering performance of an algorithm has improved when advance gene selection techniques such as SCMarker \cite{SCMarker} or M3Drop \cite{M3Drop} is used. Our proposed method scRAE outperforms current SOTA baselines for a fixed gene selection method and achieves best performance for M3Drop \cite{M3Drop}.}\label{tab:gene_selection_vs_nmi}
\begin{tabular}{cccccccc}
\hline
& scRAE & scziDesk \cite{scziDesk} & scVAE \cite{scVAE} & scDeepCluster \cite{Tian2019scDeepCluster} & DR-A \cite{Lin2020} & SAUCIE \cite{amodio2019exploring} & scVI \cite{Lopez2018SCVI} \\
\hline\hline
HVG & \textbf{0.7849} & 0.7309 & 0.6731 & 0.7172 & 0.7111 & 0.6389 & 0.7349 \\
SCMarker & \textbf{0.8167} & 0.7719 & 0.6928 & 0.7556 & 0.7587 & 0.7159 & 0.7437 \\
M3Drop & \textbf{0.8526} & 0.7859 & 0.7567 & 0.7215 & 0.7805 & 0.6219 & 0.7460 \\
\hline
\end{tabular}
\end{center}
\vspace{-3mm}
\end{table*}
\begin{table*}[!ht]
\begin{center}
\caption{Comparison of average run time (in seconds rounded off to closest integer) of different algorithms on the smallest and the largest out of ten real datasets considered in this work.}
\label{tab:runtime}
\begin{tabular}{crrrrrrr}
\hline
Dataset & scRAE & scziDesk \cite{scziDesk} & scVAE \cite{scVAE} & scDeepCluster \cite{Tian2019scDeepCluster} & DR-A \cite{Lin2020} & SAUCIE \cite{amodio2019exploring} & scVI \cite{Lopez2018SCVI} \\
\hline\hline
Klein-$2k$ \cite{klein2015droplet} & 273 & 181 & 367 & 156 & 279 & 150 & 289 \\
Rosenberg-$156k$ \cite{Rosenberg176} & 1785 & 2689 & 1956 & 2317 & 1927 & 663 & 2409 \\
\hline
\end{tabular}
\end{center}
\vspace{-6mm}
\end{table*}
\subsection{Data Visualization}\label{sec:data_vis_expt}
Finally, to evaluate the effectiveness of the proposed method qualitatively, we have performed some experiments to visualize the compressed representation. The purpose of these experiments is to visually identify different cell types. Figure \ref{fig:zheng73k_visualization} presents the two dimensional representations learnt by different deep learning based models for the Zheng-$73k$ \cite{Zheng2017} dataset. Visualization is performed using the test split after training is complete. Once the computation of the two dimensional representations of the test examples is complete, they are plotted directly in Figure \ref{fig:zheng73k_visualization}. The ground truth labels are used to color similar cells with the same color and different cell types with separate colors. As we can see in Figure \ref{fig:scRAE}, in the latent space of scRAE, the representations corresponding to similar cells are closely located while the representations corresponding to dissimilar cells are far apart. Besides, some clusters are split into several sub-clusters indicating either biological effect or batch effect.\par
In Figure \ref{fig:Zheng73k_zDim10_visualization}, we have adopted a two-step procedure to visualize the learnt $10$-dimensional latent space in a scRAE model. As can be seen in Figures \ref{fig:scRAE_tSNE} and \ref{fig:scRAE_UMAP}, when t-SNE \cite{tsne} or UMAP \cite{mcinnes2018umap} is used in the second step for visualization, the clusters in the dataset become readily prominent. Similar to the $2$-dimensional case (Figure \ref{fig:scRAE}), similar cells are closer and dissimilar cells are further apart. However, PCA, being a linear method, fails to capture the cluster information in two dimensions (Figure \ref{fig:scRAE_PCA}) as prominently as in Figures \ref{fig:scRAE_tSNE} and \ref{fig:scRAE_UMAP}.
\begin{table}[!ht]
\centering
\caption{Impact of feature dimension on clustering performance of scRAE as measured by NMI score for $n_z=2$.}
\label{tab:feature_dim_vs_nmi}
\resizebox{\columnwidth}{!}{
\begin{tabular}{ccccccc}
\hline
\multirow{2}{*}{Dataset} & \multicolumn{6}{c}{Feature Dimension} \\ \cline{2-7}
& 500 & 600 & 700 & 800 & 900 & 1000 \\ \hline
Klein-2k\cite{klein2015droplet} & 0.8052 & 0.8072 & 0.8281 & 0.8234 & 0.8390 & 0.8354 \\
Han-2k\cite{HAN2018MicorwellSeq} & 0.7346 & 0.7422 & 0.7409 & 0.7417 & 0.7288 & 0.7467 \\
Zeisel-3k\cite{Zeisel1138} & 0.6814 & 0.6962 & 0.7338 & 0.7207 & 0.7186 & 0.7009 \\
Macosko-44k\cite{Macosko20151202} & 0.4991 & 0.4849 & 0.5326 & 0.5507 & 0.4883 & 0.5158 \\ \hline
\end{tabular}
}
\vspace{-4mm}
\end{table}
\subsection{Impact of Feature Dimension on Clustering}\label{sec:feature_dim_vs_nmi}
In order to evaluate the effect of feature size on the clustering performance, we vary the number of features between $500$ and $1000$ in steps of 100. As can be seen from Table \ref{tab:feature_dim_vs_nmi}, the clustering performance saturates beyond feature dimension $700$. Similar behaviour has been observed for $n_z=10$ and $20$. Further, we have obtained a similar ordering in the performance metrics of the proposed method and the baseline models for a fixed number of features.
\subsection{Impact of Gene Selection Method} \label{gene_sel}
As discussed in Sec. \ref{sec:preprocessing}, selection of informative genes is a critical preprocessing step, which not only reduces computational complexity but could potentially boost clustering performance. In this section, we study the effect of advanced gene selection methods on clustering. We consider two gene selection strategies, namely SCMarker \cite{SCMarker} and M3Drop \cite{M3Drop}. SCMarker \cite{SCMarker} is an unsupervised ab initio marker selection method. It is based on two metrics: 1) discriminative power of individual gene expressions and 2) mutually coexpressed gene pairs (MCGPs). M3Drop \cite{M3Drop} identifies genes with unusually high numbers of zeros, also called `dropouts', among their observations. As seen from Figures \ref{fig:klein_gene_selection} and \ref{fig:baron_gene_selection}, the clustering performance of scRAE improves significantly when SCMarker or M3Drop is used for gene selection as compared to the Highly Variable Gene (HVG) selection method \cite{Satija2015SEURAT}. M3Drop provides the best performance. This might be ascribed to the advantage of using the dropout-rate over variance, as the dropout-rate can be estimated more accurately due to much lower sampling noise. A similar performance boost is observed in the other baseline methods as well when SCMarker or M3Drop is used for gene selection (refer to Figure \ref{fig:gene_selection} and Table \ref{tab:gene_selection_vs_nmi}).
\subsection{Complexity Analysis}
In this section, we compare the run time complexity of the proposed method, scRAE, with the baseline methods used. We have used a machine with an Intel\textsuperscript{\textregistered} Xeon\textsuperscript{\textregistered} Gold 6142 CPU, 376GiB RAM, and a Zotac GeForce\textsuperscript{\textregistered} GTX 1080 Ti 11GB Graphic Card for all of our experiments. Table \ref{tab:runtime} reports the average runtimes of all the algorithms for experiments on Klein-$2k$ \cite{klein2015droplet} and Rosenberg-$156k$ \cite{Rosenberg176} (the smallest and the largest datasets, respectively; similar observations were made on the others as well). Preprocessing is a one time process and a common step for all the methods. Hence we do not include the time required for preprocessing in this analysis. As can be seen from Table \ref{tab:runtime}, for comparable model capacities, SAUCIE \cite{amodio2019exploring} is the fastest; however, most of the time its performance is the worst (see Tables \ref{tab:average_nmi}, \ref{tab:gene_selection_vs_nmi}, Figures \ref{fig:nmi_comparison}, \ref{fig:ami_comparison}, \ref{fig:hs_comparison}, \ref{fig:cs_comparison}). scDeepCluster \cite{Tian2019scDeepCluster} and scziDesk \cite{scziDesk} hold the second and third positions respectively in terms of computation speed for Klein-$2k$. However, for Rosenberg-$156k$, where the sample complexity is large, scRAE occupies the second position. Hence, it can be concluded that scRAE provides a considerable boost in performance compared to the baselines without incurring significantly higher computational overheads.
\section{Conclusion}
To conclude, in this work, we argue that there is a bias-variance trade-off with the imposition of any prior on the latent space in the finite data regime. We propose a model-based deep learning method scRAE, a generative AE for single-cell RNA sequencing data, which can potentially operate at different points of the bias-variance curve. Unlike previous deep learning-based generative modeling approaches, scRAE flexibly learns the prior while imposing restrictions on the latent space due to joint training of the P-GEN and the AE. This facilitates trading-off bias-variance on the fly and can potentially determine the optimum operating point on the bias-variance curve. We have empirically demonstrated scRAE's efficacy in clustering on ten real-world datasets and quantitatively compared its performance against several deep learning-based approaches. scRAE achieves the best performance as measured by metrics such as NMI, AMI, homogeneity score, and completeness score irrespective of the bottleneck layer's dimensionality. For bottleneck dimension, $n_z=10$, scRAE's average performance over ten datasets can be summarized as follows: average NMI is $0.7093$, average AMI is $0.6994$, average homogeneity score is $0.7706$, and average completeness score is $0.6555$. We have empirically established the effectiveness of scRAE for the visualization of high-dimensional scRNA-seq datasets. We have provided exhaustive ablation studies to examine the impact of feature dimension and the effect of gene selection methods. We have demonstrated scRAE's computational efficiency through extensive experimentation. Further, we have illustrated the scalability of the proposed method on several large datasets \cite{Rosenberg176, Zheng2017}. As more and more single-cell data becomes available, we expect more applications of our proposed method.
\section*{Acknowledgment}
The authors would like to thank Ajay Sailopal, IIT Delhi, for his help in running some of the baseline methods. Parag Singla is supported by the DARPA Explainable Artificial Intelligence (XAI) Program with number N66001-17-2-4032, the Visvesvaraya Young Faculty Fellowships by Govt. of India and IBM SUR awards. Himanshu Asnani acknowledges the support of Department of Atomic Energy, Government of India, under the project no. 12-R\&D-TFR-5.01-0500 and a gift from Adobe Research. Any opinions, findings, conclusions or recommendations expressed in this paper are those of the authors and do not necessarily reflect the views or official policies, either expressed or implied, of the funding agencies.
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{assets/Arnab_Kumar_Mondal_Passport.png}}]{Arnab Kumar Mondal} received his Bachelor of Engineering in Electronics and Telecommunication from Jadavpur University, India in 2013. Right after his graduation, he joined Centre for Development of Telematics (C-DOT), Delhi, and served as a research engineer there until July 2018. In C-DOT he had the opportunity to participate in cutting-edge projects such as the Dense Wavelength Division Multiplexing (DWDM) and Packet Optical Transport Platform (P-OTP). He joined IIT Delhi as a Ph.D. scholar in July, 2018 under the guidance of Prof. Prathosh AP and Prof. Parag Singla. His research interests lie primarily within the field of deep generative models and applied deep learning. He has published a couple of top-tier peer reviewed conference papers so far. His Ph.D. is supported by the Prime Minister's Research Fellows (PMRF) Scheme by Govt. of India.
\vspace{-10mm}
\end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{assets/Himanshu_Asnani_Passport.png}}]{Dr. Himanshu Asnani} is currently Reader (equivalent to tenure-track Assistant Professor) in the School of Technology and Computer Science (STCS) at the Tata Institute of Fundamental Research (TIFR), Mumbai and Affiliate Assistant Professor in the Electrical and Computer Engineering Department at University of Washington, Seattle. His research interests include information and coding theory, statistical learning and inference and machine learning. Dr. Asnani is the recipient of the 2014 Marconi Society Paul Baran Young Scholar Award and was named Amazon Catalyst Fellow for the year 2018. He received his Ph.D. in the Electrical Engineering Department in 2014 from Stanford University, working under Professor Tsachy Weissman, where he was a Stanford Graduate Fellow. Following his graduate studies, he worked in Ericsson Silicon Valley as a System Architect for a couple of years, focusing on designing next generation networks with emphasis on network redundancy elimination and load balancing. Before joining TIFR, Dr. Asnani worked as a Research Associate in the Electrical and Computer Engineering Department at University of Washington, Seattle. In the past, he has also held visiting faculty appointments in the Electrical Engineering Department at Stanford University and the Electrical Engineering Department at IIT Bombay.
\vspace{-10mm}
\end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{assets/Parag_Singla_Passport.png}}]{Parag Singla} is an Associate Professor in the Department of
Computer Science and Engineering at IIT Delhi. He holds a Bachelors in Computer Science and Engineering (CSE) from IIT Bombay (2002) and Masters in CSE from University of Washington Seattle. He received his PhD from University of Washington Seattle in 2009 and did a PostDoc from University of Texas at Austin during the period 2010-11. He has been a faculty member in the Department of CSE at IIT Delhi since December 2011. Parag’s primary research interests lie in the areas of machine learning, specifically focusing on neuro symbolic reasoning. In the past, he has also worked extensively on graphical models and statistical relational AI. Parag has 40+ publications in top-tier peer reviewed conferences and journals. He also has one best paper award and two patents to his name. He is a recipient of the Visvesvaraya Young Faculty Research Fellowship by Govt. of India.
\vspace{-10mm}
\end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{assets/Prathosh_AP_Passport.png}}]{Prathosh AP} is currently a faculty member at IIT Delhi, India. He received his Ph.D. from the Indian Institute of Science (IISc), Bangalore in 2015, in the area of temporal data analysis and applied machine learning. He submitted his PhD thesis in a record time of three years after his B.Tech. with many top-tier journal publications. Subsequently he worked in corporate research labs including Xerox Research India, Philips Research and a start-up in CA, USA. His work in industry led to the generation of several IP, comprising 10 granted US patents, most of which are commercialised. He joined IIT Delhi as an Assistant Professor in the computer technology group of Electrical Engineering where he is currently engaged in research and teaching machine and deep learning courses. His current research includes informative-prior guided representational learning, deep generative models, cross-domain learning and their applications.
\end{IEEEbiography}
\end{document} |
\begin{document}
\title{Stochastic Control Problems with Unbounded Control Operators: solutions through generalized derivatives}
\date{}
\author{Fausto Gozzi
\\
Dipartimento di Economia e Finanza,
Universit\`a LUISS - Guido Carli\\
Viale Romania 32,
00197 Roma,
Italy\\
e-mail: [email protected]\\
\\
Federica Masiero\\
Dipartimento di Matematica e Applicazioni, Universit\`a di Milano Bicocca\\
via Cozzi 55, 20125 Milano, Italy\\
e-mail: [email protected]}
\maketitle
\begin{abstract}
This paper deals with a family of stochastic control problems
in Hilbert spaces which arises in many engineering/economic/financial applications (in particular the ones featuring boundary control and control of delay equations with delay in the control) and for which it is difficult to apply the dynamic programming approach due to the unboundedness of the control operator and to the lack of regularity of the underlying transition semigroup.
We introduce a specific concept of partial derivative, designed for this situation, and we develop a method to prove that the associated HJB equation has a solution with enough regularity to find optimal controls in feedback form.
\end{abstract}
\textbf{Key words}:
Stochastic boundary control problems;
Stochastic control of delay equation with delay in the control;
Unbounded control operator;
Second order Hamilton-Jacobi-Bellman equations in infinite dimension;
Smoothing properties of transition semigroups.
\noindent
\textbf{AMS classification}:
93E20 (Optimal stochastic control),
60H20 (Stochastic integral equations),
47D07 (Markov semigroups and applications to diffusion processes),
49L20 (Dynamic programming method),
35R15 (Partial differential equations on infinite-dimensional spaces).
93C23 (Systems governed by functional-differential equations)
\noindent
\textbf{Acknowledgements}:
The authors thanks a lot the Associate Editor and the Referees for careful scrutiny and useful suggestions that led to an improved version of the paper.
Fausto Gozzi has been supported by the Italian
Ministry of University and Research (MIUR), in the framework of PRIN
projects 2015233N54 006 (Deterministic and stochastic evolution equations) and 2017FKHBA8 001 (The Time-Space Evolution of Economic Activities: Mathematical Models and Empirical Applications).
Federica Masiero has been supported by the Italian Ministry of University and Research (MIUR), in the framework of PRIN project 2015233N54 006 (Deterministic and stochastic evolution equations) and by the Gruppo Nazionale per l'Analisi Matematica, la Probabilit\`a e le loro Applicazioni (GNAMPA) of the Istituto Nazionale di Alta Matematica (INdAM).
\tableofcontents
\section{Introduction}
\subsection{Stochastic control in infinite dimension and its applications}
Stochastic optimal control problems arise in a large variety of applications (see e.g. \cite{Neck84}), and have been (and currently are) the object of an extensive theoretical and applied literature.
In recent years, due to progress in the methodologies and in the computational power there has been an increasing interest in studying also what is usually called ``the infinite dimensional case'', i.e. the case when the state and control variables take their values in infinite dimensional spaces. The infinite dimensional case allows for more realistic modeling and substantially means that the state equation is a Stochastic Partial Differential Equation (SPDE from now on) or a Stochastic Differential Delay Equation (SDDE from now on).
Such types of state equations arise naturally in a wide range of
applied models including physics, engineering, operations research, economics and finance.
On the one hand state equations of SPDE type are used when one wants to model control processes where the underlying dynamical system inherently depends on other basic variables beyond time.
For example we recall: the control of SPDEs arising in fluid dynamics (see e.g. \cite{DaPratoDebussche99}, \cite{Sritharanbook}),
in reaction-diffusion problems (see e.g. \cite{MouraFathy13}),
in modeling air pollution (see e.g. \cite{Do14AMS}, \cite{Seinfeld98}),
in robotics (see e.g. \cite{EvansPereiraBoutselisTheodoru19},
\cite{EvansKendallBoutselisTheodoru20}),
in elasticity theory and practice (see e.g. \cite{ChowMenaldi14} \cite{Do17Automatica},\cite{Do17JSV}),
in spatio-temporal economics growth models
(see e.g. \cite{BoucekkineCamachoFabbri13,Brito04}
in the deterministic case and
\cite{GozziLeocata21} in the stochastic case),
in advertising models (see e.g. \cite{BarucciGozziAOR99},
\cite{Huang12}).
On the other hand state equations of SDDE type are used when one wants to model control processes where the underlying dynamical system is not Markovian in the sense that the value of the state at time $t$ depends on the past of the state/control variables.
Such models arise in optimal advertising problems (see e.g. \cite{ChenWu20,GM,GMSJOTA,Huang12,Machowska19}); in optimal portfolio problems
(see e.g. \cite{CarmonaEtAl18,FedFinSto,RosestolatoSwiech17});
in optimal production planning (see e.g.
\cite{Chan07}, \cite{Yan00});
in the feedback stabilisation of engineering systems (see e.g. \cite{Li20AUTO}).
\subsection{The purpose of our paper: the ``unbounded control case'' and its interest}
Despite its interest for applications, the theory of stochastic control in infinite dimension is still a young and incomplete area. For this reason we think it is interesting to
continue to develop the theory for such problems, in particular
trying to cover new problems arising in applications like the ones quoted above.
For an up-to-date account of the theory we refer the reader to the recent book \cite{FabbriGozziSwiech}; other books which partly look at the subject are
\cite[Chapter 13]{DP3}, \cite[Chapters 9-10]{Cerraibook}
and \cite[Chapters 5-6]{Nisiobook}.
This paper is devoted exactly to the above task in cases which are quite common in applications and for which there are, at the moment, few and incomplete results available: the cases, roughly speaking, where the control enters in the state equation in an ``unbounded'' way.
Such unboundedness translates into the fact that, while the state process takes values in a Hilbert space $H$
(e.g. $L^2(0,1)$), the term
which brings the control action into the state equation,
say $Cu$, takes values in a bigger Banach space
$\overline{H}$
(e.g. any Sobolev space of negative order) which strictly contains $H$.
Typical applications where such unboundedness arises are
\textbf{stochastic boundary control problems}
and \textbf{stochastic control of delay equations with delay in the control}.
\textbf{Stochastic boundary control problems}, i.e. problems where, for structural reasons, the control is applied only at the boundary of a given region, are quite common in a wide range of applied problems.
In the engineering-related literature
we recall
some recent papers devoted to specific applications like, e.g., \cite{EvansKendallTheodoru21} in the field of robotics; \cite{Liu20AUTO,Lachemi20AUTO,Lachemi21TAC,MouraFathy13} for reaction-diffusion systems (also in the deterministic case);
\cite{Lamoline20} for stochastic port-Hamiltonian systems; \cite{Do17Automatica} \cite{Do17JSV} for marine risers and Timoshenko beams.
\\
Stochastic boundary control problems also arise in advertising models when one wants to take account of the age structure of the products, see e.g. \cite{BarucciGozziAOR99}, \cite{FaggianGrossetMMOR13}, \cite{GrossetViscolani},
\cite{Huang12}.
On the other hand, \textbf{stochastic optimal control problems with delay in the control}
arise since in many practical situations the effect of the control action persists in the future.
We recall, in this respect, the so-called carryover effect in advertising, see e.g. \cite{ChenWu20}, \cite{GM}, \cite{GMSJOTA}, \cite{Machowska19} and, for related deterministic problems, \cite{FeichtingerHartlSethi}.
\\
Applications to economics (delay in production due to the time to build) are studied in \cite{ChenWuAutomatica2010} where a problem with pointwise delay in the state and in the control is studied by means of the stochastic maximum principle (see also \cite{BambietalET} in a related deterministic case).
\\
Concerning applications to finance we recall \cite{CarmonaEtAl18} where a mean-field model of systemic risk with delay in the control is studied.
\\
Finally stochastic control problems with delay in the control are also related to the problem of information delay (i.e. the time which may be necessary
to implement the control), which is studied e.g. in \cite{SZ}.
\subsection{The novelty of our methodology}
We use here the dynamic programming approach
with the aim of finding solutions of the associated HJB equations which are regular enough to write optimal control strategies in feedback form and to prove verification theorems.
In the literature one can, roughly speaking, distinguish three main methods to do this.
The first is to look at the theory of viscosity solutions (which is partly developed in these cases, see e.g.
\cite{FedericoGozziJDE,GozziRouySwiech06,Swiech20} and
\cite[Section 3.12 and 4.8.3]{FabbriGozziSwiech});
the viscosity solution is not differentiable in general, however in some infinite dimensional cases some type of differentiability can be proved
(see e.g. \cite{FedericoGoldysGozzi10SICON}, \cite{RosestolatoSwiech17}); however, such methods seem not applicable here due to the unboundedness of the control operator.
The second approach (developed e.g. in \cite{ChowMenaldi97}, \cite{Ahmed01})
which we may call "variational" is based on the use of coercive bilinear forms associated to Ornstein-Uhlenbeck operators on a suitable Gelfand triple. Again this seems not applicable here since
it seems not compatible with the unboundedness of our control operator and the lack of null controllability estimates of our cases.
The third approach, which is the one we use here, is the theory of mild solutions (see e.g.
the papers \cite{CDP1}, \cite{CDP2}, \cite{DP3}, \cite{G1}, \cite{Mas}, \cite{GoldysGozzi06SPA} and the book \cite[Chapter 4]{FabbriGozziSwiech}).
\\
Roughly speaking such approach, which works only in the semilinear case, rewrites the HJB equation
in a suitable integral form and tries to solve it using a fixed point argument. It is based on smoothing properties
of an underlying transition semigroup and allows one to find solutions which are regular enough to define optimal feedback control strategies.
\\
In the present case there are two technical barriers preventing the use of such approach:
\begin{itemize}
\item
the need of giving a precise sense to the HJB equation, coming from the unboundedness explained above, which is the core of our setting;
\item
the lack of smoothing properties of the underlying transition semigroup.
\end{itemize}
To deal with such issues we introduce
a specific concept of partial derivative, designed for this situation.
We observe that in the literature various concepts of infinite dimensional derivative have been used, depending on the context.
We mention, among others, the papers \cite{Gross1967},
\cite{LunardiRockner20} (which uses the so-called Fomin derivative), \cite{FTGgrad,Mas,Mas-inf-or} (which use the so-called $G$-gradient for a bounded operator $G$),
\cite[Chapter 4]{FabbriGozziSwiech}-\cite{FedericoGozziJDE}
(which use the so-called $G$-gradient for a possibly unbounded operator $G$).
Our definition extends the last one to take into account more
general cases of unboundedness, more precise explanations are given in Section \ref{subsection-C-directionalderivatives}.
\\
Once such derivative is introduced we perform a nontrivial extension of an idea (which we call ``partial smoothing'') that we used in our previous papers \cite{FGFM-I}-\cite{FGFM-II} in a delay case with bounded control operators. Details are given in Sections
\ref{sec:partsmooth-abstr-setting}-\ref{sec:convpartsmooth-abstr-setting}-\ref{sec-HJB}.
We must say that this paper is a first step to attack such difficult problems. Here we show how to find a regular solution to the HJB equation for a special type of cost functionals.
A second step, which is the object of our current research, is to cover more general cost functionals, in particular the ones where the current cost can be state dependent.
Moreover, to make our results more useful for applications we aim at proving verification type results: this would allow one to construct the optimal controls in feedback form, on the line of what is done, e.g., in \cite{Ahmed03} or in our previous paper \cite{FGFM-II}.
We also think that it would be interesting
to study the case with partial observation
(on the line of what is done, in cases which do not include ours, in \cite{Ahmed15JMAA}, \cite{GozziSwiech00JFA}, \cite{BandiniCossoFuhrmanPham15AAP})
or the case of Mc Kean - Vlasov dynamics
(see e.g. \cite{Ahmed07}, or \cite{CossoEtAl20})
and to study the applicability, to our setting, of numerical methods for infinite dimensional HJB equations like the ones developed in \cite{AllaFalconeKalise15}.
\subsection{Plan of the paper}
The plan of the paper is the following.
\begin{itemize}
\item
Section \ref{subsection-notation} introduces some basic notations.
\item
Section \ref{SE:EXCONTROL} introduces
our driving examples showing how to rewrite them in
a suitable infinite dimensional setting.
\item
Section \ref{subsection-C-directionalderivatives}
provides the definition of our $C$-derivatives together with some comments to compare it with previous definitions.
\item
Section \ref{sec:partsmooth-abstr-setting} presents our partial smoothing result for Ornstein-Uhlenbeck semigroups
(Proposition \ref{prop:partsmooth}).
\item Section \ref{sec:convpartsmooth-abstr-setting}.
generalizes the partial smoothing result to convolutions
(Lemma \ref{lemma_convoluzione}).
\item
In Section \ref{sec-HJB} we first present a general control problem which includes both our driving examples (Subsection \ref{subsec-contr.pr-abstract}); then, in Subsection \ref{sub:HJBsol}, we state and prove our main result
of existence and uniqueness of mild solutions for the HJB equation (Theorem \ref{esistenzaHJB}).
\item
Appendix A is devoted to show that our motivating examples
satisfy the assumptions made.
\end{itemize}
\section{Basic notation and spaces}\label{subsection-notation}
For the reader's convenience we collect here the basic notation used throughout the paper.
\\
Let $H$ be a Hilbert space.
The norm of an element $x$ in $H$ will be denoted by
$\left| x\right|_{H}$ or simply $\left|x\right|$,
if no confusion is possible, and by
$\left\langle \cdot,\cdot\right\rangle _H$,
or simply by $\left\langle \cdot,\cdot\right\rangle$ we denote the inner product in $H$.
We denote by $H^{\ast}$ the dual space of $H$.
If $K$ is another Hilbert space, ${\cal L}(H,K)$ denotes the
space of bounded linear operators from $H$ to $K$ endowed with the usual operator norm.
All Hilbert spaces are assumed to be real and separable.
Let $E$ be a Banach space. As for the Hilbert space case, the norm of an element $x$ in $E$ will be
denoted by $\left|x\right|_{E}$ or simply $\left|x\right|$,
if no confusion is possible.
We denote by $E^{\ast}$ the dual space of $E$,
and by $\left\langle \cdot,\cdot\right\rangle_{E^*,E}$
the duality between $E$ and $E^*$.
If $F$ is another Banach space, ${\cal L}(E,F)$ denotes the
space of bounded linear operators from $E$ to $F$ endowed with the usual operator norm.
All Banach spaces are assumed to be real and separable.
In what follows we will often meet inverses of operators which are not
one-to-one. Let $Q\in {\cal L}\left(H,K\right)$.
Then $H_{0}=\ker Q$ is a closed subspace of $H$. Let
$H_{1}:=[\ker Q]^\perp$ be the orthogonal complement of $H_{0}$ in $H$: $H_{1}$ is closed, too.
Denote by $Q_{1}$ the restriction of $Q$ to $H_{1}$: $Q_{1}$ is
one-to-one and $\operatorname{Im}Q_{1}=\operatorname{Im}Q$.
For $k\in \operatorname{Im}Q$, we define $Q^{-1}$ by setting
\[
Q^{-1}\left(k\right) :=Q_{1}^{-1}\left(k\right) .
\]
The operator $Q^{-1}:\operatorname{Im}Q\rightarrow H$ is called the pseudoinverse of $Q$. $Q^{-1}$ is linear and closed but in general not continuous.
Note that if $k\in\operatorname{Im}Q$, then
$Q_{1}^{-1}\left( k\right)\in[\ker Q]^\perp$
is the unique element of
\(
\left\{ h :Q\left( h\right) =k\right\}
\)
with minimal norm (see e.g. \cite{Z}, p.209).
Next we introduce some spaces of functions.
Let $H$ and $Z$ be real separable Hilbert spaces.
By $B_b(H,Z)$ (respectively $C_b(H,Z)$, $UC_b(H,Z)$) we denote the space of all functions
$f:H\rightarrow Z$ which are Borel measurable and bounded (respectively continuous
and bounded, uniformly continuous and bounded).
Given an interval $I\subseteq \mathbb R$ we denote by
$C(I\times H,Z)$ (respectively $C_b(I\times H,Z)$)
the space of all functions $f:I \times H\rightarrow Z$
which are continuous (respectively continuous and bounded).
$C^{0,1}(I\times H,Z)$ is the space of functions
$ f\in C(I\times H, Z)$ such that, for all $t\in I$,
$f(t,\cdot)$ is continuously Fr\'echet differentiable with Fr\'echet derivative $\nabla f(t,x)\in {\cal L}(H,Z)$.
By $UC_{b}^{1,2}(I\times H,Z)$
we denote the linear space of the mappings $f:I\times H \rightarrow Z$
which are uniformly continuous and bounded
together with their first time derivative $f_t$ and their first and second space
derivatives $\nabla f,\nabla^2f$.
\\
If the destination space $Z=\mathbb R$ we do not write it in all the above spaces.
\\
The same definitions can be given if $H$ and $Z$ are Banach spaces.
\section{Two examples with unbounded control operator}
\label{SE:EXCONTROL}
We present here two stochastic controlled equations that motivate the introduction of generalized partial derivatives in Section \ref{subsection-C-directionalderivatives}. What they have in common is that, once they are reformulated as infinite dimensional stochastic controlled evolution equations, the control operator is unbounded.
\subsection{Heat equations with boundary control}
\label{SSE:HEATEQUATION}
\subsubsection{The state equation}
\label{SSSE:SEBC}
Let $(\Omega, {\cal F},P)$ be a complete probability space endowed with a filtration $({\cal F}_t)_{t\geq 0}$. Fixed $0\le t \le T<+\infty$, we consider, in an open connected set with smooth boundary ${\cal O}\subseteq \mathbb R^d$ ($d=1,2,3$) the controlled stochastic heat equation with Dirichlet boundary conditions and with boundary control:
\begin{equation}\label{eqDiri}
\left\{
\begin{array}{l}
\displaystyle
\frac{ \partial y}{\partial t}(s,\xi)
= \Delta y(s,\xi)+\dot{W}_Q(s,\xi), \qquad s\in [t,T],\;
\xi\in {\cal O},
\\\displaystyle
y(t,\xi)=x(\xi),\; \xi\in {\cal O},
\\\displaystyle
y(s,\xi)= u(s,\xi), \qquad s\in [0,T],\;
\xi\in \partial{\cal O}.
\end{array}
\right.
\end{equation}
where $\Delta$ is the Laplace operator and we assume the following.
\begin{hypothesis}\label{hp:BC}
\begin{itemize}
\item[]
\item[(i)]
The initial datum $x(\cdot)$ belongs to the state space $H:=L^2({\cal O})$. The set $U$ of control values is a closed and bounded subset of the Hilbert space $K:=L^2(\partial{\cal O})$.
\item[(ii)]
$\dot{W}_Q$ is a so-called colored space-time noise (with space covariance $Q\in {\cal L}(H)$), the filtration $({\cal F}_t)_{t\geq 0}$ coincides with the augmented filtration generated by $W_Q$;
\item[(iii)]
the control strategy $u$ belongs to ${\cal U}$ where
$$
{\cal U}:=\left\lbrace u(\cdot):\Omega\times [0,T] \rightarrow U):\; \hbox{predictable}\right\rbrace
$$
\end{itemize}
\end{hypothesis}
Given any $(t,x) \in [0,T]\times \mathcal{O}$ and $u \in \mathcal{U}$, we denote, formally, by $y^{t,x,u}(s,\xi)$ the solution of \eqref{eqDiri} at $(s,\xi) \in [0,T]\times \mathcal{O}$.\footnote{Such solution could be defined with various methods. Here, similarly to
\cite[Appendix C]{FabbriGozziSwiech}, we define such solution as the unique mild solution (see \eqref{eq:mildsolboundary}) of the infinite dimensional system \eqref{eqDiri-abstr-contr} below.}
Following the path outlined in
\cite[Appendix C]{FabbriGozziSwiech} we give sense to
\eqref{eqDiri} rewriting it as an evolution equation in the space $H:=L^2({\cal O})$. We assume that the initial condition $x(\cdot)$ belongs to $H$.
The new state will be a process with values in $H$ given, formally, by $X(s;t,x,u)=y^{t,x,u}(s,\cdot)$.
We define the operator $A_0$ in $H$ setting
(here $H^2({\cal O})$ and $H^1_0({\cal O})$ are the usual Sobolev spaces)
$$
{\cal D}(A_0)=H^2({\cal O})\cap H^1_0({\cal O})
\qquad
A_0y = \Delta y
{\rm \;\; for\; \;} y\in {\cal D}(A_0).
$$
The operator $A_0$ is self-adjoint and diagonal with strictly negative eigenvalues $\{-\lambda_n\}_{n\in \mathbb N}$
(recall that $\lambda_n\sim n^{2/d}$ as $n \rightarrow +\infty$).
We can endow $H$ with a complete orthonormal basis
$\{e_n\}_{n\in \mathbb N}$
of eigenvectors of $A_0$.\footnote{We know that $e_0$ is constant and, when $d=1$ and ${\cal O}=(0,\pi)$, $(e_n(\xi))_{n\geq 1}:=(\sqrt{2}\sin (n\xi))_{n\geq 1}$.}
We recall that the linear trace operator
$D:L^2(\partial {\cal O})\rightarrow H$
is defined setting $D a=f$ where $f$ is the unique solution of the Dirichlet problem
$$
\left\{
\begin{array}{l}
\Delta f(\xi)=0,
\qquad \xi\in {\cal O},
\\
\displaystyle
f(\xi)=a(\xi), \qquad \xi\in \partial{\cal O}.
\end{array}
\right. $$
Equation (\ref{eqDiri}) can now be reformulated
(see \cite[Appendix C]{FabbriGozziSwiech} for a proof) as
\begin{equation}\label{eqDiri-abstr-contr}
\left\{
\begin{array}{l}
\displaystyle
d X(s)= A_0X(s)\,ds +
( -A_0)D u(s)\,dt+Q^{1/2}dW(s),
\\\displaystyle
X(t)=x.
\end{array}
\right.
\end{equation}
where $W(\cdot)$ is a cylindrical noise in $H$.
We define
\begin{equation}\label{notazioneB}
B_0:=( -A_0)D.
\end{equation}
The operator $B_0$, defined in $K=L^2(\partial{\cal O})$,
does not take values in $H=L^2({\cal O})$.
Indeed for all $\varepsilon>0$, the Dirichlet map takes its values in ${\cal D}((-A_0)^{1/4-\varepsilon})$:
$D\in {\cal L}\left(K, {\cal D}((-A_0)^{1/4-\varepsilon})\right)$
(see again \cite[Appendix C]{FabbriGozziSwiech}).
So,
$$
B_0=(-A_0)^{3/4+\varepsilon}(-A_0)^{1/4-\varepsilon}D:
K \rightarrow {\cal D}((-A_0)^{-3/4-\varepsilon}).
$$
Here and from now on we take $0<\varepsilon<1/4$, indeed the point is to take $\varepsilon$ as small as possible in order to have in $B_0$ a better unbounded part.
Hence we have\footnote{Here, for $\gamma>0$ we denote by
${\cal D}((-A_0)^{-\gamma})$
the completion of $H$ with respect to the norm
$|\cdot|_{-\gamma}=|A_0^{-\gamma}\cdot|_H$.}
$B_0\in {\cal L}\left(K,{\cal D}((-A_0)^{-3/4-\varepsilon})\right)$.
With an abuse of language with respect to the standard use, we
may say that $B_0$ is unbounded on $H$, in the sense that
its image is not contained in $H$ but in a space larger than $H$
(here
${\cal D}((-A_0)^{-3/4-\varepsilon})=H^{-3/2-2\varepsilon}({\cal O})$)
which we will call $\overline{H}$.
\\
The unique mild solution (which exists thanks e.g. to
\cite[Theorem 1.141]{FabbriGozziSwiech}) of \eqref{eqDiri-abstr-contr}
is denoted by $X(\cdot;t,x,u)$ and is
\begin{equation}\label{eq:mildsolboundary}
X(s;t,x,u)=
e^{(s-t)A_0}x+\int_t^s e^{(s-r)A_0}B_0 u(r) dr +\int_t^se^{(s-r)A_0}Q^{1/2}dW(r)
,\text{ \ \ \ }s\in[t,T].
\end{equation}
Consequently, for any $(t,x) \in [0,T]\times \mathcal{O}$ and $u \in \mathcal{U}$, we give sense to
$y^{t,x,u}$ by setting $y^{t,x,u}(s,\xi):=X(s;t,x,u)(\xi)$.
\subsubsection{The optimal control problem}
\label{SSSE:OCBC}
For any given $t\in [0,T]$ and $x \in H$, the objective is to minimize, over all control strategies in ${\cal U}$, the following finite horizon cost:
\begin{equation}\label{costoastratto}
J(t,x;u)=\mathbb E \left[\int_t^T \left[\ell_0(s)+\ell_1(u(s))\right]\,ds + \phi(X(T;t,x,u))\right],
\end{equation}
under the following assumption
\begin{hypothesis}\label{hp:BCcost}
\begin{itemize}
\item[]
\item[(i)] $\ell_0:[0,T]\rightarrow \mathbb R$, is measurable and bounded.
\item[(ii)] $\ell_1:U\rightarrow \mathbb R$ is measurable and bounded from below.
\item[(iii)] $\phi:H\rightarrow \mathbb R$ is such that, for a suitable finite set $\{\mathbf f_1,\dots,\mathbf f_N\}\subseteq {\cal D}((-A_0)^{\eta})$ (with $\eta>1/4$) and a suitable $\bar\phi \in B_b(\mathbb R^N)$ we have
$$
\phi(x)=\bar\phi\left(\left\langle x,\mathbf f_1\right\rangle _H,\dots,
\left\langle x,\mathbf f_N\right\rangle _H\right).
$$
\end{itemize}
\end{hypothesis}
Such cost can be seen as the rewriting in $H$ of a more ''concrete'' cost like
\begin{equation}\label{costoconcreto}
J_0(t,x;u)=\mathbb E \left[\int_t^T \int_{\mathcal{O}} \left[\bar\ell_0(s,\xi)+\bar\ell_1(u(s,\xi))\right]\,d\xi ds + \phi_0\left(y^{t,x,u}(T,\cdot)\right)\right],
\end{equation}
where $\bar\ell_0$, $\bar\ell_1$, $\phi_0$ are chosen so that the corresponding $\ell_0,\ell_1,\phi$ satisfy Hypothesis \ref{hp:BCcost} above.
\newline Note that here the current cost does not depend on the
state; this is due to the fact that putting the dependence on the state in the current cost would increase considerably the technical arguments in the solution of the HJB equation. Moreover, in order to bypass the lack of suitable smoothing properties of the underlying transition semigroup, we have to work on cost functionals which depend on the state only through a suitable operator $P$, which here turns out to be defined by projections on ${\cal D}((-A_0)^{\eta})$, with $\eta>1/4$; see Sections \ref{subsec-contr.pr-abstract} and \ref{sub:HJBsol} for further details.
\newline The value function of the problem is
\begin{equation}\label{valuefunction-diri}
V(t,x):= \inf_{u \in {\cal U}}J(t,x;u).
\end{equation}
We define the Hamiltonians leaving aside the term not depending on the control $u$.
For $p\in H$, $u \in U$, the current value Hamiltonian $\hat H_{CV}$ is given by (this is formal since neither $B_0u$ nor $B_0^*p$ belong to $H$, in general):
\begin{equation}
\label{eq:hatHCV}
\hat H_{CV}(p\,;u):=\left\langle B_0u,p\right\rangle _{H}+\ell_1(u)=
\left\langle u,B_0^*p\right\rangle _{H}+\ell_1(u)
\end{equation}
and the (minimum value) Hamiltonian by
\begin{equation}\label{psi1-diri}
\hat H_{min}(p):=\inf_{u\in U}\hat H_{CV}(p\,;u).
\end{equation}
The associated HJB equation can then be formally written as
\begin{equation}\label{HJBformale-diri}
\left\{\begin{array}{l}\displaystyle
-\frac{\partial v(t,x)}{\partial t}={\cal A} [v(t,\cdot)](x) +\ell_0(t)+
\hat H_{min} (\nabla v(t,x)),\qquad t\in [0,T],\,
x\in H,\\
\\
\displaystyle v(T,x)=\phi(x),
\end{array}\right.
\end{equation}
where $B$ is defined in {\eqref{notazioneB}}, and ${\cal A}$ is the infinitesimal generator of the transition
semigroup $(R_{t})_{0 \leq t\leq T}$ associated to the process $X$ when the control is zero: namely ${\cal A}$ is formally defined by
\begin{equation}\label{eq:ell-diri}
{\cal A}[f](x)=\frac{1}{2} Tr \; Q \nabla^2f(x)
+ \left\langle x,A^*\nabla f(x)\right\rangle .
\end{equation}
From \eqref{eq:hatHCV} we easily see that, still formally,
$\hat H_{min}(p)$ depends not on $p$ but on $B_0^*p$. On the same line also the minimum point in \eqref{eq:hatHCV}, when it exists, only depends on $B_0^*p$. This means that the candidate optimal feedback map, if it exists, is a function of $B_0^*\nabla v$.
For this reason our main goal is to find a solution $v$ of \eqref{HJBformale-diri} for which $B_0^*\nabla v$ makes sense.
For this reason in the sequel we will use the notation
(for $p\in H$ and $q\in K$ such that the expressions below make sense):
\begin{equation}\label{eq:modham}
H_{CV}(q\,;u):=\left\langle u,q\right\rangle _{K}+\ell_1(u)
\quad \hbox{and}\quad
H_{min}(q):= \inf_{u\in U} H_{CV}(q\,;u),
\end{equation}
so that
$$
\hat H_{CV}(p\,;u)=H_{CV}(B^*p\,;u)
\quad \hbox{and}\quad
\hat H_{min}(p)=H_{min}(B^*p)
$$
\subsection{SDEs with delay in the control}
\label{SSE:DELAYEQUATION}
\subsubsection{The state equation}
\label{SSSE:SEdelay}
In a complete probability space $(\Omega, {\cal F}, \mathbb P)$
we consider the following controlled stochastic
differential equation in $\mathbb R^n$ with delay in the control:
\begin{equation}
\left\{
\begin{array}
[c]{l}
dy(s) =a_0 y(s) ds+b_0 u(s) ds +\displaystyle\int_{-d}^0 u(s+\xi)b_1(d\xi) \, ds
+\sigma dW(s)
,\text{ \ \ \ }s\in [t,T] \\
y(t) =y_0,\\
u(t+\xi)=u_0(\xi), \quad \xi \in [-d,0).
\end{array}
\right. \label{eq-contr-rit}
\end{equation}
Here we consider the case of delay in the control, the case with delay also in the state is more complicated and cannot be treated as an application of the techniques introduced in the present paper.
\noindent We assume the following.
\begin{hypothesis}\label{hp:delaystate}
\begin{itemize}
\item[]
\item[(i)] $W$ is a standard Brownian motion in $\mathbb R^k$, and $({\cal F}_t)_{t\geq 0}$ is the
augmented filtration generated by $W$;
\item[(ii)] the control strategy $u$ belongs to ${\cal U}$ where
$${\cal U}:=\left\lbrace u(\cdot):\Omega\times [0,T]\rightarrow U:\;
\hbox{predictable} \right\rbrace
$$
with $U$ a closed and bounded subset of $\mathbb R^m$;
\item[(iii)] $a_0\in {\cal L}(\mathbb R^n;\mathbb R^n)$, $b_0 \in {\cal L}(\mathbb R^m;\mathbb R^n)$, $\sigma\in {\cal L}(\mathbb R^k;\mathbb R^n)$, $d>0$;
\item[(iv)] $b_1$
is an $m\times n$ matrix of signed Radon measures on $[-d,0]$ (i.e. it is an element of the dual space of $C([-d,0],{\cal L}(\mathbb R^m;\mathbb R^n))$).
\end{itemize}
\end{hypothesis}
Given any initial datum $(y_0,u_0)\in \mathbb R^n\times L^2([-d,0], \mathbb R^m)$ and any admissible control $u\in {\cal U}$ equation (\ref{eq-contr-rit}) admits a unique strong (in the probabilistic sense) solution which is continuous and predictable
(see e.g. \cite{IkedaWatanabe} Chapter 4, Sections 2 and 3).
Notice that Hypothesis \ref{hp:delaystate}-(iv) on $b_1$ covers, but it is not limited to, the very common case of pointwise delay\footnote{Our delayed SDE includes, for example, the state equation used in \cite{CarmonaEtAl18}.} but it is technically complicated to deal with: indeed it gives rise, as we are going to see in the next subsection, to an unbounded control operator $B$. We underline the fact that in the case of pointwise delay the matrix $b_1$ is a matrix of discrete measures, like weighted Dirac measures.
Recall that, in \cite{FGFM-I} and \cite{FGFM-II} the case of $b_1$ absolutely continuous with respect to the Lebesgue measure has been treated assuming
\begin{equation}\label{eq:b1restrictive}
b_1(d\xi)=\bar b_1(\xi)d\xi,\;
\bar b_1\in L^2([-d,0],{\cal L}(\mathbb R^m;\mathbb R^n));
\end{equation}
it is clear that such an assumption on $b_1$ leaves aside the pointwise delay case which we treat here.
\subsubsection{Infinite dimensional reformulation}
\label{subsection-infdimref}
Now, using the approach of \cite{VK} (see \cite{GM} for the stochastic case), we reformulate equation (\ref{eq-contr-rit}) as an abstract stochastic differential equation in the Hilbert space $H=\mathbb R^n\times L^2([-d,0],\mathbb R^n)$.
To this end we introduce the operator $A_1 : {\cal D}(A_1) \subset H \rightarrow H$ as follows: for $x=(x_0,x_1)\in H$, \begin{equation}\label{A1}
A_1x=( a_0 x_0 +x_1(0), -x_1'), \quad {\cal D}(A_1)=\left\lbrace x\in H :x_1\in W^{1,2}([-d,0],\mathbb R^n), x_1(-d)=0 \right\rbrace.
\end{equation}
We denote by $A_1^*$ the adjoint operator of $A_1$:
\begin{equation}
\label{Astar}
A_1^{*}x=( a_0^* x_0, x_1'), \quad {\cal D}(A_1^{*})=\left\lbrace x\in H:x_1\in W^{1,2}([-d,0],\mathbb R^n), x_1(0)=x_0 \right\rbrace .
\end{equation}
We denote by $e^{tA_1}$ the $C_0$-semigroup generated by $A_1$. For $x\in H$ we have
\begin{equation}
e^{tA_1} \left(\begin{array}{l}x_0 \\x_1\end{array}\right)=
\left(
\begin{array}
[c]{ll}
e^{ta_0 }x_0+\int_{-d}^{0}1_{[-t,0]} e^{(t+s)a_0 } x_1(s)ds \\[3mm]
x_1(\cdot-t)1_{[-d+t,0]}(\cdot).
\end{array}
\right) \label{semigroup}
\end{equation}
Similarly, denoting by $e^{tA_1^*}=(e^{tA_1})^*$ the $C_0$-semigroup generated by $A_1^*$,
we have for
$z=\left(z_0,z_1\right)\in H$
\begin{equation}
e^{tA_1^*} \left(\begin{array}{l}z_0 \\z_1\end{array}\right)=
\left(
\begin{array}[c]{ll}
e^{t a_0^* }z_0 \\[3mm]
e^{(\cdot+t) a_0^* }z_0 1_{[-t,0]}(\cdot) +z_1(\cdot+t)1_{[-d,-t)}(\cdot).
\end{array}
\right) \label{semigroupadjoint}
\end{equation}
The infinite dimensional noise operator is defined as
\begin{equation}
\label{G}
G:\mathbb R^{k}\rightarrow H,\qquad Gy=(\sigma y, 0), \; y\in\mathbb R^k.
\end{equation}
The control operator $B_1$ is defined as
(here the control space is $K:=\mathbb R^m$ and we denote by $C'([-d,0],\mathbb R^n)$
the dual space of $C([-d,0],\mathbb R^n)$)
\begin{equation}
\label{Bnotbdd}
\begin{array}{c}
B_1:\mathbb R^{m}\rightarrow \mathbb R^n \times C'([-d,0],\mathbb R^n),
\\[2mm]
(B_1u)_0=b_0 u,
\quad \left\langle f,(B_1u)_1\right\rangle _{C,C'} = \displaystyle \int_{-d}^0\left\langle f(\xi),b_1(d\xi )u\right\rangle , \quad u\in\mathbb R^m, \quad
f \in C([-d,0],\mathbb R^n).
\end{array}
\end{equation}
The adjoint $B_1^*$ is
\begin{equation}
\label{B*notbdd}
\begin{array}{c}
B_1^*:\mathbb R^n \times C''([-d,0],\mathbb R^n) \rightarrow \mathbb R^{m},
\\[2mm]
B_1^*(x_0,x_1)=
b^*_0 x_0+\displaystyle \int_{-d}^0 b_1^*(d\xi)x_1(\xi)
,
\; (x_0,x_1)\in \mathbb R^n \times C([-d,0],\mathbb R^n),
\end{array}
\end{equation}
where we have denoted by $C''([-d,0],\mathbb R^n)$ the dual space of $C'([-d,0],\mathbb R^n)$, which contains $C([-d,0],\mathbb R^n)$: here we consider $B_1^*$ acting on $C([-d,0],\mathbb R^n)$, for a characterization of $C''([-d,0],\mathbb R^n)$, and for the inclusion of $C([-d,0],\mathbb R^n)$ in $C''([-d,0],\mathbb R^n)$ see e.g. \cite{kaplan1}, \cite{kaplan2}, \cite{kaplan3} and \cite{Shannon}. If $b_1$ satisfies (\ref{eq:b1restrictive}), $B$ is a bounded operator from $\mathbb R^n$ to $H$, and we can easily write $e^{tA_1}B$: see \cite{FGFM-I}.
\\
If $b_1$ is as in Hypothesis \ref{hp:delaystate}-(iv), then $B$ is unbounded. Still it is possible to write $e^{tA_1}B$ by extending the semigroup, by extrapolation, to $\mathbb R^n\times C'([-d,0];\mathbb R^n)$.
We have, for $u \in \mathbb R^m$
\begin{equation}\label{eq:etAB}
\left(e^{tA_1}B_1\right)_0:\mathbb R^m \rightarrow \mathbb R^n,\qquad \left(e^{tA_1}B_1\right)_0 u=
e^{ta_0}b_0u+ \int_{-d}^0 1_{[-t,0]}e^{(t+r)a_0}b_1(dr)u,
\end{equation}
\begin{equation}\label{eq:etAB1}
\left(e^{tA_1}B_1\right)_1:\mathbb R^m \rightarrow C'([-d,0];\mathbb R^n),\qquad \left\langle f,\left(e^{tA_1}B_1\right)_1 u\right\rangle _{C,C'}=
\int_{-d}^0 f(r+t) 1_{[-d,-t]}b_1(dr)u.
\end{equation}
Let us now define the predictable process
$Y=(Y_0,Y_1):\Omega \times [0,T]\rightarrow H$ as
$$
Y_0(s)=y(s), \qquad Y_1(s)(\xi)=\int_{-d}^\xi b_1(d\zeta)u(\zeta+s-\xi),
$$
where $y$ is the solution of \eqref{eq-contr-rit}
and $u\in {\cal U}$ is the control process.
By \cite[Proposition 2]{GM}, the process $Y$
is the unique mild solution of the abstract evolution equation
in $H$
\begin{equation}
\left\{
\begin{array}
[c]{l}
dY(s) =A_1Y(s) ds+B_1u(s) ds+GdW(s)
,\text{ \ \ \ }s\in[0,T] \\
Y(0) =x=(x_0,x_1),
\end{array}
\right. \label{eq-astr}
\end{equation}
where $x_1(\xi)=\displaystyle \int_{-d}^\xi b_1(d\zeta)u_0(\zeta-\xi)$, for $\xi\in [-d,0)$, and $u_0$ has been introduced in \eqref{eq-contr-rit} as the initial condition of the control process. Since we have assumed $u_0\in L^2([-d,0], \mathbb R^m)$,
we have $x_1\in L^2([-d,0];\mathbb R^n)$\footnote{This can be seen, e.g., by a simple application of the Jensen inequality and the Fubini theorem.}. The mild (or integral) form of (\ref{eq-astr}) is
\begin{equation}
Y(s) =e^{(s-t)A_1}x+\int_t^se^{(s-r)A_1}B_1 u(r) dr +\int_t^se^{(s-r)A_1}GdW(r)
,\text{ \ \ \ }s\in[t,T]. \\
\label{eq-astr-mild}
\end{equation}
Here, similarly to what happens in the previous example (see Subsection \ref{SSSE:SEBC}), we may say that the image of $B$ is not contained in $H$ but in a space larger than $H$ (here $\mathbb R^n\times C'([-d,0],\mathbb R^n)$) which we will call $\overline{H}$.
\subsubsection{The optimal control problem}
\label{SSSE:OCdelay}
Similarly to the previous section the objective is to minimize, over all control strategies in ${\cal U}$, a finite horizon cost:
\begin{equation}\label{eq:costoconcretodelay}
\bar J(t,y_0,u_0;u(\cdot))=\mathbb E \left[\int_t^T \left[\ell_0(s)+\ell_1(u(s))\right]\,ds + \bar\phi(y(T;t,x))\right]
\end{equation}
under the following assumption
\begin{hypothesis}\label{hp:delaycost}
\begin{itemize}
\item[]
\item[(i)] $\ell_0:[0,T]\rightarrow \mathbb R$ is measurable.
\item[(ii)] $\ell_1:U\rightarrow \mathbb R$ is measurable and bounded from below.
\item[(iii)] $\bar\phi:\mathbb R^n\rightarrow \mathbb R$ is measurable and bounded.
\end{itemize}
\end{hypothesis}
Such cost functional, using the infinite dimensional reformulation given above, can be rewritten as
\begin{equation}\label{eq:costoastrattodelay}
J(t,x;u(\cdot))=\mathbb E \left[\int_t^T \left[\ell_0(s)+\ell_1(u(s))\right]\,ds +
\phi(Y(T;t,x))\right]
\end{equation}
where $\phi:H\rightarrow \mathbb R$ is defined as $\phi(x_0,x_1)=\bar\phi(x_0)$
for all $x=(x_0,x_1)\in H$. Note again that here the current cost does not depend on the
state, again this is due to the fact that putting the dependence on the state in the current cost would increase considerably the technical arguments in the solution of the HJB equation.
\newline The value function of the problem is
\begin{equation}\label{valuefunction-delay}
V(t,x):= \inf_{u \in {\cal U}}J(t,x;u).
\end{equation}
The Hamiltonians can be defined exactly in the same way as in Subsubsection \ref{SSSE:OCBC} and (using the modified Hamiltonians introduced in \eqref{eq:modham})
the associated HJB equation is formally written as
\begin{equation}\label{HJBformale-delay}
\left\{\begin{array}{l}\displaystyle
-\frac{\partial v(t,x)}{\partial t}={\cal A} [v(t,\cdot)](x) +\ell_0(t)+
H_{min} (B_1^*\nabla v(t,x)),\qquad t\in [0,T],\,
x\in H,\\
\\
\displaystyle v(T,x)=\phi(x),
\end{array}\right.
\end{equation}
where $B_1$ is defined in {\eqref{Bnotbdd}}, and ${\cal A}$ is the infinitesimal generator of the transition
semigroup $(R_{t})_{0 \leq t\leq T}$ associated to the process $Y$ when the control is zero: namely ${\cal A}$ is formally defined by
\begin{equation}\label{eq:ell}
{\cal A}[f](x)=\frac{1}{2} Tr \; GG^* \nabla^2f(x)
+ \left\langle x,A_1^*\nabla f(x)\right\rangle .
\end{equation}
On the same line of Subsubsection \ref{SSSE:OCBC} the candidate optimal feedback map, if it exists, is a function of $B_1^*\nabla v$.
\section{$C$-derivatives}\label{subsection-C-directionalderivatives}
In this Section we introduce the definition of generalized partial derivatives
(that we call $C$-directional derivatives, where $C$ is a suitable linear operator)
which is suitable for our needs.
$C$-directional derivatives of functions have been introduced in
\cite[Section 2]{Mas}, \cite{FTGgrad} in the case when $C$ is a bounded operator (see also \cite{FGFM-I,FGFM-II}),
and in \cite{FedericoGozziJDE},
\cite[Section 4.2.1]{FabbriGozziSwiech} in the case when $C$ is possibly unbounded.
Our definition is different from the ones recalled above and is designed to cover a wider class of ``unbounded'' examples, in particular it makes it possible to treat the case when the intersection of image of $C$ and the state space is just the origin, which is, e.g., the case of our examples of Section \ref{SE:EXCONTROL} which were not treatable within the previous setting.
We also recall that concepts which are connected to the one of $C$-directional derivative are the one of Fomin derivative
(see, e.g., \cite[Chapter 3]{BogachevAMS2010} and, recently, \cite{LunardiRockner20}) and the one of derivative
in the directions of a proper subspace
(see e.g. \cite{Gross1967}).
Here is our new definition. The operator $C$ is still ``possibly unbounded'' in the sense that it does not take its values in the state space $H$ but in a larger Banach space $\overline H$ such that $H\subset \overline H$ with continuous embedding.
\begin{definition}
\label{df4:Gderunbounded} Let $H, \,Z,\, K$ and $\overline H$ be
Banach spaces such that $H\subset \overline H$ with continuous embedding.
Let $C:K\rightarrow \overline H$ be a linear and bounded operator.
\begin{itemize}
\item[(i)]
Let $k\in K$ and let $f:\overline H\rightarrow Z$.
We say that $f$ admits $C$-directional derivative
at a point $x\in \overline{H}$ in the direction $k\in K$
(and we denote it by $\nabla^{C}f(x;k)$) if the limit, in the norm topology of $Z$,
\begin{equation}\label{Cderivatabis}
\nabla^{C}f(x;k):=\lim_{s\rightarrow 0}
\frac{f(x+s Ck)-f(x)}{s},
\end{equation}
exists.
\item[(ii)]
Let $f:\overline H\rightarrow Z$.
We say that $f$ is $C$-G\^ateaux differentiable
at a point $x\in \overline{H}$ if $f$ admits the $C$-directional derivative in every
direction $k\in K$ and there exists a {\bf bounded} linear operator,
the $C$-G\^ateaux derivative $\nabla^C f(x)\in {\cal L}(K,Z)$, such that $\nabla^{C}f(x;k) =\nabla^{C}f(x)k$
for all $k \in K$. We say that $f$ is $C$-G\^ateaux
differentiable on $H$ (respectively $\overline{H}$) if it is $C$-G\^ateaux differentiable at every point $x\in H$
(respectively $x\in\overline{H}$).
\item[(iii)]
Let $f:\overline H\rightarrow Z$.
We say that $f$ is $C$-Fr\'echet differentiable
at a point $x\in \overline{H}$ if it is $C$-G\^ateaux differentiable and if the limit
in (\ref{Cderivatabis}) is uniform for $k$ in the unit ball of $K$.
In this case
we call $\nabla^C f(x)$ the $C$-Fr\'echet derivative (or simply the $C$-derivative) of $f$ at $x$. We say that $f$ is $C$-Fr\'echet differentiable on $H$ (respectively $\overline{H}$) if it is $C$-Fr\'echet differentiable at every point $x\in H$ (respectively $x\in\overline{H}$).
\end{itemize}
\end{definition}
\begin{remark}
\label{rm:Gderunbounded1}
{\rm
The main idea behind the use of $C$-derivatives (starting from the papers \cite{Mas} and \cite{FTGgrad}) lies in the fact that, in applying the dynamic programming approach to optimal control problems which are linear in the control (with control operator $C:U \rightarrow H$ where $U$ is the control space and $H$ is the state space), the natural regularity requirement needed on the value function $V$ to write the optimal feedbacks is that $\nabla^C V$ is well defined.\footnote{Here we are simplifying a bit since, as one can read in
\cite[Section 4.8.1.4]{FabbriGozziSwiech} (in particular equation (4.294)), the operator $C$ in the gradient may be chosen a bit differently, and the linearity in the control can be weakened without affecting the main issues.} This means that only directional derivatives in the directions of the image of $C$ matter for the purpose of writing optimal feedback controls. In some cases, like the distributed control of heat equation (see e.g. \cite[Section 2.6.1]{FabbriGozziSwiech}), the image of $C$ is contained in the state space (call it $H$), so $\nabla^C V$ is always well defined when $\nabla V$ exists. In some other cases, like the boundary control or the pointwise delayed control (see e.g. \cite[Sections 2.6.2 and 2.6.8]{FabbriGozziSwiech}) the image of the control operator $C$ is not contained in $H$ and it may even happen that the intersection of this image with $H$ is only the origin, which is the case of the driving examples of this paper.
One strategy, used e.g. in \cite{FedericoGozziJDE,FedericoGozziAAP} and in \cite[Section 4.8]{FabbriGozziSwiech} to deal with such cases is to decompose the control operator $C$ in the product $C_1C_2$ where $C_2:K\rightarrow H$ is bounded while the ``unbounded part'' $C_1$ is a closed unbounded operator $C_1:D(C_1)\subseteq H \rightarrow H$ which usually is a power of the operator $A$ driving the state equation.
In this case the derivative needed to express the feedback
control is $\nabla^{C_1} V$ which is defined exactly as
in \cite[Definition 2.2]{FedericoGozziJDE} or
\cite[Definition 4.4]{FabbriGozziSwiech}.\footnote{Note that \cite[Definition 4.4]{FabbriGozziSwiech} is more general
than our Definition in the sense that it allows the
operator $C$ to depend on the state variable $x \in H$. This could be performed here with ideas similar to what is done in \cite[Section 4.2]{FabbriGozziSwiech}.
We do not do this since it would increase the technicalities
without changing the main ideas which we want to make clear for the reader.}
In such setting, due to the boundedness required
in \cite[Definition 2.2-(ii)]{FedericoGozziJDE}, asking that $\nabla^{C_1} V$ exists
substantially means that we consider the directional derivatives
of $V$ in the directions of $\operatorname{Im} \bar C_1$ where $\bar C_1$ is the
extension of $C_1$ from the whole $H$ to a suitable extrapolation space.
The image of $\bar C_1$ contains
(but can be much larger than) the one of the control
operator $C$\footnote{For example, in the case of Neumann or Dirichlet boundary control in dimension 1, the image of $C$ is two-dimensional while the one of $\bar C_1$ is infinite dimensional.}.
The approach used here is sharper in the sense that we look exactly at the derivatives in the directions of the image of $C$, even if they go out of the state space $H$. In this way we also avoid working with the decomposition of the operator $C$, which is not sharp for our purposes, in particular in the case of pointwise delayed control of Subsection \ref{SSE:DELAYEQUATION} since in this case fractional powers of $A$ are not well defined.\footnote{A similar issue would arise if we consider boundary control problems where the driving operator $A$ is of first order, like in the case of age-structured problems, see e.g., in the deterministic case, \cite{FaggianGozziKort}.}
}
{
\hbox{\enspace${ \mathchoice\sqr54\sqr54\sqr{4.1}3\sqr{3.5}3}$}}
\end{remark}
\begin{remark}
\label{rm:Gderunbounded2}
\rm
Definition \ref{df4:Gderunbounded} is exactly the definition of
$C$-derivative contained in \cite[Section 2]{Mas}, \cite{FTGgrad} when
$\overline{H}=H$. This means
that, in this case, the classical G\^ateaux or Fr\'echet differentiability
implies the $C$-G\^ateaux or $C$-Fr\'echet differentiability.
In \cite[Definition 4.4]{FabbriGozziSwiech} the operator $C$ is a closed, possibly unbounded, linear operator
$C:D(C)\subseteq K \rightarrow H$.
This case can be partly embedded in the one we consider in Definition \ref{df4:Gderunbounded}.
We explain now why, restricting to the case when $H$ is reflexive, which is true in our examples.
Let $C^*:D(C^*)\subseteq H' \rightarrow K'$ be the adjoint of $C$ defined in the usual way through the duality $\left\langle C^*h,k\right\rangle _{K',K}=\left\langle h,Ck\right\rangle _{H',H}$, $\forall\,k\in D(C) ,\, \forall\,h\in D(C^*) $.
By \cite[Theorem 5.29]{Kato76}, since $H$ is reflexive, we know that $C^*$ is densely defined.
Let
\begin{equation}\label{eqnuova}
E:=D(C^*)=\left\lbrace e \in H' : \exists \, a>0:\,\forall\, k\in D(C)\, \vert\left\langle Ck, e\right\rangle _{H,H'}\vert\leq a\vert k\vert_{K}\right\rbrace\subseteq H',\end{equation}
endowed with the usual graph norm, i.e.
$$
\|w\|_E:=\|w\|_{H'}+\|C^*w\|_{K'}, \qquad \forall w \in E.
$$
Let then $E':=D(C^*)'$. Clearly, by \eqref{eqnuova} and duality,
$H''\subseteq E'$.
Then, by the canonical embedding of the bidual we have
$H \subseteq H''\subseteq E'$.
We extend, by extrapolation (see e.g.
\cite[\S II.5]{EngelNagelBook} for the general theory and
\cite[\S 3.3]{FaggianGozziKort} or \cite{Faggian2005,FaggianDCDIS} for specific cases)
$C$ to a continuous operator $\widetilde C:K\rightarrow E'$ setting, for $k \in K$ and $y\in E$,
$$
\left\langle \widetilde Ck,y\right\rangle _{E',E}= \left\langle k,C^*y\right\rangle _{K,K'}.
$$
Continuity of $\widetilde C$ immediately follows observing that
$$
|\left\langle \widetilde Ck,y\right\rangle _{E',E}|\le |\left\langle k,C^*y\right\rangle _{K,K'}|
\le |k|_K |C^*y|_{K'}
$$
and taking the supremum over all $y\in E$ in the unit ball.\footnote{
Notice that the second adjoint operator $C^{**}: D(C^{**})\subset K''\rightarrow H''$ is defined through the equality:
$$
\left\langle k,C^*y\right\rangle _{K,K'}= \left\langle C^{**}k,y\right\rangle _{H'',H'}\;\forall\,k\in D(C^{**}) ,\, \forall\,y\in D(C^*) ;
$$
with $D(C^{**})$ defined analogously to \eqref{eqnuova}. So $ \widetilde C$ and $C^{**}$ are operators acting and taking values on different spaces:
$$
\left\langle \widetilde Ck,y\right\rangle _{E',E}=\left\langle C^{**}k,y\right\rangle _{H'',H'}.
$$
}
\newline In this context we now compare
\cite[Definition 4.4]{FabbriGozziSwiech}
for $C$ and Definition \ref{df4:Gderunbounded} for the corresponding extension $\widetilde C$.
Indeed we observe that
\cite[Definition 4.4-(i)]{FabbriGozziSwiech}
says, at point (i) (definition of directional derivatives):
\emph{``The $C$-directional
derivative of $f$ at a point $x\in H$ in the direction $k\in D(C)\subseteq K$ is defined as:
\begin{equation}
\nabla^{C}f(x;k):=\lim_{s\rightarrow 0}
\frac{f(x+s Ck)-f(x)}{s},\text{ }s\in\mathbb{R},
\label{Cderivata}
\end{equation}
provided that the limit exists.''}
But $k\in D(C)$, in the setting introduced above
means that $\widetilde Ck\in H$.
Hence, concerning point (i), Definition
\ref{df4:Gderunbounded} extends
\cite[Definition 4.4]{FabbriGozziSwiech}.
Finally we observe that, when the image of the operator $C$ crosses
$H$ only at the origin, then,
\cite[Definition 4.4]{FabbriGozziSwiech} cannot
be used while Definition \ref{df4:Gderunbounded} is still fit.
{
\hbox{\enspace${ \mathchoice\sqr54\sqr54\sqr{4.1}3\sqr{3.5}3}$}}
\end{remark}
\begin{remark}
\label{rm:Gderunbounded3}
{\rm
Observe that, similarly to what observed in \cite[Remark 4.5]{FabbriGozziSwiech} for Definition 4.4 (see also \cite[Definition 2.2]{FedericoGozziJDE}), even if $f$ is Fr\'echet differentiable at $x \in H$, the $C$-derivative may not exist in such point.
This is obvious if we take, e.g., $f(x)=|x|^2$, $C:K\rightarrow \bar H$
with $\operatorname{Im} C \not\subseteq H$. If $k\in K$ is such that $Ck\not\in H$
clearly $\nabla^{C}f\left( x;k\right)$ does not exist.}
{
\hbox{\enspace${ \mathchoice\sqr54\sqr54\sqr{4.1}3\sqr{3.5}3}$}}
\end{remark}
We are now in position to define suitable spaces of $C$-differentiable functions.
\begin{definition}
\label{df4:Gspaces}
Let $I$ be an interval in $\mathbb R$, let $H$, $\overline{H}$, $K$ and $Z$ be suitable real Banach spaces. Moreover let $H\subset \overline{H}$ with continuous inclusion, and let $C\in {\cal L}(K,\overline{H})$.
\begin{itemize}
\item
We call $C^{1,C}_{b}(\overline{H},Z)$ the space of all continuous and bounded functions $f:\overline H\rightarrow Z$ which admit continuous and bounded $C$-Fr\'echet derivative.
Moreover we call $C^{0,1,C}_b(I\times \overline{H},Z)$ the space of
continuous and bounded functions $f:I\times \overline H\rightarrow Z$ such that
for every $t\in I$, $f(t,\cdot)\in C^{1,C}_b(\overline{H},Z)$ and
$\nabla^C f\in C_b\left(I\times \overline{H},L(K,Z)\right)$.
When $Z=\mathbb R$ we write $C^{1,C}_{b}(\overline{H})$ instead of $C^{1,C}_{b}(\overline{H},Z)$, and it turns out that if $f\in C^{1,C}_{b}(\overline{H})$, then $\nabla^C f\in C_b\left(\overline{H},K'\right)$.
\item
For any $\alpha\in(0,1)$ and $T>0$ (this time $I$ is equal to $[0,T]$) we denote by
$C^{0,1,C}_{\alpha}([0,T]\times \overline{H},Z)$
the space of functions
$f\in C_b([0,T]\times H,Z)\cap
C^{0,1,C}_b((0,T]\times \overline{H},Z)$\footnote{Note that here
$f(t,\cdot)$ is well defined only in $H$ when $t=0$, while for $t>0$ it is defined over $\overline{H}$. The reason is that the Ornstein-Uhlenbeck semigroup in our examples and in our setting (and consequently the solution of the HJB equation) satisfies the same property.}
such that
the map $(t,x)\mapsto t^{\alpha} \nabla^C f(t,x)$
belongs to $C_b((0,T]\times \overline{H},{\cal L}(K,Z))$.
When $Z=\mathbb R$ we omit it.
The space $C^{0,1,C}_{\alpha}([0,T]\times \overline{H},Z)$
is a Banach space when endowed with the norm
\[
\left\Vert f\right\Vert _{C^{0,1,C}_{\alpha}([0,T]\times \overline{H},Z) }=\sup_{(t,x)\in (0,T]\times \overline{H}}
\vert f(t,x)\vert+
\sup_{(t,x)\in (0,T]\times \overline{H}} t^{\alpha }\left\Vert \nabla^C f(t,x)\right\Vert_{{\cal L}(K,Z)}.
\]
When clear from the context we will write simply
$\left\Vert f\right\Vert _{C^{0,1,C}_{\alpha}}$.
\end{itemize}
\end{definition}
\section{Partial smoothing for Ornstein-Uhlenbeck semigroups}
\label{sec:partsmooth-abstr-setting}
In this section we study the ``partial smoothing'' properties of the Ornstein-Uhlenbeck semigroup (which we call $R_t$, for $t\ge 0$) applied to a generic function $f$ weakening the definition of ``smoothing'' given, e.g., in \cite{DPZ91} (see also \cite[Chapter 9]{DP1}).
Note that a type of partial smoothing has been already developed, e.g., in \cite[Ch.4]{FabbriGozziSwiech} and in \cite{FGFM-I,FGFM-II}.
As said above, the main difference here is that the directions along which we take the derivative can go out of the state space $H$ and this allows to treat in sharper way the control problems exposed in Section \ref{SE:EXCONTROL}.
The following basic assumption holds throughout this section.
\begin{hypothesis}\label{ip-sde-common}
\begin{enumerate}[(i)]
\item[]
\item
Let $H$, $K$, $\Xi$ be three real separable Hilbert
spaces\footnote{These will be usually the state space,
the control space and the noise space, respectively.}.
\item
Let $(\Omega, {\cal F},({\cal F}_t)_{t\geq 0}, \mathbb P)$ be a filtered probability space
satisfying the usual conditions and let $W$ be an
$(\Omega, {\cal F},({\cal F}_t)_{t\geq 0}, \mathbb P)$-cylindrical Wiener process in $\Xi$ where $({\cal F}_t)_{t\geq 0}$ is the augmented filtration generated by $W$.
\item
Let $A:D(A)\subseteq H \rightarrow H$ be the generator of a strongly continuous semigroup
$e^{tA},\, t\geq 0$ in $H$;
\item
Let $G\in{\cal L}(\Xi,H)$ be such that the selfadjoint operator
\begin{equation}\label{cov-gen}
Q_t=\int_0^t e^{sA}GG^*e^{sA^*}\,ds
\end{equation}
is trace class. We call $Q=GG^*\in {\cal L}(H)$.
\end{enumerate}
\end{hypothesis}
Let $Z(\cdot;x)$ be the Ornstein-Uhlenbeck process
which solves the following SDE in $H$:
\begin{equation}\label{ornstein-gen}
\left\lbrace\begin{array}{l}
dZ(t)=AZ(t)dt+GdW(t),\\
Z(0)=x.
\end{array}\right.
\end{equation}
The process $Z(\cdot;x)$ is to be considered in its mild formulation:
\begin{equation}
Z(t;x) =e^{tA}x +\int_0^te^{(t-s)A}GdW(s)
,\text{ \ \ \ }t\ge 0. \\
\label{ornstein-mild-gen}
\end{equation}
$Z$ is a Gaussian process, namely for every $t>0$, the law of
$Z(t)$ is ${\cal N} (e^{tA}x,Q_t)$, the Gaussian measure with mean $e^{tA}x$ and
covariance operator $Q_t$ defined in (\ref{cov-gen}).
The convolution $\int_0^te^{(t-s)A}GdW_s$ has law
${\cal N} (0,Q_t)$ and will be sometimes denoted by $W_A(t)$.
The associated Ornstein-Uhlenbeck transition semigroup $R_t$ is defined by setting, for every $\psi\in B_b(H)$ and $x\in H$,
\begin{equation}
\label{ornstein-sem-gen}
R_t[\psi](x)=\mathbb E \psi(Z(t;x))
=\int_H \psi(z+e^{tA}x){\cal N}(0,Q_t)(dz).
\end{equation}
To study regularizing properties in the directions of an ``unbounded'' operator $C$ (as introduced in Section \ref{subsection-C-directionalderivatives}), and for functions that have a special dependence on the state, through an operator $P$ that we are going to introduce, we assume the following.
\begin{hypothesis}\label{ip:PC}
\begin{enumerate}[(i)]
\item[]
\item
Let $\overline H$ be a real Banach space such that
$H\subseteq \overline{H}$ with continuous and dense inclusion
and that the semigroup $e^{tA}$ admits an extension $\overline{e^{tA}}:\overline{H}\rightarrow \overline{H}$ which is still a $C_0$ semigroup.
\item
Let $C\in {\cal L}(K, \overline H)$.
\item Let $P:H \rightarrow H$ be linear and continuous.
Assume that, for every $t>0$ the operator $Pe^{tA}:H \rightarrow H$ can be extended to a continuous linear operator $\overline H\rightarrow H$, which will be denoted by $\overline{Pe^{tA}}$. With this notation the operator $\overline{Pe^{tA}}C:K\rightarrow H$ is well defined and continuous.
\end{enumerate}
\end{hypothesis}
We now provide two remarks on the above hypothesis: the first on the adjoint of $Pe^{tA}$, the second one on the validity of such hypothesis in our examples.
\begin{remark}\label{rm:adjointPeta}
In the framework of the above Hypothesis \ref{ip:PC}
it is natural to identify $H$ with its topological dual $H'$
and consider the Gelfand triple
$$
\overline{H}'\subseteq H\subseteq\overline{H}.
$$
The adjoint
of the operator $Pe^{tA}:H\rightarrow H$ which is, clearly,
$e^{tA^*}P^*:H\rightarrow H$, indeed takes its values in
$\overline{H}'$ and is, consequently, the adjoint
$$
\left(\overline{Pe^{tA}}\right)^*: H\rightarrow \overline{H}'
$$
of the extended operator $\overline{Pe^{tA}}$.
\\
Indeed, consider $\{x_n\}\subset H$ such that, in the topology of $\overline{H}$, we have $x_n \rightarrow \bar x\in \overline H$.
We know, by Hypothesis \ref{ip:PC}, that
$Pe^{tA}x_n \rightarrow \overline{Pe^{tA}}\bar x$, hence,
for every $y \in H$,
$$
\left\langle x_n,e^{tA^*}P^*y\right\rangle _H=\left\langle Pe^{tA}x_n,y\right\rangle _H \rightarrow \left\langle \overline{Pe^{tA}}\bar x,y\right\rangle _H.
$$
Hence, the continuous linear form $\pi_y$ on $H$ (represented, with the Riesz identification on $H$, by $e^{tA^*}P^*y$) given by
$$
\pi_y:H\rightarrow \mathbb R, \qquad \pi_y (h)=\left\langle h,e^{tA^*}P^*y\right\rangle _H, \qquad h\in H,
$$
can be extended to a continuous linear form
$$
\bar \pi_y:\overline{H} \rightarrow \mathbb R, \qquad
\bar\pi_y (\bar h)=\left\langle \overline{Pe^{tA}}\bar h,y\right\rangle _H,
\qquad \bar h\in \overline{H},
$$
with $|\bar\pi_y (\bar h)|\le |\overline{Pe^{tA}}|_{{\cal L}(\overline{H},H)}
|\bar h|_{\overline{H}} |y|_H$.
This is equivalent to saying that, under the Riesz identification of $H$ with $H'$,
$e^{tA^*}P^*y\in \overline{H}'\subseteq H$.
\end{remark}
\begin{remark}\label{rm:computeexamples}
In the case of Subsection \ref{SSE:HEATEQUATION}
the above Hypotheses \ref{ip-sde-common} and \ref{ip:PC}
are satisfied if we choose, as seen in Subsection
\ref{SSE:HEATEQUATION},
$$
H=L^2({\cal O}), \qquad
\overline H= {\cal D}\left((-A_0)^{-3/4-\varepsilon}\right)
=H^{-3/2-2\varepsilon}({\cal O})
\quad \hbox{(for suitable small $\varepsilon>0$),}
$$
$A=A_0$, $C=B=(-A_0)D$ as from \eqref{notazioneB}, $P$ any continuous operator $H\rightarrow H$
(we will later take $P$ to be a finite dimensional projection whose image is contained in
${\cal D}\left((-A_0)^{-\eta}\right)$ for some $\eta\ge 0$).
Since we can extend immediately $e^{tA_0}$ to
$$
\overline{e^{tA_0}}:\overline H \rightarrow H
$$
then, in this case, $\overline{Pe^{tA}}=P\overline{e^{tA}}$.
In the case of Subsection \ref{SSE:DELAYEQUATION}
the above Hypotheses \ref{ip-sde-common} and \ref{ip:PC}
are satisfied if we choose
$$
H=\mathbb R^n \times L^2(-d,0;\mathbb R^n),\qquad
\overline H=\mathbb R^n \times C'([-d,0];\mathbb R^n),
$$
(but also $\overline H=\mathbb R^n \times W^{-1,2}([-d,0];\mathbb R^n)$ can be chosen),
$A=A_1$, $C=B$ as from \eqref{Bnotbdd}, and $P(x_0,x_1)=(x_0,0)$. Here the embedding of $L^2([-d,0];\mathbb R^n)\subset C'([-d,0];\mathbb R^n)$ is to be considered in the following sense: to any $f\in L^2([-d,0];\mathbb R^n)$ we associate the measure $\mu_f\in C'([-d,0];\mathbb R^n)$ such that $\mu_f(d\xi)=f(\xi)d\xi$.
\newline Note that
$$
\operatorname{Im }P=\mathbb R^n \times \{0\}.
$$
Moreover, by \myref{semigroup}, we have, for $x=(x_0,x_1) \in H$,
$$
Pe^{tA}x=\left(
e^{ta_0 }x_0+\int_{-d}^{0}1_{[-t,0]} e^{(t+s)a_0 } x_1(s)ds,
0 \right).
$$
Hence, also in this case, we can extend immediately $Pe^{tA}$ to
$$
\overline{Pe^{tA}}:\overline H \rightarrow H
$$
by setting, for $x=(x_0,x_1) \in \mathbb R^n \times C'([-d,0];\mathbb R^n)$
\begin{equation}
\overline{Pe^{tA}}x=\left(
e^{ta_0 }x_0+\int_{-d}^{0}1_{[-t,0]} e^{(t+s)a_0 } x_1(ds),
0 \right).
\label{PetAbardelay}
\end{equation}
Hence, also here Hypothesis \ref{ip:PC}-(iii) is satisfied.
Notice that in the second example $P$ can be immediately extended to $\overline{P}:\overline{H}\rightarrow H$ so $\overline{Pe^{tA}}=\overline{P}\,\,\overline{e^{tA}}$
while in the first example $P$ may not admit such an extension
(it does when $P$ is a finite dimensional projection).
Finally notice that in both examples we have
$\operatorname{Im} \overline{Pe^{tA}}\subseteq \operatorname{Im} P$.
{
\hbox{\enspace${ \mathchoice\sqr54\sqr54\sqr{4.1}3\sqr{3.5}3}$}}
\end{remark}
We pass to define the spaces where our ``initial'' data will belong.
\begin{definition}\label{df:spaziphi1}
We call $B_b^P(H)$ (respectively $C_b^P(H)$, $UC_b^P(H)$) the set of functions $\phi:H\rightarrow \mathbb R$ for which there exists
$\bar\phi : \operatorname{Im}(P)\rightarrow \mathbb R$
bounded and Borel measurable (respectively continuous, uniformly continuous)\footnote{Here we endow $\operatorname{Im} P\subseteq H$ with the topology inherited from $H$.} such that
\begin{equation}\label{fi-gen-allargata}
\phi(x)=\bar\phi(Px) \quad
\forall x\in H.
\end{equation}
\end{definition}
\begin{remark}\label{rm:ipPCunifcont}
We observe that, in the above Definition \ref{df:spaziphi1}, when $\bar\phi : \operatorname{Im}(P)\rightarrow \mathbb R$
is Borel measurable (respectively continuous, uniformly continuous), then also $\phi$ is Borel measurable (respectively continuous, uniformly continuous). Hence we can easily see that $B^P_b(H)$ (respectively $C^P_b(H)$, $UC^P_b(H)$) is a linear subspace of $B_b(H)$ (respectively $C_b(H)$, $UC_b(H)$).
We also observe that the choice of $P$ in our driving examples
(Subsections \ref{SSE:HEATEQUATION}-\ref{SSE:DELAYEQUATION})
will consider the case when $\operatorname{Im} P$ is closed and finite dimensional.
It is then useful to recall that, when the image of $P$ is closed, we can identify the space $B_b^P(H)$ with
$B_b(Im P)$ (and the same for the others). In particular,
in the case of Subsection \ref{SSE:DELAYEQUATION},
when $Im P=\mathbb R^n\times \{0\}$,
we immediately see that
$B^{P}_b(H) \sim B_b(\mathbb R^n)$, $C^{P}_b(H) \sim C_b(\mathbb R^n)$,
$UC^{P}_b(H) \sim UC_b(\mathbb R^n)$. This will be used in the sequel.
{
\hbox{\enspace${ \mathchoice\sqr54\sqr54\sqr{4.1}3\sqr{3.5}3}$}}
\end{remark}
\noindent To prove our partial smoothing result we need
the following controllability-like assumption.
\begin{hypothesis}\label{ip:NC}
\begin{itemize}
\item[]
\item [(i)]
We have
\begin{equation}\label{eq:inclusionsmoothingC}
\operatorname{Im}\overline{Pe^{tA}}C\subseteq
\operatorname{Im}(P Q_t P^*)^{1/2},\qquad \forall t>0;
\end{equation}
Consequently, by the Closed Graph Theorem, the operator
$$
\Lambda^{P,C}(t):K\rightarrow H, \qquad
\Lambda^{P,C}(t)k:=(P Q_t P^*)^{-1/2}\overline{Pe^{tA}}Ck
\quad \forall k \in K,
$$
is well defined and bounded for all $t>0$.
\item [(ii)]
For every $T>0$ there exists $\kappa_T>0$ and $\gamma \in (0,1)$ such that
$$
\|\Lambda^{P,C}(t)\|_{{\cal L}(K,H)} \le \kappa_T t^{-\gamma}, \qquad
\forall t \in (0,T].
$$
\end{itemize}
\end{hypothesis}
Hypothesis \ref{ip:NC}-(i) is the analogue of the null controllability assumption which guarantees the strong Feller property of the associated
Ornstein-Uhlenbeck transition semigroup, see e.g. \cite{DP1} and \cite{Z}, while \ref{ip:NC}-(ii) is an assumption that guarantees that, for $t\rightarrow 0$, the operator norm of $\Lambda^{P,C}(t)$ blows up in an integrable way. Both assumptions can be verified in some models: in the following we show that the motivating examples introduced in Section \ref{SE:EXCONTROL} satisfy Hypothesis \ref{ip:NC}.
\begin{remark}\label{rm:NCexamples}
In the case of Subsection \ref{SSE:HEATEQUATION}
Hypothesis \ref{ip:NC} is satisfied, e.g., if we choose:
\begin{itemize}
\item $H, \overline{H},A,C$ as in
Remark \ref{rm:computeexamples},
\item $Q=(-A)^{-2\beta}$ for some $\beta\ge 0$
\item $P$ a projection on a finite dimensional subspace contained in ${\cal D}(-A)^{-\alpha}$ for some
$\alpha>\beta+ \mathbf frac14$.
\end{itemize}
See Appendix A.1.
\\
In the case of Subsection \ref{SSE:DELAYEQUATION}
the above Hypothesis \ref{ip:NC}
are satisfied if:
\begin{itemize}
\item we choose $H, \overline{H},A,C,P$ as in
Remark \ref{rm:computeexamples};
\item we assume that {$
\operatorname{Im}\left(e^{ta_0}b_0 +\displaystyle \int_{-d}^0 1_{[-t,0]}e^{(t+r)a_0}b_1(dr)
\right)
\subseteq\operatorname{Im}\sigma,
\quad \forall t>0.
$} We notice that this condition is verified when $\sigma$ is invertible, but it is in general a weaker assumption.
\end{itemize}
See Appendix A.2.
{
\hbox{\enspace${ \mathchoice\sqr54\sqr54\sqr{4.1}3\sqr{3.5}3}$}}
\end{remark}
Now we give the result.
\begin{proposition}\label{prop:partsmooth}
Let Hypotheses \ref{ip-sde-common}, \ref{ip:PC} and \ref{ip:NC}-(i) hold true.
\\
Then the semigroup $R_t,\,t>0$ maps functions $\phi\in B_b^P(H)$
into functions which are $C$-Fr\'echet differentiable in $\overline{H}$, and the $C$-derivative is given, for all $x \in \overline{H}$, by
\begin{align}\label{eq:formulader-gen-P}
\nabla^C(R_{t}[\phi])(x)k &=\int_{H}\bar\phi\left(z_1+Pe^{tA}x\right)
\left\langle \Lambda^{P,C}(t) k,
(PQ_tP^*)^{-1/2}z_1\right\rangle _H{\cal N}(0,PQ_tP^*)(dz_1)
\\
&=
\mathbb E\left[\bar\phi\left(PX(t;x)\right)
\left\langle \Lambda^{P,C}(t) k,
(PQ_tP^*)^{-1/2}PW_A(t)\right\rangle
\right].
\end{align}
Moreover, for any $\phi\in B^P_b(H)$ and any $k\in K$,
\begin{equation}\label{norm-Cder}
\vert \left\langle \nabla^C R_t[\phi](x), k\right\rangle \vert \leq
\Vert \Lambda^{P,C}(t) \Vert_{{\cal L}(K, H)} \Vert \phi\Vert_\infty \vert k\vert.
\end{equation}
Furthermore, if $\phi\in C^P_b(H)$, then $\nabla^C R_t[\phi]\in C((0,T]\times \overline{H};K)$.
Finally, if also Hypothesis \ref{ip:NC}-(ii) holds, then the map $(t,x)\rightarrow R_t[\phi](x)$ belongs to $C_\gamma^{0,1,C}([0,T]\times \overline{H})$.
\end{proposition}
\noindent \hbox{{\bf Proof.} }
\\
If $\phi \in B_b^P(H)$, then, by \eqref{ornstein-sem-gen},
for every $t>0$ and $x \in H$,
\begin{equation}
\label{eq:ornstein-sem-phibarCV}
R_t[\phi](x)
=\int_H \bar\phi(Pz+Pe^{tA}x){\cal N}(0,Q_t)(dz)
=\int_{H} \bar\phi(z_1+Pe^{tA}x){\cal N}(0,PQ_tP^*)(dz_1),
\end{equation}
where we used the change of variable $z_1=P z$ and the fact that the image of the measure ${\cal N}(0,Q_t)$ through
$P:H\rightarrow H$ is, clearly, ${\cal N}(0,PQ_tP^*)$.
Now notice that, defining, for $t>0$,
$$
{\cal L}_t:L^2(0,t;K)\rightarrow H, \qquad {\cal L}_t u = \int_0^t e^{(t-s)A}Gu(s)ds,
$$
we get, by simple computations, that
$$
|(P{\cal L}_t)^*x|^2=\left\langle Q_tP^*x,P^*x\right\rangle
\quad \hbox{which implies}\quad
\operatorname{Im} P{\cal L}_t= \operatorname{Im} (PQ_t P^*)^{1/2}.
$$
Hence, in particular, the image of $(PQ_t P^*)^{1/2}$ is contained in $\operatorname{Im} P$. Moreover, if Hypothesis \ref{ip:NC}-(i) holds, the above also implies that
$\operatorname{Im}\overline{Pe^{tA}}C\subseteq \operatorname{Im} P$ for all $t>0$.
Using this fact, for $\phi \in B^P_b(H)$,
$t>0$, $x \in H$, $k\in K$,
$\alpha \in \mathbb R$,
\begin{align}
R_t[\phi](x+\alpha Ck)=&\int_H
\bar\phi(Pz+\overline{Pe^{tA}}(x+\alpha Ck)){\cal N}(0,Q_t)(dz),
\nonumber
\\
=&\int_{H} \bar\phi(z_1+\overline{Pe^{tA}}(x+\alpha Ck))
{\cal N}(0,PQ_t P^*)(dz_1),
\label{eq:ornstein-sem-phibarCVnew}
\end{align}
where, in the second equality, we still use
the change of variable $z_1= Pz$.
Now we apply the change of variable
$z_2=z_1+\overline{Pe^{tA}}\alpha Ck$ to
\eqref{eq:ornstein-sem-phibarCVnew} getting that,
for every $t>0$ and $\phi \in B_b^P(H)$,
\begin{align}
&R_t[\phi](x+\alpha Ck)
=\int_{H} \bar\phi(z_2+Pe^{tA}x)
{\cal N}(\alpha\overline{Pe^{tA}} Ck,PQ_tP^*)(dz_2).
\label{eq:ornstein-sem-phibarCVnewbis}
\end{align}
Now, for $\phi\in B^P_b(H)$, $x\in H$, $k\in K$, $\alpha \in \mathbb R-\{0\}$, we get, by \eqref{eq:ornstein-sem-phibarCV}-\eqref{eq:ornstein-sem-phibarCVnewbis},
\begin{align}
\label{eq:rappincr}
\frac{1}{\alpha}
&\left[R_{t}[\phi](x+\alpha Ck)-R_{t}[\phi](x)\right]=
\\[2mm]
\notag
=&\frac{1}{\alpha}
\left[\int_{H} \bar\phi(z_1+Pe^{tA}x)
{\cal N}(\alpha\overline{Pe^{tA}} Ck,PQ_tP^*)(dz_1)
-\int_{H}\bar\phi\left(z_1+Pe^{tA}x\right)
{\cal N}\left(0,PQ_{t}P^*\right)(dz_1)\right].
\end{align}
By the Cameron-Martin theorem, see
e.g. \cite{DP3}, Theorem 1.3.6, the Gaussian measures
${\cal N}\left(\alpha\overline{Pe^{tA}} Ck,PQ_tP^*\right)$ and
$\mathcal{N}\left(0,PQ_tP^*\right)$ are equivalent if and only if
$\overline{Pe^{tA}}Ck\in\operatorname{Im}(PQ_tP^*)^{1/2}$.
In such a case, for $y \in \operatorname{Im}(PQ_tP^*)^{1/2}$, the density is
\begin{align}
&d(t,y,z):=\frac{d{\cal N}\left(y,PQ_tP^*\right)}
{d\mathcal{N}\left(0,PQ_tP^*\right) }(z)
\nonumber \\
& =\exp\left\{ \left\langle (PQ_tP^*)^{-1/2}
y,(PQ_tP^*)^{-1/2}z\right\rangle_H
-\frac{1}{2}\left|(PQ_tP^*)^{-1/2}y\right|_H^{2}\right\} .
\label{eq:density1}
\end{align}
Such density is well defined for $z\in (\ker PQ_tP^*)^\perp$
(see e.g. \cite[Proposition 1.59]{FabbriGozziSwiech}).
Hence, by \eqref{eq:rappincr},
\begin{multline}
\label{eq:incrnew}\lim_{\alpha\rightarrow 0}\frac{1}{\alpha}
\left[R_{t}[\phi](x+\alpha Ck)-R_{t}[\phi](x)\right]=
\\
\lim_{\alpha\rightarrow 0}
\int_{H}\bar\phi\left(z_1+Pe^{tA}x\right)
\frac{d(t,\alpha \overline{Pe^{tA}}Ck,z_1)-1}{\alpha}
{\cal N}(0,PQ_tP^*)(dz_1).
\end{multline}
Now we observe that, by the definition of $\Lambda^{P,C}(t)$,
$$
\frac{d(t,\alpha \overline{Pe^{tA}}Ck,z_1)-1}{\alpha}
=\frac{1}{\alpha}\left[\exp\left\{\alpha
\left\langle
\Lambda^{P,C}(t) k,(PQ_tP^*)^{-1/2}z_1\right\rangle_H
-\frac{\alpha^2}{2}
\left|\Lambda^{P,C}(t)k\right|_H^{2}\right\}
-1\right].
$$
When $\alpha \rightarrow 0$ the above limit is $\left\langle\Lambda^{P,C}(t)k,(PQ_tP^*)^{-1/2}z_1\right\rangle_H$, which makes sense for all $z_1\in (\ker PQ_tP^*)^\perp$
and is an $L^2(H;{\cal N}(0,PQ_tP^*))$ function of $z_1$
(see again, e.g., \cite[Proposition 1.59]{FabbriGozziSwiech}).
Moreover, with respect to the measure ${\cal N}(0,PQ_tP^*)(dz_1)$ the map
$$
z_1 \mapsto \mathcal{Q}_t (z_1):= \left\langle\Lambda^{P,C}(t)k,(PQ_tP^*)^{-1/2}z_1\right\rangle_H
$$
is a real valued Gaussian random variable with mean $0$ and
variance $\left|\Lambda^{P,C}(t)k\right|_H^{2}$ (see, e.g. \cite[Remark 2.2]{DPZ91}).
So in particular, for all $L>0$, $\mathbb E[e^{L|\mathcal{Q}_t| }]<+\infty$.
Now it is easy to see that
$$
\frac{d(t,\alpha \overline{Pe^{tA}}Ck,z_1)-1}{\alpha}
\le
e^{|\mathcal{Q}_t|+\left|\Lambda^{P,C}(t)k\right|_H^{2}}.
$$
Hence we can apply the dominated convergence theorem
to \eqref{eq:incrnew} getting
\begin{align*}
& \exists \lim_{\alpha\rightarrow 0}\frac{1}{\alpha}
\left[R_{t}[\phi](x+\alpha Ck)-R_{t}[\phi](x)\right]=
\\
&\lim_{\alpha\rightarrow 0}
\int_{H}\bar\phi\left(z_1+Pe^{tA}x\right)
\frac{d(t,\alpha\overline{Pe^{tA}} Ck,z_1)-1}{\alpha}
{\cal N}(0,PQ_tP^*)(dz_1)
\\[2mm]
& =\int_{H}\bar\phi\left(z_1+Pe^{tA}x\right)
\left\langle \Lambda^{P,C}(t) k, (PQ_tP^*)^{-1/2}z_1\right\rangle _H
{\cal N}(0,PQ_tP^*)(dz_1).
\end{align*}
Consequently, according to Definition \ref{df4:Gderunbounded}-(i),
there exists the $C$-directional derivative
$\nabla^C R_{t}\left[\phi\right](x;k)$
which is equal to the above right hand side.
Using that $\Lambda^{P,C}(t)$ is continuous we
see that the above limit is uniform for $k$ in the unit ball of $K$, so there exists the $C$-Fr\'echet derivative
$\nabla^C R_{t}\left[\phi\right](x)$.
From the above and from
\cite[Proposition 1.59]{FabbriGozziSwiech} we get
\begin{align*}
|\nabla^C R_{t}\left[\phi\right](x;k)|
&\leq
\Vert \bar\phi\Vert_\infty
\left(\int_{H}
\left\langle \Lambda^{P,C}(t) k, (PQ_tP^*)^{-1/2}z_1\right\rangle _H^2
{\cal N}(0,PQ_tP^*)(dz_1)\right)^{1/2}
\\[3mm]
\nonumber
&
= \Vert \bar\phi\Vert_\infty
\Vert \Lambda^{P,C}(t)k \Vert_{H}
\le \Vert\phi\Vert_\infty
\Vert \Lambda^{P,C}(t)\Vert_{{\cal L}(K;H)} |k|_K . \nonumber
\end{align*}
This gives the required estimate.
The statement on continuity follows
using the same argument as in \cite[Theorem 4.41-(ii)]{FabbriGozziSwiech}.
The last statement follows by the last part of Definition \ref{df4:Gspaces}.
{
\hbox{\enspace${ \mathchoice\sqr54\sqr54\sqr{4.1}3\sqr{3.5}3}$}}
\begin{remark}\label{rm:smoothingproof}
The proof generalizes the one of Theorem 4.1 in \cite{FGFM-I} and the one of Theorem 4.41 in \cite{FabbriGozziSwiech}. The main difference between Theorem 4.1 in \cite{FGFM-I} and the present proposition is that here we are able to handle an unbounded operator $C$ by enlarging the space $H$. Notice that in the proof $C$ appears only through the operator $\overline{Pe^{tA}}C$. Notice also that, as proved above, the image of such operator is contained in $\operatorname{Im} P$, which is not obvious due to the presence of the closure.
On the other hand, the difference with respect Theorem 4.41 in \cite{FabbriGozziSwiech} is that there $P$ is missing and the
partial derivatives are taken in the unbounded but less general case of Definition \cite[Definition 4.4]{FabbriGozziSwiech}.
{
\hbox{\enspace${ \mathchoice\sqr54\sqr54\sqr{4.1}3\sqr{3.5}3}$}}
\end{remark}
\begin{remark}\label{rm:partsmooth-second}
Generalizing to our setting the ideas of Proposition 4.5 in \cite{FGFM-I}
it is possible to prove that, if $\phi$ is more regular
(i.e.\ $\phi\in C^1_b(H)\cap C_b^P(H)$),
also $\nabla^C R_t[\phi]$ has more regularity, i.e.\ the derivatives
$\nabla\nabla^{C}R_{t}\left[\phi\right]$,
$\nabla^{C}\nabla R_{t}\left[\phi\right]$ exist, coincide, and
satisfy suitable formulae and estimates.
We omit them here since we do not need them for the purpose of this paper. They will be useful to find optimal feedback controls, which will be the subject of a subsequent paper.
{
\hbox{\enspace${ \mathchoice\sqr54\sqr54\sqr{4.1}3\sqr{3.5}3}$}}
\end{remark}
\section{Partial smoothing for convolutions}
\label{sec:convpartsmooth-abstr-setting}
To solve HJB equations like \eqref{HJBformale-diri} and \eqref{HJBformale-delay}
we need to extend the partial smoothing result of the previous section to convolutions.
We need first to introduce suitable spaces where such convolutions live
and which will be useful later to perform the fixed point argument to find the solution of our HJB equations.
\begin{definition}\label{df:Sigma}
Let $T>0$, $\eta \in (0,1)$.
A function $g\in C_b([0,T]\times H)\cap
C_b((0,T]\times \overline{H})$
belongs to $\Sigma^1_{T,\eta}$ if
\begin{itemize}
\item
there exists a function
$f\in C_b([0,T]\times H)$ such that\footnote{By continuity this also implies $g(0,x)=f(0,Px)$ for all $x \in H$.}
$$g(t,x)=f\left(t,\overline{Pe^{tA}}x\right),
\qquad \forall (t,x) \in (0,T]\times \overline{H};
$$
\item
for any $t\in(0,T]$ the function $g(t,\cdot)$ is
$C$-Fr\'echet differentiable on $\overline{H}$ and there exists
a function $\bar f\in C_b((0,T]\times H;K)$
such that
$$
t^\eta \nabla^C g(t,x)=\bar f\left(t,\overline{Pe^{tA}}x\right),
\qquad \forall (t,x) \in (0,T]\times \overline{H}.
$$
\end{itemize}
\end{definition}
\begin{remark}\label{rm:partsmooth-second-1}
Arguing as in \cite[Section 5]{FGFM-I}, it is possible to define a subspace of $\Sigma^1_{T,\eta}$ of functions $g$ such that there exists the second order derivative $\nabla\nabla^C$ which depends in a special way on $x\in \overline{H}$. This could be useful to prove second order regularity of the solution of our HJB equations. As in Remark \ref{rm:partsmooth-second} we omit this step here: it will be useful to find optimal feedback controls, which will be the subject of a subsequent paper.
{
\hbox{\enspace${ \mathchoice\sqr54\sqr54\sqr{4.1}3\sqr{3.5}3}$}}
\end{remark}
\begin{remark}
\label{rm:sigma1}
We observe that, using substantially the same argument as
\cite[Lemma 5.2]{FGFM-I}, one can prove that
\begin{equation}\label{eq:SigmaSubspace}
\Sigma^1_{T,\eta} \hbox{ is a closed subspace of }
C_\eta^{0,1,C}([0,T]\times \overline{H}).
\end{equation}
Moreover we also observe that, by Proposition \ref{prop:partsmooth},
it is immediate to see that, under our Hypotheses \ref{ip-sde-common}-\ref{ip:PC}-\ref{ip:NC}, for any $\phi \in C_b^P(H)$, we have
$R_t[\phi] \in \Sigma^1_{T,\gamma}$.
\end{remark}
We now come back to the abstract common setting and we state a first lemma on the regularity of the convolution type terms.
\begin{lemma}\label{lemma_convoluzione}
Let Hypotheses \ref{ip-sde-common}, \ref{ip:PC} and \ref{ip:NC} hold.
Let $T>0$, $C\in{\cal L}(K,\overline{H})$ and let $\psi:K^*\rightarrow\mathbb R$ be a Lipschitz continuous function.
For every $g \in \Sigma^1_{T,\gamma}$ (where $\gamma$ is
given in Hypothesis \ref{ip:NC}-(ii)), the function
$\hat{g}:[0,T]\times \overline{H} \rightarrow \mathbb R$ belongs to
$\Sigma^1_{T,\gamma}$ where
\begin{equation}\label{iterata-primag}
\hat{g}(t,x) =\int_{0}^{t}
R_{t-s} [\psi(\nabla^{C}g(s,\cdot))](x) ds, \qquad (t,x) \in
[0,T]\times \overline{H}.
\end{equation}
Hence, in particular, $\hat g(t,\cdot)$
is $C$-Fr\'echet differentiable on $\overline{H}$ for every $t\in (0,T]$ and, for a suitable constant $\hat\kappa$ (depending only on $T$ and $\psi$),
\begin{equation}\label{stimaiterata-primag}
\left\vert \nabla ^C(\hat g(t,\cdot))(x) \right\vert_{K^*} \leq
\hat\kappa\left(t^{{1-\gamma}}+t^{{1-2\gamma}}
\Vert g \Vert_{C^{0,1,C}_{{\gamma}}}\right),
\qquad \forall (t,x)\in (0,T]\times \overline{H}.
\end{equation}
Moreover, for every $g_1,g_2 \in \Sigma^1_{T,\gamma}$ (where $\gamma$ is given in Hypothesis \ref{ip:NC}-(ii)), the function
$\hat{g_1}-\hat{g_2}:[0,T]\times \overline{H} \rightarrow \mathbb R$ belongs to $\Sigma^1_{T,\gamma}$
and, for a suitable constant $\kappa$ (depending only on $T$ and $\psi$),
\begin{multline}\label{stimaiterata-g1g2}
\left\vert
\hat g_1(t,x)
-\hat g_2(t,x)
\right\vert + t^\gamma \left\vert
\nabla ^C(\hat g_1(t,\cdot))(x)
-\nabla ^C(\hat g_2(t,\cdot))(x)
\right\vert_{K^*}
\\
\leq
\kappa\left(t+t^{{1-\gamma}}\right)
\Vert g_1-g_2 \Vert_{C^{0,1,C}_{{\gamma}}},
\qquad \forall (t,x)\in (0,T]\times \overline{H}.
\end{multline}
\end{lemma}
\noindent \hbox{{\bf Proof.} }
We start by proving that $\hat g$ from (\ref{iterata-primag}) is well defined, continuous, and $C$-Fr\'echet differentiable.
First of all, for any $g\in\Sigma^1_{T,\gamma}$, we denote by $f_g$ and $\bar f_g$ the functions associated to it in Definition \ref{df:Sigma}.
Hence, given any $g\in\Sigma^1_{T,\gamma}$, we have, for
$0<s\le t$, $x,z\in H$:
\begin{align}\label{eq:nablaCshiftg}
& \psi\left(\nabla^{C}g(s,z+e^{(t-s)A}x)\right)
=
\psi\left( s^{-\gamma}\bar f_g\left(s, \overline{Pe^{sA}}z+\overline{Pe^{tA}}x\right)\right).
\end{align}
Hence we can give meaning to the left hand side also for
$x \in \overline{H}$. So we can write
\begin{align}
\notag
& \int_{0}^{t}
R_{t-s} \left[\psi\left(\nabla^{C}(g(s,\cdot))\right)\right](x) ds
=\int_{0}^{t}
\int_H \psi\left(\nabla^{C}g(s,z+e^{(t-s)A}x)\right)
{\cal N}(0,Q_{t-s})(dz)
\\
&=\int_{0}^{t}
\int_H \psi\left( s^{-\gamma}\bar f_g\left(s, {Pe^{sA}}z+\overline{Pe^{tA}}x\right)\right)
{\cal N}(0,Q_{t-s})(dz),
\qquad \forall(t,x)\in [0,T]\times\overline{H}.
\label{eq:defRconv1}
\end{align}
The above implies that $\hat g$ is well defined on $[0,T]\times\overline{H}$. Continuity follows
using the same argument as in
\cite[Proposition 4.50-(ii)]{FabbriGozziSwiech}.
Consequently the function $f_{\hat g}$ associated to $\hat g$ along Definition \ref{df:Sigma} is
\begin{align*}
f_{\hat g} (t,y)&=\int_{0}^{t}
\int_{H} \psi\left(s^{-\gamma}
\bar f_g\left(s, {Pe^{sA}}z+y\right)
\right){\cal N}(0,Q_{t-s})(dz)
\end{align*}
and, by the Lipschitz assumptions on $\psi$,
$$
\Vert f_{\hat g} \Vert_\infty \le \int_{0}^{T}
\left(|\psi(0)| + [\psi]_{Lip}\, s^{-\gamma} \Vert \bar f_g \Vert_\infty \right)ds
\le
|\psi(0)| T+ [\psi]_{Lip} \Vert \bar f_g \Vert_\infty (1-\gamma)^{-1} T^{1-\gamma}.
$$
To compute the $C$-derivative we first compute, using what is given above,
\begin{align}
\label{eq:defRconv2}
&\int_{0}^{t}
R_{t-s}\left[\psi\left( \nabla^{C}(g(s,\cdot))\right)\right](x+\alpha Ck)ds \\
\notag
&=\int_{0}^{t} \int_H \psi\left( s^{-\gamma}
\bar f_g\left(s, {Pe^{sA}}z+\overline{Pe^{tA}}(x+\alpha Ck)\right)
\right){\cal N}(0,Q_{t-s})(dz) ds
\\[2mm]
\notag&=\int_{0}^{t} \int_H
\psi\left(s^{-\gamma}
\bar f_g\left(s, Pe^{sA}z+\overline{Pe^{tA}}x\right)
\right)
{\cal N}\left(
\overline{Pe^{tA}}\alpha Ck,Q_{t-s}\right)(dz)ds
\\[2mm]
\notag
&=\int_{0}^{t} \int_H
\psi\left(s^{-\gamma}
\bar f_g\left(s, Pe^{sA}z+\overline{Pe^{tA}}x\right)
\right)
d(t-s,\alpha \overline{Pe^{tA}}Ck,z)
{\cal N}\left(0,Q_{t-s}\right)(dz)ds,
\end{align}
where, in the last two equalities, we have used Cameron-Martin Theorem
as in the proof of the above Proposition \ref{prop:partsmooth}, and
the density $d$ is given by \eqref{eq:density1}.
Hence, using \eqref{eq:defRconv1}-\eqref{eq:defRconv2},
\begin{align*}
&\lim_{\alpha\rightarrow 0}\dfrac{1}{\alpha}
\left[\int_{0}^{t}
R_{t-s}\left[\psi\left( \nabla^{C}(g(s,\cdot))\right)\right](x+\alpha Ck)ds -
\int_{0}^{t}
R_{t-s}\left[\psi\left( \nabla^{C}(g(s,\cdot))\right)\right](x) ds\right]=
\\[2mm]
&=\lim_{\alpha\rightarrow 0}
\int_{0}^{t} \int_H
\psi\left(s^{-{\gamma}}
\bar f_g\left(s, Pe^{sA}z+ \overline{Pe^{tA}}x\right)
\right)
\frac{d(t-s,\alpha \overline{Pe^{tA}}Ck,z)-1}{\alpha}
{\cal N}\left(0,Q_{t-s}\right)(dz)ds
\end{align*}
At this point we argue exactly as in the proof of the above Proposition \ref{prop:partsmooth} getting, uniformly for $k$ in the unit sphere of $K$:
\begin{align*}
&\lim_{\alpha\rightarrow 0}\dfrac{1}{\alpha}
\left[\int_{0}^{t}
R_{t-s}\left[\psi\left( \nabla^{C}(g(s,\cdot))\right)\right](x+\alpha Ck)ds -
\int_{0}^{t}
R_{t-s}\left[\psi\left( \nabla^{C}(g(s,\cdot))\right)\right](x) ds\right]=
\\[2mm]
&=
\int_{0}^{t} \int_H
\psi\left(s^{-\gamma}
\bar f_g\left(s, Pe^{sA}z+\overline{Pe^{tA}}x\right)\right)
\left\langle Q_{t-s}^{-1/2}
\overline{Pe^{tA}} Ck, Q_{t-s}^{-1/2}z\right\rangle _H
{\cal N}\left(0,Q_{t-s}\right)(dz)ds.
\end{align*}
This implies the required $C$-Fr\'echet differentiability and
\begin{align}
&\left\langle \nabla ^C \hat g(t,x),k \right\rangle _{K}=
\label{eq:derCConv}
\\
\nonumber
&=
\int_{0}^{t} \int_H
\psi\left(
s^{-\gamma} \bar f_g\left(s, Pe^{sA}z+\overline{Pe^{tA}}x\right)
\right)
\left\langle Q_{t-s}^{-{1/2}}
\overline{Pe^{tA}}Ck, Q_{t-s}^{-{1/2}}Pz\right\rangle _{H}
{\cal N}\left(0,Q_{t-s}\right)(dz)ds.
\end{align}
Moreover, the right hand side of \eqref{eq:derCConv} provides, when we substitute $\overline{Pe^{tA}}x$ with $y$, the function $\bar{{f}}_{\hat g}$
associated to $\hat g$ along the second part of Definition \ref{df:Sigma}.
\\
At this point, in order to prove estimate (\ref{stimaiterata-primag}),
we use the above representation and the H\"older inequality:
\begin{align*}
&\left\vert\left\langle \nabla ^C \hat g(t,x),k \right\rangle _K\right\vert \le
\\
&\leq [\psi]_{Lip}\int_{0}^{t}
\int_H \left(|\psi(0)|+\left\vert
s^{-{\gamma}} \bar f_g\left(s, Pe^{sA}z+\overline{Pe^{tA}}x\right)
\right\vert\right)\\
&\quad\left \vert
\left\langle Q_{t-s}^{-{1/2}}\overline{Pe^{tA}} Ck, Q_{t-s}^{-{1/2}}Pz\right\rangle _{H}
\right\vert{\cal N}(0,Q_{t-s})(dz) ds
\\
&\leq [\psi]_{Lip}\int_{0}^{t}\left(|\psi(0)|+s^{-\gamma} \left\Vert g \right\Vert_{C^{0,1,C}_{\gamma}}
\right)
\left\Vert Q_{t-s}^{-{1/2}}\overline{Pe^{tA}}Ck \right\Vert_{{\cal L}(K;H)} ds \\
&\leq \kappa_T [\psi]_{Lip}\int_{0}^{t}\left(|\psi(0)|+s^{-{\gamma}}
\left\Vert g \right\Vert_{C^{0,1,C}_{{\gamma}}}\right)
(t-s)^{-\gamma}\vert k\vert_{K} \,ds
\end{align*}
Since
$$
\int_{0}^{t}|\psi(0)|
(t-s)^{-\gamma}\vert k\vert_{K} \,ds
= |\psi(0)|\vert k\vert_{K}
\mathbf frac{1}{1-\gamma}t^{1-\gamma}
$$
$$
\int_{0}^{t}s^{-{\gamma}}
\left\Vert g \right\Vert_{C^{0,1,C}_{{\gamma}}}
(t-s)^{-\gamma}\vert k\vert_{K} \,ds
= \left\Vert g \right\Vert_{C^{0,1,C}_{{\gamma}}}\vert k\vert_{K}
\int_{0}^{t}s^{-{\gamma}}(t-s)^{-\gamma} \,ds
=
\left\Vert g \right\Vert_{C^{0,1,C}_{{\gamma}}}\vert k\vert_{K}
\beta(1-\gamma,1-\gamma)t^{1-2\gamma},
$$
where by $\beta(\cdot,\cdot)$ we mean the Euler Beta function, the claim follows.
\\
The proof of \eqref{stimaiterata-g1g2} follows in a similar way, taking into account estimate \eqref{stimaiterata-primag} and the fact that $\psi$ is Lipschitz continuous:
\begin{multline*}
\left\vert
\hat g_1(t,x)
-\hat g_2(t,x)
\right\vert + t^\gamma \left\vert
\nabla ^C(\hat g_1(t,\cdot))(x)
-\nabla ^C(\hat g_2(t,\cdot))(x)
\right\vert_{K^*}
\\
\leq \vert \int_{0}^{t}
R_{t-s} \left[\psi\left(\nabla^{C}(g_1(s,\cdot))\right)-
\psi\left(\nabla^{C}(g_2(s,\cdot))\right)\right](x) ds\vert\\
+t^\gamma\vert \nabla^{C} \int_{0}^{t}
R_{t-s} \left[\psi\left(\nabla^{C}(g_1(s,\cdot))\right)
-\psi\left(\nabla^{C}(g_2(s,\cdot))\right)\right](x) ds\vert\\
\leq
t^\gamma\left\vert\int_{0}^{t} \int_H \left(
\psi\left(
s^{-\gamma} \bar f_{g_1}\left(s, Pe^{sA}z+\overline{Pe^{tA}}x\right)
\right)-\psi\left(
s^{-\gamma} \bar f_{g_2}\left(s, Pe^{sA}z+\overline{Pe^{tA}}x\right)
\right)\right)
\right.\\
\left.\left\langle Q_{t-s}^{-{1/2}}
\overline{Pe^{tA}}Ck, Q_{t-s}^{-{1/2}}Pz\right\rangle _{H}
{\cal N}\left(0,Q_{t-s}\right)(dz)ds\right\vert + \kappa t \Vert g_1-g_2 \Vert_{C^{0,1,C}_{{\gamma}}}\\
\leq
\kappa\left(t+t^{{1-\gamma}}\right)
\Vert g_1-g_2 \Vert_{C^{0,1,C}_{{\gamma}}},
\qquad \forall (t,x)\in (0,T]\times \overline{H}.
\end{multline*}
{
\hbox{\enspace${ \mathchoice\sqr54\sqr54\sqr{4.1}3\sqr{3.5}3}$}}
\section{Applying partial smoothing to stochastic control problems}
\label{sec-HJB}
In this Section we first present the stochastic optimal control problem we aim to treat, then we show how the theory developed in the previous sections allows us to solve the associated HJB equation.
\subsection{A stochastic control problem in the abstract setting}
\label{subsec-contr.pr-abstract}
We consider the setting of Hypotheses \ref{ip-sde-common}-\ref{ip:PC}-\ref{ip:NC} which we assume to hold.
We present first the objective functional and then the state equation.
The goal of the controller is to minimize the following
finite horizon cost (here $X(\cdot;t,x)$ is the state process starting at time $t$ with the datum $x$),
\begin{equation}\label{costoastratto-common}
J(t,x;u)=\mathbb E \left(\int_t^T \left[\ell_0(s)+\ell_1(u(s))\right]\,ds + \phi(X(T;t,x))\right)
\end{equation}
over all controls $u(\cdot)$ in
\begin{equation}\label{eq:admcontr}
{\cal U}:=\left\{
u:[0,T]\times \Omega \rightarrow U \subseteq K, \text{ progressively measurable}
\right\}
\end{equation}
under the following assumption.
\begin{hypothesis}\label{ip-costo}
We assume that:
\begin{itemize}
\item[(i)]
the final cost $\phi$ belongs to $C_b^P(H)$ (see Definition \ref{df:spaziphi1});
\item[(ii)]
The current cost $\ell_0$ is measurable and bounded;
\item[(iii)]
the set $U\subset K$ is closed and bounded and the current cost $\ell_1:U\rightarrow \mathbb R$ is measurable and bounded from below.
\end{itemize}
\end{hypothesis}
\begin{remark}
Note that here the current cost does not depend on the state. Putting the dependence on the state in the current cost would increase considerably the technical arguments, namely the fixed point argument in the proof of Theorem \ref{esistenzaHJB}, Section \ref{sub:HJBsol}, wouldn't work, and it is left for a further paper.
\newline We also underline that the above technical problem cannot be overcome by transforming our Bolza type problem in a Mayer type problem (i.e. a problem with only the terminal cost) on the line of what is done e.g. in
\cite[Remark 7.4.1, p.714]{CannarsaSinestrari04}.
Indeed, using such transformation the state dependent running cost would disappear but the state equation would become nonlinear. This would prevent the use of our results on partial smoothing which, up to now, apply only to linear state equations.
\end{remark}
Before introducing the state equation we observe that,
due to Hypothesis \ref{ip-costo}-(i) above, what matters for the controller
is the process $PX(\cdot)$.
Now consider the following controlled SDE in the real separable Hilbert space $H$ (here $0\le t \le s \le T$)
\begin{equation}\label{eq-common-contr}
\left\{
\begin{array}{l}
\displaystyle
d X(s)= [AX(s)+Cu(s)]ds +Q^{1/2}dW(s), \qquad s\in (t,T],
\\\displaystyle
X(s)=x\in H, \quad \forall s \in [0,t]
\end{array}
\right.
\end{equation}
where $A,\,G$ and $W$ are as in Hypothesis \ref{ip-sde-common}
and $C\in{\cal L}(K,\overline{H})$ is as in Hypothesis \ref{ip:PC}.
Equation (\ref{eq-common-contr}) is formal and has to be considered in its mild formulation (using the so-called variation of constants formula, see e.g. \cite[Chapter 7]{DP1}), which still presents some issues. Indeed the mild solution of \eqref{eq-common-contr} is, still formally,
\begin{equation}
X(s)=e^{(s-t)A}x+\int_t^s{e^{(s-r)A}}C u(r) dr +\int_t^se^{(s-r)A}Q^{1/2}dW(r),
\text{ \ \ \ }s\in[t,T]. \\
\label{eq-mild-common}
\end{equation}
Here the first and the third term belong to $H$, thanks to Hypothesis \ref{ip-sde-common}, while the second in general does not.
By Hypothesis \ref{ip:PC}-(i) we see that the second term can be written as
$$
\int_t^s\overline{e^{(s-r)A}}Cu(r)dr \in \overline{H}.
$$
Hence, even when $x\in H$ the mild solution belongs to $\overline{H}$ but not to $H$. Moreover, still using Hypothesis \ref{ip:PC}-(i), we see that the mild solution makes sense for all $x\in \overline{H}$ and belongs to $\overline{H}$.
\\
On the other hand, thanks to Hypothesis \ref{ip:PC}-(iii), even when
$x\in \overline{H}$ the process $PX(s)$ belongs to $H$ and can be written as
\begin{equation}
PX(s) =\overline{Pe^{(s-t)A}}x+\int_t^s\overline{Pe^{(s-r)A}}C u(r) dr +\int_t^s P e^{(s-r)A}Q^{1/2}dW(r),
\text{ \ \ \ }s\in(t,T]. \\
\label{eq-mild-commonP}
\end{equation}
We define the value function related to this control problem, as usual, as
\begin{equation}\label{valuefunction-gen}
V(t,x):= \inf_{u \in {\cal U}}J(t,x;u).
\end{equation}
\subsection{Solution of the HJB equation}
\label{sub:HJBsol}
We define the Hamiltonian as follows:
the current value Hamiltonian $H_{CV}$ is
$$
H_{CV}(p\,;u):=\left\langle p,u\right\rangle _{K}+\ell_1(u)
$$
and the minimum value Hamiltonian is
\begin{equation}\label{psi1-gen}
H_{min}(p)=\inf_{u\in U}H_{CV}(p\,;u).
\end{equation}
The HJB equation associated to the stochastic optimal control problem presented in the above Section \ref{subsec-contr.pr-abstract} is then, formally,
\begin{equation}\label{HJBformale-common}
\left\{\begin{array}{l}\displaystyle
-\mathbf frac{\partial v(t,x)}{\partial t}={\cal L} [v(t,\cdot)](x) +\ell_0(t)+
H_{min} (\nabla^C v(t,x)),\qquad t\in [0,T],\,
x\in H,\\
\\
\displaystyle v(T,x)=\phi(x),
\end{array}\right.
\end{equation}
Here the differential operator ${\cal L}$
is the infinitesimal generator of the transition
semigroup $(R_{t})_{0\leq t\leq T}$ defined in (\ref{ornstein-sem-gen}) related to the process $Z$ solution of equation
(\ref{ornstein-gen}), namely ${\cal L}$ is formally defined by
\begin{equation}\label{eq:ell-gen}
{\cal L}[f](x)=\mathbf frac{1}{2} Tr \; Q\nabla^2 f(x)
+ \left\langle x,A^*\nabla f(x)\right\rangle .
\end{equation}
\begin{definition}\label{defsolmildHJB}
We say that a
function $v:[0,T]\times H\rightarrow\mathbb R$ is a mild
solution of the HJB equation (\ref{HJBformale-common}) if the following
are satisfied for some $\gamma \in(0,1)$:
\begin{enumerate}
\item $v(T-\cdot, \cdot)\in C^{0,1,C}_{{\gamma}}\left([0,T]\times \overline{H}\right)$;
\item the integral equation
\begin{equation}
v(t,x) =R_{T-t}[\phi](x)+\int_t^T R_{s-t}\left[
H_{min}(\nabla^C v(s,\cdot))+\ell_0(s)\right](x)\; ds,
\label{solmildHJB-common}
\end{equation}
is satisfied on $[0,T]\times H$.
\end{enumerate}
\end{definition}
We notice that the request in the previous definition \ref{defsolmildHJB}, point 1, implies that $\nabla^C_xv(t,x)$ can blow up like $(T-t)^{-\gamma}$.
We now prove existence and uniqueness of a mild solution of the HJB equation (\ref{HJBformale-common}) by a fixed point argument.
\begin{remark}\label{rm:crescitapoli-HJB}
Since functions in $C^{0,1,C}_{\gamma}\left([0,T]\times \overline{H}\right)$
are bounded (see Definition \ref{df4:Gspaces}),
the above Definition \ref{defsolmildHJB} requires, among other properties, that a mild solution is continuous and bounded up to $T$.
This constrains the assumptions on the data, e.g. it implies that the final datum $\phi$ must be continuous and bounded.
We may change this requirement in the above definition asking only measurability or only polynomial growth in $x$ so allowing for more general datum $\phi$ in Hypothesis \ref{ip-costo}-(i). Our main results will remain true with straightforward modifications (see \cite[Chapter 4]{FabbriGozziSwiech} for a treatment of such cases in the case of bounded control operators).
\\
Similarly we may weaken the request of Hypothesis \ref{ip-costo}-(iii) on the boundedness of the set $U$. This may result in the fact that the Hamiltonian $H_{min}$ is not Lipschitz continuous. This case, even if more difficult, could still be treated using the ideas of \cite{G2,FMloclip}.
\end{remark}
\begin{theorem}\label{esistenzaHJB}
Let Hypotheses \ref{ip-sde-common}, \ref{ip:PC}, \ref{ip:NC} and \ref{ip-costo} hold true.
Then the HJB equation (\ref{HJBformale-common})
admits a mild solution $v$ according to Definition \ref{defsolmildHJB}.
Moreover $v$ is unique among the functions $w$ such that $w(T-\cdot,\cdot)\in\Sigma_{T,\gamma}$
and it satisfies, for a suitable constant $\kappa_{1,T}>0$, the estimate
\begin{equation}\label{eq:stimavmainteo}
\Vert v(T-\cdot,\cdot)\Vert_{C^{0,1,C}_{{\gamma}}}\le \kappa_{1,T}\left(\Vert\phi \Vert_\infty
+\Vert\ell_0 \Vert_\infty \right).
\end{equation}
\end{theorem}
\noindent \hbox{{\bf Proof.} }
We first prove existence and uniqueness of a solution in $\Sigma^1_{T,{\gamma}}$, by using a fixed point argument in it. To this aim,
first we rewrite (\ref{solmildHJB-common}) in a forward way. Namely
if $v$ satisfies \myref{solmildHJB-common} then, setting $w(t,x):=v(T-t,x)$ for any
$(t,x)\in[0,T]\times H$, we get that $w$ satisfies
\begin{equation}
w(t,x) =R_{t}[\phi](x)+\int_0^t R_{t-s}[H_{min}(
\nabla^C w(s,\cdot))+\ell_0(s)](x)\; ds,\qquad t\in [0,T],\
x\in H,\label{solmildHJB-forward}
\end{equation}
which is the mild form of the forward HJB equation
\begin{equation}\label{HJBformaleforward-common}
\left\{\begin{array}{l}\displaystyle
\frac{\partial w(t,x)}{\partial t}={\cal L} [w(t,\cdot)](x) +\ell_0(t)+
H_{min} (\nabla^C w(t,x)),\qquad t\in [0,T],\,
x\in H,\\
\\
\displaystyle w(0,x)=\phi(x).
\end{array}\right.
\end{equation}
Referring to equation \eqref{solmildHJB-forward}, which is the mild version of \eqref{solmildHJB-common}, define the map $\Upsilon$ on $\Sigma^1_{T,\gamma}$ by setting, for $g\in \Sigma^1_{T,{\gamma}}$,
$$ \Upsilon[g](0,x):=\phi(x),$$
and, for $(t,x)\in (0,T]\times \overline{H}$,
\begin{equation}\label{Gamma}
\Upsilon[g](t,x):=R_{t}[\phi](x)+
\int_0^t \ell_0(s) ds+
\int_0^t R_{t-s}[
H_{min}(\nabla^C g(s,\cdot))](x)\; ds.
\end{equation}
Using Proposition \ref{prop:partsmooth}, in particular
\eqref{eq:ornstein-sem-phibarCV}, \eqref{eq:formulader-gen-P}
and the last statement on continuity,
we see that the sum of the first two terms of \eqref{Gamma} belongs to $\Sigma^1_{T,\gamma}$ with
\begin{align*}
f(t,x)&=\int_H\bar\phi(z_1+x){\cal N}(0,PQ_tP^*)(dz_1)
+\int_0^t \ell_0(s) ds,\\
\bar f(t,x)&=\int_H\bar\phi(z_1+x)\left\langle \Lambda^{P,C}(t),(PQ_tP^*)^{-1/2}z_1\right\rangle
{\cal N}(0,PQ_tP^*)(dz_1).
\end{align*}
Moreover, we use Lemma \ref{lemma_convoluzione} (simply substituting the generic function $\psi$ with $H_{min}$) to deduce that the third term of \eqref{Gamma} belongs to $\Sigma^1_{T,\gamma}$.
Hence $\Upsilon$
is well defined in $\Sigma^1_{T,\gamma}$ with values in $\Sigma^1_{T,\gamma}$ itself.
\newline As stated in Remark \ref{rm:sigma1}, $\Sigma^1_{T,\gamma}$ is a closed subspace of $C^{0,1,C}_{{\gamma}}([0,T]\times \overline{H})$,
and so if $\Upsilon$ is a contraction,
by the Contraction Mapping Principle there exists a unique (in $\Sigma^1_{T,{\gamma}}$)
mild solution of (\ref{HJBformale-common}).
We then prove the contraction property of $\Upsilon$.
Let $g_1,g_2 \in \Sigma^1_{T,{\gamma}}$. We evaluate
$$\Vert \Upsilon (g_1)-\Upsilon (g_2)\Vert_{\Sigma^1_{T,\gamma}}=\Vert \Upsilon(g_1)-\Upsilon (g_2)\Vert_{C^{0,1,C}_{\gamma}}.$$
For every $(t,x)\in (0,T]\times \overline{H}$, we have
\begin{align*}
\Upsilon (g_1)(t,x)- \Upsilon(g_2)(t,x) =
\int_0^t R_{t-s}\left[H_{min}\left(\nabla^C g_1(s,\cdot)\right)
-H_{min}\left(\nabla^C g_2(s,\cdot)\right)\right](x)ds.
\end{align*}
Hence we can use the second part of Lemma
\ref{lemma_convoluzione}, namely estimate \eqref{stimaiterata-g1g2}, to get
\begin{multline}\label{eq:stimaUpsilonNew}
|\Upsilon (g_1)(t,x)- \Upsilon(g_2)(t,x)|
+
t^{{\gamma}}\vert \nabla^C\Upsilon (g_1)(t,x) - \nabla^C\Upsilon(g_2)(t,x) \vert_{K^*}
\le
\kappa (t +t^{{1-\gamma}})\Vert g_1-g_2 \Vert_{C^{0,1,C}_{{\gamma}}}.
\end{multline}
Hence, if $T$ is sufficiently small, we get
that the map $\Upsilon$ is a contraction in $\Sigma^1_{T,\gamma}$
and, if we denote by $w$ its unique fixed point, then $v:=w(T-\cdot,\cdot)$
turns out to be a mild solution of the HJB equation (\ref{HJBformale-common}),
according to Definition \ref{defsolmildHJB}.
Since the constant $\kappa$ in \eqref{eq:stimaUpsilonNew} is independent of $t$, the case of generic $T>0$ follows
by dividing the interval $[0,T]$
into a finite number of subintervals of length $\delta$ sufficiently small, or equivalently, as done in \cite{Mas},
by taking an equivalent norm with an adequate exponential weight, such as
\[
\left\Vert f\right\Vert _{\eta,C^{0,1,C}_{\gamma}}
=\sup_{(t,x)\in(0,T]\times \overline{H}}
\vert e^{\eta t}f(t,x)\vert+
\sup_{(t,x)\in (0,T]\times \overline{H}} e^{\eta t}t^{\gamma}
\left\Vert \nabla^C f\left( t,x\right)
\right\Vert _{K^*}.
\]
{
\hbox{\enspace${ \mathchoice\sqr54\sqr54\sqr{4.1}3\sqr{3.5}3}$}}
\appendix
\begin{appendix}
\section{Appendix: verifying Hypothesis \ref{ip:NC} in our examples}
\subsection{The case of boundary control}\label{Sec-min-ene}
We consider the stochastic heat equation with boundary control \eqref{eqDiri}, reformulated as an abstract evolution equation \eqref{eqDiri-abstr-contr}, where we choose, according to Subsection \ref{SSE:HEATEQUATION} and Remark \ref{rm:computeexamples},
$$
H=L^2({\cal O}), \qquad
\overline H= {\cal D}\left((-A_0)^{-3/4-\varepsilon}\right)
=H^{-3/2-2\varepsilon}({\cal O})
\quad \hbox{(for suitable small $\varepsilon>0$),}
$$
$A=A_0$, $C=B=(-A_0)D$ as from \eqref{notazioneB}.
Moreover, as from Remark \ref{rm:NCexamples} we take $Q=(-A)^{-2\beta}$ for some $\beta\ge 0$ and
$P$ a projection on a finite dimensional subspace contained in $D((-A)^{\alpha})$ for some $\alpha>\beta+ \frac14$.
The covariance operator $Q_t$ is given by
\begin{equation}\label{cov-heat}
Q_t=\int_0^t (-A_0)^{-2\beta}e^{2sA_0}\,ds=(-A_0)^{-2\beta-1}(I-e^{2tA_0}).
\end{equation}
Notice that it can be deduced by the strong Feller property of the heat transition semigroup that $\operatorname{Im}e^{tA}\subset \operatorname{Im}Q_t^{1/2}$, see e.g. \cite[Section 9.4 and Appendix B]{DP1} for a comprehensive bibliography. Now we estimate
$\Vert Q_t^{-1/2}e^{tA_0} (-A_0 D)\Vert $.
In the sequel we denote by $\lambda_k\geq0,\, k\geq 1,\; \lambda_k\nearrow +\infty,$ the opposite of the eigenvalues of the Laplace operator in ${\cal O}$:
\[
A_0e_k=-\lambda_ke_k,\, k\geq 1.
\]
\begin{lemma}\label{Lemma Qt}
Let $Q_t$ be defined in (\ref{cov-heat}). For every $\varepsilon\in (0,\dfrac{1}{4})$, we get, for some $C_0>0$,
\begin{equation}\label{Q_t-norm}
\Vert Q_t^{-1/2}e^{tA_0}(-A_0 D)\Vert
\le C_0 t^{-\frac{5}{4}-\beta -\varepsilon}.
\end{equation}
\end{lemma}
\noindent \hbox{{\bf Proof.} } We notice that $ Q_t^{-1/2}e^{tA_0}(-A_0D)=Q_t^{-1/2}e^{tA_0}(-A_0)^{\frac{3}{4}+\varepsilon}D_\varepsilon$, where $D_\varepsilon=(-A_0)^{\frac{1}{4}-\varepsilon}D$ is bounded $\forall\, \varepsilon \in \Big(0,\dfrac{1}{4}\Big)$.
Moreover for every $a\in \partial\,{\cal O}$
\begin{align*}
\vert Q_t^{-1/2}e^{tA_0}(-A_0)^{\frac{3}{4}+\varepsilon}D_\varepsilon a\vert^2
&=\sum_{k=1}^{+\infty}\frac{\lambda_k^{1+2\beta+\frac{3}{2}+2\varepsilon}e^{-2t\lambda_k}}{1-e^{-2t\lambda_k}} \vert(D_\varepsilon a)_k\vert^2\\ \nonumber
&=\frac{1}{t^{\frac{5}{2}+2\beta+2\varepsilon}}\sum_{k=1}^{+\infty}\frac{(t\lambda_k)^{(\frac{5}{2}+2\beta+2\varepsilon)}e^{-2t\lambda_k}} {1-e^{-2t\lambda_k}}\vert (D_\varepsilon a)_k\vert^2\\
& =\frac{1}{t^{\frac{5}{2}+2\beta+2\varepsilon}}\sum_{k=1}^{+\infty}\frac{(t\lambda_k)^{(\frac{5}{2}+2\beta+2\varepsilon)}} {e^{2t\lambda_k}-1}\vert (D_\varepsilon a)_k\vert^2\\
& \leq \frac{1}{t^{\frac{5}{2}+2\beta+2\varepsilon}} \sup_{x\geq 0} \frac{x^{\frac{5}{2}+2\beta+2\varepsilon}} {e^{x}-1}\sum_{k=1}^{+\infty}\vert (D_\varepsilon a)_k\vert^2\\
& \leq \frac{1}{t^{\frac{5}{2}+2\beta+2\varepsilon}} \sup_{x\geq 0} \frac{x^{\frac{5}{2}+2\beta+2\varepsilon}} {e^{x}-1}\vert (D_\varepsilon a)\vert^2 \leq C_0\frac{1}{t^{\frac{5}{2}+2\beta+2\varepsilon}}\vert a\vert^2.
\end{align*}
So we can conclude that as $t\rightarrow 0$, $\forall a\in \partial\,{\cal O}$, $\vert Q_t^{-1/2}e^{tA_0}(-A_0D)a \vert^2$ blows up at most like $t^{-5/2-2\beta-2\varepsilon}$ and so the claim follows.
{
\hbox{\enspace${ \mathchoice\sqr54\sqr54\sqr{4.1}3\sqr{3.5}3}$}}
Now we introduce the operator $P$.
Let $\alpha>0$, let $v_1,..., v_n\in D((-A_0)^{\alpha})$ be linearly independent, and let $P$ be the projection on the span of $\left\langle v_1,...,v_n\right\rangle $, namely
\begin{equation}\label{P-n-gen}
P:H\rightarrow H,\quad Px=\sum_{i=1}^n\left\langle x,v_i\right\rangle v_i, \, \forall x \in H.
\end{equation}
We set moreover, noticing that $P=P^*$,
\begin{equation}\label{barQ_t-heat}
\bar Q_t:= PQ_tP=P (-A)^{-1-2\beta}(I-e^{2tA})P.
\end{equation}
Notice that
$P_\alpha:=(-A_0)^{\alpha}P$ is a continuous operator on $H$. Hence
$$
\overline{Pe^{tA_0}}B_0= P e^{tA_0}
(-A_0)^{\frac34+\varepsilon}((-A_0)^{\frac14-\varepsilon} D), \qquad
(\overline{Pe^{tA_0}}B_0)^*= ((-A_0)^{\frac14-\varepsilon}D)^*
(-A_0)^{\frac34+\varepsilon-\alpha} e^{tA_0}P_\alpha
$$
$$
\left\langle Q_tP^*x,P^* x\right\rangle =
\left\langle (I-e^{2tA_0}) (-A_0)^{-1-2\beta-2\alpha} P_\alpha x,P_\alpha x\right\rangle
$$
The aim now is to verify that $\operatorname{Im}\overline{Pe^{tA_0}}(-A_0D)\subset \operatorname{Im}\bar Q_t^{1/2}$ and to estimate $\Vert \bar Q_t^{-1/2}Pe^{tA_0}(-A_0D)\Vert$.
\begin{lemma}\label{Lemma-barQt}
Let $\bar Q_t$ be defined in (\ref{barQ_t-heat}).
Let $\alpha>\beta + \frac14$. Then, for $\varepsilon\in(0,\frac14)$,
\begin{equation}\label{barQ_t-norm-heat}
\operatorname{Im}\overline{Pe^{tA}}(-A_0D)\subset \operatorname{Im}\bar Q_t^{1/2}, \quad \Vert \bar Q_t^{-1/2}\overline{Pe^{tA}}(-A_0D)\Vert \leq \frac{C}{t^{1-\varepsilon}}.
\end{equation}
\end{lemma}
\noindent \hbox{{\bf Proof.} } In the proof we consider the case of the projection on the space generated by only one element $v\in D((-A)^\alpha)$, namely in the proof $P:H\rightarrow H,\; Px=\left\langle x,v\right\rangle v,\, \forall x \in H$, the extension to a map as in \eqref{P-n-gen} being direct.
\newline We notice that
\[
\left\langle \bar Q_t x,x\right\rangle =\vert\bar Q_t^{1/2}x\vert^2=\vert\left\langle x,v\right\rangle \vert^2\sum_{k\geq1}\frac{1-e^{-2t\lambda_k}}{\lambda_k^{1+2\beta}}v_k^2
\]
and so
\begin{align}\label{stima-barQ_t}
\vert\bar Q_t^{-1/2}v\vert^2&=\frac{1}{\vert v\vert^2}\frac{1}{\sum_{k\geq1}\frac{1-e^{-2t\lambda_k}}{\lambda_k^{1+2\beta}}v_k^2}\leq
\frac{1}{t^{1+2\beta}\vert v\vert^2}\frac{1}{\sum_{k\geq1}\frac{e^{2t\lambda_k}-1}{(t\lambda_k)^{1+2\beta}e^{2t\lambda_k}}v_k^2}\\ \nonumber
&\leq \frac{1}{t^{1+2\beta}\vert v\vert^2}\frac{1}{\sum_{k\geq1}\frac{e^{2t\lambda_k}-1}{(t\lambda_k)^{1+2\beta}}v_k^2}
\leq \frac{1}{t^{1+2\beta}\vert v\vert^2}\frac{1}{\sum_{k\geq1}v_k^2}
\leq C_0 \frac{1}{t^{1+2\beta}\vert v\vert^4}.
\end{align}
Moreover we can write, $\forall\, a \in \partial \,{\cal O}$ and setting $D_\varepsilon :=(-A_0)^{\frac14-\varepsilon}D$,
\[
\overline{Pe^{tA_0}}(-A_0D) a= Pe^{tA_0}(-A_0)^{\frac{3}{4}+\varepsilon}D_\varepsilon a
=\Big(\sum_{k\geq1}\lambda_k^{\frac{3}{4}+\varepsilon}e^{-t\lambda_k}(D_\varepsilon a)_kv_k\Big)\,v,
\]
so it is immediate to see that
$$ \operatorname{Im}\overline{Pe^{tA_0}}B\subset \operatorname{Im}\bar Q_t^{1/2}.$$
Taking into account that $D_\varepsilon$ is a bounded operator and that $v\in D((-A)^\alpha)$ we get
\begin{align*}
\Big(\sum_{k\geq1}\lambda_k^{\frac{3}{4}+\varepsilon}
e^{-t\lambda_k}(D_\varepsilon a)_kv_k\Big)^2&
= \Big(\sum_{k\geq1}\lambda_k^{\frac{3}{4}+\varepsilon-\alpha}e^{-t\lambda_k}(D_\varepsilon a)_k\lambda_k^{\alpha} v_k\Big)^2 \\
&\leq
\left(\sum_{k\geq1}\lambda_k^{\frac{3}{2}+2\varepsilon-2\alpha}
e^{-2t\lambda_k}\vert (D_\varepsilon a)_k\vert^2\right)
\left(\sum_{k\geq 1}\lambda_k^{2\alpha} v^2_k\right)
\\
&\leq C_0 \frac{1}{t^{\frac{3}{2}+2\varepsilon-2\alpha}} \left(\sum_{k\geq1}(t\lambda_k)^{\frac{3}{2}+2\varepsilon-2\alpha}e^{-2t\lambda_k}\vert (D_\varepsilon a)_k\vert^2\right) \vert A^{\alpha} v\vert^2.
\end{align*}
So by these calculations, and by estimate \eqref{stima-barQ_t} we get, with the constant $C_0$ independent of $t$ and that may change value from line to line,
\begin{align*}
\vert \bar Q_t^{-1/2} \overline{Pe^{tA}}(-A_0D) a\vert^2
&\leq C_0 \frac{1}{t^{1+2\beta}\vert v\vert^4} \frac{1}{t^{\frac{3}{2}+2\varepsilon-2\alpha}} \sum_{k\geq1}(t\lambda_k)^{\frac{3}{2}+2\varepsilon-2\alpha}e^{-2t\lambda_k}\vert (D_\varepsilon a)_k\vert^2\, \vert A^{\alpha} v\vert^2\\
&\leq C_0 \frac{1}{\vert v\vert^4} \frac{1}{t^{\frac{5}{2}+2\beta+2\varepsilon-2\alpha}} \sup_{x\geq 0}x^{\frac{3}{2}+2\varepsilon-2\alpha}e^{-2x}\vert D_\varepsilon a\vert^2.
\end{align*}
Choosing $\varepsilon$ small enough so that $\frac{5}{2}+2\beta+2\varepsilon-2\alpha\le 2-2\varepsilon$ (which is possible since $\alpha>\frac{1}{4}+\beta$) we conclude the proof.
{
\hbox{\enspace${ \mathchoice\sqr54\sqr54\sqr{4.1}3\sqr{3.5}3}$}}
\subsection{The case of delay in the control}\label{sec-partialSmoothing-delay}
We consider equation \eqref{eq-astr} in Section \ref{SSE:DELAYEQUATION} and we notice that for every $(x_0,x_1) \in H$ the covariance operator $Q_t$ of the stochastic convolution can be written as
\begin{equation}\label{eq:Q^0Q}
Q_t\left(
x_0, x_1
\right) =\left(
Q^0_t x_0, 0
\right),
\end{equation}
where $Q^0_t$ is the selfadjoint operator in $\mathbb R^n$ defined as
\begin{equation}\label{eq:Q^0def}
Q^0_t:=\int_0^t e^{sa_0}\sigma\sigma^*
e^{sa_0^*}\, ds,
\end{equation}
see \cite{FGFM-I}, Lemma 4.6. So
$$
\operatorname{Im} Q_t
=\operatorname{Im} Q_t^0\times \left\lbrace 0\right\rbrace\subseteq \mathbb R^n
\times \left\lbrace 0\right\rbrace.$$
\begin{lemma} The operator $Q_t^0$ defined in (\ref{eq:Q^0def})
is invertible for all $t>0$ if and only if
\begin{equation}\label{cond-delay}
\operatorname{Im} (\sigma,a_0\sigma, \dots , a_0^{n-1}\sigma)= \mathbb R^n.
\end{equation}
Let $P$ be the projection on the first component: $\forall (x_0,x_1)\in\, H$, $P(x_0,x_1)=(x_0,0)$.
{Then \eqref{eq:inclusionsmoothingC} holds if and only if
\begin{equation}\label{eq:inclusionsmoothingBbis}
\operatorname{Im}\left(e^{ta_0}b_0 +\int_{-d}^0 1_{[-t,0]}e^{(t+r)a_0}b_1(dr)
\right)\subseteq \operatorname{Im}(\sigma,a_0\sigma, \dots a_0^{n-1}\sigma ).
\end{equation}
If moreover
\begin{equation}\label{eq:hpdebregbis}
\operatorname{Im}\left(e^{ta_0}b_0 +\int_{-d}^0 1_{[-t,0]}e^{(t+r)a_0}b_1(dr)
\right)
\subseteq\operatorname{Im}\sigma,
\quad \forall t>0,
\end{equation}
then
\begin{equation}\label{eq:last-est}
\Vert (P Q_t P^*)^{-1/2} \overline{Pe^{tA_1}}B_1\Vert \leq C
t^{-{1/2}}.
\end{equation}}
\end{lemma}
\noindent \hbox{{\bf Proof.} }
In this case \eqref{eq:inclusionsmoothingC} is written as
$$
\operatorname{Im}\overline{Pe^{tA_1}}B_1\subseteq
\operatorname{Im}(P Q_t P^*)^{1/2},\qquad \mathbf forall t>0.
$$
Recalling \eqref{eq:etAB}
we see that $\overline{Pe^{tA_1}}B_1$ is a bounded operator and can be written as
$$
\overline{Pe^{tA_1}}B_1=\left(
e^{ta_0 }b_0+\int_{-d}^{0}1_{[-t,0]} e^{(t+s)a_0 } b_1(ds),
0 \right).
$$
Hence the inclusion follows exactly as in \cite{FGFM-I} in the case when $B_1$ is a bounded operator: indeed, in \cite{FGFM-I}, formula (4.35), Proposition 4.11, the only difference is that in the present paper $b_1(\cdot)$ is not necessarily absolutely continuous with respect to the Lebesgue measure, but this does not affect the image of
$\overline{Pe^{tA}}B_1$.
Hence, by \cite{FGFM-I}, Proposition 4.11, \eqref{eq:last-est} immediately follows.
{
\hbox{\enspace${ \mathchoice\sqr54\sqr54\sqr{4.1}3\sqr{3.5}3}$}}
\end{appendix}
\end{document} |
\begin{document}
\title{The NLMS~algorithm~with~time-variant~optimum\\stepsize~derived~from~a~Bayesian~network~perspective}
\author{Christian~Huemmer,~\IEEEmembership{Student Member,~IEEE},
Roland~Maas,
Walter~Kellermann,~\IEEEmembership{Fellow,~IEEE}
}
\markboth{IEEE Signal Processing Letters,~Vol.~XX, No.~X, Nov.~2014}
{Huemmer \MakeLowercase{\textit{et al.}}: A new derivation of the time-varying NLMS stepsize parameter from a Bayesian network perspective}
\maketitle
\begin{abstract}
In this article, we derive a new stepsize adaptation for the normalized least mean square algorithm (NLMS) by describing the task of linear acoustic echo cancellation from a Bayesian network perspective.
Similar to the well-known Kalman filter equations, we model the acoustic wave propagation from the loudspeaker to the microphone by a latent state vector and define a linear
observation equation (to model the relation between the state vector and the observation) as well as a linear process equation (to model the temporal progress of the state vector).
Based on additional assumptions on the statistics of the random variables in observation and process equation, we apply the expectation-maximization (EM) algorithm to
derive an \mbox{NLMS-like} filter adaptation. By exploiting the conditional independence rules for Bayesian networks, we reveal that the resulting EM-NLMS algorithm has a stepsize update equivalent to the optimal-stepsize calculation proposed by Yamamoto and Kitayama
in 1982, which has been adopted in many textbooks.
As main difference, the instantaneous stepsize value is estimated in the M~step of the EM algorithm (instead of being approximated by artificially extending the acoustic echo path).
The EM-NLMS algorithm is experimentally verified
for synthesized scenarios with both white noise and male speech as input signals.
\end{abstract}
\begin{IEEEkeywords}
Adaptive stepsize, NLMS, Bayesian network, machine learning, EM algorithm
\end{IEEEkeywords}
\IEEEpeerreviewmaketitle
\section{Introduction}
\label{sec:Intro}
\IEEEPARstart{M}{achine} learning techniques have been widely applied to signal processing tasks since decades \cite{frey2005,adali2011}.
For example, directed graphical models, termed Bayesian networks, have shown to provide a powerful framework for modeling causal probabilistic relationships between random variables~\cite{bilmes2005,ITG6,wainwright2007,barber2010,ChinaSIP7}. In previous work,
the update equations of the Kalman filter and the \ac{NLMS} algorithm have already been
derived from a Bayesian network perspective based on a linear relation between the latent \ac{RIR} vector and the observation~\cite{maas2014,Bishop}.\\
The \ac{NLMS} algorithm is one of the most-widely used adaptive algorithms in speech signal processing and a variety of stepsize adaptation
schemes has been proposed to
improve its system identification performance~\cite{Variable1982,Variable1992,Variable1997,Variable2000,shin2004,Variable2006,KLMS,Variable2008,paleologu2009,hwang2009,Variable2012,zhao2013}.
In this article, we derive a novel \ac{NLMS}-like filter adaptation (termed EM-NLMS algorithm)
by applying the \ac{EM} algorithm to a probabilistic model for linear system identification.
Based on the conditional independence rules for Bayesian networks, it is shown that the normalized stepsize of the EM-NLMS algorithm
is equivalent to the one proposed in~\cite{Variable1982}, which is now commonly accepted as optimum NLMS stepsize rule, see e.g. \cite{AdaptiveFilter}.
As the main difference relative to~\cite{Variable1982}, the normalized stepsize~is~here estimated as part of the \ac{EM} algorithm instead of being approximated by artificially extending the acoustic echo~path.
For a valid comparison, we review the algorithm~of~\cite{Variable1982}~for~the linear \ac{AEC} scenario shown in~Fig.~\ref{fig:NONlinAEC}.
\begin{figure}
\caption{System model for linear \ac{AEC}}
\label{fig:NONlinAEC}
\end{figure}
The acoustic path between loudspeaker and microphone at time $n$ is modeled by the linear~\ac{FIR}~filter
\begin{equation}
\mathbf{h}_n=[h_{0,n},h_{1,n},...,h_{M-1,n}]^T
\label{equ:hGen}
\end{equation}
with time-variant coefficients $h_{\kappa,n}$, where ${\kappa=0,...,M-1}$.
The observation equation models the microphone sample $d_n$:
\begin{equation}
d_n = \mathbf{x}^T_n\mathbf{h}_n + v_n,
\label{equ:ObservSpe}
\end{equation}
with the additive variable $v_n$ modeling near-end interferences and the observed
input signal vector \mbox{$\mathbf{x}_n=[x_n,x_{n-1},...,x_{n-M+1}]^T$} capturing the time-domain samples $x_n$.
The iterative estimation of the \ac{RIR} vector by the adaptive \ac{FIR} filter $\mathbf{\hat{h}}_{n}$ is realized by the update rule
\begin{equation}
\mathbf{\hat{h}}_{n} = \mathbf{\hat{h}}_{n-1} + \lambda_n \mathbf{x}_n e_n,
\label{equ:UpdNLMS}
\end{equation}
with the stepsize $\lambda_n$ and the error signal
\begin{equation}
e_n = d_n-\mathbf{x}_n^T\mathbf{\hat{h}}_{n-1}
\label{equ:Error}
\end{equation}
relating the observation $d_n$ and its estimate $\hat{d}_n=\mathbf{x}_n^T\mathbf{\hat{h}}_{n-1}$.
In~\cite{Variable1982}, the optimal choice of $\lambda_n$ has been approximated as:
\begin{equation}
\lambda_{n} \approx \frac{1}{M} \frac{\mathcal{E}\{ ||\mathbf{h}_n - \mathbf{\hat{h}}_{n-1} ||^2_2 \}}{\mathcal{E}\{e_n^2\}},
\label{equ=alphaSchul}
\end{equation}
where $||\cdot||_2$ denotes the Euclidean norm and $\mathcal{E}\{\cdot\}$ the expectation operator.
As the true echo path $\mathbf{h}_n$ is unobservable, the numerator in (\ref{equ=alphaSchul}) cannot be computed, and $\lambda_n$ is further approximated by introducing a delay of
$N_T$ coefficients to the echo path $\mathbf{h}_n$. Moreover, a recursive approximation of the denominator in (\ref{equ=alphaSchul}) is applied
using the forgetting factor~$\eta$~\mbox{\cite{AdaptiveFilter,NMLSapprox2}}. The resulting stepsize approximation
\begin{equation}
\lambda_{n} \approx \frac{1}{ N_T} \frac{ \sum\limits_{\kappa=0}^{N_T-1} \hat{h}^2_{\kappa,n-1} }{ (1-\eta) e_n^2 + \eta \mathcal{E}\{e_{n-1}^2\} }
\label{equ=alphaSchulApp}
\end{equation}
leads to oscillations which have to be addressed by limiting the absolute value~of~$\lambda_{n}$~\cite{NLMSschultheis}.
\renewcommand{\arraystretch}{2.5}
\begin{table}
\caption{Relation between the NLMS algorithm following~\cite{Variable1982} and the proposed EM-NLMS algorithm}
\centering
\begin{tabular}[T]{|c|c c|}
\hline
& NLMS algorithm \cite{Variable1982} \;&\; EM-NLMS algorithm \\[0.5mm]
\hline
Norm. stepsize $\lambda_n$ & (\ref{equ=alphaSchul}) \qquad \quad& \qquad E step \\[1mm]
\hline
Estimation of $\lambda_n$ & (\ref{equ=alphaSchulApp})\qquad \quad & \qquad M step\\[1mm] \arrayrulecolor{black}\hline
\end{tabular}\\[-17mm]
\begin{tikzpicture}
\node at (1,4.1) {$\hspace{25mm}$ \text{\footnotesize equivalent to}};
\node at (1,3.8) {$\hspace{25mm}$ \text{\footnotesize (Section~\ref{sec:Comp}})};
\node at (1,3.22) {$\hspace{25mm}$ \text{\footnotesize replaced by}};
\node at (1,2.92) {$\hspace{25mm}$ \text{\footnotesize (Subsec.~\ref{cha:ParticleModif2}})};
\end{tikzpicture}\\[-2mm]
\label{tab:Comp}
\end{table}
In this article, we derive~the \mbox{EM-NLMS} algorithm which applies the filter update of~(\ref{equ:UpdNLMS}) using the stepsize in~(\ref{equ=alphaSchul}),~where $\lambda_n$ is
estimated in the~M~Step~of the EM algorithm instead of being approximated by using~(\ref{equ=alphaSchulApp}).
\noindent This article is structured as follows: In Section~\ref{sec:BayesianNetwork}, we propose a probabilistic model for the linear \ac{AEC} scenario of Fig.~\ref{fig:NONlinAEC} and derive the EM-NLMS algorithm, which is revealed
in Section~\ref{sec:Comp} to be similar to the NLMS algorithm proposed~in~\cite{Variable1982}.
As main difference~(cf. Table~\ref{tab:Comp}), the stepsize is estimated in the M Step of the EM algorithm instead of being approximated by artificially extending the acoustic echo path.
In Section~\ref{sec:exper}, the EM-NLMS algorithm is experimentally verified for synthesized scenarios
with both white noise and male speech as input signals.
Finally, conclusions are drawn in Section~\ref{sec:conclu}.
\section{The EM-NLMS algorithm for linear AEC}
\label{sec:BayesianNetwork}
Throughout this article, the Gaussian \ac{PDF} of a real-valued length-$M$ vector $\mathbf{z}_n$
with mean vector~$\boldsymbol{\mu}_{\mathbf{z},n}$ and covariance matrix $\mathbf{C}_{\mathbf{z},n}$ is denoted~as
\begin{equation}
\begin{split}
&\mathbf{z}_n\sim \mathcal{N} \{ \mathbf{z}_n | \boldsymbol{\mu}_{\mathbf{z},n},\mathbf{C}_{\mathbf{z} ,n} \} \\
=& \frac{ |\mathbf{C}_{\mathbf{z} ,n}|^{-1/2}}{(2 \pi)^{M/2}} \exp \left\{ -\frac{ (\mathbf{z}_n-\boldsymbol{\mu}_{\mathbf{z},n})^T \mathbf{C}_{\mathbf{z} ,n}^{-1}(\mathbf{z}_n-\boldsymbol{\mu}_{\mathbf{z},n})}{2} \right\},
\end{split}
\label{equ:GaussianPDF}
\end{equation}
where $|\cdot|$ represents the determinant of a matrix.
Furthermore,
\mbox{$\mathbf{C}_{\mathbf{z},n}=C_{\mathbf{z},n} \mathbf{I}$} (with identity matrix~$\mathbf{I}$) implies
the elements of $\mathbf{z}_n$ to be mutually statistically independent and of equal variance $C_{\mathbf{z},n}$.
\subsection{Probabilistic AEC model}
To describe the linear \ac{AEC} scenario of Fig.~\ref{fig:NONlinAEC} from a Bayesian network perspective, we model
the acoustic echo path as a latent state vector $\mathbf{h}_n$ identically defined as in~(\ref{equ:hGen})
and capture uncertainties (e.g. due to the limitation to a linear system with a finite set of coefficients) by the additive uncertainty~$\mathbf{w}_n$. Consequently, the linear process equation and the linear observation equation,
\begin{equation}
\mathbf{h}_n= \mathbf{h}_{n-1} + \mathbf{w}_n \quad \text{and} \quad d_n = \mathbf{x}^T_n\mathbf{h}_n + v_n,
\label{equ:TranEq}
\end{equation}
can be jointly represented by the graphical model shown in Fig.~\ref{fig:ParticleModel}.
The directed links express statistical dependencies between the nodes and random variables, such as $v_n$, are marked as circles.
We make the following assumptions on the \ac{PDF}s of the random variables~in~Fig.~\ref{fig:ParticleModel}:
\begin{itemize}
\item
The uncertainty $\mathbf{w}_n$ is normally distributed with mean vector $\mathbf{0}$ (of zero-valued entries) and variance~$C_{\mathbf{w},n}$:
\begin{equation}
\mathbf{w}_n \sim \mathcal{N} \{ \mathbf{w}_n | \mathbf{0}, \mathbf{C}_{\mathbf{w} ,n}\} ,\quad \mathbf{C}_{\mathbf{w} ,n} = C_{\mathbf{w},n} \mathbf{I}.
\label{equ:ChannelUncert}
\end{equation}
\item
The microphone signal uncertainty $v_n$ is assumed to be normally distributed with variance $C_{v, n}$ and zero mean:
\begin{equation}
v_n \sim \mathcal{N} \{ v_n | 0, C_{v, n}\}.
\label{equ:micUnc}
\end{equation}
\item
The posterior distribution $p \left( \mathbf{h}_n | d_{1:n}\right)$ is defined with mean vector $\boldsymbol{\mu}_{\mathbf{h},n}$, variance~$ C_{\mathbf{h},n}$~and~${d_{1:n}=d_1,...,d_n}$:
\begin{equation}
p \left( \mathbf{h}_n | d_{1:n}\right) = \mathcal{N} \{ \mathbf{h}_n | \boldsymbol{\mu}_{\mathbf{h},n},\mathbf{C}_{\mathbf{h} ,n} \},\quad \mathbf{C}_{\mathbf{h} ,n} = C_{\mathbf{h},n} \mathbf{I}.
\label{equ:Posterior}
\end{equation}
\end{itemize}
\begin{figure}
\caption{Bayesian network for linear AEC with latent state vector $\mathbf{h}_n$}
\label{fig:ParticleModel}
\end{figure}
\noindent Based on this probabilistic AEC model, we apply the \ac{EM} algorithm consisting of two parts:
In the E~Step, the filter
update is derived based on \ac{MMSE} estimation (Subsection~\ref{cha:ParticleModif}).
In the M~step, we predict the model parameters $C_{v, n+1}$~and~$C_{\mathbf{w}, n+1}$ to estimate the adaptive stepsize value $\lambda_{n+1}$~(Subsection~\ref{cha:ParticleModif2}).
\subsection{E step: Inference of the state vector}
\label{cha:ParticleModif}
The \ac{MMSE} estimation of the state vector
identifies the mean vector of the posterior distribution as estimate $\mathbf{\hat{h}}_n$:
\begin{equation}
\mathbf{\hat{h}}_n = \underset{\mathbf{\tilde{h}}_n}{\operatorname{argmin}} \; \mathcal{E} \{ ||\mathbf{\tilde{h}}_n-\mathbf{h}_n ||_2^2 \} = \mathcal{E} \{ \mathbf{h}_n | d_{1:n} \}=\boldsymbol{\mu}_{\mathbf{h},n}.
\label{equ:mes}
\end{equation}
Due to the linear relations between the variables in (\ref{equ:ObservSpe})~and~(\ref{equ:TranEq}),
and under the restrictions to a linear estimator of $\mathbf{\hat{h}}_n$ and normally distributed random variables, the \ac{MMSE} estimation is analytically tractable~\cite{Bishop}.
Exploiting the product rules for linear Gaussian models and conditional independence of the Bayesian network in Fig.~\ref{fig:ParticleModel}, the filter update can be derived as a special case of the Kalman filter equations \cite[p. 639]{Bishop}:
\begin{equation}
\mathbf{\hat{h}}_n = \mathbf{\hat{h}}_{n-1} + \boldsymbol{\Lambda}_n \mathbf{x}_n e_n,
\label{equ:LDSupdate}
\end{equation}
with the stepsize matrix
\begin{equation}
\boldsymbol{\Lambda}_n= \frac{ \mathbf{C}_{\mathbf{h} ,n-1} +\mathbf{C}_{\mathbf{w} ,n} }{\mathbf{x}^T_n ( \mathbf{C}_{ \mathbf{h} ,n-1} +\mathbf{C}_{ \mathbf{w} ,n} )\mathbf{x}_n + C_{v, n}}
\end{equation}
and the update of the covariance matrix given as
\begin{equation}
\mathbf{C}_{\mathbf{h} ,n} = \left( \mathbf{I} -\boldsymbol{\Lambda}_n \mathbf{x}_n \mathbf{x}^T_n \right) (\mathbf{C}_{ \mathbf{h} ,n-1} +\mathbf{C}_{ \mathbf{w} ,n} )
\label{equ:LDSupdateC}.
\end{equation}
By inserting (\ref{equ:ChannelUncert}) and (\ref{equ:Posterior}), we can rewrite the filter update of~(\ref{equ:LDSupdate}) to the filter update defined in (\ref{equ:UpdNLMS}) with the scalar stepsize
\begin{equation}
\lambda_n= \frac{C_{\mathbf{h},n-1} +C_{\mathbf{w},n}}{\mathbf{x}^T_n\mathbf{x}_n (C_{\mathbf{h},n-1} +C_{\mathbf{w},n}) + C_{v,n}}.
\label{equ:Lmabda}
\end{equation}
Finally, the update of $C_{\mathbf{h},n}$ is approximated following (\ref{equ:Posterior}) as
\begin{equation}
C_{\mathbf{h},n} \stackrel{(\ref{equ:Posterior})}{=} \frac{\text{diag}\{\mathbf{C}_{\mathbf{h},n}\}}{M}\stackrel{(\ref{equ:LDSupdateC})}{=} \left( 1 -\lambda_n \frac{\mathbf{x}^T_n \mathbf{x}_n}{M} \right) (C_{\mathbf{h},n-1} +C_{\mathbf{w},n}),
\end{equation}
where $\text{diag}\{\cdot\}$ adds up the diagonal elements of a matrix.\\
\noindent Before showing the equality of the stepsize updates in (\ref{equ:Lmabda})~and~(\ref{equ=alphaSchul}) in Section~\ref{sec:Comp}, we propose a new alternative to estimate $\lambda_n$ in (\ref{equ:Lmabda}) by
deriving the updates of the model parameters $C_{\mathbf{w},n}$ and $C_{v,n}$ in the following section.
\subsection{M step: Online learning of the model parameters}
\label{cha:ParticleModif2}
In the M step, we predict the model parameters for the following time instant.
Although the maximum likelihood estimation is analytically tractable, we apply the \ac{EM}~algorithm to derive an online estimator:
In order to update \mbox{$\theta_{n}=\{ C_{v,n}, C_{\mathbf{w},n}\}$} to the new parameters \mbox{$\theta^{\text{new}}_{n}=\{ C^{\text{new}}_{v,n}, C^{\text{new}}_{\mathbf{w},n}\}$},
the lower~bound
\begin{equation}
\mathcal{E}_{\mathbf{h}_{1:n}|\theta_{1:n}} \{ \ln\left( p(d_{1:n}, \mathbf{h}_{1:n}|\theta_{1:n} ) \right)\} \leq \ln p(d_{1:n}|\theta_{1:n}) ,
\label{equ:bound}
\end{equation}
is maximized, where \mbox{$\theta_{1:n}=\{ C_{v,1:n}, C_{\mathbf{w},1:n}\}$}. For this, the \ac{PDF} $p(d_{1:n}, \mathbf{h}_{1:n}|\theta_{1:n})$ is determined by applying the decomposition rules for Bayesian networks \cite{Bishop}:
\begin{align}
&p(d_{1:n}, \mathbf{h}_{1:n} |\theta_{1:n}) = p(\mathbf{h}_{n} |\mathbf{h}_{n-1}, C_{\mathbf{w},n} \mathbf{I}) p(d_{n}| \mathbf{h}_{n},C_{v,n})\notag \\
&\cdot \prod_{m=1}^{n-1} p(\mathbf{h}_{m} |\mathbf{h}_{m-1}, C_{\mathbf{w},m} \mathbf{I}) p(d_{m}| \mathbf{h}_{m},C_{v,m}).
\label{equ:JointPDFtot}
\end{align}
Next, we take the natural logarithm $\ln(\cdot)$ of $p(d_{1:n}, \mathbf{h}_{1:n} |\theta_{1:n})$, replace $\theta_{n}$ by $\theta^{\text{new}}_{n}$ and maximize the right-hand side of~(\ref{equ:bound}) with respect to~$\theta^{\text{new}}_{n}$:
\begin{align}
\theta^{\text{new}}_{n} &= \underset{C^{\text{new}}_{\mathbf{w},n}}{\operatorname{argmax}} \; \mathcal{E}_{ \mathbf{h}_{1:n}|\theta_{n}} \{ \ln \left( p(\mathbf{h}_{n} |\mathbf{h}_{n-1}, C^{\text{new}}_{\mathbf{w},n} \mathbf{I})\right)\} \notag \\
&+ \underset{C^{\text{new}}_{v,n}}{\operatorname{argmax}} \; \mathcal{E}_{ \mathbf{h}_{1:n}|\theta_{n}} \{ \ln \left( p(d_{n}| \mathbf{h}_{n},C^{\text{new}}_{v,n})\right)\},
\label{equ:maxDef}
\end{align}
where we apply two separate maximizations starting~with
the estimation of $C^{\text{new}}_{v,n}$ by inserting
\begin{equation}
\ln(p(d_{n}| \mathbf{h}_{n} ,C^{\text{new}}_{v,n})) \stackrel{(\ref{equ:TranEq})}{=} -\frac{\ln(2\pi C^{\text{new}}_{v, n} )}{2} - \frac{(d_n - \mathbf{x}^T_n\mathbf{h}_n)^2}{2 C^{\text{new}}_{v, n}}
\end{equation}
into (\ref{equ:maxDef}). This leads to the instantaneous estimate:
\begin{align}
C^{\text{new}}_{v,n} &= \mathcal{E}_{\mathbf{h}_{1:n}|\theta_{n}} \{ ( d_n - \mathbf{x}^T_n\mathbf{h}_n )^2 \} \\
& = d_n^2 + \mathbf{x}^T_n ( C_{\mathbf{h},n}\mathbf{I}+\mathbf{\hat{h}}_n \mathbf{\hat{h}}^T_n ) \mathbf{x}_n - 2 d_n \mathbf{x}^T_n \mathbf{\hat{h}}_n \\
&= ( d_n - \mathbf{x}^T_n\mathbf{\hat{h}}_n )^2 + \mathbf{x}^T_n\mathbf{x}_n C_{\mathbf{h},n}.
\label{equ:UpdateV}
\end{align}
The variance (of the microphone signal uncertainty) $C^{\text{new}}_{v,n}$ in~(\ref{equ:UpdateV}) consists of two components, which can be interpreted as follows~\cite{itg2014}:
The first term in~(\ref{equ:UpdateV}) is given as the squared error signal after filter adaptation and is influenced by near-end interferences like background noise.
The second term in~(\ref{equ:UpdateV}) depends on the signal energy $\mathbf{x}^T_n\mathbf{x}_n$ and the variance $C_{\mathbf{h},n}$ which implies that it considers uncertainties in the linear echo path model.
Similar to the derivation for $C^{\text{new}}_{v,n}$, we insert
\begin{flalign}
&\ln( p(\mathbf{h}_{n} |\mathbf{h}_{n-1},C_{\mathbf{w},n} \mathbf{I})) \notag \\[2mm]
&\stackrel{(\ref{equ:TranEq})}{=}-\frac{M\ln(2\pi C^{\text{new}}_{\mathbf{w}, n} )}{2}- \frac{(\mathbf{h}_n-\mathbf{h}_{n-1})^T(\mathbf{h}_n-\mathbf{h}_{n-1})}{2 C^{\text{new}}_{\mathbf{w}, n}}
\end{flalign}
into (\ref{equ:maxDef}),
to derive the instantaneous estimate of $C^{\text{new}}_{\mathbf{w},n}$:
\begin{align}
C^{\text{new}}_{\mathbf{w},n} = \; & \frac{1}{M} \mathcal{E}_{\mathbf{h}_{1:n}|\theta_{n}} \{ (\mathbf{h}_n-\mathbf{h}_{n-1})^T(\mathbf{h}_n-\mathbf{h}_{n-1})\} \\
\stackrel{(\ref{equ:Posterior})}{=} &C_{\mathbf{h},n} - C_{\mathbf{h},n-1} + \frac{1}{M} \left( \mathbf{\hat{h}}_n^T\mathbf{\hat{h}}_n - \mathbf{\hat{h}}_{n-1}^T\mathbf{\hat{h}}_{n-1} \right),
\label{equ:UpdateW}
\end{align}
where we employed the statistical independence between $\mathbf{w}_n$ and $\mathbf{h}_{n-1}$.
Equation (\ref{equ:UpdateW}) implies the estimation of $C^{\text{new}}_{\mathbf{w},n}$ as difference of the filter tap autocorrelations between the time instants $n$ and $n-1$.
Finally, the updated values in $\theta^{\text{new}}_{n}$ are used as initialization for the following time step, so that
\begin{equation}
\theta_{n+1} := \theta^{\text{new}}_{n} \; \rightarrow \; C_{\mathbf{w},n+1} := C^{\text{new}}_{\mathbf{w},n}, \;C_{v,n+1} := C^{\text{new}}_{v,n}.
\end{equation}
\section{Comparison between the EM-NLMS algorithm and the NLMS algorithm proposed in \cite{Variable1982}}
\label{sec:Comp}
In this part, we compare the proposed EM-NLMS~algorithm to the NLMS algorithm reviewed in Section~\ref{sec:Intro} and show the equality between the adaptive stepsizes in (\ref{equ=alphaSchul})~and~(\ref{equ:Lmabda}).
We reformulate the stepsize update in (\ref{equ:Lmabda}) by applying the conditional independence rules for Bayesian networks~\cite{Bishop}:
First, we exploit the equalities
\begin{equation}
\begin{split}
\mathbf{C}_{\mathbf{h} ,n} &\stackrel{(\ref{equ:Posterior})}{=} C_{\mathbf{h},n} \mathbf{I}\stackrel{(\ref{equ:mes})}{=}\mathcal{E} \{ (\mathbf{h}_n- \mathbf{\hat{h}}_n ) (\mathbf{h}_n- \mathbf{\hat{h}}_n )^T\},\\
\mathbf{C}_{\mathbf{w} ,n}&\stackrel{(\ref{equ:ChannelUncert})}{=}C_{\mathbf{w},n} \mathbf{I} = \mathcal{E} \{ \mathbf{w}_n \mathbf{w}_n^T\},
\end{split}
\label{equ:varDef}
\end{equation}
which lead to the following relations:
\begin{align}
C_{\mathbf{h},n} &= \frac{\mathcal{E} \{ (\mathbf{h}_n- \mathbf{\hat{h}}_n )^T (\mathbf{h}_n- \mathbf{\hat{h}}_n )\}}{M} = \frac{\mathcal{E}\{||\mathbf{h}_{n} - \mathbf{\hat{h}}_{n}||^2_2\}}{M} ,\notag \\
C_{\mathbf{w},n} &= \frac{\mathcal{E} \{ \mathbf{w}_n^T \mathbf{w}_n\}}{M} =\frac{ \mathcal{E}\{||\mathbf{w}_{n}||^2_2\}}{M}.
\label{equ:Zwischenschritt}
\end{align}
Second, it can be seen in Fig.~\ref{fig:ParticleModel} that the state vector $\mathbf{h}_{n-1}$ and the uncertainty $\mathbf{w}_n $ are statistically independent as they share a head-to-head relationship with respect to the latent vector $\mathbf{h}_{n}$.
As a consequence, the numerator in (\ref{equ:Lmabda}) can be rewritten~as
\begin{align}
C_{\mathbf{h},n-1} + C_{\mathbf{w},n} \stackrel{(\ref{equ:Zwischenschritt})}{=} \; & \frac{\mathcal{E}\{||\mathbf{h}_{n-1} - \mathbf{\hat{h}}_{n-1}||^2_2\}}{M} +\frac{ \mathcal{E}\{||\mathbf{w}_{n}||^2_2\}}{M} \notag \\
\stackrel{(\ref{equ:TranEq})}{=} \; & \frac{\mathcal{E}\{||\mathbf{h}_{n} - \mathbf{\hat{h}}_{n-1}||^2_2\}}{M}.
\label{equ:UmrechnungD}
\end{align}
Finally, we consider the mean of the squared error signal
\begin{equation}
\mathcal{E}\{e_n^2\} \stackrel{(\ref{equ:ObservSpe}),(\ref{equ:Error})}{=} \mathcal{E}\{(\mathbf{x}_n^T(\mathbf{h}_n-\mathbf{\hat{h}}_{n-1}) + v_n)^2\},
\label{equ:UmrechnungE1}
\end{equation}
which is not conditioned on the microphone signal $d_n$. By applying the conditional independence rules to the Bayesian network in Fig.~\ref{fig:ParticleModel}, the head-to-head relationship with respect to $d_n$ implies $v_n$ to be statistically independent from
$\mathbf{h}_{n-1}$ and $\mathbf{w}_{n}$, respectively. Consequently, we can rewrite (\ref{equ:UmrechnungE1}) as:
\begin{align}
\mathcal{E}\{e_n^2\}
\stackrel{(\ref{equ:micUnc})}{=} \hspace*{1.2mm} & \hspace*{3mm} \mathbf{x}_n^T \mathcal{E}\{(\mathbf{h}_n-\mathbf{\hat{h}}_{n-1})(\mathbf{h}_n-\mathbf{\hat{h}}_{n-1})^T\}\mathbf{x}_n + C_{v,n}\notag \\
\stackrel{(\ref{equ:TranEq}),(\ref{equ:varDef})}{=} \hspace*{-1.2mm} & \hspace*{3mm}\mathbf{x}^T_n\mathbf{x}_n (C_{\mathbf{h},n-1} +C_{\mathbf{w},n}) + C_{v,n}.
\label{equ:UmrechnungE2}
\end{align}
The insertion of (\ref{equ:UmrechnungD})~and~(\ref{equ:UmrechnungE2}) into the stepsize defined in~(\ref{equ:Lmabda}) yields the identical expression for $\lambda_n$ as in (\ref{equ=alphaSchul}).
The main difference of the proposed EM-NLMS algorithm is that the model parameters $C_{\mathbf{h},n}$ and $C_{\mathbf{w},n}$ (and consequently the normalized stepsize $\lambda_n$) are estimated in the M step of the EM algorithm instead of being approximated
using~(\ref{equ=alphaSchulApp}).
\section{Experimental results}
\label{sec:exper}
This section focuses on the experimental verification of the EM-NLMS algorithm (``EM-NLMS'') in comparison to the adaptive stepsize-NLMS algorithm described in Section~\ref{sec:Intro} (``Adapt. NLMS'')
and the conventional \ac{NLMS} algorithm (``Conv. NLMS'') with a fixed stepsize.
An overview of the algorithms including the individually tuned model parameters is shown in Table~\ref{tab:Overview}.
Note the regularization of all three stepsize updates by the additive constant~$\epsilon=0.01$ to avoid a division by zero.
For the evaluation, we synthesize the microphone signal by convolution of the loudspeaker signal with an \ac{RIR} vector measured in a room with $T_{60}=100$~ms (filter length $M=512$ at a sampling rate of $16$~kHz).
This is realized for both white noise and a male speech signal as loudspeaker signals.
Furthermore, background noise is simulated by adding Gaussian white noise at a global signal-to-noise ratio of $20$~dB.
The comparison is realized in terms of the stepsize $\alpha_n$ and the system distance~$\Delta h_n$ as a measure for the system identification performance:\\[-2mm]
\begin{equation}
\Delta h_n= 10 \log_{10}\frac{||\mathbf{\hat{h}}_n-\mathbf{h}_n ||_2^2}{||\mathbf{h}_n ||_2^2} \; \text{dB}, \quad \alpha_n = \lambda_n (\mathbf{x}^T_n \mathbf{x}_n).
\label{equ:NMA}
\end{equation}\\[-2mm]
The results for white noise as input signal are illustrated in Fig.~\ref{fig:ResultsWGN}.
Note that in Fig.~\ref{fig:ResultsWGN}a) the EM-NLMS shows the best system identification compared to the Adapt. NLMS and the Conv.~NLMS.
As depicted in Fig.~\ref{fig:ResultsWGN}b), the stepsize $\alpha_n$ of the EM-NLMS and the Adapt. NLMS decreases from a value of $0.5$ with the stepsize of the EM-NLMS decaying more slowly.\\
For male speech as input signal, we improve the convergence of the Conv. NLMS by setting a fixed threshold to stop adaptation ($\alpha_n=0$) in speech pauses.
Furthermore, the absolute value of $\lambda_n$ for the Adapt. NLMS is limited to 0.5 (for a heuristic justification see~\cite{NLMSschultheis}).
As illustrated in Fig.~\ref{fig:ResultsSPEECH}a), the EM-NLMS shows again the best system identification compared to the Adapt.~NLMS and the Conv.~NLMS. By focusing on a small time frame, we can see in Fig.~\ref{fig:ResultsSPEECH}b)
that the stepsize $\alpha_n$ of the EM-NLMS algorithm
is not restricted to the values of $0$ and $0.5$ (as Conv.~NLMS) and not affected by oscillations (as~Adapt.~NLMS).\\
Note that the only relevant increase in computational complexity of the EM-NLMS relative to the Conv. NLMS is caused by the scalar product $\mathbf{\hat{h}}_n^T\mathbf{\hat{h}}_n$ for the calculation of $C_{\mathbf{w},n}$ (cf. Table~\ref{tab:Overview}), which seems relatively small compared to other sophisticated stepsize adaptation algorithms.
\begin{table}[h!]
\centering
\caption{Realizations of the EM-NLMS algorithm~(``EM-NLMS''), \newline the~NLMS algorithm due to \cite{Variable1982} (``Adapt. NLMS'') and \newline the conventional NLMS algorithm (``Conv.~NLMS'') \qquad \quad}
\begin{tabular}[c]{|c|c|}
\hline
& $\mathbf{\hat{h}}_n = \mathbf{\hat{h}}_{n-1} + \lambda_n \mathbf{x}_n e_n$ \\
\hline
EM-NLMS& $\lambda_n= \frac{C_{\mathbf{h},n-1} +C_{\mathbf{w},n}}{\mathbf{x}^T_n\mathbf{x}_n (C_{\mathbf{h},n-1} +C_{\mathbf{w},n}) + C_{v,n} + \epsilon}$ \\[1mm]
& $C_{\mathbf{h},n}= \left( 1 -\lambda_n \frac{\mathbf{x}^T_n \mathbf{x}_n}{M} \right) (C_{\mathbf{h},n-1} +C_{\mathbf{w},n})$\\[1mm]
& $C_{v,n+1} = \left( d_n - \mathbf{x}^T_n\mathbf{\hat{h}}_n \right)^2 + \mathbf{x}^T_n\mathbf{x}_nC_{\mathbf{h},n}$ \\[1mm]
& $C_{\mathbf{w},n+1} =C_{\mathbf{h},n} - C_{\mathbf{h},n-1} + \frac{\mathbf{\hat{h}}_n^T\mathbf{\hat{h}}_n - \mathbf{\hat{h}}_{n-1}^T\mathbf{\hat{h}}_{n-1}}{M}$ \\[1mm]
& $C_{\mathbf{h},0} =C_{\mathbf{w},0} = C_{v,0} = 0.1$, $\; \epsilon=0.01$ \\[1mm]
\hline
&\\[-6mm]
Adapt. NLMS &$\lambda_{n} \approx \frac{1}{ N_T} \frac{ \sum\limits_{\kappa=0}^{N_T-1} \hat{h}^2_{\kappa,n-1} }{ (1-\eta) e_n^2 + \eta \mathcal{E}\{e_{n-1}^2\}+ \epsilon }$\\[1mm]
& $ N_T = 5,\;\; \eta = 0.9,\;\; e_0^2 = 0.1,\;\; \epsilon=0.01 $ \\[1mm]
\hline
Conv. NLMS &$\lambda_{n} =\frac{0.5}{\mathbf{x}^T_n\mathbf{x}_n + \epsilon } , \;\; \epsilon = 0.01$\\[1mm]
\hline
\end{tabular}
\label{tab:Overview}
\end{table}
\section{Conclusion}
\label{sec:conclu}
In this article, we derive the EM-NLMS algorithm from a Bayesian network perspective and show the equality with respect to the NLMS algorithm initially proposed in \cite{Variable1982}.
As main difference, the stepsize is estimated in the M~Step of the EM algorithm instead of being approximated by artificially extending the acoustic echo path.
For the derivation of the EM-NLMS algorithm, which is experimentally shown to be promising for the task of linear AEC, we define a probabilistic model for linear system identification and exploit the product and conditional
independence rules of Bayesian networks.
All together this article exemplifies the benefit of applying machine learning techniques to classical signal processing~tasks.\\[-2.8mm]
\begin{figure}
\caption{Comparison of the EM-NLMS algorithm (``EM-NLMS''), the NLMS algorithm due to \cite{Variable1982} (``Adapt.~NLMS'') and the conventional NLMS algorithm (``Conv.~NLMS'') for white noise as input signal.}
\label{fig:ResultsWGN}
\end{figure}\\[-7mm]
\begin{figure}
\caption{Comparison of the EM-NLMS algorithm (``EM-NLMS''), the NLMS algorithm due to \cite{Variable1982} (``Adapt.~NLMS'') and the conventional NLMS algorithm (``Conv.~NLMS'') for male speech as input signal.}
\label{fig:ResultsSPEECH}
\end{figure}
\end{document} |
\begin{document}
\title{Limit holomorphic sections and Donaldson's construction of symplectic submanifolds}
Donaldson proved (in \cite{Do96}) that if $L$ is a suitable positive line bundle over a closed symplectic manifold $X$,
then, for $k$ sufficiently large, the tensor power $L^k$ admits sections whose zero sets are symplectic submanifolds of $X$
(the sections are approximately holomorphic and they satisfy some uniform transversality condition).
The construction relies on the following observation: the local geometry of the bundles $L^k$ near any point $p\in X$,
after a normalization, converges to a model holomorphic Hermitian line bundle $K$ over (some ball in) the tangent space $T_p X$.
In this note, we will describe this phenomenon in detail and exploit it to reformulate Donaldson's theorem as a compactness result:
near each point $p$, the sections he obtains accumulate to holomorphic sections
of $K$ (that we call ``limit sections'')
and their uniform transversality properties correspond to transversality properties of their limits.
Of course, similar considerations apply to all constructions based on Donaldson's techniques (e.g. \cite{Au01}, \cite{IbMaPr00}).
{\bf Acknowledgements.}
I want to thank Emmanuel Giroux for many important suggestions.
\section{Limit sections}
Let $X=(X,\, \omega,\, J,\, g)$ be a closed almost-K\"{a}hler manifold. Hence $\omega$ is a symplectic form, $J$ is an almost-complex structure
and $g$ is a Riemannian metric, satisfying the following compatibility condition:
$g(V,W) = \omega(V,JW)$.
Endow $X$ with a prequantization $L$ (a prequantization is a Hermitian line bundle over $X$ equipped
with a unitary connection of curvature $-i2\pi \omega$).
The charts we will use are normal coordinates with respect to the renormalized metric $g_k=kg$.
Let $B\subset \mathbb{C}^n$ denote the unit ball, with $n= \frac{1}{2}\, $dim$_{\mathbb{R}} X$.
Fix, for every large integer $k$, a chart $\varphi_k : B \rightarrow X$ satisfying two conditions:
(1) The chart $\varphi_k$ is an exponential map for the metric $g_k$ (i.e. given any unit vector $v\in \mathbb{C}^n$, the curve $t \mapsto \varphi_k (tv)$
is a geodesic with $g_k-$length $1$ velocity vector).
(2) The differential $D\varphi_k (0)$ is a $\mathbb{C} - $linear map.
\\
\\
Since $\varphi_k$ is a local diffeomorphism, one can transfer to $B$ the renormalized almost-K\"{a}hler structure
$(\omega_k = k\omega,\, J,\, g_k =kg)$ and it is well known that
this almost-K\"{a}hler structure tends to the standard flat K\"{a}hler structure on $B$, as $k \rightarrow \infty$, in the
${\cal C}^{\infty} -$topology.
The following observation is well-known to experts: the local geometry of
the bundle $L^k$
converges to the geometry of a model line bundle.
Fix some unitary radially flat isomorphism between the pullback line bundle $\varphi_k^* L^k$
and the trivial Hermitian line bundle $B \times \mathbb{C} \rightarrow B$.
Hence, the connection of $\varphi_k^* L^k$ induces a unitary connection $\nabla^k$ on $B \times \mathbb{C} \rightarrow B$.
As $k \rightarrow \infty$, the connection $\nabla^k$
tends to some model connection $\nabla^{\infty}$ on $B \times \mathbb{C} \rightarrow B$,
defined by:
$$
\nabla^{\infty} = d- i\pi \sum_{\alpha = 1}^{n} (x_{\alpha} dy_{\alpha} - y_{\alpha} dx_{\alpha}).
$$
There is a more conceptual description of $\nabla^{\infty}$:
the model connection $\nabla^{\infty}$ is the only radially trivial connection with curvature
$-i2 \pi \sum_{\alpha = 1}^{n} dx_{\alpha} \wedge dy_{\alpha}$.
\\
\\
Warning. Let $s$ be a section of the trivial bundle $B \times \mathbb{C} \rightarrow B$.
We say that $s$ is holomorphic if it is holomorphic for the connection $\nabla^{\infty}$.
Although the section $s$ is a function, it is not the usual concept of holomorphic function.
For example, the function $\exp\left( - \frac{\pi}{2} \sum_{\alpha = 1}^{n} | z_{\alpha} |^2 \right)$
is a holomorphic section and, more generally, the section $s$
of $B \times \mathbb{C} \rightarrow B$
is holomorphic if and only if the function
$s \,\exp\left( \frac{\pi}{2} \sum_{\alpha = 1}^{n} | z_{\alpha} |^2 \right)$
is holomorphic in the usual sense.
\\
\\
This set of tools is well-known to experts. We will use it to study sequences of sections.
The following two definitions play an important role in our reformulation of Donaldson's theory.
\begin{defn}\label{Def1}
For every sufficiently large integer $k$, let $s_k$ be a ${\cal C}^{\infty} -$smooth section of $L^k$.
We say that the sequence $(s_k)$ is {\it renormalizable} if it satisfies the following compactness condition.
Let $(k_l)$ be a subsequence of the positive integers.
For every sufficiently large integer
$l$, let $\varphi_l$ be a chart satisfying conditions
(1) for $k_l$ (that is,
$\varphi_l$ is an exponential
map for $g_{k_l}$)
and (2) and let $j_l$ be a
unitary radially flat isomorphism between the trivial line bundle $B \times \mathbb{C} \rightarrow B$
and the pullback bundle $\varphi_l^* L^{k_l}$.
If $\sigma_l$ denotes the section of the trivial
bundle $B \times \mathbb{C} \rightarrow B$ corresponding to the pullback section $\varphi_l^*s_{k_l}$ via the
isomorphism $j_l$,
then the sequence $(\sigma_l)$ has a subsequence $(\sigma_{l_m})$ which converges over $B$ for the
smooth compact-open topology.
\end{defn}
\begin{defn}\label{Def2}
The limit of $(\sigma_{l_m})$ is called a {\it limit section} of the renormalizable sequence $(s_k)$.
Hence, a limit section is a section of $B \times \mathbb{C} \rightarrow B$.
\end{defn}
We emphasize that we {\it don't} assume that all charts $\varphi_l$ have the same center.
\\
\\
Let $(s_k)$ be a renormalizable sequence.
If the sections $s_k$ are holomorphic then the limit sections are holomorphic.
More generally, let's state an informal principle:
if the sections $s_k$ satisfy some closed condition then one may
infer that the limit sections satisfy some corresponding condition.
We won't be more specific about this principle (we won't even explain the meaning of the word {\it closed}).
Of course, concerning open conditions, it goes in the opposite direction.
For example, if all limit sections are transverse to $0$ then $s_k$ is transverse to $0$, for every sufficiently large integer $k$.
If, in addition, the zero sets of limit sections are symplectic submanifolds in $B$
(for the symplectic form $\sum_{\alpha = 1}^{n} dx_{\alpha} \wedge dy_{\alpha}$),
then for every sufficiently large integer $k$, the zero set of $s_k$ is a symplectic submanifold in $X$.
Note that every complex submanifold is symplectic. Hence one gets the following proposition.
\begin{prop}{\label{o}}
For every sufficiently large integer $k$, let $s_k$ be a ${\cal C}^{\infty} -$smooth section of $L^k$.
Suppose that $(s_k)$ is a renormalizable sequence and suppose that every limit section of
the sequence $(s_k)$ is holomorphic and transverse to $0$.
Then for every sufficiently large integer $k$, the zero set of $s_k$ is a codimension $2$ symplectic submanifold in $X$.
\end{prop}
In the integrable case ($X$ is K\"{a}hler), the sections $s_k$ we will consider
are often holomorphic whereas in the non-integrable case ($X$ almost-K\"{a}hler), typically,
the limit sections are holomorphic but the sections $s_k$ aren't.
\section{Transversality theorems}
To compare with, let's state a consequence of the Kodaira embedding theorem.
\begin{thm}{\label{BK}}
Suppose $J$ is integrable. Then, for every sufficiently large integer $k$, there exists a holomorphic section $s_k$ of $L^k$ which is transverse to $0$.
\end{thm}
Proof.
Kodaira's theorem implies that, for every sufficiently large $k$, there are no base points.
Hence, almost every section is transverse to $0$, by the Bertini theorem.
\\
\\
\indent
Let's first state Donaldson's theorem in the integrable case.
\begin{thm}{\label{DI}}
Suppose $J$ is integrable. Then, for every integer $k\geq 1$, there exists a holomorphic section $s_k$ of $L^k$ such that:
(1) The sequence $(s_k)$ is renormalizable.
(2) The limit sections of the sequence $(s_k)$ are transverse to $0$.
\end{thm}
In the integrable case, one may describe Donaldson's theorem as an elaborate variant of Theorem {\ref{BK}}.
The variant has the advantage of being easily transferable to symplectic geometry.
Of course this was Donaldson's main goal and most applications of his techniques are symplectic and contact results.
It is known that if $X$ is almost-K\"{a}hler, then, in general, one can't get holomorphic sections.
Nevertheless, we get {\it asymptotically holomorphic} sections.
In our reformulation, the definition is quite simple:
a renormalizable sequence of smooth sections
is {\it asymptotically holomorphic} if every limit section is holomorphic for the connection $\nabla^{\infty}$.
Note that this definition is weaker than the usual quantitative definition. However, in practice,
the following version of Donaldson's theorem is sufficient for many corollaries.
\begin{thm}{\label{D}}
For every integer $k\geq 1$, there exists a ${\cal C}^{\infty} -$smooth section $s_k$ of $L^k$ such that:
(1) The sequence $(s_k)$ is renormalizable.
(2) The limit sections of the sequence $(s_k)$ are holomorphic and transverse to $0$.
\end{thm}
(Hence, for every sufficiently large integer $k$, the section $s_k$ is transverse to $0$ and, by Proposition \ref{o},
the zero set of $s_k$ is a codimension $2$ symplectic submanifold.)
\\
\\
\indent
Proof of Theorem {\ref{DI}} and Theorem {\ref{D}}.
Donaldson's techniques (in \cite{Do96}, see also \cite{Au97} and \cite{Do99}) produce sections $s_k$ satisfying two families of estimates:
\begin{eqnarray*}
\| s_k \|_{{\cal C}^r, g_k} & = & O (1)
\\
\| \overline{\partial} s_k \|_{{\cal C}^r , g_k} & = & O (k^{-\frac{1}{2}})
\end{eqnarray*}
(for every natural integer $r$),
and a uniform transversality condition:
\begin{eqnarray*}
\min_{p\in X} \left( \| s_k(p) \| + \| \nabla s_k(p) \|_{g_k} \right) & \geq & \eta
\end{eqnarray*}
where one calculates the ${\cal C}^r-$norm and the norm of $\nabla s_k(p)$ with the renormalized metric $g_k = kg$.
Here $\eta$ denotes a positive number, independent of $k$.
Recall the notations of Definition {\ref{Def1}}.
The section $\sigma_l$ of $B \times \mathbb{C} \rightarrow B$
corresponds to the pull-back section $\varphi_l^*s_{k_l}$ where $\varphi_l$ is the exponential map for the renormalized metric $g_{k_l}$.
The first estimate implies the following estimate:
$$
\| \sigma_{l} \|_{{\cal C}^r} = O (1)
$$
on the unit ball $B$. Hence, some subsequence of $(\sigma_l)$ converges in the smooth topology and the sequence $(s_k)$ is renormalizable.
The connection of $\varphi_l^* L^{k_l}$ induces a unitary connection $\nabla^{k_l}$ on $B \times \mathbb{C} \rightarrow B$.
Let $\overline{\partial}^{k_l}$ denote the $(0,1)-$part of $\nabla^{k_l}$
and let $\overline{\partial}^{\infty}$ denote the $(0,1)-$part of the limit connection $\nabla^{\infty}$.
Donaldson's second estimate implies the following estimate:
$$
\| \overline{\partial}^{k_l} \sigma_{l} \|_{{\cal C}^r} = O (k_l^{-\frac{1}{2}}).
$$
Since $\nabla^{k_l}$ tends to $\nabla^{\infty}$, the $(0,1)-$part
$\overline{\partial}^{\infty} \sigma_l$ tends to $0$ and the sequence $(s_k)$ is asymptotically holomorphic.
The third estimate implies the following estimate:
$$
\min_{p\in B} \left( \| \sigma_{l}(p) \| + \| \nabla \sigma_{l}(p) \| \right) \geq \frac{\eta}{2}.
$$
Every limit $\sigma_{\infty}$ of a subsequence $(\sigma_{l_m})$ (see Definition \ref{Def2}) satisfies the same estimate:
$$
\min_{p\in B} \left( \| \sigma_{\infty}(p) \| + \| \nabla \sigma_{\infty}(p) \| \right) \geq \frac{\eta}{2}.
$$
Hence, $\sigma_{\infty}$ is transverse to $0$.
The proof of Theorem \ref{D} is completed.
In the integrable case, Donaldson's sections are holomorphic and the proof of Theorem \ref{DI} is similar.
\\
\\
\indent
As noted by Donaldson, the asymptotic transversality property provides bounds for the Riemannian geometry of the zero set, see
\cite[Corollary 33]{Do96}.
For example, one gets the following result.
\begin{prop}
Let $(s_k)$ be a renormalizable sequence. Suppose every limit section is transverse to $0$.
Let $Y_k$ be the zero set of $s_k$.
For every sufficiently large integer $k$,
if $p$ lies in $Y_k$ and $A\subset T_p Y_k$ is a 2-plane,
then the sectional curvature $K_{Y_k,g_k} (p,A)$ of $Y_k$ at $(p,A)$ for the metric $g_k$
satisfies the following estimate:
$$
| K_{Y_k,g_k} (p,A) | \leq C
$$
where the bound $C$ is independent of $k$, $p$ and $A$.
\end{prop}
(Hence, if one prefers to calculate with the metric $g$, the sectional curvature is
bounded by some linear function of $k$ because $K_{Y_k,g} (p,A) = k\, K_{Y_k,g_k} (p,A)$.)
\\
\\
\indent
Proof.
Define $u_k= \max_{(p,A)} | K_{Y_k,g_k} (p,A) |$. Since $Y_k$ is compact,
there exist a point $p_k \in Y_k$ and a 2-plane $A_k \subset T_{p_k} Y_k$ satisfying the following equation:
$$ | K_{Y_k,g_k} (p_k,A_k ) | = u_k.$$
Let $\varphi_k$ be a chart centered at $p_k$ satisfying conditions (1) and (2)
of Definition {\ref{Def1}}.
Consider the 2-plane $A_k' =(d\varphi_k(0))^{-1} (A_k) \subset \mathbb{C}^n$.
Since the set of 2-planes of $\mathbb{C}^n$ is compact, every subsequence of $(A_k')$
admits a convergent subsubsequence $(A_{k_l}')$.
The corresponding sequence $\sigma_l$
(using the notations of Definition {\ref{Def1}})
admits a limit $\sigma_{\infty}$ which is transverse to $0$. Therefore
the zero set $Y_{\infty} \subset B$ of $\sigma_{\infty}$
is a submanifold and the local geometry of
the corresponding submanifolds $(Y_{k_l})$
converges to the geometry of $Y_{\infty}$.
In particular, the sequence
$(u_{k_l})$ tends to $| K_{Y_{\infty},\mu} (0,A'_{\infty}) |$
where $\mu$ is the standard Euclidean metric on $\mathbb{C}^n$
and $A'_{\infty}$ is the limit 2-plane.
Hence every subsequence of the sequence $(u_k)$ admits a convergent subsubsequence and therefore $(u_k)$ is a bounded sequence.
\end{document} |
\begin{document}
\title[Non-domination, cycles, and self-replication]{Internal perturbations of homoclinic classes:\\
non-domination, cycles, and self-replication}
\author{Ch. Bonatti}
\address{Institut de Math\'ematiques de Bourgogne, BP 47 870, 1078 Dijon Cedex, France}
\email{[email protected]}
\author{S. Crovisier}
\address{Institut Galil\'ee, Universit\'e Paris 13, Avenue J.-B. Cl\'ement, 93430 Villetaneuse, France}
\email{[email protected]}
\author{L. J. D\'iaz}
\address{Depto. Matem\'atica, PUC-Rio, Marqu\^es de S. Vicente
225, 22453-900 Rio de Janeiro RJ, Brazil}
\email{[email protected]}
\author{N. Gourmelon}
\address{Institut de Math\'ematiques de Bordeaux, Universit\'e de Bordeaux, 351, cours de la Lib\'eration, F33405 Talance Cedex France}
\email{[email protected]}
\thanks{This paper was partially supported by CNPq, Faperj, Pronex
(Brazil), the ANR project DynNonHyp BLAN08-2$_-$313375, and the
Agreement Brazil-France in Mathematics. LJD thanks the kind
hospitality of LAGA at Univ. Paris 13 and the Institut de
Math\'ematiques of Universit\'e de Bourgogne. SC and LJD thank
the financial support and the warm hospitality of Institut
Mittag-Leffler. We are grateful for comments by K. Shinohara that allowed us to improve the
presentation of Section~\ref{s.viral}.}
\subjclass[2000]{Primary:37C29, 37D20, 37D30} \keywords{chain
recurrence class, dominated splitting, heterodimensional cycle,
homoclinic class, homoclinic tangency, linear cocycle,
$C^{1}$-robustness, wild dynamics}
\date{\today}
\begin{abstract}
Conditions are provided under which lack of domination of a
homoclinic class yields robust heterodimensional cycles. Moreover,
so-called viral homoclinic classes are studied. Viral classes have
the property of generating copies of themselves producing wild
dynamics (systems with infinitely many homoclinic classes with
some persistence). Such wild dynamics also exhibits uncountably
many aperiodic chain recurrence classes. A scenario (related with
non-dominated dynamics) is presented where viral homoclinic
classes occur.
A key ingredient are adapted perturbations of a diffeomorphism
along a periodic orbit. Such perturbations preserve certain
homoclinic relations and prescribed dynamical properties of a
homoclinic class.
\end{abstract}
\maketitle
\section{Introduction}\label{s.introd}
There are two sort of cycles associated to periodic saddles that
are the main mechanism for breaking hyperbolicity of systems:
\medskip
\noindent $\bullet$
{\bf Homoclinic tangencies:} A diffeomorphism $f$ has a
{\emph{homoclinic tangency}} associated to a
transitive hyperbolic set $K$ if there are points $X$ and $Y$ in $K$ whose stable and unstable manifolds have some
non-transverse intersection. The homoclinic tangency is
{\emph{$C^r$-robust}} if there is a $C^r$-neighborhood $\cN$ of
$f$ such that the hyperbolic continuation $K_g$ of $K$ has a
homoclinic tangency for every $g\in \cN$.
\medskip
\noindent $\bullet$ {\bf{Heterodimensional cycles:}} A
diffeomorphism $f$ has a \emph{heterodimensional cycle} associated
to a pair of transitive hyperbolic sets $K$ and $L$ of $f$ if
their stable bundles have different dimensions and their invariant
manifolds meet cyclically, that is, $W^s(K) \cap W^u (L)\ne
\emptyset$ and $W^u(K) \cap W^s (L)\ne \emptyset$. The
heterodimensional cycle is {\emph{$C^r$-robust}} if there is a
$C^r$-neighborhood $\cV$ of $f$ such that the continuations $K_g$
and $L_g$ of $K$ and $L$ have a heterodimensional cycle for every
$g\in \cV$.
Given a closed manifold $M$ consider the space $\mathrm{Diff}^r(M)$ of
$C^r$-diffeo\-mor\-phisms defined on $M$ endowed with the usual
$C^r$-topology. There is the following conjecture about
hyperbolicity and cycles:
\begin{conj}[Palis' density conjecture, \cite{P:00}]
Any diffeomorphism $f\in \mathrm{Diff}^r(M)$, $r\ge 1$, can be
$C^r$-approximated either by a hyperbolic diffeomorphism (i.e.
satisfying the Axiom A and the no-cycles condition) or by a
diffeomorphism that exhibits a homoclinic tangency or a
heterodimensional cycle.
\end{conj}
This conjecture was proved for $C^1$-surface diffeomorphisms in
\cite{PS:00}. For some partial progress in higher dimensions see
\cite{BD:08,CP:prep}.
Besides this conjecture one also aims to understand the dynamical
phenomena associated to homoclinic tangencies and
heterodimensional cycles and the interplay between them. We
discuss these topics in the next paragraphs.
Homoclinic tangencies of $C^2$-diffeomorphisms are the main source
of non-hyperbolic dynamics in dimension two, see
\cite{PT:93,N:04}. Namely, as a key mechanism a homoclinic
tangency of a surface $C^2$-diffeomorphism yields $C^2$-robust
homoclinic tangencies and generates open sets of diffeomorphisms
where the generic systems display infinitely many sinks or
sources, \cite{N:78,N:79}. This leads to the first examples of
the so-called {\emph{wild dynamics}} (i.e. systems having
infinitely many elementary pieces of dynamics with some
persistence, see \cite[Chapter 10]{BDV:04} for a discussion and
precise definitions). Moreover, these homoclinic tangencies also
yield infinitely many regions containing robust homoclinic
tangencies associated to other hyperbolic sets (this follows from
\cite{N:79} and \cite{Co:98}, see also the comments in
\cite[page 33]{BDV:04}). Using the terminology in \cite{B:bible},
this means that, for surface diffeomorphisms, the existence of
$C^2$-robust tangencies is a \emph{self-replicating} or
\emph{viral} property, for more details see
Section~\ref{ss.viral}.
Comparing with the $C^2$-case, $C^1$-diffeomorphisms of surfaces
do not have hyperbolic sets with robust homoclinic tangencies, see
\cite{M:pre} and also \cite[Corollary 3.5]{B:bible} for a formal
statement. However, in higher dimensions $C^1$-diffeomorphisms can
display robust tangencies, see for instance \cite{S:72,A:08}.
In higher dimensions, the first examples of robustly
non-hyperbolic dynamics were obtained by Abraham and Smale in
\cite{AS:70} by constructing diffeomorphisms with robust
heterodimensional cycles (although this terminology is not used there).
Moreover, the diffeomorphisms with heterodimensional cycles in
\cite{AS:70} also exhibit robust homoclinic tangencies (this
follows from \cite{BD:pre}).
In the $C^1$-setting, the generation of homoclinic tangencies is a
quite well understood phenomenon that is strongly related to the
existence of non-dominated splittings, \cite{W:04,BGV:06,G:10}.
Contrary to the case of tangencies, the generation of
heterodimensional cycles is not well understood and remains the
main difficulty for solving Palis conjecture in the $C^1$-case. In
contrast with the case of $C^1$-homoclinic tangencies,
heterodimensional cycles yield $C^1$-robust cycles after small
$C^1$-perturbations, \cite{BD:08}. However, in dimension $d\ge 3$,
we do not know ``when and how'' homoclinic tangencies
may occur $C^1$-robustly. In fact, all known examples of
$C^1$-robust tangencies also exhibit $C^1$-robust
heterodimensional cycles\footnote{The converse is false: there are
diffeomorphisms (of partially hyperbolic type with one dimensional
central direction) that display robust heterodimensional cycles
but cannot have homoclinic tangencies, see for instance
\cite{M:78,BD:95}.}. For further discussion see \cite[Conjecture
6]{B:bible}.
These comments lead to the following
strong version of Palis' conjecture (in fact, this reformulates
\cite[Question 1]{BD:08}):
\begin{conj}[{\cite[Conjecture 7]{B:bible}}]
\label{c.bonatti} The union of the set of hyperbolic
diffeomorphisms (i.e. satisfying the Axiom A and the no-cycle
condition) and of the set of diffeomorphisms having a robust
heterodimensional cycle is
dense in $\mathrm{Diff}^1(M)$.
\end{conj}
This conjecture holds in two relevant $C^1$-settings: the
conservative diffeomorphisms in dimension $d\ge 3$ and the so
called \emph{tame} systems (diffeomorphisms whose chain recurrence
classes are robustly isolated), see \cite{C:pre} and
\cite[Theorem 2]{BD:08}. See also
previous results in \cite{A:03,GW:03}.
\subsection{Some informal statements and
questions}\label{ss.informal} In what follows we focus on
$C^1$-diffeomorphisms defined on closed manifolds of dimension
$d\ge 3$. We now briefly and roughly describe some of our results
and the sort of questions we will consider (the precise
definitions and statements will be given throughout the
introduction).
\medskip
\noindent {\bf {A)}}
{\em When do homoclinic tangencies yield heterodimensional cycles?}
In terms of {\emph{dominated splittings,}} Theorem~\ref{t.main} and
Corollary~\ref{c.main} give a natural setting where homoclinic
tangencies generate heterodimensional cycles after arbitrarily
small $C^1$-perturbations.
\medskip
\noindent {\bf {B)}} {\em What are obstructions to the occurrence
of heterodimensional cycles?} {\em Sectional dissipativity}
prevents the ``coexistence'' of periodic saddles with different
indices and hence the occurrence of heterodimensional cycles. For
homoclinic classes that do not have dominated splittings, we
wonder if this is the only possible obstruction for the
generation of heterodimensional cycles.
Corollary~\ref{c.excnodominated} shows that sectional
dissipativity is indeed the only obstruction for the occurrence of
heterodimensional cycles in homoclinic classes without any
dominated splitting.
\medskip
\noindent {\bf {C)}} {\em Is it possible to turn the lack of
domination into a robust property?} For homoclinic classes,
Theorem~\ref{t.complex} shows that the non-existence of a
{\emph{dominated splitting of index $i$}} can always be made a
robust property when the class contains some saddle of stable
index different from $i$.
\medskip
\noindent {\bf {D)}} {\em Which are the dynamical features
associated to robust non-do\-mi\-na\-ted dynamics?} In contrast to
the case of surfaces, homoclinic tangencies and ``some'' lack of
domination do not always lead to wild dynamics. A homoclinic
tangency corresponds to the lack of domination of some index. For
homoclinic classes containing saddles of several stable indices,
Theorem~\ref{t.bviral} and Corollary~\ref{c.bviral} claim that the
robust lack of any domination leads to wild dynamics. In fact,
Theorem~\ref{t.bviral} asserts that the property of ``total
non-domination plus coexistence of saddles of several indices''
provides another example of a viral property of a chain recurrence
class. This property leads to the generic coexistence of a
non-countable set of different (aperiodic) classes, extending
previous results in \cite{BD:02}.
\medskip
We next present the main definitions involved in this
paper and state our main results.
\subsection{Basic definitions}\label{ss.basic}
We will focus on two types of elementary pieces of the dynamics:
homoclinic classes and chain recurrence classes.
The {\emph{homoclinic class}} of a hyperbolic periodic point $P$,
denoted by $H(P,f)$, is the closure of the transverse
intersections of the stable and unstable manifolds of the orbit of
$P$. Note that the class $H(P,f)$ coincides with the closure of
the saddles $Q$ {\emph{homoclinically related with $P$}}: the
stable manifold of the orbit of $Q$ transversely meets the
unstable manifold of the orbit of $P$ and vice-versa.
To define a {\emph{chain recurrence class}} we need some
preparatory definitions. A finite sequence of points
$(X_i)_{i=0}^n$ is an {\emph{$\varepsilon$-pseudo-orbit}} of a
diffeomorphism $f$ if $\mbox{dist\,}(f(X_i),X_{i+1})<\varepsilon$ for
all $i=0,\dots,n-1$. A point $X$ is {\emph{chain recurrent}} for
$f$ if
for every $\varepsilon>0$ there is an $\varepsilon$-pseudo-orbit
$(X_i)_{i=0}^n$, $n\ge 1$,
starting and ending at $X$ (i.e. $X=X_0=X_n$). The chain recurrent
points form the {\emph{chain recurrent set}} of $f$, denoted by
$R(f)$. This set splits into disjoint {\emph{chain recurrence
classes}} defined as follows. The class of a point $X\in R(f)$,
denoted by $C(X,f)$, is the set of points $Y\in M$ such that for
every $\varepsilon>0$ there are $\varepsilon$-pseudo-orbits joining $X$
to $Y$ and $Y$ to $X$. A chain recurrence class that does not
contain periodic points is called {\emph{aperiodic.}}
As a remark, in general, for hyperbolic periodic points their
chain recurrence classes contain their homoclinic ones. However,
for $C^1$-generic diffeomorphisms the equality holds,
\cite[Remarque 1.10]{BC:04}.
A key ingredient in this paper is the notion of {\emph{dominated
splitting}}:
\begin{defi}[Dominated splitting]\label{d.dominated}
Consider a diffeomorphism $f$ and a compact $f$-invariant set
$\Lambda$. A $Df$-invariant splitting $T_{\Lambda}M=E\oplus F$ over $\Lambda$
is {\emph{dominated}} if the fibers $E_x$ and $F_x$ of $E$ and $F$
have constant dimensions and there exists $k\in \NN$ such that
\begin{equation}
\label{e.dominated} \frac{||D_x f^k(u)||}{||D_xf^{k} (w) ||}\le
\frac{1}{2},
\end{equation}
for
every $x\in \Lambda$ and every pair of unitary vectors $u\in E_x$ and
$w\in F_x$.
The {\emph{index}} of the dominated splitting is the dimension of
$E$.
When we want to stress the role of the constant $k$ we say
that the splitting is \emph{$k$-dominated.}
\end{defi}
Given a periodic point $P$ of $f\in \mathrm{Diff}^1(M)$ denote by $\pi(P)$ its
period. We order the eigenvalues $\lambda_1(P),\dots, \lambda_d(P)$ of
$D_P f^{\pi(P)}$ in increasing modulus and counted with
multiplicity, that is, $|\lambda_i(P)|\le |\lambda_{i+1}(P)|$. We call
$\lambda_i(P)$ the {\emph{$i$-th multiplier}} of $P$. The \emph{$i$-th
Lyapunov exponent} of $P$ is $\chi_i(P)=\frac{1}{\pi(P)} \, \log
|\lambda_i(P)|$. If $\chi_i(P)<\chi_{i+1}(P)<0$ then one can define
the {\emph{strong stable manifold of dimension $i$}} of the orbit
of $P$, denoted by $W^{ss}_i(P,f)$, as the only $f$-invariant
embedded manifold of dimension $i$ tangent to the $i$-dimensional
eigenspace corresponding to the multipliers $\lambda_1(P),\dots ,
\lambda_i(P)$. There are similar definitions for strong unstable
manifolds.
Recall that if $\Lambda$ is a hyperbolic set of $f$ then every
diffeomorphism $g$ close to $f$ has a hyperbolic set $\Lambda_g$
(called the {\emph{continuation of $\Lambda$}}) that is close and
conjugate to $\Lambda$. If the set $\Lambda$ is transitive the dimension
of its stable bundle is called its {\emph{stable index}} or simply
\emph{$s$-index}.
Throughout this paper we consider diffeomorphisms defined on
closed manifolds of dimension $d\ge 3$. Unless it is explicitly
mentioned, we always consider $C^1$-diffeomorphisms,
$C^1$-neighborhoods, and so on. We repeatedly consider
perturbations of diffeomorphisms. By a {\emph{perturbation}} of a
diffeomorphism $f$ we mean here a diffeomorphism $g$ that is
arbitrarily $C^1$-close to $f$. To emphasize the size of the
perturbation we say that a diffeomorphism $g$ is an
{\emph{$\varepsilon$-perturbation}} of $f\in \mathrm{Diff}^1(M)$ if the $C^1$-distance
between $f$ and $g$ is less than $\varepsilon$.
\subsection{Heterodimensional cycles generated by homoclinic
tangencies}\label{ss.heterodimtangencies} Recall that the
generation of homoclinic tangencies is closely related to the
absence of dominated splittings over homoclinic classes. In fact,
in \cite{G:10} it is proved that if the stable/unstable splitting
over the periodic points homoclinically related to a saddle $P$ is
not dominated then there are diffeomorphisms $g$ arbitrarily
$C^1$-close to $f$ with a homoclinic tangency associated to $P_g$.
See also previous results in \cite{W:04}.
Our main result about the interplay between homoclinic tangencies
and heterodimensional cycles is stated in the following theorem.
\begin{theo}\label{t.main} Let $f$ be a diffeomorphism and
$P$ a hyperbolic periodic saddle of $f$ with stable index $i\ge
2$.
Assume that
\begin{enumerate}
\item\label{i1}
there is no dominated splitting over $H(P,f)$ of index $i$,
\item\label{i2}
there is no dominated splitting over $H(P,f)$ of index $i-1$, and
\item\label{i3new} the Lyapunov exponents of $P$ satisfy
$\chi_i(P)+\chi_{i+1}(P)\geq 0$.
\end{enumerate}
Then there are diffeomorphisms $g\in \mathrm{Diff}^1(M)$ arbitrarily
$C^1$-close to $f$ with a heterodimensional cycle associated to
$P_g$ and a saddle $R_g\in H(P_g,g)$ of stable index $i-1$.
Moreover, the diffeomorphisms $g$ can be chosen such that there
are hyperbolic transitive sets $L_g$ and $K_g$ containing $P_g$
and $R_g$, respectively, having simultaneously a robust
heterodimensional cycle and
a robust homoclinic tangency.
\end{theo}
\begin{rema}\label{r.mainb} $\,$
\noindent (i) In fact, we prove Theorem~\ref{t.main} under the
following slightly weaker hypothesis replacing condition
(\ref{i3new}).
\begin{enumerate}
\item[(3')]
For every $\delta>0$ there exists a periodic point $Q_\delta$
homoclinically related to $P$ whose Lyapunov exponents satisfy
$\chi_i(Q_\delta)+\chi_{i+1}(Q_\delta)\geq -\delta$.
\end{enumerate}
\medskip
\noindent
(ii) Hypothesis (\ref{i2}) can be replaced by the
following condition (see Proposition~\ref{p.excfinal}).
\begin{enumerate}
\item[(2')] There is a diffeomorphism $g$ arbitrarily $C^1$-close to $f$ having a periodic point $R_g$ that is homoclinically related to $P_g$
and that has a strong stable manifold of dimension $i-1$
intersecting the unstable manifold of the orbit of $R_g$.
\end{enumerate}
\end{rema}
Theorem~\ref{t.main} will be proved in
Section~\ref{ss.proofoftmain}. Let us observe that
for three dimensional
diffeomorphisms
a version of
this theorem was proved in \cite{S:pre} replacing condition
(\ref{i3new}) by a stronger one requiring existence of a saddle
$Q$ homoclinically related to $P$ such that $\chi_1(Q)+\chi_{2}(Q)
+\chi_3(Q)>0$. Note that conditions (\ref{i3new}) and (3') are
related to the notion of a sectionally dissipative bundle that is
also considered in \cite{PV:94,R:95}, see
Section~\ref{ss.farfromheterodimnsional}.
Condition~(\ref{i1}) is used to get homoclinic tangencies
associated to $P$. Conditions~(\ref{i2}) and (\ref{i2}') assure
that the homoclinic class is not contained in a normally
hyperbolic surface (this would be an obstruction for the
generation of heterodimensional cycles). Finally,
condition~(\ref{i3new}) implies that these tangencies generate
saddles of index $i-1$.
We would like to replace condition (\ref{i3new}) (or (3')) by a
weaker one about Lyapunov exponents of measures supported over the
class, namely requiring the existence of an ergodic measure $\mu$
whose $i$-th and $(i+1)$-th Lyapunov exponents satisfy
$\chi_i(\mu)+\chi_{i+1}(\mu)\geq 0$. This potential extension is
related to the still open problem of approximation of ergodic
measures supported on a homoclinic class by measures supported on
periodic points of the class, see \cite[Conjecture 2]{B:bible} and
\cite{ABC:}.
There is also the following ``somewhat symmetric'' version of
Theorem~\ref{t.main} that is an immediate consequence of it.
\begin{coro}\label{c.main}
Consider a hyperbolic saddle $P$ of stable index $i$, $2\leq
i\leq d-2$, of a diffeomorphism $f$. Assume that there are no
dominated splittings over $H(P,f)$ of indices $i-1$, $i$, and
$i+1$. Then there is a diffeomorphism $g$ arbitrarily $C^1$-close
to $f$ with a heterodimensional cycle associated to $P_g$ and a
saddle $R_g\in H(P_g,g)$ of stable index $i-1$ or $i+1$.
Moreover, the diffeomorphism $g$ can be chosen such that there are
hyperbolic transitive sets $L_g$ and $K_g$ containing $P_g$ and
$R_g$, respectively, having simultaneously a robust
heterodimensional cycle and a robust homoclinic tangency.
\end{coro}
Theorem~\ref{t.main} has the following consequence
for
$C^1$-generic diffeomorphisms of three dimensional manifolds that
slightly generalizes the dichotomy
``domination versus infinitely many sources/sinks'' in \cite{BDP:03}.
\begin{coro}\label{c.main2}
Let $M$ be a closed manifold of dimension three. There is a
residual subset $\cR$ of $\mathrm{Diff}^1(M)$ such that for every
diffeomorphism $f\in\cR$ and every saddle $P$ of stable index $2$ of $f$
(at least) one of the following three possibilities holds:
\begin{itemize}
\item
$H(P,f)$ has a dominated splitting;
\item
$H(P,f)$ is the Hausdorff limit of periodic sinks;
\item $f$ has a robust heterodimensional cycle associated to $P$ and
$H(P,f)$ is the Hausdorff limit of periodic sources.
\end{itemize}
\label{t.trichotomy}
\end{coro}
\subsection{Non-domination far from heterodimensional cycles implies sectional dissipativity.}\label{ss.farfromheterodimnsional}
One approach for settling Palis conjecture is to study dynamics
{\emph{far from homoclinic tangencies.}} In this case the
diffeomorphisms necessarily have nice dominated splittings that
are adapted to their index structure, see for instance
\cite{W:04}. In contrast, dynamics {\emph{far from
heterodimensional cycles}} is yet little understood. To address
this point we will make the following ``local version'' of
Conjecture~\ref{c.bonatti} where a given homoclinic class is
specified.
\medskip
\noindent {\bf Conjecture \ref{c.bonatti}'.}
\emph{Let $P$ be a hyperbolic saddle of a
diffeomorphism $f$ such that for every diffeomorphism $g$ that is
$C^1$-close to $f$ there is no heterodimensional cycle associated
to the continuation $P_g$ of $P$. Then there exists a
diffeomorphism $g$ arbitrarily $C^1$-close to $f$ such that the
homoclinic class $H(P_g,g)$ is hyperbolic.}
\medskip
To discuss Conjecture~\ref{c.bonatti}' let us first consider a
simple illustrating case involving the notion of {\emph{sectional
dissipativity}}. Let $P$ be a hyperbolic saddle of a
diffeomorphism $f$ of stable index $1$ whose homoclinic class
$H(P,f)$ satisfies the following two properties:
\begin{itemize}
\item
$H(P,f)$ has no dominated splitting of index $1$ and
\item $H(P,f)$ is
uniformly sectionally dissipative for $f^{-1}$, that is, there is
$n>0$ such that the Jacobian of $f$ in restriction to any
$2$-plane is strictly larger than $1$.
\end{itemize}
Under these hypotheses, the lack of domination of $H(P,f)$
corresponding to the index of $P$ enables a homoclinic tangency
associated to $P$ after a perturbation. However, the sectional
dissipativity prevents the existence of saddle points of stable
index larger than $1$ in a small neighborhood of the homoclinic
class of $P$. Thus any diffeomorphism $g$ that is $C^1$-close to
$f$ cannot have a heterodimensional cycle associated to $P_g$.
We wonder if the case above is the only possible setting where
homoclinic tangencies far from heterodimensional cycles can occur.
We provide a partial result to this question by considering
homoclinic classes without any dominated splitting and a weaker
notion of sectional dissipativity.
Consider a set of periodic points $\cP$ of a diffeomorphism $f$
and a $Df$-invariant subbundle $E$ defined over the set $\cP$. The
bundle $E$ is said to be {\emph{sectionally dissipative at the
period}} if for any point $R\in \cP$ there is a constant $0<
\alpha_R<1$ such that $|\lambda_k\,\lambda_{k+1}|<\alpha_R^{\pi(R)}$ for
every pair of multipliers $\lambda_k$ and $\lambda_{k+1}$ of $R$ whose
eigendirections are contained in $E$. When $E=T_\cP M$ then we
call the set of periodic points $\cP$ {\emph{sectionally
dissipative at the period.}} In the case that the constant
$\alpha_R$ can be chosen independently of $R$ we call the bundle
$E$ (or the set $\cP$) {\emph{uniformly sectionally dissipative at
the period.}}
\begin{coro} \label{c.excnodominated} Let $M$ be a closed manifold with $\dim(M)\geq 3$
and $f\colon M\to M$ a diffeomorphism. Consider a homoclinic class
$H(P,f)$ without any dominated splitting that is far from
heterodimensional cycles. Then the set of periodic points of $f$
homoclinically related to $P$ is uniformly sectionally dissipative
at the period either for $f$ or for $f^{-1}$.
\end{coro}
\subsection{Robust non-domination}\label{ss.robust}
We first recall that the existence of a dominated splitting is (in
some sense) an open property. More precisely, if $\Lambda$ is an
$f$-invariant compact set with a dominated splitting $T_\Lambda M
=E\oplus F$, then there are neighborhoods $U$ of $\Lambda$ in $M$ and
$\cU$ of $f$ in $\mathrm{Diff}^1(M)$ such that for every $g\in \cU$ and every
$g$-invariant set $\Sigma$ contained in $U$ there is a dominated
splitting for $\Sigma$ of the same index as $E\oplus F$, see for
instance \cite[Chapter B.1]{BDV:04}. Observe that the next theorem
implies that, in some cases, the absence of domination of a
homoclinic class can, after a perturbation, be turned into a
robust property.
\begin{theo}
\label{t.complex} Let $H(P,f)$ be a non-trivial homoclinic class
of a periodic point $P$ of stable index $i$. Assume that for some
$j\ne i$ there is no dominated splitting of index $j$. Then there
exists a diffeomorphism $g$ arbitrarily $C^1$-close to $f$ having
a periodic point $Q$ that is homoclinically related to $P_g$ and such that
$\lambda_j(Q)$ and $\lambda_{j+1} (Q)$ are non-real, have the same
modulus, and any $k$-th multiplier of $Q$ has modulus different
from $|\lambda_j(Q)|$, ($k\ne j,j+1$).
\end{theo}
An immediate consequence of this theorem is that for every
diffeomorphism $h$ close to $g$ the homoclinic class $H(P_h,h)$
does not have a dominated splitting of index $j$.
A more detailed version of this theorem is given in
Proposition~\ref{p.complex}. Unfortunately, it still remains to
settle the hardest case in which the lack of domination of the
class $H(P,f)$ corresponds to the stable index of $P$.
Observe that, under the hypotheses of Theorem~\ref{t.complex}, the
constructions in \cite{BDP:03} imply that there are points $Q$
homoclinically related to $P$ whose multipliers $\lambda_j(Q)$ and
$\lambda_{j+1}(Q)$ can be made non-real by small perturbations. The
difficulty in the theorem is to preserve the homoclinic relation
between $P$ and $Q$ throughout the perturbation process.
The following result is a consequence of Theorem~\ref{t.complex}
and the fact that for $C^1$-generic diffeomorphisms two saddles in
the same chain recurrence class robustly belong to the same chain
recurrence class
(see Section~\ref{s.robustizing} for the proof).
\begin{coro} \label{c.complexb}
There is a residual set $\cG$ of $\mathrm{Diff}^1(M)$ such that for every $f\in
\cG$ and every homoclinic class $H(P,f)$ of $f$ having periodic
points of different stable indices the following holds:
\noindent if the class $H(P,f)$ has no dominated splitting of
index $j$ then for any diffeomorphism $g$ in a neighborhood of $f$
the chain recurrence class of $P_g$ has no dominated splitting of
index $j$.
\end{coro}
\subsection{Robust non-domination and self-replication}\label{ss.viral}
In \cite[Definition 1.1]{BD:02}, for diffeomorphisms defined on
three-dimensional manifolds, we consider the following open
property for chain recurrence classes that we call \emph{Property
${\mathfrak{U}}$}.
\begin{itemize}
\item[(i)] The class contains two transitive hyperbolic
sets $L$ and $K$ of different stable indices related by a robust
heterodimensional cycle.
\item[(ii)] Each of these sets $K$, $L$
contains a saddle with non-real multipliers.
\item[(iii)] Each of these sets contains a saddle whose
Jacobian is greater than one and a saddle whose Jacobian is less
than one.
\end{itemize}
A key ingredient in \cite{BD:02} is the notion of {\emph{universal
dynamics:}} Given a diffeomorphism $f$ with Property ${\mathfrak{U}}$ by
perturbation we can produce ``any type'' of dynamics in a ball
isotopic to the identity (for large iterations of the
diffeomorphisms). In particular, after perturbations one can
re-obtain properties of any orientation preserving diffeomorphism
of a closed ball, see \cite[Definition 1.3]{BD:02}. As a
consequence, chain recurrence classes satisfying Property ${\mathfrak{U}}$
generate new different classes satisfying also this property. Thus
Property ${\mathfrak{U}}$ is a ``self-replicating'' or ``viral'' property. This
is the main motivation behind the definition of a viral property
in \cite[Sections 7.3-7.5]{B:bible}.
\begin{defi}[Viral property]
A property ${\mathfrak{P}}$ of chain recurrence classes of saddles is said to
be {\emph{$C^k$-viral}} if for every diffeomorphism $f$ and every
saddle $P$ of $f$ whose chain recurrence class $C(P,f)$ satisfies
${\mathfrak{P}}$ the following conditions hold:
\medskip
\noindent{\em{Robustness.}}
There is a $C^k$-neighborhood $\cU$ of $f$ such that $C(P_g,g)$ also satisfies ${\mathfrak{P}}$ for all $g\in \cU$.
\medskip
\noindent{\em{Self-replication.}} For every $C^k$-neighborhood
$\cV$ of $f$ and for every neighborhood $V$ of $C(P,f)$ there are
a diffeomorphism $g\in \cV$ and a hyperbolic periodic point
$Q_g\in V$ of $g$ such that $C(Q_g,g)$ is different from
$C(P_g,g)$ and satisfies property ${\mathfrak{P}}$. \label{d.bviral}
\end{defi}
As observed above, the existence of a robust homoclinic tangency
(associated to a transitive hyperbolic set in the class) is an
example of a $C^2$-viral property for chain recurrence classes in
dimension two.
As a consequence of the above results we now confirm
\cite[Conjecture 14]{B:bible} claiming that the property of robust
non-existence of any dominated splitting over a chain recurrence
class of a saddle is viral in the case that the class contains
saddles whose stable indices are different from $1$ and
$\dim(M)-1$. We formulate the following generalization of
Property~${\mathfrak{U}}$.
\begin{defi}[Property ${\mathfrak{V}}$] \label{d.propertyS}
Given a saddle $P$ of a diffeomorphism $f$, the chain recurrence
class
$C(P,f)$ of $P$ satisfies
\emph{Property ${\mathfrak{V}}$} if there is a $C^1$-neighborhood $\cU$ of
$f$ such that for all $g\in \cU$ the chain recurrence class
$C(P_g,g)$ of $P_g$ satisfies the following two conditions:
\begin{itemize}
\item {\rm (non-domination)}
$C(P_g,g)$
does not have any dominated splitting,
\item {\rm (index variability)}
$C(P_g,g)$ contains a saddle $Q_g$ whose stable index is different
from the one of $P_g$.
\end{itemize}
\end{defi}
Observe that the set of $C^1$-diffeomorphisms satisfying
Property~${\mathfrak{V}}$ is indeed non-empty, see Section~\ref{ss.examples}.
\begin{theo}\label{t.bviral}
Property ${\mathfrak{V}}$ is $C^1$-viral for chain recurrence classes.
\end{theo}
The following result is a consequence of Theorem~\ref{t.bviral}
and the properties of $C^1$-generic diffeomorphisms extending
\cite{BD:02}. In fact, the corollary holds for any viral property
of a chain recurrence class containing a saddle.
\begin{coro}
\label{c.bviral} Let $C(P,f)$ be a chain recurrence class
satisfying Property~${\mathfrak{V}}$. Then there are a neighborhood $\cU$ of
$f$ and a residual subset $\cG_\cU$ of $\cU$ such that for every
$g\in \cG_{\cU}$
\begin{itemize}
\item
there are infinitely (countably) many pairwise disjoint homoclinic
classes, and
\item
there are uncountably many aperiodic chain recurrence classes.
\end{itemize}
\end{coro}
Indeed the homoclinic classes obtained in the corollary can be
chosen to also satisfy Property~${\mathfrak{V}}$.
The
proofs of Theorem~\ref{t.bviral} and Corollary~\ref{c.bviral} are
in Section~\ref{s.viral}.
Let us observe that the nature of the proof of Theorem~\ref{t.bviral}
is quite different from the approach in \cite{BD:02}, where
universal dynamics is the key ingredient. In \cite{BD:02} this
universal dynamics is obtained by considering saddles in the chain
recurrence class whose Jacobians are larger and smaller than one,
respectively. A restriction of this construction is that all
Lyapunov exponents of the aperiodic classes obtained in
\cite{BD:02} are zero. This follows from the fact that one
considers maps whose ``returns'' are close to the identity. Here we
use directly the self-replication property. This allows us to
obtain aperiodic classes with regular points having Lyapunov
exponents uniformly bounded away from zero. See \cite[Section
7.4]{B:bible}, especially Problem 6, for further discussion.
Finally,
bearing in mind the results in \cite{S:pre} and
Corollary~\ref{c.main}, we introduce the following variation of
Property~${\mathfrak{V}}$ for
diffeomorphisms defined on manifolds of dimension $d\ge 4$.
\begin{defi}[Property ${\mathfrak{V}}'$] \label{d.propertySprime}
Given a saddle $P$ of a diffeomorphism $f$ the chain recurrence
class
$C(P,f)$ of $P$ has
\emph{Property ${\mathfrak{V}}'$} if there is a $C^1$-neighborhood $\cU$ of
$f$ such that for all $g\in \cU$ the chain recurrence class
$C(P_g,g)$ of $P_g$ satisfies the following two conditions:
\begin{itemize}
\item
$C(P_g,g)$
does not have any dominated splitting and
\item
$C(P_g,g)$ contains a saddle with stable index $i\not\in\{1,
\dim (M)-1\}$.
\end{itemize}
\end{defi}
Corollary~\ref{c.main} implies that in this case, after a
perturbation, the chain recurrence class $C(P_g,g)$ robustly
satisfies the index variability condition. Thus, after a
perturbation, Property~${\mathfrak{V}}'$ implies Property~${\mathfrak{V}}$. In fact, we
will see that these two properties are ``essentially equivalent'',
see Lemmas~\ref{l.VimpliesS} and~\ref{l.SimpliesV}. Finally, we
have the following:
\begin{rema}\label{r.ss'}
Theorem~\ref{t.bviral} and Corollary~\ref{c.bviral} hold for
Property~${\mathfrak{V}}'$.
\end{rema}
\subsection*{Organization of the paper}\label{ss.organization}
We first observe that we will use systematically several
$C^1$-perturbation results imported from \cite{G:10,G:pre,BB:pre}.
These results allow us to realize dynamically perturbations of
cocycles associated to the derivatives of diffeomorphisms along
periodic orbits (see specially Section~\ref{ss.gfranks} and
\ref{ss.dominatedperiodic}).
\medskip
\noindent $\bullet$
In Section~\ref{s.homohetero} we recall results about the
generation of homoclinic tangencies and heterodimensional cycles
associated to homoclinic classes.
\medskip
\noindent $\bullet$ An ingredient of our paper is the notion of an
{\emph{adapted perturbation}} of a diffeomorphism, that is, a
small perturbation of a
diffeomorphism along the orbit of a periodic point
that preserves some homoclinic relations and some prescribed
dynamical properties of a given homoclinic class (see
Definition~\ref{d.adapted}). An essential feature of adapted
perturbations is that one can perform simultaneously finitely many
of them preserving some prescribed properties of the homoclinic
class. These perturbations are introduced in
Section~\ref{s.adaptedfranks}.
\medskip
\noindent $\bullet$
Using adapted perturbations we prove in
Section~\ref{s.lyapunovperiodic} two important technical results
(Propositions~\ref{p.gbobo} and \ref{p.weak}) claiming that the
lack of domination of a homoclinic class yields periodic orbits having
multiple Lyapunov exponents and weak hyperbolicity.
\medskip
\noindent $\bullet$ In Sections~\ref{s.robustizing} and
\ref{s.proofof}, in the non-dominated setting we get periodic
orbits inside a homoclinic class having non-real multipliers and
prove Theorem~\ref{t.complex}. This proof is based on
Proposition~\ref{p.complex} whose proof is the most difficult step
of the paper.
\medskip
\noindent $\bullet$ In Section~\ref{s.formation} we obtain
homoclinic intersections associated to strong invariant manifolds
of periodic points that will allow us to get heterodimensional
cycles and finally prove Theorem~\ref{t.main} in
Section~\ref{s.proofofmain}.
\medskip
\noindent $\bullet$
Finally, we study viral properties of chain recurrence classes and
prove Theorem~\ref{t.bviral} and Corollary~\ref{c.bviral} in
Section~\ref{s.viral}.
\section{Homoclinic tangencies and heterodimensional cycles}
\label{s.homohetero}
In this section we recall some results about generation of
homoclinic tangencies and robust heterodimensional cycles
associated to homoclinic classes.
\subsection{Homoclinic tangencies}\label{ss.homoclinictangencies}
Next lemma states the relation between the lack of domination over
a periodic orbit and the generation of homoclinic tangencies.
\begin{lemm}[{\cite[Theorem 3.1]{G:10}}]\label{l.gdcds}
For any $K>1$, $\varepsilon>0$, and $d\in \NN$, there are
constants $k_0$ and $\ell_0$ with the following property.
\begin{itemize}
\item
For every $f\in \mathrm{Diff}^1(M)$ with $\dim (M)=d$ such that the norms of $Df$ and $Df^{-1}$
are both bounded by $K$, and
\item
for every periodic point $P$ of $f$ of saddle-type such that
\begin{itemize}
\item the period of $P$ is larger than $\ell_{0}$ and
\item
the stable/unstable splitting $E^s(f^i(P))\oplus E^u(f^i(P))$ over
the orbit of $P$ is not $k_{0}$-do\-mi\-na\-ted,
\end{itemize}
\end{itemize}
there is an $\varepsilon$-perturbation $g$ of $f$ whose support is
contained in an arbitrarily small neighborhood of the orbit of $P$
and such that the stable and unstable manifolds $W^s(P,g)$ and
$W^u(P,g)$ of $P$ have a homoclinic tangency.
Moreover, if $Q$ is homoclinically related to $P$ for $f$ then the
perturbation $g$ can be chosen such that $Q_g$ and $P$ are
homoclinically related (for $g$).
\end{lemm}
\begin{rema}
\label{r.gdcds} Lemma~\ref{l.gdcds} implies that the perturbation
$g$ of $f$ can be chosen such that the saddle $P$ has a homoclinic
tangency and its homoclinic class $H(P,g)$ is non-trivial.
Moreover, the orbit of tangency can be chosen inside the
homoclinic class $H(P,g)$.
\end{rema}
\subsection{Robust heterodimensional cycles}\label{ss.robustcycles}
Let us observe that a homoclinic class $H(P,f)$ may contain
saddles of different indices. But, in principle, it is not
guaranteed that such a property still holds for perturbations
of $f$. We next collect some results from \cite{BDK:pre} that will
allow us to get such a property in a robust way.
We say that a heterodimensional cycle associated to a pair of
transitive hyperbolic sets has {\emph{coindex one}} if the
$s$-indices of these sets differ by one.
\begin{lemm}[\cite{BDK:pre}]
\label{l.bodiki} Let $f\in \mathrm{Diff}^1(M)$ be a diffeomorphism having
a coindex one
heterodimensional cycle
associated to a
pair of hyperbolic periodic points $P$ and $Q$ such that the homoclinic class
$H(P,f)$ is non-trivial. Then there is a diffeomorphism $g$ arbitrarily $C^1$-close
to $f$ with a pair of hyperbolic transitive sets $L_g$ and $K_g$
having a robust heterodimensional cycle and containing the
continuations $P_g$ and $Q_g$ of $P$ and $Q$, respectively.
\end{lemm}
There is the following
consequence of this lemma for $C^1$-generic systems:
\begin{corol}[\cite{BDK:pre}]
\label{c.bdk} There is a residual subset $\cG$ of $\mathrm{Diff}^1(M)$ such
that for every diffeomorphism $f\in \cG$ and every pair of
periodic points $P$ and $Q$ of stable indices $i<j$ in the same
homoclinic class there is a (finite) sequence of transitive
hyperbolic sets $K_i,K_{i+1},\dots, K_j$
such that
\begin{itemize}
\item
$P\in K_i$, $Q\in K_j$,
\item
the stable index of $K_n$ is $n$, $n=i,i+1,\dots, j$,
and
\item
the sets
$K_k$ and $K_{k+1}$ have a robust heterodimensional cycle
for all $k=i,\dots, j-1$.
\end{itemize}
\end{corol}
\section{Adapted perturbations and generalized Franks' lemma} \label{s.adaptedfranks}
In this section, we collect some results about $C^1$-perturbations
of diffeomorphisms. Observe that if $g_1,\dots,g_n$ are
$\varepsilon$-perturbations of $f$ with disjoint supports $V_1,\dots,V_n$
then there is an $\varepsilon$-perturbation $g$ of $f$ supported in the
union of the sets $V_i$ such that $g$ coincides with $g_i$
over the set $V_i$.
\subsection{Adapted perturbations}\label{ss.adapted}
We next introduce a kind of perturbation of
diffeomorphisms along periodic orbits that preserves
homoclinic relations. Moreover, these perturbations can be
performed simultaneously and independently along different
periodic orbits.
In what follows, given $\varrho>0$, we denote by $W^{s,u}_\varrho(P,f)$
the stable/unstable manifolds of size $\varrho$ of the orbit of $P$.
\begin{defi}[Adapted perturbations]
Consider a property ${\mathfrak{P}}$ about periodic points. Given $f\in
\mathrm{Diff}^1(M)$, a pair of hyperbolic periodic points $P$ and $Q$ of $f$
that are homoclinically related,
and a neighborhood $\cU\subset \mathrm{Diff}^1(M)$ of $f$
we say that there is a
\emph{perturbation of $f$ in $\cU$ along the orbit of $Q$ that is
adapted to $H(P,f)$ and property ${\mathfrak{P}}$} if
\begin{itemize}
\item
for every neighborhood $V$ of the orbit of $Q$ and
\item
for every $\varrho>0$ and every pair of compact sets $K^s\subset W^s_\varrho(Q,f)$ and
$K^u\subset W^u_\varrho(Q,f)$ disjoint from $V$
\end{itemize}
there is a diffeomorphism $g\in \cU$ such that:
\begin{itemize}
\item $g$ coincides with $f$ outside $V$ and along the $f$-orbit of
$Q$,
\item the points $P_g$ and $Q_g$ are homoclinically related for $g$,
\item the sets $K^s,K^u$ are contained in $W^s_\varrho(Q,g)$ and
$W^u_\varrho(Q,g)$, respectively, and
\item the saddle $Q$ satisfies property ${\mathfrak{P}}$.
\end{itemize}
When the neighborhood $\cU$ of $f$ is the set of diffeomorphisms
that are $\varepsilon$-$C^1$-close to $f$ we say that $g$ is an
\emph{$\varepsilon$-perturbation of $f$ along the orbit of $Q$ that is
adapted to $H(P,f)$ and property ${\mathfrak{P}}$}.
\label{d.adapted}
\end{defi}
Examples of property ${\mathfrak{P}}$ for periodic points are the existence
of non-real multipliers and negative Lyapunov exponents.
\subsection{Generalized Franks' lemma}
\label{ss.gfranks}
We need the following extension of the so-called Franks Lemma
\cite{F:71} about dynamical realizations of
perturbations of cocycles along periodic orbits. The novelty of
this extension is that besides the dynamical realization of the cocycle throughout a periodic orbit
the perturbations also preserve some homoclinic/heteroclinic intersections. Next lemma is a particular case of \cite[Theorem~1]{G:pre} and
is a key tool for constructing adapted perturbations. Recall that
a linear map $B\in GL(d,\RR)$ is {\emph{hyperbolic}} if every eigenvalue $\lambda$ of $B$ satisfies $|\lambda|\ne 1$.
\begin{lemm}[Generalized Franks' Lemma, \cite{G:pre}]\label{l.gourmelon}
Consider $\varepsilon>0$, a diffeomorphism $f\in \mathrm{Diff}^1(M)$ and a hyperbolic periodic point $Q$ of period $\ell=\pi(Q)$
of $f$. Then
\begin{itemize}
\item for any
one-parameter family of linear maps
$(A_{n,t})_{n=0,\dots,\ell-1, \, t\in [0,1]}$, $A_{n,t}\in
\mathrm{GL}(d, \RR)$, $d=\dim (M)$, such that
\begin{enumerate}
\item \label{ifg1}
$A_{n,0}=Df (f^n(Q))$,
\item \label{ifg2}
for all $n=0,\dots,\ell-1$ and all $t\in [0,1]$ it holds
$$
\max \, \left\{ \| Df(f^n(Q)) -A_{n,t} \|, \, \| Df^{-1}(f^n(Q))
-A^{-1}_{n,t} \| \right\}<\varepsilon,
$$
\item \label{ifg3}
$B_t=A_{\ell-1,t}\circ\cdots\circ A_{0,t}$ is hyperbolic for all
$t\in [0,1]$,
\end{enumerate}
\item
for every neighborhood $V$ of the orbit of $Q$, every $\varrho>0$,
and every pair of compact sets $K^s\subset W^s_\varrho(Q,f)$ and $K^u
\subset W^u_\varrho (Q,f)$ disjoint from $V$,
\end{itemize}
there is an $\varepsilon$-perturbation $g$ of $f$ such that
\begin{enumerate}
\item[(a)] $g$ and $f$ coincide throughout the orbit of $Q$ and outside
$V$,
\item[(b)]
$K^s\subset W^s_\varrho (Q,g)$ and $K^u \subset W^u_\varrho(Q,g)$, and
\item[(c)]
$Dg(g^n(Q))=Dg(f^n(Q))=A_{n,1}$ for all $n=0,\dots,\ell-1$.
\end{enumerate}
\end{lemm}
\section{Lyapunov exponents of periodic orbits}
\label{s.lyapunovperiodic}
In this section we see that the lack of domination of a homoclinic
class yields perturbations such that there are periodic points of
the class whose Lyapunov exponents are multiple or close to zero,
see Propositions~\ref{p.gbobo} and \ref{p.weak}. We first state
some preparatory results and prove these propositions in
Section~\ref{ss.multiple}.
\subsection{Lyapunov exponents and homoclinic relations} \label{ss.homoclinic}
We will use repeatedly throughout the paper the following result.
\begin{lemm}
\label{l.homocliniclyapunov} There is a residual subset $\cG$ of
$\mathrm{Diff}^1(M)$ such that for every $f\in \cG$, every saddle $P$ of $f$,
every non-trivial and locally maximal transitive hyperbolic set
$\Lambda$ of $f$ containing $P$, and every $\varepsilon>0$ there is a
saddle $Q\in \Lambda$ such that
\begin{itemize}
\item
$|\chi_j(Q)-\chi_j(P)|<\varepsilon$ for all $j\in \{1,\dots,d\}$, and
\item
the orbit of $Q$ is $\varepsilon$-dense in $\Lambda$. \end{itemize}
In
particular, the saddle $Q$ can be chosen with arbitrarily large
period.
\end{lemm}
This result follows from the arguments in the proofs of
\cite[Corollary 2]{ABCDW:07} and \cite[Theorem 3.10]{ABC:} using
standard constructions that allow us to distribute these orbits
throughout the ``whole'' transitive hyperbolic set while keeping
the control of the exponents.
\subsection{Dominated splittings and cocycles over periodic
orbits}\label{ss.dominatedperiodic}
We next study the lack of domination of homoclinic classes. For
that we consider periodic orbits (of large period) in the class
and their associated cocycles. Next result is a
standard fact about dominated splittings (see for instance
\cite[Appendix B]{BDV:04}).
\begin{lemm}[Extension of a dominated splitting to a closure]
\label{l.dominatedclosure} Consider an $f$-invariant set $\Lambda$
having a $k$-dominated splitting of index $i$. Then the
closure of $\Lambda$ also has a $k$-dominated splitting of index $i$
that coincides with the one over $\Lambda$.
\end{lemm}
As in the case of periodic points of diffeomorphisms, given a
family of linear maps $A_1,\dots ,A_\ell\in GL(d,\RR)$ we consider the
product $B=A_\ell\circ\cdots \circ A_1$ and the eigenvalues
$\lambda_1(B),\dots,\lambda_d(B)$ of $B$ ordered in increasing modulus and
counted with multiplicity. We define the {\emph{$i$-th Lyapunov
exponent}} of $B$ by
$$
\chi_i(B)= \frac 1 \ell \log |\lambda_i(B)|.
$$
The family of linear maps above is \emph{bounded by $K$} if
$\|A_n\|$ and $\|A_n^{-1}\|$ are both less than or equal to $K$ for all
$n=1, \dots, \ell$.
Note that Definition~\ref{d.dominated} of a dominated splitting
over an invariant set of a diffeomorphism can be restated for
sequences of linear maps.
Next lemma relates the lack of domination of a
cocycle and the generation of sinks or sources.
\begin{lemm}[{\cite[Corollary 2.19 and Remark 2.20]{BGV:06}}]
\label{l.bgv} For every $K>1$, $\varepsilon>0$, and $d\in \NN$,
there are constants $k_0$ and $\ell_0$ with the following property.
\begin{itemize}
\item
For every $f\in \mathrm{Diff}^1(M)$ with $\dim (M)=d$ such that the norms of $Df$ and $Df^{-1}$
are both bounded by $K$, and
\item
for every periodic point $P$ of $f$ of period larger than $\ell_0$
such that there is no $k_0$-dominated splitting over the orbit
of $P$,
\end{itemize}
there is an $\varepsilon$-perturbation $g$ of $f$ whose support is contained in an arbitrarily small
neighborhood of the orbit of $P$ and such that $P$ is either a
sink or a source of $g$.
\end{lemm}
Next result is a finer version of the previous lemma that allows us
to modify only two consecutive Lyapunov exponents of a cocycle.
\begin{lemm}[{\cite[Theorem~4.1 and Proposition 3.1]{BB:pre}}]\label{l.bobo}
For every $K>1$, $\varepsilon>0$, and $d\geq 2$,
there are constants $k_0$ and $\ell_0$ with the following property.
Consider $\ell\ge \ell_0$ and linear maps $A_1,\dots, A_{\ell}$ in $GL(d,\RR)$, such that:
\begin{itemize}
\item every $A_n$ is bounded by $K$,
\item for any $i\in \{1,\dots, d-1\}$,
the linear map $B=A_\ell\circ \cdots \circ A_1$ has no $k_0$-dominated splitting of index $i$.
\end{itemize}
Then for every $j\in \{1,\dots,d-1\}$,
there exist one-parameter families of linear maps $
(A_{n,t})_{t\in [0,1]}$ in $GL(d,\RR)$, $n=1,\dots,\ell$, such that
\begin{enumerate}
\item
$A_{n,0}=A_n$ for all $n=1,\dots,\ell$, and
\item
$A_{n,t}-A_n$ and $A_{n,t}^{-1}-A_n^{-1}$ are bounded by $\varepsilon$ for all $t\in [0,1]$ and all $n=1,\dots,\ell$.
\end{enumerate}
Consider the linear map
$$
B_t=A_{\ell,t}\circ \cdots \circ A_{1,t}.
$$
Then, for any $t\in [0,1]$, the Lyapunov exponents of the map $B_t$ satisfy
\begin{enumerate}
\item[(3)]
$\chi_m(B_t)=\chi_m(B)$ if $m\ne j,j+1$,
\item[(4)]
$\chi_{j}(B_t)+\chi_{j+1}(B_t)= \chi_{j}(B)+\chi_{j+1}(B)$,
\item[(5)]
$\chi_{j}(B_{t^\prime})$ is non-decreasing and
$\chi_{j+1}(B_{t^\prime})$ is non-increasing, that is
$$
\chi_{j}(B_{t^\prime}) \le \chi_{j}(B_t) \le \chi_{j+1}(B_t) \le
\chi_{j+1}(B_{t^\prime}), \quad \mbox{for all $t'<t$},
$$
\item[(6)]
$\chi_{j+1} (B_1)=\chi_j(B_1)$, and
\item[(7)] the eigenvalues of $B_1$ are all real.
\end{enumerate}
\end{lemm}
\begin{rema} \label{r.bobo}
Note that if $A\in GL(d,\RR)$ has real eigenvalues and if its
Lyapunov exponents $\chi_{j}(A)$ and $\chi_{j+1}(A)$ are equal
then there is $\bar A\in GL(d,\RR)$ arbitrarily close to $A$ whose
eigenvalues are real and whose Lyapunov exponents satisfy
$\chi_{m}(\bar A)\ne \chi_{j}(\bar A)=\chi_{j+1}(\bar A)$ for all
$m\ne j,j+1$. Moreover, there is a ``small path of cocycles"
joining $A$ and $\bar A$ that preserves the $j$ and $j+1$ Lyapunov
exponents.
Thus in the conclusions of Lemma~\ref{l.bobo} we can replace
item (3) by
\begin{enumerate}
\item[(3')]
$\chi_m(B_t)$ is close to $\chi_m(B)$ for all $m\ne j,j+1$ and all
$t\in [0,1]$ and $\chi_{m}(B_1)\ne \chi_{j}(B_1)=\chi_{j+1}(B_1)$.
\end{enumerate}
\end{rema}
In order to get cocycles with real eigenvalues we also use the following result
(see also previous results in \cite[Lemme 6.6]{BC:04}
and \cite[Lemma 3.8]{BGV:06}).
\begin{prop}[{\cite[Proposition 4.1]{BB:pre}}]\label{p.bobo}
For every $K>1$, $\varepsilon>0$, and $d\geq 2$,
there is a constant $\ell_0$ with the following property.
For every family of linear maps $(A_n)_{n=1}^\ell$ in $GL(d,\RR)$ such that $\ell\ge
\ell_0$ and $A_n$ and $A_n^{-1}$ are bounded by $K$ for every $n$,
there are one-parameter families of linear maps $(A_{n,t})_{n=1, t\in [0,1]}^\ell$
in $GL(d,\RR)$, such that
\begin{itemize}
\item
$A_{n,0}=A_n$,
\item
$A_{n,t}-A_n$
and $A_{n,t}^{-1}-A_n^{-1}$ are bounded by $\varepsilon$ for every $n$,
\item let $B_t=A_{\ell,t}\circ \cdots \circ A_{1,t}$, then for every $j\in
\{1,\dots,d\}$ the Lyapunov exponent $\chi_{j}(B_t)$ is constant for $t\in
[0,1]$, and
\item
all the multipliers of $B_1$ are real.
\end{itemize}
\end{prop}
\subsection{Multiple Lyapunov exponents and weak hyperbolicity}
\label{ss.multiple}
In Pro\-po\-sitions~\ref{p.gbobo} and \ref{p.weak}
we combine Lemmas~\ref{l.gourmelon} and \ref{l.bobo} to prove that the
lack of domination of a homoclinic class yields periodic orbits
whose Lyapunov exponents are multiple or close to zero.
\begin{prop} \label{p.gbobo}
For every $K>1$, $\varepsilon>0$, and $d\in \NN$,
there is a constant $k_0$ with the following property.
Consider a diffeomorphism $f\in \mathrm{Diff}^1(M)$, $\dim (M)=d$, such that the norms of
$Df$ and $Df^{-1}$ are bounded by $K$,
a hyperbolic periodic point $P$ of $s$-index $i$
whose homoclinic class $H(P,f)$ is non-trivial,
and an integer $j\in \{1,\dots,d\}$ with
$j\ne i$ such that the homoclinic class $H(P,f)$ has
no $k_0$-dominated splitting of index $j$.
Then there is a periodic point
$Q\in H(P,f)$ homoclinically related with $P$ and an $\varepsilon$-perturbation
$g$ of $f$ along the orbit of $Q$ that is adapted to
$H(P,f)$ and to the following property ${\mathfrak{P}}_{j,j+1}$:
$$
{\mathfrak{P}}_{j,j+1}\eqdef \begin{cases}
&\text{$\chi_j(Q_g)=\chi_{j+1}(Q_g)$,}\\
& \text{$\chi_m(Q_g)\neq \chi_j(Q_g)$ for all $m\neq j,j+1$,}\\
& \text{$\lambda_m(Q_g)\in \RR$ for all $m$.}
\end{cases}
$$
\end{prop}
\begin{proof}
Consider
the constants $d\in \NN$, $K>1$, and $\varepsilon>0$.
Applying Lemma~\ref{l.bobo} to these constants
we obtain the constants
$k_0$ and $\ell_0$.
Since the homoclinic class $H(P,f)$ is non-trivial, the set
$\Sigma_{\ell_0}$ of all saddles $Q$ of period larger than $\ell_0$
that are homoclinically related to $P$ is dense in $H(P,f)$.
Observe that there is a saddle $Q\in \Sigma_{\ell_0}$ such that there
is no $k_0$-dominated splitting of index $j$ over the orbit of
$Q$. Otherwise, by Lemma~\ref{l.dominatedclosure}, the closure of
the set $\Sigma_{\ell_0}$ (that is the whole class $H(P,f)$) would
have a $k_0$-dominated splitting of index $j$, which is a
contradiction.
Thus we can apply Lemma~\ref{l.bobo} to the linear maps
$Df(Q),\dots, Df(f^{\ell-1}(Q))$, $\ell=\pi(Q)\ge \ell_0$, obtaining
one-parameter families of linear maps $(A_{i,t})_{t\in [0,1]}$,
$i=0,\dots,\ell-1$, satisfying the conclusions of
Lemma~\ref{l.bobo}.
We now fix a neighborhood $V$ of the orbit $Q$ and compact sets
$K^s \subset W^s(Q,f)$ and
$K^u\subset W^u(Q,f)$ disjoint from $V$
as in Definition~\ref{d.adapted}. Since $Q$ is
homoclinically related to $P$ there are transverse intersections
$Y^s\in W^s(Q,f)\pitchfork W^u(P,f)$ and $Y^u\in
W^u(Q,f)\pitchfork W^s(P,f)$ and (small) compact disks
$\Delta^s\subset W^s(Q,f)$ and $\Delta^u\subset W^u(Q,f)$ (of the same
dimension as $W^s(Q,f)$ and $W^u(Q,f)$) containing the points
$Y^s$ and $Y^u$. We consider the compact sets
$$
\tilde K^s=K^s\cup \Delta^s
\quad \mbox{and} \quad
\tilde K^u=K^u\cup \Delta^u.
$$
We now apply Lemma~\ref{l.gourmelon} to $\varepsilon$,
$f$, the small path of
cocycles $(A_{n,t})$ above, and the compact sets $\tilde
K^s$ and $\tilde K^u$ to get an $\varepsilon$-perturbation $g$ of
$f$ along the orbit of $Q$ adapted to $H(P,f)$ and Property ${\mathfrak{P}}_{j,j+1}$:
\begin{itemize}
\item
adapted to $H(P,f)$: By the choice of $\Delta^s$ and $\Delta^u$ the saddle $Q_g$ is
homoclinically related to $P_g$.
\item
adapted to
Property ${\mathfrak{P}}_{j,j+1}$: By item (6) in
Lemma~\ref{l.bobo} it holds
$\chi_j(B_1)=\chi_{j+1}(B_1)$,
by Remark~\ref{r.bobo} we have $\chi_m(B_1)\ne \chi_j(B_1)$ if $m\ne
j,j+1$,
and by item (7) all the eigenvalues of $B_1$ are real.
\end{itemize}
This concludes the proof of the
proposition.
\end{proof}
\begin{prop}\label{p.weak}
For every $K>1$, $\varepsilon>0$, and $d\in \NN$,
there is a constant $k_0$ with the following property.
Consider $\delta>0$, a diffeomorphism $f\in \mathrm{Diff}^1(M)$, $\dim (M)=d$, and a
hyperbolic periodic point $P$ of $f$ of $s$-index $i$ such that:
\begin{itemize}
\item
the norms of
$Df$ and $Df^{-1}$ are bounded by $K$,
\item
$\chi_i(P)+\chi_{i+1}(P)>-\delta$,
\item
the homoclinic class $H(P,f)$ is non-trivial and has no $k_0$-dominated
splitting of index $i$.
\end{itemize}
Then there is a periodic point
$Q\in H(P,f)$ homoclinically related with $P$ and an $\varepsilon$-perturbation
$g$ of $f$ along the orbit of $Q$ that is adapted to
$H(P,f)$ and to the following property
\begin{equation}
\label{e.pid}
{\mathfrak{P}}_{i,\delta}\eqdef
\text{The $i$-th Lyapunov exponent of $Q$ satisfies $\chi_i(Q)\in
(-\delta,0)$}.
\end{equation}
\end{prop}
\begin{proof} The strategy of the proof is analogous to the one of
Proposition~\ref{p.gbobo}, so we will skip some repetitions.
As in the proof of Proposition~\ref{p.gbobo} we consider
constants $k_0$ and $\ell_0$ associated to $K$, $\varepsilon$, and $d$.
Since the homoclinic class $H(P,f)$ has no dominated splitting of
index $i$, there is a locally maximal transitive hyperbolic subset
$L$ of $H(P,f)$ containing $P$ and having no $k_0$-dominated
splitting of index $i$. We can also assume that for every $f'$
close to $f$ the continuation $L_{f'}$ of $L$ has no such a
$k_0$-dominated splitting.
We choose $f'$ in the residual subset
$\cG$ of $\mathrm{Diff}^1(M)$ in Lemma~\ref{l.homocliniclyapunov}. Then there is a
periodic point $Q_{f'}\in L_{f'}$ such that $\chi_i(Q_{f'})+
\chi_{i+1}(Q_{f'})>-\delta$ and whose orbit has no $k_0$-dominated
splitting of index $i$. Otherwise, by Lemma~\ref{l.dominatedclosure},
the set $L_{f'}$ has a $k_0$-dominated splitting.
Consider the point $Q=Q_f$. We take a first small path of
hyperbolic cocycles $(\bar A_{n,t})_{t\in [0,1]}$,
$n=0,\dots,\ell-1$, $\ell=\pi(Q)$, over the orbit of $Q$ joining the
derivatives $Df$ and $Df'$. Note that, by definition, the cocycle $(\bar A_{n,1})$
does not have a $k_0$-dominated splitting and
the Lyapunov exponents of $\bar B_1= \bar A_{\ell-1,1}\circ \cdots
\circ \bar A_{0,1}$ satisfy $\chi_i(\bar B_1)+ \chi_{i+1}(\bar
B_1)>-\delta$.
Observe that if $\chi_i(\bar B_1)>-\delta$ we are done. Otherwise we
apply Lemma~\ref{l.bobo} to the cocycle $\bar A_{n,1}$,
$n=0,\dots, \ell-1$, and $j=i$. This provides new families of
linear maps $(\tilde A_{n,t})_{t\in [0,1]}$, $n=0,\dots,\ell-1$,
satisfying the conclusions of the lemma. Define the composition
$\tilde B_t$ as above. Let
$$
\tau \eqdef \chi_i(\tilde B_t)+\chi_{i+1}(\tilde B_t)>-\delta.
$$
Note that by item (4) of Lemma~\ref{l.bobo} this number does not depend on $t$.
By item (6) in Lemma~\ref{l.bobo}, there is some $t_0$ such that
$$
\chi_i(\tilde B_{t_0})=\min \left(\frac{\tau -\delta}2,\frac {-\delta} 2 \right).
$$
As the map
$\chi_i(\tilde B_{t})$ is non-decreasing (recall item (5) in
Lemma~\ref{l.bobo}) we have $\chi_i(\tilde B_{t})\le
\frac{-\delta}2<0$ for all $t\in [0,t_0]$.
Also
$$ \chi_{i+1}(\tilde B_t)\ge \tau - \min\left(\frac{\tau -\delta}2,\frac {-\delta} 2\right)
\ge \frac{\tau+\delta} 2+\frac{\max(0,\tau)}{2}>0.$$
Therefore $(\tilde A_{n,t})_{n,t\in[0,t_0]}$ is a path of hyperbolic cocycles.
We next consider the concatenation of the paths of hyperbolic cocycles
$(\bar A_{n,t})_{t\in [0,1]}$ and $(\tilde A_{n,t})_{t\in
[0,t_0]}$.
The end of the proof is the same as the one of
Proposition~\ref{p.gbobo} and involves the definition of the sets
$\tilde K^s$ and $\tilde K^u$.
We apply Lemma~\ref{l.gourmelon} to get an $\varepsilon$-perturbation $g$ of $f$
along the orbit of $Q$ that is adapted to $H(P,f)$
and to property ${\mathfrak{P}}_{i,\delta}$, since by construction
$$ \chi_{i}(Q_g)=\chi_{i}(\tilde B_{t_0})=-\delta+
\min\left(\frac{\tau+\delta}{2},\frac \delta 2\right)> -\delta.$$
This ends the proof of the proposition.
\end{proof}
\section{``Robustizing'' lack of domination} \label{s.robustizing}
In this section we analyze the existence of dominated splittings
for homoclinic classes. In some cases these splittings will have
several bundles.
\begin{defi}[Dominated splittings II]
\label{d.severalbundles} Let $\Lambda$ be an invariant set of a
diffeomorphism $f$.
A $Df$-invariant splitting $E_1\oplus \cdots \oplus E_s$, $s\ge 2$, over the set
$\Lambda$ is dominated if for all
$j\in \{1,\dots,s-1\}$ the splitting $E_1^j\oplus E_{j+1}^s$ is
dominated, where $E_1^j=E_1\oplus \cdots \oplus E_j$ and
$E_{j+1}^s=E_{j+1}\oplus \cdots \oplus E_s$.
As in the case of two bundles, the splitting is
\emph{$k$-dominated} if the splittings $E_1^j\oplus E_{j+1}^s$
are $k$-dominated for all $j$.
There are analogous definitions for cocycles.
\end{defi}
Note that if there is a saddle $Q$ homoclinically related to $P$
such that $\chi_j(Q)=\chi_{j+1}(Q)$ then the class has no
dominated splitting of index $j$. Moreover, if
$$
\chi_{j-1}(Q)< \chi_j(Q)=\chi_{j+1}(Q)<\chi_{j+2}(Q) \quad
\mbox{and} \quad \lambda_j(Q), \lambda_{j+1}(Q)\in ({\mathbb C}\setminus \RR)
$$
then the lack of domination of the homoclinic class
is $C^1$-robust.
In this section we study when the converse holds (up to
perturbations).
A saddle $Q$ of a diffeomorphism $f$ satisfies
property ${\mathfrak{P}}_{j,j+1,\CC}$ if
\begin{equation} \label{e.complex}
{\mathfrak{P}}_{j,j+1,\CC}\eqdef
\begin{cases}
&\mbox{\textbf{(i)} $\chi_j(Q)=\chi_{j+1}(Q)$,}\\
&\mbox{\textbf{(ii)} $\chi_m(Q)\neq
\chi_j(Q)$ for all $m\neq j,j+1$,}\\
&\mbox{\textbf{(iii)} $\lambda_j(Q)$ and $\lambda_{j+1}(Q)$ are non-real.}
\end{cases}
\end{equation}
The main technical step of our constructions is the next
proposition whose proof is postponed to the next section.
It immediately implies Theorem \ref{t.complex}.
\begin{prop}\label{p.complex}
For any $K>1$, $\varepsilon>0$, and $d\in \NN$, there is
a constant $k_0$ with the following property.
Consider a diffeomorphism $f\in \mathrm{Diff}^1(M)$, $\dim M=d$, such that the norms of
$Df$ and $Df^{-1}$ are bounded by $K$, a hyperbolic periodic
point $P$ of $s$-index $i$, and an integer $j\in \{1,\dots,d-1\}$, $j\ne i$.
Assume that the homoclinic class $H(P,f)$ is non-trivial and has
no $k_{0}$-dominated splitting of index $j$.
Then there is a periodic point $Q$ that is homoclinically related with $P$ and an
$\varepsilon$-perturbation of $f$ along the orbit of $Q$ that is
adapted to $H(P,f)$ and property ${\mathfrak{P}}_{j,j+1,\CC}$.
\end{prop}
\begin{rema}\label{r.complex}
The proof of the proposition provides a point $Q$ with arbitrarily
large period. In particular, there exist infinitely many periodic
points $Q$ satisfying the conclusion of the proposition.
\end{rema}
We postpone the proof of this proposition to
Section~\ref{s.proofof}. We now deduce from it
Corollaries~\ref{c.1} and \ref{c.noname} below.
\begin{corol}\label{c.1}
For any $K>1$, $\varepsilon>0$, and $d\in \NN$, there is
a constant $k_0$ with the following property.
Consider a diffeomorphism $f\in \mathrm{Diff}^1(M)$, $\dim M=d$, such that the norms of
$Df$ and $Df^{-1}$ are bounded by $K$, a homoclinic class $H(P,f)$ of $f$,
and integers $0<j_1<\dots<j_\ell<d$ that are different from the
$s$-index of $P$ and such that
there is no $k_0$-dominated splitting of index $j_k$ over $H(P,f)$ for every
$k\in \{1,\dots,\ell\}$.
Then there exists an $\varepsilon$-perturbation $g$ of $f$ supported in a small
neighborhood of $H(P,f)$ such that for
each $k\in \{1,\dots,\ell\}$ there exists a periodic point
$Q_{k,g}$ of $g$ homoclinically related to $P_g$ satisfying property
${\mathfrak{P}}_{j_k,j_k+1,\CC}$ in equation~\eqref{e.complex}.
In particular, for every diffeomorphism $\bar g$ close to $g$ and for
every $k\in \{1,\dots,\ell\}$ there is no dominated splitting of
index $j_k$ over $H(P_{\bar g},\bar g)$.
\end{corol}
\begin{proof} By
Proposition~\ref{p.complex}, for each index $j_k$ there is a
periodic point $Q_k$ homoclinically related to $P$ and $\varepsilon$-perturbations of $f$ along the orbit of $Q_k$ that are
adapted to $H(P,f)$ and to property ${\mathfrak{P}}_{j_k,j_k+1,\CC}$. For
each saddle $Q_k$ consider a pair of transverse heteroclinic
points
$$
Y_k^s \in W^s(Q_k,f)\pitchfork W^u(P,f) \quad \mbox{and} \quad
Y_k^u \in W^u(Q_k,f)\pitchfork W^s(P,f).
$$
For each $k$ we also fix compact disks
$$
K_k^s \subset W^s(Q_k,f) \quad \mbox{and} \quad K_k^u \subset
W^u(Q_k,f)
$$
of the same dimensions as $W^s(Q_k,f)$ and $W^u(Q_k,f)$ containing
$Y_k^s$ and $Y_k^u$ in their interiors. By Remark~\ref{r.complex},
we can assume that the orbits of the saddles $Q_k$ are different.
Thus there are small neighborhoods $V_1,\dots, V_\ell$ of these orbits whose
closures are pairwise disjoint and such that for each $k\ne k'$
the orbits of $Y_k^s$ and $Y_k^u$ do not intersect $V_{k'}$. Thus
taking the disks $K^s_k$ and $K_k^u$ small enough, we can assume
that this also holds for the forward orbit of $K_k^s$ and the
backward orbit of $K_k^u$.
For each $k$ we get an adapted $\varepsilon$-perturbation $g_k$ supported
in $V_k$ (and associated to the compact sets $K^s_k$ and $K^u_k$).
Since the supports of these perturbations are disjoint, we can
perform all them simultaneously obtaining a diffeomorphism $g$
that is $\varepsilon$-close to $f$ and has saddles $Q_{k,g}$ satisfying
${\mathfrak{P}}_{j_k,j_k+1,\CC}$, $k=1,\dots,\ell$.
It remains to check that these saddles are homoclinically related
to $P_g$. Observe that for each $k$ the points $Y_k^s$ and $Y_k^u$
are transverse heteroclinic points (associated to $Q_k$ and $P$)
for $g_k$. The choices of the orbits of these heteroclinic points
and of the sets $V_j$ imply that $Y_k^s$ and $Y_k^u$ are also
transverse heteroclinic points (associated to $Q_k$ and $P$) for
$g$ (in fact, the orbits of the points $Y_k^s$ and $Y_k^u$ are the
same for $g_k$ and $g$). This completes the proof of the
corollary.
\end{proof}
We also get the following genericity result.
\begin{corol} \label{c.noname}
There exists a residual subset $\cG$ of $\mathrm{Diff}^1(M)$ such that every
diffeomorphism $f\in \cG$ satisfies the following property:
For every $i,j\in\{1,\dots,d-1\}$, $i\ne j$, and for every
periodic point $P$ of $s$-index $i$ of $f$ such that
there is no
dominated splitting of index $j$ over $H(P,f)$ there exists a
periodic point $Q$ homoclinically related to $P$ satisfying
property ${\mathfrak{P}}_{j,j+1,\CC}$.
\end{corol}
The corollary follows from standard genericity arguments after
noting that for a homoclinic class $H(P,f)$ to have a saddle $Q$
homoclinically related to $P$ satisfying property
${\mathfrak{P}}_{j,j+1,\CC}$ is an open condition.
We are now ready to prove Corollary~\ref{c.complexb}.
\begin{proof}[Proof of Corollary~\ref{c.complexb}]
The residual subset $\cG$ in Corollary~\ref{c.noname} can be
chosen with the following additional property, see \cite{BC:04}.
For every $f\in\cG$ and for every pair of hyperbolic periodic points
$P$ and $Q$ of $f\in \cG$ that are in the same chain recurrent class the following holds
\begin{itemize}
\item
the homoclinic classes of $P$ and $Q$ are equal and
\item
there is a neighborhood $\cU$ of $f$ such that for all $g\in \cU$
the chain recurrence classes of $P_g$ and $Q_g$ are equal.
\end{itemize}
Now it is enough to consider a point $Q\in H(P,f)$ of $s$-index
different from the one of $P$ and to apply
Corollary~\ref{c.noname} to $P$ (if $j$ is different to the index
of $P$) or to $Q$ (otherwise).
\end{proof}
\noindent{\emph{Comment.}} We wonder if in the conclusion
of Corollary~\ref{c.complexb} it is possible to consider
homoclinic classes instead of chain recurrence classes. One
difficulty is that in general one may have two hyperbolic periodic
points with different stable index that are robustly in the same
chain recurrence class but whose homoclinic classes do not
coincide robustly. More precisely:
\begin{ques}
\label{q.=} Consider an open set $\,\cU$ of $\mathrm{Diff}^1(M)$ and two
hyperbolic saddles $P_f$ and $Q_f$ whose continuations are defined for all $f\in
\cU$, have different stable indices, and whose chain recurrence
classes coincide for all $f\in \cU$.
Does there exist an open and dense subset $\cV$ of $\cU$ such that
for any $f\in \cV$ one has $Q_f\in H(P_f,f)$? Or even more, $H(P_f,f)=H(Q_f,f)$?
\end{ques}
By \cite{BC:04} the answer to this question is affirmative when
the saddles have the same index. It is also true when the chain
recurrence class is partially hyperbolic with a central direction
that splits into one-dimensional central directions. This
follows using quite standard arguments and we will provide the details
of this construction in a forthcoming note.
\section{Obtaining non-real multipliers: Proof of Proposition~\ref{p.complex}} \label{s.proofof}
In this section we prove Proposition~\ref{p.complex}. This
proposition follows from the next lemma:
\begin{lemm}\label{l.pcomplex}
Consider a homoclinic class $H(P,f)$ and $j\in \NN$ satisfying the
hypothesis of Proposition~\ref{p.complex}. Then there are a hyperbolic
periodic point $Q$ homoclinically related to $P$ and a path
of cocycles $(A_{i,t})_{t\in [0,1]}$, $0\leq i<\ell$ and $\ell=\pi(Q)$,
over the orbit of $Q$ that
are $\varepsilon$-perturbations of $Df(f^i(Q))$ and satisfy the following
properties:
\begin{enumerate}
\item[{\bf{(A)}}]
the composition $B_t=A_{\ell-1,t}\circ \cdots \circ A_{0,t}$ is
hyperbolic for all $t\in [0,1]$,
\item[{\bf{(B)}}]
$A_{i,0}=Df(f^i(Q))$ for all $i=0,\dots, \ell-1$, and
\item[{\bf{(C)}}]
the multipliers $\lambda_m$ and the Lyapunov exponents $\chi_m$ of the
composition $B_1$ satisfy the conclusions in Proposition~\ref{p.complex}:
$$
\chi_j=\chi_{j+1}, \qquad \chi_m\neq \chi_j \quad \mbox{if $m\neq
j,j+1$}, \qquad \lambda_j, \lambda_{j+1}\not\in \RR.
$$
\end{enumerate}
\end{lemm}
We briefly introduce some formalism that we will use only in this section.
Consider a set $\Sigmagma$ and a bijection $g\colon \Sigmagma \to \Sigmagma$.
Let $E$ be a vector bundle over the
base $\Sigmagma$ such that its fibers $E_x$, $x\in\Sigmagma$, are endowed with a Euclidean metric.
A {\emph{linear cocycle}} on $E$ over $g$ is a map
${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}\colon E \to E$ that sends each fiber $E_x$ to a fiber $E_{g(x)}$ by a linear isomorphism ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{x}$.
The map $g$ is called the {\emph{base transformation}} of the cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}$.
The {\em distance} between two linear cocycles ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}$ and ${\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T}$ above the same base transformation $g$ is
$$
{\operatorname{dist}}({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S},{\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T})=\sup_{x\in \Sigmagma}\{\|{\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{x}-{\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T}_{x}\|,\|{({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{x}})^{-1}-{({\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T}_{x})}^{-1}\|\}.
$$
A {\emph{path of cocycles}} defined on the bundle $E$ is a one-parameter family of cocycles $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t)_{t\in[0,1]}$ above the same base transformation $g$
such that the map $t\mapsto {\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t$ is continuous for the metric above.
The {\emph{radius}} of the path $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t)_{t\in[0,1]}$ is defined by
$$
\max_{t\in [0,1]}
{\operatorname{dist}}({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_0,{\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t).
$$
Here we only deal with continuous cocycles (for the ambient topology of $E$)
whose base transformations are diffeomorphisms or restrictions of diffeomorphisms to invariant subsets of the ambient manifold.
Finally, hyperbolicity and domination of cocycles are defined in the natural way, see for example
Definition~\ref{d.severalbundles}.
We will deduce Lemma~\ref{l.pcomplex} from the following result:
\betagin{lemm}\lambdabel{l.pathpcomplex}
Consider a homoclinic class $H(P,f)$ and $j\in \NN$ satisfying the
hypothesis of Proposition~\ref{p.complex}. Then there is an arbitrarily small path of
continuous cocycles $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t)_{t\in[0,1]}$ on $TM$
above the diffeomorphism $f$,
a point $\bar Q$ homoclinically related to $P$,
and a horseshoe $K$ containing $\bar Q$
such that:
\betagin{itemize}
\item ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_0$ coincides with $Df$,
\item the cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t$ restricted to $T_KM$ is hyperbolic, for all $t\in[0,1]$,
\item the cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1$ restricted to $T_KM$
has a dominated splitting
$$
T_K M=E\oplus E^{j,j+1}\oplus F
$$
such that $E$ has dimension $j-1$ and $E^{j,j+1}$ has dimension $2$,
\item
the cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1$ restricted
to the (periodic) orbit of $\bar Q$ by $f$
does not admit any dominated splitting over $E^{j,j+1}$.
\end{itemize}
\end{lemm}
Here, a small path means a path of small radius.
\betagin{proof}[Proof of Lemma~\ref{l.pathpcomplex}]
Observe first that arguing as in the previous propositions we
just get a periodic point $\bar Q$ homoclinically related to $P$
and a small path of hyperbolic cocycles $(\bar A_{i,t})_{t\in[0,1]}$,
$0\leq i<\pi(\bar Q)$, defined over the orbit of $\bar Q$ such that
the Lyapunov exponents of the final composition $\bar B_1$
are real and
$\chi_j(\bar B_1)$ and $\chi_{j+1}(\bar
B_1)$ are equal, see
Proposition~\ref{p.gbobo}. Moreover, by Remark~\ref{r.bobo}, we
can assume that, for all $m\ne j,j+1$, the $m$-th exponent
$\chi_m(\bar B_1)$ is different from $\chi_j(\bar B_1)=\chi_{j+1}
(\bar B_1)$.
Note that if the multipliers $\lambdambda_j(\bar B_1)$ and $\lambdambda_{j+1}(\bar B_1)$
are equal then one can make
them non-real and conjugate by an arbitrarily small perturbation.
However they might have opposite signs, which is why Lemma~\ref{l.pcomplex} is not obvious.
We now go to the details of the proof of the lemma.
The path ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t$ of cocycles is obtained as a concatenation of the following three paths.
First fix a transverse homoclinic point $X$ for $\bar Q$ and let
$$
\Lambdambda=\{f^n(\bar Q), 0\leq n <\pi(\bar Q)\} \cup \{f^n(X), n\in \ZZ\}.
$$
The compact invariant set $\Lambdambda$ is hyperbolic for the cocycle $Df$.
\betagin{itemize}
\item The first path ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[1]}$ ``linearizes'' the dynamics around $\bar Q$.
\item The second path ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[2]}$ is a path of cocycles on $TM$ that extends the path $(\bar A_{i,t})_{t\in[0,1]}$ of cocycles over the orbit of $\bar Q$ introduced above
in such a way that the set $\Lambdambda$ is a hyperbolic set for all $t$.
\item The third path ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[3]}$ provides a cocycle having the required dominated splitting over a horseshoe containing the set $\Lambdambda$.
\end{itemize}
For simplicity of notation, we will assume that $\bar Q$ is a fixed point for $f$ (the argument is identical in the general case).
Thus we write $(\bar A_{t})_{t\in[0,1]}$ instead of $(\bar A_{i,t})_{t\in[0,1]}$.
In what follows, the path of cocycles $(\bar A_{t})_{t\in[0,1]}$ becomes a path of matrices of $GL(d,\RR)$.
\noindent \textbf{(I) The first path of cocycles ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[1]}$.}
Fix a chart around the point $\bar Q$ so that for any $x$ in a neighborhood $V$ of the orbit of $\bar Q$, we can identify the derivative $Df$ (or any neighboring cocycle) at $x$ to a matrix of $GL(d,\RR)$.
\betagin{clai}\lambdabel{c.path1}
There is an arbitrarily small path $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[1]})_{t\in [0,1]}$ of continuous cocycles on $TM$ above $f$
and a neighborhood $W\subset V$ of $\bar Q$ such that
\betagin{itemize}
\item
${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_0^{[1]}=Df$,
\item by considering the restriction to the fiber of each point $x\in W$, the cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1^{[1]}$ is identified to the derivative of $f$ at $\bar Q$,
\item the set $\Lambdambda$ is hyperbolic for all the cocycles ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[1]}$.
\end{itemize}
\end{clai}
\betagin{proof}
By a unit partition, we build a cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1^{[1]}$ satisfying the second item of the claim, for some small neighborhood $W$ of $\bar Q$.
This cocycle can be chosen arbitrarily close to $Df$ (just take $W$ small enough). On each fiber of $TM$,
take for the matrix of ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[1]}$ the $(1-t,t)$-barycenter of the matrices of $Df$ and ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1^{[1]}$.
Since the set $\Lambdambda$ is hyperbolic for $Df$, it will also be for all the cocycles ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[1]}$, provided we chose ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1^{[1]}$ close enough to $Df$.
\end{proof}
\noindent \textbf{(II) The second path of cocycles ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[2]}$.}
Fix a neighborhood $W$ of $\bar Q$ and a path $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[1]})_{t\in [0,1]}$ as in Claim~\ref{c.path1}.
\betagin{clai}\lambdabel{c.path2}
There is a path $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[2]})_{t\in [0,1]}$ of continuous cocycles on $TM$ above $f$ such that:
\betagin{itemize}
\item
${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_0^{[2]}={\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1^{[1]}$,
\item its radius is arbitrarily close to that of $\left(\bar A_t\right)_{t\in[0,1]}$,
\item ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{1}^{[2]}$ coincides with $\bar A_{1}$ over $\bar Q$,
\item for all $t\in [0,1]$, the set $\Lambdambda$ is hyperbolic for the cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[2]}$.
\end{itemize}
\end{clai}
\betagin{proof}
For all $t\in [0,1]$, denote by $E^u_t$ and $E^s_t$ the stable and unstable directions of the hyperbolic point $\bar Q$
for the cocycle $\bar A_t$. These directions vary continuously with $t$.
Hence, given any $\varepsilonilon>0$, there exists a sequence $0=t_0<\dots<t_N=1$
of times such that, for all $0\leq n < N$, there is a path of linear maps $\theta_{n,t}\in GL(d,\RR)$, with $\theta_{n,t_n}=Id$, and for all $t_n\leq t\leq t_{n+1}$:
\betagin{itemize}
\item $\theta_{n,t}$ is $\varepsilonilon$-close to identity,
\item $\theta_{n,t}(E^u_{t_n})=E^u_t$ and $\theta_{n,t}(E^s_{t_n})=E^s_t$.
\end{itemize}
Consider
$n_0\in \NN$ such that $f^{\pm n}(X)\in W$, for all $n\geq n_0$.
First, we define the cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}^{[2]}_{t}$ over the segment of orbit $\{f^n(X)\}_{n \ge 0}$ and
for all $t\in[0,1]$. We denote by ${\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T}_{n,t}$ the linear map corresponding to the cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}^{[2]}_{t}$ over the point $f^n(X)$. For all $t_n\leq t\leq t_{n+1}$, define ${\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T}_{n,t}$ as follows:
\betagin{itemize}
\item${\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T}_{k,t}$ coincides with ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{1}^{[1]}$ at $f^k(X)$, if $0\leq k < n_0$,
\item${\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T}_{n_0+k,t}=\bar A_{t_k}\circ \theta_{k,t_{k+1}}$, if $k<n$,
\item${\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T}_{n_0+n,t}=\bar A_t\circ\theta_{n,t}$,
\item${\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T}_{n_0+k,t}=\bar A_t$, if $k>n$.
\end{itemize}
Recall that the set $\Lambdambda$ is hyperbolic for ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}^{[1]}_1$. Let $E^s_X$ and $E^u_X$ be the stable and unstable directions at $X$ for the cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}^{[1]}_1$.
By construction, for all $k\geq n$, the composition ${\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T}_{k,t}\circ ...\circ {\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T}_{0,t}$ maps $E^s_X$ (resp. $E^u_X$) into a
direction corresponding to the stable (resp. unstable) direction of $\bar A_t$.
We define ${\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T}_{n,t}$ symmetrically for the backward orbit $\{f^n(X)\}_{n \le 0}$ of $X$.
Let ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{t,\Lambdambda}^{[2]}$ be the linear cocycle on $T_{\Lambdambda}M$ given by the linear
maps ${\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T}_{n,t}$ over the orbit of $X$ and by the matrix $\bar A_t$ over the point $\bar Q$.
Then the bundles $E^s_X$ and $E^u_X$ are uniformly contracted and uniformly expanded,
respectively, by positive iterations of ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{t,\Lambdambda}^{[2]}$, and conversely by negative iterations.
Hence, the orbits of the bundles $E^s_X$ and $E^u_X$ provide a hyperbolic splitting for ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{t, \Lambdambda}^{[2]}$ over $\Lambdambda$: the cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{t, \Lambdambda}^{[2]}$ is hyperbolic.
Besides, by construction, the family $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{t,\Lambdambda}^{[2]})_{t\in[0,1]}$ is a path
of continuous cocycles starting at the restriction of ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1^{[1]}$ to the set $\Lambdambda$.
The radius of this path can be found close to the radius of $\left(\bar A_t\right)_{t\in[0,1]}$: just take $\varepsilonilon>0$ small enough.
Now, all we need to do is to extend the path ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{t,\Lambdambda}^{[2]}$ of cocycles above the restriction of $f$ to $\Lambdambda$ to
a small path $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[2]})_{t\in [0,1]}$ of continuous cocycles above $f$ starting at ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1^{[1]}$.
Note that, for all $n>n_0+N$, the matrix of ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{t,\Lambdambda}^{[2]}$ is $\bar A_t$
at the iterate $f^{\pm n}(X)$. The same holds at $\bar Q$. Fix a small neighborhood $U_{\bar Q}\subset M$ of $\bar Q$ and these iterates. Fix a small neighborhood $U_n$ for each other iterate $f^n(X)$. Do this so that we have a disjoint union
$$
U=U_{\bar Q}\cup \bigcup_{-n_0-N}^{n_0+N} U_{n}.
$$
Let $1=\phi+\psi$ be a unit partition on $M$ such that $\phi=1$ on $\Lambdambda$ and $\phi=0$ outside of the set $U$. Let ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[2]}$ be the cocycle above $f$ whose matrix on the fiber $T_xM$ is the $(\phi(x),\psi(x))$-barycenter of the following two matrices:
\betagin{itemize}
\item the matrix of ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1^{[1]}$ at $x$,
\item $\betagin{cases}
\mbox{the matrix of ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{t,\Lambdambda}^{[2]}$ at $f^n(X)$, if $x\in U_n$},\\
\mbox{the matrix $\bar A_t$ of ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{t,\Lambdambda}^{[2]}$ at $\bar Q$, if $x\in U_{\bar Q}$}.
\end{cases}$
\end{itemize}
The cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[2]}$ does restrict to $\Lambdambda$ as ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{t,\Lambdambda}^{[2]}$. Choosing the neighborhood $U$ of $\Lambdambda$ small enough, one finds the radius of $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[2]})_{t\in [0,1]}$ as close as wished to the radius of
$({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{t,\Lambdambda}^{[2]})_{t\in [0,1]}$, hence as close as wished to the radius of $\left(\bar A_t\right)_{t\in[0,1]}$.
This ends the proof of the claim.
\end{proof}
\noindent \textbf{(III) The third path of cocycles ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[3]}$.}
We fix paths ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[1]}$ and ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[2]}$ as in Claims~\ref{c.path1} and~\ref{c.path2}.
\betagin{clai}\lambdabel{c.path3}
There is an arbitrarily small path of cocycles $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[3]})_{t\in [0,1]}$ defined on $TM$ above the diffeomorphism $f$ such that:
\betagin{itemize}
\item
${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{0}^{[3]}={\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{1}^{[2]}$,
\item ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{1}^{[3]}$ coincides with $\bar A_{1}$ at $\bar Q$,
\item ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{1}^{[3]}$ admits, over the set $\Lambdambda$, a dominated splitting of the form
$$
T_{\Lambdambda}M=E\oplus E^{j,j+1}\oplus F
$$
such that $E$ has dimension $j-1$, and $E^{j,j+1}$ has dimension $2$,
\item for all $t\in[0,1]$, the cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[3]}$ is hyperbolic over the set $\Lambdambda$.
\end{itemize}
\end{clai}
\betagin{proof}
Since ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{1}^{[2]}$ is equal to $\bar A_1$ at $\bar Q$, recalling the properties of the exponents of $\bar A_1$,
we have that there is a dominated splitting $T_{\bar Q}M=E\oplus E^{j,j+1}\oplus F$ with the required dimensions and such that $E^{j,j+1}$
is either uniformly contracted or uniformly expanded by ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{1}^{[2]}$.
We need to extend this splitting to the whole orbit of $X$.
Observe that
there are $(j-1)$ and $(j+1)$-dimensional spaces $E_X$ and $\tilde{E}_X$ at the point $X$ such that their positive iterations by ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1^{[2]}$
converge to $E$ and $\tilde{E}=E\oplus E^{j,j+1}$, respectively. Symmetrically, there are $(j-1)$ and $(j+1)$-co\-di\-men\-sio\-nal spaces $\tilde{F}_X$
and $F_X$ whose negative iterations by ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1^{[2]}$ converge to $\tilde{F}=E^{j,j+1}\oplus F$ and $F$, respectively.
One can perturb slightly ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{1}^{[2]}$ at the point $X$ in order to make $\tilde{E}_X$ transverse to $F_X$
and $E_X$ transverse to $\tilde{F}_X$. Then the iterates of $\tilde{E}_X$
and $F_X$ by the perturbed cocycle along the orbit of $X$ extend
the dominated splitting $\tilde{E}\oplus F$ to the whole set $\Lambdambda$.
Symmetrically, we get an extension of the dominated splitting $E\oplus \tilde{F}$ to the set $\Lambdambda$.
Taking $ E^{j,j+1}=\tilde E \cap \tilde F$ we get
the dominated splitting $E\oplus E^{j,j+1}\oplus F$ over $\Lambdambda$ for that perturbed cocycle.
That perturbation of ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{1}^{[2]}$ may be reached by an arbitrarily small path of cocycles
$({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{t}^{[3]})_{t\in [0,1]}$ on $TM$ such that ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_{1}^{[3]}$ coincides with $\bar A_{1}$ at $\bar Q$.
In particular, it can be chosen so that ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t^{[3]}$ is hyperbolic over $\Lambdambda$ for all $t$.
\end{proof}
\noindent \textbf{End of the proof of Lemma~\ref{l.pathpcomplex}}.
Define the path $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t)_{t\in [0,1]}$ as the concatenation of the
paths $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}^{[1]}_t)_{t\in [0,1]}$, $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}^{[2]}_t)_{t\in [0,1]}$, and $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}^{[3]}_t)_{t\in [0,1]}$ given by Claims~\ref{c.path1}, \ref{c.path2}, and \ref{c.path3}.
By construction, the path $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t)_{t\in [0,1]}$ can be found having radius arbitrarily close to the radius of $(\bar A_t)_{t\in [0,1]}$.
Choosing $\bar Q$ conveniently, this last radius can be taken arbitrarily small.
Note that the diffeomorphism $f$
has horseshoes $K$ containing the set $\Lambdambda$ that are arbitrarily close to $\Lambdambda$ for the Hausdorff distance. Choosing
the horseshoe
$K$ Hausdorff-close enough to $\Lambdambda$, we have the following:
\betagin{itemize}
\item for all $t\in [0,1]$,
the cocycles ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t$ are continuous on $TM$ and hyperbolic over $\Lambdambda$. Thus, by a compactness argument on $t\in[0,1]$, the cocycles ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t$ are
also hyperbolic over $K$ for all $t\in[0,1]$.
\item
The dominated splitting $T_{\Lambdambda} M=E\oplus E^{j,j+1}\oplus F$ for ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1={\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1^{[3]}$ extends to $K$, see \cite[Appendix B]{BDV:04}.
\end{itemize}
All the conclusions of Lemma~\ref{l.pathpcomplex} are then satisfied. This ends its proof.
\end{proof}
\betagin{proof}[Proof of Lemma~\ref{l.pcomplex}]
Let $({\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t)_{t\in [0,1]}$, $\bar Q$, $K$, and $T_{K}M=E\oplus E^{j,j+1}\oplus F$ be as in Lemma~\ref{l.pathpcomplex}.
Consider a transverse homoclinic point $X$ of $\bar Q$, $X\in W^u_{\operatorname{loc}}
(\bar Q,f) \cap K$, and an iterate of it $f^r(X)\in W^s_{\operatorname{loc}}
(\bar Q,f)\cap K$. These two points can be chosen arbitrarily
close to $\bar Q$.
\betagin{figure}[htb]
\psfrag{Q}{$\bar Q$}
\psfrag{first}{first loop}
\psfrag{second}{second loop}
\psfrag{QQ}{$Q_n$}
\includegraphics[width=5.5cm]{loop2.eps}\hspace{1cm}
\caption{Two-loops orbits $Q_n$}
\lambdabel{f.orbit}
\end{figure}
We next consider periodic points $Q_n$ passing close to $X$ and having orbits with ``two loops''.
For every large $n$ there is a
periodic point $Q_n\in K$ of period $2\,n+2+2\,r$ as follows (see
Figure~\ref{f.orbit}): Let $Q_n=Q_n^0$ and $Q_n^i=f^i(Q_n)$, where
\betagin{equation}
\lambdabel{e.Qn}
\betagin{split}
& \mbox{$\bullet\quad Q_n^0$ is close to $f^r(X)$ and $Q_n^0,\dots
,Q_n^n$ are
close to $\bar Q$,}\\
& \mbox{$\bullet\quad Q^{n+i}_n$ is close to $f^i(X)$ for all
$i=0,\dots,r$,}\\
& \mbox{$\bullet\quad Q_n^{n+r},\dots ,Q_n^{n+r+n+2}$ are close to
$\bar Q$,
and}\\
& \mbox{$\bullet\quad Q^{2\,n+r+2+i}_n$ is close to $f^i(X)$ for
all $i=0,\dots,r$.}
\end{split}
\end{equation}
\betagin{clai}\lambdabel{c.orientation}
For $n$ large enough, the linear cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1$ preserves the orientation of the central bundle $E^{j,j+1}$ at the periodic orbit of $Q_n$.
\end{clai}
\betagin{proof} Let ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}^c_1$ be the restriction of ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1$ to the central bundle $E^{j,j+1}$.
Since the base $K$ of the $2$-dimensional bundle $E^{j,j+1}$ is a Cantor set,
there is a continuous identification between $E^{j,j+1}$ and $K\times \RR^2$.
Thus, for any $x\in K$, the restriction ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}^c_{1,x}$ of ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}^c_1$ to the fiber $T_xM$ identifies to a $2\times 2$ matrix.
By continuity, if the distance between a pair of points $x,y\in K$ is less than some $\eta>0$, then the determinants of the matrices
${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}^c_{1,x}$ and ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}^c_{1,y}$ have the same sign. One then easily checks that for $n$ large enough (when ``close'' in (\ref{e.Qn}) means distance less than $\eta/2$), the composition of the matrices ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}^c_{1,x}$ along the
(finite) entire orbit of $Q_n$ has positive determinant.
\end{proof}
If the multipliers $\lambdambda_j$ and $\lambdambda_{j+1}$ of the first return map of ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1$ at some $Q_n$ are complex,
then all the conclusions of Lemma~\ref{l.pcomplex} are satisfied by $Q=Q_n$ and the restriction $(A_{i,t})_{t\in[0,1]}$ of the path ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t$ to the orbit of $Q$.
Otherwise, by Claim~\ref{c.orientation},
these multipliers are real and have the same sign.
Recall that the linear cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}^c_1$ admits no dominated splitting at the point $\bar Q$.
Since the orbits of $Q_n$ accumulate on $\bar Q$, as $n$ increases the strength of domination (if any)
of the splitting of the bundle $E^{j,j+1}$
along the orbit of $Q_n$
for the cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1$
will decrease.
We can now apply \cite[Proposition 3.1]{BDP:03}. This result claims that, for $n$ large enough, the
cocycle ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1$ can be perturbed along the
two-dimensional bundle $E^{j,j+1}$ and along the orbit of $Q_n$ to get a pair of non-real and
conjugate eigenvalues.
For $n$ large enough, that perturbation can be reached through a small path $({\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T}_{t,n})_{t\in [0,1]}$ of cocycles over the orbit of $Q_n$.
If the perturbation is small enough then, for all $t$, the hyperbolicity and the domination of the splitting $E\oplus E^{j,j+1}\oplus F$ of ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_1$ over
the horseshoe
$K$ are preserved. Thus the conclusions of Lemma~\ref{l.pcomplex} are all satisfied for $Q=Q_n$ and the cocycle $(A_{i,t})_{t\in[0,1]}$ defined as the concatenation of
\betagin{itemize}
\item the restriction of the path ${\mathcal A}} \def\cG{{\mathcal G}} \def\cM{{\mathcal M}} \def\cS{{\mathcal S}_t$ to the orbit of $Q=Q_n$, and
\item the path ${\mathcal B}} \def\cH{{\mathcal H}} \def\cN{{\mathcal N}} \def\cT{{\mathcal T}_{t,n}$.
\end{itemize}
This concludes the proof of Lemma~\ref{l.pcomplex}.
\end{proof}
\section{Formation of strong homoclinic connections}\lambdabel{s.formation}
We say that a saddle $P$ has a {\emph{strong homoclinic
intersection}} if there is a strong stable manifold of the orbit
of $P$ that intersects the unstable manifold of the orbit of $P$
or vice-versa. That is, let $i$ be the $s$-index of $P$, then either
$W^{ss}_k(P)\cap W^u(P)\ne \emptyset$ for some $k<i$ or
$W^{uu}_j(P)\cap W^s(P)\ne \emptyset$ for some $j< \dim (M)-i$
(recall the definitions of
$W^{ss}_k(P)$ and $W^{uu}_j(P)$ in Section~\ref{ss.basic}). In this
section, we see how the lack of domination of a homoclinic class
yields strong homoclinic intersections.
\betagin{prop}\lambdabel{p.strongconnection}
For every $K>1$, $\varepsilon>0$ and $d\geq 2$, there exists a constant $k_0$
with the following property.
Consider $f\in{\mbox{{\rm Diff}}^1}(M)$, $\dim (M)=d$, and a hyperbolic periodic point $P$
of $s$-index $i$, $i\in \{2,\dots, d-1\}$, such that $H(P,f)$ is
non-trivial and has no $k_0$-dominated splitting of index $i-1$. Then
there is a periodic point $Q$
homoclinically related to $P$ and an $\varepsilon$-perturbation of $f$
along the orbit of $Q$ that is adapted to $H(P,f)$ and to property
${\mathfrak{P}}_{ss}$ defined as follows
\betagin{equation}\lambdabel{e.pss}
{\mathfrak{P}}_{ss}\eqdef
\betagin{cases}
&\mbox{\textbf{(i)} $\chi_{i-1}(Q)<\chi_{i}(Q)$,}\\
&\mbox{\textbf{(ii)} $W^{ss}_{i-1}(Q)\cap W^u(Q)\ne\emptyset$}.
\end{cases}
\end{equation}
\end{prop}
\betagin{proof}
By Proposition~\ref{p.complex} there is
a hyperbolic periodic point $Q$ that is homoclinically
related to $P$ and an $\frac \varepsilon 2$-perturbation $f'$ of $f$
along the orbit of $Q$ that is adapted to $H(P,f)$ and to property
${\mathfrak{P}}_{i-1,i,\CC}$ (see equation \eqref{e.complex}).
This means that, for a fixed small $\varrho>0$, a neighborhood $V$ of the
orbit of $Q$, and compact sets $K^s\subset W^s_\varrho(Q)$ and
$K^u\subset W^u_\varrho(Q)$ disjoint from $V$, there is a diffeomorphism
$f'$ that is $\frac \varepsilon 2$-close to $f$ such that
\betagin{enumerate}
\item\lambdabel{ipg1} $f'=f$ outside $V$ and along the $f$-orbit of
$Q$,
\item \lambdabel{ipg2} the points $P$ and $Q$ are homoclinically related for $f'$,
\item \lambdabel{ipg3} $K^s\subset W^s_\varrho(Q,f')$ and
$K^u \subset W^u_\varrho(Q,f')$, and
\item \lambdabel{ipg4} the saddle $Q_{f'}=Q$ satisfies property ${\mathfrak{P}}_{i-1,i,\CC}$.
\end{enumerate}
By Remark~\ref{r.complex}, the period of $Q$ can be chosen arbitrarily large.
Hence Proposition~\ref{p.bobo} provides a small path of hyperbolic cocycles
joining the restriction of $Df'$ over the orbit
of $Q$ and a cocycle with real multipliers. Applying
Lemma~\ref{l.gourmelon} to this cocycle and to $f'$ we get
an $\frac \varepsilon 2$-perturbation $f''$ of $f'$, such that
$Q=Q_{f''}$ has a pair of real multipliers $\lambda_{i-1}(Q)$ and
$\lambda_{i}(Q)$ such that $|\lambda_{i-1}(Q)|=|\lambda_{i}(Q)|$ and
$|\lambda_{i}(Q)|\ne |\lambda_{j}(Q)|$ for all $j\ne i,i-1$,
and
such that conditions (\ref{ipg1})--(\ref{ipg3}) also hold for
$f''$. Note that $f''$ is $\varepsilon$-close to $f$.
Consider now local coordinates around $Q$ such that
$$
W^s_{\operatorname{loc}} (Q,f'')= [-1,1]^i\times \{0^{d-i}\}\quad \mbox{and}\quad
W^u_{\operatorname{loc}} (Q,f'')= \{ 0^i\}\times [-1,1]^{d-i}.
$$
To conclude the proof of the proposition it is enough to get a
diffeomorphism $g$ arbitrarily $C^1$-close to $f''$ and a
small neighborhood $V_0\subset V$ of the orbit of $Q$ such that
\betagin{enumerate}
\item[{\bf (a)}]
$g=f''$ outside $V_0$ and along the $g$-orbit of $Q_g=Q$,
\item[{\bf (b)}]
$W^s_{\operatorname{loc}} (Q,g)= [-1,1]^i\times \{0^{d-i}\}$ and $W^u_{\operatorname{loc}} (Q,g)=
\{ 0^i\}\times [-1,1]^{d-i}$, and
\item[{\bf (c)}]
$Q_g$ satisfies property ${\mathfrak{P}}_{ss}$.
\end{enumerate}
This will be done in several steps. To simplify the presentation,
let us assume in the remaining steps of the proof that the period
of $Q$ is one.
\begin{clai}\label{cl.linear}
There is an arbitrarily $C^1$-small perturbation $g'$ of $f''$
satisfying (a) and (b) and such that the restriction of $g'$ to a
small neighborhood of $Q$ in $W^s_{\operatorname{loc}}(Q,g')$ is linear.
Moreover, one has that $D g'(Q)=D f''(Q)$.
\end{clai}
This claim allows us to define a two dimensional locally invariant
center-stable manifold $W^{cs}_\tau (Q,g')$ of $Q$ tangent to the
space corresponding to the $(i-1)$-th and $i$-th multipliers of
$Q$. Up to a linear change of coordinates, we have
$$
W^{cs}_\tau (Q,g')= \{ 0^{i-2}\} \times [-\tau,\tau]^2\times
\{0^{d-i}\} \text{ and }
Df''(Q)(x^s,x^u)=(A^s x^s, A^u x^u).
$$
\begin{proof}[Proof of Claim~\ref{cl.linear}]
Using
the coordinates $(x^s,x^u)$ corresponding to the stable and
unstable bundles, in a neighborhood of $Q$, we write
$$
f''(x^s,x^u)=(f^s(x^s,x^u), f^u(x^s,x^u)).
$$
By the invariance of the local stable and unstable manifolds we
have that $f^u(x^s,0)=0^u$ and $f^s(0^s,x^u)=0^s$.
Next step is to linearize the restriction of $f^s$ to the local
stable manifold. Consider a local perturbation $\tilde f^s$ of
$x^s \mapsto f^s(x^s,0^u)$ supported in a small neighborhood of
$0^s$ such that $\tilde f^s(x^s)=A^s(x^s)$ for small $x^s$.
Note that
$$
\tilde f^s(x^s)= f^s(x^s,0^u)+ h^s(x^s),
$$
where $h^s$ is $C^1$-close to the zero map and has support in a
small neighborhood of $0^s$.
Finally, we choose a bump-function $\psi(x^u)$ such that $\psi=1$
in a neighborhood of $0^u$, it is equal to $0$ outside another
small neighborhood of $0^u$, and it has small derivative. We
define $g'$ in a neighborhood of $(0^s,0^u)$ by
$$
g'(x^s,x^u)= \big( f^s(x^s,x^u) + h^s(x^s)\, \psi (x^u),
f^u(x^s,x^u) \big).
$$
By construction, the restriction of $g'$ to a small neighborhood
in the local stable manifold of $Q$ coincides with $\tilde
f^s=A^s$. Moreover, the local unstable manifold of $Q$ is also
preserved and $Dg'(Q)=Df''(Q)$. This completes the proof of the
claim.
\end{proof}
\begin{clai}\label{cl.homoclinic}
There is an arbitrarily small $C^1$-perturbation $g''$ of $g'$
satisfying Claim~\ref{cl.linear} (in particular, (a) and (b)) and such
that there is a transverse homoclinic point of $Q$ in $F_{i-1}$, where $F_{i-1}$ is
a $D g''(Q)$-invariant one-dimensional linear
space corresponding to the Lyapunov exponent $\chi_{i-1}(Q)$.
\end{clai}
\begin{proof}
Note first that as the homoclinic class of $Q$ is non-trivial
there is a transverse homoclinic point $Y$ of $Q$ that belongs to
the local stable manifold of $Q$ where the dynamics is linear.
Next two steps are quite standard. First, by a perturbation we can
assume that $Y\not\in W^{ss}_{i-2}(Q)$.
Second, after replacing the point $Y$ by some forward iterate of it and
after a new perturbation, we can assume that $Y$ belongs to the
$D g'(Q)$-invariant (central) two-dimensional linear
space $F$ corresponding to the Lyapunov exponents $\chi_{i-1}(Q)$ and
$\chi_{i}(Q)$. This follows noting that any stable non-zero vector of $Q$
that is not in the linear space corresponding to the
first $(i-2)$ Lyapunov exponents has normalized iterates
which accumulate to $F$.
There are two cases according to the restriction of $g'$ to the
two dimensional space $F$.
\medskip
\noindent {\emph{ Case 1: the restriction of $g'$ to $F$ is a
homothety.}} In this case, the point $Y$ belongs to a
one-dimensional $Dg'(Q)$-invariant space and we are done.
\medskip
\noindent {\emph{ Case 2: the restriction of $g'$ to $F$ is
parabolic.}} In this case, the restriction of $Dg'(Q)$ to $F$ is
conjugate to a matrix of the form
$$ \left(
\begin{matrix} \lambda_{i} & 1
\\ 0 & \lambda_i
\end{matrix} \right), \quad 0<|\lambda_i|<1.
$$
Then the normalized iterations of any non-zero vector in $F$
accumulate to the unique one-dimensional invariant sub-space
$F_{i-1}$ of $Dg'(Q)$ in $F$. As above, after a new perturbation
we can assume that there is some iterate of $Y$ in $F_{i-1}$
ending the proof of the claim.
\end{proof}
To conclude the proof of the proposition it is now enough to make
the Lyapunov exponent $\chi_{i-1}(Q)$ smaller than $\chi_i(Q)$ so
that the space $F_{i-1}$ is now locally contained in the strong
stable manifold of $Q$ of dimension $i-1$. To perform this final
perturbation we argue as in Claim~\ref{cl.linear}.
\end{proof}
\section{Homoclinic tangencies yielding heterodimensional cycles}
\label{s.proofofmain}
In this section we prove Theorem~\ref{t.main} and its alternative
version in item (ii) of Remark~\ref{r.mainb}. For that we need the
following two propositions.
\begin{prop}\label{p.cycle}
Consider $f\in {\mbox{{\rm Diff}}^1}(M)$, $\dim (M)=d$, and a hyperbolic periodic point $P$ of $f$
of $s$-index $i\in \{2,\dots,d-1\}$ such that:
\begin{enumerate}
\item[(i)]
for any $C^1$-neighborhood $\cU$ of $f$ there exist a hyperbolic periodic
point $R$ homoclinically related to $P$ and perturbations of $f$
in $\cU$ along the orbit of $R$ that are adapted to $H(P,f)$ and
to property ${\mathfrak{P}}_{ss}$,
\item[(ii)]
for any $C^1$-neighborhood $\cU$ of $f$ and any $\delta>0$ there
exist a hyperbolic periodic point $Q$ homoclinically related to $P$ and
perturbations of $f$ in $\cU$ along the orbit of $Q$ that are
adapted to $H(P,f)$ and to property ${\mathfrak{P}}_{i,\delta}$.
\end{enumerate}
Then there exists a diffeomorphism $g\in \cU$ arbitrarily
$C^1$-close to $f$ having a heterodimensional cycle
associated to $P_g$ and a saddle $S_g$ of $s$-index $i-1$.
\end{prop}
Recall that properties ${\mathfrak{P}}_{ss}$ and ${\mathfrak{P}}_{i,\delta}$, see
\eqref{e.pss} and \eqref{e.pid}, mean that the saddles $R$ and $Q$
satisfy
\[
\begin{split}
& \chi_{i-1}(R)<\chi_{i}(R) \quad \mbox{and} \quad
W^{ss}_{i-1}(R)\cap W^u(R)\ne\emptyset,\\
& \chi_i(Q)\in (-\delta,0).
\end{split}
\]
\begin{prop}\label{p.excfinal}
Consider $f\in {\mbox{{\rm Diff}}^1}(M)$, $\dim (M)=d$, having a hyperbolic periodic point $P$ of
$s$-index $i\in \{2,\dots,d-1\}$ such that:
\begin{enumerate}
\item[{{(1)}}]
$H(P,f)$ is non trivial and has no dominated splitting of index
$i$,
\item[(2')]
there is a diffeomorphism $g$ arbitrarily $C^1$-close to $f$ with
a hyperbolic periodic point $R_g$ homoclinically related to $P_g$ satisfying
property ${\mathfrak{P}}_{ss}$,
and
\item[(3')]
for every $\delta>0$ there exists a hyperbolic periodic point $Q_\delta$
homoclinically related to $P$ such that
$\chi_i(Q_\delta)+\chi_{i+1}(Q_\delta)\ge -\delta$.
\end{enumerate}
Then, there exists a diffeomorphism $g$ arbitrarily $C^1$-close to
$f$ with a heterodimensional cycle associated to
$P$ and to a saddle of $s$-index $i-1$.
\end{prop}
Note that item (1) in Proposition~\ref{p.excfinal}
corresponds exactly to the same item in Theorem~\ref{t.main},
items (2') and (3') are exactly items (2') and (3')
in Remark~\ref{r.mainb}.
Therefore Proposition~\ref{p.excfinal}
implies the conclusions in Remark~\ref{r.mainb}.
We postpone the proof of these propositions to
Sections~\ref{ss.pcycle} and \ref{ss.pexcfinal}.
Assuming these
propositions
we now prove
Theorem~\ref{t.main} and Corollary~\ref{c.main2}.
\subsection{Proof of Theorem~\ref{t.main}}
\label{ss.proofoftmain}
Proposition~\ref{p.strongconnection}
and assumption (2) in the theorem imply that condition (i) in
Proposition~\ref{p.cycle} is satisfied.
Proposition~\ref{p.weak} and assumptions (1) and (3) in the
theorem imply that condition (ii) in Proposition~\ref{p.cycle}
is satisfied.
Proposition~\ref{p.cycle} now provides a diffeomorphism $g$ with a heterodimensional
cycle associated to $P_g$ and a saddle $S_g$ of $s$-index $i-1$.
By Lemma~\ref{l.bodiki}, we can assume that the diffeomorphism $g$ has
a pair of transitive hyperbolic sets $L_g$ and $K_g$ having a robust heterodimensional cycle,
where $L_g$ contains $P_g$ and $K_g$ contains a periodic point $R_g$ of stable index $i-1$.
We now explain how to improve the previous arguments to obtain
robust homoclinic tangencies.
Fix $\varepsilon>0$ and consider the integer $k_0$ associated to
$\varepsilon$ in Proposition \ref{p.complex}. Since $H(P,f)$ has
no dominated splittings of indices $i-1$ and $i$, there are $r>0$
and a neighborhood $\cU$ of $f$ such that for any $f'\in \cU$ and
any $f'$-invariant set $K$ having an $r$-neighborhood containing
$H(P,f)$ there is no $k_0$-dominated splitting over $K$.
We perform a first perturbation $g_0$ of $f$, $g_0\in \cU$, as
above, obtaining a robust heterodimensional cycle between two
transitive hyperbolic sets containing the saddles $P_{g_0}$ and
$R_{g_0}$. By~\cite{BC:04}, taking $g_0$ in a residual subset of
${\mbox{{\rm Diff}}^1}(M)$, we can assume that $H(P_{g_0},g_{0})$ and $H(R_{g_0},g_0)$
coincide. In particular, these homoclinic classes are non-trivial
and their $r$-neighborhoods contain $H(P,f)$. Thus for every
diffeomorphism $h$ close to $g_0$, the homoclinic classes
$H(P_h,h)$ and $H(R_h,h)$ have no $k_0$-dominated splittings of
indices $i-1$ and $i$.
We now consider another small perturbation $g_1\in \cU$ of $g_0$
such that the saddles $P_{g_1}$ and $R_{g_1}$ have a
heterodimensional cycle.
Since the classes $H(P_{g_1},g_{1})$ and $H(R_{g_1},g_{1})$ have
no $k_0$-dominated splittings of indices $i-1$ and $i$,
Proposition \ref{p.complex} provides a pair of hyperbolic periodic
points $Q_{g_1}$ and $T_{g_1}$ homoclinically related to $P_{g_1}$
and $R_{g_1}$, respectively, and two ``independent" local
$\varepsilon$-perturbations $g_{Q}$ and $g_{T}$ of $g_1$ such that
\begin{itemize}
\item
the supports of $g_{Q}$ and $g_{T}$ are disjoint and contained
in arbitrarily small neighborhoods of the orbits of $Q_{g_1}$ and
$T_{g_1}$, respectively,
\item
these perturbations preserve the heterodimensional cycle
associated to $P_{g_1}$ and $T_{g_1}$,
\item
the $i$-th and $(i-1)$-th multipliers of $Q_{g_1}$ for $g_{Q}$
and of $T_{g_1}$ for $g_{T}$ are non-real.
\end{itemize}
As the supports of the perturbations $g_{Q}$ and $g_{T}$ are
disjoint, combining these perturbations one gets a diffeomorphism
$g_2$ such that $P_{g_2}$ and $T_{g_2}$ have a heterodimensional
cycle and the classes $H(P_{g_2},g_2)$ and $H(T_{g_2},g_2)$
robustly have no dominated splittings of indices $i$ and $i-1$,
respectively.
By Lemma~\ref{l.bodiki}, one can perform a last perturbation $g$
so that $P_g\in K_g$ and $T_g\in L_g$ where $K_g$ and $L_g$ are
transitive hyperbolic sets having a robust heterodimensional
cycle. Finally, we choose $g$ in the residual subset of ${\mbox{{\rm Diff}}^1}(M)$
in \cite[Theorem 1]{BD:pre}, this choice implies that the sets
$K_g$ and $L_g$ have robust homoclinic tangencies. \hfil \qed
\subsection{Proof of Corollary~\ref{c.main2}}
We first recall that there is a residual subset $\cR$ of ${\mbox{{\rm Diff}}^1}(M)$
such that every homoclinic class $H(P,f)$ of $f\in\cR$ that does
not have any dominated splitting is the Hausdorff limit of sinks
or sources, see \cite[Corollary 0.3]{BDP:03}. More precisely, if
there is a saddle $Q$ homoclinically related to $P$ whose Jacobian
is less (resp. greater) than one then the class $H(P,f)$ is the
Hausdorff limit of sinks (resp. sources), see the proof of
\cite[Proposition 2.6]{BDP:03}. Thus to prove the corollary it
is enough to consider a saddle $P$ of $s$-index two whose
homoclinic class $H(P,f)$ does not have any dominated splitting
and such that every saddle $Q$ homoclinically related to $P$ has
Jacobian greater than one. By the previous comments, the class
$H(P,f)$ is limit of sources.
Observe that the assumption on the Jacobians implies that
$\chi_2(Q)+\chi_3(Q)>0$. Thus the homoclinic class satisfies all
hypotheses in Theorem~\ref{t.main}. Hence there is a perturbation
$g$ of $f$ with a robust heterodimensional cycle associated to a
hyperbolic set containing $Q_g$ and $P_g$. The corollary now
follows from standard genericity arguments. \hfil \qed
\subsection{Sectional dissipativeness. Corollary~\ref{c.excnodominated}}
Let $P$ be a hyperbolic saddle of a diffeomorphism $f$ such that:
\begin{itemize}
\item
for every diffeomorphism $g$ that is $C^1$-close to $f$ there is
no heterodimensional cycle associated to $P_g$, and
\item
let $i$ be the stable index of $P$; then the homoclinic class
$H(P,f)$ has no dominated splitting of index $i$.
\end{itemize}
Under these hypotheses we consider a dominated splitting
with three bundles (see Definition~\ref{d.severalbundles})
$$
T_{H(P)}M=E_1\oplus E^c\oplus E_3
$$
such that $\dim(E_1)<i<\dim(E_1\oplus E^c)$ and $E^c$ does not
admit
any dominated splitting. Note that the bundles $E_1$ and $E_3$
may
be empty and that $\dim (E^c)\ge 2$.
We now see some properties of the homoclinic class $H(P,f)$ that
follow from Theorem~\ref{t.main} and will imply the corollary.
There are the following cases:
\begin{itemize}
\item $\dim (E^c)=2$: Assume that $E^c$ is sectionally dissipative. Then, by Theorem~\ref{t.main} and
Remark~\ref{r.mainb}, for every diffeomorphism $g$ $C^1$-close to
$f$ and every saddle $R_g$ homoclinically related to $P_g$ the
unstable and strong stable manifolds of $R_g$ have empty
intersection. There is a similar statement when $E^c$ is sectionally
dissipative for $f^{-1}$.
\item
$\dim(E^c)\ge 3$. Since the diffeomorphisms close to $f$ cannot
have heterodimensional cycles, Corollary~\ref{c.main}
implies that
$$
{\textbf{(I)\quad}} i=\dim(E_1\oplus E^c)-1 \qquad \mbox{or}
\qquad {\textbf{(II)\quad}} i= \dim (E_1)+1.
$$
In case (I), by Theorem~\ref{t.main}, the bundle $E^c$ is
uniformly sectionally dissipative. Moreover, by
Remark~\ref{r.mainb}, for every diffeomorphism $g$ $C^1$-close to
$f$ and every saddle $R_g$ homoclinically related to $P_g$ the
unstable and strong stable manifolds of $R_g$ have empty
intersection. There is a similar statement for case (II) considering
$f^{-1}$.
\end{itemize}
The previous discussion implies Corollary~\ref{c.excnodominated}.
$\square$
\subsection{Proof of Proposition~\ref{p.cycle}}
\label{ss.pcycle}
We fix a small neighborhood $\cU$ of $f$ and small $\delta>0$.
Conditions (i) and (ii) in the proposition provide saddles $R$ and
$Q$ having different orbits and local perturbations $g_R$ and
$g_Q$ throughout these orbits as follows. Consider small
neighborhoods $V_R$ and $V_Q$ of the orbits of $R$ and $Q$ having
disjoint closures. Then there are perturbations $g_R$ and $g_Q$ of
$f$ in $\cU$ whose supports are contained in $V_R$ and $V_Q$ such
that $R$ satisfies ${\mathfrak{P}}_{ss}$ for $g_R$ and $Q$ satisfies
${\mathfrak{P}}_{i,\delta}$ for $g_Q$.
As the supports of these perturbations are disjoint, we can
consider a perturbation $g_0$ of $f$ which coincides with $g_R$ in
$V_R$, with $g_Q$ in $V_Q$, and with $f$
outside these neighborhoods. Note that if $\cU$ is small then the
diffeomorphism $g_0$ can be chosen arbitrarily close to $f$.
Moreover, since we are considering adapted perturbations, we have
that the saddles $R$ and $Q$ are all homoclinically
related to $P$ (recall the proof of Corollary~\ref{c.1}).
The proposition is an immediate consequence of the following two
claims. We observe that there are similar results in
\cite{PPV:05} and \cite[section 2.5]{CP:prep}, so we just sketch
their proofs.
\begin{clai} \label{cl.weakandss}
There is a perturbation $g_1$ of $g_0$ having a hyperbolic
periodic point $S_{g_1}$ that is homoclinically related to
$P_{g_1}$ and that satisfies simultaneously properties ${\mathfrak{P}}_{ss}$
and ${\mathfrak{P}}_{i,\delta}$.
\end{clai}
\begin{clai} \label{cl.getacycle}
The dynamical configuration in Claim~\ref{cl.weakandss} yields
diffeomorphisms $g$ having heterodimensional cycles associated to
a periodic orbit homoclinically related to $P_g$ and to a saddle
of index $i-1$. Moreover, if $\delta>0$ is small and $g_1$ is close
enough to $f$ then $g\in \cU$.
\end{clai}
\begin{proof}[Sketch of the proof of Claim~\ref{cl.weakandss}]
The idea of the proof of the claim is the following. First,
consider a strong homoclinic intersection $X$ of the orbit of $R$.
Then there are $N_1$ and $N_2>0$ such that
$$
X\in g^{-N_1}_0\big( W^{ss}_{\operatorname{loc}}(R, g_0) \big) \cap g^{N_2}_0
\big(W^{u}_{\operatorname{loc}} (R, g_0)\big).
$$
Observe also that, since $R$ and $Q$ are homoclinically related,
there is a locally maximal transitive hyperbolic set $L$ of $g_0$
containing $R$ and $Q$. Moreover, we can assume (and we do) that
$L$ is disjoint from the orbit of the point $X$.
We consider a ``generic" perturbation $g_0'$ of $g_0$ given by
Lemma~\ref{l.homocliniclyapunov} obtaining a periodic point $S_{g_0'}\in
L_{g_0'}$ which satisfies ${\mathfrak{P}}_{i,\delta}$ and having iterates
arbitrarily close to $R_{g_0'}$. This implies that
$$
(g_0')^{-N_1} \big( W^{ss}_{\operatorname{loc}}({S_{g_0'} , g_0'})\big) \quad
\mbox{and} \quad (g_0')^{N_2} \big(W^{u}_{\operatorname{loc}}(S_{g_0'},
g_0')\big)
$$
have points that are close to $X$. Since $X$ is disjoint from the
orbit of $S_{g_0'}$ we can perform a local perturbation $g_1$ of
$g_0'$ in a small neighborhood of $X$ having a strong
homoclinic intersection associated to $S_{g_1}$. This completes
the sketch of the proof of the claim.
\end{proof}
\begin{proof}[Sketch of the proof of Claim~\ref{cl.getacycle}]
By a small local perturbation $g_2$ of $g_1$ bifurcating the point
$S_{g_1}$ we get two points $\bar R_{g_2}$ and $\bar S_{g_2}$ of
indices $i-1$ and $i$ such that
\begin{itemize}
\item
$\bar S_{g_2}$ is still
homoclinically related to $P_{g_2}$,
\item
the manifolds $W^u(\bar R_{g_2},g_2)$ and $W^s(\bar S_{g_2},g_2)$
have a transverse intersection point $Y$, and \item the $N_2$-th
iterate of
$W^u_{\operatorname{loc}}(\bar S_{g_2},g_2)$ and the $N_1$-th iterate by $g_2^{-1}$ of
$W^s_{\operatorname{loc}}(\bar R_{g_2},g_2)$ have points that are close.
\end{itemize}
As
above, there is a small local perturbation $g$ of $g_2$ such that
the intersection $W^u(\bar S_{g},g)\cap W^s(\bar R_{g},g)$ is
non-empty. The support of this perturbation is disjoint
from the orbits of the saddles $\bar S_{g_2}$ and $\bar R_{g_2}$,
the transverse intersection
point $Y$, and a pair of transverse heteroclinic points between
$\bar S_{g_2}$ and $P_{g_2}$. As a consequence, the diffeomorphism
$g$ has a heterodimensional cycle associated to $\bar S_g$ and
$\bar R_g$ and $\bar S_g$ is homoclinically related to $P_g$. This
completes the proof of the claim.
\end{proof}
This completes the proof of Proposition~\ref{p.cycle}.
\qed
\subsection{Proof of Proposition~\ref{p.excfinal}}
\label{ss.pexcfinal} Consider any small $\varepsilon,\delta>0$.
The proof of this proposition follows exactly as the one of
Proposition~\ref{p.cycle} after finding an
$\varepsilon$-perturbation $g_0$ of $f$ and two saddles $R$ and
$Q$ of $g_0$ that are homoclinically related to $P_{g_0}$ and
satisfy properties ${\mathfrak{P}}_{ss}$ and ${\mathfrak{P}}_{i,\delta}$, respectively.
Let $k_0\geq 1$ be an integer associated to $\varepsilon$ given by
Proposition~\ref{p.weak}. Fix a point $Q=Q_\delta$ as in item (3') in
the proposition. For an arbitrarily small perturbation $g'$ given
by item (2') consider the point $R_{g'}$ homoclinically related to
$P_{g'}$ and satisfying ${\mathfrak{P}}_{ss}$. Note that $Q_{g'}$ also
satisfies item (3'). Moreover, the homoclinic class $H(P_{g'},g')$
does not have any dominated splitting of index $i$.
We now apply Proposition~\ref{p.weak} to get a perturbation $g_0$
of $g'$ supported on an arbitrarily small neighborhood of the
orbit of $Q_{g'}$ and such that property ${\mathfrak{P}}_{i,\delta}$ holds for
$Q_{g_0}$ and $g_0$.
Therefore all conditions in the proposition are satisfied.
\hfil \qed
\section{Viral classes}\label{s.viral}
In this section we prove Theorem~\ref{t.bviral}. We begin with a
definition.
\begin{defi}[Property ${\mathfrak{V}}''$] \label{d.propertyV}
The chain recurrence class
$C(P,f)$ of a saddle $P$ of a diffeomorphism $f\in {\mbox{{\rm Diff}}^1}(M)$, $\dim (M)=d$, satisfies
\emph{Property ${\mathfrak{V}}''$} if the following conditions hold:
\begin{enumerate}
\item\label{i.dviral1}
for every $j\in\{1,\dots, d-1\}$ there exists a periodic point
$Q_{j}$ whose multipliers $\lambda_j(Q_j)$ and $\lambda_{j+1}(Q_j)$ are
non-real and whose Lyapunov exponents satisfy $\chi_{k}(Q_j)\ne
\chi_j(Q_j)$ for all $k\ne j,j+1$,
\item\label{i.dviral2}
let $i$ be the $s$-index of $P$; if $j$ is different from $i$ then
the points $P$ and $Q_{j}$ are homoclinically related,
\item\label{i.dviral3}
if $j=i$ then $Q_i$ has $s$-index $i+1$ or $i-1$ and there are two
hyperbolic transitive sets $L$ and $K$ containing $P$ and $Q_{i}$
and having a robust heterodimensional cycle, and
\item \label{i.dviral4}
there are saddles $Q^+$ and $Q^-$ homoclinically related to $P$
such that
\begin{equation}\label{e.propV}
\chi_1(Q^-)+\chi_2(Q^-)<0 \quad \mbox{and} \quad
\chi_{d-1}(Q^+)+\chi_d(Q^+)>0.
\end{equation}
\end{enumerate}
\end{defi}
Note that the points $Q_j$ in the definition belong to the chain
recurrence class $C(P,f)$. This is obvious for the saddles $Q_j$,
$j\ne i$, that are homoclinically related to $P$. For the saddle
$Q_i$ this follows from the existence of the hyperbolic
transitive sets $L$ and $K$ containing $P$ and $Q_{i}$ and related
by a heterodimensional cycle.
Note also that properties ${\mathfrak{V}}$,
${\mathfrak{V}}'$, and ${\mathfrak{V}}''$ (recall Definitions~\ref{d.propertyS} and
\ref{d.propertySprime}) are open by definition. The next two
lemmas imply that these three
properties are equivalent ``open and densely".
\begin{lemm} \label{l.VimpliesS}
Consider a saddle $P$ and its chain recurrence class $C(P,f)$. If
Property ${\mathfrak{V}}''$ holds for $C(P,f)$ then Property ${\mathfrak{V}}$ holds for
$C(P,f)$. Moreover, if the dimension $d\geq 4$, then property
${\mathfrak{V}}'$ also holds for $C(P,f)$.
\end{lemm}
\begin{proof}
Let $i$ be the $s$-index of $P$ and denote by $Q_{j}$ the saddles
in Property ${\mathfrak{V}}''$.
Condition (\ref{i.dviral1}) and the fact that
$Q_{j}$ belongs to
$C(P,f)$ robustly implies that there is a neighborhood $\cV_{j}$
of $f$ such that, for all $h\in \cV_j$, the class $C(P_h,h)$
cannot have a dominated splitting $E\oplus F$ of index $j$. Since
this holds for all $j=1,\dots,d-1$, the non-domination condition
follows for the class $C(P_h,h)$ for every diffeomorphism $h\in
\cV=\cap_{j=1}^{d-1} \cV_{j}$.
The fact that $C(P_h,h)$ contains a saddle of $s$-index different
from $i$ for all $h\in \cV$ follows from condition
(\ref{i.dviral3}) after recalling that $Q_{i,h}\in C(P_h,h)$ and
that its $s$-index is $i\pm 1$.
In dimension $d\geq 4$, either $P$ or $Q_i$ has s-index different from $1$ and $d-1$.
\end{proof}
\begin{lemm} \label{l.SimpliesV}
Consider a saddle $P_f$ and its chain recurrence class $C(P_f,f)$.
Let $\cV$ be a neighborhood of $f$ such that Property ${\mathfrak{V}}$
holds for $C(P_g,g)$ for all $g\in \cV$. Then there is an open and
dense subset $\cW$ of $\,\cV$ such that $C(P_g,g)$ satisfies
${\mathfrak{V}}''$ for all $g\in \cW$. In dimension $d\geq 4$, the same holds
when ${\mathfrak{V}}$ is replaced by ${\mathfrak{V}}'$.
\end{lemm}
\begin{proof}
Assume that $C(P_g,g)$ satisfies property ${\mathfrak{V}}$ for all $g\in
\cV$. Let $i$ be the $s$-index of $P_f$.
Proposition~\ref{p.complex} implies that there is an open and
dense subset $\cW'$ of $\cV$ such that for all $j\ne i$ and all
$g\in \cW'$ there is a saddle $Q_{j,g}$ of $s$-index $i$
homoclinically related to $P_g$ whose $j$-th multipliers and
exponents satisfy condition (\ref{i.dviral1}). This implies items
(\ref{i.dviral1}) and (\ref{i.dviral2}) in Property~${\mathfrak{V}}''$ for
$j\ne i$.
In what follows we use some properties of $C^1$-generic
diffeomorphisms. Given two hyperbolic saddles $P_f$ and $Q_f$ of a
generic diffeomorphism $f$ then $C(P_f,f)=H(P_f,f)$. Moreover, if
$Q\in C(P,f)$ then there is a neighborhood $\cU$ of $f$ such that
$Q_g\in C(P_g,g)$ for all $g\in \cU$, see \cite{BC:04}.
Furthermore, if $H(P,f)$ contains saddles of $s$-indices $i<j$
then it contains a saddle of $s$-index $k$ for all $k\in (i,j)\cap
\NN$, see \cite{ABCDW:07}.
By the comments above, after a perturbation, we can assume that
the saddle $Q_g$ in Property~${\mathfrak{V}}$ has $s$-index $i\pm 1$ for all
$g\in
\cW'$. Let us assume, for instance, that this index is $i+1$.
Note that $C(P_g,g)=C(Q_g,g)$ and that, by hypothesis, this
class has no dominated splitting. Arguing as above, but now
considering the saddle $Q_g$ of $s$-index $i+1$, we get saddles
$Q_g'$ homoclinically
related to $Q_g$ whose multipliers and exponents satisfy condition
(\ref{i.dviral1}) for $j=i+1$. By construction, these saddles are
robustly in the same chain recurrence class of $Q_g$ and therefore
in $C(P_g,g)$.
By Corollary~\ref{c.bdk}, there exists two hyperbolic transitive
sets $L$ and $K$ containing $P_g$ and $Q'_g$ with a robust
heterodimensional cycle. Taking $Q_{i,g}=Q_g'$ we get condition
(\ref{i.dviral1}) for $j=i$ and condition (\ref{i.dviral3}).
Observe that condition (\ref{i.dviral4}) is trivial if the
$s$-index of $P_f$ is $i\ne 1,d-1$. Suppose that the index is $1$
(the case $d-1$ is analogous). In this case every saddle $Q^+$
homoclinically related to $P_f$ satisfies
$\chi_{d-1}(Q^+)+\chi_d(Q^+)>0$. Note that, after a perturbation
if necessary, we can assume that the homoclinic class of $P_f$
contains saddles $Q_f$ of stable index $2$.
After a new perturbation, one gets a diffeomorphism $h$ with a
heterodimensional cycle
associated to $Q_h$
and $P_h$.
By the arguments in \cite{ABCDW:07} (see Corollary 2)
the unfolding of this cycle provides diffeomorphisms $g$
with a saddle $Q_g^-$ homoclinically related to $P_g$ whose
Lyapunov
exponent $\chi_2 (Q_g^-)$ is arbitrarily close to $0^+$ while
$\chi_1(Q_g^-)$ is negative and uniformly away from $0$.
In particular, one has $\chi_{1}(Q_g^-)+\chi_2(Q_g^-)<0$.
This proves
that property ${\mathfrak{V}}''$ holds for $g$.
When $d\geq 4$, let us now assume that $C(P_g,g)$ satisfies
property ${\mathfrak{V}}'$ for all $g\in \cV$. Corollary~\ref{c.main} implies
that there is a dense and open subset of $\cV$ consisting of
diffeomorphisms $g$ such that there exists a hyperbolic periodic
point $Q_g$ in $C(P_g,g)$ with s-index different from the s-index
of $P$. In particular Property ${\mathfrak{V}}$ holds and for a smaller dense
and open subset ${\mathfrak{V}}''$ holds.
\end{proof}
Theorem~\ref{t.bviral} is now a consequence of the two lemmas
above and the following proposition.
\begin{prop}
[Viral contamination]
\label{p.viral}
Consider $f\in {\mbox{{\rm Diff}}^1}(M)$ and a saddle $P$ of $f$. Assume that the
chain recurrence class $C(P,f)$ of $P$ satisfies Property ${\mathfrak{V}}''$.
Then for every neighborhood $V$ of $H(P,f)$
there exist a diffeomorphism $g$ arbitrarily
$C^1$-close to $f$ and a hyperbolic periodic point $Q_g$ of $g$
such that:
\begin{enumerate}
\item \label{i.pviral1}
the
orbit of $Q_g$ is
arbitrarily close to $H(P_f,f)$ for the Hausdorff distance,
\item \label{i.pviral3bis}
there is an open neighborhood $U\subset V$ of the orbit of $Q_g$
such that $P \not\in U$ and either $g(\overline U)\subset U$ or
$g^{-1}(\overline U)\subset U$, and
\item \label{i.pviral2}
$C(Q_g,g)$ satisfies Property ${\mathfrak{V}}''$.
\end{enumerate}
\end{prop}
Note that item (\ref{i.pviral3bis}) implies that $C(Q_g,g)$ is
disjoint from the chain-re\-cu\-rren\-ce class of $P_g$ (that
contains $H(P_g,g)$). Recall that property ${\mathfrak{V}}$ is robust. Thus
this proposition implies that Property ${\mathfrak{V}}''$ satisfies the
self-replication condition in Definition~\ref{d.bviral}.
\subsection{Proof of
Proposition~\ref{p.viral}}\label{ss.proofofpviral}
We consider small $\varepsilon>0$ and an upper bound $K$ of the norms of
$Df$ and $Df^{-1}$. Let $k_0$ and $\ell_0$ be the constants associated
to $\varepsilon$ and $K$ in Lemmas~\ref{l.gdcds} and \ref{l.bgv}. Let $i$
be the $s$-index of $P$. For clarity, we split the proof of the
proposition into six steps.
\medskip
\noindent{\emph{Step I: Construction of the saddle $Q$.}} Consider
periodic points $Q^+$ and $Q^-$ as in equation \eqref{e.propV} in
Definition~\ref{d.propertyV}, i.e. $\chi_1(Q^-)+\chi_{2}(Q^-)<0$
and $\chi_{d-1}(Q^+)+\chi_{d}(Q^+)>0$. Note that there exists a
locally maximal transitive hyperbolic set $\Lambda_f$ such that
\begin{itemize}
\item $\Lambda_f$ contains $P$, $Q^+$, and $Q^-$,
\item
$\Lambda_f\subset H(P,f)$, and
\item
$\Lambda_f$ is arbitrarily close to $H(P,f)$ for the Hausdorff metric.
\end{itemize}
In particular, the set $\Lambda$ has no $k_0$-dominated splitting.
\begin{clai}\label{cl.hausdorff} There is a
perturbation $g_0$ of $f$ such that the continuation $\Lambda_{g_0}$
of $\Lambda$ is the Hausdorff limit of the orbits of periodic points
$Q_{g_0}\in \Lambda_{g_0}$ such that
\begin{equation}\label{e.clhausdorff}
\chi_1(Q_{g_0}) + \chi_2(Q_{g_0})<0 \quad \mbox{and} \quad
\chi_{d-1}(Q_{g_0}) + \chi_d(Q_{g_0})>0.
\end{equation}
Moreover, the set $\Lambda_{g_0}$ has no $k_0$-dominated splitting of
any index.
\end{clai}
\begin{proof}
If the $s$-index $i$ of $P$ belongs to $\{2,\dots,d-2\}$ then the
condition on the Lyapunov exponents holds for any saddle
homoclinically related to $P$. Thus it is enough to consider the
cases $i=1$ and $i=d-1$.
Let us assume that $i=1$ (the case $i=d-1$ is similar). In this case,
$\chi_{d-1}(Q) + \chi_d(Q)>0$ for every saddle $Q$ that is
homoclinically related to $P$. Consider the saddle $Q^-\in \Lambda$.
Taking a perturbation $g$ of $f$ in the residual set $\cG$ in
Lemma~\ref{l.homocliniclyapunov}, we can ``spread" the property
$\chi_1(Q) +\chi_2(Q)<0$ over the hyperbolic set $\Lambda_{g_0}$,
obtaining the point $Q_{g_0}$. This completes the first part of
the claim.
Since $g_0$ is close to $g$ and $\Lambda_{g_0}$ is close to $\Lambda$,
there is no $k_0$-dominated splitting over $\Lambda_{g_0}$. This ends
the proof of the claim.
\end{proof}
By Lemma~\ref{l.dominatedclosure}, we can take the point $Q_{g_0}$
in Claim~\ref{cl.hausdorff} such that its orbit does not have any
$k_0$-dominated splitting, has period larger than $\ell_0$, and
its distance to the homoclinic class $H(P_{g_0},g_0)$ is
arbitrarily small. This completes the choice of the point
$Q=Q_{g_0}$.
\medskip
\noindent{\emph{Step II: Separation of homoclinic classes.}} By
Lemma~\ref{l.bgv} there is an $\varepsilon$-perturbation $g_1$ of $g_0$
supported on an arbitrarily small neighborhood of the orbit of
$Q_{g_0}$ such that the orbit of $Q_{g_1}$ is a sink or a source
for $g_1$. In what follows, let us assume that $Q_{g_1}$ is a
sink. Thus there is an open set $U\subset V$ containing the orbit
of $Q_{g_1}$ such that $g_1(\overline U)\subset U$ and $U$ is
disjoint from the homoclinic class of $P_{g_1}$. Note that these
properties hold for any diffeomorphism $g$ that is $C^0$-close to
$g_1$.
This implies item (\ref{i.pviral3bis}) in the proposition.
Recall that the choice of $Q$ and the neighborhood $U$ imply that,
for any perturbation $g$ of $g_1$, the homoclinic class $H(Q_g,g)$
is close to $H(P_f,f)$. This gives item (\ref{i.pviral1}) of the
proposition.
\medskip
\noindent{\emph{Step III: Non-trivial homoclinic class of $Q$.}}
Note that after an $\varepsilon$-per\-tur\-ba\-tion we can ``recover" the
original cocycle given by the derivative $Dg_0$ over the orbit of
$Q_{g_0}$, now defined over the orbit of $Q_{g_1}$. In particular,
there is no $k_0$-dominated splitting over the orbit of $Q_{g_1}$,
conditions in equation \eqref{e.clhausdorff} hold, and the saddle
$Q_{g_1}$ has $s$-index $i$. In what follows all perturbations $g$
we consider will preserve the cocycle over the orbit of
$Q_{g_1}$. Hence the homoclinic class of $Q_{g}$ will satisfy
item (\ref{i.dviral4}) in Property ${\mathfrak{V}}''$.
Finally, by Lemma~\ref{l.gdcds} and Remark~\ref{r.gdcds}, there is
an $\varepsilon$-perturbation $g_2$ of $g_1$ supported on an arbitrarily
small neighborhood of the orbit of $Q_{g_1}$ such that the
homoclinic class of $Q_{g_2}$ is non-trivial.
\medskip
\noindent{\emph{Step IV: No domination for the homoclinic class of
$Q$.}} Since there is no $k_0$-dominated splitting over the orbit
of $Q_{g_1}$, by Corollary~\ref{c.1}, there is an
$\varepsilon$-perturbation $g_3$ of $g_2$ such that for any $j\ne i$,
$j\in \{1,\dots, d-1\}$, there is a periodic point $Q_{j,g_3}$
homoclinically related to $Q_{g_3}$ that satisfies Property
${\mathfrak{P}}_{j,j+1,\CC}$. In what follows, all perturbations that we will
perform will preserve these properties. This implies that the
homoclinic class will satisfy items (\ref{i.dviral1}) and
(\ref{i.dviral2}) in the definition of Property~${\mathfrak{V}}''$ for every
$j\ne i$.
Finally, for $j=i$, as the class $H(Q_{g_3},g_3)$ is not
$k_0$-dominated, using
Lemma~\ref{l.gdcds} and
Remark~\ref{r.gdcds} we can generate a homoclinic tangency inside
the class after an $\varepsilon$-perturbation $g_4$ of $g_3$. This
prevents the existence of a dominated splitting of index $i$ for
$g_4$.
Note that to complete the proof of the proposition it remains to
get items (\ref{i.dviral1}) for $j=i$ and (\ref{i.dviral3}) of
Property~${\mathfrak{V}}''$.
\medskip
\noindent{\emph{Step V: Generation of a robust heterodimensional
cycle.}} Recall that the homoclinic class $H(Q_{g_4},g_4)$ does not
have any dominated splitting. There are three possibilities for the
$s$-index $i$ of $P$. If $i\in\{2,\dots,d-2\}$ we can apply
Corollary~\ref{c.main} to get $g_5$ close to $g_4$ with a robust
heterodimensional cycle associated to a hyperbolic set $L_{g_5}$
containing $P_{g_5}$ and a hyperbolic set $K_{g_5}$ of stable
index $i+1$ or $i-1$.
Assume now that $i=d-1$. Recall that $\chi_{d-1}(Q_{g_4}) +
\chi_d(Q_{g_4})>0$. Thus the hypotheses in Theorem~\ref{t.main}
are satisfied by $g_4$ and we get a diffeomorphism $g_5$ having a
robust heterodimensional cycle as before.
Finally, the case $i=1$ is analogous to the case $i=d-1$. Hence we
obtain item (3) in Property~${\mathfrak{V}}''$.
\medskip
\noindent{\emph{Step VI: And finally Property~${\mathfrak{V}}''$ holds.}}
Note that since the sets $L_{g_5}$ and $K_{g_5}$ have a robust
heterodimensional cycle, for all $g$ close to $g_5$ they are
contained in the same chain recurrence class. Thus by
\cite[Remarque 1.10]{BC:04} there is a residual subset $\cG'$ of
${\mbox{{\rm Diff}}^1}(M)$ such that for every $f\in \cG'$, every periodic point of
$f$ is hyperbolic and its homoclinic and chain recurrence classes
coincide. In particular, for diffeomorphisms in $\cG'$ the
homoclinic classes of two periodic points either coincide or are
disjoint.
Therefore for any $g_6\in \cG'$ close to
$g_5$ there is a periodic point $R_{g_6}\in K_{g_6}$ such that the
homoclinic classes $H(R_{g_6},g_6)$ and $H(Q_{g_6},g_6)$ coincide.
Hence the homoclinic class $H(R_{g_6},g_6)$ does not have any
$k_0$-dominated splitting of index $i$.
By Proposition~\ref{p.complex},
there is a saddle $Q_{i,g_6}$ homoclinically related to $R_{g_6}$
such that there is an $\varepsilon$-perturbation of $g$ along the orbit
of $Q_{i,g_6}$ that is adapted to $H(R_{g_6},g_6)$ and to property
${\mathfrak{P}}_{i,i+1,\CC}$. Since the perturbation is adapted, there is a
transitive hyperbolic set $K'_g$ containing $Q_{i,g}$ and $K_g$.
Thus the diffeomorphism $g$ has a robust heterodimensional cycle
associated to $L_g$ and $K'_g$. This ends the proof of the
proposition. \hfil \qed
\subsection{Proof of Corollary~\ref{c.bviral}} \lambdabel{ss.cviral}
Recall that the residual subset $\cG'$ of ${\mbox{{\rm Diff}}^1}(M)$ introduced in
Step VI consists of diffeomorphisms whose periodic points are all
hyperbolic. In particular, these diffeomorphisms have at most
countably many periodic points and hence countably many homoclinic
classes which are either disjoint or coincide.
By Lemma~\ref{l.SimpliesV}, there exists a dense open subset
$\cW\subset \cU$ such that $C(P_g,g)$ satisfies ${\mathfrak{V}}''$ for all
$g\in \cW$.
Recall that a filtrating neighborhood is an open set $U$ such that
$U=U_+ \cap U_-$ where $U_+$ and $U_-$ are open sets such that
$f(\overline{U_+}) \subset U_+$ and $f^{-1}(\overline{U_-})
\subset U_-$. Observe that there is a
filtrating neighborhood for the
chain recurrence class of $Q_g$ separating this class and the
class of $P_g$. In particular, these two recurrence classes are
disjoint. Thus Theorem~\ref{t.bviral} allows us to repeat this
process, generating new classes satisfying Property~${\mathfrak{V}}''$.
Inductively, for each $n\in \NN$ we get an open and dense subset
$\cU_n$ of $\cU$ consisting of diffeomorphisms having (at least)
$n$ disjoint homoclinic classes. Therefore, the set
$$
\cG_\cU=\cG' \cap\ \bigcap_{n\in \NN} \cU_n
$$
is a residual subset of $\cU$ consisting of diffeomorphisms with
infinitely (countably) many homoclinic classes. This implies the
first part of the corollary.
To see that there are uncountably many chain recurrence classes
note that the first step of the construction provides two disjoint
filtrating open sets, the set $V_0=U$ containing the chain
recurrence class of $P_g=Q^0$ and the set $V_1$ containing the
chain recurrence class of $Q_g=Q^1$.
Repeating this process $n$ times, we can assume that for each map
$g\in \cG_\cU$ at each step we get $2^n$ open filtrating sets
$V_{i_1,\dots,i_n}$, $i_k=0,1$, that are pairwise disjoint and
nested (i.e. $V_{i_1,\dots,i_n}\subset V_{i_1,\dots,i_{n-1}}$),
and
each set contains a chain recurrence class with property
${\mathfrak{V}}''$. Note that these classes are different and pairwise
disjoint.
Arguing inductively, we can repeat the construction of the first
step for every finite sequence $i_1,\dots,i_n$, getting a new pair
of filtrating neighborhoods $V_{i_1,\dots,i_{n},0}$ and
$V_{i_1,\dots,i_n,1}$ contained in $ V_{i_1,\dots,i_n}$
and each of them containing a chain recurrence class satisfying
Property~${\mathfrak{V}}''$.
Finally, for each infinite sequence $\iota=(i_k)$ consider the set
$$
K_\iota =\bigcap_{k=1}^\infty \overline{V_{i_1,\dots,i_k}}.
$$
By construction, each set $K_\iota$ contains some recurrent point
$X_\iota$ and given two different sequences $\iota$ and $\iota'$
the chain recurrence classes of $X_{\iota}$ and $X_{\iota'}$ are
different. Thus for $g\in \cG_\cU$ to each sequence $\iota$ we
associate a chain recurrent class $C(X_{\iota},g)$ and this map is
injective.
We have shown that every $g\in \cG_\cU$ has uncountably many chain
recurrence classes. Since, by the definition of $\cG'$, the
diffeomorphism $g$ has only countably many periodic points, there
are uncountably many aperiodic classes. This completes the proof
of the corollary. \hfil \qed
\subsection{Examples}\lambdabel{ss.examples}
We close this paper by providing examples of diffeomorphisms
satisfying viral properties that do not exhibit universal
dynamics.
\betagin{prop}\lambdabel{p.nonempty}
Given any closed manifold $M$ of dimension $d\ge 3$ there is a
non-empty open set of diffeomorphisms having homoclinic classes
satisfying Property~${\mathfrak{V}}$. Moreover, the open set can be chosen
such that the Jacobians of the diffeomorphisms are strictly less
than one over these homoclinic classes.
\end{prop}
The construction follows arguing exactly as in \cite[Appendix
6]{BD:02}. Just note that in this case we do not assume the
existence of a pair of points $P'$ and $Q'$ with Jacobians less
and larger than one as in \cite{BD:02}. A different approach is to
consider perturbations of systems having {\emph{heterodimensional
tangencies}} as in \cite{DNP:06}.
\betagin{thebibliography}{100}
\bibitem{A:03}
F. Abdenur, \emph{Generic robustness of spectral decompositions,}
Ann. Sci. \'Ecole Norm. Sup., {\bf 36} (2003), 213--224.
\bibitem{ABC:}
F. Abdenur, Ch. Bonatti, and S. Crovisier, \emph{Nonuniform
hyperbolicity for $C^1$-generic diffeomorphisms,}
arXiv:0809.3309v1 and to appear in Israel Jour. of Math.
\bibitem{ABCDW:07}
F. Abdenur, Ch. Bonatti, S. Crovisier, L. J. D\'\i az, and L. Wen,
{\emph{Periodic points and homoclinic classes,}} Ergod. Th. and
Dynam. Syst., \textbf{27} (2007), 1--22.
\bibitem{AS:70}
R. Abraham and S. Smale, {\emph{Nongenericity of $\Omega
$-stability,}} Global Analysis (Proc. Sympos. Pure Math., Vol.
XIV, Berkeley, Calif., 1968), 5--8 Amer. Math. Soc., Providence,
R.I, (1970).
\bibitem{A:08}
M.~Asaoka, {\emph{Hyperbolic sets exhibiting $C^1$-persistent
homoclinic tangency for higher dimensions,}} Proc. Amer. Math.
Soc. {\bf 136} (2008), 677--686.
\bibitem{B:bible}
Ch. Bonatti, \emph{Towards a global view of dynamical systems, for
the $C^1$-topology,} pr\'e-publication Institut de Math\'ematiques
de Bourgogne (2010).
\bibitem{BB:pre} J. Bochi and
Ch. Bonatti, {\emph{Perturbation of the Lyapunov spectra of
periodic orbits,}} arXiv:1004.5029.
\bibitem{BC:04}
Ch. Bonatti and S. Crovisier, {\emph{R\'ecurrence et
g\'en\'ericit\'e,}} Inventiones Math., {\bf 158} (2004), 33--104.
\bibitem{BD:95}
Ch. Bonatti and L.J. D\'\i az, {\emph{Persistence of transitive
diffeomorphisms,}} Ann. Math., {\bf 143} (1995), 367--396.
\bibitem{BD:99}
Ch. Bonatti and L.J. D\'\i az, {\emph{Connexions h\'et\'eroclines
et g\'en\'ericit\'e d'une infinit\'e de puits ou de sources,}}
Ann. Scient. \'Ec. Norm. Sup., {\bf 32}, 135-150, (1999).
\bibitem{BD:02}
Ch. Bonatti and L.J. D\'\i az, {\emph{On maximal transitive sets
of generic diffeomorphisms,}} Publ. Math. Inst. Hautes \'Etudes
Sci., {\bf 96} (2002), 171--197.
\bibitem{BD:08} Ch. Bonatti and L.J. D\'\i az,
{\emph{Robust heterodimensional cycles and $C^1$-generic
dynamics,}} Journal of the Inst. of Math. Jussieu, \textbf{7}
(2008), 469--525.
\bibitem{BD:pre} Ch. Bonatti and L.J. D\'\i az, {\emph{Abundance of
$C^1$-robust homoclinic tangencies,}} to appear in Trans. A. M. S
and arXiv:0909.4062.
\bibitem{BDK:pre} Ch. Bonatti, L.J. D\'\i az, and S. Kiriki,
{\emph{Robust heterodimensional cycles and hyperbolic
continuations,}} in preparation.
\bibitem{BDP:03} Ch. Bonatti, L.J. D\'\i az, and E.R. Pujals,
{\emph{A ${\mathcal C}^1$-generic dichotomy for diffeomorphisms: Weak forms
of hyperbolicity or infinitely many sinks or sources,}} Ann. of
Math., {\bf 158} (2003), 355--418.
\bibitem{BDV:04}
Ch. Bonatti, L.J. D\'\i az, and M. Viana, {\emph{Dynamics beyond
uniform hyperbolicity,}} Encyclopaedia of Mathematical Sciences
(Mathematical Physics), {\bf 102}, Springer Verlag, (2004).
\bibitem{BGV:06}
Ch. Bonatti, N. Gourmelon, and T. Vivier, \emph{Perturbations of
the derivative along periodic orbits,} Ergodic Th. and Dynam.
Syst., {\bf 26} (2006), 1307--1337.
\bibitem{Co:98} E. Colli, {\emph{Infinitely many coexisting strange
attractors,}} Ann. Inst. H. Poincar\'e Anal. Non Lin\'eaire, {\bf
15} (1998), 539--579.
\bibitem{C:pre} S. Crovisier, {\emph{Birth of homoclinic intersections: a model for the central dynamics of partially hyperbolic systems,}}
arXiv:math/0605387 and to appear in Ann. Math.
\bibitem{CP:prep}
S. Crovisier and E. R. Pujals, \emph{Essential hyperbolicity and
homoclinic bifurcations: a dichotomy phenomenon/mechanism for
diffeomorphisms}, in preparation.
\bibitem{DNP:06} L. J. D\'\i az, A. Nogueria, and E. R. Pujals,
{\emph{Heterodimensional tangencies,}} Nonlinearity, {\bf 19} (2006),
2543--2566.
\bibitem{F:71} J. Franks, {\emph{Necessary conditions for stability of
diffeomorphisms,}} Trans. Amer. Math. Soc., {\bf 158} (1971),
301--308.
\bibitem{GW:03}
S. Gan and L. Wen, {\emph{ Heteroclinic cycles and homoclinic
closures for generic diffeomorphisms,}} J. Dynam. Differential
Equations, {\bf 15} (2003), 451--471.
\bibitem{G:10}
N. Gourmelon, {\emph{Generation of homoclinic tangencies by
$C^1$-perturbations,}} Discrete Contin. Dyn. Syst. {\bf 26}
(2010), 1--42.
\bibitem{G:pre}
N. Gourmelon, {\emph{A Franks' lemma that preserves invariant
manifolds,}} arXiv:0912.1121v2.
\bibitem{M:78} R. Ma\~n\'e,
{\emph{Contributions to the stability conjecture,}} Topology, {\bf
17} (1978), 383--396.
\bibitem{M:pre}
C. G. Moreira, {\emph{There are no $C^1$-stable intersections of
regular Cantor sets,}} Pre-print IMPA 2008,
{\tt{http://www.preprint.impa.br/cgi-bin/MMMsearch.cgi}}
\bibitem{N:78}
S. Newhouse, {\emph{Diffeomorphisms with infinitely many sinks,}}
Topology, {\bf 13} (1974), 9--18.
\bibitem{N:79}
S. Newhouse, {\emph{The abundance of wild hyperbolic sets and
nonsmooth stable sets for diffeomorphisms,}} Publ. Math. I. H.E.S,
{\bf 50} (1979), 101--151.
\bibitem{N:04}
S. Newhouse, {\em New phenomena associated with homoclinic
tangencies,\/} Ergodic Theory Dynam. Systems, {\bf 24} (2004),
1725--1738.
\bibitem{PPV:05}
M. J. Pacifico, E. R. Pujals, and J. L. Vietez, {\emph{ Robustly
expansive homoclinic classes,}} Ergod. Th. and Dynam. Syst., {\bf
25} (2005), 271--300.
\bibitem{P:00}
J. Palis, \emph{A global view of dynamics and a conjecture on the
denseness of finitude of attractors,} Ast\'erisque, {\bf 261}
(2000), 335--347.
\bibitem{PT:93}
J. Palis and F. Takens, {\emph{Hyperbolicity and sensitive chaotic
dynamics at homoclinic bifurcations. Fractal dimensions and
infinitely many attractors,}} Cambridge Studies in Advanced
Mathematics, {\bf 35}, Cambridge University Press, Cambridge,
(1993).
\bibitem{PV:94}
J. Palis and M. Viana, {\emph{High dimension diffeomorphisms
displaying infinitely many periodic attractors,}} Ann. of Math.,
{\bf 140} (1994), 207--250.
\bibitem{PS:00}
E. R. Pujals and M. Sambarino, \emph{Homoclinic tangencies and
hyperbolicity for surface diffeomorphisms,} Ann. of Math., {\bf
151} (2000), 961--1023.
\bibitem{R:95} N. Romero,
{\emph{Persistence of homoclinic tangencies in higher
dimensions,}} Ergodic Theory Dynam. Systems, {\bf 15} (1995),
735--757.
\bibitem{S:pre} K. Shinohara, {\emph{On the indices of periodic points in $C^1$-generic wild homoclinic classes in dimension three,}}
arXiv:1006.5571.
\bibitem{S:72}
C.P. Simon, {\emph{Instability in $\textrm{Diff}(T^3)$ and the
nongenericity
of rational zeta function,}} Trans. A.M.S.,
{\bf 174} (1972), 217--242.
\bibitem{W:04}
L. Wen, \emph{Generic diffeomorphisms away from homoclinic
tangencies and heterodimensional cycles,} Bull. Braz. Math. Soc.
(N.S.), {\bf 35} (2004), 419--452.
\end{thebibliography}
\end{document}
\begin{document}
\begin{frontmatter}
\title{Constant Modulus Algorithms Using Hyperbolic Givens Rotation}
\author[Canada] {A. Ikhlef} \ead{[email protected]}
\author[Algeria]{R. Iferroujene} \ead{[email protected]}
\author[France] {A. Boudjellal\corref{cor1}} \ead{[email protected]}
\author[France] {K. Abed-Meraim\corref{cor2}} \ead{[email protected]}
\author[Algeria]{A. Belouchrani} \ead{[email protected]}
\address[Canada] {ECE Dep., Univ. of British Columbia, 2356 Main Mall, Vancouver, V6T 1Z4, Canada.}
\address[Algeria] {EE Dep., Ecole Nationale Polytechnique, BP 182 EL Harrach, 16200 Algiers, Algeria.}
\address[France] {Polytech'Orleans, PRISME Laboratory, 12 Rue de Blois, 45067 Orleans, France.}
\cortext[cor1]{Corresponding author.}
\cortext[cor2]{Principal corresponding author.}
\begin{abstract}
We propose two new algorithms to minimize the constant modulus (CM) criterion in the context of blind source separation. The first algorithm, referred to as Givens CMA (G-CMA), uses unitary Givens rotations and proceeds in two stages: a prewhitening step, which reduces the channel matrix to a unitary one, followed by a separation step where the resulting unitary matrix is computed using Givens rotations by minimizing the CM criterion. However, for small sample sizes, the prewhitening does not make the channel matrix close enough to unitary and hence applying Givens rotations alone does not provide satisfactory performance. To remedy this problem, we propose to use non-unitary Shear (Hyperbolic) rotations in conjunction with Givens rotations. This second algorithm, referred to as Hyperbolic G-CMA (HG-CMA), is shown to outperform the G-CMA as well as the Analytical CMA (ACMA) in terms of separation quality. The last part of this paper is dedicated to an efficient adaptive implementation of the HG-CMA and to performance assessment through numerical experiments.
\end{abstract}
\begin{keyword}
Blind Source Separation, Constant Modulus Algorithm, Adaptive CMA, Sliding Window, Hyperbolic Rotations, Givens Rotations.
\end{keyword}
\end{frontmatter}
\section{Introduction}
\label{Sec:Intro}
During the last two decades, Blind Source Separation (BSS) has attracted an important interest. The main idea of BSS consists of finding the transmitted signals without using pilot sequences or a priori knowledge on the propagation channel. Using BSS in communication systems has the main advantage of eliminating training sequences, which can be expensive or impossible in some practical situations, leading to an increased spectral efficiency. Several BSS criteria have been proposed in the literature e.g. \cite{Haykin_Bk_00, Comon_Bk}. The CM criterion is probably the best known and most studied higher order statistics based criterion in blind equalization \cite{Yang_98, Abrar_10, Amine_04, Labed_13} and signal separation \cite{Adel_96, Papadias_00, Papadias_04, Veen_Chap_05} areas. It exploits the fact that certain communication signals have the constant modulus property, as for example phase modulated signals. The Constant Modulus Algorithm (CMA) was developed independently by \cite{Godard_80, Treichler_83} and was initially designed for PSK signals. The CMA principle consists of preventing the deviation of the squared modulus of the outputs at the receiver from a constant. The main advantages of CMA, among others, are its simplicity, robustness, and the fact that it can be applied even for non-constant modulus communication signals.
Many solutions to the minimization of the CM criterion have been proposed (see \cite{Veen_Chap_05} and references therein). The CM criterion was first minimized via adaptive Stochastic Gradient Algorithm (SGA) \cite{Treichler_83} and later on many variants have been devised. It is known, in adaptive filtering, that the convergence rate of the SGA is slow. To improve the latter, the authors in \cite{Chen_04} proposed an implementation of the CM criterion via the Recursive Least Squares (RLS) algorithm. The author in \cite{Agee_86} proposed to rewrite the CM criterion as a least squares problem, which is solved using an iterative algorithm named Least Squares CMA (LS-CMA). In \cite{Veen_ACMA_96}, the authors proposed an algebraic solution for the minimization of the CM criterion. The proposed algorithm is named Analytical CMA (ACMA) and consists of computing all the separators, at one time, through solving a generalized eigenvalue problem. The main advantage of ACMA is that, in the noise free case, it provides the exact solution, using only few samples (the number of samples must be greater than or equal to $M^{2}$, where $M$ is the number of transmitting antennas). Moreover, the performance study of ACMA showed that it converges asymptotically to the Wiener receiver \cite{Veen_01}. However, the main drawback of ACMA is its numerical complexity especially for a large number of transmitting antennas. An adaptive version of ACMA was also developed in \cite{Veen_Chap_05}. More generally, an abundant literature on the CM-like criteria and the different algorithms used to minimize them exists including references \cite{Veen_Chap_05, Abrar_10, Yuan_10, Lamare_10, Lamare_11}.
In this paper, we propose two algorithms to minimize the CM criterion. The first one, referred to as Givens CMA (G-CMA), performs prewhitening in order to make the channel matrix unitary then, it applies successive Givens rotations to find the resulting matrix through minimization of the CM criterion. For large number of samples, prewhitening is effective and the transformed channel matrix is very close to unitary, however, for small sample sizes, it is not, and hence results in significant performance loss. In order to compensate the effect of the ineffective prewhitening stage, we propose to use Shear rotations \cite{Fu_06, Iferr_09}. Shear rotations are non-unitary hyperbolic transformations which allow to reduce departure from normality. We note that the authors in \cite{Fu_06, Iferr_09, Souloumiac_09, Iferr_10} used Givens and Shear rotations in the context of joint diagonalization of matrices. We thus propose a second algorithm, referred to as Hyperbolic G-CMA (HG-CMA), that uses unitary Givens rotations in conjunction with non-unitary Shear rotations. The optimal parameters of both complex Shear and Givens rotations are computed via minimization of the CM criterion. The proposed algorithms have a lower computational complexity as compared to the ACMA. Moreover, unlike the ACMA which requires a number of samples greater than the square of the number of transmitting antennas, G-CMA and HG-CMA do not impose such a condition. Finally, we propose an adaptive implementation of the HG-CMA using sliding window which has the advantages of fast convergence and good separation quality for a moderate computational cost comparable to that of the methods in \cite{Agee_86, Papadias_04, Veen_Chap_05}.
The remainder of the paper is organized as follows. Section \ref{Sec:Formulation} introduces the problem formulation and assumptions. In Sections \ref{Sec:GCMA} and \ref{Sec:HGCMA}, we introduce the G-CMA and HG-CMA, respectively. Section \ref{Sec:AHGCMA} is dedicated to the adaptive implementation of the HG-CMA. Some numerical results and discussion are provided in Section \ref{Sec:Results}, and conclusions are drawn in Section \ref{Sec:Conclusion}.
\section{Problem Formulation}
\label{Sec:Formulation}
Consider the following multiple-input multiple-output (MIMO) memoryless system model with $M$ transmit and $N$ receive antennas:
\begin{equation}\label{Eq01}
\mathbf{y}(n)=\mathbf{x}(n)+\mathbf{b}(n)=\mathbf{A}\mathbf{s}(n)+\mathbf{b}(n)
\end{equation}
where $\mathbf{s}(n)=[s_{1}(n), s_{2}(n), \ldots, s_{M}(n)]^{T}$ is the $M\times 1$ source vector, $\mathbf{b}(n)=[b_{1}(n),b_{2}(n),\ldots,b_{N}(n)]^{T}$ is the $N\times 1$ additive noise vector, $\mathbf{A}$ represents the $N\times M$ MIMO channel matrix, and $\mathbf{y}(n)=[y_{1}(n), y_{2}(n), \ldots, y_{N}(n)]^{T}$ is the $N\times 1$ received vector.
In the sequel, we assume that the channel matrix $\mathbf{A}$ is full column rank (and hence $N\geq M$), the source signals are discrete valued (i.e., generated from a finite alphabet), zero-mean, independent and identically distributed (i.i.d.), mutually independent random processes, and the noise is additive white independent from the source signals. Note that these assumptions are quite mild and generally satisfied in communication applications.
Our main goal is to recover the source signals blindly, i.e., using only the received data. For this purpose, we need to compute an $M\times N$ separation (receiver) matrix $\mathbf{W}$ such that $\mathbf{W}\mathbf{y}(n)$ results in the source signals, i.e.
\begin{equation}\label{Eq02}
\mathbf{z}(n)=\mathbf{W}\mathbf{y}(n)=\mathbf{W}\mathbf{A}\mathbf{s}(n)+\bar{\mathbf{b}}(n)=\mathbf{G}\mathbf{s}(n)+\bar{\mathbf{b}}(n)
\end{equation}
where $\mathbf{z}(n)=[z_{1}(n),z_{2}(n), \ldots, z_{M}(n)]^{T}$ is the $M\times1$ vector of the estimated
source signals, $\mathbf{G}=\mathbf{W}\mathbf{A}$ is the $M\times M$ global system matrix and $\bar{\mathbf{b}}(n)=\mathbf{W}\mathbf{b}(n)$ is the filtered noise at the receiver output. Ideally, in BSS, matrix $\mathbf{W}$ separates the source signals except for a possible permutation and up to scalar factors\footnote{To remove these ambiguities, when necessary, side information or a short training sequence is always required.}, i.e.
\begin{equation}\label{Eq03}
\mathbf{W} \mathbf{x}(n)=\mathbf{P} \mathbf{\Lambda}\mathbf{s}(n)
\end{equation}
where $\mathbf{P}$ is a permutation matrix and $\mathbf{\Lambda}$ is a non-singular diagonal matrix.
In the sequel, we propose to use the well known CMA to achieve the desired BSS. In other words, we propose to estimate the separation matrix by minimizing the CM criterion:
\begin{equation}\label{Eq04}
\mathcal{J}(\mathbf{W})=\sum_{j=1}^{K}\sum_{i=1}^{M} \left(|z_{ij}|^{2}-1\right)^{2}
\end{equation}
where $z_{ij}$ is the $(i,j)$th entry of $\mathbf{Z}=\mathbf{W} \mathbf{Y}$, with $\mathbf{Y}=[\mathbf{y}(1), \mathbf{y}(2), \ldots, \mathbf{y}(K)]$ ($K$ being the sample size). This CM criterion has been used by many authors and has been shown to lead to the desired source separation for CM signals\footnote{In fact, the CMA can be used for sub-Gaussian sources (not necessary of constant modulus) as proved in \cite{Regalia_99}.
} and large sample sizes as stated below.
\begin{theorem} If $K$ is large enough such that columns of matrix $\mathbf{S}=[\mathbf{s}(1), \mathbf{s}(2),\\ \ldots, \mathbf{s}(K)]$ include all possible combinations of source vectors\footnote{Note that this is a sufficient condition only.}
$\mathbf{s}(n)$, then the criterion $\mathcal{J}(\mathbf{W})$ (where $\mathbf{W}$ is such that $\mathbf{WA}$ is non singular) is minimized if and only if $\mathbf{W}$ satisfies:
\begin{equation}\label{Eq05}
\mathbf{W} \mathbf{A} = \mathbf{P} \mathbf{\Lambda}
\end{equation}
or, in the absence of noise:
\begin{equation}\label{Eq06}
\mathbf{W}\mathbf{Y}=\mathbf{P}\mathbf{\Lambda}\mathbf{S}
\end{equation}
where $\mathbf{P}$ is an $M\times M$ permutation matrix and $\mathbf{\Lambda}$ is an $M\times M$ diagonal non-singular matrix.
\end{theorem}
\begin{proof}
The proof can easily be derived from that of Theorem $3.2$ in \cite{Talwar_96}.
\end{proof}
\section{Givens CMA (G-CMA)}
\label{Sec:GCMA}
In this section, we propose a new algorithm, referred to as G-CMA, based on Givens rotations, for the minimization of the CM criterion\footnote{Part of this section's work has been presented in \cite{Ikhlef_10}.}. It is made up of two stages:
\begin{enumerate}
\item \textit{Prewhitening:} the prewhitening stage allows to convert the arbitrary channel matrix into a unitary one. Hence, this reduces finding an arbitrary separation matrix to finding a unitary one \cite{Comon_Bk}. Moreover, prewhitening has the advantage of reducing vector size (data compression) in the case where $N>M$ and avoiding trivial undesired solutions.
\item \textit{Givens rotations:} After prewhitening, the new channel matrix is unitary and can therefore be computed via successive Givens rotations. Here, we propose to compute the optimal parameters of these rotations through minimizing the CM criterion.
\end{enumerate}
The prewhitening matrix $\mathbf{B}$ can be computed by using the classical eigendecomposition of the covariance matrix of the received signal $\mathbf{Y}$ (often, it is computed as the inverse square root of the data covariance matrix, $\frac{1}{K}\mathbf{Y}\mathbf{Y}^{H}$ \cite{Comon_Bk}). The whitened signal can then be written as:
\begin{equation}\label{Eq07}
\bar{\mathbf{Y}}=\mathbf{B}\mathbf{Y}
\end{equation}
Therefore, assuming the noise free case and that the prewhitening matrix $\mathbf{B}$ is computed using the exact covariance matrix, we have:
\begin{equation}\label{Eq08}
\bar{\mathbf{Y}}=\mathbf{B}\mathbf{A}\mathbf{S}=\mathbf{V}^{H}\mathbf{S}
\end{equation}
where $\mathbf{V}=\mathbf{A}^{H}\mathbf{B}^{H}$ is an $M\times M$ unitary matrix. From (\ref{Eq08}), it is clear that, in order to find the source signals, it is sufficient to find the unitary matrix $\mathbf{V}$ and hence the separator can simply be expressed as: $\mathbf{W} = \mathbf{V} \mathbf{B}$, which, in the absence of noise, results in $\mathbf{Z} = \mathbf{W} \mathbf{Y} = \mathbf{V} \mathbf{B} \mathbf{Y} = \mathbf{V} \bar{\mathbf{Y}} = \mathbf{V} \mathbf{V}^{H}\mathbf{S} = \mathbf{S}$.
Now, to minimize the CM criterion in (\ref{Eq04}) w.r.t. to matrix $\mathbf{V}$, we propose an iterative algorithm where $\mathbf{V}$ is rewritten using Givens rotations. Indeed, in Jacobi-like algorithms \cite{Golub_Bk_96}, the unitary matrix $\mathbf{V}$ can be decomposed into product of elementary complex Givens rotations $\mathbf{\Psi}_{pq}$ such that:
\begin{equation}\label{Eq09}
\mathbf{V}=\prod_{N_{Sweeps}}~\prod_{1\leq p <q\leq M}\mathbf{\Psi}_{pq}
\end{equation}
where $N_{Sweeps}$ refers to the number of sweeps (iterations\footnote{In this paper we will use the terms \textit{iteration} and \textit{sweep} interchangeably.}) and the Givens rotation matrix $\mathbf{\Psi}_{pq}$ is a unitary matrix where all diagonal elements are one except for two elements $\psi_{pp}$ and $\psi_{qq}$. Likewise, all off-diagonal elements of $\mathbf{\Psi}_{pq}$ are zero except for two elements $\psi_{pq}$ and $\psi_{qp}$. Elements $\psi_{pp}, \psi_{pq}, \psi_{qp}$, and $\psi_{qq}$ are given by:
\begin{eqnarray} \label{Eq10}
\left[\begin{array}{cc}\psi_{pp} & \psi_{pq} \\ \psi_{qp} & \psi_{qq} \end{array} \right]
&=& \left[\begin{array}{cc} \cos (\theta) & e^{\jmath \alpha} \sin(\theta)
\\-e^{-\jmath \alpha} \sin(\theta) & \cos(\theta)\end{array}\right]
\end{eqnarray}
To compute $\mathbf{\Psi}_{pq}$, we need to find only the rotation angles $(\theta,\alpha)$. The idea here is to choose the rotation angles $(\theta,\alpha)$ such that the CM criterion $\mathcal{J}(\mathbf{V})$ is minimized. For this purpose, let us consider the unitary transformation\footnote{For simplicity, we keep using notation $\bar{\mathbf{Y}}$ even though the latter matrix is transformed at each iteration of the proposed algorithm.} $\breve{\mathbf{Y}}=\mathbf{\Psi}_{pq}\bar{\mathbf{Y}}$. Given the structure of $\mathbf{\Psi}_{pq}$, this unitary transformation changes only the elements in rows $p$ and $q$ of $\bar{\mathbf{Y}}$ according to:
\begin{equation}\label{Eq11}
\breve{y}_{pj}=\cos (\theta)\bar{y}_{pj}+e^{\jmath \alpha} \sin(\theta)\bar{y}_{qj} \mbox{ and }
\breve{y}_{qj}=-e^{-\jmath \alpha} \sin(\theta)\bar{y}_{pj}+\cos (\theta)\bar{y}_{qj}
\end{equation}
where $\bar{y}_{ij}$ refers to the $(i,j)$th entry of $\bar{\mathbf{Y}}$.
The algorithm consists of minimizing iteratively the criterion in (\ref{Eq04}) by applying successive Givens rotations, with initialization of $\mathbf{V}=\mathbf{I}$. $\mathbf{\Psi}_{pq}$ are computed such that $\mathcal{J}(\mathbf{\Psi}_{pq})$ is minimized at each iteration. In order to minimize $\mathcal{J}(\mathbf{\Psi}_{pq})$, we propose to express it as a function of $(\theta,\alpha)$. Since the application of Givens rotation matrix $\mathbf{\Psi}_{pq}$ to $\bar{\mathbf{Y}}$ modifies only the two rows $p$ and $q$, the terms that depend on $(\theta,\alpha)$ are those corresponding to $i=p$ or $i=q$ in (\ref{Eq04}). Considering (\ref{Eq10}) and
(\ref{Eq11}), we have:
\begin{eqnarray}\label{Eq12}
\begin{array}{l}
\mathcal{J}(\mathbf{\Psi}_{pq})=\sum_{j=1}^{K}\left[\big(|\breve{y}_{pj}|^{2}
-1\big)^{2}+\big(|\breve{y}_{qj}|^{2}-1\big)^{2}\right] +\sum_{j=1}^{K}\sum_{i=1, i\neq p,q}^{M}
\big(|\bar{y}_{ij}|^{2}-1\big)^{2}
\end{array}
\end{eqnarray}
On the other hand, by considering (\ref{Eq11}) and the following equalities:
\begin{eqnarray}\label{Eq13}
\begin{array}{l}
\cos^{2}(\theta) = \frac{1}{2}(1+\cos(2\theta)),
\sin^{2}(\theta)=\frac{1}{2}(1-\cos(2\theta)),
\sin(2\theta) = 2\sin(\theta)\cos(\theta)
\end{array}
\end{eqnarray}
and after some manipulations, we obtain:
\begin{eqnarray}\label{Eq14}
\begin{split}
|\breve{y}_{pj}|^{2} = \mathbf{t}_{j}^{T}\mathbf{v}+\frac{1}{2}\big(|\bar{y}_{pj}|^{2} +|\bar{y}_{qj}|^{2}\big)
\mbox{ and }
|\breve{y}_{qj}|^{2} =-\mathbf{t}_{j}^{T}\mathbf{v}+\frac{1}{2}\big(|\bar{y}_{pj}|^{2} +|\bar{y}_{qj}|^{2}\big)
\end{split}
\end{eqnarray}
with:
\begin{eqnarray}
&&\mathbf{v}=[\cos(2\theta),~\sin(2\theta)\cos(\alpha),~\sin(2\theta)\sin(\alpha)]^{T}\label{Eq16}\\
&&\mathbf{t}_{j}=\Big[\frac{1}{2}\big(|\bar{y}_{pj}|^{2}-|\bar{y}_{qj}|^{2}\big),~
\Re(\bar{y}_{pj}\bar{y}_{qj}^{*}),~\Im(\bar{y}_{pj}\bar{y}_{qj}^{*})\Big]^{T}\label{Eq17}
\end{eqnarray}
where $\Re(a)$ and $\Im(a)$ denote real and imaginary parts of $a$, respectively. Using (\ref{Eq14}), we get:
\begin{align}\label{Eq18}
\big(|\breve{y}_{pj}|^{2}-1\big)^{2}&+\big(|\breve{y}_{qj}|^{2}-1\big)^{2} = 2\mathbf{v}^{T} \mathbf{t}_{j} \mathbf{t}_{j}^{T}\mathbf{v}+2\left(\frac{|\bar{y}_{pj}|^{2} + |\bar{y}_{qj}|^{2}}{2}-1\right)^{2}
\end{align}
Then, plugging (\ref{Eq18}) into (\ref{Eq12}) yields:
\begin{eqnarray}\label{Eq19}
\mathcal{J}(\mathbf{\Psi}_{pq}) &=& 2\sum_{j=1}^{K} \mathbf{v}^{T}\mathbf{t}_{j}\mathbf{t}_{j}^{T}\mathbf{v}
+2\sum_{j=1}^{K}\left(\frac{|\bar{y}_{pj}|^{2}+|\bar{y}_{qj}|^{2}}{2}-1\right)^{2} \nonumber \\
&+& \sum_{j=1}^{K}\sum_{i=1 \atop i\neq p,q}^{M} \big(|\bar{y}_{ij}|^{2}-1\big)^{2}
\end{eqnarray}
Given that the second and third summations in (\ref{Eq19}) do not depend on $(\theta,\alpha)$, the minimization problem is equivalent to the minimization of:
\begin{equation}\label{Eq20}
\mathcal{F}(\mathbf{\Psi}_{pq})=\mathbf{v}^{T}\mathbf{T}\mathbf{v}
\end{equation}
where $\mathbf{T}=\sum_{j=1}^{K}\mathbf{t}_{j}\mathbf{t}_{j}^{T}$ and $\|\mathbf{v}\|=1$. Finally, the solution $\mathbf{v}$ that minimizes (\ref{Eq20}) is given by the unit norm eigenvector of $\mathbf{T}$ corresponding to the smallest eigenvalue\footnote{This is a $3\times3$ eigenvalue problem that can be solved explicitly.}. Given $\mathbf{v}=[v_{1},v_{2},v_{3}]^T$ we have:
\begin{eqnarray}\label{Eq21}
\begin{split}
\cos(\theta)=\sqrt{\frac{1+v_{1}}{2}}\mbox{ and } e^{\jmath \alpha} \sin(\theta)=\frac{v_{2}+\jmath v_{3}}{\sqrt{2(1+v_{1})}}
\end{split}
\end{eqnarray}
Using (\ref{Eq21}), the computation of $\mathbf{\Psi}_{pq}$ follows directly from (\ref{Eq10}). The G-CMA algorithm is summarized in Table \ref{Tab:GCMA} (for simplicity, we use the same notation for the data and its transformed version).
\begin{table}[tb]
\renewcommand{\arraystretch}{1.5}
\centering
\begin{tabular}{l}
\hline Initialization: $\mathbf{V}=\mathbf{I}$\\
1.~~Prewhitening: $\bar{\mathbf{Y}}=\mathbf{B}\mathbf{Y}$, where $\mathbf{B}$ is the prewhitening matrix.\\
2.~~Complex Givens rotations:\\
~~~~~~~~\textbf{for} $i=1:N_{Sweeps}$\\
~~~~~~~~~~~~~~\textbf{for} $p=1:M-1$\\
~~~~~~~~~~~~~~~~~~~~\textbf{for} $q=p+1:M$\\
~~~~~~~~~~~~~~~~~~~~~~~~~~Compute $\mathbf{\Psi}_{pq}$ using (\ref{Eq21})\\
~~~~~~~~~~~~~~~~~~~~~~~~~~$\bar{\mathbf{Y}}=\mathbf{\Psi}_{pq}\bar{\mathbf{Y}}$\\
~~~~~~~~~~~~~~~~~~~~~~~~~~$\mathbf{V}=\mathbf{\Psi}_{pq}\mathbf{V}$\\
~~~~~~~~~~~~~~~~~~~~\textbf{end for}\\
~~~~~~~~~~~~~~\textbf{end for}\\
~~~~~~~~\textbf{end for}\\
3.~~After convergence, computation of the separation matrix: $\mathbf{W}=\mathbf{V}\mathbf{B}$\\
4.~~Separation: $\hat{\mathbf{S}}=\mathbf{W}\mathbf{Y}=\bar{\mathbf{Y}}$.\\
\hline
\end{tabular}
\caption{The Givens CMA (G-CMA) algorithm.} \label{Tab:GCMA}
\end{table}
The G-CMA algorithm described above requires that the number of samples available at the receiver is large enough so that the prewhitening step results in an equivalent channel matrix close to unitary, for which the use of Givens rotations is effective. However, for small numbers of samples, prewhitening may result in an equivalent channel matrix not close to unitary, in which case, applying G-CMA alone is ineffective. Next, we propose to solve this problem by introducing the Hyperbolic Givens rotations.
\section{Hyperbolic Givens CMA (HG-CMA)}
\label{Sec:HGCMA}
As stated in the previous section, the use of Givens rotations in the case of small numbers of samples is not effective. To overcome this limitation, we introduce here the use of Hyperbolic Givens rotations. The latter consist of applying Shear rotations and Givens rotations alternatively. Matrix $\mathbf{W}$ can be decomposed into product of elementary complex Shear rotations, Givens rotations and normalization transformation as follows:
\begin{equation} \label{Eq23}
\mathbf{W}= \prod_{N_{Sweeps}}~~\prod_{1\leq p<q \leq M} \mathbf{D}_{pq}~\mathbf{\Psi}_{pq}~\mathbf{H}_{pq}
\end{equation}
where $\mathbf{D}_{pq}$, $\mathbf{\Psi}_{pq}$ and $\mathbf{H}_{pq}$ denote normalization, unitary Givens and non-unitary Shear transformations, respectively. The unitary matrix $\mathbf{\Psi}_{pq}$ is defined in (\ref{Eq10}). Similar to $\mathbf{\Psi}_{pq}$, $\mathbf{H}_{pq}$ is equal to the identity matrix except for the elements $h_{pp}, h_{pq}, h_{qp}$ and $h_{qq}$ that are given by:
\begin{eqnarray} \label{Eq24}
\left[\begin{array}{cc}h_{pp} & h_{pq} \\ h_{qp} & h_{qq} \end{array} \right] &=&
\left[\begin{array}{cc}\cosh (\gamma) & e^{\jmath \beta} \sinh(\gamma)
\\e^{-\jmath \beta} \sinh(\gamma) & \cosh(\gamma)\end{array}\right]
\end{eqnarray}
where $ \gamma \in \mathbb{R}$ is the hyperbolic transformation parameter and $ \beta \in [-\frac{\pi}{2} , \frac{\pi}{2}]$ is an angle parameter (equal to zero in the real case).
The normalization transformation $\mathbf{D}_{pq}=\mathbf{D}_{pq}(\lambda_{p},\lambda_{q})$ is a diagonal matrix with diagonal elements equal to one except for the two elements $d_{pp}=\lambda_p$, and $d_{qq}=\lambda_q$.
In the following derivation, we consider the square case where $N=M$ (if $N > M$, one can use signal subspace projection as in \cite{Veen_Chap_05}).
\subsection{Non-Unitary Shear Rotations}
\label{Sub:Hyperbolic}
By applying $\mathbf{H}_{pq}$ to the received signal, we get:
\begin{equation} \label{Eq25}
\tilde{\mathbf{Y}} = \mathbf{H}_{pq}~\mathbf{Y}
\end{equation}
From (\ref{Eq24}), only the $p$th and $q$th rows of $\mathbf{Y}$ are affected according to:
\begin{eqnarray}\label{Eq26}
\begin{split}
\tilde{y}_{pj}= \cosh(\gamma) {y_{pj}} + e^{\jmath \beta} \sinh(\gamma) {y_{qj}} \mbox{ and }
\tilde{y}_{qj}= e^{-\jmath \beta} \sinh(\gamma) {y_{pj}} + \cosh(\gamma) {y_{qj}}
\end{split}
\end{eqnarray}
In order to compute $\mathbf{H}_{pq}$, we propose to minimize the CM cost function in (\ref{Eq04}) w.r.t. $\mathbf{H}_{pq}$:
\begin{eqnarray}\label{Eq27}
\mathcal{J}(\mathbf{H}_{pq})= \sum_{j=1}^{K} (|\tilde{y}_{pj}|^2-1)^2 + (|\tilde{y}_{qj}|^2-1)^2 + \sum_{j=1}^{K} \sum_{i=1 \atop i\neq p,q}^{M}({|\bar{y}_{ij}|^2}-1)^2
\end{eqnarray}
By considering (\ref{Eq26}) and the following equalities:
\begin{eqnarray}\label{Eq28}
\begin{split}
\sinh(2 \gamma) = 2 \sinh(\gamma) \cosh(\gamma),
\cosh^2(\gamma) = \frac{1}{2} (\cosh(2 \gamma) + 1),
\sinh^2(\gamma) = \frac{1}{2} (\cosh(2 \gamma) - 1)
\end{split}
\end{eqnarray}
and after some straightforward derivations, we obtain:
\begin{eqnarray}\label{Eq29}
|\tilde{y}_{pj}|^2 = \mathbf{r}_{j}^T \mathbf{u} + \frac {1}{2} (|y_{pj}|^2 - |{y_{qj}|^2}) \mbox{ and }
|\tilde{y}_{qj}|^2 = \mathbf{r}_{j}^T \mathbf{u} - \frac {1}{2} (|y_{pj}|^2 - |{y_{qj}|^2})
\end{eqnarray}
with:
\begin{eqnarray}
&&\mathbf{u}=\left[\cosh(2\gamma),\;\;\cos(\beta)\;\sinh(2\gamma),\;\;\sin(\beta)\;\sinh(2\gamma)\right]^T \label{Eq30} \\
&&\mathbf{r}_{j}=\left[\frac{1}{2}\left(|y_{pj}|^2+|{y_{qj}|^2}\right),\;\;\Re\left(y_{pj}y_{qj}^*\right),\;\;\Im\left(y_{pj}y_{qj}^*\right)\right]^T\label{Eq31}
\end{eqnarray}
Using the results in (\ref{Eq29}), we can rewrite the first two terms in (\ref{Eq27}) as:
\begin{eqnarray}\label{Eq32}
\left(|\tilde{y}_{pj}|^2-1\right)^2 + \left(|\tilde{y}_{qj}|^2-1\right)^2 &=& 2 \mathbf{u}^T \mathbf{r}_{j} \mathbf{r}_{j}^T \mathbf{u} - 4 \mathbf{u}^T \mathbf{r}_{j} \nonumber \\
&+& \frac {1}{2}\left(|\bar{y}_{pj}|^2 - |{\bar{y}_{qj}|^2}\right)^2 + 2
\end{eqnarray}
Then, by substituting (\ref{Eq32}) into (\ref{Eq27}), we obtain:
\begin{eqnarray} \label{Eq33}
\mathcal{J}(\mathbf{u}) = 2\left(\sum_{j=1}^{K} \mathbf{u}^T\mathbf{r}_{j}\mathbf{r}_{j}^T\mathbf{u}-2 \mathbf{u}^T \mathbf{r}_{j}\right) &+& 2 \sum_{j=1}^{K} \big[\frac {1}{4}({|\bar{y}_{pj}|^2} - {|\bar{y}_{qj}|^2})^2 + 1\big] \nonumber \\ &+& \sum_{j=1}^{K} \sum_{i=1 \atop i\neq p,q}^{M}({|\bar{y}_{ij}|^2}-1)^2
\end{eqnarray}
We note that only the first term on the right hand side of the equality (\ref{Eq33}) depends on $(\gamma,\beta)$, and hence the minimization of (\ref{Eq33}) is equivalent to the minimization of:
\begin{equation}\label{Eq34}
\mathcal{F}(\mathbf{u})=\sum_{j=1}^{K}\mathbf{u}^T\mathbf{r}_{j}\mathbf{r}_{j}^T\mathbf{u}-2\mathbf{u}^T \mathbf{r}_{j}
\end{equation}
This optimization problem can be solved in three different ways: by computing the exact solution, by a semi-exact solution, or by a linear approximation to zero.
\subsubsection{Exact Solution}
\label{Sub:ExactSol}
In this approach, we compute the optimum solution using the Lagrange multiplier method. The optimization problem can be expressed as:
\begin{eqnarray} \label{Eq35}
\min_{\mathbf{u}}~~\mathcal{F}(\mathbf{u})~~~\textrm{s.t.}~~~\mathbf{u}^T \mathbf{J}_{3}\mathbf{u} = 1
\end{eqnarray}
where $\mathbf{J}_{3} = \mbox{diag}\left(\left[1,-1,~-1\right]\right)$ so that the constraint is equivalent to $\cosh^2(2\gamma)-\sinh^2(2\gamma)=1$. The Lagrangian of the optimization problem in (\ref{Eq35}) can be written as:
\begin{equation} \label{Eq36}
\mathcal{L}(\mathbf{u},\lambda) = \mathbf{u}^T \mathbf{R} \mathbf{u}-2 \mathbf{r}^T \mathbf{u}+\lambda(\mathbf{u}^T \mathbf{J}_{3} \mathbf{u}-1)
\end{equation}
where $\mathbf{R}=\sum_{j=1}^K\mathbf{r}_{j}\mathbf{r}_{j}^T$ is a $(3\times3)$ symmetric matrix, $\mathbf{r}=\sum_{j=1}^K\mathbf{r}_{j}$, $\mathbf{u}$ and $\mathbf{r}_{j}$ are defined in (\ref{Eq30}) and (\ref{Eq31}), respectively. The solution that minimizes the Lagrangian in (\ref{Eq36}) can be expressed as:
\begin{equation}\label{Eq38}
\mathbf{u}=(\mathbf{R}+\lambda \mathbf{J}_{3})^{-1} \mathbf{r}
\end{equation}
where $\lambda$ is the solution of:
\begin{equation}\label{Eq39}
\mathbf{u}^T\mathbf{J}_{3}\mathbf{u}=1\Longleftrightarrow \mathbf{r}^T(\mathbf{R}+\lambda \mathbf{J}_{3})^{-1} \mathbf{J}_{3} (\mathbf{R}+\lambda \mathbf{J}_{3})^{-1} \mathbf{r}=1
\end{equation}
which is a $6$-th order polynomial equation (see appendix A) of the form: $P_6(\lambda)=c_0\lambda^6+c_1\lambda^5+c_2\lambda^4+c_3\lambda^3+c_4\lambda^2+c_5\lambda+c_6=0$.
The desired solution $\lambda$ is the real-valued root of the above polynomial that corresponds to the minimum value of (\ref{Eq36}). Finally, given the solution $\mathbf{u}=[u_1\;u_2\;u_3]^T$ in (\ref{Eq38}), the Shear transformation entries are computed as:
\begin{equation} \label{Eq40}
h_{pp} = h_{qq} = \sqrt{\frac{u_1 +1}{2}}\mbox{ and } h_{pq} = h_{qp}^* = \frac{(u_2+\jmath u_3)}{2 h_{pp}}
\end{equation}
Note that, for the computation of each Shear rotation matrix, we need to perform a $3 \times 3$ matrix inversion and solve a $6$-th order polynomial equation. Hence, as the number of sweeps and transmit antennas increases, the complexity increases. In the following, we present two suboptimal solutions that have less complexity and close performance compared to the exact one.
\subsubsection{Semi-Exact Solution}
\label{Sub:SemiExactSol}
We denote this approach by \textit{semi-exact solution}, since for computing $\beta$ we take the approximation in (\ref{Eq41}), while for the angle rotation $\gamma$ we compute an exact solution using the Lagrange multiplier method. By considering the first order approximation around zero of $\sinh$ and $\cosh$, we have:
\begin{eqnarray}\label{Eq41}
\sinh(2\gamma) \approx 2\sinh(\gamma) \approx 2\gamma \mbox{ and } \cosh(2\gamma) \approx \cosh(\gamma) \approx 1
\end{eqnarray}
Using (\ref{Eq41}) in (\ref{Eq30}), equation (\ref{Eq34}) can be expressed as:
\begin{eqnarray}\label{Eq42}
\mathcal{F}(\gamma,\beta)=\sum_{j=1}^{K} r_{j}^{(1)} \left(r_{j}^{(1)}-2\right) &+& 4\gamma \left[\cos(\beta) r_{j}^{(2)}\left(r_{j}^{(1)}-1\right)+\sin(\beta) r_{j}^{(3)}\left(r_{j}^{(1)}-1\right)\right] \nonumber \\
&+& 4\gamma^2\left(\cos(\beta)r_{j}^{(2)}+\sin(\beta)r_{j}^{(3)}\right)^2
\end{eqnarray}
where $r_{j}^{(i)}$ is the $i$th element of $\mathbf{r}_{j}$. The linear approximation of (\ref{Eq42}) for $\gamma$ close to zero (which corresponds to simply neglecting the terms involving $\gamma^n$ for $n\geq2$) can be obtained by discarding the last term of (\ref{Eq42}):
\begin{equation}\label{Eq43}
\begin{array}{l}
\mathcal{F}(\gamma,\beta)\approx \sum_{j=1}^{K} r_{j}^{(1)}\left(r_{j}^{(1)}-2\right) + 4\gamma\left[\cos(\beta)r_{j}^{(2)} \left(r_{j}^{(1)}-1\right)+\sin(\beta)r_{j}^{(3)}\left(r_{j}^{(1)}-1\right)\right]
\end{array}
\end{equation}
The minimization of (\ref{Eq43}) (obtained by zeroing its derivative) leads to:
\begin{equation}\label{Eq44}
\begin{array}{l}
\beta = \mathrm{arctan} \left(\frac{\sum_{j=1}^K r_{j}^{(3)}\; \left(r_{j}^{(1)}-1\right)}{\sum_{j=1}^K r_{j}^{(2)}\; \left(r_{j}^{(1)}-1\right)} \right)
\end{array}
\end{equation}
Once we have $\beta$, let us define:
\begin{eqnarray}
&&\tilde{\mathbf{u}} =[\cosh(2\gamma),\;\sinh(2\gamma)]^T\label{Eq45}\\
&&\tilde{\mathbf{r}}_{j}=\left[\frac{1}{2}\left(|y_{pj}|^2+|{y_{qj}|^2}\right),~\cos(\beta) \Re(y_{pj}y_{qj}^*) + \sin(\beta) \Im(y_{pj}y_{qj}^*) \right]^T\label{Eq46}
\end{eqnarray}
and hence, finding $\gamma$ which minimizes (\ref{Eq34}) implies solving the following optimization problem:
\begin{equation} \label{Eq47}
\min_{\tilde{\mathbf{u}}}~~\mathcal{K}(\tilde{\mathbf{u}})~~~\textrm{s.t.}~~~\tilde{\mathbf{u}}^T\mathbf{J}_{2}\tilde{\mathbf{u}} = 1
\end{equation}
where $\mathbf{J}_{2} = \mbox{diag}\left([1,~-1]\right)$ and:
\begin{eqnarray}\label{Eq48}
\begin{array}{l}
\mathcal{K}(\tilde{\mathbf{u}}) = \sum_{j=1}^{K}\tilde{\mathbf{u}}^T \tilde{\mathbf{r}}_{j}\tilde{\mathbf{r}}_{j}^T \tilde{\mathbf{u}}-2 \tilde{\mathbf{u}}^T \tilde{\mathbf{r}}_{j}
\end{array}
\end{eqnarray}
By defining $\tilde{\mathbf{R}}=\sum_{j=1}^{K}\tilde{\mathbf{r}}_{j}\tilde{\mathbf{r}}_{j}^T$ and $\tilde{\mathbf{r}}=\sum_{j=1}^K\tilde{\mathbf{r}}_{j}$, the optimization of (\ref{Eq47}) using Lagrange multiplier leads to:
\begin{equation}\label{Eq50}
\tilde{\mathbf{u}}=(\tilde{\mathbf{R}}+\lambda \mathbf{J}_{2})^{-1} \tilde{\mathbf{r}}
\end{equation}
where $\lambda$ is the solution of:
\begin{equation}\label{Eq51}
\tilde{\mathbf{u}}^T \mathbf{J}_{2} \tilde{\mathbf{u}} = 1 \Longleftrightarrow \tilde{\mathbf{r}}^T (\tilde{\mathbf{R}}+\lambda \mathbf{J}_{2})^{-1}\mathbf{J}_{2}(\tilde{\mathbf{R}}+\lambda \mathbf{J}_{2})^{-1}\tilde{\mathbf{r}} = 1
\end{equation}
This is a $4$-th order polynomial equation (see appendix A) of the form: $P_4(\lambda)=c_0\lambda^4+c_1\lambda^3+c_2\lambda^2+c_3\lambda+c_4= 0$. The desired solution $\lambda$ is the real-valued root of the above polynomial that corresponds to the minimum value of (\ref{Eq48}). Finally, given the solution $\tilde{\mathbf{u}}=[\tilde{u}_1\;\tilde{u}_2]^T$ in (\ref{Eq50}) and $\beta$ in (\ref{Eq44}), the Shear transformation entries can be obtained as:
\begin{eqnarray}\label{Eq52}
h_{pp} = h_{qq} = \sqrt{\frac{1}{2}(\tilde{u}_1 +1)}\mbox{ and }
h_{pq} = h_{qp}^* = e^{\jmath \beta}\frac{\tilde{u}_2}{2 h_{pp}}
\end{eqnarray}
We note that in this solution, for the computation of each Shear rotation matrix, we need to solve a $4$-th order polynomial equation. Hence, the complexity of this solution is clearly less than that of the exact one.
\subsubsection{Solution with Linear Approximation to Zero}
\label{Sub:ApproSol}
In this approach, we compute $\beta$ as in (\ref{Eq44}) and then we compute $\gamma$ which minimizes (\ref{Eq48}) by considering the approximation in (\ref{Eq41}). We define:
\begin{equation} \label{Eq53}
\tilde{\mathbf{R}} = \left[\begin{array}{cc}\tilde{r}_{11} &\tilde{r}_{12}\\ \tilde{r}_{21} &\tilde{r}_{22} \end{array}\right] ~~ \mathrm{and} ~~\tilde{\mathbf{r}} = \left[\begin{array}{cc}\tilde{r}_{1}\\ \tilde{r}_{2}\end{array}\right]
\end{equation}
and using (\ref{Eq26}), (\ref{Eq48}) can be written as:
\begin{equation} \label{Eq54}
\mathcal{K}(\gamma)=\frac{1}{2}(\tilde{r}_{11}+ \tilde{r}_{22})\cosh(4\gamma)+\tilde{r}_{12} \sinh(4\gamma)-2 \tilde{r}_{1} \cosh(2\gamma)-2 \tilde{r}_{2}\sinh(2\gamma)
\end{equation}
By taking the first derivative of (\ref{Eq54}) with respect to $\gamma$, using (\ref{Eq41}), and setting the result equal to zero, we obtain:
\begin{equation} \label{Eq55}
\sinh(2\gamma) (\tilde{r}_{11}+\tilde{r}_{22}-\tilde{r}_{1}) + \cosh(2\gamma)(\tilde{r}_{12}-\tilde{r}_{2}) = 0
\end{equation}
whose solution is:
\begin{eqnarray}\label{Eq56}
\begin{array}{l}
\gamma =\frac{1}{2}\mathrm{arctanh}\left(\frac{\sum_{j=1}^K \left[\left(\cos(\beta) r_{j}^{(2)}+\sin(\beta) r_{j}^{(3)}\right) \left(1-r_{j}^{(1)}\right)\right]}{\sum_{j=1}^K \left[\left((r_{j}^{(1)})^2-r_{j}^{(1)}\right)+\left(\cos(\beta) r_{j}^{(2)}+\sin(\beta) r_{j}^{(3)}\right)^2\right]} \right)
\end{array}
\end{eqnarray}
Given $\beta$ in (\ref{Eq44}) and $\gamma$ in (\ref{Eq56}), the computation of $\mathbf{H}_{pq}$ follows directly. This solution has the lowest complexity among the three considered ones.
\subsection{Unitary Givens Rotation}
\label{Sub:Givens}
After the Shear transformation, we now apply the Givens transformation to the result of the Shear rotation as:
\begin{equation} \label{Eq57}
\mathbf{\underbar{Y}} = \mathbf{\Psi}_{pq} \tilde{\mathbf{Y}}
\end{equation}
The unitary matrix $\mathbf{\Psi}_{pq}$ is computed in the same way as in Section \ref{Sec:GCMA}.
\subsection{Normalization Rotations}
\label{Sub:HGCMA_Norm}
The algorithm's last transform is a normalization step. In our CM criterion in (\ref{Eq04}), we have set the constant equal to one while in the original CM criterion it is chosen equal to $C_i = E[\absF{s_i}]/E[\absT{s_i}]$. This normalization step is introduced to compensate for this constant choice (the value of $C_i$ is supposed unknown in a blind context).
It has been shown in the two previous subsections that both Givens and hyperbolic transformations affect only the rows of indices $p$ and $q$ of the data block $\mathbf{\underbar{Y}}$, which means that only these two rows need to be normalized:
\begin{equation}\label{Eq58}
\mathbf{Z}=\mathbf{D}_{(pq)}(\lambda_p,\lambda_q)~\mathbf{\underbar{Y}}
\end{equation}
The optimal parameters $(\lambda_p,\lambda_q)$ are calculated so that they minimize the CM criterion in (\ref{Eq04}) w.r.t. $\mathbf{D}_{(pq)}(\lambda_p,\lambda_q)$. The CM criterion is expressed in this case as (constant terms are omitted):
\begin{equation} \label{Eq59}
\mathcal{J}_{D}(\lambda_p,\lambda_q)=\sum_{j=1}^{K}(\lambda_p^4\absF{\underbar{y}_{pj}}-2\lambda_p^2\absT{\underbar{y}_{pj}}) + \sum_{j=1}^{K}(\lambda_q^4\absF{\underbar{y}_{qj}}-2\lambda_q^2\absT{\underbar{y}_{qj}})
\end{equation}
Optimal normalization parameters can be obtained at the zeros of the derivatives of (\ref{Eq59}) with respect to these two parameters as follows:
\begin{eqnarray} \label{Eq60}
\begin{array}{lll}
\lambda_p = \sqrt{\sum_{j=1}^{K}\absT{\underbar{y}_{pj}} / \sum_{j=1}^{K}\absF{\underbar{y}_{pj}}}
~~\mbox{ and } \lambda_q = \sqrt{\sum_{j=1}^{K}\absT{\underbar{y}_{qj}} / \sum_{j=1}^{K}\absF{\underbar{y}_{qj}}}
\end{array}
\end{eqnarray}
The HG-CMA algorithm is summarized in Table \ref{Tab:HGCMA}.
\begin{table}[tb]
\renewcommand{\arraystretch}{1.5}
\centering
\begin{tabular}{l}
\hline Initialization: $\mathbf{W}=\mathbf{I}$\\
Signal subspace projection if $N>M$ \\
\textbf{for} $i=1:N_{Sweeps}$\\
~~~~~~\textbf{for} $p=1:M-1$\\
~~~~~~~~~~~~\textbf{for} $q=p+1:M$\\
~~~~~~~~~~~~~~~~~~Compute $\mathbf{H}_{pq}$:\\
~~~~~~~~~~~~~~~~~~~~~~- using (\ref{Eq40}) for exact solution\\
~~~~~~~~~~~~~~~~~~~~~~- using (\ref{Eq44}) and (\ref{Eq52}) for semi exact solution\\
~~~~~~~~~~~~~~~~~~~~~~- using (\ref{Eq44}) and (\ref{Eq56}) for linear approximation to zero (preferred)\\
~~~~~~~~~~~~~~~~~~$\mathbf{Y}=\mathbf{H}_{pq}\mathbf{Y}$\\
~~~~~~~~~~~~~~~~~~$\mathbf{W}=\mathbf{H}_{pq}\mathbf{W}$\\
~~~~~~~~~~~~~~~~~~Compute $\mathbf{\Psi}_{pq}$ using (\ref{Eq21})\\
~~~~~~~~~~~~~~~~~~$\mathbf{Y}=\mathbf{\Psi}_{pq}\mathbf{Y}$\\
~~~~~~~~~~~~~~~~~~$\mathbf{W}=\mathbf{\Psi}_{pq}\mathbf{W}$\\
~~~~~~~~~~~~~~~~~~Compute $\mathbf{D}_{pq}$ using (\ref{Eq60})\\
~~~~~~~~~~~~~~~~~~$\mathbf{Y}=\mathbf{D}_{pq}\mathbf{Y}$\\
~~~~~~~~~~~~~~~~~~$\mathbf{W}=\mathbf{D}_{pq}\mathbf{W}$\\
~~~~~~~~~~~~\textbf{end for}\\
~~~~~~\textbf{end for}\\
\textbf{end for}\\
Separation: $\hat{\mathbf{S}}=\mathbf{W}\mathbf{Y}=\mathbf{Y}$.\\
\hline
\end{tabular}
\caption{The Hyperbolic Givens CMA (HG-CMA) algorithm.} \label{Tab:HGCMA}
\end{table}
\section{Adaptive HG-CMA}
\label{Sec:AHGCMA}
To make an adaptive version of the HG-CMA algorithm, let us consider a sliding bloc of size $K$, $\mathbf{Y}^{(t-1)}=\left[\mathbf{y}(t-K),...,\mathbf{y}(t-2),\mathbf{y}(t-1)\right]$ which is updated at each acquisition of a new sample $\mathbf{y}(t)$ (at time instant $t$). The main idea of the adaptive HG-CMA is to apply only one sweep of complex rotations on the sliding window at each time instant and update the separation matrix $\mathbf{W}$ by this sweep of rotations.
The numerical cost of the HG-CMA is of order $O(KM^2)$ (assuming $K >M$) but can be reduced to $O(KM)$ flops per iteration if we use only one or two rotations per time instant. In the simulation experiments, we compare the performance of the algorithm in the 3 following cases:
\begin{itemize}
\item When we use one complete sweep (i.e. $M(M-1)/2$ rotations)
\item When we use one single rotation whose indices are chosen according to an automatic selection (i.e. automatic incrementation) throughout the iterations, in such a way that all search directions are visited periodically.
\item When we use two rotations per iteration (time instant): one pair of indices is selected according to the maximum deviation criterion:
\begin{equation}\label{Eq61}
\begin{array}{l}
(p,q) = \arg\max \sum_{k=1}^K(|y_{pk}|^2 -1)^2+(|y_{qk}|^2 -1)^2
\end{array}
\end{equation}
the other rotation indices are selected automatically.
\end{itemize}
Comparatively, the adaptive ACMA \cite{Veen_Chap_05} costs approximately $O(M^3)$ flops per iteration and the LS-CMA\footnote{We consider here an adaptive version of the LS-CMA using the same sliding window as for our algorithm.} costs $O(KM^2 + M^3)$. Interestingly, as shown in section \ref{Sec:Results}, the sliding window length $K$ can be chosen of the same order as the number of sources $M$ without affecting much the algorithm's performance. In that case, the numerical cost of HG-CMA becomes similar to that of the adaptive ACMA. The adaptive HG-CMA algorithm is summarized in Table \ref{Tab:AHGCMA}. Note that the normalization step is done outside the sweep loop, which slightly reduces the numerical cost.
\begin{table}[tb]
\centering
\renewcommand{\arraystretch}{1.5}
\begin{tabular}{l}
\hline
~Initialization:~$\mathbf{W}^{(K)}=\mathbf{I}_{M}$\\
~\textbf{For} $t = K+1, K+2, ...$ \textbf{do}\\
~~~~~~$\mathbf{y}(t)=\mathbf{W}^{(t-1)}~\mathbf{y}(t)$\\
~~~~~~$\mathbf{Y}^{(t)}=\left[\mathbf{y}(t-K),...,\mathbf{y}(t-1),\mathbf{y}(t)\right]$\\
~~~~~~$\mathbf{W}^{(t)}=\mathbf{W}^{(t-1)}$\\
~~~~~~\textbf{For all} $1 \leq p <q \leq M$ \textbf{do}\\
~~~~~~~~~~~~Compute $\mathbf{H}_{(pq)}$ using (\ref{Eq44}) and (\ref{Eq56})\\
~~~~~~~~~~~~Compute $\mathbf{\Psi}_{(pq)}$ using (\ref{Eq21})\\
~~~~~~~~~~~~Update $\mathbf{W}^{(t)}=\mathbf{\Psi}_{(pq)}~\mathbf{H}_{(pq)}~\mathbf{W}^{(t)}$\\
~~~~~~~~~~~~Update $\mathbf{Y}^{(t)}=\mathbf{\Psi}_{(pq)}~\mathbf{H}_{(pq)}~\mathbf{Y}^{(t)}$\\
~~~~~~\textbf{end For} \\
~~~~~~\textbf{For} $1\leq p\leq M$,~compute $\lambda_p$ using (\ref{Eq60}),~\textbf{end For}\\
~~~~~~Compute $\mathbf{D}=\mbox{diag}([\lambda_1, \cdots, \lambda_M])$\\
~~~~~~Update $\mathbf{W}^{(t)}=\mathbf{D}~\mathbf{W}^{(t)}$ and $\mathbf{Y}^{(t)}=\mathbf{D}~\mathbf{Y}^{(t)}$\\
~\textbf{end For}\\
\hline
\end{tabular}
\caption{Adaptive HG-CMA Algorithm.}
\label{Tab:AHGCMA}
\end{table}
\section{Numerical Results}
\label{Sec:Results}
Some numerical results are now presented in order to assess the performance of the proposed algorithms. For comparison we use ACMA \cite{Veen_ACMA_96} and LS-CMA \cite{Veen_Chap_05} as a benchmark. As performance measure, we use the signal to interference
and noise ratio (SINR) defined as:
\begin{eqnarray}\label{eq60}
\begin{array}{l}
\textrm{SINR}=\frac{1}{M}\sum_{k=1}^{M}\textrm{SINR}_{k}\mbox{ with }~~\textrm{SINR}_{k}= \frac{|g_{kk}|^{2}}{\sum\limits_{\ell,\ell\neq k}|g_{k\ell}|^{2}+\mathbf{w}_{k}\mathbf{R}_{b}\mathbf{w}_{k}^{H}}
\end{array}
\end{eqnarray}
where $\textrm{SINR}_{k}$ is the signal to interference and noise ratio at the $k$th output, and $g_{ij}=\mathbf{w}_{i}\mathbf{a}_{j}$, where $\mathbf{w}_{i}$ and $\mathbf{a}_{j}$ are the $i$th row vector and $j$th column vector of matrices $\mathbf{W}$ and $\mathbf{A}$, respectively.
$\mathbf{R}_{b}=E[\mathbf{b}\mathbf{b}^{H}]=\sigma_{b}^{2}\mathbf{I}_{N}$ is the noise covariance matrix. The source signals are assumed to be of unit variance.
We use the data model in (\ref{Eq01}). The system inputs are independent, uniformly distributed and drawn from 8-PSK or 16-QAM constellations. The channel matrices $\mathbf{A}$ are generated randomly at each run but with controlled conditioning (their entries are generated as i.i.d. Gaussian variables). Unless otherwise specified, we consider $M=5$ transmit and $N=7$ receive antennas. The noise variance is determined according to the desired signal to noise ratio (SNR). In all figures the results are averaged over 1000 independent realizations (Monte Carlo runs).
Fig. \ref{Fig_1} depicts the SINR of HG-CMA vs. the SNR. We compare the three solutions, i.e., linear approximation to zero, semi-exact and exact solutions for Shear rotations in HG-CMA for 8-PSK and 16-QAM constellations. The sample size is $K=100$ and the number of sweeps is set equal to 10. We observe that the three solutions have almost the same performance for both 8-PSK and 16-QAM constellations. Therefore, in the following simulations, in HG-CMA, we will consider the linear approximation to zero solution.
In Fig. \ref{Fig_2}, we investigate the effect of the number of sweeps on the performance of G-CMA and HG-CMA. The figure shows the SINR vs. the SNR for different numbers of sweeps. In this simulation, we assumed an 8-PSK constellation and $K=100$ samples. We observe that, as expected, the performance is improved by increasing the number of sweeps and from 5 sweeps upwards, the performance remains unchanged. In the rest of this section we consider $10$ sweeps in G-CMA and HG-CMA. Moreover, we can see that for small number of iterations HG-CMA is much better than G-CMA and the gap between them decreases as the number of iterations increases.
Fig. \ref{Fig_3} compares the proposed HG-CMA and G-CMA algorithms with ACMA in terms of SINR vs. SNR for 8-PSK constellation and various numbers of samples. We observe that, as expected, the larger the number of samples, the better the performance for all algorithms. For small number of samples, i.e. $K=20$, we observe that HG-CMA significantly outperforms ACMA and G-CMA. We also observe that G-CMA performs better than ACMA for low to moderate SNR while for $\mathrm{SNR}>23~\mathrm{dB}$, ACMA becomes better. The reason that ACMA performs worse than HG-CMA is that the number of samples $K=20$ is less than the number of transmit antennas squared $M^2$, i.e., $K=20<M^2=25$ and as we stated above for ACMA to achieve good performance in the case of PSK constellations the number of samples $K$ must be at least greater than $M^2$ \cite{Veen_01}. For $K=100$, HG-CMA still provides the best performance while the performance of ACMA becomes very close to that of HG-CMA and better than that of G-CMA. We can say that for small or moderate number of samples the proposed algorithms are more suitable as compared to ACMA even for PSK constellations.
In Fig. \ref{Fig_4}, we consider the case of 16-QAM constellation. We notice that the proposed HG-CMA and G-CMA algorithms provide better performance as compared to ACMA. We also observe that, unlike the 8-PSK case in Fig. \ref{Fig_3}, the performance of HG-CMA and G-CMA are close in the case of 16-QAM. Moreover, we can see that the gap between the performance of the proposed algorithms and ACMA gets smaller as the number of samples $K$ increases. We can say that the proposed HG-CMA and G-CMA algorithms are more suitable as compared to ACMA for non-constant modulus constellations, since they provide better performance for a lower computational cost.
In Figs. \ref{Fig_5} and \ref{Fig_6}, we plot the SINR of HG-CMA, G-CMA and ACMA vs. the number of samples $K$ for 8-PSK and 16-QAM constellations, respectively. We compare the performance of the proposed algorithms HG-CMA and G-CMA with ACMA for different antenna configurations and SNR=30 dB. In both figures we observe that, the larger the number of samples, the better the performance. In Fig. \ref{Fig_5}, in the case of 8-PSK constellation, we observe that HG-CMA provides the best performance. For small number of samples, G-CMA outperforms ACMA. However, for large number of samples ACMA performs better. In Fig. \ref{Fig_6} for 16-QAM, HG-CMA and G-CMA outperform ACMA and the gap is larger for small number of samples and decreases as the number of samples increases.
In Figs. \ref{Fig_7} and \ref{Fig_8} we plot the symbol error rate (SER) of HG-CMA, G-CMA and ACMA vs. SNR for different number of samples $K$ for 8-PSK and 16-QAM constellations, respectively. We considered $M=5$ and $N=7$. In Fig. \ref{Fig_7}, for 8-PSK case, we notice that the proposed HG-CMA provides the best performance. We also observe that G-CMA outperforms ACMA for small number of samples, here $K=20$. However, for large number of samples ACMA performs better than G-CMA for all SNRs. Note that for very large SNR and $K \geq M^2$ it is expected that ACMA outperforms HG-CMA since ACMA in this case provides the optimal (exact in the noiseless case) solution. In the case of 16-QAM in Fig. \ref{Fig_8}, we observe that the proposed HG-CMA and G-CMA algorithms always outperform ACMA, even for large number of samples. Therefore, we can conclude that the proposed HG-CMA and G-CMA are preferable to ACMA in the case of non-constant modulus constellations, i.e. 16-QAM, for any number of samples. In the case of constant modulus constellations, e.g. PSK, HG-CMA and G-CMA are better than ACMA for small number of samples. However, for large number of samples and the range of interest of SNR from $0-30$ dB, HG-CMA and ACMA have close performance and ACMA is better than G-CMA.
To assess the performance of the adaptive HG-CMA, we consider here, unless stated otherwise, a $5\times5$ MIMO system (i.e. $M=5$), i.i.d. 8-PSK modulated sequences as input sources, and the processing window size is set equal to $K=2M$. In Fig. \ref{Fig_Time}, we compare the convergence rates and separation quality of adaptive HG-CMA (with different number of rotations per time instant), LS-CMA and adaptive ACMA. One can observe that adaptive HG-CMA outperforms the two other algorithms in this simulation context. Even with only two rotations per time instant, our algorithm leads to high separation quality with fast convergence rate (typically, few tens of iterations are sufficient to reach the steady state level).
In Fig. \ref{Fig_SNR}, the plots represent the steady state SINR (obtained after 1000 iterations) versus the SNR. One can see that the adaptive HG-CMA has no floor effect (as for the LS-CMA and adaptive ACMA) and its SINR increases almost linearly with the SNR in dB.
In Fig. \ref{Fig_M}, the SNR is set equal to $20$~dB and the plots again represent the steady state SINR versus the number of sources $M$. Severe performance degradation is observed (when the number of sources increases) for the LS-CMA and adaptive ACMA while the adaptive HG-CMA performance seems to be unaffected. In Fig. \ref{Fig_K}, the plots illustrate the algorithms performance versus the chosen processing window size\footnote{This concerns only LS-CMA and adaptive HG-CMA as the adaptive ACMA in \cite{Veen_Chap_05} uses an exponential window with parameter $\beta = 0.995$.} $K$. Surprisingly, HG-CMA algorithm reaches its optimal performance with relatively short window sizes ($K$ can be chosen of the same order as $M$).
In the last experiment (Fig. \ref{Fig_QAM}), we consider 16-QAM sources (with non CM property). In that case, all algorithms performance are degraded but adaptive HG-CMA still outperforms the two other algorithms. To improve the performance in the case of non constant modulus signals, one needs to increase the processing window size as illustrated by this simulation result but more importantly, one needs to use more elaborated cost functions which combines the CM criterion with alphabet matching criteria e.g. \cite{Amine_04, Labed_13}.
\begin{figure}
\caption{Average SINR of HG-CMA vs. SNR. $M=5$, $N=7$, $K=100$, 8-PSK, 16-QAM, and the number of sweeps is 10.}
\label{Fig_1}
\end{figure}
\begin{figure}
\caption{Average SINR of HG-CMA and G-CMA vs. SNR. The effect of the number of sweeps on the performance of G-CMA. $M=5$, $N=7$, $K=100$, and 8-PSK.}
\label{Fig_2}
\end{figure}
\begin{figure}
\caption{Average SINR of HG-CMA, G-CMA, and ACMA vs. SNR for different numbers of samples $K$. 8-PSK case, $M=5$, $N=7$, and 10 sweeps.}
\label{Fig_3}
\end{figure}
\begin{figure}
\caption{Average SINR of HG-CMA, G-CMA and ACMA vs. SNR for different numbers of samples $K$. 16-QAM case, $M=5$, $N=7$, and 10 sweeps.}
\label{Fig_4}
\end{figure}
\begin{figure}
\caption{Average SINR of HG-CMA, G-CMA and ACMA vs. the number of samples $K$ for different antenna configurations. 8-PSK case, SNR=30 dB, and 10 sweeps.}
\label{Fig_5}
\end{figure}
\begin{figure}
\caption{Average SINR of HG-CMA, G-CMA and ACMA vs. the number of samples $K$ for different antenna configurations. 16-QAM case, SNR=30 dB, and 10 sweeps.}
\label{Fig_6}
\end{figure}
\begin{figure}
\caption{Average symbol error rate of HG-CMA, G-CMA and ACMA vs. SNR for different numbers of samples $K$. 8-PSK case, $M=5$, $N=7$, and 10 sweeps.}
\label{Fig_7}
\end{figure}
\begin{figure}
\caption{Average symbol error rate of HG-CMA, G-CMA and ACMA vs. SNR for different numbers of samples $K$. 16-QAM case, $M=5$, $N=7$, and 10 sweeps.}
\label{Fig_8}
\end{figure}
\begin{figure}
\caption{SINR vs. Time Index: $SNR=20dB$, $M=N=5$, $K=10$, 8-PSK.}
\label{Fig_Time}
\end{figure}
\begin{figure}
\caption{SINR vs. SNR: $M=N=5$, $K=10$, 8-PSK.}
\label{Fig_SNR}
\end{figure}
\begin{figure}
\caption{SINR vs. Source Number: $SNR=20dB$, $K=2M$, 8-PSK.}
\label{Fig_M}
\end{figure}
\begin{figure}
\caption{SINR vs. Block Size $K$: $M=N=5$, 8-PSK.}
\label{Fig_K}
\end{figure}
\begin{figure}
\caption{SINR vs. SNR: $M=N=5$, 16-QAM.}
\label{Fig_QAM}
\end{figure}
\section{Conclusion}
\label{Sec:Conclusion}
We proposed two algorithms, G-CMA and HG-CMA, for BSS in the context of MIMO communication systems based on the CM criterion. In G-CMA we combined prewhitening and Givens rotations, and in HG-CMA we combined Shear rotations and Givens rotations. G-CMA is appropriate for a large number of samples since in this case prewhitening is accurate. However, in the case of a small number of samples HG-CMA is preferred since Shear rotations make it possible to compensate for the prewhitening stage, i.e., to reduce the departure from normality. For PSK constellations and a small number of samples, we showed that the proposed HG-CMA and G-CMA algorithms are better than the conventional ACMA. However, for a large number of samples HG-CMA and ACMA have close performance and ACMA outperforms G-CMA. In the case of the 16-QAM constellation, HG-CMA and G-CMA largely outperform the conventional ACMA for a small number of samples.
Also, for the HG-CMA, a moderate complexity adaptive implementation is considered with the advantages of a fast convergence rate and high separation quality. The simulation results illustrate its effectiveness as compared to the adaptive implementations of ACMA and LS-CMA. They show that the sliding window size can be chosen as small as twice the number of sources without significant performance loss. Also, they illustrate the trade-off between the convergence rate and the algorithm's numerical cost as a function of the number of used rotations per iteration. As a perspective, the proposed technique can be adapted for the optimization of more elaborate cost functions which combine the CM criterion with alphabet matching criteria.
\section{Appendix A} \label{Sec:AppA}
It has been shown in subsection \ref{Sub:ExactSol} that the optimal solution in the sense of minimizing the CM criterion in (\ref{Eq27}) is given by (see equation (\ref{Eq38})):
\begin{equation}\label{A1}
\mathbf{u}=(\mathbf{R}+\lambda \mathbf{J}_{3})^{-1} \mathbf{r}
\end{equation}
where $\lambda$ is the solution of:
\begin{equation}\label{A2}
\mathbf{u}^T \mathbf{J}_{3} \mathbf{u} = 1 \Longleftrightarrow \mathbf{r}^T (\mathbf{R}+\lambda \mathbf{J}_{3})^{-1}\mathbf{J}_{3}(\mathbf{R}+\lambda \mathbf{J}_{3})^{-1} \mathbf{r}=1
\end{equation}
In the following, we will show that (\ref{A2}) is a $6$-th order polynomial equation.
Let the $3 \times 3$ matrices $\mathbf{U}$ and $\mathbf{\Lambda}=\mbox{diag}\left[\lambda_1~\lambda_2~\lambda_3 \right]$ be the generalized eigenvectors and eigenvalues matrices of the matrix pair ($\mathbf{R}$,~$\mathbf{J}_3$), i.e.
\begin{equation}\label{A3}
\mathbf{R} = \mathbf{J}_3~\mathbf{U}~\mathbf{\Lambda}~\mathbf{U}^{-1}
\end{equation}
and hence:
\begin{equation}\label{A4}
\left(\mathbf{R}+\lambda\mathbf{J}_3\right)^{-1}=\mathbf{U}\left(\mathbf{\Lambda}+ \lambda\mathbf{I}_3\right)^{-1}\mathbf{U}^{-1}\mathbf{J}_3
\end{equation}
Replacing (\ref{A4}) in (\ref{A2}) leads to:
\begin{equation}\label{A5}
\mathbf{r}^T\mathbf{U}\left(\mathbf{\Lambda}+\lambda\mathbf{I}_3\right)^{-2}\mathbf{U}^{-1}\mathbf{J}_3 \mathbf{r}=\mathbf{a}^T\left(\mathbf{\Lambda}+\lambda\mathbf{I}_3\right)^{-2}\mathbf{b}=1
\end{equation}
where $\mathbf{a}^T=\mathbf{r}^T\mathbf{U}=\left[a_1~a_2~a_3\right]$ and $\mathbf{b}=\mathbf{U}^{-1}\mathbf{J}_3 \mathbf{r}=\left[b_1~b_2~b_3\right]^T$. Knowing that $\left(\mathbf{\Lambda}+\lambda\mathbf{I}_3\right)^{-2} = \mbox{diag}\left[(\lambda+\lambda_1)^{-2},~ (\lambda+\lambda_2)^{-2},~(\lambda+\lambda_3)^{-2}\right]$, (\ref{A5}) is rewritten as:
\begin{equation}\label{A6}
\sum_{i=1}^{3}\frac{a_i b_i}{\left(\lambda+\lambda_i \right)^{2}}=1
\end{equation}
which is equivalent to:
\begin{equation}\label{A8}
\prod_{i=1}^{3}\left(\lambda+\lambda_i \right)^{2} - \sum_{i=1}^{3} a_i b_i \prod_{j=1, j\neq i}^{3}\left(\lambda+\lambda_j \right)^{2}=0
\end{equation}
This is a $6$-th order polynomial equation of the form $P_6(\lambda)=c_0\lambda^6+c_1\lambda^5+c_2\lambda^4+c_3\lambda^3+c_4\lambda^2+c_5\lambda+c_6=0$ with:
\begin{eqnarray} \label{A9}
\begin{array}{lll}
c_0 = 1,~~c_1 = 2\sum_{i=1}^{3}\lambda_i, ~~c_2 = \sum_{i=1}^{3}\left(\lambda_i^2+4\prod_{j=1,j\neq i}^{3}\lambda_j\right)-\mathbf{a}^T\mathbf{b} \nonumber \\
c_3 = 2\sum_{i=1}^{3}\left(\left(\lambda_i^2-a_ib_i\right) \sum_{j=1,j\neq i}^{3}\lambda_j\right), ~~
c_6 = \prod_{i=1}^{3}\lambda_i^2 -\sum_{i=1}^{3}a_ib_i\prod_{j=1,j\neq i}^{3}\lambda_j^2 \nonumber \\
c_4 = \lambda_1^2\lambda_2^2\left(1+\lambda_3^2\right)+4\prod_{i=1 }^{3}\lambda_i\sum_{i=1}^{3}\lambda_i- \sum_{i=1}^{3}a_ib_i\left(\sum_{j=1,j\neq i}^{3}\lambda_j^2+4\prod_{j=1,j\neq i}^{3}\lambda_j\right)\nonumber \\
c_5 = 2\left(\prod_{i=1}^{3}\lambda_i\right)\left(\sum_{i=1}^{3}\prod_{j=1,j\neq i}^{3}\lambda_j\right) -\sum_{i=1}^{3}a_ib_i\left(\sum_{j=1,j\neq i}^{3}\lambda_j\right)\left(\prod_{j=1,j\neq i}^{3}\lambda_j\right)\nonumber \\
\end{array}
\end{eqnarray}
Using the same reasoning, we can find the coefficients of the $4$-th order polynomial equation in (\ref{Eq51}); $P_4(\lambda)=c_0\lambda^4+c_1\lambda^3+c_2\lambda^2+c_3\lambda^1+c_4=0$.
\begin{eqnarray} \label{A10}
\begin{array}{lll}
c_0 = 1, ~~c_1 = 2\sum_{i=1}^{2}\tilde{\lambda_i}, ~~ c_2 = \sum_{i=1}^{2}\tilde{\lambda_i}^2+4\prod_{j=1,j\neq i}^{2}\tilde{\lambda_j}-\tilde{\mathbf{a}}^T \tilde{\mathbf{b}} \nonumber \\
c_3 = 2\sum_{i=1}^{2}\left(\tilde{\lambda_i}^2- \tilde{a}_i \tilde{b}_i\right) \sum_{j=1,j\neq i}^{2}\tilde{\lambda_j}, ~~
c_4 = \prod_{i=1}^{2}\tilde{\lambda_i}^2 -\sum_{i=1}^{2} \tilde{a}_i \tilde{b}_i\prod_{j=1,j\neq i}^{2}\tilde{\lambda_j}^2 \nonumber
\end{array}
\end{eqnarray}
with $\tilde{\mathbf{a}}^T=\tilde{\mathbf{r}}^T\tilde{\mathbf{U}}=\left[\tilde{a}_1~\tilde{a}_2\right]$ and $\tilde{\mathbf{b}}=\tilde{\mathbf{U}}^{-1}\mathbf{J}_2 \tilde{\mathbf{r}}=\left[\tilde{b}_1~\tilde{b}_2\right]^T$,
where the $2 \times 2$ matrices $\tilde{\mathbf{U}}$ and $\tilde{\mathbf{\Lambda}} = \mbox{diag} \left[\tilde{\lambda}_1 ~\tilde{\lambda}_2\right]$ represent the generalized eigendecomposition of the matrix pair ($\tilde{\mathbf{R}}$,~$\mathbf{J}_2$).
\end{document} |
\begin{document}
\title{On the $\Gamma$-limit of singular perturbation problems with optimal profiles which are not one-dimensional. Part II: The lower bound}
\begin{center}
\textsc{Arkady Poliakovsky \footnote{E-mail:
[email protected]}
}\\[3mm]
Department of Mathematics, Ben Gurion University of the Negev,\\
P.O.B. 653, Be'er Sheva 84105, Israel
\\[2mm]
\end{center}
\begin{abstract}
In part II we construct the lower bound, in the spirit of
$\Gamma$-$\varliminf$, for some general classes of singular perturbation
problems, with or without the prescribed differential constraint,
taking the form \[E_\varepsilon(v):=\int_\Omega
\frac{1}{\varepsilon}F\Big(\varepsilon^n\nabla^n v,\ldots,\varepsilon\nabla
v,v\Big)dx\quad\text{for}\;\;
v:\Omega\subset\mathbb{R}^N\to\mathbb{R}^k\;\;\text{such that}\;\; A\cdot\nabla
v=0,\] where the function $F\geq 0$ and $A:\mathbb{R}^{k\times N}\to\mathbb{R}^m$ is
a prescribed linear operator (for example, $A\equiv 0$,
$A\cdot\nabla v:=\text{curl}\, v$ and $A\cdot\nabla v=\text{div}\,
v$). Furthermore, we study the cases where we can easily prove the
coincidence of this lower bound and the upper bound obtained in
\cite{PI}. In particular we find the formula for the $\Gamma$-limit
for the general class of anisotropic problems without a differential
constraint (i.e., in the case $A\equiv 0$).
\end{abstract}
\section{Introduction}
\begin{definition}
Consider a family $\{I_\varepsilon\}_{\varepsilon>0}$ of functionals
$I_\varepsilon(\phi):U\to[0,+\infty]$, where $U$ is a given metric
space. The $\Gamma$-limits of $I_\varepsilon$ are defined by:
\begin{align*}
(\Gamma-\varliminf_{\varepsilon\to 0^+} I_\varepsilon)(\phi)
:=\inf\left\{\varliminf_{\varepsilon\to
0^+}I_\varepsilon(\phi_\varepsilon):\;\,\{\phi_\varepsilon\}_{\varepsilon>0}\subset
U,\; \phi_\varepsilon\to\phi\text{ in }U\;
\text{as}\;\varepsilon\to 0^+\right\},\\
(\Gamma-\varlimsup_{\varepsilon\to 0^+} I_\varepsilon)(\phi)
:=\inf\left\{\varlimsup_{\varepsilon\to
0^+}I_\varepsilon(\phi_\varepsilon):\;\,\{\phi_\varepsilon\}_{\varepsilon>0}\subset
U,\; \phi_\varepsilon\to\phi\text{ in }U\;
\text{as}\;\varepsilon\to 0^+\right\},\\
(\Gamma-\lim_{\varepsilon\to 0^+}
I_\varepsilon)(\phi):=\big(\Gamma-\varliminf_{\varepsilon\to 0^+}
I_\varepsilon\big)(\phi)=(\Gamma-\varlimsup_{\varepsilon\to 0^+}
I_\varepsilon)(\phi)\;\;\,\text{in the case they are equal}.
\end{align*}
\end{definition}
It is useful to know the $\Gamma$-limit of $I_\varepsilon$, because
it describes the asymptotic behavior as $\varepsilon\downarrow 0$ of
minimizers of $I_\varepsilon$, as is clear from the following
simple statement:
\begin{proposition}[De-Giorgi]\label{propdj}
Assume that $\phi_\varepsilon$ is a minimizer of $I_\varepsilon$ for
every $\varepsilon>0$. Then:
\begin{itemize}
\item
If $I_0(\phi)=(\Gamma-\varliminf_{\varepsilon\to 0^+}
I_\varepsilon)(\phi)$ and $\phi_\varepsilon\to\phi_0$ as
$\varepsilon\to 0^+$ then $\phi_0$ is a minimizer of $I_0$.
\item
If $I_0(\phi)=(\Gamma-\lim_{\varepsilon\to 0^+}
I_\varepsilon)(\phi)$ (i.e. it is a full $\Gamma$-limit of
$I_\varepsilon(\phi)$) and for some subsequence $\varepsilon_n\to
0^+$ as $n\to\infty$, we have $\phi_{\varepsilon_n}\to\phi_0$, then
$\phi_0$ is a minimizer of $I_0$.
\end{itemize}
\end{proposition}
Usually, for finding the $\Gamma$-limit of $I_\varepsilon(\phi)$, we
need to find two bounds.
\begin{itemize}
\item[\textbf{(*)}] Firstly, we find a lower bound, i.e. a functional
$\underline{I}(\phi)$ such that for every family
$\{\phi_\varepsilon\}_{\varepsilon>0}$, satisfying
$\phi_\varepsilon\to \phi$ as $\varepsilon\to 0^+$, we have
$\varliminf_{\varepsilon\to 0^+}I_\varepsilon(\phi_\varepsilon)\geq
\underline{I}(\phi)$.
\item[\textbf{(**)}] Secondly, we find an upper
bound, i.e. a functional $\overline{I}(\phi)$, such that for every
$\phi\in U$ there exists a family
$\{\psi_\varepsilon\}_{\varepsilon>0}$, satisfying
$\psi_\varepsilon\to \phi$ as $\varepsilon\to 0^+$ and
$\varlimsup_{\varepsilon\to 0^+}I_\varepsilon(\psi_\varepsilon)\leq
\overline{I}(\phi)$.
\item[\textbf{(***)}] If we find that
$\underline{I}(\phi)=\overline{I}(\phi):=I(\phi)$, then $I(\phi)$ is
the $\Gamma$-limit of $I_\varepsilon(\phi)$.
\end{itemize}
\betaegin{comment}
In various applications we deal with the asymptotic behavior as $\varepsilon\thetao 0^+$ of a family of functionals
of the following general form:
\betaegin{multline}\label{vfdhghfghddh}
I_\varepsilon\Big(v(\cdot),h(\cdot),\psi(\cdot)\Big):=\\ \int_\Omega
\varphirac{1}{\varepsilon}G\betaigg( \betaig\{\varepsilon^n\nabla^{n+1}v,\varepsilon^n\nabla^n
h,\varepsilon^n\nabla^n\psi\betaig\},\ldots,\betaig\{\varepsilon\nabla^2v,\varepsilon\nabla
h,\varepsilon\nabla\psi\betaig\},\betaig\{\nabla v,h,\psi\betaig\},
v,x\betaigg)dx+\int_\Omega \varphirac{1}{\varepsilon}W\Big(\nabla v,h,\psi,v,x\Big)dx\\
\thetaext{for}\;\; v:\Omega\thetao\varphiield{R}^k,\;\;
\psi:\Omega\thetao\varphiield{R}^m\;\;\thetaext{and}\;\;h:\Omega\thetao\varphiield{R}^{d\thetaimes
N}\;\;\thetaext{s.t}\;\;\Deltaiv h\varepsilonquiv 0\,.
\varepsilonnd{multline}
Here $\Omega\sigmaubset\varphiield{R}^N$ is an open set and we assume that $G$ and $W$
are nonnegative continuous functions defined on
$$
\betaig\{\varphiield{R}^{k\thetaimes N^{n+1}}\thetaimes\varphiield{R}^{d\thetaimes
N^{n+1}}\thetaimes\varphiield{R}^{m\thetaimes
N^n}\betaig\}\thetaimes\ldots\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N\thetaimes
N}\thetaimes\varphiield{R}^{d\thetaimes N\thetaimes N}\thetaimes\varphiield{R}^{m\thetaimes
N}\betaig\}\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N}\thetaimes\varphiield{R}^{d\thetaimes
N}\thetaimes\varphiield{R}^{m}\betaig\}\thetaimes \varphiield{R}^k\thetaimes\varphiield{R}^N
$$
and $\varphiield{R}^{k\thetaimes N}\thetaimes\varphiield{R}^{d\thetaimes N}\thetaimes\varphiield{R}^{m}\thetaimes
\varphiield{R}^k\thetaimes\varphiield{R}^N$ respectively, and such that
\betaegin{equation}\label{gghiohiohioikbk}
G\Big(0,0,\ldots,0,\betaig\{a,b,c\betaig\},d,x\Big)\varepsilonquiv 0 \quad\thetaext{for
all}\;\;a\in\varphiield{R}^{k\thetaimes N},\;b\in\varphiield{R}^{d\thetaimes
N},\;c\in\varphiield{R}^m,\;d\in\varphiield{R}^k,\;x\in\varphiield{R}^N\,.
\varepsilonnd{equation}
We have the following important particular cases of the general
energy $I_\varepsilon$. We have the first order problem where the functional
$I_\varepsilon$, which acts on functions $\psi:\Omega\thetao\varphiield{R}^m$, has the form
\betaegin{equation}\label{b1..}
I_\varepsilon(\psi)=\int_\Omega
\varepsilon\betaig|\nabla\psi(x)\betaig|^2+\varphirac{1}{\varepsilon}W\Big(\psi(x),x\Big)dx\,,
\varepsilonnd{equation}
or more generally
\betaegin{equation}\label{b2..}
I_\varepsilon(\psi)=\int_\Omega\varphirac{1}{\varepsilon}G\Big(\varepsilon^n\nabla\psi^n,\ldots,\varepsilon\nabla\psi,\psi,x\Big)dx
+\int_\Omega\varphirac{1}{\varepsilon}W\betaig(\psi,x\betaig)dx\,.
\varepsilonnd{equation}
In the case of second order problems the functional $I_\varepsilon$, which
acts on functions $v:\Omega\thetao\varphiield{R}^k$, has the form
\betaegin{equation}\label{b3..}
I_\varepsilon(v)=\int_\Omega \varepsilon\betaig|\nabla^2 v(x)\betaig|^2+\varphirac{1}{\varepsilon}W\Big(\nabla
v(x),v(x),x\Big)dx\,,
\varepsilonnd{equation}
or more generally
\betaegin{equation}\label{b4..}
I_\varepsilon(v)=\int_\Omega\varphirac{1}{\varepsilon}G\Big(\varepsilon^n\nabla^{n+1}
v,\ldots,\varepsilon\nabla^2 v,\nabla
v,v,x\Big)dx+\int_\Omega\varphirac{1}{\varepsilon}W\betaig(\nabla v,v,x\betaig)dx\,.
\varepsilonnd{equation}
The functionals of the form \varepsilonr{b1..} arise in the theories of phase
transitions and minimal surfaces. They were first studied by Modica
and Mortola \cite{mm1}, Modica \cite{modica}, Sterenberg
\cite{sternberg} and others. The $\Gammaamma$-limit of the functional in
\varepsilonr{b1..}, where $W$ don't depend on $x$ explicitly, was obtained in
the general vectorial case by Ambrosio in \cite{ambrosio}. The
$\Gammaamma$-limit of the functional of form \varepsilonr{b2..}, where $n=1$ and
there exist $\alphalpha,\betaeta\in\varphiield{R}^m$ such that $W(h,x)=0$ if and only
if $h\in\{\alphalpha,\betaeta\}$, under some restriction on the explicit
dependence on $x$ of $G$ and $W$, was obtained by Fonseca and
Popovici in \cite{FonP}. The $\Gammaamma$-limit of the functional of
form \varepsilonr{b2..}, with $n=2$, $G(\cdot)/\varepsilon\varepsilonquiv\varepsilon^3|\nabla^2\psi|^2$
and $W$ which doesn't depend on $x$ explicitly, was found by
I.~Fonseca and C.~Mantegazza in \cite{FM}.
Note here that the particular cases of Theorems
\ref{dehgfrygfrgygenbgggggggggggggkgkgthtjtfnewbhjhjkgj2} and
\ref{dehgfrygfrgygenbgggggggggggggkgkgthtjtfnew}, where $n=1$ and
there exist $\alphalpha,\betaeta\in\varphiield{R}^m$ such that $W(h)=0$ if and only if
$h\in\{\alphalpha,\betaeta\}$, was obtained by Fonseca and Popovici in
\cite{FonP}.
The functionals of second order of the form \varepsilonr{b3..} arise, for
example, in the gradient theory of solid-solid phase transitions,
where one considers energies of the form
\betaegin{equation}\label{b3..part}
I_\varepsilon(v)=\int_\Omega \varepsilon|\nabla^2 v(x)|^2+\varphirac{1}{\varepsilon}W\Big(\nabla
v(x)\Big)dx\,,
\varepsilonnd{equation}
where $v:\Omega\sigmaubset\varphiield{R}^N\thetao\varphiield{R}^N$ stands for the deformation, and the
free energy density $W(F)$ is nonnegative and satisfies
$$W(F)=0\quad\thetaext{if and only if}\quad F\in K:=SO(N)A\cap SO(N)B\,.$$
Here $A$ and $B$ are two fixed, invertible matrices, such that
$rank(A-B)=1$ and $SO(N)$ is the set of rotations in $\varphiield{R}^N$. The
simpler case where $W(F)=0$ if and only if $F\in\{A,B\}$ was studied
by Conti, Fonseca and Leoni in \cite{contiFL}. The case of problem
\varepsilonr{b3..part}, where $N=2$ and $W(QF)=W(F)$ for all $Q\in SO(2)$ was
investigated by Conti and Schweizer in \cite{contiS1} (see also
\cite{contiS} for a related problem). Another important example of
the second order energy is the so called Aviles-Giga functional,
defined on scalar valued functions $v$ by
\betaegin{equation}\label{b5..}
\int_\Omega\varepsilon|\nabla^2 v|^2+\varphirac{1}{\varepsilon}\betaig(1-|\nabla
v|^2\betaig)^2\quad\quad\thetaext{(see \cite{adm})}.
\varepsilonnd{equation}
In the general form \varepsilonr{vfdhghfghddh} we also include the dependence
on $\Deltaiv$-free function $h$, which can be useful in the study of
problems with non-local terms as the The Rivi\`ere-Serfaty
functional and other functionals in Micromagnetics.
\varepsilonnd{comment}
In various applications we deal with the asymptotic behavior as $\varepsilon\to 0^+$ of a family of
functionals $\{I_\varepsilon\}_{\varepsilon>0}$
of the following forms.
\begin{itemize}
\item
In the case of the first order problem the functional $I_\varepsilon$, which
acts on functions $\psi:\Omega\to\mathbb{R}^m$, has the form
\begin{equation}\label{b1..}
I_\varepsilon(\psi)=\int_\Omega
\varepsilon\big|\nabla\psi(x)\big|^2+\frac{1}{\varepsilon}W\Big(\psi(x),x\Big)dx\,,
\end{equation}
or more generally
\begin{equation}\label{b2..}
I_\varepsilon(\psi)=\int_\Omega\frac{1}{\varepsilon}G\Big(\varepsilon^n\nabla\psi^n,\ldots,\varepsilon\nabla\psi,\psi,x\Big)dx
+\int_\Omega\frac{1}{\varepsilon}W\big(\psi,x\big)dx\,,
\end{equation}
where $G(0,\ldots,0,\psi,x)\equiv 0$.
\item In the case of the second order problem the functional $I_\varepsilon$,
which acts on functions $v:\Omega\to\mathbb{R}^k$, has the form
\begin{equation}\label{b3..}
I_\varepsilon(v)=\int_\Omega \varepsilon\big|\nabla^2 v(x)\big|^2+\frac{1}{\varepsilon}W\Big(\nabla
v(x),v(x),x\Big)dx\,,
\end{equation}
or more generally
\begin{equation}\label{b4..}
I_\varepsilon(v)=\int_\Omega\frac{1}{\varepsilon}G\Big(\varepsilon^n\nabla^{n+1}
v,\ldots,\varepsilon\nabla^2 v,\nabla
v,v,x\Big)dx+\int_\Omega\frac{1}{\varepsilon}W\big(\nabla v,v,x\big)dx\,,
\end{equation}
where $G(0,\ldots,0,\nabla v,v,x)\equiv 0$.
\end{itemize}
The functionals of the form \er{b1..} arise in the theories of phase
transitions and minimal surfaces. They were first studied by Modica
and Mortola \cite{mm1}, Modica \cite{modica}, Sternberg
\cite{sternberg} and others. The $\Gamma$-limit of the functional in
\er{b1..}, where $W$ doesn't depend on $x$ explicitly, was obtained in
the general vectorial case by Ambrosio in \cite{ambrosio}. The
$\Gamma$-limit of the functional of the form \er{b2..}, where $n=1$
and there exist $\alpha,\beta\in\mathbb{R}^m$ such that $W(h,x)=0$ if and
only if $h\in\{\alpha,\beta\}$, under some restriction on the
explicit dependence on $x$ of $G$ and $W$, was obtained by Fonseca
and Popovici in \cite{FonP}. The $\Gamma$-limit of the functional of
the form \er{b2..}, with $n=2$,
$G(\cdot)/\varepsilon\equiv\varepsilon^3|\nabla^2\psi|^2$ and $W$ which doesn't depend
on $x$ explicitly, was found by I.~Fonseca and C.~Mantegazza in
\cite{FM}.
\betaegin{comment}
Note here that the particular cases of Theorems
\ref{dehgfrygfrgygenbgggggggggggggkgkgthtjtfnewbhjhjkgj2} and
\ref{dehgfrygfrgygenbgggggggggggggkgkgthtjtfnew}, where $n=1$ and
there exist $\alphalpha,\betaeta\in\varphiield{R}^m$ such that $W(h)=0$ if and only if
$h\in\{\alphalpha,\betaeta\}$, was obtained by Fonseca and Popovici in
\cite{FonP}.
\varepsilonnd{comment}
The functionals of second order of the form \er{b3..} arise, for
example, in the gradient theory of solid-solid phase transitions,
where one considers energies of the form
\begin{equation}\label{b3..part}
I_\varepsilon(v)=\int_\Omega \varepsilon|\nabla^2 v(x)|^2+\frac{1}{\varepsilon}W\Big(\nabla
v(x)\Big)dx\,,
\end{equation}
where $v:\Omega\subset\mathbb{R}^N\to\mathbb{R}^N$ stands for the deformation, and the
free energy density $W(F)$ is nonnegative and satisfies
$$W(F)=0\quad\text{if and only if}\quad F\in K:=SO(N)A\cup SO(N)B\,.$$
Here $A$ and $B$ are two fixed, invertible matrices, such that
$\operatorname{rank}(A-B)=1$ and $SO(N)$ is the set of rotations in $\mathbb{R}^N$. The
simpler case where $W(F)=0$ if and only if $F\in\{A,B\}$ was studied
by Conti, Fonseca and Leoni in \cite{contiFL}. The case of problem
\er{b3..part}, where $N=2$ and $W(QF)=W(F)$ for all $Q\in SO(2)$ was
investigated by Conti and Schweizer in \cite{contiS1} (see also
\cite{contiS} for a related problem). Another important example of
the second order energy is the so called Aviles-Giga functional,
defined on scalar valued functions $v$ by
\begin{equation}\label{b5..}
\int_\Omega\varepsilon|\nabla^2 v|^2+\frac{1}{\varepsilon}\big(1-|\nabla
v|^2\big)^2\quad\quad\text{(see \cite{adm},\cite{ag1},\cite{ag2})}.
\end{equation}
In this paper we deal with the asymptotic behavior as $\varepsilon\to 0^+$ of
a family of functionals of the following general form: Let
$\Omega\subset{\mathbb{R}}^N$ be an open set.
For every $\varepsilon>0$ consider the general functional
\begin{multline}\label{fhjvjhvjhv}
I_{\varepsilon}(v)=\big\{I_{\varepsilon}(\Omega)\big\}(v):=
\int_{\Omega}\frac{1}{\varepsilon}G\Big(\varepsilon^n\nabla^n
v,\ldots,\varepsilon\nabla
v,v,x\Big)+\frac{1}{\varepsilon}W\big(v,x\big)dx\quad \text{with }
v:=(\nabla u,h,\psi),\\
\text{ where } u\in W^{(n+1),1}_{loc}(\Omega,{\mathbb{R}}^k),\;\,
h\in W^{n,1}_{loc}(\Omega,{\mathbb{R}}^{d\times N})\text{ s.t.
}\text{div } h\equiv 0,\;\,\psi\in
W^{n,1}_{loc}(\Omega,{\mathbb{R}}^m).
\end{multline}
Here
$$G:\mathbb{R}^{\big(\{k\times N\}+\{d\times N\}+m\big)\times N^n}\times\ldots\times\mathbb{R}^{\big(\{k\times N\}+\{d\times N\}+m\big)\times N}
\times\mathbb{R}^{\{k\times N\}+\{d\times N\}+m}\times\mathbb{R}^N\,\to\,\mathbb{R}$$ and
$W:\mathbb{R}^{\{k\times N\}+\{d\times N\}+m}\times\mathbb{R}^N\,\to\,\mathbb{R}$ are
nonnegative continuous functions and $G$ satisfies
$G(0,\ldots,0,v,x)\equiv 0$. The functionals in \er{b1..},\er{b2..}
and \er{b3..},\er{b4..} are important particular cases of the
general energy $I_\varepsilon$ in \er{fhjvjhvjhv}. In the general form
\er{fhjvjhvjhv} we also include the dependence on the $\mathrm{div}$-free
function $h$, which can be useful in the study of problems with
non-local terms as the Rivi\`ere-Serfaty functional and other
functionals in Micromagnetics.
In order to simplify the notations, for every open
$\mathcal{U}\subset\mathbb{R}^N$ consider
\begin{multline}\label{fhjvjhvjhvholhiohiovhhjhvvjvf}
\mathcal{B}(\mathcal{U}):=\bigg\{v\in
L^1_{loc}\big(\mathcal{U},{\mathbb{R}}^{k\times
N}\times{\mathbb{R}}^{d\times
N}\times{\mathbb{R}}^m\big):\;\;v=(\nabla u,h,\psi),\\u\in
W^{1,1}_{loc}(\mathcal{U},{\mathbb{R}}^k),\;\, h\in
L^{1}_{loc}(\mathcal{U},{\mathbb{R}}^{d\times N})\text{ s.t.
}\text{div } h\equiv 0,\;\,\psi\in
L^{1}_{loc}(\mathcal{U},{\mathbb{R}}^m)\bigg\},
\end{multline}
and
\begin{equation}\label{huighuihuiohhhiuohoh}
F\Big(\nabla^n v,\ldots,\nabla v,v,x\Big)\,:=\,G\Big(\nabla^n
v,\ldots,\nabla v,v,x\Big)\,+\,W(v,x).
\end{equation}
Then
\begin{equation}\label{fhjvjhvjhvnlhiohoioiiy}
I_{\varepsilon}(v)
=\int_{\Omega}\frac{1}{\varepsilon}F\Big(\varepsilon^n\nabla^n
v,\ldots,\varepsilon\nabla v,v,x\Big)dx\quad \text{with }
v\in \mathcal{B}(\Omega)\cap W^{n,1}_{loc}\big(\Omega,\mathbb{R}^{k\times
N}\times{\mathbb{R}}^{d\times N}\times{\mathbb{R}}^m\big).
\end{equation}
\betaegin{comment}
\betaegin{multline}\label{fhjvjhvjhvholhiohiovhhjhvvjvf}
\mathcal{B}(\Omegamega):=\betaigg\{v\in
W^{1,n}_{loc}\betaig(\Omegamega,{\mathbb{R}}^{k\thetaimes
N}\thetaimes{\mathbb{R}}^{d\thetaimes
N}\thetaimes{\mathbb{R}}^m\betaig):\;v:=(\nabla u,m,\psi),\\u\in
W^{(n+1),1}_{loc}(\Omegamega,{\mathbb{R}}^k),\;\, m\in
W^{n,1}_{loc}(\Omegamega,{\mathbb{R}}^{d\thetaimes N})\thetaext{ s.t. }\thetaext{div
} m\varepsilonquiv 0,\;\,\psi\in W^{n,1}_{loc}(\Omegamega,{\mathbb{R}}^m)\betaigg\},
\varepsilonnd{multline}
\varepsilonnd{comment}
What can we expect as the $\Gamma$-limit or at least as an upper
bound of these general energies in the $L^p$-topology for some $p\geq
1\,$? It is clear that if $G$ and $W$ are nonnegative and $W$ is a
function continuous in the argument $v$, then the upper bound for
$I_\varepsilon(\cdot)$ will be finite only if
\begin{equation}\label{hghiohoijojjkhhhjhjjkjgg}
W\big(v(x),x\big)=0\quad\text{for a.e.}\;\;x\in\Omega\,,
\end{equation}
i.e. if we define
\begin{equation}\label{cuyfyugugghvjjhh}
\mathcal{A}_0:=\bigg\{v\in L^p\big(\Omega,{\mathbb{R}}^{k\times
N}\times{\mathbb{R}}^{d\times
N}\times{\mathbb{R}}^m\big)\cap\mathcal{B}(\Omega):\;\,
W\big(v(x),x\big)=0\;\,\text{for a.e.}\,\;x\in\Omega\bigg\}
\end{equation}
and
\begin{equation}\label{cuyfyugugghvjjhhggihug}
\mathcal{A}:=\bigg\{v\in L^p\big(\Omega,{\mathbb{R}}^{k\times
N}\times{\mathbb{R}}^{d\times N}\times{\mathbb{R}}^m\big):\;\,
(\Gamma-\varlimsup_{\varepsilon\to 0^+}
I_\varepsilon)(v)<+\infty\bigg\},
\end{equation}
then clearly $\mathcal{A}\subset\mathcal{A}_0$. In most interesting
applications the set $\mathcal{A}_0$ consists of discontinuous
functions. The natural space of discontinuous functions is the $BV$
space. It turns out that in the general case if $G$ and $W$ are
$C^1$-functions and if we consider
\begin{equation}\label{hkghkgh}
\mathcal{A}_{BV}:=\mathcal{A}_0\cap\mathcal{B}(\mathbb{R}^N)\cap
BV\cap L^\infty,
\end{equation}
then
\begin{equation}\label{nnloilhyoih}
\mathcal{A}_{BV}\subset\mathcal{A}\subset\mathcal{A}_0.
\end{equation}
In many cases we have $\mathcal{A}_{BV}=\mathcal{A}$. For example
this is indeed the case if the energy $I_\varepsilon(v)$ has the
simplest form $I_\varepsilon(v)=\int_\Omega\varepsilon|\nabla
v|^2+\frac{1}{\varepsilon}W(v)\,dx$, and the set of zeros of $W$,
$\{h: W(h)=0\}$, is finite. However, this is in general not the case.
For example, as was shown by Ambrosio, De Lellis and Mantegazza in
\cite{adm}, $\mathcal{A}_{BV}\subsetneq\mathcal{A}$ in the
particular case of the energy defined by \eqref{b5..} with $N=2$. On
the other hand, there are many applications where the set
$\mathcal{A}$ still inherits some good properties of the $BV$ space. For
example, it is indeed the case for the energy \eqref{b5..} with
$N=2$, as was shown by Camillo De Lellis and Felix Otto in
\cite{CDFO}.
The main contribution of \cite{PI} was to improve our method (see
\cite{pol},\cite{polgen}) for finding upper bounds in the sense of
(\textbf{**}) for the general functional \er{fhjvjhvjhv} in the case
where the limiting function belongs to the $BV$-space, i.e. for
$v=(\nabla u,h,\psi)\in\mathcal{A}_{BV}$. In order to formulate the
main results of \cite{PI} and of this paper we present the following
definitions.
\begin{definition}
For every $\vec\nu\in S^{N-1}$ define
$Q(\vec\nu):=\big\{y\in{\mathbb{R}}^N:\;
-1/2<y\cdot\vec\nu_j<1/2\quad\forall j\big\}$, where
$\{\vec\nu_1,\ldots,\vec\nu_N\}$ is an orthonormal base in
${\mathbb{R}}^N$ such that $\vec\nu_1=\vec\nu$. Then set
\begin{multline*}
\mathcal{D}_1(v^+,v^-,\vec\nu):=\bigg\{v\in
C^n\big(\mathbb{R}^N,{\mathbb{R}}^{k\times
N}\times{\mathbb{R}}^{d\times N}\times{\mathbb{R}}^m\big)\cap
\mathcal{B}(\mathbb{R}^N):\\ v(y)\equiv \theta(\vec\nu\cdot
y)\;\,\text{and}\;\, v(y)=v^-\;\text{ if }\;y\cdot\vec\nu\leq
-1/2,\;\; v(y)=v^+\;\text{ if }\; y\cdot\vec\nu\geq 1/2\bigg\},
\end{multline*}
where $\mathcal{B}(\cdot)$ is defined in
\eqref{fhjvjhvjhvholhiohiovhhjhvvjvf}, and
\begin{multline*}
\mathcal{D}_{per}(v^+,v^-,\vec\nu):=\bigg\{v\in
C^n\big(\mathbb{R}^N,{\mathbb{R}}^{k\times
N}\times{\mathbb{R}}^{d\times N}\times{\mathbb{R}}^m\big)\cap \mathcal{B}(\mathbb{R}^N):\\
v(y)=v^-\;\text{ if }\;y\cdot\vec\nu\leq -1/2,\;\; v(y)=v^+\;\text{
if }\; y\cdot\vec\nu\geq 1/2,\;\, v(y+\vec\nu_j)=v(y)\;\;\forall
j=2,\ldots, N\bigg\}.
\end{multline*}
Next define
\begin{align}
\label{Energia1} E_{1}(v^+,v^-,\vec\nu,x)=\inf\bigg\{
\int\limits_{Q(\vec\nu)}\frac{1}{L}F\Big(L^n\nabla^nz_\eta,\ldots,L\nabla
z_\eta,z_\eta,x\Big)dy:\;\, L>0,\, z_\eta(y)\in
\mathcal{D}_1(v^+,v^-,\vec\nu)\bigg\}\,,\\
\label{Energia2} E_{per}(v^+,v^-,\vec\nu,x)=\inf\bigg\{
\int\limits_{Q(\vec\nu)}\frac{1}{L}F\Big(L^n\nabla^nz_\eta,\ldots,L\nabla
z_\eta,z_\eta,x\Big)dy:\;\, L>0,\, z_\eta(y)\in
\mathcal{D}_{per}(v^+,v^-,\vec\nu)\bigg\}\,,\\
\label{Energia3} E_{abst}(v^+,v^-,\vec\nu,x)=\Big(\Gamma-\varliminf_{
\varepsilon \to 0^+}
I_\varepsilon\big(Q(\vec\nu)\big)\Big)\Big(\eta(v^+,v^-,\vec\nu)\Big),
\end{align}
where
\begin{equation}\eta(v^+,v^-,\vec\nu)(y):=
\begin{cases}
v^-\quad\text{if }\vec\nu\cdot y<0,\\
v^+\quad\text{if }\vec\nu\cdot y>0,
\end{cases}
\end{equation}
and we mean the $\Gamma$-$\varliminf$ in the $L^p$ topology for some $p\geq
1$.
\end{definition}
It is not difficult to deduce that
\begin{equation}\label{dghfihtihotj}
E_{abst}(v^+,v^-,\vec\nu,x)\leq E_{per}(v^+,v^-,\vec\nu,x)\leq
E_{1}(v^+,v^-,\vec\nu,x).
\end{equation}
Next define the functionals
$K_1(\cdot),K_{per}(\cdot),K^{*}(\cdot):\mathcal{B}(\Omega)\cap BV\cap
L^\infty\,\to\,\field{R}$ by
\begin{equation}\label{hfighfighfih}
K_{1}(v):=
\begin{cases}
\int_{\Omega\cap
J_v}E_{1}\Big(v^+(x),v^-(x),\vec\nu_v(x),x\Big)\,d\mathcal{H}^{N-1}(x)\quad\text{if
}v\in \mathcal{A}_0,\\+\infty\quad\text{otherwise},
\end{cases}
\end{equation}
\begin{equation}\label{hfighfighfihgigiugi}
K_{per}(v):=
\begin{cases}
\int_{\Omega\cap
J_v}E_{per}\Big(v^+(x),v^-(x),\vec\nu_v(x),x\Big)\,d\mathcal{H}^{N-1}(x)\quad\text{if
}v\in \mathcal{A}_0,\\+\infty\quad\text{otherwise},
\end{cases}
\end{equation}
\begin{equation}\label{hfighfighfihhioh}
K^{*}(v):=
\begin{cases}
\int_{\Omega\cap
J_v}E_{abst}\Big(v^+(x),v^-(x),\vec\nu_v(x),x\Big)\,d\mathcal{H}^{N-1}(x)\quad\text{if
}v\in \mathcal{A}_0,\\+\infty\quad\text{otherwise},
\end{cases}
\end{equation}
where $J_v$ is the jump set of $v$, $\vec\nu_v$ is the jump vector,
and $v^-,v^+$ are the jump values of $v$.
\betaegin{comment}
In this paper we deal with the asymptotic behavior as $\varepsilon\thetao 0^+$ of a family of functionals
of the following general form:
\betaegin{multline}\label{vfdhghfghddh}
I_\varepsilon\Big(v(\cdot),h(\cdot),\psi(\cdot)\Big):=\\ \int_\Omega
\varphirac{1}{\varepsilon}G\betaigg( \betaig\{\varepsilon^n\nabla^{n+1}v,\varepsilon^n\nabla^n
h,\varepsilon^n\nabla^n\psi\betaig\},\ldots,\betaig\{\varepsilon\nabla^2v,\varepsilon\nabla
h,\varepsilon\nabla\psi\betaig\},\betaig\{\nabla v,h,\psi\betaig\},
v,x\betaigg)dx+\int_\Omega \varphirac{1}{\varepsilon}W\Big(\nabla v,h,\psi,v,x\Big)dx\\
\thetaext{for}\;\; v:\Omega\thetao\varphiield{R}^k,\;\;
\psi:\Omega\thetao\varphiield{R}^m\;\;\thetaext{and}\;\;h:\Omega\thetao\varphiield{R}^{d\thetaimes
N}\;\;\thetaext{s.t}\;\;\Deltaiv h\varepsilonquiv 0\,.
\varepsilonnd{multline}
Here $\Omega\sigmaubset\varphiield{R}^N$ is an open set and we assume that $G$ and $W$
are nonnegative continuous functions defined on
$$
\betaig\{\varphiield{R}^{k\thetaimes N^{n+1}}\thetaimes\varphiield{R}^{d\thetaimes
N^{n+1}}\thetaimes\varphiield{R}^{m\thetaimes
N^n}\betaig\}\thetaimes\ldots\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N\thetaimes
N}\thetaimes\varphiield{R}^{d\thetaimes N\thetaimes N}\thetaimes\varphiield{R}^{m\thetaimes
N}\betaig\}\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N}\thetaimes\varphiield{R}^{d\thetaimes
N}\thetaimes\varphiield{R}^{m}\betaig\}\thetaimes \varphiield{R}^k\thetaimes\varphiield{R}^N
$$
and $\varphiield{R}^{k\thetaimes N}\thetaimes\varphiield{R}^{d\thetaimes N}\thetaimes\varphiield{R}^{m}\thetaimes
\varphiield{R}^k\thetaimes\varphiield{R}^N$ respectively, and such that
\betaegin{equation}\label{gghiohiohioikbk}
G\Big(0,0,\ldots,0,\betaig\{a,b,c\betaig\},d,x\Big)\varepsilonquiv 0 \quad\thetaext{for
all}\;\;a\in\varphiield{R}^{k\thetaimes N},\;b\in\varphiield{R}^{d\thetaimes
N},\;c\in\varphiield{R}^m,\;d\in\varphiield{R}^k,\;x\in\varphiield{R}^N\,.
\varepsilonnd{equation}
The main contribution of \cite{PI} was to improve our method (see
\cite{pol},\cite{polgen}) for finding upper bounds in the sense of
({\betaf**}) for the general functional \varepsilonr{vfdhghfghddh} in the case
where the limiting functions $\nabla v,h,\psi$ belong to the class
$BV$.
What can we expect as reasonable upper bounds?
It is clear that if the non-negative function $W$ is continuous then
every upper bound $\omegaverlineerline{I}(v,h,\psi)$ of \varepsilonr{vfdhghfghddh},
under the $L^p$-convergence of $\nabla v,h,\psi$, will be finite
only if
\betaegin{equation}\label{hghiohoijoj}
W\Big(\nabla v(x),h(x),\psi(x),v(x),x\Big)\varepsilonquiv 0\quad\thetaext{for
a.e.}\;\;x\in\Omega\,.
\varepsilonnd{equation}
In order to formulate the main results of this paper we set
\betaegin{multline}\label{fihgfighfhj}
F\Big(\betaig\{\varepsilon^n\nabla^{n+1}v,\varepsilon^n\nabla^n
h,\varepsilon^n\nabla^n\psi\betaig\},\ldots,\betaig\{\varepsilon\nabla^2v,\varepsilon\nabla
h,\varepsilon\nabla\psi\betaig\},\betaig\{\nabla v,h,\psi\betaig\},
v,x\betaigg):=\\Gamma\Big(\betaig\{\varepsilon^n\nabla^{n+1}v,\varepsilon^n\nabla^n
h,\varepsilon^n\nabla^n\psi\betaig\},\ldots,\betaig\{\varepsilon\nabla^2v,\varepsilon\nabla
h,\varepsilon\nabla\psi\betaig\},\betaig\{\nabla v,h,\psi\betaig\},
v,x\betaigg)+W\Big(\nabla v,h,\psi,v,x\Big)
\varepsilonnd{multline}
and define the following functionals:
\betaegin{multline}\label{nvhfighf}
\betaegin{cases}
K^*\betaig(v(\cdot),h(\cdot),\psi(\cdot)\betaig):=\int_{\Omega\cap(J_{\nabla
v}\cup J_{h}\cup J_{\psi})}E^*\Big(\betaig\{\nabla
v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x,p,q_1,q_2,q_3\Big)d\mathcal{H}^{N-1}(x)
\\
K_1\betaig(v(\cdot),h(\cdot),\psi(\cdot)\betaig):=\int_{\Omega\cap (J_{\nabla
v}\cup J_{h}\cup J_{\psi})}E_1\Big(\betaig\{\nabla
v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x\Big)d\mathcal{H}^{N-1}(x)\\
K_{per}\betaig(v(\cdot),h(\cdot),\psi(\cdot)\betaig):=\int_{\Omega\cap(
J_{\nabla v}\cup J_{h}\cup J_{\psi})}E_{per}\Big(\betaig\{\nabla
v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x\Big)d\mathcal{H}^{N-1}(x)
\varepsilonnd{cases}
\\ \thetaext{for}\;\;v:\Omega\thetao\varphiield{R}^k,\;h:\Omega\thetao \varphiield{R}^{d\thetaimes N},\;\psi:\Omega\thetao\varphiield{R}^m\;\;\thetaext{s.t.}\;\nabla v,h,\psi\in
BV, \;\Deltaiv h\varepsilonquiv 0\;\;\thetaext{and}\;\;W\betaig(\nabla
v,h,\psi,v,x\betaig)\varepsilonquiv 0\,,
\varepsilonnd{multline}
where $J_{\nabla v}, J_{h}$ and $J_{\psi}$ are the jump sets of
$\nabla v,h$ and $\psi$,
$v_\ec\nu$ is the jump vector, $E_1(\cdot), E_{per}(\cdot)$ are
defined by
\betaegin{multline}\label{bkjguiguigbgbgg}
E_{per}\Big(\betaig\{\nabla v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x\Big):=\\
\inf\Bigg\{\int\limits_{I_{v_\ec
\nu}}\varphirac{1}{L}F\betaigg(\Big\{L^n\nabla^{n+1}\sigmaigma(y),L^n\nabla^n\thetaheta(y),
L^n\nabla^n\gammaamma(y)\Big\},\ldots,\Big\{\nabla\sigmaigma(y),\thetaheta(y),\gammaamma(y)\Big\},v(x),x\betaigg)dy:\;\;L>0,\\
\;\sigmaigma\in \mathcal{W}^{(1)}_{per}(x,\nabla v^+,\nabla v^-,v_\ec
\nu),\,\thetaheta\in \mathcal{W}^{(2)}_{per}(x,h^+,h^-,v_\ec
\nu),\,\gammaamma\in \mathcal{W}^{(3)}_{per}(x,\psi^+,\psi^-,v_\ec
\nu)\Bigg\}\,,
\varepsilonnd{multline}
\betaegin{multline}\label{bkjguiguigbgbgggtyhjh}
E_{1}\Big(\betaig\{\nabla v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x\Big):=\\
\inf\Bigg\{\int\limits_{I_{v_\ec
\nu}}\varphirac{1}{L}F\betaigg(\Big\{L^n\nabla^{n+1}\sigmaigma(y),L^n\nabla^n\thetaheta(y),
L^n\nabla^n\gammaamma(y)\Big\},\ldots,\Big\{\nabla\sigmaigma(y),\thetaheta(y),\gammaamma(y)\Big\},v(x),x\betaigg)dy:\;\;L>0,\\
\;\sigmaigma\in \mathcal{W}^{(1)}_{per}(x,\nabla v^+,\nabla v^-,v_\ec
\nu),\,\thetaheta\in \mathcal{W}^{(2)}_{per}(x,h^+,h^-,v_\ec
\nu),\,\gammaamma\in
\mathcal{W}^{(3)}_{per}(x,\psi^+,\psi^-,v_\ec \nu)\\
\thetaext{s.t}\;\;\nabla\sigmaigma(y)\varepsilonquiv \thetailde\sigmaigma(v_\ec\nu\cdot
y),\;\thetaheta(y)\varepsilonquiv \thetailde\thetaheta(v_\ec\nu\cdot y),\;\gammaamma(y)\varepsilonquiv
\thetailde\gammaamma(v_\ec\nu\cdot y)\Bigg\}\,.
\varepsilonnd{multline}
Here $\{v_\ec k_1,v_\ec k_2,\ldots v_\ec k_N\}$ is an orthonormal base
in $\varphiield{R}^N$ satisfying $v_\ec k_1=v_\ec\nu$,
\betaegin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjnkmklklkintjyk}
\mathcal{W}^{(1)}_{per}(x,\nabla v^+,\nabla v^-,v_\ec \nu):=
\betaigg\{u\in C^{n+1}(\varphiield{R}^N,\varphiield{R}^k):\;\;\nabla u(y)=\nabla
v^-\;\thetaext{ if }\;y\cdotv_\ec\nu\leq-1/2,\\
\nabla u(y)=\nabla v^+\;\thetaext{ if }\; y\cdotv_\ec\nu\gammaeq 1/2\;\thetaext{
and }\;\nabla u\betaig(y+v_\ec k_j\betaig)=\nabla u(y)\;\;\varphiorall
j=2,3,\ldots, N\betaigg\}\,,
\varepsilonnd{multline}
\betaegin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjhhhhklklklkintlul}
\mathcal{W}^{(2)}_{per}(x,h^+,h^-,v_\ec \nu):=\\
\betaigg\{\xi\in C^n(\varphiield{R}^N,\varphiield{R}^{d\thetaimes N}):\;\;div_y \xi(y)\varepsilonquiv
0,\;\;\xi(y)=h^-\;\thetaext{ if }\;y\cdotv_\ec\nu\leq-1/2,\\
\xi(y)=h^+\;\thetaext{ if }\; y\cdotv_\ec\nu\gammaeq 1/2\;\thetaext{ and
}\;\xi\betaig(y+v_\ec k_j\betaig)=\xi(y)\;\;\varphiorall j=2,3,\ldots,
N\betaigg\}\,,
\varepsilonnd{multline}
\betaegin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjddddkkjkjlkjlkintuuk}
\mathcal{W}^{(3)}_{per}(x,\psi^+,\psi^-,v_\ec \nu):=
\betaigg\{z_\eta\in C^n(\varphiield{R}^N,\varphiield{R}^m):\;\;z_\eta(y)=\psi^-\;\thetaext{ if }\;y\cdotv_\ec\nu\leq-1/2,\\
z_\eta(y)=\psi^+\;\thetaext{ if }\; y\cdotv_\ec\nu(x)\gammaeq 1/2\;\thetaext{ and
}\;z_\eta\betaig(y+v_\ec k_j\betaig)=z_\eta(y)\;\;\varphiorall j=2,3,\ldots,
N\betaigg\}\,,
\varepsilonnd{multline}
\betaegin{equation}\label{cubidepgghkkkkllllllljjjkkkkkjjjhhhhlkkkffffggggdddhhhjjkljjkljljlkjintuuy}
I_{v_\ec \nu}:= \Big\{y\in\varphiield{R}^N:\; |y\cdotv_\ec k_j|<1/2\quad\varphiorall
j=1,2,\ldots, N\Big\}\,.
\varepsilonnd{equation}
Furthermore,
\betaegin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkjgjgjgjhlllllkkkgenhgjkgggjhgjhgjbjkbjm}
E^*\Big(\betaig\{\nabla v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x,p,q_1,q_2,q_3\Big):=\\
\inf\Bigg\{\varliminf_{\varepsilon\thetao 0^+}\int_{I_{v_\ec \nu}}
\varphirac{1}{\varepsilon}F\betaigg(\Big\{\varepsilon^n\nabla^{n+1}\sigmaigma_\varepsilon(y),\varepsilon^n\nabla^n\thetaheta_\varepsilon(y),
\varepsilon^n\nabla^n\gammaamma_\varepsilon(y)\Big\},\ldots,\Big\{\nabla\sigmaigma_\varepsilon(y),\thetaheta_\varepsilon(y),\gammaamma_\varepsilon(y)\Big\},v(x),x\betaigg)dy:\\
\sigmaigma_\varepsilon\in W^{1,q_1}(I_{v_\ec \nu},\varphiield{R}^k)\cap W^{(n+1),p}(I_{v_\ec
\nu},\varphiield{R}^k),\;\thetaheta_\varepsilon\in L^{q_2}(I_{v_\ec \nu},\varphiield{R}^{d\thetaimes N})\cap
W^{n,p}(I_{v_\ec \nu},\varphiield{R}^{d\thetaimes N}),\\ \gammaamma_\varepsilon\in L^{q_3}(I_{v_\ec
\nu},\varphiield{R}^{m})\cap W^{n,p}(I_{v_\ec \nu},\varphiield{R}^m)\;\; \thetaext{s.t.}\;
\Deltaiv_y\thetaheta_\varepsilon(y)\varepsilonquiv
0,\;\nabla\sigmaigma_\varepsilon(y)\thetao\sigmaigma\betaig(y,\nabla v^+,\nabla
v^-,v_\ec\nu\betaig)\;\thetaext{in}\;L^{q_1}(I_{v_\ec \nu},\varphiield{R}^{k\thetaimes N}),\\
\thetaheta_\varepsilon(y)\thetao\thetaheta(y,h^+,h^-,v_\ec\nu)\;\thetaext{in}\;L^{q_2}(I_{v_\ec
\nu},\varphiield{R}^{d\thetaimes
N}),\;\gammaamma_\varepsilon(y)\thetao\gammaamma(y,\psi^+,\psi^-,v_\ec\nu)\;\thetaext{in}\;L^{q_3}(I_{v_\ec
\nu},\varphiield{R}^{m})
\Bigg\}\,,
\varepsilonnd{multline}
where
\betaegin{multline}\label{fhyffgfgfgfffgfgenkjgjgkgkgjhgjggjhgjffj}
\sigmaigma\betaig(y,\nabla v^+,\nabla v^-,v_\ec\nu\betaig):=\betaegin{cases}\nabla
v^+\quad\thetaext{if}\;\,y\cdotv_\ec \nu>0\,,\\
\nabla v^-\quad\thetaext{if}\;\,y\cdotv_\ec
\nu<0\,,\varepsilonnd{cases}\quad\thetaheta\betaig(y,h^+,h^-,v_\ec\nu\betaig):=\betaegin{cases}h^+\quad\thetaext{if}\;\,y\cdotv_\ec \nu>0\,,\\
h^-\quad\thetaext{if}\;\,y\cdotv_\ec
\nu<0\,,\varepsilonnd{cases}\\ \thetaext{and}\quad\gammaamma\betaig(y,\psi^+,\psi^-,v_\ec\nu\betaig):=\betaegin{cases}\psi^+\quad\thetaext{if}\;\,y\cdotv_\ec \nu>0\,,\\
\psi^-\quad\thetaext{if}\;\,y\cdotv_\ec \nu<0\,.\varepsilonnd{cases}
\varepsilonnd{multline}
Observe here that it is clear that
\betaegin{multline}\label{fgbjfohjfopdhfolkkk}
E^*\Big(\betaig\{\nabla v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x,p,q_1,q_2,q_3\Big)\leq\\
E_{per}\Big(\betaig\{\nabla v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x\Big)\leq E_{1}\Big(\betaig\{\nabla
v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x\Big)\,.
\varepsilonnd{multline}
\varepsilonnd{comment}
Then, by \er{dghfihtihotj}, it trivially follows that
\begin{equation}\label{nvhfighfrhyrtehu}
K^*\big(v\big)\leq K_{per}\big(v\big)\leq K_1\big(v\big)\,.
\end{equation}
We call $K_1(\cdot)$ the bound achieved by one-dimensional
profiles, $K_{per}(\cdot)$ the bound achieved by
multidimensional periodic profiles, and $K^*(\cdot)$ the bound
achieved by abstract profiles.
Our general conjecture is that $K^*(\cdot)$ coincides with the $\Gamma$-limit of the
functionals $I_\varepsilon(\cdot)$ in \er{fhjvjhvjhvnlhiohoioiiy}, under
$L^{p}$ convergence, in the case where the limiting function $v\in
BV\cap L^\infty$.
It is known that in the case of problem \er{b1..}, where $W\in
C^1$ does not depend on $x$ explicitly, this is indeed the case and,
moreover, in this case we have equalities in \er{nvhfighfrhyrtehu}
(see \cite{ambrosio}). The same result is also known for problem
\er{b5..} when $N=2$ (see \cite{adm} and \cite{CdL},\cite{pol}). It
is also the case for problem \er{b3..part}, where $W(F)=0$ if and
only if $F\in\{A,B\}$, studied by Conti, Fonseca and Leoni, if $W$
satisfies the additional hypothesis ($H_3$) of \cite{contiFL}.
However, as was shown there by an example, if we do not assume
hypothesis ($H_3$), then it is possible that $E_{per}\big(\nabla
v^+,\nabla v^-,\vec\nu\big)$ is strictly smaller than
$E_{1}\big(\nabla v^+,\nabla v^-,\vec\nu\big)$ and thus, in general,
$K_1(\cdot)$ can differ from the $\Gamma$-limit. In the same work it
was shown that if, instead of ($H_3$), we assume hypothesis ($H_5$),
then $K_{per}(\cdot)$ turns out to be equal to $K^*(\cdot)$ and the
$\Gamma$-limit of \er{b3..part} equals $K_{per}(\cdot)\equiv
K^*(\cdot)$. A similar result is also known for problem \er{b2..},
where $n=1$ and there exist $\alpha,\beta\in\field{R}^m$ such that
$W(h,x)=0$ if and only if $h\in\{\alpha,\beta\}$, under some
restrictions on the explicit dependence of $G$ and $W$ on $x$. As was
obtained by Fonseca and Popovici in \cite{FonP}, in this case we also
obtain that $K_{per}(\cdot)\equiv K^*(\cdot)$ is the $\Gamma$-limit
of \er{b2..}. In the case of problem \er{b3..part}, where $N=2$ and
$W(QF)=W(F)$ for all $Q\in SO(2)$, Conti and Schweizer in
\cite{contiS1} found that the $\Gamma$-limit equals $K^*(\cdot)$
(see also \cite{contiS} for a related problem). However, to our
knowledge, it is not known whether in general $K^*(\cdot)\equiv
K_{per}(\cdot)$.
In \cite{polgen} we showed that for the general problems \er{b2..}
and \er{b4..}, $K_1(\cdot)$ is an upper bound in the sense of
{\bf(**)}, if the limiting function belongs to the $BV$-class. However,
as we saw, this bound is not sharp in general. In \cite{PI} we
improved our method and obtained that for the general problem
\er{fhjvjhvjhvnlhiohoioiiy}, $K_{per}(\cdot)$ is always an upper
bound in the sense of {\bf(**)} in the case where the limiting
functions $v$ belong to the $BV$-space and $G,W\in C^1$. More precisely,
we have the following theorem:
\begin{theorem}\label{ffgvfgfhthjghgjhg}
Let $\Omega\subset\field{R}^N$ be an open set and let
$$F:\field{R}^{\big(\{k\times N\}+\{d\times N\}+m\big)\times N^n}\times\ldots\times\field{R}^{\big(\{k\times N\}+\{d\times N\}+m\big)\times N}
\times\field{R}^{\{k\times N\}+\{d\times N\}+m}\times\field{R}^N\,\to\,\field{R}$$ be a
nonnegative $C^1$ function. Furthermore assume that $v:=(\nabla
u,h,\psi)\in\mathcal{B}(\field{R}^N)\cap BV\big(\field{R}^N,\field{R}^{k\times
N}\times\field{R}^{d\times N}\times\field{R}^m\big)\cap
L^\infty\big(\field{R}^N,\field{R}^{k\times N}\times\field{R}^{d\times N}\times\field{R}^m\big)$
satisfies $\Div h\equiv 0$, $|Dv|(\partial\Omega)=0$ and
$$F\Big(0,\ldots,0,v(x),x\Big)=0\quad\text{for a.e.}\;\;x\in\Omega.$$
Then there exists a sequence $v_\varepsilon=\big(\nabla
u_\varepsilon,h_\varepsilon,\psi_\varepsilon\big)\in \mathcal{B}(\field{R}^N)\cap
C^\infty\big(\field{R}^N,\field{R}^{k\times N}\times\field{R}^{d\times N}\times\field{R}^m\big)$
such that $\Div h_\varepsilon\equiv 0$, for every $p\geq 1$ we have
$v_\varepsilon\to v$ in $L^p$, and
$$\lim_{\varepsilon\to 0^+}\int_{\Omega}\frac{1}{\varepsilon}F\Big(\varepsilon^n\nabla^n
v_\varepsilon(x),\ldots,\varepsilon\nabla v_\varepsilon(x)\,,\,v(x)\,,\,x\Big)dx=
K_{per}(v).$$ Here $\mathcal{B}(\field{R}^N)$ was defined by
\er{fhjvjhvjhvholhiohiovhhjhvvjvf} and $K_{per}(\cdot)$ was defined
by \er{hfighfighfihgigiugi}.
\end{theorem}
\betaegin{comment}
\betaegin{theorem}\label{ffgvfgfhthjghgjhg}
Let $G$ and $W$ be nonnegative $C^1$-functions defined on
$$
\betaig\{\varphiield{R}^{k\thetaimes N^{n+1}}\thetaimes\varphiield{R}^{d\thetaimes
N^{n+1}}\thetaimes\varphiield{R}^{m\thetaimes
N^n}\betaig\}\thetaimes\ldots\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N\thetaimes
N}\thetaimes\varphiield{R}^{d\thetaimes N\thetaimes N}\thetaimes\varphiield{R}^{m\thetaimes
N}\betaig\}\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N}\thetaimes\varphiield{R}^{d\thetaimes
N}\thetaimes\varphiield{R}^{m}\betaig\}\thetaimes \varphiield{R}^k\thetaimes\varphiield{R}^N
$$
and $\varphiield{R}^{k\thetaimes N}\thetaimes\varphiield{R}^{d\thetaimes N}\thetaimes\varphiield{R}^{m}\thetaimes
\varphiield{R}^k\thetaimes\varphiield{R}^N$ respectively, and $G$ satisfies
\betaegin{equation*}
G\Big(0,0,\ldots,0,\betaig\{a,b,c\betaig\},d,x\Big)\varepsilonquiv 0 \quad\thetaext{for
all}\;\;a\in\varphiield{R}^{k\thetaimes N},\;b\in\varphiield{R}^{d\thetaimes
N},\;c\in\varphiield{R}^m,\;d\in\varphiield{R}^k,\;x\in\varphiield{R}^N\,.
\varepsilonnd{equation*}
Then for every $v\in Lip(\varphiield{R}^N,\varphiield{R}^k)\cap L^1\cap L^\infty$ such that
$\nabla v\in BV$ and for every $h\in BV(\varphiield{R}^N,\varphiield{R}^{d\thetaimes N})\cap
L^\infty$ and $\psi\in BV(\varphiield{R}^N,\varphiield{R}^{m})\cap L^\infty$ satisfying
$\|D(\nabla v)\|(\partial\Omegamega)+\|D h\|(\partial\Omegamega)+\|D
\psi\|(\partial\Omegamega)=0$, $\Deltaiv h\varepsilonquiv 0$ in $\varphiield{R}^N$ and
\betaegin{equation*}
W\Big(\nabla v(x),v(x),h(x),\psi(x),x\Big)\varepsilonquiv 0\quad\thetaext{for
a.e.}\;\;x\in\Omega\,,
\varepsilonnd{equation*}
there exists sequences $\{v_\varepsilon\}_{0<\varepsilon<1}\sigmaubset
C^\infty(\varphiield{R}^N,\varphiield{R}^k)$, $\{h_\varepsilon\}_{0<\varepsilon<1}\sigmaubset
C^\infty(\varphiield{R}^N,\varphiield{R}^{d\thetaimes N})$ and $\{\psi_\varepsilon\}_{0<\varepsilon<1}\sigmaubset
C^\infty(\varphiield{R}^N,\varphiield{R}^{m})$
such that $div_x h_\varepsilon(x)\varepsilonquiv 0$ in $\varphiield{R}^N$,
$\int_\Omega\psi_\varepsilon\,dx=\int_\Omega \psi\,dx$, for every $p\gammaeq 1$ we have
$\lim_{\varepsilon\thetao0^+}v_\varepsilon=v$ in $W^{1,p}$, $\lim_{\varepsilon\thetao0^+}h_\varepsilon=h$ in
$L^{p}$, $\lim_{\varepsilon\thetao0^+}\psi_\varepsilon=\psi$ in $L^{p}$ and
\betaegin{multline}\label{hfhvdiofjdollk}
\lim_{\varepsilon\thetao 0^+}I_\varepsilon\betaig(v_\varepsilon,h_\varepsilon,\psi_\varepsilon\betaig):=\\ \lim_{\varepsilon\thetao
0^+}\int_\Omega \varphirac{1}{\varepsilon}F\betaigg(
\betaig\{\varepsilon^n\nabla^{n+1}v_\varepsilon,\varepsilon^n\nabla^n
h_\varepsilon,\varepsilon^n\nabla^n\psi_\varepsilon\betaig\},\ldots,\betaig\{\varepsilon\nabla^2v_\varepsilon,\varepsilon\nabla
h_\varepsilon,\varepsilon\nabla\psi_\varepsilon\betaig\},\betaig\{\nabla v_\varepsilon,h_\varepsilon,\psi_\varepsilon\betaig\},
v_\varepsilon,x\betaigg)dx\\ \leq K_{per}\betaig(v,h,\psi\betaig)\,,
\varepsilonnd{multline}
where $I_\varepsilon(\cdot)$ is defined by \varepsilonr{vfdhghfghddh}, $F$ is defined
by \varepsilonr{fihgfighfhj} and $K_{per}(\cdot)$ is defined by
\varepsilonr{nvhfighf}.
\varepsilonnd{theorem}
\varepsilonnd{comment}
The main result of this paper states that, for the general problem \er{fhjvjhvjhvnlhiohoioiiy},
when $G,W$ do not depend on $x$ explicitly, $K^*(\cdot)$ is a lower
bound in the sense of {\bf(*)}. More precisely, we have the
following theorem:
\begin{theorem}\label{dehgfrygfrgygenjklhhjkghhjggjfjkh}
Let $\Omega\subset\field{R}^N$ be an open set and let
$$F:\field{R}^{\big(\{k\times N\}+\{d\times N\}+m\big)\times N^n}\times\ldots\times\field{R}^{\big(\{k\times N\}+\{d\times N\}+m\big)\times N}
\times\field{R}^{\{k\times N\}+\{d\times N\}+m}\,\to\,\field{R}$$ be a nonnegative
continuous function. Furthermore assume that $v:=(\nabla
u,h,\psi)\in\mathcal{B}(\Omega)\cap BV\big(\Omega,\field{R}^{k\times
N}\times\field{R}^{d\times N}\times\field{R}^m\big)\cap
L^\infty\big(\Omega,\field{R}^{k\times N}\times\field{R}^{d\times N}\times\field{R}^m\big)$
satisfies
$$F\Big(0,\ldots,0,v(x)\Big)=0\quad\text{for a.e.}\;\;x\in\Omega.$$
Then for every $\{v_\varepsilon\}_{\varepsilon>0}\subset
\mathcal{B}(\Omega)\cap W^{n,1}_{loc}\big(\Omega,\field{R}^{k\times
N}\times\field{R}^{d\times N}\times\field{R}^m\big)$, such that $v_\varepsilon\to
v$ in $L^p$ as $\varepsilon\to 0^+$, we have $$\varliminf_{\varepsilon\to
0^+}\int_{\Omega}\frac{1}{\varepsilon}F\Big(\varepsilon^n\nabla^n
v_\varepsilon(x),\ldots,\varepsilon\nabla v_\varepsilon(x)\,,\,v(x)\Big)dx\geq
K^{*}(v).$$ Here $K^{*}(\cdot)$ is defined by \er{hfighfighfihhioh}
with respect to the $L^p$ topology.
\end{theorem}
For a slightly generalized formulation and additional details see
Theorem \ref{dehgfrygfrgygenjklhhj}. See also Theorem
\ref{dehgfrygfrgygen} for an analogous result for more general
functionals than those defined by \er{fhjvjhvjhvnlhiohoioiiy}.
\betaegin{comment}
\betaegin{theorem}\label{dehgfrygfrgygenjklhhjkghhjggjfjkh}
Let $G$ and $W$ be nonnegative continuous functions defined on
$$
\betaig\{\varphiield{R}^{k\thetaimes N^{n+1}}\thetaimes\varphiield{R}^{d\thetaimes
N^{n+1}}\thetaimes\varphiield{R}^{m\thetaimes
N^n}\betaig\}\thetaimes\ldots\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N\thetaimes
N}\thetaimes\varphiield{R}^{d\thetaimes N\thetaimes N}\thetaimes\varphiield{R}^{m\thetaimes
N}\betaig\}\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N}\thetaimes\varphiield{R}^{d\thetaimes
N}\thetaimes\varphiield{R}^{m}\betaig\},
$$
and $\varphiield{R}^{k\thetaimes N}\thetaimes\varphiield{R}^{d\thetaimes N}\thetaimes\varphiield{R}^{m}$ respectively,
\betaegin{equation*}
G\Big(0,0,\ldots,0,\betaig\{a,b,c\betaig\}\Big)\varepsilonquiv 0 \quad\thetaext{for
all}\;\;a\in\varphiield{R}^{k\thetaimes N},\;b\in\varphiield{R}^{d\thetaimes N},\;c\in\varphiield{R}^m\,.
\varepsilonnd{equation*}
Furthermore, let $q_1,q_2,q_3\gammaeq 1$, $p\gammaeq 1$ and $v(x)\in
W^{1,q_1}_{loc}(\Omega,\varphiield{R}^k)$, $h(x)\in L^{q_2}_{loc}(\Omega,\varphiield{R}^{d\thetaimes
N})$ and $\psi\in L^{q_3}_{loc}(\Omega,\varphiield{R}^{m})$ be such that $div_x
h(x)\varepsilonquiv 0$ in $\Omega$ and
$$W\Big(0,0,\ldots,0,\betaig\{\nabla v,h,\psi\betaig\}\Big)=0
\quad\thetaext{a.e.~in}\; \Omegamega\,.$$ Assume also that $\nabla
v,h,\psi\in BV_{loc}\cap L^\infty_{loc}$ or, more generally, there
exists a $\mathcal{H}^{N-1}$ $\sigmaigma$-finite Borel set $S\sigmaubset\Omega$
which we denote by $\Omega\cap (J_{\nabla v}\cup J_{h}\cup J_{\psi})$
and there exist Borel mappings $\nabla v^+(x):S\thetao\varphiield{R}^{k\thetaimes N}$,
$\nabla v^-(x):S\thetao\varphiield{R}^{k\thetaimes N}$, $h^+(x):S\thetao\varphiield{R}^{d\thetaimes N}$,
$h^-(x):S\thetao\varphiield{R}^{d\thetaimes N}$, $\psi^+(x):S\thetao\varphiield{R}^{m}$,
$\psi^-(x):S\thetao\varphiield{R}^{m}$ and $v_\ec \nu(x):S\thetao S^{N-1}$ such that for
every $x\in S$ we have
\betaegin{multline}\label{L2009surfhh8128odno888jjjjjkkkkkkgenhjjhjkjhjhgjhghkhkccb}
\lim\limits_{\rho\thetao 0^+}\varphirac{1}
{\mathcal{L}^N\betaig(B_\rho(x)\betaig)}\int_{B_\rho^+(x,v_\ec
\nu(x))}\Bigg(\Big|\nabla v(y)-\nabla
v^+(x)\Big|^{q_1}+\Big|h(y)-h^+(x)\Big|^{q_2}
+\Big|\psi(y)-\psi^+(x)\Big|^{q_3}\Bigg)\,dy=0\,,\\
\lim\limits_{\rho\thetao 0^+}\varphirac{1}
{\mathcal{L}^N\betaig(B_\rho(x)\betaig)}\int_{B_\rho^-(x,v_\ec
\nu(x))}\Bigg(\Big|\nabla v(y)-\nabla v^-(x)\Big|^{q_1}+\Big|h(y)-
h^-(x)\Big|^{q_2}+\Big|\psi(y)-\psi^-(x)\Big|^{q_3}\Bigg)\,dy=0\,.
\varepsilonnd{multline}
Then for every
$\{v_\varepsilon\}_{\varepsilon>0}\sigmaubset W^{1,q_1}_{loc}(\Omega,\varphiield{R}^k)\cap
W^{(n+1),p}_{loc}(\Omega,\varphiield{R}^k)$, $\{h_\varepsilon\}_{\varepsilon>0}\sigmaubset
L^{q_2}_{loc}(\Omega,\varphiield{R}^{d\thetaimes N})\cap W^{n,p}_{loc}(\Omega,\varphiield{R}^{d\thetaimes
N})$ and $\{\psi_\varepsilon\}_{\varepsilon>0}\sigmaubset L^{q_3}_{loc}(\Omega,\varphiield{R}^{m})\cap
W^{n,p}_{loc}(\Omega,\varphiield{R}^m)$ satisfying $div_x h_\varepsilon(x)\varepsilonquiv 0$ in $\Omega$,
$v_\varepsilon\thetao v$ in $W^{1,q_1}_{loc}(\Omega,\varphiield{R}^k)$ as $\varepsilon\thetao 0^+$, $h_\varepsilon\thetao
h$ in $L^{q_2}_{loc}(\Omega,\varphiield{R}^{d\thetaimes N})$ as $\varepsilon\thetao 0^+$ and
$\psi_\varepsilon\thetao \psi$ in $L^{q_3}_{loc}(\Omega,\varphiield{R}^{m})$, we have
\betaegin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkgenjbhghghggjkgkgkjgkgjkljgh}
\varliminf_{\varepsilon\thetao 0^+}I_\varepsilon\betaig(v_\varepsilon,h_\varepsilon,\psi_\varepsilon\betaig):=\\
\varliminf_{\varepsilon\thetao 0^+}\int\limits_{\Omega}\varphirac{1}{\varepsilon} F\betaigg(
\betaig\{\varepsilon^n\nabla^{n+1}v_{\varepsilon},\varepsilon^n\nabla^n
h_\varepsilon,\varepsilon^n\nabla^n\psi_\varepsilon\betaig\},\ldots,\betaig\{\varepsilon\nabla^2v_{\varepsilon},\varepsilon\nabla
h_\varepsilon,\,\varepsilon\nabla\psi_\varepsilon\betaig\},\,\betaig\{\nabla
v_{\varepsilon},h_\varepsilon,\psi_\varepsilon\betaig\}\betaigg)\,dx \gammaeq K^*\betaig(v,h,\psi\betaig)\,,
\varepsilonnd{multline}
where $F$ and $K^*(\cdot)$ are defined by \varepsilonr{fihgfighfhj} and
\varepsilonr{nvhfighf} respectively.
\varepsilonnd{theorem}
\varepsilonnd{comment}
As we saw, there is a natural question: whether in general
$K^*(\cdot)\equiv K_{per}(\cdot)\,$? An affirmative answer would mean that,
in the case when $G,W$ are $C^1$ functions which do not depend on $x$
explicitly, the upper bound of Theorem \ref{ffgvfgfhthjghgjhg}
coincides with the lower bound of Theorem
\ref{dehgfrygfrgygenjklhhjkghhjggjfjkh}, and therefore we would obtain
the full $\Gamma$-limit in the case of $BV$ limiting functions. The
equivalent question is whether
\begin{equation*}
E_{abst}(v^+,v^-,\vec\nu,x)= E_{per}(v^+,v^-,\vec\nu,x),
\end{equation*}
where $E_{per}(\cdot)$ is defined in \er{Energia2} and
$E_{abst}(\cdot)$ is defined by \er{Energia3}. In Section
\ref{vdhgvdfgbjfdhgf} we formulate and prove some partial results
that refer to this important question. In particular, we prove that
this is indeed the case for the general problem \er{b2..}, i.e.\ when
we have no prescribed differential constraint. More precisely, we
have the following theorem:
\begin{theorem}\label{dehgfrygfrgygenbgggggggggggggkgkgthtjtfnewbhjhjkgj}
Let $G\in C^1\big(\field{R}^{m\times N^n}\times\field{R}^{m\times
N^{(n-1)}}\times\ldots\times\field{R}^{m\times N}\times \field{R}^m,\field{R}\big)$ and
$W\in C^1(\field{R}^m,\field{R})$ be nonnegative functions such that
$G\big(0,0,\ldots,0,b\big)= 0$ for every $b\in\field{R}^m$ and there exist
$C>0$ and $p\geq 1$ satisfying
\begin{multline}\label{hgdfvdhvdhfvjjjjiiiuyyyjitghujtrnewkhjklhkl}
\frac{1}{C}|A|^p
\leq F\Big(A,a_1,\ldots,a_{n-1},b\Big) \leq
C\bigg(|A|^p+\sum_{j=1}^{n-1}|a_j|^{p}+|b|^p+1\bigg)\quad \text{for
every}\;\;\big(A,a_1,a_2,\ldots,a_{n-1},b\big),
\end{multline}
where we denote
$$F\Big(A,a_1,\ldots,a_{n-1},b\Big):=G\Big(A,a_1,\ldots,a_{n-1},b\Big)+W(b).$$
Next let $\psi\in BV(\field{R}^N,\field{R}^{m})\cap L^\infty$ be such that $\|D
\psi\|(\partial\Omega)=0$ and $W\big(\psi(x)\big)=0$ for a.e.
$x\in\Omega$.
Then $K^*(\psi)=K_{per}(\psi)$ and for every
$\{\varphi_\varepsilon\}_{\varepsilon>0}\subset W^{n,p}_{loc}(\Omega,\field{R}^m)$ such that
$\varphi_\varepsilon\to \psi$ in $L^p_{loc}(\Omega,\field{R}^m)$ as $\varepsilon\to 0^+$, we have
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkgenhjhhhhjtjurtnewjgvjhgv}
\varliminf_{\varepsilon\to 0^+}I_\varepsilon(\varphi_\varepsilon):=\varliminf_{\varepsilon\to
0^+}\frac{1}{\varepsilon}\int_\Omega F\bigg(\,\varepsilon^n\nabla^n
\varphi_\varepsilon(x),\,\varepsilon^{n-1}\nabla^{n-1}\varphi_\varepsilon(x),\,\ldots,\,\varepsilon\nabla \varphi_\varepsilon(x),\, \varphi_\varepsilon(x)\bigg)dx\\
\geq K_{per}(\psi):= \int_{\Omega\cap J_\psi}\bar
E_{per}\Big(\psi^+(x),\psi^-(x),\vec \nu(x)\Big)d \mathcal
H^{N-1}(x)\,,
\end{multline}
where
\begin{multline}\label{L2009hhffff12kkkhjhjghghgvgvggcjhggghtgjutnewjgkjgjk}
\bar E_{per}\Big(\psi^+,\psi^-,\vec \nu\Big)\;:=\;\\
\inf\Bigg\{\int_{Q_{\vec \nu}}\frac{1}{L} F\bigg(L^n\,\nabla^n
\zeta,\,L^{n-1}\,\nabla^{n-1} \zeta,\,\ldots,\,L\,\nabla
\zeta,\,\zeta\bigg)\,dx:\;\; L\in(0,+\infty)\,,\;\zeta\in
\mathcal{\tilde D}_{per}(\psi^+,\psi^-,\vec \nu)\Bigg\}\,,
\end{multline}
with
\begin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjddddkkjkjlkjlkintuukgkggk}
\mathcal{\tilde D}_{per}(\psi^+,\psi^-,\vec \nu):=
\bigg\{\zeta\in C^n(\field{R}^N,\field{R}^m):\;\;\zeta(y)=\psi^-\;\text{ if }\;y\cdot\vec\nu\leq-1/2,\\
\zeta(y)=\psi^+\;\text{ if }\; y\cdot\vec\nu\geq 1/2\;\text{ and
}\;\zeta\big(y+\vec k_j\big)=\zeta(y)\;\;\forall j=2,3,\ldots,
N\bigg\}\,.
\end{multline}
Here $Q_{\vec \nu}:=\{y\in\field{R}^N:\;|y\cdot \vec
\nu_j|<1/2\;\;\;\forall j=1,2\ldots N\}$, where $\{\vec \nu_1,\vec
\nu_2,\ldots,\vec \nu_N\}\subset\field{R}^N$ is an orthonormal base in
$\field{R}^N$ such that $\vec \nu_1:=\vec \nu$. Moreover, there exists a
sequence $\{\psi_\varepsilon\}_{\varepsilon>0}\subset C^\infty(\field{R}^N,\field{R}^m)$ such that
$\int_\Omega\psi_\varepsilon(x)dx=\int_\Omega \psi(x)dx$, for every $q\geq 1$ we have
$\psi_\varepsilon\to \psi$ in $L^q(\Omega,\field{R}^m)$ as $\varepsilon\to 0^+$, and we have
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkgenhjhhhhjtjurtgfhfhfjfjfjnewjkggujk}
\lim_{\varepsilon\to 0^+}I_\varepsilon(\psi_\varepsilon):=\lim_{\varepsilon\to 0^+}\frac{1}{\varepsilon}\int_\Omega
F\bigg(\,\varepsilon^n\nabla^n
\psi_\varepsilon(x),\,\varepsilon^{n-1}\nabla^{n-1}\psi_\varepsilon(x),\,\ldots,\,\varepsilon\nabla \psi_\varepsilon(x),\, \psi_\varepsilon(x)\bigg)dx\\
=K_{per}(\psi):= \int_{\Omega\cap J_\psi}\bar
E_{per}\Big(\psi^+(x),\psi^-(x),\vec \nu(x)\Big)d \mathcal
H^{N-1}(x)\,.
\end{multline}
\end{theorem}
See Theorem \ref{dehgfrygfrgygenbgggggggggggggkgkgthtjtfnew} for a
slightly generalized result.
\begin{remark}\label{vyuguigiugbuikkk}
In what follows we use some special notation and apply some basic
theorems about $BV$ functions. For the convenience of the reader we
collect this notation and these theorems in the Appendix.
\end{remark}
\betaegin{comment}
The asymptotic behavior, when $\varepsilon\thetao 0$ of the family
$\{I_\varepsilon\}_{\varepsilon>0}$ of the functionals
$I_\varepsilon(\phi):\mathcal{T}\thetao\varphiield{R}^+\cup\{0\}\cup\{+\infty\}$, where
$\mathcal{T}$ is a given metric space, is partially described by the
De Giorgi's $\Gammaamma$-limit, i.e.
$$I_0(\phi):=\inf\limits_{\{\varepsilon\},\{\phi_\varepsilon\}}\left\{\varliminf\limits_{\varepsilon\thetao 0^+}I_\varepsilon(\phi_\varepsilon):
\phi_\varepsilon\thetao\phi\thetaext{ in }\mathcal{T}\right\}\,.$$ Usually, for
finding the $\Gammaamma$-limit of $I_\varepsilon(\phi)$, we need to find two
bounds.
\betaegin{itemize}
\item[{\betaf(*)}] Firstly, we wish to find a lower bound, i.e. the functional
$\underline{I}(\phi)$ such that for every family
$\{\phi_\varepsilon\}_{\varepsilon>0}$, satisfying $\phi_\varepsilon\thetao \phi$ as $\varepsilon\thetao 0^+$,
we have $\varliminf_{\varepsilon\thetao 0^+}I_\varepsilon(\phi_\varepsilon)\gammaeq \underline{I}(\phi)$.
\item[{\betaf(**)}] Secondly, we wish to find an upper
bound, i.e. the functional $\omegaverlineerline{I}(\phi)$ such that there
exists the family $\{\psi_\varepsilon\}_{\varepsilon>0}$, satisfying $\psi_\varepsilon\thetao \phi$
as $\varepsilon\thetao 0^+$, and we have $\varlimsup_{\varepsilon\thetao 0^+}I_\varepsilon(\psi_\varepsilon)\leq
\omegaverlineerline{I}(\phi)$.
\item[{\betaf(***)}] If we obtain
$\underline{I}(\phi)=\omegaverlineerline{I}(\phi):=I(\phi)$, then $I(\phi)$
will be the $\Gammaamma$-limit of $I_\varepsilon(\phi)$.
\varepsilonnd{itemize}
In various applications we deal with the asymptotic behavior as $\varepsilon\thetao 0^+$ of a family of functionals
of the following general form:
\betaegin{multline}\label{vfdhghfghddh2}
I_\varepsilon\Big(v(\cdot),h(\cdot),\psi(\cdot)\Big):=\\ \int_\Omega
\varphirac{1}{\varepsilon}G\betaigg( \betaig\{\varepsilon^n\nabla^{n+1}v,\varepsilon^n\nabla^n
h,\varepsilon^n\nabla^n\psi\betaig\},\ldots,\betaig\{\varepsilon\nabla^2v,\varepsilon\nabla
h,\varepsilon\nabla\psi\betaig\},\betaig\{\nabla v,h,\psi\betaig\},
v,x\betaigg)dx+\int_\Omega \varphirac{1}{\varepsilon}W\Big(\nabla v,h,\psi,v,x\Big)dx\\
\thetaext{for}\;\; v:\Omega\thetao\varphiield{R}^k,\;\;
\psi:\Omega\thetao\varphiield{R}^m\;\;\thetaext{and}\;\;h:\Omega\thetao\varphiield{R}^{d\thetaimes
N}\;\;\thetaext{s.t}\;\;\Deltaiv h\varepsilonquiv 0\,.
\varepsilonnd{multline}
Here $\Omega\sigmaubset\varphiield{R}^N$ is an open set and we assume that $G$ and $W$
are nonnegative continuous functions defined on
$$
\betaig\{\varphiield{R}^{k\thetaimes N^{n+1}}\thetaimes\varphiield{R}^{d\thetaimes
N^{n+1}}\thetaimes\varphiield{R}^{m\thetaimes
N^n}\betaig\}\thetaimes\ldots\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N\thetaimes
N}\thetaimes\varphiield{R}^{d\thetaimes N\thetaimes N}\thetaimes\varphiield{R}^{m\thetaimes
N}\betaig\}\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N}\thetaimes\varphiield{R}^{d\thetaimes
N}\thetaimes\varphiield{R}^{m}\betaig\}\thetaimes \varphiield{R}^k\thetaimes\varphiield{R}^N
$$
and $\varphiield{R}^{k\thetaimes N}\thetaimes\varphiield{R}^{d\thetaimes N}\thetaimes\varphiield{R}^{m}\thetaimes
\varphiield{R}^k\thetaimes\varphiield{R}^N$ respectively, and such that
\betaegin{equation}\label{gghiohiohioikbk2}
G\Big(0,0,\ldots,0,\betaig\{a,b,c\betaig\},d,x\Big)\varepsilonquiv 0 \quad\thetaext{for
all}\;\;a\in\varphiield{R}^{k\thetaimes N},\;b\in\varphiield{R}^{d\thetaimes
N},\;c\in\varphiield{R}^m,\;d\in\varphiield{R}^k,\;x\in\varphiield{R}^N\,.
\varepsilonnd{equation}
We have the following important particular cases of the general
energy $I_\varepsilon$. First, there is the first-order problem, where the functional
$I_\varepsilon$, which acts on functions $\psi:\Omega\to\mathbb{R}^m$, has the form
\betaegin{equation}\label{b1..2}
I_\varepsilon(\psi)=\int_\Omega
\varepsilon\betaig|\nabla\psi(x)\betaig|^2+\varphirac{1}{\varepsilon}W\Big(\psi(x),x\Big)dx\,,
\varepsilonnd{equation}
or more generally
\begin{equation}\label{b2..2}
I_\varepsilon(\psi)=\int_\Omega\frac{1}{\varepsilon}G\Big(\varepsilon^n\nabla^n\psi,\ldots,\varepsilon\nabla\psi,\psi,x\Big)dx
+\int_\Omega\frac{1}{\varepsilon}W\big(\psi,x\big)dx\,.
\end{equation}
In the case of second order problems the functional $I_\varepsilon$, which
acts on functions $v:\Omega\thetao\varphiield{R}^k$, has the form
\betaegin{equation}\label{b3..2}
I_\varepsilon(v)=\int_\Omega \varepsilon\betaig|\nabla^2 v(x)\betaig|^2+\varphirac{1}{\varepsilon}W\Big(\nabla
v(x),v(x),x\Big)dx\,,
\varepsilonnd{equation}
or more generally
\betaegin{equation}\label{b4..2}
I_\varepsilon(v)=\int_\Omega\varphirac{1}{\varepsilon}G\Big(\varepsilon^n\nabla^{n+1}
v,\ldots,\varepsilon\nabla^2 v,\nabla
v,v,x\Big)dx+\int_\Omega\varphirac{1}{\varepsilon}W\betaig(\nabla v,v,x\betaig)dx\,.
\varepsilonnd{equation}
The functionals of the form \er{b1..2} arise in the theories of phase
transitions and minimal surfaces. They were first studied by Modica
and Mortola \cite{mm1}, Modica \cite{modica}, Sternberg
\cite{sternberg} and others. The $\Gamma$-limit of the functional in
\er{b1..2}, where $W$ does not depend on $x$ explicitly, was obtained
in the general vectorial case by Ambrosio in \cite{ambrosio}. The
$\Gammaamma$-limit of the functional of form \varepsilonr{b2..2}, where $n=1$ and
there exist $\alphalpha,\betaeta\in\varphiield{R}^m$ such that $W(h,x)=0$ if and only
if $h\in\{\alphalpha,\betaeta\}$, under some restriction on the explicit
dependence on $x$ of $G$ and $W$, was obtained by Fonseca and
Popovici in \cite{FonP}. The $\Gammaamma$-limit of the functional of
form \varepsilonr{b2..2}, with $n=2$, $G(\cdot)/\varepsilon\varepsilonquiv\varepsilon^3|\nabla^2\psi|^2$
and $W$ which doesn't depend on $x$ explicitly, was found by
I.~Fonseca and C.~Mantegazza in \cite{FM}.
The functionals of second order of the form \varepsilonr{b3..2} arise, for
example, in the gradient theory of solid-solid phase transitions,
where one considers energies of the form
\betaegin{equation}\label{b3..part2}
I_\varepsilon(v)=\int_\Omega \varepsilon|\nabla^2 v(x)|^2+\varphirac{1}{\varepsilon}W\Big(\nabla
v(x)\Big)dx\,,
\varepsilonnd{equation}
where $v:\Omega\sigmaubset\varphiield{R}^N\thetao\varphiield{R}^N$ stands for the deformation, and the
free energy density $W(F)$ is nonnegative and satisfies
$$W(F)=0\quad\text{if and only if}\quad F\in K:=SO(N)A\cup SO(N)B\,.$$
Here $A$ and $B$ are two fixed, invertible matrices, such that
$rank(A-B)=1$ and $SO(N)$ is the set of rotations in $\varphiield{R}^N$. The
simpler case where $W(F)=0$ if and only if $F\in\{A,B\}$ was studied
by Conti, Fonseca and Leoni in \cite{contiFL}. The case of problem
\varepsilonr{b3..part2}, where $N=2$ and $W(QF)=W(F)$ for all $Q\in SO(2)$
was investigated by Conti and Schweizer in \cite{contiS1} (see also
\cite{contiS} for a related problem). Another important example of
the second order energy is the so called Aviles-Giga functional,
defined on scalar valued functions $v$ by
\betaegin{equation}\label{b5..2}
\int_\Omega\varepsilon|\nabla^2 v|^2+\varphirac{1}{\varepsilon}\betaig(1-|\nabla
v|^2\betaig)^2\quad\quad\thetaext{(see \cite{adm})}.
\varepsilonnd{equation}
In the general form \er{vfdhghfghddh2} we also include the
dependence on the divergence-free function $h$, which can be useful in the
study of problems with non-local terms, such as the Rivi\`ere-Serfaty
functional and other functionals in micromagnetics.
\varepsilonnd{comment}
\betaegin{comment}
The asymptotic behavior, when $\varepsilon\thetao 0$ of the family
$\{I_\varepsilon\}_{\varepsilon>0}$ of the functionals
$I_\varepsilon(\phi):\mathcal{T}\thetao\varphiield{R}^+\cup\{0\}\cup\{+\infty\}$, where
$\mathcal{T}$ is a given metric space, is partially described by the
De Giorgi's $\Gammaamma$-limit, i.e.
$$I_0(\phi):=\inf\limits_{\{\varepsilon\},\{\phi_\varepsilon\}}\left\{\varliminf\limits_{\varepsilon\thetao 0^+}I_\varepsilon(\phi_\varepsilon):
\phi_\varepsilon\thetao\phi\thetaext{ in }\mathcal{T}\right\}\,.$$ It is well known
that if $\betaar\phi_\varepsilon$ are minimizers for $I_\varepsilon$ and if
$\betaar\phi_\varepsilon\thetao\betaar\phi$ in $\mathcal{T}$ as $\varepsilon\thetao 0$ then $\phi$
is a minimizer of $I_0$. Usually, for finding the $\Gammaamma$-limit of
$I_\varepsilon(\phi)$, we need to find two bounds.
\betaegin{itemize}
\item[{\betaf(*)}] Firstly, we wish to find a lower bound, i.e. the functional
$\underline{I}(\phi)$ such that for every family
$\{\phi_\varepsilon\}_{\varepsilon>0}$, satisfying $\phi_\varepsilon\thetao \phi$ as $\varepsilon\thetao 0^+$,
we have $\varliminf_{\varepsilon\thetao 0^+}I_\varepsilon(\phi_\varepsilon)\gammaeq \underline{I}(\phi)$.
\item[{\betaf(**)}] Secondly, we wish to find an upper
bound, i.e. the functional $\omegaverlineerline{I}(\phi)$ such that there
exists the family $\{\psi_\varepsilon\}_{\varepsilon>0}$, satisfying $\psi_\varepsilon\thetao \phi$
as $\varepsilon\thetao 0^+$, and we have $\varlimsup_{\varepsilon\thetao 0^+}I_\varepsilon(\psi_\varepsilon)\leq
\omegaverlineerline{I}(\phi)$.
\item[{\betaf(***)}] If we obtain
$\underline{I}(\phi)=\omegaverlineerline{I}(\phi):=I(\phi)$, then $I(\phi)$
will be the $\Gammaamma$-limit of $I_\varepsilon(\phi)$.
\varepsilonnd{itemize}
In various applications we deal with the asymptotic behavior as $\varepsilon\thetao 0^+$ of a family of
functionals $\{I_\varepsilon\}_{\varepsilon>0}$
of the following forms.
\betaegin{itemize}
\item
In the case of the first order problem the functional $I_\varepsilon$, which
acts on functions $\psi:\Omega\thetao\varphiield{R}^m$, has the form
\betaegin{equation}\label{b1..2}
I_\varepsilon(\psi)=\int_\Omega
\varepsilon\betaig|\nabla\psi(x)\betaig|^2+\varphirac{1}{\varepsilon}W\Big(\psi(x),x\Big)dx\,,
\varepsilonnd{equation}
or more generally
\begin{equation}\label{b2..2}
I_\varepsilon(\psi)=\int_\Omega\frac{1}{\varepsilon}G\Big(\varepsilon^n\nabla^n\psi,\ldots,\varepsilon\nabla\psi,\psi,x\Big)dx
+\int_\Omega\frac{1}{\varepsilon}W\big(\psi,x\big)dx\,,
\end{equation}
where $G(0,\ldots,0,\psi,x)\varepsilonquiv 0$.
\item In the case of the second order problem the functional $I_\varepsilon$,
which acts on functions $v:\Omega\thetao\varphiield{R}^k$, has the form
\betaegin{equation}\label{b3..2}
I_\varepsilon(v)=\int_\Omega \varepsilon\betaig|\nabla^2 v(x)\betaig|^2+\varphirac{1}{\varepsilon}W\Big(\nabla
v(x),v(x),x\Big)dx\,,
\varepsilonnd{equation}
or more generally
\betaegin{equation}\label{b4..2}
I_\varepsilon(v)=\int_\Omega\varphirac{1}{\varepsilon}G\Big(\varepsilon^n\nabla^{n+1}
v,\ldots,\varepsilon\nabla^2 v,\nabla
v,v,x\Big)dx+\int_\Omega\varphirac{1}{\varepsilon}W\betaig(\nabla v,v,x\betaig)dx\,,
\varepsilonnd{equation}
where $G(0,\ldots,0,\nabla v,v,x)\varepsilonquiv 0$.
\varepsilonnd{itemize}
The functionals of the form \er{b1..2} arise in the theories of phase
transitions and minimal surfaces. They were first studied by Modica
and Mortola \cite{mm1}, Modica \cite{modica}, Sternberg
\cite{sternberg} and others. The $\Gamma$-limit of the functional in
\er{b1..2}, where $W$ does not depend on $x$ explicitly, was obtained
in the general vectorial case by Ambrosio in \cite{ambrosio}. The
$\Gammaamma$-limit of the functional of the form \varepsilonr{b2..2}, where $n=1$
and there exist $\alphalpha,\betaeta\in\varphiield{R}^m$ such that $W(h,x)=0$ if and
only if $h\in\{\alphalpha,\betaeta\}$, under some restriction on the
explicit dependence on $x$ of $G$ and $W$, was obtained by Fonseca
and Popovici in \cite{FonP}. The $\Gammaamma$-limit of the functional of
the form \varepsilonr{b2..2}, with $n=2$,
$G(\cdot)/\varepsilon\varepsilonquiv\varepsilon^3|\nabla^2\psi|^2$ and $W$ which doesn't depend
on $x$ explicitly, was found by I.~Fonseca and C.~Mantegazza in
\cite{FM}.
Note here that the particular cases of Theorems
\ref{dehgfrygfrgygenbgggggggggggggkgkgthtjtfnewbhjhjkgj2} and
\ref{dehgfrygfrgygenbgggggggggggggkgkgthtjtfnew}, where $n=1$ and
there exist $\alpha,\beta\in\mathbb{R}^m$ such that $W(h)=0$ if and only if
$h\in\{\alpha,\beta\}$, were obtained by Fonseca and Popovici in
\cite{FonP}.
The functionals of second order of the form \varepsilonr{b3..2} arise, for
example, in the gradient theory of solid-solid phase transitions,
where one considers energies of the form
\betaegin{equation}\label{b3..part2}
I_\varepsilon(v)=\int_\Omega \varepsilon|\nabla^2 v(x)|^2+\varphirac{1}{\varepsilon}W\Big(\nabla
v(x)\Big)dx\,,
\varepsilonnd{equation}
where $v:\Omega\sigmaubset\varphiield{R}^N\thetao\varphiield{R}^N$ stands for the deformation, and the
free energy density $W(F)$ is nonnegative and satisfies
$$W(F)=0\quad\thetaext{if and only if}\quad F\in K:=SO(N)A\cup SO(N)B\,.$$
Here $A$ and $B$ are two fixed, invertible matrices, such that
$rank(A-B)=1$ and $SO(N)$ is the set of rotations in $\varphiield{R}^N$. The
simpler case where $W(F)=0$ if and only if $F\in\{A,B\}$ was studied
by Conti, Fonseca and Leoni in \cite{contiFL}. The case of problem
\varepsilonr{b3..part2}, where $N=2$ and $W(QF)=W(F)$ for all $Q\in SO(2)$
was investigated by Conti and Schweizer in \cite{contiS1} (see also
\cite{contiS} for a related problem). Another important example of
the second order energy is the so called Aviles-Giga functional,
defined on scalar valued functions $v$ by
\betaegin{equation}\label{b5..2}
\int_\Omega\varepsilon|\nabla^2 v|^2+\varphirac{1}{\varepsilon}\betaig(1-|\nabla
v|^2\betaig)^2\quad\quad\thetaext{(see \cite{adm})}.
\varepsilonnd{equation}
In this paper we deal with the asymptotic behavior as $\varepsilon\thetao 0^+$ of a family of functionals
of the following general form:
\betaegin{multline}\label{vfdhghfghddh2}
I_\varepsilon\Big(v(\cdot),h(\cdot),\psi(\cdot)\Big):=\\ \int_\Omega
\varphirac{1}{\varepsilon}G\betaigg( \betaig\{\varepsilon^n\nabla^{n+1}v,\varepsilon^n\nabla^n
h,\varepsilon^n\nabla^n\psi\betaig\},\ldots,\betaig\{\varepsilon\nabla^2v,\varepsilon\nabla
h,\varepsilon\nabla\psi\betaig\},\betaig\{\nabla v,h,\psi\betaig\},
v,x\betaigg)dx+\int_\Omega \varphirac{1}{\varepsilon}W\Big(\nabla v,h,\psi,v,x\Big)dx\\
\thetaext{for}\;\; v:\Omega\thetao\varphiield{R}^k,\;\;
\psi:\Omega\thetao\varphiield{R}^m\;\;\thetaext{and}\;\;h:\Omega\thetao\varphiield{R}^{d\thetaimes
N}\;\;\thetaext{s.t}\;\;\Deltaiv h\varepsilonquiv 0\,.
\varepsilonnd{multline}
Here $\Omega\sigmaubset\varphiield{R}^N$ is an open set and we assume that $G$ and $W$
are nonnegative continuous functions defined on
$$
\betaig\{\varphiield{R}^{k\thetaimes N^{n+1}}\thetaimes\varphiield{R}^{d\thetaimes
N^{n+1}}\thetaimes\varphiield{R}^{m\thetaimes
N^n}\betaig\}\thetaimes\ldots\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N\thetaimes
N}\thetaimes\varphiield{R}^{d\thetaimes N\thetaimes N}\thetaimes\varphiield{R}^{m\thetaimes
N}\betaig\}\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N}\thetaimes\varphiield{R}^{d\thetaimes
N}\thetaimes\varphiield{R}^{m}\betaig\}\thetaimes \varphiield{R}^k\thetaimes\varphiield{R}^N
$$
and $\varphiield{R}^{k\thetaimes N}\thetaimes\varphiield{R}^{d\thetaimes N}\thetaimes\varphiield{R}^{m}\thetaimes
\varphiield{R}^k\thetaimes\varphiield{R}^N$ respectively, and such that
\betaegin{equation}\label{gghiohiohioikbk2}
G\Big(0,0,\ldots,0,\betaig\{a,b,c\betaig\},d,x\Big)\varepsilonquiv 0 \quad\thetaext{for
all}\;\;a\in\varphiield{R}^{k\thetaimes N},\;b\in\varphiield{R}^{d\thetaimes
N},\;c\in\varphiield{R}^m,\;d\in\varphiield{R}^k,\;x\in\varphiield{R}^N\,.
\varepsilonnd{equation}
The functionals in \varepsilonr{b1..2},\varepsilonr{b2..2} and \varepsilonr{b3..2},\varepsilonr{b4..2}
are important particular cases of the general energy $I_\varepsilon$ in
\varepsilonr{vfdhghfghddh2}. In the general form \varepsilonr{vfdhghfghddh2} we also
include the dependence on $\Deltaiv$-free function $h$, which can be
useful in the study of problems with non-local terms as the
Rivi\`ere-Serfaty functional and other functionals in
Micromagnetics.
The main contribution of \cite{PI} was to improve our method (see \cite{pol},\cite{polgen}) for
finding upper bounds in the sense of ({\betaf**}) for the general
functional \varepsilonr{vfdhghfghddh2} in the case where the limiting
functions $\nabla v,h,\psi$ belong to the class $BV$.
What can we expect as reasonable upper bounds?
It is clear that if the non-negative function $W$ is continuous then
every upper bound $\omegaverlineerline{I}(v,h,\psi)$ of \varepsilonr{vfdhghfghddh2},
under the $L^p$-convergence of $\nabla v,h,\psi$, will be finite
only if
\betaegin{equation}\label{hghiohoijoj2}
W\Big(\nabla v(x),h(x),\psi(x),v(x),x\Big)\varepsilonquiv 0\quad\thetaext{for
a.e.}\;\;x\in\Omega\,.
\varepsilonnd{equation}
In order to formulate the main results of this paper we set
\betaegin{multline}\label{fihgfighfhj2}
F\Big(\betaig\{\varepsilon^n\nabla^{n+1}v,\varepsilon^n\nabla^n
h,\varepsilon^n\nabla^n\psi\betaig\},\ldots,\betaig\{\varepsilon\nabla^2v,\varepsilon\nabla
h,\varepsilon\nabla\psi\betaig\},\betaig\{\nabla v,h,\psi\betaig\},
v,x\betaigg):=\\Gamma\Big(\betaig\{\varepsilon^n\nabla^{n+1}v,\varepsilon^n\nabla^n
h,\varepsilon^n\nabla^n\psi\betaig\},\ldots,\betaig\{\varepsilon\nabla^2v,\varepsilon\nabla
h,\varepsilon\nabla\psi\betaig\},\betaig\{\nabla v,h,\psi\betaig\},
v,x\betaigg)+W\Big(\nabla v,h,\psi,v,x\Big)
\varepsilonnd{multline}
and define the following functionals:
\betaegin{multline}\label{nvhfighf2}
\betaegin{cases}
K^*\betaig(v(\cdot),h(\cdot),\psi(\cdot)\betaig):=\int_{\Omega\cap (J_{\nabla
v}\cup J_{h}\cup J_{\psi})}E^*\Big(\betaig\{\nabla
v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x,p,q_1,q_2,q_3\Big)d\mathcal{H}^{N-1}(x)
\\
K_1\betaig(v(\cdot),h(\cdot),\psi(\cdot)\betaig):=\int_{\Omega\cap (J_{\nabla
v}\cup J_{h}\cup J_{\psi})}E_1\Big(\betaig\{\nabla
v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x\Big)d\mathcal{H}^{N-1}(x)\\
K_{per}\betaig(v(\cdot),h(\cdot),\psi(\cdot)\betaig):=\int_{\Omega\cap
(J_{\nabla v}\cup J_{h}\cup J_{\psi})}E_{per}\Big(\betaig\{\nabla
v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x\Big)d\mathcal{H}^{N-1}(x)
\varepsilonnd{cases}
\\ \thetaext{for}\;\;v:\Omega\thetao\varphiield{R}^k,\;h:\Omega\thetao \varphiield{R}^{d\thetaimes N},\;\psi:\Omega\thetao\varphiield{R}^m\;\;\thetaext{s.t.}\;\nabla v,h,\psi\in
BV, \;\Deltaiv h\varepsilonquiv 0\;\;\thetaext{and}\;\;W\betaig(\nabla
v,h,\psi,v,x\betaig)\varepsilonquiv 0\,,
\varepsilonnd{multline}
where $J_{\nabla v}, J_{h}, J_{\psi}$ are the jump sets of
$\nabla v,h,\psi$,
$\vec\nu$ is the jump vector, $E_1(\cdot), E_{per}(\cdot)$ are
defined by
\betaegin{multline}\label{bkjguiguigbgbgg2}
E_{per}\Big(\betaig\{\nabla v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x\Big):=\\
\inf\Bigg\{\int\limits_{I_{v_\ec
\nu}}\varphirac{1}{L}F\betaigg(\Big\{L^n\nabla^{n+1}\sigmaigma(y),L^n\nabla^n\thetaheta(y),
L^n\nabla^n\gammaamma(y)\Big\},\ldots,\Big\{\nabla\sigmaigma(y),\thetaheta(y),\gammaamma(y)\Big\},v(x),x\betaigg)dy:\;\;L>0,\\
\;\sigmaigma\in \mathcal{W}^{(1)}_{per}(x,\nabla v^+,\nabla v^-,v_\ec
\nu),\,\thetaheta\in \mathcal{W}^{(2)}_{per}(x,h^+,h^-,v_\ec
\nu),\,\gammaamma\in \mathcal{W}^{(3)}_{per}(x,\psi^+,\psi^-,v_\ec
\nu)\Bigg\}\,,
\varepsilonnd{multline}
\betaegin{multline}\label{bkjguiguigbgbgggtyhjh2}
E_{1}\Big(\betaig\{\nabla v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x\Big):=\\
\inf\Bigg\{\int\limits_{I_{v_\ec
\nu}}\varphirac{1}{L}F\betaigg(\Big\{L^n\nabla^{n+1}\sigmaigma(y),L^n\nabla^n\thetaheta(y),
L^n\nabla^n\gammaamma(y)\Big\},\ldots,\Big\{\nabla\sigmaigma(y),\thetaheta(y),\gammaamma(y)\Big\},v(x),x\betaigg)dy:\;\;L>0,\\
\;\sigmaigma\in \mathcal{W}^{(1)}_{per}(x,\nabla v^+,\nabla v^-,v_\ec
\nu),\,\thetaheta\in \mathcal{W}^{(2)}_{per}(x,h^+,h^-,v_\ec
\nu),\,\gammaamma\in
\mathcal{W}^{(3)}_{per}(x,\psi^+,\psi^-,v_\ec \nu)\\
\thetaext{s.t}\;\;\nabla\sigmaigma(y)\varepsilonquiv \thetailde\sigmaigma(v_\ec\nu\cdot
y),\;\thetaheta(y)\varepsilonquiv \thetailde\thetaheta(v_\ec\nu\cdot y),\;\gammaamma(y)\varepsilonquiv
\thetailde\gammaamma(v_\ec\nu\cdot y)\Bigg\}\,.
\varepsilonnd{multline}
Here $\{v_\ec k_1,v_\ec k_2,\ldots v_\ec k_N\}$ is an orthonormal base
in $\varphiield{R}^N$ satisfying $v_\ec k_1=v_\ec\nu$,
\betaegin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjnkmklklkintjyk2}
\mathcal{W}^{(1)}_{per}(x,\nabla v^+,\nabla v^-,v_\ec \nu):=
\betaigg\{u\in C^{n+1}(\varphiield{R}^N,\varphiield{R}^k):\;\;\nabla u(y)=\nabla
v^-\;\thetaext{ if }\;y\cdotv_\ec\nu\leq-1/2,\\
\nabla u(y)=\nabla v^+\;\thetaext{ if }\; y\cdotv_\ec\nu\gammaeq 1/2\;\thetaext{
and }\;\nabla u\betaig(y+v_\ec k_j\betaig)=\nabla u(y)\;\;\varphiorall
j=2,3,\ldots, N\betaigg\}\,,
\varepsilonnd{multline}
\betaegin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjhhhhklklklkintlul2}
\mathcal{W}^{(2)}_{per}(x,h^+,h^-,v_\ec \nu):=\\
\betaigg\{\xi\in C^n(\varphiield{R}^N,\varphiield{R}^{d\thetaimes N}):\;\;div_y \xi(y)\varepsilonquiv
0,\;\;\xi(y)=h^-\;\thetaext{ if }\;y\cdotv_\ec\nu\leq-1/2,\\
\xi(y)=h^+\;\thetaext{ if }\; y\cdotv_\ec\nu\gammaeq 1/2\;\thetaext{ and
}\;\xi\betaig(y+v_\ec k_j\betaig)=\xi(y)\;\;\varphiorall j=2,3,\ldots,
N\betaigg\}\,,
\varepsilonnd{multline}
\begin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjddddkkjkjlkjlkintuuk2}
\mathcal{W}^{(3)}_{per}(x,\psi^+,\psi^-,\vec \nu):=
\bigg\{\zeta\in C^n(\mathbb{R}^N,\mathbb{R}^m):\;\;\zeta(y)=\psi^-\;\text{ if }\;y\cdot\vec\nu\leq-1/2,\\
\zeta(y)=\psi^+\;\text{ if }\; y\cdot\vec\nu\geq 1/2\;\text{ and
}\;\zeta\big(y+\vec k_j\big)=\zeta(y)\;\;\forall j=2,3,\ldots,
N\bigg\}\,,
\end{multline}
\betaegin{equation}\label{cubidepgghkkkkllllllljjjkkkkkjjjhhhhlkkkffffggggdddhhhjjkljjkljljlkjintuuy2}
I_{v_\ec \nu}:= \Big\{y\in\varphiield{R}^N:\; |y\cdotv_\ec k_j|<1/2\quad\varphiorall
j=1,2,\ldots, N\Big\}\,.
\varepsilonnd{equation}
Furthermore,
\betaegin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkjgjgjgjhlllllkkkgenhgjkgggjhgjhgjbjkbjm2}
E^*\Big(\betaig\{\nabla v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x,p,q_1,q_2,q_3\Big):=\\
\inf\Bigg\{\varliminf_{\varepsilon\thetao 0^+}\int_{I_{v_\ec \nu}}
\varphirac{1}{\varepsilon}F\betaigg(\Big\{\varepsilon^n\nabla^{n+1}\sigmaigma_\varepsilon(y),\varepsilon^n\nabla^n\thetaheta_\varepsilon(y),
\varepsilon^n\nabla^n\gammaamma_\varepsilon(y)\Big\},\ldots,\Big\{\nabla\sigmaigma_\varepsilon(y),\thetaheta_\varepsilon(y),\gammaamma_\varepsilon(y)\Big\},v(x),x\betaigg)dy:\\
\sigmaigma_\varepsilon\in W^{1,q_1}(I_{v_\ec \nu},\varphiield{R}^k)\cap W^{(n+1),p}(I_{v_\ec
\nu},\varphiield{R}^k),\;\thetaheta_\varepsilon\in L^{q_2}(I_{v_\ec \nu},\varphiield{R}^{d\thetaimes N})\cap
W^{n,p}(I_{v_\ec \nu},\varphiield{R}^{d\thetaimes N}),\\ \gammaamma_\varepsilon\in L^{q_3}(I_{v_\ec
\nu},\varphiield{R}^{m})\cap W^{n,p}(I_{v_\ec \nu},\varphiield{R}^m)\;\; \thetaext{s.t.}\;
\Deltaiv_y\thetaheta_\varepsilon(y)\varepsilonquiv
0,\;\nabla\sigmaigma_\varepsilon(y)\thetao\sigmaigma\betaig(y,\nabla v^+,\nabla
v^-,v_\ec\nu\betaig)\;\thetaext{in}\;L^{q_1}(I_{v_\ec \nu},\varphiield{R}^{k\thetaimes N}),\\
\thetaheta_\varepsilon(y)\thetao\thetaheta(y,h^+,h^-,v_\ec\nu)\;\thetaext{in}\;L^{q_2}(I_{v_\ec
\nu},\varphiield{R}^{d\thetaimes
N}),\;\gammaamma_\varepsilon(y)\thetao\gammaamma(y,\psi^+,\psi^-,v_\ec\nu)\;\thetaext{in}\;L^{q_3}(I_{v_\ec
\nu},\varphiield{R}^{m})
\Bigg\}\,,
\varepsilonnd{multline}
where
\betaegin{multline}\label{fhyffgfgfgfffgfgenkjgjgkgkgjhgjggjhgjffj2}
\sigmaigma\betaig(y,\nabla v^+,\nabla v^-,v_\ec\nu\betaig):=\betaegin{cases}\nabla
v^+\quad\thetaext{if}\;\,y\cdotv_\ec \nu>0\,,\\
\nabla v^-\quad\thetaext{if}\;\,y\cdotv_\ec
\nu<0\,,\varepsilonnd{cases}\quad\thetaheta\betaig(y,h^+,h^-,v_\ec\nu\betaig):=\betaegin{cases}h^+\quad\thetaext{if}\;\,y\cdotv_\ec \nu>0\,,\\
h^-\quad\thetaext{if}\;\,y\cdotv_\ec
\nu<0\,,\varepsilonnd{cases}\\ \thetaext{and}\quad\gammaamma\betaig(y,\psi^+,\psi^-,v_\ec\nu\betaig):=\betaegin{cases}\psi^+\quad\thetaext{if}\;\,y\cdotv_\ec \nu>0\,,\\
\psi^-\quad\thetaext{if}\;\,y\cdotv_\ec \nu<0\,.\varepsilonnd{cases}
\varepsilonnd{multline}
It is clear that
\betaegin{multline}\label{fgbjfohjfopdhfolkkk2}
E^*\Big(\betaig\{\nabla v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x,p,q_1,q_2,q_3\Big)\leq\\
E_{per}\Big(\betaig\{\nabla v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x\Big)\leq E_{1}\Big(\betaig\{\nabla
v^+,h^+,\psi^+\betaig\}, \betaig\{\nabla
v^-,h^-,\psi^-\betaig\},v_\ec\nu,x\Big)\,.
\varepsilonnd{multline}
Therefore,
\betaegin{equation}\label{nvhfighfrhyrtehu2}
K^*\betaig(v(\cdot),h(\cdot),\psi(\cdot)\betaig)\leq
K_{per}\betaig(v(\cdot),h(\cdot),\psi(\cdot)\betaig)\leq
K_1\betaig(v(\cdot),h(\cdot),\psi(\cdot)\betaig)\,.
\varepsilonnd{equation}
We call $K_1(\cdot)$ the bound achieved by one-dimensional
profiles, $K_{per}(\cdot)$ the bound achieved by
multidimensional periodic profiles, and $K^*(\cdot)$ the bound
achieved by abstract profiles.
Our general conjecture is that $K^*(\cdot)$ is the $\Gammaamma$-limit for the
functionals $I_\varepsilon(\cdot)$ in \varepsilonr{vfdhghfghddh2}, under
$\betaig\{W^{1,q_1},L^{q_2}, L^{q_3}\betaig\}$ convergence, in the case
where the limiting functions $\nabla v,h,\psi\in BV$ and satisfy
\er{hghiohoijoj2}. It is known that in the case of the problem
\er{b1..2}, where $W\in C^1$ does not depend on $x$ explicitly, this is
indeed the case and, moreover, in this case we have equalities in
\er{nvhfighfrhyrtehu2} (see \cite{ambrosio}). The same result is
also known for problem \varepsilonr{b5..2} when $N=2$ (see \cite{adm} and
\cite{CdL},\cite{pol}). It is also the case for problem
\varepsilonr{b3..part2} where $W(F)=0$ if and only if $F\in\{A,B\}$, studied
by Conti, Fonseca and Leoni, if $W$ satisfies the additional
hypothesis ($H_3$) in \cite{contiFL}. However, as was shown there by
the example, if we don't assume ($H_3$)-hypothesis, then it is
possible that $E_{per}\betaig(\nabla v^+,\nabla v^-,v_\ec\nu\betaig)$ is
strictly smaller than $E_{1}\betaig(\nabla v^+,\nabla v^-,v_\ec\nu\betaig)$
and thus, in general, $K_1(\cdot)$ can differ from the
$\Gammaamma$-limit. In the same work it was shown that if, instead of
($H_3$) we assume hypothesis ($H_5$), then $K_{per}(\cdot)$ turns to
be equal to $K^*(\cdot)$ and the $\Gammaamma$-limit of \varepsilonr{b3..part2}
equals $K_{per}(\cdot)\equiv K^*(\cdot)$. A similar result is
also known for problem \er{b2..2}, where $n=1$ and there exist
$\alpha,\beta\in\mathbb{R}^m$ such that $W(h,x)=0$ if and only if
$h\in\{\alpha,\beta\}$, under some restriction on the explicit
dependence on $x$ of $G$ and $W$. As was shown by Fonseca and
Popovici in \cite{FonP}, in this case we also obtain that
$K_{per}(\cdot)\equiv K^*(\cdot)$ is the $\Gamma$-limit of
\er{b2..2}.
In the case of problem \varepsilonr{b3..part2},
where $N=2$ and $W(QF)=W(F)$ for all $Q\in SO(2)$, Conti and
Schweizer in \cite{contiS1} found that the $\Gammaamma$-limit equals to
$K^*(\cdot)$ (see also \cite{contiS} for a related problem).
However, it is not known whether, in general, $K^*(\cdot)\equiv
K_{per}(\cdot)$.
In \cite{polgen} we showed that for the general problems \er{b2..2}
and \er{b4..2}, $K_1(\cdot)$ is an upper bound in the sense of
\textbf{(**)}, provided the limiting function belongs to the class $BV$. However,
as we saw, this bound is not sharp in general. As we showed in
\cite{PI}, for the general problem \er{vfdhghfghddh2},
$K_{per}(\cdot)$ is always an upper bound in the sense of \textbf{(**)}
in the case where the limiting functions $\nabla v,h,\psi$ belong to
the space $BV$ and $G,W\in C^1$. More precisely, we have the following
theorem:
\betaegin{theorem}\label{ffgvfgfhthjghgjhg2}
Let $G$ and $W$ be nonnegative $C^1$-functions defined on
$$
\betaig\{\varphiield{R}^{k\thetaimes N^{n+1}}\thetaimes\varphiield{R}^{d\thetaimes
N^{n+1}}\thetaimes\varphiield{R}^{m\thetaimes
N^n}\betaig\}\thetaimes\ldots\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N\thetaimes
N}\thetaimes\varphiield{R}^{d\thetaimes N\thetaimes N}\thetaimes\varphiield{R}^{m\thetaimes
N}\betaig\}\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N}\thetaimes\varphiield{R}^{d\thetaimes
N}\thetaimes\varphiield{R}^{m}\betaig\}\thetaimes \varphiield{R}^k\thetaimes\varphiield{R}^N
$$
and $\varphiield{R}^{k\thetaimes N}\thetaimes\varphiield{R}^{d\thetaimes N}\thetaimes\varphiield{R}^{m}\thetaimes
\varphiield{R}^k\thetaimes\varphiield{R}^N$ respectively, and $G$ satisfies
\betaegin{equation*}
G\Big(0,0,\ldots,0,\betaig\{a,b,c\betaig\},d,x\Big)\varepsilonquiv 0 \quad\thetaext{for
all}\;\;a\in\varphiield{R}^{k\thetaimes N},\;b\in\varphiield{R}^{d\thetaimes
N},\;c\in\varphiield{R}^m,\;d\in\varphiield{R}^k,\;x\in\varphiield{R}^N\,.
\varepsilonnd{equation*}
Then for every $v\in Lip(\varphiield{R}^N,\varphiield{R}^k)\cap L^1\cap L^\infty$ such that
$\nabla v\in BV$ and for every $h\in BV(\varphiield{R}^N,\varphiield{R}^{d\thetaimes N})\cap
L^\infty$ and $\psi\in BV(\varphiield{R}^N,\varphiield{R}^{m})\cap L^\infty$ satisfying
$\|D(\nabla v)\|(\partial\Omegamega)+\|D h\|(\partial\Omegamega)+\|D
\psi\|(\partial\Omegamega)=0$, $\Deltaiv h\varepsilonquiv 0$ in $\varphiield{R}^N$ and
\betaegin{equation*}
W\Big(\nabla v(x),v(x),h(x),\psi(x),x\Big)\varepsilonquiv 0\quad\thetaext{for
a.e.}\;\;x\in\Omega\,,
\varepsilonnd{equation*}
there exist sequences $\{v_\varepsilon\}_{0<\varepsilon<1}\subset
C^\infty(\mathbb{R}^N,\mathbb{R}^k)$, $\{h_\varepsilon\}_{0<\varepsilon<1}\subset
C^\infty(\mathbb{R}^N,\mathbb{R}^{d\times N})$ and $\{\psi_\varepsilon\}_{0<\varepsilon<1}\subset
C^\infty(\mathbb{R}^N,\mathbb{R}^{m})$
such that $div_x h_\varepsilon(x)\varepsilonquiv 0$ in $\varphiield{R}^N$,
$\int_\Omega\psi_\varepsilon\,dx=\int_\Omega \psi\,dx$, for every $p\gammaeq 1$ we have
$\lim_{\varepsilon\thetao0^+}v_\varepsilon=v$ in $W^{1,p}$, $\lim_{\varepsilon\thetao0^+}h_\varepsilon=h$ in
$L^{p}$, $\lim_{\varepsilon\thetao0^+}\psi_\varepsilon=\psi$ in $L^{p}$ and
\betaegin{multline}\label{hfhvdiofjdollk2}
\lim_{\varepsilon\thetao 0^+}I_\varepsilon\betaig(v_\varepsilon,h_\varepsilon,\psi_\varepsilon\betaig):=\\ \lim_{\varepsilon\thetao
0^+}\int_\Omega \varphirac{1}{\varepsilon}F\betaigg(
\betaig\{\varepsilon^n\nabla^{n+1}v_\varepsilon,\varepsilon^n\nabla^n
h_\varepsilon,\varepsilon^n\nabla^n\psi_\varepsilon\betaig\},\ldots,\betaig\{\varepsilon\nabla^2v_\varepsilon,\varepsilon\nabla
h_\varepsilon,\varepsilon\nabla\psi_\varepsilon\betaig\},\betaig\{\nabla v_\varepsilon,h_\varepsilon,\psi_\varepsilon\betaig\},
v_\varepsilon,x\betaigg)dx\\ \leq K_{per}\betaig(v,h,\psi\betaig)\,,
\varepsilonnd{multline}
where $I_\varepsilon(\cdot)$ is defined by \varepsilonr{vfdhghfghddh2}, $F$ is defined
by \varepsilonr{fihgfighfhj2} and $K_{per}(\cdot)$ is defined by
\varepsilonr{nvhfighf2}.
\varepsilonnd{theorem}
The main result
of this paper is that for the general problem \varepsilonr{vfdhghfghddh2},
when $G,W$ don't depend on $x$ explicitly, $K^*(\cdot)$ is the lower
bound in the sense of {\betaf(*)}. More precisely, we have the
following Theorem:
\begin{theorem}\label{dehgfrygfrgygenjklhhjkghhjggjfjkh2}
Let $G$ and $W$ be nonnegative continuous functions defined on
$$
\big\{\field{R}^{k\times N^{n+1}}\times\field{R}^{d\times
N^{n+1}}\times\field{R}^{m\times
N^n}\big\}\times\ldots\times\big\{\field{R}^{k\times N\times
N}\times\field{R}^{d\times N\times N}\times\field{R}^{m\times
N}\big\}\times\big\{\field{R}^{k\times N}\times\field{R}^{d\times
N}\times\field{R}^{m}\big\},
$$
and $\field{R}^{k\times N}\times\field{R}^{d\times N}\times\field{R}^{m}$ respectively, such that
\begin{equation*}
G\Big(0,0,\ldots,0,\big\{a,b,c\big\}\Big)\equiv 0 \quad\text{for
all}\;\;a\in\field{R}^{k\times N},\;b\in\field{R}^{d\times N},\;c\in\field{R}^m\,.
\end{equation*}
Furthermore, let $q_1,q_2,q_3\geq 1$, $p\geq 1$ and $v(x)\in
W^{1,q_1}_{loc}(\Omega,\field{R}^k)$, $h(x)\in L^{q_2}_{loc}(\Omega,\field{R}^{d\times
N})$ and $\psi\in L^{q_3}_{loc}(\Omega,\field{R}^{m})$ be such that $div_x
h(x)\equiv 0$ in $\Omega$ and
$$W\Big(0,0,\ldots,0,\big\{\nabla v,h,\psi\big\}\Big)=0
\quad\text{a.e.~in}\; \Omega\,.$$ Assume also that $\nabla
v,h,\psi\in BV_{loc}\cap L^\infty_{loc}$ or, more generally, there
exists a $\mathcal{H}^{N-1}$ $\sigma$-finite Borel set $S\subset\Omega$
which we denote by $\Omega\cap (J_{\nabla v}\cup J_{h}\cup J_{\psi})$
and there exist Borel mappings $\nabla v^+(x):S\to\field{R}^{k\times N}$,
$\nabla v^-(x):S\to\field{R}^{k\times N}$, $h^+(x):S\to\field{R}^{d\times N}$,
$h^-(x):S\to\field{R}^{d\times N}$, $\psi^+(x):S\to\field{R}^{m}$,
$\psi^-(x):S\to\field{R}^{m}$ and $\vec \nu(x):S\to S^{N-1}$ such that for
every $x\in S$ we have
\begin{multline}\label{L2009surfhh8128odno888jjjjjkkkkkkgenhjjhjkjhjhgjhghkhkccb2}
\lim\limits_{\rho\to 0^+}\frac{1}
{\mathcal{L}^N\big(B_\rho(x)\big)}\int_{B_\rho^+(x,\vec
\nu(x))}\Bigg(\Big|\nabla v(y)-\nabla
v^+(x)\Big|^{q_1}+\Big|h(y)-h^+(x)\Big|^{q_2}
+\Big|\psi(y)-\psi^+(x)\Big|^{q_3}\Bigg)\,dy=0\,,\\
\lim\limits_{\rho\to 0^+}\frac{1}
{\mathcal{L}^N\big(B_\rho(x)\big)}\int_{B_\rho^-(x,\vec
\nu(x))}\Bigg(\Big|\nabla v(y)-\nabla v^-(x)\Big|^{q_1}+\Big|h(y)-
h^-(x)\Big|^{q_2}+\Big|\psi(y)-\psi^-(x)\Big|^{q_3}\Bigg)\,dy=0\,.
\end{multline}
Then for every
$\{v_\varepsilon\}_{\varepsilon>0}\subset W^{1,q_1}_{loc}(\Omega,\field{R}^k)\cap
W^{(n+1),p}_{loc}(\Omega,\field{R}^k)$, $\{h_\varepsilon\}_{\varepsilon>0}\subset
L^{q_2}_{loc}(\Omega,\field{R}^{d\times N})\cap W^{n,p}_{loc}(\Omega,\field{R}^{d\times
N})$ and $\{\psi_\varepsilon\}_{\varepsilon>0}\subset L^{q_3}_{loc}(\Omega,\field{R}^{m})\cap
W^{n,p}_{loc}(\Omega,\field{R}^m)$ satisfying $div_x h_\varepsilon(x)\equiv 0$ in $\Omega$,
$v_\varepsilon\to v$ in $W^{1,q_1}_{loc}(\Omega,\field{R}^k)$ as $\varepsilon\to 0^+$, $h_\varepsilon\to
h$ in $L^{q_2}_{loc}(\Omega,\field{R}^{d\times N})$ as $\varepsilon\to 0^+$ and
$\psi_\varepsilon\to \psi$ in $L^{q_3}_{loc}(\Omega,\field{R}^{m})$, we have
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkgenjbhghghggjkgkgkjgkgjkljgh2}
\varliminf_{\varepsilon\to 0^+}I_\varepsilon\big(v_\varepsilon,h_\varepsilon,\psi_\varepsilon\big):=\\
\varliminf_{\varepsilon\to 0^+}\int\limits_{\Omega}\frac{1}{\varepsilon} F\bigg(
\big\{\varepsilon^n\nabla^{n+1}v_{\varepsilon},\varepsilon^n\nabla^n
h_\varepsilon,\varepsilon^n\nabla^n\psi_\varepsilon\big\},\ldots,\big\{\varepsilon\nabla^2v_{\varepsilon},\varepsilon\nabla
h_\varepsilon,\,\varepsilon\nabla\psi_\varepsilon\big\},\,\big\{\nabla
v_{\varepsilon},h_\varepsilon,\psi_\varepsilon\big\}\bigg)\,dx \geq K^*\big(v,h,\psi\big)\,,
\end{multline}
where $F$ and $K^*(\cdot)$ are defined by \er{fihgfighfhj2} and
\er{nvhfighf2} respectively.
\end{theorem}
For a slightly generalized formulation and additional details see
Theorem \ref{dehgfrygfrgygenjklhhj}. See also Theorem
\ref{dehgfrygfrgygen} for an analogous result for more general
functionals than that defined by \er{vfdhghfghddh2}.
As we saw, there is a natural question whether in general
$K^*(\cdot)\equiv K_{per}(\cdot)$. An affirmative answer would mean that, in
the case when $G,W$ are $C^1$ functions which do not depend on $x$
explicitly, the upper bound in Theorem \ref{ffgvfgfhthjghgjhg2}
coincides with the lower bound of Theorem
\ref{dehgfrygfrgygenjklhhjkghhjggjfjkh2}, and therefore we obtain
the full $\Gamma$-limit in the case of $BV$ limiting functions. The
equivalent question is whether
\begin{equation}\label{hdiohdo2}
E^*\Big(\big\{\nabla v^+,h^+,\psi^+\big\}, \big\{\nabla
v^-,h^-,\psi^-\big\},\vec\nu,x,p,q_1,q_2,q_3\Big)=
E_{per}\Big(\big\{\nabla v^+,h^+,\psi^+\big\}, \big\{\nabla
v^-,h^-,\psi^-\big\},\vec\nu,x\Big)\,,
\end{equation}
where $E_{per}(\cdot)$ is defined in \er{bkjguiguigbgbgg2} and
$E^*(\cdot)$ is defined by
\er{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkjgjgjgjhlllllkkkgenhgjkgggjhgjhgjbjkbjm2}.
In Section \ref{vdhgvdfgbjfdhgf} we formulate and prove some
partial results that refer to this important question. In particular,
we prove that this is indeed the case for the general problem
\er{b2..2}. More precisely, we have the following theorem:
\begin{theorem}\label{dehgfrygfrgygenbgggggggggggggkgkgthtjtfnewbhjhjkgj2}
Let $G\in C^1\big(\field{R}^{m\times N^n}\times\field{R}^{m\times
N^{(n-1)}}\times\ldots\times\field{R}^{m\times N}\times \field{R}^m,\field{R}\big)$ and
$W\in C^1(\field{R}^m,\field{R})$ be nonnegative functions such that
$G\big(0,0,\ldots,0,b\big)= 0$ for every $b\in\field{R}^m$ and there exist
$C>0$ and $p\geq 1$ satisfying
\begin{multline}\label{hgdfvdhvdhfvjjjjiiiuyyyjitghujtrnewkhjklhkl2}
\frac{1}{C}|A|^p
\leq F\Big(A,a_1,\ldots,a_{n-1},b\Big) \leq
C\bigg(|A|^p+\sum_{j=1}^{n-1}|a_j|^{p}+|b|^p+1\bigg)\quad \text{for
every}\;\;\big(A,a_1,a_2,\ldots,a_{n-1},b\big),
\end{multline}
where we denote
$$F\Big(A,a_1,\ldots,a_{n-1},b\Big):=G\Big(A,a_1,\ldots,a_{n-1},b\Big)+W(b)\,.$$
Next let $\psi\in BV(\field{R}^N,\field{R}^{m})\cap L^\infty$ be such that $\|D
\psi\|(\partial\Omega)=0$ and $W\big(\psi(x)\big)=0$ for a.e.
$x\in\Omega$.
Then $K^*(\psi)=K_{per}(\psi)$ and for every
$\{\varphi_\varepsilon\}_{\varepsilon>0}\subset W^{n,p}_{loc}(\Omega,\field{R}^m)$ such that
$\varphi_\varepsilon\to \psi$ in $L^p_{loc}(\Omega,\field{R}^m)$ as $\varepsilon\to 0^+$, we have
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkgenhjhhhhjtjurtnewjgvjhgv2}
\varliminf_{\varepsilon\to 0^+}I_\varepsilon(\varphi_\varepsilon):=\varliminf_{\varepsilon\to
0^+}\frac{1}{\varepsilon}\int_\Omega F\bigg(\,\varepsilon^n\nabla^n
\varphi_\varepsilon(x),\,\varepsilon^{n-1}\nabla^{n-1}\varphi_\varepsilon(x),\,\ldots,\,\varepsilon\nabla \varphi_\varepsilon(x),\, \varphi_\varepsilon(x)\bigg)dx\\
\geq K_{per}(\psi):= \int_{\Omega\cap J_\psi}\bar
E_{per}\Big(\psi^+(x),\psi^-(x),\vec \nu(x)\Big)d \mathcal
H^{N-1}(x)\,,
\end{multline}
where
\begin{multline}\label{L2009hhffff12kkkhjhjghghgvgvggcjhggghtgjutnewjgkjgjk2}
\bar E_{per}\Big(\psi^+,\psi^-,\vec \nu\Big)\;:=\;\\
\inf\Bigg\{\int_{I_{\vec \nu}}\frac{1}{L} F\bigg(L^n\,\nabla^n
z_\eta,\,L^{n-1}\,\nabla^{n-1} z_\eta,\,\ldots,\,L\,\nabla
z_\eta,\,z_\eta\bigg)\,dx:\;\; L\in(0,+\infty)\,,\;z_\eta\in
\mathcal{W}^{(3)}_{per}(\psi^+,\psi^-,\vec \nu)\Bigg\}\,,
\end{multline}
with
\begin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjddddkkjkjlkjlkintuukgkggk2}
\mathcal{W}^{(3)}_{per}(\psi^+,\psi^-,\vec \nu):=
\bigg\{z_\eta\in C^n(\field{R}^N,\field{R}^m):\;\;z_\eta(y)=\psi^-\;\text{ if }\;y\cdot\vec\nu\leq-1/2,\\
z_\eta(y)=\psi^+\;\text{ if }\; y\cdot\vec\nu(x)\geq 1/2\;\text{ and
}\;z_\eta\big(y+\vec k_j\big)=z_\eta(y)\;\;\forall j=2,3,\ldots,
N\bigg\}\,.
\end{multline}
Here $I_{\vec \nu}:=\{y\in\field{R}^N:\;|y\cdot \vec k_j|<1/2\;\;\;\forall
j=1,2\ldots N\}$ where $\{\vec k_1,\vec k_2,\ldots,\vec
k_N\}\subset\field{R}^N$ is an orthonormal basis in $\field{R}^N$ such that $\vec
k_1:=\vec \nu$. Moreover, there exists a sequence
$\{\psi_\varepsilon\}_{\varepsilon>0}\subset C^\infty(\field{R}^N,\field{R}^m)$ such that
$\int_\Omega\psi_\varepsilon(x)dx=\int_\Omega \psi(x)dx$, for every $q\geq 1$ we have
$\psi_\varepsilon\to \psi$ in $L^q(\Omega,\field{R}^m)$ as $\varepsilon\to 0^+$, and we have
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkgenhjhhhhjtjurtgfhfhfjfjfjnewjkggujk2}
\lim_{\varepsilon\to 0^+}I_\varepsilon(\psi_\varepsilon):=\lim_{\varepsilon\to 0^+}\frac{1}{\varepsilon}\int_\Omega
F\bigg(\,\varepsilon^n\nabla^n
\psi_\varepsilon(x),\,\varepsilon^{n-1}\nabla^{n-1}\psi_\varepsilon(x),\,\ldots,\,\varepsilon\nabla \psi_\varepsilon(x),\, \psi_\varepsilon(x)\bigg)dx\\
=K_{per}(\psi):= \int_{\Omega\cap J_\psi}\bar
E_{per}\Big(\psi^+(x),\psi^-(x),\vec \nu(x)\Big)d \mathcal
H^{N-1}(x)\,.
\end{multline}
\end{theorem}
See Theorem \ref{dehgfrygfrgygenbgggggggggggggkgkgthtjtfnew} for a
slightly generalized result. Note here that the particular case of
Theorems \ref{dehgfrygfrgygenbgggggggggggggkgkgthtjtfnewbhjhjkgj2}
and \ref{dehgfrygfrgygenbgggggggggggggkgkgthtjtfnew}, where $n=1$
and there exist $\alpha,\beta\in\field{R}^m$ such that $W(h)=0$ if and only
if $h\in\{\alpha,\beta\}$, was obtained by Fonseca and Popovici in
\cite{FonP}.
\begin{remark}\label{vyuguigiugbuikkk2}
In what follows we use some special notations and apply some basic
theorems about $BV$ functions. For the convenience of the reader we
collect these notations and theorems in the Appendix.
\end{remark}
\end{comment}
\begin{comment}
\sigmaection{Notations and basic general results about $BV$-functions}
\label{sec:pre2}
\betaegin{itemize}
\item For given a real topological linear space $X$ we denote by $X^*$ the dual space (the space of continuous linear functionals from $X$ to $\varphiield{R}$).
\item For given $h\in X$ and $x^*\in X^*$ we denote by $\betaig<h,x^*\betaig>_{X\thetaimes X^*}$ the value in $\varphiield{R}$ of the functional $x^*$ on the vector $h$.
\item For given two normed linear spaces $X$ and $Y$ we denote by $\mathcal{L}(X;Y)$ the linear space of continuous (bounded) linear operators from $X$ to $Y$.
\item For given $A\in\mathcal{L}(X;Y)$ and $h\in X$ we denote by $A\cdot h$ the value in $Y$ of the operator $A$ on the vector $h$.
\item For given two reflexive Banach spaces $X,Y$ and
$S\in\mathcal{L}(X;Y)$ we denote by $S^*\in \mathcal{L}(Y^*;X^*)$
the corresponding adjoint operator, which satisfy
\betaegin{equation*}\betaig<x,S^*\cdot y^*\betaig>_{X\thetaimes X^*}:=\betaig<S\cdot
x,y^*\betaig>_{Y\thetaimes Y^*}\quad\quad\thetaext{for every}\; y^*\in
Y^*\;\thetaext{and}\;x\in X\,.
\varepsilonnd{equation*}
\item Given open set $G\sigmaubset\varphiield{R}^N$ we denote by
$\mathcal{D}(G,\varphiield{R}^d)$ the real topological linear space of
compactly supported $\varphiield{R}^d$-valued test functions i.e.
$C^\infty_c(G,\varphiield{R}^d)$ with the usual topology.
\item
We denote $\mathcal{D}'(G,\varphiield{R}^d):=\betaig\{\mathcal{D}(G,\varphiield{R}^d)\betaig\}^*$
(the space of $\varphiield{R}^d$ valued distributions in $G$).
\item
Given $h\in\mathcal{D}'(G,\varphiield{R}^d)$ and $\deltaelta\in\mathcal{D}(G,\varphiield{R}^d)$
we denote $<\deltaelta,h>:=\betaig<\deltaelta,h\betaig>_{\mathcal{D}(G,\varphiield{R}^d)\thetaimes
\mathcal{D}'(G,\varphiield{R}^d)}$ i.e. the value in $\varphiield{R}$ of the distribution
$h$ on the test function $\deltaelta$.
\item
Given a linear operator $v_\ec A\in\mathcal{L}(\varphiield{R}^d;\varphiield{R}^k)$ and a
distribution $h\in\mathcal{D}'(G,\varphiield{R}^d)$ we denote by $v_\ec A\cdot h$
the distribution in $\mathcal{D}'(G,\varphiield{R}^k)$ defined by
\betaegin{equation*}
<\deltaelta,v_\ec A \cdot h>:=<v_\ec A^*\cdot
\deltaelta,h>\quad\quad\varphiorall\deltaelta\in\mathcal{D}(G,\varphiield{R}^k).
\varepsilonnd{equation*}
\item
Given $h\in\mathcal{D}'(G,\varphiield{R}^d)$ and $\deltaelta\in\mathcal{D}(G,\varphiield{R})$ by
$<\deltaelta,h>$ we denote the vector in $\varphiield{R}^d$ which satisfy
$<\deltaelta,h>\cdot v_\ec e:=<\deltaeltav_\ec e,h>$ for every $v_\ec
e\in\varphiield{R}^d$.
\item
For a $p\thetaimes q$ matrix $A$ with $ij$-th entry $a_{ij}$ we denote
by $|A|=\betaigl(\Sigmaigma_{i=1}^{p}\Sigmaigma_{j=1}^{q}a_{ij}^2\betaigr)^{1/2}$
the Frobenius norm of $A$.
\item For two matrices $A,B\in\varphiield{R}^{p\thetaimes q}$ with $ij$-th entries
$a_{ij}$ and $b_{ij}$ respectively, we write\\
$A:B\,:=\,\sigmaum\limits_{i=1}^{p}\sigmaum\limits_{j=1}^{q}a_{ij}b_{ij}$.
\item For the $p\thetaimes q$ matrix $A$ with
$ij$-th entry $a_{ij}$ and for the $q\thetaimes d$ matrix $B$ with
$ij$-th entry $b_{ij}$ we denote by $AB:=A\cdot B$ their product,
i.e. the $p\thetaimes d$ matrix, with $ij$-th entry
$\sigmaum\limits_{k=1}^{q}a_{ik}b_{kj}$.
\item We identify
the $v_\ec u=(u_1,\ldots,u_q)\in\varphiield{R}^q$ with the $q\thetaimes 1$ matrix $A$
with $i1$-th entry $u_i$, so that for the $p\thetaimes q$ matrix $A$
with $ij$-th entry $a_{ij}$ and for $v_\ec
v=(v_1,v_2,\ldots,v_q)\in\varphiield{R}^q$ we denote by $A\,v_\ec v :=A\cdotv_\ec
v$ the $p$-dimensional vector $v_\ec u=(u_1,\ldots,u_p)\in\varphiield{R}^p$,
given by $u_i=\sigmaum\limits_{k=1}^{q}a_{ik}v_k$ for every $1\leq i\leq
p$.
\item As usual $A^T$ denotes the transpose of the matrix $A$.
\item For
$v_\ec u=(u_1,\ldots,u_p)\in\varphiield{R}^p$ and $v_\ec
v=(v_1,\ldots,v_p)\in\varphiield{R}^p$ we denote by $v_\ec uv_\ec v:=v_\ec
u\cdotv_\ec v:=\sigmaum\limits_{k=1}^{p}u_k v_k$ the standard scalar
product. We also note that $v_\ec uv_\ec v=v_\ec u^Tv_\ec v=v_\ec v^Tv_\ec
u$ as products of matrices.
\item For $v_\ec
u=(u_1,\ldots,u_p)\in\varphiield{R}^p$ and $v_\ec v=(v_1,\ldots,v_q)\in\varphiield{R}^q$ we
denote by $v_\ec u\omegatimesv_\ec v$ the $p\thetaimes q$ matrix with $ij$-th
entry $u_i v_j$ (i.e. $v_\ec u\omegatimesv_\ec v=v_\ec u\,v_\ec v^T$ as
product of matrices).
\item For
any $p\thetaimes q$ matrix $A$ with $ij$-th entry $a_{ij}$ and $v_\ec
v=(v_1,v_2,\ldots,v_d)\in\varphiield{R}^d$ we denote by $A\omegatimesv_\ec v$ the
$p\thetaimes q\thetaimes d$ tensor with $ijk$-th entry $a_{ij}v_k$.
\item
Given a vector valued function
$f(x)=\betaig(f_1(x),\ldots,f_k(x)\betaig):\Omega\thetao\varphiield{R}^k$ ($\Omega\sigmaubset\varphiield{R}^N$) we
denote by $Df$ or by $\nabla_x f$ the $k\thetaimes N$ matrix with
$ij$-th entry $\varphirac{\partial f_i}{\partial x_j}$.
\item
Given a matrix valued function
$F(x):=\{F_{ij}(x)\}:\varphiield{R}^N\thetao\varphiield{R}^{k\thetaimes N}$ ($\Omega\sigmaubset\varphiield{R}^N$) we
denote by $div\,F$ the $\varphiield{R}^k$-valued vector field defined by
$div\,F:=(l_1,\ldots,l_k)$ where
$l_i=\sigmaum\limits_{j=1}^{N}\varphirac{\partial F_{ij}}{\partial x_j}$.
\item Given a
matrix valued function $F(x)=\betaig\{f_{ij}(x)\betaig\}(1\leq i\leq
p,\,1\leq j\leq q):\Omega\thetao\varphiield{R}^{p\thetaimes q}$ ($\Omega\sigmaubset\varphiield{R}^N$) we denote
by $DF$ or by $\nabla_x F$ the $p\thetaimes q\thetaimes N$ tensor with
$ijk$-th entry $\varphirac{\partial f_{ij}}{\partial x_k}$.
\item For every dimension $d$
we denote by $I$ the unit $d\thetaimes d$-matrix and by $O$ the null
$d\thetaimes d$-matrix.
\item Given a vector valued
measure $\mu=(\mu_1,\ldots,\mu_k)$ (where for any $1\leq j\leq k$,
$\mu_j$ is a finite signed measure) we denote by $\|\mu\|(E)$ its
total variation measure of the set $E$.
\item For any $\mu$-measurable function $f$, we define the product measure
$f\cdot\mu$ by: $f\cdot\mu(E)=\int_E f\,d\mu$, for every
$\mu$-measurable set $E$.
\item Throughout this paper we assume that
$\Omega\sigmaubset\varphiield{R}^N$ is an open set.
\varepsilonnd{itemize}
In what follows we present some known results on BV-spaces. We rely mainly on the book \cite{amb}
by Ambrosio, Fusco and Pallara. Other sources are the books
by Hudjaev and Volpert~\cite{vol}, Giusti~\cite{giusti} and Evans and Gariepy~\cite{evans}.
We begin by introducing some notation.
For every $v_\ec\nu\in S^{N-1}$ (the unit sphere in $\varphiield{R}^N$) and $R>0$
we set
\betaegin{align}
B_{R}^+(x,v_\ec\nu)&=\{y\in\varphiield{R}^N\,:\,|y-x|<R,\,
(y-x)\cdotv_\ec\nu>0\}\,,\label{eq:B+2}\\
B_{R}^-(x,v_\ec\nu)&=\{y\in\varphiield{R}^N:|y-x|<R,\,
(y-x)\cdotv_\ec\nu<0\}\,,\label{eq:B-2}\\
H_+(x,v_\ec\nu)&=\{y\in\varphiield{R}^N:(y-x)\cdotv_\ec\nu>0\}\,,\label{HN+2}\\
H_-(x,v_\ec\nu)&=\{y\in\varphiield{R}^N:(y-x)\cdotv_\ec\nu<0\}\label{HN-2}\\
\intertext{and} H^0_{v_\ec \nu}&=\{ y\in\varphiield{R}^N\,:\, y\cdotv_\ec
\nu=0\}\label{HN2}\,.
\varepsilonnd{align}
Next we recall the definition of the space of functions with bounded
variation. In what follows, ${\mathcal L}^N$ denotes the Lebesgue measure in $\varphiield{R}^N$.
\betaegin{definition}
Let $\Omegamega$ be a domain in $\varphiield{R}^N$ and let $f\in L^1(\Omegamega,\varphiield{R}^m)$.
We say that $f\in BV(\Omegamega,\varphiield{R}^m)$ if
\betaegin{equation*}
\int_\Omegamega|Df|:=\sigmaup\betaigg\{\int_\Omegamega\sigmaum\limits_{k=1}^{m}f_k\Deltaiv\,\varphi_k\,d\mathcal{L}^N
:\;\varphi_k\in C^1_c(\Omegamega,\varphiield{R}^N)\;\varphiorall
k,\,\sigmaum\limits_{k=1}^{m}|\varphi_k(x)|^2\leq 1\;\varphiorall
x\in\Omegamega\betaigg\}
\varepsilonnd{equation*}
is finite. In this case we define the BV-norm of $f$ by
$\|f\|_{BV}:=\|f\|_{L^1}+\int_\Omegamega|D f|$.
\varepsilonnd{definition}
We recall below some basic notions in Geometric Measure Theory (see
\cite{amb}).
\betaegin{definition}\label{defjac8898782}
Let $\Omegamega$ be a domain in $\varphiield{R}^N$. Consider a function
$f\in L^1_{loc}(\Omegamega,\varphiield{R}^m)$ and a point $x\in\Omegamega$.\\
i) We say that $x$ is a point of {\varepsilonm approximate continuity} of $f$
if there exists $z\in\varphiield{R}^m$ such that
$$\lim\limits_{\rho\thetao 0^+}\varphirac{\int_{B_\rho(x)}|f(y)-z|\,dy}
{\mathcal{L}^N\betaig(B_\rho(x)\betaig)}=0\,.
$$
In this case $z$ is called an {\varepsilonm approximate limit} of $f$ at $x$
and
we denote $z$ by $\thetailde{f}(x)$. The set of points of approximate continuity of
$f$ is denoted by $G_f$.\\
ii) We say that $x$ is an {\varepsilonm approximate jump point} of $f$ if
there exist $a,b\in\varphiield{R}^m$ and $v_\ec\nu\in S^{N-1}$ such that $a\neq
b$ and \betaegin{equation}\label{aprplmin2} \lim\limits_{\rho\thetao
0^+}\varphirac{\int_{B_\rho^+(x,v_\ec\nu)}|f(y)-a|\,dy}
{\mathcal{L}^N\betaig(B_\rho(x)\betaig)}=0,\quad \lim\limits_{\rho\thetao
0^+}\varphirac{\int_{B_\rho^-(x,v_\ec\nu)}|f(y)-b|\,dy}
{\mathcal{L}^N\betaig(B_\rho(x)\betaig)}=0.
\varepsilonnd{equation}
The triple $(a,b,v_\ec\nu)$, uniquely determined by \varepsilonqref{aprplmin2}
up to a permutation of $(a,b)$ and a change of sign of $v_\ec\nu$, is
denoted by $(f^+(x),f^-(x),v_\ec\nu_f(x))$. We shall call
$v_\ec\nu_f(x)$ the {\varepsilonm approximate jump vector} and we shall
sometimes write simply $v_\ec\nu(x)$ if the reference to the function
$f$ is clear. The set of approximate jump points is denoted by
$J_f$. A choice of $v_\ec\nu(x)$ for every $x\in J_f$ (which is
unique up to sign) determines an orientation of $J_f$. At a point of
approximate continuity $x$, we shall use the convention
$f^+(x)=f^-(x)=\thetailde f(x)$.
\varepsilonnd{definition}
We recall the following results on BV-functions that we shall use in
the sequel. They are all taken from \cite{amb}. In all of them
$\Omegamega$ is a domain in $\varphiield{R}^N$ and $f$ belongs to $BV(\Omegamega,\varphiield{R}^m)$.
\betaegin{theorem}[Theorems 3.69 and 3.78 from \cite{amb}]\label{petTh2}
$ $ \\i) $\mathcal{H}^{N-1}$-almost every point in
$\Omegamega\sigmaetminus J_f$ is a point of approximate continuity of $f$.\\
ii) The set $J_f$ is a countably $\mathcal{H}^{N-1}$-rectifiable
Borel set, oriented by $v_\ec\nu(x)$. In other words, $J_f$ is
$\sigmaigma$-finite with respect to $\mathcal{H}^{N-1}$, there exist
countably many $C^1$ hypersurfaces $\{S_k\}^{\infty}_{k=1}$ such
that
$\mathcal{H}^{N-1}\Big(J_f\sigmaetminus\betaigcup\limits_{k=1}^{\infty}S_k\Big)=0$,
and for $\mathcal{H}^{N-1}$-almost every $x\in J_f\cap S_k$, the
approximate jump vector $v_\ec\nu(x)$ is normal to $S_k$ at the
point $x$.\\iii) $\betaig[(f^+-f^-)\omegatimesv_\ec\nu_f\betaig](x)\in
L^1(J_f,d\mathcal{H}^{N-1})$.
\varepsilonnd{theorem}
\betaegin{theorem}[Theorems 3.92 and 3.78 from \cite{amb}]\label{vtTh2}
The distributional gradient
$D f$ can be decomposed
as a sum of three Borel regular finite matrix-valued measures on
$\Omegamega$,
\betaegin{equation*}
D f=D^a f+D^c f+D^j f
\varepsilonnd{equation*}
with
\betaegin{equation*}
D^a f=(\nabla f)\,\mathcal{L}^N ~\thetaext{ and }~ D^j f=(f^+-f^-)\omegatimesv_\ec\nu_f
\mathcal{H}^{N-1}\llcorner J_f\,.
\varepsilonnd{equation*}
$D^a$, $D^c$ and $D^j$ are called absolutely continuous part, Cantor
and jump part of $D f$, respectively, and $\nabla f\in
L^1(\Omegamega,\varphiield{R}^{m\thetaimes N})$ is the approximate differential of $f$.
The three parts are mutually singular to each other. Moreover we
have the
following properties:\\
i) The support of $D^cf$ is concentrated on a set of
$\mathcal{L}^N$-measure zero, but $(D^c f) (B)=0$ for any Borel set
$B\sigmaubset\Omegamega$ which is $\sigmaigma$-finite with respect to
$\mathcal{H}^{N-1}$;\\ii) $[D^a f]\betaig(f^{-1}(H)\betaig)=0$ and $[D^c
f]\betaig(\thetailde f^{-1}(H)\betaig)=0$ for every $H\sigmaubset\varphiield{R}^m$ satisfying
$\mathcal{H}^1(H)=0$.
\varepsilonnd{theorem}
\betaegin{theorem}[Volpert chain rule, Theorems 3.96 and 3.99 from \cite{amb}]\label{trTh2}
Let $\Phi\in C^1(\varphiield{R}^m,\varphiield{R}^q)$ be a Lipschitz function satisfying
$\Phi(0)=0$ if $|\Omegamega|=\infty$. Then, $v(x)=(\Phi\circ f)(x)$
belongs to $BV(\Omegamega,\varphiield{R}^q)$ and we have
\betaegin{equation*}\betaegin{split}
D^a v = \nabla\Phi(f)\,\nabla f\,\mathcal{L}^N,\; D^c v =
\nabla\Phi(\thetailde f)\,D^c f,\; D^j v =
\betaig[\Phi(f^+)-\Phi(f^-)\betaig]\omegatimesv_\ec\nu_f\,
\mathcal{H}^{N-1}\llcorner J_f\,.
\varepsilonnd{split}
\varepsilonnd{equation*}
\varepsilonnd{theorem}
We also recall that the trace operator $T$ is a continuous map from
$BV(\Omegamega)$, endowed with the strong topology (or more generally,
the topology induced by strict convergence), to
$L^1(\partial\Omegamega,{\mathcal H}^{N-1}\llcorner\partial\Omegamega)$,
provided that $\Omegamega$ has a bounded Lipschitz boundary (see
\cite[Theorems 3.87 and 3.88]{amb}).
\end{comment}
\section{The abstract lower bound}
\begin{definition}\label{gdhgvdgjkdfgjkh}
Given an open set $G\subset\field{R}^N$ and a \underline{vector} $\vec
q=(q_1,q_2,\ldots, q_m)\in\field{R}^m$, such that $q_j\geq 1$ for every
$1\leq j\leq m$, define the Banach space $L^{\vec q}(G,\field{R}^m)$ as the
space of all (equivalence classes of a.e. equal) functions
$f(x)=\big(f_1(x),f_2(x),\ldots, f_m(x)\big):G\to\field{R}^m$, such that
$f_j(x)\in L^{q_j}(G,\field{R})$ for every $1\leq j\leq m$, endowed with
the norm $\|f\|_{L^{\vec
q}(G,\field{R}^m)}:=\sum_{j=1}^{m}\|f_j\|_{L^{q_j}(G,\field{R})}$. Next define, as
usual, $L^{\vec q}_{loc}(G,\field{R}^m)$ as the space of all functions
$f:G\to\field{R}^m$, such that for every compactly embedded
$U\subset\subset G$ we have $f\in L^{\vec q}(U,\field{R}^m)$. Finally,
in the case where $q\in[1,+\infty)$ is a \underline{scalar} we, as
usual, consider $L^{q}(G,\field{R}^m):=L^{\vec q}(G,\field{R}^m)$ and
$L^{q}_{loc}(G,\field{R}^m):=L^{\vec q}_{loc}(G,\field{R}^m)$, where $\vec
q:=(q,q,\ldots,q)$.
\end{definition}
\begin{definition}\label{gdhgvdgjkdfgjkhdd}
Given a vector $x:=(x_1,x_2,\ldots, x_m)\in\field{R}^m$ and a
\underline{vector} $\vec q=(q_1,q_2,\ldots, q_m)\in\field{R}^m$, such that
$q_j\geq 1$, we define $|x|^{\vec q}:=\sum_{j=1}^{m}|x_j|^{q_j}$.
Note that for a \underline{scalar} $q$, $|x|^q$ and
$|x|^{(q,q,\ldots,q)}$ are, in general, different quantities,
although they have the same order, i.e. $|x|^q/C\leq|x|^{(q,q,\ldots,
q)}\leq C|x|^q$ for some constant $C>0$.
\end{definition}
\begin{theorem}\label{dehgfrygfrgy}
Let $\mathcal{M}$ be a subset of $\field{R}^m$, $\Omega\subset\field{R}^N$ be an open
set and $D\subset\Omega$ be a $\mathcal{H}^{N-1}$ $\sigma$-finite Borel
set. Consider $F\in C(\field{R}^{m\times N}\times \field{R}^m\times\field{R}^N,\field{R})$,
which satisfies $F\geq 0$ and the following property: for every
$x_0\in\Omega$ and every $\tau>0$ there exists $\alpha>0$ satisfying
\begin{equation}\label{vcjhfjhgjkg}
F(a,b,x)-F(a,b,x_0)\geq -\tau F(a,b,x_0)\quad\forall\, a\in
\field{R}^{m\times N}\;\forall\, b\in\field{R}^m\;\forall\, x\in\field{R}^N\;\;\text{such
that}\;\;|x-x_0|<\alpha\,.
\end{equation}
Furthermore, let $\vec A\in \mathcal{L}(\field{R}^{d\times N};\field{R}^m)$,
$q=(q_1,q_2,\ldots, q_m)\in\field{R}^m$, $p\geq 1$ and
$v\in\mathcal{D}'(\Omega,\field{R}^d)$ be such that $q_j\geq 1$, $\vec
A\cdot\nabla v\in L^q_{loc}(\Omega,\field{R}^m)$ and $F\big(0,\{\vec
A\cdot\nabla v\}(x),x\big)=0$ a.e.~in $\Omega$. Assume also that there
exist three Borel mappings $\{\vec A\cdot\nabla v\}^+(x):D\to\field{R}^m$,
$\{\vec A\cdot\nabla v\}^-(x):D\to\field{R}^m$ and $\vec n(x):D\to S^{N-1}$
such that for every $x\in D$ we have
\begin{multline}\label{L2009surfhh8128odno888jjjjjkkkkkk}
\lim\limits_{\rho\to 0^+}\frac{\int_{B_\rho^+(x,\vec
n(x))}\big|\{\vec A\cdot\nabla v\}(y)-\{\vec A\cdot\nabla
v\}^+(x)\big|^q\,dy} {\mathcal{L}^N\big(B_\rho(x)\big)}=0,\\
\lim\limits_{\rho\to 0^+}\frac{\int_{B_\rho^-(x,\vec
n(x))}\big|\{\vec A\cdot\nabla v\}(y)-\{\vec A\cdot\nabla
v\}^-(x)\big|^q\,dy}
{\mathcal{L}^N\big(B_\rho(x)\big)}=0\quad\quad\quad\quad\text{(see
Definition \ref{gdhgvdgjkdfgjkhdd})}.
\end{multline}
Then for every
$\{v_\varepsilon\}_{\varepsilon>0}\subset\mathcal{D}'(\Omega,\field{R}^d)$, satisfying $\vec
A\cdot\nabla v_\varepsilon\in L^q_{loc}(\Omega,\field{R}^m)\cap W^{1,p}_{loc}(\Omega,\field{R}^m)$,
$\{\vec A\cdot\nabla v_\varepsilon\}(x)\in \mathcal{M}$ for a.e. $x\in\field{R}^N$
and $\vec A\cdot\nabla v_\varepsilon\to \vec A\cdot\nabla v$ in
$L^q_{loc}(\Omega,\field{R}^m)$ as $\varepsilon\to 0^+$, we have
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkk}
\varliminf_{\varepsilon\to 0^+}\frac{1}{\varepsilon}\int_\Omega F\Big(\,\varepsilon\nabla\big\{\vec
A\cdot\nabla v_\varepsilon\big\}(x),\, \{\vec A\cdot\nabla v_\varepsilon\}(x),\,x\Big)dx\geq\\
\int_{D}E_0\Big(\{\vec A\cdot\nabla v\}^+(x),\{\vec A\cdot\nabla
v\}^-(x),\vec n(x),x\Big)d \mathcal H^{N-1}(x)\,,
\end{multline}
where for every $x\in\field{R}^N$, $a,b\in\field{R}^m$ and any unit vector $\vec
\nu\in\field{R}^N$
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkjgjgjgjhlllllkkk}
E_0\big(a,b,\vec \nu,x\big):=\inf\Bigg\{\varliminf_{\varepsilon\to
0^+}\frac{1}{\varepsilon}\int_{I_{\vec \nu}} F\Big(\,\varepsilon\nabla\big\{\vec
A\cdot\nabla \varphi_\varepsilon\big\}(y),\, \{\vec A\cdot\nabla
\varphi_\varepsilon\}(y),\,x\Big)dy:\;\; \varphi_\varepsilon\in\mathcal{D}'(I_{\vec
\nu},\field{R}^d)\quad\text{s.t.}
\\ \vec A\cdot\nabla \varphi_\varepsilon\in L^q(I_{\vec
\nu},\field{R}^m)\cap W^{1,p}(I_{\vec \nu},\field{R}^m),\;\;\vec A\cdot\nabla
\varphi_\varepsilon\in \mathcal{M}\; \text{a.e. in}\;I_{\vec
\nu}\;\;\text{and}\;\;\{\vec A\cdot\nabla \varphi_\varepsilon\}(y)\to
\xi(y,a,b,\vec \nu)\;\text{in}\; L^q(I_{\vec \nu},\field{R}^m)\Bigg\}.
\end{multline}
Here $I_{\vec \nu}:=\{y\in\field{R}^N:\;|y\cdot \vec\nu_j|<1/2\;\;\;\forall
j=1,2\ldots N\}$ where
$\{\vec\nu_1,\vec\nu_2,\ldots,\vec\nu_N\}\subset\field{R}^N$ is an
orthonormal basis in $\field{R}^N$ such that $\vec\nu_1:=\vec \nu$ and
\begin{equation}\label{fhyffgfgfgfffgf}
\xi(y,a,b,\vec \nu):=\begin{cases}a\quad\text{if}\;y\cdot\vec
\nu>0\,,\\ b\quad\text{if}\;y\cdot\vec \nu<0\,.\end{cases}
\end{equation}
\end{theorem}
\begin{proof}
It is clear that we may assume that
\begin{equation}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkk}
T_0:=\varliminf_{\varepsilon\to 0^+}\frac{1}{\varepsilon}\int_\Omega
F\Big(\,\varepsilon\nabla\big\{\vec A\cdot\nabla v_\varepsilon\big\}(x),\, \{\vec
A\cdot\nabla v_\varepsilon\}(x),\,x\Big)dx<+\infty\,,
\end{equation}
otherwise the statement is trivial. Then, up to a subsequence
$\varepsilon_n\to 0^+$ as $n\to+\infty$, we have
\begin{equation}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhh}
\frac{1}{\varepsilon_n}F\Big(\,\varepsilon_n\nabla\big\{\vec A\cdot\nabla
v_n\big\}(x),\, \{\vec A\cdot\nabla
v_n\}(x),\,x\Big)dx\rightharpoonup\mu\,,
\end{equation}
and
\begin{equation}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkk}
T_0\geq\mu(\Omega)\,,
\end{equation}
where $\mu$ is some positive finite Radon measure on $\Omega$ and the
convergence is in the sense of the weak$^*$ convergence of finite
Radon measures. Moreover, for every compact set $K\subset\Omega$ we have
\begin{equation}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhfghghkkjjjkk}
\mu(K)\geq\varliminf_{n\to +\infty}\frac{1}{\varepsilon_n}\int_{K}
F\Big(\,\varepsilon_n\nabla\big\{\vec A\cdot\nabla v_n\big\}(x),\, \{\vec
A\cdot\nabla v_n\}(x),\,x\Big)dx\,.
\end{equation}
Next, by the theorem on $k$-dimensional densities (Theorem 2.56 in
\cite{amb}) we have
\begin{equation}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkk}
\mu(D)\geq\int_D\sigma(x)\,d\mathcal{H}^{N-1}(x)\,,
\end{equation}
where
\begin{equation}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkilljkkkk}
\sigma(x):=\varlimsup_{\rho\to
0^+}\frac{\mu(B_\rho(x))}{\omega_{N-1}\rho^{N-1}}\,,
\end{equation}
with $\omega_{N-1}$ denoting the $\mathcal{L}^{N-1}$-measure of the
$(N-1)$-dimensional unit ball. Fix now $\delta>1$. Then by
\er{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhfghghkkjjjkk},
for every $x\in\Omega$ and every $\rho>0$ sufficiently small we have
\begin{equation}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhfghghkkjjjkkhjgjghghghkkkkk}
\mu\big(B_{(\delta\rho)}(x)\big)\geq\mu\big(\overline
B_\rho(x)\big)\geq\varliminf_{n\to +\infty}\frac{1}{\varepsilon_n}\int_{\overline
B_\rho(x)} F\Big(\,\varepsilon_n\nabla\big\{\vec A\cdot\nabla v_n\big\}(y),\,
\{\vec A\cdot\nabla v_n\}(y),\,y\Big)dy\,.
\end{equation}
On the other hand, by
\er{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkk},
\er{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkk}
and
\er{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkilljkkkk}
we obtain
\begin{equation}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkiuok}
T_0\geq\mu(\Omega)\geq\mu(D)\geq\int_D\sigma(x)\,d\mathcal{H}^{N-1}(x)=\int_D\bigg\{\varlimsup_{\rho\to
0^+}\frac{\mu(B_{(\delta\rho)}(x))}{\omega_{N-1}(\delta\rho)^{N-1}}\bigg\}\,d\mathcal{H}^{N-1}(x)\,.
\end{equation}
Thus plugging
\er{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhfghghkkjjjkkhjgjghghghkkkkk}
into
\er{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkiuok}
we deduce
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkiuokhjjkkkk}
T_0\geq\mu(\Omega)\geq \\
\frac{1}{\delta^{N-1}}\int_D\Bigg\{\varlimsup_{\rho\to
0^+}\Bigg(\frac{1}{\omega_{N-1}\rho^{N-1}}\varliminf_{n\to
+\infty}\frac{1}{\varepsilon_n}\int_{B_\rho(x)} F\Big(\,\varepsilon_n\nabla\big\{\vec
A\cdot\nabla v_n\big\}(y),\, \{\vec A\cdot\nabla
v_n\}(y),\,y\Big)dy\Bigg)\Bigg\}\,d\mathcal{H}^{N-1}(x)\,.
\end{multline}
Therefore, since $\delta>1$ was chosen arbitrarily, we deduce
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkiuokhjjkkkkjhjhhjhh}
T_0\geq\mu(\Omega)\geq \\
\int_D\Bigg\{\varlimsup_{\rho\to
0^+}\Bigg(\frac{1}{\omega_{N-1}\rho^{N-1}}\varliminf_{n\to
+\infty}\frac{1}{\varepsilon_n}\int_{B_\rho(x)} F\Big(\,\varepsilon_n\nabla\big\{\vec
A\cdot\nabla v_n\big\}(y),\, \{\vec A\cdot\nabla
v_n\}(y),\,y\Big)dy\Bigg)\Bigg\}\,d\mathcal{H}^{N-1}(x)\,.
\end{multline}
Next set
\begin{equation}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkilljkkkipkppppp}
\varphi_{n,\rho,x}(z):=\frac{1}{\rho}v_n(x+\rho
z)\quad\text{and}\quad\varphi_{\rho,x}(z):=\frac{1}{\rho}v(x+\rho
z)\,.
\end{equation}
Then changing variables $y=x+\rho z$ in the interior integration in
\varepsilonr{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkiuokhjjkkkkjhjhhjhh}
we infer
\betaegin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkiuokhjjkkkkjhjhhjhhlllkkkkkkuku}
T_0\gammaeq\mu(\Omega)\gammaeq \int_D\Bigg\{\\ \varlimsup_{\rho\thetao
0^+}\Bigg(\varphirac{1}{\omegamega_{N-1}}\varliminf_{n\thetao
+\infty}\varphirac{1}{(\varepsilon_n/\rho)}\int_{B_1(0)}
F\Big(\,(\varepsilon_n/\rho)\nabla\betaig\{v_\ec A\cdot\nabla
\varphi_{n,\rho,x}\betaig\}(z),\, \{v_\ec A\cdot\nabla
\varphi_{n,\rho,x}\}(z),\,x+\rho z\Big)dz\Bigg)
\Bigg\}\,d\mathcal{H}^{N-1}(x)\,.
\varepsilonnd{multline}
However, by \eqref{vcjhfjhgjkg} for every $x\in D$ and every $\tau>0$
we obtain
\begin{multline}\label{gjdjgdjghdfjghfjhkfg}
\varlimsup_{\rho\to 0^+}\Bigg(\frac{1}{\omega_{N-1}}\varliminf_{n\to
+\infty}\frac{1}{(\varepsilon_n/\rho)}\int_{B_1(0)}
F\Big(\,(\varepsilon_n/\rho)\nabla\big\{\vec A\cdot\nabla
\varphi_{n,\rho,x}\big\}(z),\, \{\vec A\cdot\nabla
\varphi_{n,\rho,x}\}(z),\,x+\rho z\Big)dz\Bigg)\geq\\
\varlimsup_{\rho\to 0^+}\Bigg(\frac{1}{\omega_{N-1}}\varliminf_{n\to
+\infty}\frac{1}{(\varepsilon_n/\rho)}\int_{B_1(0)}
(1-\tau)F\Big(\,(\varepsilon_n/\rho)\nabla\big\{\vec A\cdot\nabla
\varphi_{n,\rho,x}\big\}(z),\, \{\vec A\cdot\nabla
\varphi_{n,\rho,x}\}(z),\,x\Big)dz\Bigg)\,.
\end{multline}
Thus, since $\tau>0$ is arbitrary,
\begin{multline}\label{gjdjgdjghdfjghfjhkfgjjkgjk}
\varlimsup_{\rho\to 0^+}\Bigg(\frac{1}{\omega_{N-1}}\varliminf_{n\to
+\infty}\frac{1}{(\varepsilon_n/\rho)}\int_{B_1(0)}
F\Big(\,(\varepsilon_n/\rho)\nabla\big\{\vec A\cdot\nabla
\varphi_{n,\rho,x}\big\}(z),\, \{\vec A\cdot\nabla
\varphi_{n,\rho,x}\}(z),\,x+\rho z\Big)dz\Bigg)\geq\\
\varlimsup_{\rho\to 0^+}\Bigg(\frac{1}{\omega_{N-1}}\varliminf_{n\to
+\infty}\frac{1}{(\varepsilon_n/\rho)}\int_{B_1(0)}
F\Big(\,(\varepsilon_n/\rho)\nabla\big\{\vec A\cdot\nabla
\varphi_{n,\rho,x}\big\}(z),\, \{\vec A\cdot\nabla
\varphi_{n,\rho,x}\}(z),\,x\Big)dz\Bigg)\,.
\end{multline}
Plugging \eqref{gjdjgdjghdfjghfjhkfgjjkgjk} into
\eqref{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkiuokhjjkkkkjhjhhjhhlllkkkkkkuku}
we deduce
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkiuokhjjkkkkjhjhhjhhlllkkkkk}
T_0\geq\mu(\Omega)\geq \int_D\Bigg\{\\ \varlimsup_{\rho\to
0^+}\Bigg(\frac{1}{\omega_{N-1}}\varliminf_{n\to
+\infty}\frac{1}{(\varepsilon_n/\rho)}\int_{B_1(0)}
F\Big(\,(\varepsilon_n/\rho)\nabla\big\{\vec A\cdot\nabla
\varphi_{n,\rho,x}\big\}(z),\, \{\vec A\cdot\nabla
\varphi_{n,\rho,x}\}(z),\,x\Big)dz\Bigg)
\Bigg\}\,d\mathcal{H}^{N-1}(x)\,.
\end{multline}
Furthermore, for every $x\in D$ and every small $\rho>0$ we have
\begin{equation}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkilljkkkipkpppppfbhfjkgjgkkkhghghfhfgggg}
\vec A\cdot\nabla \varphi_{n,\rho,x}\to \vec A\cdot\nabla
\varphi_{\rho,x}\quad\text{as}\;n\to +\infty\quad\text{in}\quad
L^q\big(B_1(0),\mathbb{R}^m\big)\,.
\end{equation}
On the other hand by \eqref{L2009surfhh8128odno888jjjjjkkkkkk} for
every $x\in D$ we have
\begin{equation}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkilljkkkipkpppppfbhfjkgjgkkkhghghfhfgggglokjjjjhhh}
\{\vec A\cdot\nabla \varphi_{\rho,x}\}(z)\to \xi\Big(z,\{\vec
A\cdot\nabla v\}^+(x),\{\vec A\cdot\nabla v\}^-(x),\vec
n(x)\Big)\quad\text{as}\;\rho\to 0^+\quad\text{in}\quad
L^q\big(B_1(0),\mathbb{R}^m\big)\,.
\end{equation}
Thus, for every $x\in D$, we can extract appropriate diagonal
subsequences of $\{\varphi_{n,\rho,x}\}_{\{n,\rho\}}$ and
$\{\varepsilon_n/\rho\}_{\{n,\rho\}}$, which we denote by
$\{\vartheta_{j}\}_{j=1}^{+\infty}$ and $\{\varepsilon'_j\}_{j=1}^{+\infty}$
respectively, so that $\varepsilon'_j\to 0^+$ as $j\to +\infty$,
$\{\vec A\cdot\nabla\vartheta_{j}\}(z)\to \xi\Big(z,\{\vec A\cdot\nabla v\}^+(x),\{\vec
A\cdot\nabla v\}^-(x),\vec n(x)\Big)$ in $L^q\big(B_1(0),\mathbb{R}^m\big)$
as $j\to +\infty$, and
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkiuokhjjkkkkjhjhhjhhlllkkkkkffgfgfgffgfg}
\varlimsup_{\rho\to 0^+}\Bigg(\frac{1}{\omega_{N-1}}\varliminf_{n\to
+\infty}\frac{1}{(\varepsilon_n/\rho)}\int_{B_1(0)}
F\Big(\,(\varepsilon_n/\rho)\nabla\big\{\vec A\cdot\nabla
\varphi_{n,\rho,x}\big\}(z),\, \{\vec A\cdot\nabla
\varphi_{n,\rho,x}\}(z),\,x\Big)dz\Bigg)\\ \geq
\frac{1}{\omega_{N-1}}\varliminf_{j\to
+\infty}\frac{1}{\varepsilon'_j}\int_{B_1(0)} F\Big(\,\varepsilon'_j\nabla\big\{\vec
A\cdot\nabla \vartheta_{j}\big\}(z),\, \{\vec A\cdot\nabla
\vartheta_{j}\}(z),\,x\Big)dz\,.
\end{multline}
Plugging this fact into
\eqref{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkiuokhjjkkkkjhjhhjhhlllkkkkk}
we obtain
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkjjjkjkkkkkk}
\varliminf_{\varepsilon\to 0^+}\frac{1}{\varepsilon}\int_\Omega F\Big(\,\varepsilon\nabla\big\{\vec
A\cdot\nabla v_\varepsilon(x)\big\},\, \{\vec A\cdot\nabla v_\varepsilon\}(x),\,x\Big)dx=T_0\geq\\
\int_{D}E_1\Big(\{\vec A\cdot\nabla v\}^+(x),\{\vec A\cdot\nabla
v\}^-(x),\vec n(x),x\Big)d \mathcal H^{N-1}(x)\,,
\end{multline}
where for every $a,b\in\mathbb{R}^m$ and any unit vector $\vec \nu\in\mathbb{R}^N$
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkjgjgjgjhlllllkkkggghhhjjjkkkk}
E_1\big(a,b,\vec \nu,x\big):=\inf\Bigg\{\varliminf_{\varepsilon\to
0^+}\frac{1}{\omega_{N-1}\varepsilon}\int_{B_1(0)} F\Big(\,\varepsilon\nabla\big\{\vec
A\cdot\nabla \varphi_\varepsilon(y)\big\},\, \{\vec A\cdot\nabla
\varphi_\varepsilon\}(y),\,x\Big)dy:\;\; \varphi_\varepsilon\in
\mathcal{D}'\big(B_1(0),\mathbb{R}^d\big)
\quad\text{s.t.}\\ \vec A\cdot\nabla \varphi_\varepsilon\in
L^q\big(B_1(0),\mathbb{R}^m\big)\cap W^{1,p}\big(B_1(0),\mathbb{R}^m\big),\;\;\vec
A\cdot\nabla \varphi_\varepsilon\in\mathcal{M}\;\text{a.e. in}\;B_1(0)\\
\text{and}\;\;\{\vec A\cdot\nabla \varphi_\varepsilon\}(y)\to \xi(y,a,b,\vec
\nu)\;\text{in}\; L^q\big(B_1(0),\mathbb{R}^m\big)\Bigg\}\,.
\end{multline}
So it is sufficient to prove that for every $a,b\in\mathbb{R}^m$ and any
unit vector $\vec \nu\in\mathbb{R}^N$ we have
\begin{equation}\label{dhfhdghfhfgkkkhhh7788jjkk}
E_1\big(a,b,\vec \nu,x\big)\geq E_0\big(a,b,\vec \nu,x\big)\,,
\end{equation}
where $E_0\big(a,b,\vec \nu,x\big)$ is defined by
\eqref{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkjgjgjgjhlllllkkk}. Without loss
of generality it is sufficient to prove
\eqref{dhfhdghfhfgkkkhhh7788jjkk} in the particular case where
$\vec\nu=\vec e_1$ and $I_{\vec \nu}:=\{y\in\mathbb{R}^N:\;|y\cdot \vec
e_j|<1/2\;\;\;\forall j=1,2,\ldots, N\}$, where $\{\vec e_1,\vec
e_2,\ldots,\vec e_N\}\subset\mathbb{R}^N$ is the standard orthonormal base
in $\mathbb{R}^N$. Choose a natural number $n\in\mathbb{N}$. Then changing
variables of integration $z=ny$ in
\eqref{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkjgjgjgjhlllllkkkggghhhjjjkkkk}
we obtain
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkjgjgjgjhlllllkkkggghhhjjjkkkkkkkkkkllllkkkk}
E_1\big(a,b,\vec \nu,x\big)=\inf\Bigg\{\varliminf_{\varepsilon\to
0^+}\frac{1}{\omega_{N-1}n^{N-1}\varepsilon}\int\limits_{B_n(0)}
F\Big(\,\varepsilon\nabla\big\{\vec A\cdot\nabla \varphi_\varepsilon(y)\big\},\,
\{\vec A\cdot\nabla \varphi_\varepsilon\}(y),\,x\Big)dy:\; \varphi_\varepsilon\in
\mathcal{D}'\big(B_n(0),\mathbb{R}^d\big)\quad
\text{s.t.}\\ \vec A\cdot\nabla \varphi_\varepsilon\in
L^q\big(B_n(0),\mathbb{R}^m\big)\cap W^{1,p}\big(B_n(0),\mathbb{R}^m\big),\;\;
\vec A\cdot\nabla \varphi_\varepsilon\in\mathcal{M}\;\text{a.e. in}\;B_n(0)\\
\text{and}\;\{\vec A\cdot\nabla \varphi_\varepsilon\}(y)\to \xi(y,a,b,\vec
\nu)\;\text{in}\; L^q\big(B_n(0),\mathbb{R}^m\big)\Bigg\}\,.
\end{multline}
Next, for all integers $i_1,i_2,\ldots,i_{N-1}\in\mathbb{Z}$
consider the set
\begin{equation}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkilljkkkipkpppppfbhfjk}
I_{(i_1,i_2,\ldots,i_{N-1})}:=\bigg\{z\in\mathbb{R}^N:\;|z\cdot\vec
e_1|<1/2\;\;\text{and}\;\;\big|z\cdot \vec
e_j-i_{j-1}\big|<1/2\;\;\;\forall j=2,3,\ldots, N\bigg\}\,,
\end{equation}
and set $I_0:=I_{(0,0,\ldots,0)}$. Then by
\eqref{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkjgjgjgjhlllllkkkggghhhjjjkkkkkkkkkkllllkkkk}
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkjgjgjgjhlllllkkkggghhhjjjkkkkkkkkkkllllkkkkkkkkklklklklkllkklkkkkkjjkkjjkjkjkjk}
E_1\big(a,b,\vec e_1,x\big)\geq
\frac{1}{\omega_{N-1}n^{N-1}}\,Card\bigg(\Big\{(i_1,i_2,\ldots,i_{N-1})\in\mathbb{Z}^{N-1}:\,I_{(i_1,i_2,\ldots,i_{N-1})}\subset
B_n(0)\Big\}\bigg)\times\\ \times\inf\Bigg\{\varliminf_{\varepsilon\to
0^+}\frac{1}{\varepsilon}\int_{I_0} F\Big(\,\varepsilon\nabla\big\{\vec A\cdot\nabla
\varphi_\varepsilon(y)\big\},\, \{\vec A\cdot\nabla
\varphi_\varepsilon\}(y),\,x\Big)dy:\;\; \varphi_\varepsilon\in
\mathcal{D}'(I_0,\mathbb{R}^d)\;\;
\text{s.t.}\\
\;\vec A\cdot\nabla \varphi_\varepsilon\in L^q\big(I_0,\mathbb{R}^m\big)\cap
W^{1,p}\big(I_0,\mathbb{R}^m\big),\;\;\vec A\cdot\nabla
\varphi_\varepsilon\in\mathcal{M}\;\text{a.e. in}\;I_0 \;\;
\text{and}\;\{\vec A\cdot\nabla \varphi_\varepsilon\}(y)\to \xi(y,a,b,\vec
\nu)\;\text{in}\;
L^q\big(I_0,\mathbb{R}^m\big)\Bigg\}\\
=\frac{1}{\omega_{N-1}n^{N-1}}\,Card\bigg(\Big\{(i_1,i_2,\ldots,i_{N-1})\in\mathbb{Z}^{N-1}:\,I_{(i_1,i_2,\ldots,i_{N-1})}\subset
B_n(0)\Big\}\bigg)\,E_0(a,b,\vec e_1,x)\,.
\end{multline}
On the other hand clearly
\begin{equation}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkyiuouilokkkmnhhfghfhghgjkkkkkhghghffhjkljlkkkkkilljkkkipkpppppfbhfjkgjgkkk}
\lim\limits_{n\to
+\infty}\frac{1}{\omega_{N-1}n^{N-1}}\,Card\bigg(\Big\{(i_1,i_2,\ldots,i_{N-1})\in\mathbb{Z}^{N-1}:\,I_{(i_1,i_2,\ldots,i_{N-1})}\subset
B_n(0)\Big\}\bigg)=1\,.
\end{equation}
Therefore, since $n\in\mathbb{N}$ was chosen arbitrarily, we deduce
\eqref{dhfhdghfhfgkkkhhh7788jjkk}. Plugging it into
\eqref{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkjjjkjkkkkkk} completes the
proof.
\end{proof}
By the same method we can prove the following more general theorem.
\begin{theorem}\label{dehgfrygfrgygen}
Let $\mathcal{M}$ be a subset of $\mathbb{R}^m$, $\Omega\subset\mathbb{R}^N$ be an open
set and $D\subset\Omega$ be an $\mathcal{H}^{N-1}$ $\sigma$-finite Borel
set. Consider $F\in C\big(\mathbb{R}^{m\times N^n}\times\mathbb{R}^{m\times
N^{(n-1)}}\times\ldots\times\mathbb{R}^{m\times N}\times
\mathbb{R}^m\times\mathbb{R}^N,\mathbb{R}\big)$, which satisfies $F\geq 0$ and the following
property: for every $x_0\in\Omega$ and every $\tau>0$ there exists
$\alpha>0$ satisfying
\begin{multline}\label{vcjhfjhgjkgkgjgghj}
F\big(a_1,a_2,\ldots, a_n,b,x\big)-F\big(a_1,a_2,\ldots,
a_n,b,x_0\big)\geq -\tau F\big(a_1,a_2,\ldots, a_n,b,x_0\big)\\
\forall\, a_1\in \mathbb{R}^{m\times N^n}\;\forall\,a_2\in \mathbb{R}^{m\times
N^{n-1}}\ldots\forall\, a_n\in\mathbb{R}^{m\times N}\;\forall\,
b\in\mathbb{R}^m\;\forall\, x\in\mathbb{R}^N\;\;\text{such
that}\;\;|x-x_0|<\alpha\,.
\end{multline}
Furthermore, let $\vec A\in \mathcal{L}(\mathbb{R}^{d\times N};\mathbb{R}^m)$,
$q=(q_1,q_2,\ldots, q_m)\in\mathbb{R}^m$, $p\geq 1$ and
$v\in\mathcal{D}'(\Omega,\mathbb{R}^d)$ be such that $q_j\geq 1$,
$\vec A\cdot\nabla v\in L^q_{loc}(\Omega,\mathbb{R}^m)$ and
\[F\Big(0,0,\ldots,0,\{\vec A\cdot\nabla
v\}(x),x\Big)=0\quad\quad\text{for a.e.}\;\,x\in\Omega\,.\]
Assume also
that there exist three Borel mappings $\{\vec A\cdot\nabla
v\}^+(x):D\to\mathbb{R}^m$, $\{\vec A\cdot\nabla v\}^-(x):D\to\mathbb{R}^m$ and
$\vec n(x):D\to S^{N-1}$ such that for every $x\in D$ we have
\begin{multline}\label{L2009surfhh8128odno888jjjjjkkkkkkgen}
\lim\limits_{\rho\to 0^+}\frac{\int_{B_\rho^+(x,\vec
n(x))}\big|\{\vec A\cdot\nabla v\}(y)-\{\vec A\cdot\nabla
v\}^+(x)\big|^q\,dy} {\mathcal{L}^N\big(B_\rho(x)\big)}=0,\\
\lim\limits_{\rho\to 0^+}\frac{\int_{B_\rho^-(x,\vec
n(x))}\big|\{\vec A\cdot\nabla v\}(y)-\{\vec A\cdot\nabla
v\}^-(x)\big|^q\,dy}
{\mathcal{L}^N\big(B_\rho(x)\big)}=0\quad\quad\quad\quad\text{(see
Definition \ref{gdhgvdgjkdfgjkhdd})}.
\end{multline}
Then for every
$\{v_\varepsilon\}_{\varepsilon>0}\subset\mathcal{D}'(\Omega,\mathbb{R}^d)$, satisfying $\vec
A\cdot\nabla v_\varepsilon\in L^q_{loc}(\Omega,\mathbb{R}^m)\cap W^{n,p}_{loc}(\Omega,\mathbb{R}^m)$,
$\{\vec A\cdot\nabla v_\varepsilon\}(x)\in\mathcal{M}$ for a.e. $x\in\Omega$ and
$\vec A\cdot\nabla v_\varepsilon\to \vec A\cdot\nabla v$ in
$L^q_{loc}(\Omega,\mathbb{R}^m)$ as $\varepsilon\to 0^+$, we have
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkgen}
\varliminf_{\varepsilon\to 0^+}\frac{1}{\varepsilon}\int_\Omega
F\bigg(\,\varepsilon^n\nabla^n\big\{\vec A\cdot\nabla
v_\varepsilon\big\}(x),\,\varepsilon^{n-1}\nabla^{n-1}\big\{\vec A\cdot\nabla
v_\varepsilon\big\}(x),\,\ldots,\,\varepsilon\nabla\big\{\vec
A\cdot\nabla v_\varepsilon(x)\big\},\, \{\vec A\cdot\nabla v_\varepsilon\}(x),\,x\bigg)dx\\
\geq \int_{D}E^{(n)}_0\Big(\{\vec A\cdot\nabla v\}^+(x),\{\vec
A\cdot\nabla v\}^-(x),\vec n(x),x\Big)d \mathcal H^{N-1}(x)\,,
\end{multline}
where for every $a,b\in\mathbb{R}^m$ and any unit vector $\vec \nu\in\mathbb{R}^N$
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkjgjgjgjhlllllkkkgen}
E^{(n)}_0\big(a,b,\vec \nu,x\big):=\\ \inf\Bigg\{\varliminf_{\varepsilon\to
0^+}\frac{1}{\varepsilon}\int_{I_{\vec \nu}} F\bigg(\,\varepsilon^n\nabla^n\big\{\vec
A\cdot\nabla \varphi_\varepsilon\big\}(y),\,\varepsilon^{n-1}\nabla^{n-1}\big\{\vec
A\cdot\nabla \varphi_\varepsilon\big\}(y),\,\ldots,\,\varepsilon\nabla\big\{\vec
A\cdot\nabla \varphi_\varepsilon\big\}(y),\, \{\vec A\cdot\nabla
\varphi_\varepsilon\}(y),\,x\bigg)dy:\\ \varphi_\varepsilon\in\mathcal{D}'(I_{\vec
\nu},\mathbb{R}^d)
\;\; \text{s.t.}\;\vec A\cdot\nabla \varphi_\varepsilon\in L^q(I_{\vec
\nu},\mathbb{R}^m)\cap W^{n,p}(I_{\vec \nu},\mathbb{R}^m),\;\;\vec A\cdot\nabla
\varphi_\varepsilon\in\mathcal{M}\;\text{a.e. in}\;I_{\vec \nu} \\
\text{and}\;\{\vec A\cdot\nabla \varphi_\varepsilon\}(y)\to \xi(y,a,b,\vec
\nu)\;\text{in}\; L^q(I_{\vec \nu},\mathbb{R}^m)\Bigg\}\,.
\end{multline}
Here $I_{\vec \nu}:=\{y\in\mathbb{R}^N:\;|y\cdot \vec\nu_j|<1/2\;\;\;\forall
j=1,2,\ldots, N\}$, where
$\{\vec\nu_1,\vec\nu_2,\ldots,\vec\nu_N\}\subset\mathbb{R}^N$ is an
orthonormal base in $\mathbb{R}^N$ such that $\vec\nu_1:=\vec \nu$, and
\begin{equation}\label{fhyffgfgfgfffgfgen}
\xi(y,a,b,\vec \nu):=\begin{cases}a\quad\text{if}\;y\cdot\vec
\nu>0\,,\\ b\quad\text{if}\;y\cdot\vec \nu<0\,.\end{cases}
\end{equation}
\end{theorem}
We have the following particular case of Theorem~\ref{dehgfrygfrgygen}.
\begin{theorem}\label{dehgfrygfrgygenjklhhj}
Let $\mathcal{M}$ be a subset of $\mathbb{R}^m$, $\Omega\subset\mathbb{R}^N$ be an open
set and $D\subset\Omega$ be an $\mathcal{H}^{N-1}$ $\sigma$-finite Borel
set. Furthermore, let $q_1,q_2,q_3\geq 1$, $p\geq 1$ and let $F$ be
a continuous function defined on
\[
\big\{\mathbb{R}^{k\times N^{n+1}}\times\mathbb{R}^{d\times
N^{n+1}}\times\mathbb{R}^{m\times
N^n}\big\}\times\ldots\times\big\{\mathbb{R}^{k\times N\times
N}\times\mathbb{R}^{d\times N\times N}\times\mathbb{R}^{m\times
N}\big\}\times\big\{\mathbb{R}^{k\times N}\times\mathbb{R}^{d\times
N}\times\mathbb{R}^{m}\big\}\times\mathbb{R}^N,
\]
taking values in $\mathbb{R}$ and satisfying $F\geq 0$ and the following
property: for every $x_0\in\Omega$ and every $\tau>0$ there exists
$\alpha>0$ satisfying
\begin{multline}\label{vcjhfjhgjkgkgjgghjfhfhf}
F\big(a_1,a_2,\ldots, a_n,b,x\big)-F\big(a_1,a_2,\ldots,
a_n,b,x_0\big)\geq -\tau F\big(a_1,a_2,\ldots, a_n,b,x_0\big)\quad
\forall\, a_1\in \mathbb{R}^{k\times N^{n+1}}\times\mathbb{R}^{d\times
N^{n+1}}\times\mathbb{R}^{m\times N^n}\\ \ldots\forall\, a_n\in\mathbb{R}^{k\times
N\times N}\times\mathbb{R}^{d\times N\times N}\times\mathbb{R}^{m\times
N}\;\;\forall\, b\in\mathbb{R}^{k\times N}\times\mathbb{R}^{d\times
N}\times\mathbb{R}^{m}\;\;\forall\, x\in\mathbb{R}^N\;\;\text{such
that}\;\;|x-x_0|<\alpha\,.
\end{multline}
Let $v(x)\in W^{1,q_1}_{loc}(\Omega,\mathbb{R}^k)$, $\bar m(x)\in
L^{q_2}_{loc}(\Omega,\mathbb{R}^{d\times N})$ and $\varphi\in
L^{q_3}_{loc}(\Omega,\mathbb{R}^{m})$ be such that $div_x \bar m(x)\equiv 0$ in
$\Omega$ and
\[F\Big(0,0,\ldots,0,\{\nabla v,\bar m,\varphi\},x\Big)=0
\quad\text{a.e.~in}\; \Omega\,.\]
Assume also that there exist Borel
mappings $\{\nabla v\}^+(x):D\to\mathbb{R}^{k\times N}$, $\{\nabla
v\}^-(x):D\to\mathbb{R}^{k\times N}$, $\bar m^+(x):D\to\mathbb{R}^{d\times N}$,
$\bar m^-(x):D\to\mathbb{R}^{d\times N}$, $\varphi^+(x):D\to\mathbb{R}^{m}$,
$\varphi^-(x):D\to\mathbb{R}^{m}$ and $\vec n(x):D\to S^{N-1}$ such that for
every $x\in D$ we have
\begin{multline}\label{L2009surfhh8128odno888jjjjjkkkkkkgenhjjhjkj}
\lim\limits_{\rho\to 0^+}\frac{1}
{\mathcal{L}^N\big(B_\rho(x)\big)}\int_{B_\rho^+(x,\vec
n(x))}\Bigg(\Big|\nabla v(y)-\{\nabla v\}^+(x)\Big|^{q_1}+\Big|\bar
m(y)-\bar m^+(x)\Big|^{q_2}+\Big|\varphi(y)-\varphi^+(x)\Big|^{q_3}\Bigg)\,dy=0\,,\\
\lim\limits_{\rho\to 0^+}\frac{1}
{\mathcal{L}^N\big(B_\rho(x)\big)}\int_{B_\rho^-(x,\vec
n(x))}\Bigg(\Big|\nabla v(y)-\{\nabla v\}^-(x)\Big|^{q_1}+\Big|\bar
m(y)-\bar
m^-(x)\Big|^{q_2}+\Big|\varphi(y)-\varphi^-(x)\Big|^{q_3}\Bigg)\,dy=0\,.
\end{multline}
Then for every
$\{v_\varepsilon\}_{\varepsilon>0}\subset W^{1,q_1}_{loc}(\Omega,\mathbb{R}^k)\cap
W^{(n+1),p}_{loc}(\Omega,\mathbb{R}^k)$, $\{m_\varepsilon\}_{\varepsilon>0}\subset
L^{q_2}_{loc}(\Omega,\mathbb{R}^{d\times N})\cap W^{n,p}_{loc}(\Omega,\mathbb{R}^{d\times
N})$ and $\{\psi_\varepsilon\}_{\varepsilon>0}\subset L^{q_3}_{loc}(\Omega,\mathbb{R}^{m})\cap
W^{n,p}_{loc}(\Omega,\mathbb{R}^m)$ satisfying $div_x m_\varepsilon(x)\equiv 0$ in $\Omega$,
$\psi_\varepsilon(x)\in\mathcal{M}$ for a.e. $x\in\Omega$, $v_\varepsilon\to v$ in
$W^{1,q_1}_{loc}(\Omega,\mathbb{R}^k)$ as $\varepsilon\to 0^+$, $m_\varepsilon\to \bar m$ in
$L^{q_2}_{loc}(\Omega,\mathbb{R}^{d\times N})$ as $\varepsilon\to 0^+$ and $\psi_\varepsilon\to
\varphi$ in $L^{q_3}_{loc}(\Omega,\mathbb{R}^{m})$, we have
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkgenjbhghgh}
\varliminf_{\varepsilon\to 0^+}\int\limits_{\Omega}\frac{1}{\varepsilon} F\bigg(
\big\{\varepsilon^n\nabla^{n+1}v_{\varepsilon},\,\varepsilon^n\nabla^n
m_\varepsilon,\,\varepsilon^n\nabla^n\psi_\varepsilon\big\},\,\ldots\,,\big\{\varepsilon\nabla^2v_{\varepsilon},\,\varepsilon\nabla
m_\varepsilon,\,\varepsilon\nabla\psi_\varepsilon\big\},\,\big\{\nabla
v_{\varepsilon},\,m_\varepsilon,\,\psi_\varepsilon\big\},\,x\bigg)\,dx\\
\geq \int_{D}\bar E^{(n)}_0\bigg(\{\nabla v\}^+(x),\bar
m^+(x),\varphi^+(x),\{\nabla v\}^-(x),\bar m^-(x),\varphi^-(x),\vec
n(x),x\bigg)d \mathcal H^{N-1}(x)\,,
\end{multline}
where for
any unit vector $\vec \nu\in\mathbb{R}^N$
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkjgjgjgjhlllllkkkgenhgjkggg}
\bar E^{(n)}_0\bigg(\{\nabla v\}^+,\bar m^+,\varphi^+,\{\nabla v\}^-,\bar m^-,\varphi^-,\vec \nu,x\bigg):=\\
\inf\Bigg\{\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec \nu}}
\frac{1}{\varepsilon}F\bigg(\Big\{\varepsilon^n\nabla^{n+1}\sigma_\varepsilon(y),\varepsilon^n\nabla^n\theta_\varepsilon(y),
\varepsilon^n\nabla^n\gamma_\varepsilon(y)\Big\},\ldots,\Big\{\nabla\sigma_\varepsilon(y),\theta_\varepsilon(y),\gamma_\varepsilon(y)\Big\}
,x\bigg)dy:\\
\sigma_\varepsilon\in W^{1,q_1}(I_{\vec \nu},\mathbb{R}^k)\cap W^{(n+1),p}(I_{\vec
\nu},\mathbb{R}^k),\;\theta_\varepsilon\in L^{q_2}(I_{\vec \nu},\mathbb{R}^{d\times N})\cap
W^{n,p}(I_{\vec \nu},\mathbb{R}^{d\times N}),\\ \big\{\gamma_\varepsilon:I_{\vec
\nu}\to\mathcal{M}\big\}\in L^{q_3}(I_{\vec \nu},\mathbb{R}^{m})\cap
W^{n,p}(I_{\vec \nu},\mathbb{R}^m)\;\\ \text{s.t.}\;
div_y\theta_\varepsilon(y)\equiv
0,\;\nabla\sigma_\varepsilon(y)\to\sigma\big(y,\{\nabla v\}^+,\{\nabla
v\}^-,\vec\nu\big)\;\text{in}\;L^{q_1}(I_{\vec \nu},\mathbb{R}^{k\times N}),\\
\theta_\varepsilon(y)\to\theta(y,\bar m^+,\bar
m^-,\vec\nu)\;\text{in}\;L^{q_2}(I_{\vec \nu},\mathbb{R}^{d\times
N}),\;\gamma_\varepsilon(y)\to\gamma(y,\varphi^+,\varphi^-,\vec\nu)\;\text{in}\;L^{q_3}(I_{\vec
\nu},\mathbb{R}^{m})
\Bigg\}\,.
\end{multline}
Here $I_{\vec \nu}:=\{y\in\mathbb{R}^N:\;|y\cdot \vec\nu_j|<1/2\;\;\;\forall
j=1,2,\ldots, N\}$, where
$\{\vec\nu_1,\vec\nu_2,\ldots,\vec\nu_N\}\subset\mathbb{R}^N$ is an
orthonormal base in $\mathbb{R}^N$ such that $\vec\nu_1:=\vec \nu$, and
\begin{multline}\label{fhyffgfgfgfffgfgenkjgjgkgkg}
\sigma\big(y,\{\nabla v\}^+,\{\nabla
v\}^-,\vec\nu\big):=\begin{cases}\{\nabla
v\}^+\quad\text{if}\;\,y\cdot\vec \nu>0\,,\\
\{\nabla v\}^-\quad\text{if}\;\,y\cdot\vec
\nu<0\,,\end{cases}\quad\theta\big(y,\bar m^+,\bar
m^-,\vec\nu\big):=\begin{cases}\bar m^+\quad\text{if}\;\,y\cdot\vec \nu>0\,,\\
\bar m^-\quad\text{if}\;\,y\cdot\vec
\nu<0\,,\end{cases}\\ \text{and}\quad\gamma\big(y,\varphi^+,\varphi^-,\vec\nu\big):=\begin{cases}\varphi^+\quad\text{if}\;\,y\cdot\vec \nu>0\,,\\
\varphi^-\quad\text{if}\;\,y\cdot\vec \nu<0\,.\end{cases}
\end{multline}
\betaegin{comment}
Then
for every $\deltaelta>0$ there exist sequences
$\{v_\varepsilon\}_{0<\varepsilon<1}\sigmaubset C^\infty(\varphiield{R}^N,\varphiield{R}^k)$,
$\{m_\varepsilon\}_{0<\varepsilon<1}\sigmaubset C^\infty(\varphiield{R}^N,\varphiield{R}^{d\thetaimes N})$ and
$\{\psi_\varepsilon\}_{0<\varepsilon<1}\sigmaubset C^\infty(\varphiield{R}^N,\varphiield{R}^{m})$
such that $div_x m_\varepsilon(x)\varepsilonquiv 0$ in $\Omega$,
$\int_\Omega\psi_\varepsilon(x)\,dx=\int_\Omega \varphi(x)\,dx$,
$\lim_{\varepsilon\thetao0^+}v_\varepsilon=v$ in $W^{1,p}$, $\lim_{\varepsilon\thetao0^+}(v_\varepsilon-v)/\varepsilon=0$
in $L^{p}$, $\lim_{\varepsilon\thetao0^+}m_\varepsilon=m$ in $L^{p}$,
$\lim_{\varepsilon\thetao0^+}\psi_\varepsilon=\varphi$ in $L^{p}$, $\lim_{\varepsilon\thetao0^+}
\varepsilon^j\nabla^{1+j} v_\varepsilon=0$ in $L^{p}$, $\lim_{\varepsilon\thetao0^+} \varepsilon^j\nabla^j
m_\varepsilon=0$ in $L^{p}$, $\lim_{\varepsilon\thetao0^+} \varepsilon^j\nabla^j\psi_\varepsilon=0$ in
$L^{p}$ for every $p\gammaeq 1$ and any $j\in\{1,\ldots,n\}$ and
\betaegin{multline}
\label{L2009limew03zeta71288888Contggiuuggyyyynew88789999vprop78899shtrihkkklljkhkhgghhhjhhhjkkkhhhjjjkkjjhhj}
0\leq\lim\limits_{\varepsilon\thetao 0}\int\limits_{\Omega}\varphirac{1}{\varepsilon}\thetaimes\\
F\betaigg( \betaig\{\varepsilon^n\nabla^{n+1}v_{\varepsilon},\,\varepsilon^n\nabla^n
m_\varepsilon,\,\varepsilon^n\nabla^n\psi_\varepsilon\betaig\},\,\ldots\,,\betaig\{\varepsilon\nabla^2v_{\varepsilon},\,\varepsilon\nabla
m_\varepsilon,\,\varepsilon\nabla\psi_\varepsilon\betaig\},\,\betaig\{\nabla
v_{\varepsilon},\,m_\varepsilon,\,\psi_\varepsilon\betaig\},\,
v_{\varepsilon},\,f\betaigg)\,dx\\-\int\limits_{\Omega\cap J_{\nabla v}\cup J_{\betaar
m}\cup J_{\varphi}}\Bigg( \inf\betaigg\{\betaar
E^{(n)}_x\betaig(\sigmaigma(\cdot),\thetaheta(\cdot),\gammaamma(\cdot),L\betaig):\;\;L>0,\\
\;\sigmaigma\in \mathcal{W}^{(1)}_n(x,v_\ec k_1,\ldots,v_\ec
k_N),\,\thetaheta\in \mathcal{W}^{(2)}_n(x,v_\ec k_1,\ldots,v_\ec
k_N),\,\gammaamma\in \mathcal{W}^{(3)}_n(x,v_\ec k_1,\ldots,v_\ec
k_N)\betaigg\} \Bigg)\,d\mathcal{H}^{N-1}(x)<\deltaelta\,,
\varepsilonnd{multline}
where
\betaegin{multline}
\label{L2009limew03zeta71288888Contggiuuggyyyynew88789999vprop78899shtrihkkkllyhjyukjkkmmmklklklhhhhkkffgghhjjjkkkllkkhhhjjjlkjjjlkklkjkjl}
\betaar E^{(n)}_x\betaig(\sigmaigma(\cdot),\thetaheta(\cdot),\gammaamma(\cdot),L\betaig):=\\
\int\limits_{I^+_{v_\ec k_1,,\ldots,v_\ec
k_N}}\varphirac{1}{L}F\Bigg(\Big\{L^n\nabla^{n+1}\sigmaigma(y),L^n\nabla^n\thetaheta(y),
L^n\nabla^n\gammaamma(y)\Big\},\ldots,\Big\{\nabla\sigmaigma(y),\thetaheta(y),\gammaamma(y)\Big\},v,f^+ \Bigg)dy+\\
\int\limits_{I^-_{v_\ec k_1,\ldots,v_\ec
k_N}}\varphirac{1}{L}F\Bigg(\Big\{L^n\nabla^{n+1}\sigmaigma(y),
L^n\nabla^n\thetaheta(y), L^n\nabla^n\gammaamma(y)\Big\},\ldots,
\Big\{\nabla\sigmaigma(y),\thetaheta(y),\gammaamma(y)\Big\},v,f^- \Bigg)dy\,,
\varepsilonnd{multline}
\varepsilonnd{comment}
\end{theorem}
\begin{proof}
Without any loss of generality we may assume that
$\Omega=\big\{x=(x_1,x_2,\ldots, x_N)\in\mathbb{R}^N:\;|x_j|<c_0\;\forall
j\big\}$ for some $c_0>0$. Let $\{v_\varepsilon\}_{\varepsilon>0}\subset
W^{1,q_1}_{loc}(\Omega,\mathbb{R}^k)\cap W^{(n+1),p}_{loc}(\Omega,\mathbb{R}^k)$,
$\{m_\varepsilon\}_{\varepsilon>0}\subset L^{q_2}_{loc}(\Omega,\mathbb{R}^{d\times N})\cap
W^{n,p}_{loc}(\Omega,\mathbb{R}^{d\times N})$ and
$\big\{\psi_\varepsilon:\Omega\to\mathcal{M}\big\}_{\varepsilon>0}\subset
L^{q_3}_{loc}(\Omega,\mathbb{R}^{m})\cap W^{n,p}_{loc}(\Omega,\mathbb{R}^m)$ be such that
$div_x m_\varepsilon(x)\equiv 0$ in $\Omega$, $v_\varepsilon\to v$ in
$W^{1,q_1}_{loc}(\Omega,\mathbb{R}^k)$ as $\varepsilon\to 0^+$, $m_\varepsilon\to \bar m$ in
$L^{q_2}_{loc}(\Omega,\mathbb{R}^{d\times N})$ as $\varepsilon\to 0^+$ and $\psi_\varepsilon\to
\varphi$ in $L^{q_3}_{loc}(\Omega,\mathbb{R}^{m})$.
Clearly there exist
$L(x'),L_\varepsilon(x'):\big\{x'\in\mathbb{R}^{N-1}:\;|x'_j|<c_0\;\forall
j\big\}\to\mathbb{R}^{d\times(N-1)}$ such that $div_{x'}L(x')\equiv \bar
m_{1}(0,x')$ and $div_{x'}L_\varepsilon(x')\equiv m_{\varepsilon,1}(0,x')$, where we
denote by $\bar m_{1}(x):\Omega\to\mathbb{R}^d$ and $\bar m'(x):\Omega\to
\mathbb{R}^{d\times (N-1)}$ the first column and the rest of the matrix
valued function $\bar m(x):\Omega\to\mathbb{R}^{d\times N}$, so that $\big(\bar
m_{1}(x),\bar m'(x)\big):=\bar m(x):\Omega\to\mathbb{R}^{d\times N}$, and we
denote by $m_{\varepsilon,1}(x):\Omega\to\mathbb{R}^d$ and $m'_\varepsilon(x):\Omega\to \mathbb{R}^{d\times
(N-1)}$ the first column and the rest of the matrix valued function
$m_\varepsilon(x):\Omega\to\mathbb{R}^{d\times N}$, so that
$\big(m_{\varepsilon,1}(x),m'_\varepsilon(x)\big):=m_\varepsilon(x):\Omega\to\mathbb{R}^{d\times N}$. Then
define $\Psi_\varepsilon,\Psi:\mathbb{R}^N\to\mathbb{R}^m$ and $M_\varepsilon,M:\mathbb{R}^N\to\mathbb{R}^{d\times
(N-1)}$ by
\begin{multline}\label{vhgvtguyiiuijjkjkkjggjkjjhjkkllgvvjhkjhk}
\Psi_\varepsilon(x):=\int_{0}^{x_1}\psi_\varepsilon(s,x')ds\,,\quad\Psi(x):=\int_{0}^{x_1}\varphi(s,x')ds\,,\quad M(x):=-L(x')+\int_{0}^{x_1}\bar m'(s,x')ds\quad\text{and}\\
M_\varepsilon(x):=-L_\varepsilon(x')+\int_{0}^{x_1}m'_\varepsilon(s,x')ds\quad\quad\forall
x=(x_1,x'):=(x_1,x_2,\ldots, x_N)\in\Omega\,.
\end{multline}
Then, since $div_x \bar m\equiv 0$ and $div_x m_\varepsilon\equiv 0$, by
\eqref{vhgvtguyiiuijjkjkkjggjkjjhjkkllgvvjhkjhk} we obtain
\begin{multline}\label{vhgvtguyiiuijjkjkkjggjkjjhjkkllghjjhjhhkjhkljljhlk}
\frac{\partial\Psi}{\partial x_1}(x)=\varphi(x)\,,\quad
\frac{\partial M}{\partial x_1}(x)=\bar m'(x)\,,\quad
-div_{x'}M(x)=\bar m_{1}(x)\quad\text{for a.e.}\;\;
x=(x_1,x')\in\Omega\,,\quad\text{and}\\
\frac{\partial\Psi_\varepsilon}{\partial x_1}(x)=\psi_\varepsilon(x)\,,\quad
\frac{\partial M_\varepsilon}{\partial x_1}(x)=m'_\varepsilon(x)\,,\quad
-div_{x'}M_\varepsilon(x)=m_{\varepsilon,1}(x)\quad\quad\text{for a.e.}\;\;
x=(x_1,x')\in\Omega\,.
\end{multline}
Therefore,
the result follows by applying
Theorem~\ref{dehgfrygfrgygen} to the functions $\{v,M,\Psi\}$ and to
the sequence $\{v_\varepsilon,M_\varepsilon,\Psi_\varepsilon\}$.
\end{proof}
\betaegin{comment}
\betaegin{theorem}\label{dehgfrygfrgygenjklhhj}
Let $\mathcal{M}$ be a subset of $\varphiield{R}^m$, $\Omega\sigmaubset\varphiield{R}^N$ be an open
set and $D\sigmaubset\Omega$ be a $\mathcal{H}^{N-1}$ $\sigmaigma$-finite Borel
set. Furthermore, let $q\gammaeq 1$, $p\gammaeq 1$ and let $F$ be a
continuous function defined on
$$
\betaig\{\varphiield{R}^{k\thetaimes N^{n+1}}\thetaimes\varphiield{R}^{d\thetaimes
N^{n+1}}\thetaimes\varphiield{R}^{m\thetaimes
N^n}\betaig\}\thetaimes\ldots\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N\thetaimes
N}\thetaimes\varphiield{R}^{d\thetaimes N\thetaimes N}\thetaimes\varphiield{R}^{m\thetaimes
N}\betaig\}\thetaimes\betaig\{\varphiield{R}^{k\thetaimes N}\thetaimes\varphiield{R}^{d\thetaimes
N}\thetaimes\varphiield{R}^{m}\betaig\},
$$
taking values in $\varphiield{R}$ and satisfying $F\gammaeq 0$. Let $v(x)\in
W^{1,q}_{loc}(\Omega,\varphiield{R}^k)$, $\betaar m(x)\in L^q_{loc}(\Omega,\varphiield{R}^{d\thetaimes N})$
and $\varphi\in L^q_{loc}(\Omega,\varphiield{R}^{m})$ be such that $div_x \betaar
m(x)=0$ a.e.~in $\Omega$ and
$$F\Big(0,0,\ldots,0,\nabla v,\betaar m,\varphi\Big)=0
\quad\thetaext{a.e.~in}\; \Omega\,.$$ Assume also that there exist three
Borel mappings $\{\nabla v\}^+(x):D\thetao\varphiield{R}^{k\thetaimes N}$, $\{\nabla
v\}^-(x):D\thetao\varphiield{R}^{k\thetaimes N}$, $\betaar m^+(x):D\thetao\varphiield{R}^{d\thetaimes N}$,
$\betaar m^-(x):D\thetao\varphiield{R}^{d\thetaimes N}$, $\varphi^+(x):D\thetao\varphiield{R}^{m}$,
$\varphi^-(x):D\thetao\varphiield{R}^{m}$ and $v_\ec n(x):D\thetao S^{N-1}$ such that for
every $x\in D$ we have
\betaegin{multline}\label{L2009surfhh8128odno888jjjjjkkkkkkgenhjjhjkj}
\lim\limits_{\rho\thetao 0^+}\varphirac{1}
{\mathcal{L}^N\betaig(B_\rho(x)\betaig)}\int_{B_\rho^+(x,v_\ec
n(x))}\Bigg(\Big|\nabla v(y)-\{\nabla v\}^+(x)\Big|^q+\Big|\betaar
m(y)-\betaar m^+(x)\Big|^q+\Big|\varphi(y)-\varphi^+(x)\Big|^q\Bigg)\,dy=0\,,\\
\lim\limits_{\rho\thetao 0^+}\varphirac{1}
{\mathcal{L}^N\betaig(B_\rho(x)\betaig)}\int_{B_\rho^-(x,v_\ec
n(x))}\Bigg(\Big|\nabla v(y)-\{\nabla v\}^-(x)\Big|^q+\Big|\betaar
m(y)-\betaar
m^-(x)\Big|^q+\Big|\varphi(y)-\varphi^-(x)\Big|^q\Bigg)\,dy=0\,.
\varepsilonnd{multline}
Then for every
$\{v_\varepsilon\}_{\varepsilon>0}\sigmaubset W^{1,q}_{loc}(\Omega,\varphiield{R}^k)\cap
W^{(n+1),p}_{loc}(\Omega,\varphiield{R}^k)$, $\{m_\varepsilon\}_{\varepsilon>0}\sigmaubset
L^{q}_{loc}(\Omega,\varphiield{R}^{k\thetaimes N})\cap W^{n,p}_{loc}(\Omega,\varphiield{R}^{d\thetaimes N})$
and $\{\psi_\varepsilon\}_{\varepsilon>0}\sigmaubset L^{q}_{loc}(\Omega,\varphiield{R}^{m})\cap
W^{n,p}_{loc}(\Omega,\varphiield{R}^m)$ satisfying $div_x m_\varepsilon(x)\varepsilonquiv 0$ in
$\varphiield{R}^N$, $\psi_\varepsilon(x)\in\mathcal{M}$ for a.e. $x\in\Omega$, $v_\varepsilon\thetao v$ in
$W^{1,q}_{loc}(\Omega,\varphiield{R}^k)$ as $\varepsilon\thetao 0^+$, $m_\varepsilon\thetao \betaar m$ in
$L^{q}_{loc}(\Omega,\varphiield{R}^{d\thetaimes N})$ as $\varepsilon\thetao 0^+$ and $\psi_\varepsilon\thetao
\varphi$ in $L^{q}_{loc}(\Omega,\varphiield{R}^{m})$, we will have
\betaegin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkgenjbhghgh}
\varliminf_{\varepsilon\thetao 0^+}\int\limits_{\Omega}\varphirac{1}{\varepsilon} F\betaigg(
\betaig\{\varepsilon^n\nabla^{n+1}v_{\varepsilon},\,\varepsilon^n\nabla^n
m_\varepsilon,\,\varepsilon^n\nabla^n\psi_\varepsilon\betaig\},\,\ldots\,,\betaig\{\varepsilon\nabla^2v_{\varepsilon},\,\varepsilon\nabla
m_\varepsilon,\,\varepsilon\nabla\psi_\varepsilon\betaig\},\,\betaig\{\nabla
v_{\varepsilon},\,m_\varepsilon,\,\psi_\varepsilon\betaig\}\betaigg)\,dx\\
\gammaeq \int_{D}\betaar E^{(n)}_0\betaigg(\{\nabla v\}^+(x),\betaar
m^+(x),\varphi^+(x),\{\nabla v\}^-(x),\betaar m^-(x),\varphi^-(x),v_\ec
n(x)\betaigg)d \mathcal H^{N-1}(x)\,,
\varepsilonnd{multline}
where for every $a,b\in\varphiield{R}^m$ and any unit vector $v_\ec \nu\in\varphiield{R}^N$
\betaegin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkjgjgjgjhlllllkkkgenhgjkggg}
\betaar E^{(n)}_0\betaigg(\{\nabla v\}^+,\betaar m^+,\varphi^+,\{\nabla v\}^-,\betaar m^-,\varphi^-,v_\ec \nu\betaigg):=\\
\inf\Bigg\{\varliminf_{\varepsilon\thetao 0^+}\int_{I_{v_\ec \nu}}
\varphirac{1}{\varepsilon}F\betaigg(\Big\{\varepsilon^n\nabla^{n+1}\sigmaigma_\varepsilon(y),\varepsilon^n\nabla^n\thetaheta_\varepsilon(y),
\varepsilon^n\nabla^n\gammaamma_\varepsilon(y)\Big\},\ldots,\Big\{\nabla\sigmaigma_\varepsilon(y),\thetaheta_\varepsilon(y),\gammaamma_\varepsilon(y)\Big\}
\betaigg)dy:\\
\sigmaigma_\varepsilon\in W^{1,q}(I_{v_\ec \nu},\varphiield{R}^k)\cap W^{(n+1),p}(I_{v_\ec
\nu},\varphiield{R}^k),\;\thetaheta_\varepsilon\in L^{q}(I_{v_\ec \nu},\varphiield{R}^{k\thetaimes N})\cap
W^{n,p}(I_{v_\ec \nu},\varphiield{R}^{d\thetaimes N}),\\ \betaig\{\gammaamma_\varepsilon:I_{v_\ec
\nu}\thetao\mathcal{M}\betaig\}\in L^{q}(I_{v_\ec \nu},\varphiield{R}^{m})\cap
W^{n,p}(I_{v_\ec \nu},\varphiield{R}^m)\;\\ \thetaext{s.t.}\; \Deltaiv_y\thetaheta_\varepsilon(y)\varepsilonquiv
0,\;\nabla\sigmaigma_\varepsilon(y)\thetao\sigmaigma\betaig(y,\{\nabla v\}^+,\{\nabla
v\}^-,v_\ec\nu\betaig)\;\thetaext{in}\;L^q(I_{v_\ec \nu},\varphiield{R}^{k\thetaimes N}),\\
\thetaheta_\varepsilon(y)\thetao\thetaheta(y,\betaar m^+,\betaar
m^-,v_\ec\nu)\;\thetaext{in}\;L^q(I_{v_\ec \nu},\varphiield{R}^{d\thetaimes
N}),\;\gammaamma_\varepsilon(y)\thetao\gammaamma(y,\varphi^+,\varphi^-,v_\ec\nu)\;\thetaext{in}\;L^q(I_{v_\ec
\nu},\varphiield{R}^{m})
\Bigg\}\,.
\varepsilonnd{multline}
Here $I_{v_\ec \nu}:=\{y\in\varphiield{R}^N:\;|y\cdot v_\ec\nu_j|<1/2\;\;\;\varphiorall
j=1,2\ldots N\}$ where
$\{v_\ec\nu_1,v_\ec\nu_2,\ldots,v_\ec\nu_N\}\sigmaubset\varphiield{R}^N$ is an
orthonormal base in $\varphiield{R}^N$ such that $v_\ec\nu_1:=v_\ec \nu$ and
\betaegin{multline}\label{fhyffgfgfgfffgfgenkjgjgkgkg}
\sigmaigma\betaig(y,\{\nabla v\}^+,\{\nabla
v\}^-,v_\ec\nu\betaig):=\betaegin{cases}\{\nabla
v\}^+\quad\thetaext{if}\;\,y\cdotv_\ec \nu>0\,,\\
\{\nabla v\}^-\quad\thetaext{if}\;\,y\cdotv_\ec
\nu<0\,,\varepsilonnd{cases}\quad\thetaheta\betaig(y,\betaar m^+,\betaar
m^-,v_\ec\nu\betaig):=\betaegin{cases}\betaar m^+\quad\thetaext{if}\;\,y\cdotv_\ec \nu>0\,,\\
\betaar m^-\quad\thetaext{if}\;\,y\cdotv_\ec
\nu<0\,,\varepsilonnd{cases}\\ \thetaext{and}\quad\gammaamma\betaig(y,\varphi^+,\varphi^-,v_\ec\nu\betaig):=\betaegin{cases}\varphi^+\quad\thetaext{if}\;\,y\cdotv_\ec \nu>0\,,\\
\varphi^-\quad\thetaext{if}\;\,y\cdotv_\ec \nu<0\,.\varepsilonnd{cases}
\varepsilonnd{multline}
\betaegin{comment*}
Then
for every $\deltaelta>0$ there exist sequences
$\{v_\varepsilon\}_{0<\varepsilon<1}\sigmaubset C^\infty(\varphiield{R}^N,\varphiield{R}^k)$,
$\{m_\varepsilon\}_{0<\varepsilon<1}\sigmaubset C^\infty(\varphiield{R}^N,\varphiield{R}^{d\thetaimes N})$ and
$\{\psi_\varepsilon\}_{0<\varepsilon<1}\sigmaubset C^\infty(\varphiield{R}^N,\varphiield{R}^{m})$
such that $div_x m_\varepsilon(x)\varepsilonquiv 0$ in $\Omega$,
$\int_\Omega\psi_\varepsilon(x)\,dx=\int_\Omega \varphi(x)\,dx$,
$\lim_{\varepsilon\thetao0^+}v_\varepsilon=v$ in $W^{1,p}$, $\lim_{\varepsilon\thetao0^+}(v_\varepsilon-v)/\varepsilon=0$
in $L^{p}$, $\lim_{\varepsilon\thetao0^+}m_\varepsilon=m$ in $L^{p}$,
$\lim_{\varepsilon\thetao0^+}\psi_\varepsilon=\varphi$ in $L^{p}$, $\lim_{\varepsilon\thetao0^+}
\varepsilon^j\nabla^{1+j} v_\varepsilon=0$ in $L^{p}$, $\lim_{\varepsilon\thetao0^+} \varepsilon^j\nabla^j
m_\varepsilon=0$ in $L^{p}$, $\lim_{\varepsilon\thetao0^+} \varepsilon^j\nabla^j\psi_\varepsilon=0$ in
$L^{p}$ for every $p\gammaeq 1$ and any $j\in\{1,\ldots,n\}$ and
\betaegin{multline}
\label{L2009limew03zeta71288888Contggiuuggyyyynew88789999vprop78899shtrihkkklljkhkhgghhhjhhhjkkkhhhjjjkkjjhhj}
0\leq\lim\limits_{\varepsilon\thetao 0}\int\limits_{\Omega}\varphirac{1}{\varepsilon}\thetaimes\\
F\betaigg( \betaig\{\varepsilon^n\nabla^{n+1}v_{\varepsilon},\,\varepsilon^n\nabla^n
m_\varepsilon,\,\varepsilon^n\nabla^n\psi_\varepsilon\betaig\},\,\ldots\,,\betaig\{\varepsilon\nabla^2v_{\varepsilon},\,\varepsilon\nabla
m_\varepsilon,\,\varepsilon\nabla\psi_\varepsilon\betaig\},\,\betaig\{\nabla
v_{\varepsilon},\,m_\varepsilon,\,\psi_\varepsilon\betaig\},\,
v_{\varepsilon},\,f\betaigg)\,dx\\-\int\limits_{\Omega\cap J_{\nabla v}\cup J_{\betaar
m}\cup J_{\varphi}}\Bigg( \inf\betaigg\{\betaar
E^{(n)}_x\betaig(\sigmaigma(\cdot),\thetaheta(\cdot),\gammaamma(\cdot),L\betaig):\;\;L>0,\\
\;\sigmaigma\in \mathcal{W}^{(1)}_n(x,v_\ec k_1,\ldots,v_\ec
k_N),\,\thetaheta\in \mathcal{W}^{(2)}_n(x,v_\ec k_1,\ldots,v_\ec
k_N),\,\gammaamma\in \mathcal{W}^{(3)}_n(x,v_\ec k_1,\ldots,v_\ec
k_N)\betaigg\} \Bigg)\,d\mathcal{H}^{N-1}(x)<\deltaelta\,,
\varepsilonnd{multline}
where
\betaegin{multline}
\label{L2009limew03zeta71288888Contggiuuggyyyynew88789999vprop78899shtrihkkkllyhjyukjkkmmmklklklhhhhkkffgghhjjjkkkllkkhhhjjjlkjjjlkklkjkjl}
\betaar E^{(n)}_x\betaig(\sigmaigma(\cdot),\thetaheta(\cdot),\gammaamma(\cdot),L\betaig):=\\
\int\limits_{I^+_{v_\ec k_1,\ldots,v_\ec
k_N}}\varphirac{1}{L}F\Bigg(\Big\{L^n\nabla^{n+1}\sigmaigma(y),L^n\nabla^n\thetaheta(y),
L^n\nabla^n\gammaamma(y)\Big\},\ldots,\Big\{\nabla\sigmaigma(y),\thetaheta(y),\gammaamma(y)\Big\},v,f^+ \Bigg)dy+\\
\int\limits_{I^-_{v_\ec k_1,\ldots,v_\ec
k_N}}\varphirac{1}{L}F\Bigg(\Big\{L^n\nabla^{n+1}\sigmaigma(y),
L^n\nabla^n\thetaheta(y), L^n\nabla^n\gammaamma(y)\Big\},\ldots,
\Big\{\nabla\sigmaigma(y),\thetaheta(y),\gammaamma(y)\Big\},v,f^- \Bigg)dy\,,
\varepsilonnd{multline}
\varepsilonnd{comment*}
\varepsilonnd{theorem}
\betaegin{proof}
Without any loss of generality we may assume that
$\Omega=\betaig\{x=(x_1,x_2,\ldots, x_N)\in\varphiield{R}^N:\;|x_j|<c_0\;\varphiorall
j\betaig\}$ for some $c_0>0$. Let $\{v_\varepsilon\}_{\varepsilon>0}\sigmaubset
W^{1,q}_{loc}(\Omega,\varphiield{R}^k)\cap W^{(n+1),p}_{loc}(\Omega,\varphiield{R}^k)$,
$\{m_\varepsilon\}_{\varepsilon>0}\sigmaubset L^{q}_{loc}(\Omega,\varphiield{R}^{k\thetaimes N})\cap
W^{n,p}_{loc}(\Omega,\varphiield{R}^{d\thetaimes N})$ and
$\betaig\{\psi_\varepsilon:\Omega\thetao\mathcal{M}\betaig\}_{\varepsilon>0}\sigmaubset
L^{q}_{loc}(\Omega,\varphiield{R}^{m})\cap W^{n,p}_{loc}(\Omega,\varphiield{R}^m)$ be such that
$div_x m_\varepsilon(x)\varepsilonquiv 0$ in $\Omega$, $v_\varepsilon\thetao v$ in
$W^{1,q}_{loc}(\Omega,\varphiield{R}^k)$ as $\varepsilon\thetao 0^+$, $m_\varepsilon\thetao \betaar m$ in
$L^{q}_{loc}(\Omega,\varphiield{R}^{d\thetaimes N})$ as $\varepsilon\thetao 0^+$ and $\psi_\varepsilon\thetao
\varphi$ in $L^{q}_{loc}(\Omega,\varphiield{R}^{m})$.
Clearly there exist
$L(x'),L_\varepsilon(x'):\betaig\{x'\in\varphiield{R}^{N-1}:\;|x'_j|<c_0\;\varphiorall
j\betaig\}\thetao\varphiield{R}^{d\thetaimes(N-1)}$ such that $\Deltaiv_{x'}L(x')\varepsilonquiv \betaar
m_{1}(0,x')$ and $\Deltaiv_{x'}L_\varepsilon(x')\varepsilonquiv m_{\varepsilon,1}(0,x')$, where we
denote by $\betaar m_{1}(x):\Omega\thetao\varphiield{R}^d$ and $\betaar m'(x):\Omega\thetao
\varphiield{R}^{d\thetaimes (N-1)}$ the first column and the rest of the matrix
valued function $\betaar m(x):\Omega\thetao\varphiield{R}^{d\thetaimes N}$, so that $\betaig(\betaar
m_{1}(x),\betaar m'(x)\betaig):=\betaar m(x):\Omega\thetao\varphiield{R}^{d\thetaimes N}$, and we
denote by $m_{\varepsilon,1}(x):\Omega\thetao\varphiield{R}^d$ and $m'_\varepsilon(x):\Omega\thetao \varphiield{R}^{d\thetaimes
(N-1)}$ the first column and the rest of the matrix valued function
$m_\varepsilon(x):\Omega\thetao\varphiield{R}^{d\thetaimes N}$, so that
$\betaig(m_{\varepsilon,1}(x),m'_\varepsilon(x)\betaig):=m_\varepsilon(x):\Omega\thetao\varphiield{R}^{d\thetaimes N}$. Then
define $\Psi_\varepsilon:\varphiield{R}^N\thetao\varphiield{R}^m$ and $M_\varepsilon:\varphiield{R}^N\thetao\varphiield{R}^{d\thetaimes (N-1)}$ by
\betaegin{multline}\label{vhgvtguyiiuijjkjkkjggjkjjhjkkllgvvjhkjhk}
\Psi_\varepsilon(x):=\int_{0}^{x_1}\psi_\varepsilon(s,x')ds\,,\quad\Psi(x):=\int_{0}^{x_1}\varphi(s,x')ds\,,\quad M(x):=-L(x')+\int_{0}^{x_1}\betaar m'(s,x')ds\quad\thetaext{and}\\
M_\varepsilon(x):=-L_\varepsilon(x')+\int_{0}^{x_1}m'_\varepsilon(s,x')ds\quad\quad\varphiorall
x=(x_1,x'):=(x_1,x_2,\ldots x_N)\in\Omega\,,
\varepsilonnd{multline}
Then, since $div_x \betaar m\varepsilonquiv 0$ and $div_x m_\varepsilon\varepsilonquiv 0$, by
\varepsilonr{vhgvtguyiiuijjkjkkjggjkjjhjkkllgvvjhkjhk} we obtain
\betaegin{multline}\label{vhgvtguyiiuijjkjkkjggjkjjhjkkllghjjhjhhkjhkljljhlk}
\varphirac{\partial\Psi}{\partial x_1}(x)=\varphi(x)\,,\quad
\varphirac{\partial M}{\partial x_1}(x)=\betaar m'(x)\,,\quad
-div_{x'}M(x)=\betaar m_{1}(x)\quad\thetaext{for a.e.}\;\;
x=(x_1,x')\in\Omega\,,\quad\thetaext{and}\\
\varphirac{\partial\Psi_\varepsilon}{\partial x_1}(x)=\psi_\varepsilon(x)\,,\quad
\varphirac{\partial M_\varepsilon}{\partial x_1}(x)=m'_\varepsilon(x)\,,\quad
-div_{x'}M_\varepsilon(x)=m_{\varepsilon,1}(x)\quad\quad\thetaext{for a.e.}\;\;
x=(x_1,x')\in\Omega\,.
\varepsilonnd{multline}
Therefore, the result follows by applying Theorem
\ref{dehgfrygfrgygen} to the functions $\{v,M,\Psi\}$ and to the
sequence $\{v_\varepsilon,M_\varepsilon,\Psi_\varepsilon\}$.
\varepsilonnd{proof}
\varepsilonnd{comment}
\betaegin{comment}
\sigmaection{Further estimates for the lower bound}
\betaegin{proposition}\label{L2009.02}
Let $v_\ec P\in\mathcal{L}(\varphiield{R}^{k\thetaimes N\thetaimes N},\varphiield{R}^d)$ and $v_\ec
Q\in\mathcal{L}(\varphiield{R}^{m\thetaimes N},\varphiield{R}^q)$ be linear operators and
let $F\in C^1(\varphiield{R}^{d}\thetaimes\varphiield{R}^{q}\thetaimes\varphiield{R}^{k\thetaimes
N}\thetaimes\varphiield{R}^m\,,\varphiield{R})$ be such that $F\gammaeq 0$ and there exist $C>0$ and
$p\gammaeq 1$ satisfying
$0\leq F(a,b,c,d)\leq C\betaig(|a|^{p}+|b|^{p}+|c|^p+|d|^p+1\betaig)$ for
every $(a,b,c,d)$.
Furthermore
let $v_\ec k\in\varphiield{R}^k$, $v_\ec\nu\in S^{N-1}$,
$\varphi^+,\varphi^-\in\varphiield{R}^m$ and $V^+,V^-\in \varphiield{R}^{k\thetaimes N}$ be such
that
$V^+-V^-=v_\ec k\omegatimesv_\ec\nu$ and
$F(0,0,V^+,\varphi^+)=F(0,0,V^-,\varphi^-)=0$. Set $\varphi(x)\in
L^\infty(\varphiield{R}^N,\varphiield{R}^m)$ and $v(x)\in Lip(\varphiield{R}^N,\varphiield{R}^k)$ by
\betaegin{equation}\label{ghgghjhjkdfhg}
\varphi(x):=\betaegin{cases}
\varphi^+\quad\thetaext{if}\;\;x\cdotv_\ec\nu>0\,,\\
\varphi^-\quad\thetaext{if}\;\;x\cdotv_\ec\nu<0\,,
\varepsilonnd{cases}\quad\quad
v(x):=\betaegin{cases}
V^-\cdot x+(x\cdotv_\ec\nu)v_\ec k\quad\thetaext{if}\;\;x\cdotv_\ec\nu\gammaeq 0\,,\\
V^-\cdot x\quad\quad\quad\thetaext{if}\;\;x\cdotv_\ec\nu<0\,.
\varepsilonnd{cases}
\varepsilonnd{equation}
Next
let $\betaig\{v_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset W^{2,p}_{loc}(I_{v_\ec
\nu},\varphiield{R}^k)$ and $\betaig\{\varphi_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset
W^{1,p}_{loc}(I_{v_\ec \nu},\varphiield{R}^m)$ be such that $\,\lim_{\varepsilon\thetao
0^+}\varphi_\varepsilon=\varphi$ in $L^{p}_{loc}(I_{v_\ec \nu},\varphiield{R}^m)$,
$\lim_{\varepsilon\thetao 0^+}v_\varepsilon=v$ in $W^{1,p}_{loc}(I_{v_\ec \nu},\varphiield{R}^k)$,
$\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon\,v_\ec P\cdot\{\nabla^2 v_\varepsilon\}\betaig)=0$ in
$L^p_{loc}(I_{v_\ec \nu},\varphiield{R}^{d})$ and $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon\,v_\ec
Q\cdot\{\nabla \varphi_\varepsilon\}\betaig)=0$ in $L^p_{loc}(I_{v_\ec
\nu},\varphiield{R}^{q})$, where, as before, $I_{v_\ec \nu}:=\{y\in\varphiield{R}^N:\;|y\cdot
v_\ec\nu_j|<1/2\;\;\;\varphiorall j=1,2\ldots N\}$ where
$\{v_\ec\nu_1,v_\ec\nu_2,\ldots,v_\ec\nu_N\}\sigmaubset\varphiield{R}^N$ is an
orthonormal base in $\varphiield{R}^N$ such that $v_\ec\nu_1:=v_\ec \nu$. Finally
assume that
\betaegin{itemize}
\item either
$\lim_{\varepsilon\thetao 0^+}\betaig\{(v_\varepsilon-v)/\varepsilon\betaig\}=0$ in $L^{p}_{loc}(I_{v_\ec
\nu},\varphiield{R}^k)$,
\item or there exists a family
$\betaig\{u_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset C^2(\varphiield{R}^N,\varphiield{R}^k)$, such that
$\nabla u_\varepsilon(x)\varepsilonquiv h_\varepsilon(v_\ec\nu\cdot x)$ for some function $h_\varepsilon$
(i.e. $\nabla u_\varepsilon(x)$ depends actually only on the first real
variable in the base $\{v_\ec\nu_1,v_\ec\nu_2,\ldots,v_\ec\nu_N\}$),
$\nabla u_\varepsilon(x)=\nabla v(x)$ if $|v_\ec\nu\cdot x|>c_0$, where
$0<c_0<1/2$ is a constant, and $\lim_{\varepsilon\thetao 0^+}u_\varepsilon=v$ in
$W^{1,p}_{loc}(I_{v_\ec \nu},\varphiield{R}^k)$, $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon\,v_\ec
P\cdot\{\nabla^2 u_\varepsilon\}\betaig)=0$ in $L^p_{loc}(I_{v_\ec \nu},\varphiield{R}^{d})$
and $\lim_{\varepsilon\thetao 0^+}\betaig\{(v_\varepsilon-u_\varepsilon)/\varepsilon\betaig\}=0$ in
$L^{p}_{loc}(I_{v_\ec \nu},\varphiield{R}^k)$.
\varepsilonnd{itemize}
Then
\betaegin{equation}\label{ggfghjjhfhfjfhj}
\varliminf_{\varepsilon\thetao 0^+}\int_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon} F\Big(\varepsilon\,v_\ec
P\cdot\{\nabla^2 v_{\varepsilon}\},\,\varepsilon\,v_\ec Q\cdot\{\nabla
\varphi_{\varepsilon}\},\,\nabla v_{\varepsilon},\,\varphi_{\varepsilon}\Big)\,dx\gammaeq
E_{per}\Big(V^+,\varphi^+,V^-,\varphi^-,v_\ec\nu\Big)\,,
\varepsilonnd{equation}
where
\betaegin{multline}\label{L2009hhffff12}
E_{per}\Big(V^+,\varphi^+,V^-,\varphi^-,v_\ec\nu\Big)\;:=\;
\inf\Bigg\{\int_{I_{v_\ec \nu}}\varphirac{1}{L} F\Big(L\,v_\ec
P\cdot\{\nabla^2 \xi\},\,L\,v_\ec Q\cdot\{\nabla z_\eta\},\,\nabla
\xi,\,z_\eta\Big)\,dx:\\ L\in(0,+\infty)\,,\; \xi\in
\mathcal{S}_1(V^+,V^-,I_{v_\ec\nu})\,,\;z_\eta\in
\mathcal{S}_2(\varphi^+,\varphi^-,I_{v_\ec\nu})\Bigg\}\,,
\varepsilonnd{multline}
with
\betaegin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjfgnhf}
\mathcal{S}_1\betaig(V^+,V^-,I_{v_\ec\nu}\betaig):=
\betaigg\{\xi\in C^2(\varphiield{R}^N,\varphiield{R}^k):\;\;\nabla \xi(y)=V^-\;\thetaext{ if }\;y\cdotv_\ec\nu\leq-1/2,\\
\nabla \xi(y)=V^+\;\thetaext{ if }\; y\cdotv_\ec\nu\gammaeq 1/2\;\thetaext{ and
}\;\nabla \xi\betaig(y+v_\ec\nu_j\betaig)=\nabla \xi(y)\;\;\varphiorall
j=2,3,\ldots, N\betaigg\}\,,
\varepsilonnd{multline}
and
\betaegin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjddddhdkg}
\mathcal{S}_2\betaig(\varphi^+,\varphi^-,I_{v_\ec\nu}\betaig):=
\betaigg\{z_\eta\in C^1(\varphiield{R}^N,\varphiield{R}^m):\;\;z_\eta(y)=\varphi^-\;\thetaext{ if }\;y\cdotv_\ec\nu\leq-1/2,\\
z_\eta(y)=\varphi^+\;\thetaext{ if }\; y\cdotv_\ec\nu\gammaeq 1/2\;\thetaext{ and
}\;z_\eta\betaig(y+v_\ec \nu_j\betaig)=z_\eta(y)\;\;\varphiorall j=2,3,\ldots,
N\betaigg\}\,,
\varepsilonnd{multline}
\varepsilonnd{proposition}
\betaegin{proof}
Clearly we may assume
\betaegin{equation}\label{ggfghjjhfhfjfhjhjhjhjfjkdghfd}
\varliminf_{\varepsilon\thetao 0^+}\int_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon} F\Big(\varepsilon\,v_\ec
P\cdot\{\nabla^2 v_{\varepsilon}\},\,\varepsilon\,v_\ec Q\cdot\{\nabla
\varphi_{\varepsilon}\},\,\nabla v_{\varepsilon},\,\varphi_{\varepsilon}\Big)\,dx<+\infty\,,
\varepsilonnd{equation}
otherwise it is trivial. Moreover, without any loss of generality we
may assume that $v_\ec\nu=v_\ec e_1:=(1,0,0,\ldots,0)$ and $I_{v_\ec
\nu}=\betaig\{y=(y_1,y_2\ldots y_N)\in\varphiield{R}^N:\;|y_j|<1/2\;\;\;\varphiorall
j=1,2\ldots N\betaig\}$. Furthermore, without loss of generality we may
assume that $v_\varepsilon\in C^2(\varphiield{R}^N,\varphiield{R}^k)$ and $\varphi_\varepsilon\in C^1(\varphiield{R}^N,\varphiield{R}^m)$.
Otherwise we just approximate.
Next we assume that
\betaegin{equation}\label{ffdtdfuiuuyhuiuuyhjh}
\betaegin{cases}
\lim_{\varepsilon\thetao 0^+}\varphi_\varepsilon=\varphi\quad\thetaext{in}\quad L^{p}(I_{v_\ec
\nu},\varphiield{R}^m)\,,\\
\lim_{\varepsilon\thetao 0^+}v_\varepsilon=v\quad\thetaext{in}\quad W^{1,p}(I_{v_\ec \nu},\varphiield{R}^k)\,,\\
\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon\,v_\ec P\cdot\{\nabla^2
v_\varepsilon\}\betaig)=0\quad\thetaext{in}\quad
L^p(I_{v_\ec \nu},\varphiield{R}^{d})\,,\\
\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon\,v_\ec Q\cdot\{\nabla
\varphi_\varepsilon\}\betaig)=0\quad\thetaext{in}\quad L^p(I_{v_\ec \nu},\varphiield{R}^{q})\,,
\varepsilonnd{cases}
\varepsilonnd{equation}
and
\betaegin{itemize}
\item either
$\lim_{\varepsilon\thetao 0^+}\betaig\{(v_\varepsilon-v)/\varepsilon\betaig\}=0$ in $L^{p}_{loc}(I_{v_\ec
\nu},\varphiield{R}^k)$,
\item or there exists a family $\betaig\{u_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset C^2(\varphiield{R}^N,\varphiield{R}^k)$, such that $\nabla u_\varepsilon(x)\varepsilonquiv h_\varepsilon(v_\ec\nu\cdot x)$
for some function $h_\varepsilon$, $\nabla u_\varepsilon(x)=\nabla v(x)$ if
$|v_\ec\nu\cdot x|>c_0$, where $0<c_0<1/2$ is a constant, and
$\lim_{\varepsilon\thetao 0^+}u_\varepsilon=v$ in $W^{1,p}(I_{v_\ec \nu},\varphiield{R}^k)$,
$\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon\,v_\ec P\cdot\{\nabla^2 u_\varepsilon\}\betaig)=0$ in
$L^p(I_{v_\ec \nu},\varphiield{R}^{d})$ and $\lim_{\varepsilon\thetao
0^+}\betaig\{(v_\varepsilon-u_\varepsilon)/\varepsilon\betaig\}=0$ in $L^{p}(I_{v_\ec \nu},\varphiield{R}^k)$.
\varepsilonnd{itemize}
Consider $l(t)\in C^\infty(\varphiield{R},\varphiield{R})$ with the properties
$\int_0^1l(s)ds=1/2$ and
\betaegin{equation}\label{L2009smooth1}
\betaegin{cases}l(t)=0 \quad\quad\quad\thetaext{for every }t\in(-\infty,\deltaelta)\,,\\
l(t)\in[0,1] \quad\;\;\thetaext{for every }t\in[\deltaelta,1-\deltaelta]\,,\\
l(t)=1 \quad\quad\quad\thetaext{for every }
t\in(1-\deltaelta,+\infty)\,,\varepsilonnd{cases}
\varepsilonnd{equation}
where $\deltaelta>0$. Clearly such a function exists. Then set
\betaegin{multline}\label{vjhvhjvhjvhjjnjk}
\psi_\varepsilon(x):=\varphi^-\,+\,l\betaigg(\varphirac{x_1}{\varepsilon}+\varphirac{1}{2}\betaigg)\cdot\betaig(\varphi^+-\varphi^-\betaig)\quad\thetaext{and}\quad
\hat u_\varepsilon(x):=V^-\cdot
x+\varepsilon\int_{-\infty}^{x_1/\varepsilon}l\betaig(s+1/2\betaig)\,ds\,\cdot\,v_\ec k\quad
\varphiorall x\in\varphiield{R}^N\,.
\varepsilonnd{multline}
Thus $\psi_\varepsilon\in C^\infty(\varphiield{R}^N,\varphiield{R}^m)$ and $\hat u_\varepsilon\in
C^\infty(\varphiield{R}^N,\varphiield{R}^k)$ and in particular
\betaegin{align}\label{hjfjffjgkjgkkgjghgh}
\varepsilon\nabla\psi_\varepsilon(x):=
l'\betaigg(\varphirac{x_1}{\varepsilon}+\varphirac{1}{2}\betaigg)\cdot\betaig(\varphi^+-\varphi^-\betaig)\omegatimesv_\ec
e_1\quad\varphiorall x\in\varphiield{R}^N\,,
\\
\label{vjhvhjvhjvhjjnjkjgghgfj} \nabla \hat
u_\varepsilon(x):=V^-+l\betaigg(\varphirac{x_1}{\varepsilon}+\varphirac{1}{2}\betaigg)\Big(v_\ec
k\omegatimes v_\ec
e_1\Big)=V^-+l\betaigg(\varphirac{x_1}{\varepsilon}+\varphirac{1}{2}\betaigg)\cdot\Big(V^+-V^-\Big)\quad
\varphiorall x\in\varphiield{R}^N\,,\\
\label{fvfgfffhhffff} \varepsilon\nabla^2 \hat
u_\varepsilon(x):=l'\betaigg(\varphirac{x_1}{\varepsilon}+\varphirac{1}{2}\betaigg)\cdot\Big(V^+-V^-\Big)\omegatimesv_\ec
e_1\quad \varphiorall x\in\varphiield{R}^N\,.
\varepsilonnd{align}
Moreover by \varepsilonr{L2009smooth1}, \varepsilonr{vjhvhjvhjvhjjnjk} and
\varepsilonr{vjhvhjvhjvhjjnjkjgghgfj} we obtain
\betaegin{multline}\label{vjhvhjvhjvhjjnjkgffgjkjhj}
\psi_\varepsilon(x)=
\betaegin{cases}
\varphi^-\;\;\thetaext{if}\;\; x_1\leq-\varepsilon/2\,,\\
\varphi^+\;\;\thetaext{if}\;\; x_1\gammaeq\varepsilon/2\,,
\varepsilonnd{cases}
\;\;\nabla \hat u_\varepsilon(x)=\betaegin{cases}V^-\;\;\thetaext{if}\;\; x_1\leq-\varepsilon/2\,,\\
V^+\;\;\thetaext{if}\;\; x_1\gammaeq\varepsilon/2\,,\varepsilonnd{cases}\;\; \hat
u_\varepsilon(x)=v(x)\;\;\thetaext{if}\;\; |x_1|\gammaeq \varepsilon/2\,,
\varepsilonnd{multline}
and by \varepsilonr{hjfjffjgkjgkkgjghgh} and \varepsilonr{fvfgfffhhffff},
\betaegin{equation}\label{vjhvhjvhjvhjjnjkgffgjkjhjfggfff}
\nabla\psi_\varepsilon(x)=0\quad\thetaext{if}\quad |x_1|\gammaeq\varepsilon/2\,,
\quad\thetaext{and}\quad \nabla^2 \hat u_\varepsilon(x)= 0\quad\thetaext{if}\quad
|x_1|\gammaeq\varepsilon/2\,.
\varepsilonnd{equation}
Therefore, by \varepsilonr{vjhvhjvhjvhjjnjk}, \varepsilonr{hjfjffjgkjgkkgjghgh},
\varepsilonr{vjhvhjvhjvhjjnjkjgghgfj}, \varepsilonr{fvfgfffhhffff},
\varepsilonr{vjhvhjvhjvhjjnjkgffgjkjhj} and
\varepsilonr{vjhvhjvhjvhjjnjkgffgjkjhjfggfff} we have $\lim_{\varepsilon\thetao
0^+}\psi_\varepsilon=\varphi$ in $L^{p}(I_{v_\ec \nu},\varphiield{R}^m)$, $\lim_{\varepsilon\thetao
0^+}\hat u_\varepsilon=v$ in $W^{1,p}(I_{v_\ec \nu},\varphiield{R}^k)$, $\lim_{\varepsilon\thetao
0^+}\{\varepsilon\nabla^2 \hat u_\varepsilon\}=0$ in $L^p(I_{v_\ec \nu},\varphiield{R}^{k\thetaimes
N\thetaimes N})$, $\lim_{\varepsilon\thetao 0^+}\{\varepsilon\nabla \psi_\varepsilon\}=0$ in
$L^p(I_{v_\ec \nu},\varphiield{R}^{m\thetaimes N})$ and $\lim_{\varepsilon\thetao 0^+}\betaig\{(\hat
u_\varepsilon-v)/\varepsilon\betaig\}=0$ in $L^{p}(I_{v_\ec \nu},\varphiield{R}^k)$. Thus, if we have
$\lim_{\varepsilon\thetao 0^+}\betaig\{(v_\varepsilon-v)/\varepsilon\betaig\}=0$ in $L^{p}_{loc}(I_{v_\ec
\nu},\varphiield{R}^k)$, then clearly $\lim_{\varepsilon\thetao 0^+}\betaig\{(v_\varepsilon-\hat
u_\varepsilon)/\varepsilon\betaig\}=0$ in $L^{p}_{loc}(I_{v_\ec \nu},\varphiield{R}^k)$ and we set
$u_\varepsilon(x):=\hat u_\varepsilon(x)$. So in any case there exists a family
$\betaig\{u_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset C^2(\varphiield{R}^N,\varphiield{R}^k)$, such that
$\nabla u_\varepsilon(x)\varepsilonquiv h_\varepsilon(v_\ec\nu\cdot x)$ for some function
$h_\varepsilon$, $\nabla u_\varepsilon(x)=\nabla v(x)$ if $|v_\ec\nu\cdot x|>c_0$,
where $0<c_0<1/2$ is a constant, and
\betaegin{equation}\label{gfjguyfygbjhhjgjgghfffgfg}
\betaegin{cases}
\lim_{\varepsilon\thetao 0^+}u_\varepsilon=v\quad\thetaext{in}\quad W^{1,p}(I_{v_\ec
\nu},\varphiield{R}^k)\,,\\
\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon\,v_\ec P\cdot\{\nabla^2 u_\varepsilon\}\betaig)=0\quad
\thetaext{in}\quad L^p(I_{v_\ec \nu},\varphiield{R}^{d})\,,
\\
\lim_{\varepsilon\thetao 0^+}\betaig\{(v_\varepsilon-u_\varepsilon)/\varepsilon\betaig\}=0\quad \thetaext{in}\quad
L^{p}(I_{v_\ec \nu},\varphiield{R}^k)\,.
\varepsilonnd{cases}
\varepsilonnd{equation}
Next for every $t>0$ define
\betaegin{equation}\label{L2009deftvu1}
\psi^{(0)}_{\varepsilon,t}(x):=\varphi_\varepsilon(x)\quad\varphiorall
x\in\varphiield{R}^N\,,\quad\thetaext{and}\quad
u^{(0)}_{\varepsilon,t}(x):=v_\varepsilon(x)\quad\varphiorall x\in\varphiield{R}^N\,,
\varepsilonnd{equation}
and by induction
\betaegin{multline}\label{L2009deftvu1hhhj}
\psi^{(j)}_{\varepsilon,t}(x):=\psi^{(j-1)}_{\varepsilon,t}(x)+\betaigg(l\betaig((x_j-t)/\varepsilon\betaig)+l\betaig(-(t+x_j)/\varepsilon\betaig)\betaigg)\Big(\psi_\varepsilon(x)-\psi^{(j-1)}_{\varepsilon,t}(x)\Big)\quad\varphiorall
j=1,\ldots,N,\;\varphiorall x\in\varphiield{R}^N,\\
u^{(j)}_{\varepsilon,t}(x):=u^{(j-1)}_{\varepsilon,t}(x)+\betaigg(l\betaig((x_j-t)/\varepsilon\betaig)+l\betaig(-(t+x_j)/\varepsilon\betaig)\betaigg)\Big(u_\varepsilon(x)-u^{(j-1)}_{\varepsilon,t}(x)\Big)\quad\varphiorall
j=2,\ldots,N,\;\varphiorall x\in\varphiield{R}^N\,.
\varepsilonnd{multline}
Finally let
\betaegin{equation}\label{L2009deftvu1bjjhhjkh}
\psi_{\varepsilon,t}(x):=\psi^{(N)}_{\varepsilon,t}(x)\quad\varphiorall
x\in\varphiield{R}^N\,,\quad\quad u_{\varepsilon,t}(x):=u^{(N)}_{\varepsilon,t}(x)\quad\varphiorall
x\in\varphiield{R}^N\,.
\varepsilonnd{equation}
Then clearly $\psi_{\varepsilon,t}\in C^1(\varphiield{R}^N,\varphiield{R}^m)$, $u_{\varepsilon,t}\in
C^2(\varphiield{R}^N,\varphiield{R}^k)$ and for each $t>0$ we have
\betaegin{equation}\label{L2009eqgl1}
\betaegin{cases}
\psi_{\varepsilon,t}(x)=\varphi_\varepsilon(x)\;\;\thetaext{and}\;\; u_{\varepsilon,t}(x)=v_\varepsilon(x)\quad\thetaext{if for every}\;\; j\in\{1,2,\ldots,N\}\;\;\thetaext{we have}\;\; |x_j|<t\,,\\
\psi_{\varepsilon,t}(x)=\psi_\varepsilon(x)\;\;\thetaext{and}\;\;
u_{\varepsilon,t}(x)=u_\varepsilon(x)\quad\thetaext{if }|x_j|>t+(1-\deltaelta)\varepsilon\;\;\thetaext{for
some}\;\; j\in\{1,2,\ldots,N\}\,.
\varepsilonnd{cases}
\varepsilonnd{equation}
Next we will prove that
\betaegin{equation}\label{L2009small1}
\lim_{\varepsilon\thetao 0}\int_{0}^{1/2}\,\int_{\cup_{j=1}^{N}\{ x\in
I_{v_\ec\nu}:\,t<|x_j|<t+\varepsilon\}}\varphirac{1}{\varepsilon} F\Big(\varepsilon\,v_\ec
P\cdot\{\nabla^2 u_{\varepsilon,t}\},\,\varepsilon\,v_\ec
Q\cdot\{\nabla\psi_{\varepsilon,t}\},\,\nabla
u_{\varepsilon,t},\,\psi_{\varepsilon,t}\Big)\,dx\,dt=0\,.
\varepsilonnd{equation}
Indeed
\betaegin{multline}\label{L2009small1hjhjjhhjhhj}
\int_{0}^{1/2}\,\int_{\cup_{j=1}^{N}\{ x\in
I_{v_\ec\nu}:\,t<|x_j|<t+\varepsilon\}}\varphirac{1}{\varepsilon} F\Big(\varepsilon\,v_\ec
P\cdot\{\nabla^2 u_{\varepsilon,t}\},\,\varepsilon\,v_\ec
Q\cdot\{\nabla\psi_{\varepsilon,t}\},\,\nabla
u_{\varepsilon,t},\,\psi_{\varepsilon,t}\Big)\,dx\,dt\leq\\
\sigmaum\limits_{j=1}^{N}\int_{0}^{1/2}\,\int_{\{ x\in
I_{v_\ec\nu}:\,t<|x_j|<t+\varepsilon\}}\varphirac{1}{\varepsilon} F\Big(\varepsilon\,v_\ec
P\cdot\{\nabla^2 u_{\varepsilon,t}\},\,\varepsilon\,v_\ec
Q\cdot\{\nabla\psi_{\varepsilon,t}\},\,\nabla
u_{\varepsilon,t},\,\psi_{\varepsilon,t}\Big)\,dx\,dt=
\sigmaum\limits_{j=1}^{N}\varphirac{1}{\varepsilon}\int\limits_{0}^{1/2}\Bigg(\\
\int\limits_{\{x: x+tv_\ec e_j\in I_{v_\ec\nu},0<x_j<\varepsilon\}}
F\betaigg(\varepsilonv_\ec P\cdot\{\nabla^2 u_{\varepsilon,t}\}(x+tv_\ec e_j),\,\varepsilonv_\ec
Q\cdot\{\nabla\psi_{\varepsilon,t}\}(x+tv_\ec e_j),\,\nabla u_{\varepsilon,t}(x+tv_\ec
e_j),\,\psi_{\varepsilon,t}(x+tv_\ec e_j)\betaigg)dx+\\ \int\limits_{\{x: x-tv_\ec
e_j\in I_{v_\ec\nu},-\varepsilon<x_j<0\}} F\betaigg(\varepsilonv_\ec P\cdot\{\nabla^2
u_{\varepsilon,t}\}(x-tv_\ec e_j),\,\varepsilonv_\ec Q\cdot\{\nabla\psi_{\varepsilon,t}\}(x-tv_\ec
e_j),\,\nabla u_{\varepsilon,t}(x-tv_\ec e_j),\,\psi_{\varepsilon,t}(x-tv_\ec
e_j)\betaigg)dx\Bigg)dt\\=
\sigmaum\limits_{j=1}^{N}\varphirac{1}{\varepsilon}\Bigg(\int\limits_{0}^{\varepsilon}
\int\limits_{\{x:\, x+sv_\ec e_j\in I_{v_\ec\nu},\,x_j\in(0,1/2)\}}\thetaimes\\
\thetaimes F\betaigg(\varepsilon\,v_\ec P\cdot\{\nabla^2 u_{\varepsilon,x_j}\}(x+sv_\ec
e_j),\,\varepsilon\,v_\ec Q\cdot\{\nabla\psi_{\varepsilon,x_j}\}(x+sv_\ec e_j),\,\nabla
u_{\varepsilon,x_j}(x+sv_\ec
e_j),\,\psi_{\varepsilon,x_j}(x+sv_\ec e_j)\betaigg)\,dx\,ds+\\
\int\limits_{-\varepsilon}^{0}\int\limits_{\{x:\, x+sv_\ec e_j\in
I_{v_\ec\nu},\,x_j\in(-1/2,0)\}}\thetaimes\\ \thetaimes F\betaigg(\varepsilon\,v_\ec
P\cdot\{\nabla^2 u_{\varepsilon,(-x_j)}\}(x+sv_\ec e_j),\,\varepsilon\,v_\ec
Q\cdot\{\nabla\psi_{\varepsilon,(-x_j)}\}(x+sv_\ec e_j),\,\nabla
u_{\varepsilon,(-x_j)}(x+sv_\ec e_j),\,\psi_{\varepsilon,(-x_j)}(x+sv_\ec
e_j)\betaigg)\,dx\\ \Bigg)ds\,.
\varepsilonnd{multline}
So by \varepsilonr{L2009small1hjhjjhhjhhj},
\betaegin{multline}\label{L2009small1hjhjjhhjhhjjioiiou}
\int_{0}^{1/2}\,\int_{\cup_{j=1}^{N}\{ x\in
I_{v_\ec\nu}:\,t<|x_j|<t+\varepsilon\}}\varphirac{1}{\varepsilon} F\Big(\varepsilon\,v_\ec
P\cdot\{\nabla^2 u_{\varepsilon,t}\},\,\varepsilon\,v_\ec
Q\cdot\{\nabla\psi_{\varepsilon,t}\},\,\nabla
u_{\varepsilon,t},\,\psi_{\varepsilon,t}\Big)\,dx\,dt\leq\\
\sigmaum\limits_{j=1}^{N}\varphirac{1}{\varepsilon}\int\limits_{-\varepsilon}^{\varepsilon}
\int\limits_{\{x:\, x+sv_\ec e_j\in I_{v_\ec\nu}\}}\thetaimes\\
\thetaimes F\betaigg(\varepsilon\,v_\ec P\cdot\{\nabla^2 u_{\varepsilon,|x_j|}\}(x+sv_\ec
e_j),\,\varepsilon\,v_\ec Q\cdot\{\nabla\psi_{\varepsilon,|x_j|}\}(x+sv_\ec
e_j),\,\nabla u_{\varepsilon,|x_j|}(x+sv_\ec e_j),\,\psi_{\varepsilon,|x_j|}(x+sv_\ec
e_j)\betaigg)\,dx\,ds=
\\
\sigmaum\limits_{j=1}^{N}\varphirac{1}{\varepsilon}\int\limits_{-\varepsilon}^{\varepsilon}
\int\limits_{I_{v_\ec\nu}}F\betaigg(\varepsilon\,v_\ec P\cdot\{\nabla^2
u_{\varepsilon,|x_j-s|}\}(x),\,\varepsilon\,v_\ec
Q\cdot\{\nabla\psi_{\varepsilon,|x_j-s|}\}(x),\,\nabla
u_{\varepsilon,|x_j-s|}(x),\,\psi_{\varepsilon,|x_j-s|}(x)\betaigg)\,dx\,ds\,.
\varepsilonnd{multline}
Thus changing variables $s=\varepsilon\tau$ in
\er{L2009small1hjhjjhhjhhjjioiiou} gives
\begin{multline}\label{L2009small1hjhjjhhjhhjjjkljkljkl}
\int_{0}^{1/2}\,\int_{\cup_{j=1}^{N}\{ x\in
I_{\vec\nu}:\,t<|x_j|<t+\varepsilon\}}\frac{1}{\varepsilon} F\Big(\varepsilon\,\vec
P\cdot\{\nabla^2 u_{\varepsilon,t}\},\,\varepsilon\,\vec
Q\cdot\{\nabla\psi_{\varepsilon,t}\},\,\nabla
u_{\varepsilon,t},\,\psi_{\varepsilon,t}\Big)\,dx\,dt\leq\\
\sum\limits_{j=1}^{N}\int\limits_{-1}^{1}
\int\limits_{I_{\vec\nu}}F\bigg(\varepsilon\,\vec P\cdot\{\nabla^2
u_{\varepsilon,|x_j-\varepsilon s|}\}(x),\,\varepsilon\,\vec Q\cdot\{\nabla\psi_{\varepsilon,|x_j-\varepsilon
s|}\}(x),\,\nabla u_{\varepsilon,|x_j-\varepsilon s|}(x),\,\psi_{\varepsilon,|x_j-\varepsilon
s|}(x)\bigg)\,dx\,ds\,.
\end{multline}
Next, clearly by \er{L2009deftvu1hhhj} there exists a constant
$C_0>0$ such that for every $r,j\in\{1,2,\ldots N\}$, every
$s\in(-1,1)$ and every $\varepsilon\in(0,1)$ we have
\begin{multline}\label{ffyfyguihihiuiolkkkkjjjkjkjk}
\int_{I_{\vec\nu}}\Bigg(\Big|\varepsilon\,\vec Q\cdot\{\nabla
\psi^{(r)}_{\varepsilon,|x_j-\varepsilon s|}(x)\}\Big|^p+\Big|\varepsilon\,\vec
P\cdot\{\nabla^2 u^{(r)}_{\varepsilon,|x_j-\varepsilon s|}(x)\}\Big|^p+\\
\Big|\psi^{(r)}_{\varepsilon,|x_j-\varepsilon s|}(x)-\varphi(x)\Big|^p+\Big|\nabla
u^{(r)}_{\varepsilon,|x_j-\varepsilon s|}(x)-\nabla
v(x)\Big|^p+\bigg|\frac{1}{\varepsilon}\Big(u^{(r)}_{\varepsilon,|x_j-\varepsilon
s|}(x)-u_\varepsilon(x)\Big)\bigg|^p\Bigg)dx\\ \leq
C_0\int_{I_{\vec\nu}}\Bigg(\Big|\varepsilon\,\vec Q\cdot\{\nabla
\psi^{(r-1)}_{\varepsilon,|x_j-\varepsilon s|}(x)\}\Big|^p+\Big|\varepsilon\,\vec
P\cdot\{\nabla^2 u^{(r-1)}_{\varepsilon,|x_j-\varepsilon s|}(x)\}\Big|^p+\\
\Big|\psi^{(r-1)}_{\varepsilon,|x_j-\varepsilon s|}(x)-\varphi(x)\Big|^p+\Big|\nabla
u^{(r-1)}_{\varepsilon,|x_j-\varepsilon s|}(x)-\nabla v(x)\Big|^p+
\bigg|\frac{1}{\varepsilon}\Big(u^{(r-1)}_{\varepsilon,|x_j-\varepsilon
s|}(x)-u_\varepsilon(x)\Big)\bigg|^p+\\ \Big|\varepsilon\,\vec Q\cdot\{\nabla
\psi_\varepsilon(x)\}\Big|^p+\Big|\varepsilon\,\vec P\cdot\{\nabla^2 u_\varepsilon(x)\}\Big|^p+
\Big|\psi_\varepsilon(x)-\varphi(x)\Big|^p+\Big|\nabla u_\varepsilon(x)-\nabla
v(x)\Big|^p+\bigg|\frac{1}{\varepsilon}\Big(u_\varepsilon(x)-u_\varepsilon(x)\Big)\bigg|^p\Bigg)dx\,.
\end{multline}
Therefore, since we have \er{ffdtdfuiuuyhuiuuyhjh} and
\er{gfjguyfygbjhhjgjgghfffgfg},
by plugging \er{L2009deftvu1} and \er{L2009deftvu1bjjhhjkh} into
\er{ffyfyguihihiuiolkkkkjjjkjkjk} we deduce
\begin{multline}\label{ffyfyguihihiuiolkkk}
\lim_{\varepsilon\to 0^+}\psi_{\varepsilon,|x_j-\varepsilon
s|}(x)=\varphi(x)\;\;\text{in}\;\;L^{p}(I_{\vec \nu},\field{R}^m)\,,\\
\lim_{\varepsilon\to 0^+}u_{\varepsilon,|x_j-\varepsilon s|}(x)=v(x)\;\;\text{in}\;\;
W^{1,p}(I_{\vec \nu},\field{R}^k)\,,\\ \lim_{\varepsilon\to 0^+}\Big(\varepsilon\,\vec
P\cdot\big\{\nabla^2 u_{\varepsilon,|x_j-\varepsilon s|}(x)\big\}\Big)=0\;\;
\text{in}\;\;L^p(I_{\vec \nu},\field{R}^{d})\,,\\ \lim_{\varepsilon\to
0^+}\Big(\varepsilon\,\vec Q\cdot\big\{\nabla \psi_{\varepsilon,|x_j-\varepsilon
s|}(x)\big\}\Big)=0\;\;\text{in}\;\;L^p(I_{\vec
\nu},\field{R}^{q})\;\;\text{and}\\ \lim_{\varepsilon\to
0^+}\Big\{\big(u_{\varepsilon,|x_j-\varepsilon
s|}(x)-u_\varepsilon(x)\big)/\varepsilon\Big\}=0\;\;\text{in}\;\;L^{p}(I_{\vec
\nu},\field{R}^k)\\ \text{uniformly in}\;s\in(-1,1)\quad\forall
j=1,2,\ldots N\,.
\end{multline}
On the other hand, by \er{ghgghjhjkdfhg},
\begin{equation}\label{ghgghjhjkdfhghjkhjhj}
\varphi(x):=\begin{cases}
\varphi^+\quad\text{if}\;\;x\cdot\vec\nu>0\,,\\
\varphi^-\quad\text{if}\;\;x\cdot\vec\nu<0\,,
\end{cases}\quad\quad
\nabla v(x):=\begin{cases}
V^+\quad\text{if}\;\;x\cdot\vec\nu>0\,,\\
V^-\quad\text{if}\;\;x\cdot\vec\nu<0\,.
\end{cases}
\end{equation}
Therefore, since $F(0,0,V^+,\varphi^+)=F(0,0,V^-,\varphi^-)=0$, we
have $F\big(0,0,\nabla v(x),\varphi(x)\big)=0$ for a.e. $x\in
I_{\vec \nu}$. Thus, since $0\leq F(a,b,c,d)\leq
C\big(|a|^{p}+|b|^{p}+|c|^p+|d|^p+1\big)$ for every $(a,b,c,d)$, by
\er{ffyfyguihihiuiolkkk}, for every $j=1,2,\ldots, N$ we deduce
\begin{multline}\label{L2009small1hjhjjhhjhhjjjkljkljklhjhjhihj}
\lim_{\varepsilon\to 0^+}\int\limits_{-1}^{1}
\int\limits_{I_{\vec\nu}}F\bigg(\varepsilon\,\vec P\cdot\{\nabla^2
u_{\varepsilon,|x_j-\varepsilon s|}\}(x),\,\varepsilon\,\vec Q\cdot\{\nabla\psi_{\varepsilon,|x_j-\varepsilon
s|}\}(x),\,\nabla u_{\varepsilon,|x_j-\varepsilon s|}(x),\,\psi_{\varepsilon,|x_j-\varepsilon
s|}(x)\bigg)\,dx\,ds=0\,.
\end{multline}
Then plugging \er{L2009small1hjhjjhhjhhjjjkljkljklhjhjhihj} into
\er{L2009small1hjhjjhhjhhjjjkljkljkl} we deduce \er{L2009small1}.
Next let $\varepsilon_n\downarrow 0$ be such that
\begin{multline}\label{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbbihhbbm}
\lim_{n\to +\infty}\int_{I_{\vec \nu}}\frac{1}{\varepsilon_n}
F\Big(\varepsilon_n\,\vec P\cdot\{\nabla^2 v_{\varepsilon_n}\},\,\varepsilon_n\,\vec
Q\cdot\{\nabla \varphi_{\varepsilon_n}\},\,\nabla
v_{\varepsilon_n},\,\varphi_{\varepsilon_n}\Big)\,dx=\\ \varliminf_{\varepsilon\to
0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon} F\Big(\varepsilon\,\vec P\cdot\{\nabla^2
v_{\varepsilon}\},\,\varepsilon\,\vec Q\cdot\{\nabla \varphi_{\varepsilon}\},\,\nabla
v_{\varepsilon},\,\varphi_{\varepsilon}\Big)\,dx\,.
\end{multline}
Then, by \er{L2009small1}, we can pass to a subsequence, still
denoted by $\varepsilon_n\downarrow 0$, so that for a.e. $t\in(0,1/2)$ we
will have
\begin{equation}\label{L2009small1hjkhhjhj}
\lim_{n\to+\infty}\int_{\cup_{j=1}^{N}\{ x\in
I_{\vec\nu}:\,t<|x_j|<t+\varepsilon_n\}}\frac{1}{\varepsilon_n} F\Big(\varepsilon_n\,\vec
P\cdot\{\nabla^2 u_{\varepsilon_n,t}\},\,\varepsilon_n\,\vec
Q\cdot\{\nabla\psi_{\varepsilon_n,t}\},\,\nabla
u_{\varepsilon_n,t},\,\psi_{\varepsilon_n,t}\Big)\,dx=0\,.
\end{equation}
Therefore, by \er{L2009eqgl1} and \er{L2009small1hjkhhjhj}, for a.e.
$t\in(0,1/2)$ we have
\begin{multline}\label{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukj}
\lim_{n\to +\infty}\int_{I_{\vec \nu}}\frac{1}{\varepsilon_n}
F\Big(\varepsilon_n\,\vec P\cdot\{\nabla^2 v_{\varepsilon_n}\},\,\varepsilon_n\,\vec
Q\cdot\{\nabla \varphi_{\varepsilon_n}\},\,\nabla
v_{\varepsilon_n},\,\varphi_{\varepsilon_n}\Big)\,dx\geq\\ \varlimsup_{n\to
+\infty}\int_{\{ x\in I_{\vec\nu}:\,|x_j|<t\,\forall j=1,\ldots
N\}}\frac{1}{\varepsilon_n} F\Big(\varepsilon_n\,\vec P\cdot\{\nabla^2
v_{\varepsilon_n}\},\,\varepsilon_n\,\vec Q\cdot\{\nabla \varphi_{\varepsilon_n}\},\,\nabla
v_{\varepsilon_n},\,\varphi_{\varepsilon_n}\Big)\,dx=\\ \varlimsup_{n\to +\infty}\int_{\{
x\in I_{\vec\nu}:\,|x_j|<t\,\forall j=1,\ldots N\}}\frac{1}{\varepsilon_n}
F\Big(\varepsilon_n\,\vec P\cdot\{\nabla^2 u_{\varepsilon_n,t}\},\,\varepsilon_n\,\vec
Q\cdot\{\nabla\psi_{\varepsilon_n,t}\},\,\nabla
u_{\varepsilon_n,t},\,\psi_{\varepsilon_n,t}\Big)\,dx=\\
\varlimsup_{n\to +\infty}\int_{\{ x\in
I_{\vec\nu}:\,|x_j|<t+\varepsilon_n\,\forall j=1,\ldots N\}}\frac{1}{\varepsilon_n}
F\Big(\varepsilon_n\,\vec P\cdot\{\nabla^2 u_{\varepsilon_n,t}\},\,\varepsilon_n\,\vec
Q\cdot\{\nabla\psi_{\varepsilon_n,t}\},\,\nabla
u_{\varepsilon_n,t},\,\psi_{\varepsilon_n,t}\Big)\,dx\,.
\end{multline}
In particular, by \er{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbbihhbbm} and
\er{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukj}, for a.e.
$t\in(0,1/2)$ we have
\begin{multline}\label{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjhhhjhjj}
\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon} F\Big(\varepsilon\,\vec
P\cdot\{\nabla^2 v_{\varepsilon}\},\,\varepsilon\,\vec Q\cdot\{\nabla
\varphi_{\varepsilon}\},\,\nabla
v_{\varepsilon},\,\varphi_{\varepsilon}\Big)\,dx\geq\\
\varlimsup_{n\to +\infty}\int_{\{ x\in
I_{\vec\nu}:\,|x_j|<t+\varepsilon_n\,\forall j=1,\ldots N\}}\frac{1}{\varepsilon_n}
F\Big(\varepsilon_n\,\vec P\cdot\{\nabla^2 u_{\varepsilon_n,t}\},\,\varepsilon_n\,\vec
Q\cdot\{\nabla\psi_{\varepsilon_n,t}\},\,\nabla
u_{\varepsilon_n,t},\,\psi_{\varepsilon_n,t}\Big)\,dx\,.
\end{multline}
Next set
\begin{equation}\label{cfgfgguhhjhujhgh}
\varphi_{\varepsilon,t}(y):=\psi_{\varepsilon,t}\Big(2(t+\varepsilon)y\Big)\in
C^1(\field{R}^N,\field{R}^m)\,,\quad\quad v_{\varepsilon,t}(y):=\frac{1}{2(t+\varepsilon)}\cdot
u_{\varepsilon,t}\Big(2(t+\varepsilon)y\Big)\in C^2(\field{R}^N,\field{R}^k)\,.
\end{equation}
Then changing variables $y:=x/(2t+2\varepsilon_n)$ in the integral on the r.h.s.
of \er{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjhhhjhjj} gives
that for a.e. $t\in(0,1/2)$ we have
\begin{multline}\label{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjhhhjhjjhjhjgjghg}
\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon} F\Big(\varepsilon\,\vec
P\cdot\{\nabla^2 v_{\varepsilon}\},\,\varepsilon\,\vec Q\cdot\{\nabla
\varphi_{\varepsilon}\},\,\nabla
v_{\varepsilon},\,\varphi_{\varepsilon}\Big)\,dx\geq\\
\varlimsup_{n\to
+\infty}\Bigg\{\big(2(t+\varepsilon_n)\big)^{N-1}\int\limits_{I_{\vec
\nu}}\frac{2(t+\varepsilon_n)}{\varepsilon_n} F\bigg(\frac{\varepsilon_n}{2(t+\varepsilon_n)}\vec
P\cdot\{\nabla^2 v_{\varepsilon_n,t}\},\frac{\varepsilon_n}{2(t+\varepsilon_n)}\vec
Q\cdot\{\nabla\varphi_{\varepsilon_n,t}\},\nabla
v_{\varepsilon_n,t},\varphi_{\varepsilon_n,t}\bigg)dy\Bigg\}
\\=\big(2t\big)^{N-1}\cdot\varlimsup_{n\to +\infty}\int\limits_{I_{\vec \nu}}\frac{2(t+\varepsilon_n)}{\varepsilon_n}
F\Bigg(\frac{\varepsilon_n}{2(t+\varepsilon_n)}\,\vec P\cdot\{\nabla^2
v_{\varepsilon_n,t}\},\,\frac{\varepsilon_n}{2(t+\varepsilon_n)}\,\vec
Q\cdot\{\nabla\varphi_{\varepsilon_n,t}\},\,\nabla
v_{\varepsilon_n,t},\,\varphi_{\varepsilon_n,t}\Bigg)\,dy\,.
\end{multline}
On the other hand, by \er{L2009eqgl1},
\begin{equation}\label{L2009eqgl1gghgh}
\varphi_{\varepsilon,t}(y)=\psi_\varepsilon\Big(2(t+\varepsilon)y\Big)\;\;\text{and}\;\; \nabla
v_{\varepsilon,t}(y)=\nabla u_\varepsilon\Big(2(t+\varepsilon)y\Big)\quad\text{if
}|y_j|>\frac{t+\varepsilon(1-\delta)}{2(t+\varepsilon)}\;\;\text{for some}\;\; 1\leq
j\leq N\,.
\end{equation}
Therefore, since $\nabla u_\varepsilon(x)\equiv h_\varepsilon(\vec\nu\cdot x)$ for
some function $h_\varepsilon$ and $\nabla u_\varepsilon(x)=\nabla v(x)$ if
$|\vec\nu\cdot x|>c_0$, where $0<c_0<1/2$ is a constant, clearly
there exist functions $\tilde\varphi_{\varepsilon,t}(y)\in
\mathcal{S}_2\big(\varphi^+,\varphi^-,I_{\vec\nu}\big)$ and $\tilde
v_{\varepsilon,t}(y)\in
\mathcal{S}_1\big(V^+,V^-,I_{\vec\nu}\big)$ such that
$\tilde\varphi_{\varepsilon,t}(y)=\varphi_{\varepsilon,t}(y)$ for every $y\in
I_{\vec\nu}$ and $\tilde v_{\varepsilon,t}(y)=v_{\varepsilon,t}(y)$ for every $y\in
I_{\vec\nu}$. Thus by \er{L2009hhffff12} and
\er{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjhhhjhjjhjhjgjghg}
for a.e. $t\in(0,1/2)$ we have
\begin{multline}\label{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjhhhjhjjhjhjgjghghgjhjkjljk}
\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon} F\Big(\varepsilon\,\vec
P\cdot\{\nabla^2 v_{\varepsilon}\},\,\varepsilon\,\vec Q\cdot\{\nabla
\varphi_{\varepsilon}\},\,\nabla v_{\varepsilon},\,\varphi_{\varepsilon}\Big)\,dx\geq
\\ \big(2t\big)^{N-1}\cdot\varlimsup_{n\to +\infty}\int\limits_{I_{\vec \nu}}\frac{2(t+\varepsilon_n)}{\varepsilon_n}
F\Bigg(\frac{\varepsilon_n}{2(t+\varepsilon_n)}\,\vec P\cdot\{\nabla^2 \tilde
v_{\varepsilon_n,t}\},\,\frac{\varepsilon_n}{2(t+\varepsilon_n)}\,\vec
Q\cdot\{\nabla\tilde\varphi_{\varepsilon_n,t}\},\,\nabla \tilde
v_{\varepsilon_n,t},\,\tilde\varphi_{\varepsilon_n,t}\Bigg)\,dy\\ \geq
\big(2t\big)^{N-1}\cdot
E_{per}\Big(V^+,\varphi^+,V^-,\varphi^-,\vec\nu\Big)\,.
\end{multline}
Thus, since
\er{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjhhhjhjjhjhjgjghghgjhjkjljk}
is valid for a.e. $t\in(0,1/2)$, there exists a sequence
$t_n\uparrow (1/2)$ such that
\begin{multline}\label{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjhhhjhjjhjhjgjghghgjhjkjljkbmbvvmv}
\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon} F\Big(\varepsilon\,\vec
P\cdot\{\nabla^2 v_{\varepsilon}\},\,\varepsilon\,\vec Q\cdot\{\nabla
\varphi_{\varepsilon}\},\,\nabla v_{\varepsilon},\,\varphi_{\varepsilon}\Big)\,dx\geq
\big(2t_n\big)^{N-1}\cdot
E_{per}\Big(V^+,\varphi^+,V^-,\varphi^-,\vec\nu\Big)\,.
\end{multline}
Then letting $n\to+\infty$ in
\er{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjhhhjhjjhjhjgjghghgjhjkjljkbmbvvmv}
we deduce \er{ggfghjjhfhfjfhj}.
Finally, consider the case in which we have only $L^p_{loc}$ instead of complete $L^p$ convergence,
i.e.\ $\,\lim_{\varepsilon\to 0^+}\varphi_\varepsilon=\varphi$ in $L^{p}_{loc}(I_{\vec
\nu},\field{R}^m)$, $\lim_{\varepsilon\to 0^+}v_\varepsilon=v$ in $W^{1,p}_{loc}(I_{\vec
\nu},\field{R}^k)$, $\lim_{\varepsilon\to 0^+}\big(\varepsilon\,\vec P\cdot\{\nabla^2
v_\varepsilon\}\big)=0$ in $L^p_{loc}(I_{\vec \nu},\field{R}^{d})$, $\lim_{\varepsilon\to
0^+}\big(\varepsilon\,\vec Q\cdot\{\nabla \varphi_\varepsilon\}\big)=0$ in
$L^p_{loc}(I_{\vec \nu},\field{R}^{q})$ and
\begin{itemize}
\item either
$\lim_{\varepsilon\to 0^+}\big\{(v_\varepsilon-v)/\varepsilon\big\}=0$ in $L^{p}_{loc}(I_{\vec
\nu},\field{R}^k)$,
\item or there exists a family
$\big\{u_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset C^2(\field{R}^N,\field{R}^k)$, such that
$\nabla u_\varepsilon(x)\equiv h_\varepsilon(\vec\nu\cdot x)$ for some function
$h_\varepsilon$, $\nabla u_\varepsilon(x)=\nabla v(x)$ if $|\vec\nu\cdot x|>c_0$,
where $0<c_0<1/2$ is a constant, and $\lim_{\varepsilon\to 0^+}u_\varepsilon=v$ in
$W^{1,p}_{loc}(I_{\vec \nu},\field{R}^k)$, $\lim_{\varepsilon\to 0^+}\big(\varepsilon\,\vec
P\cdot\{\nabla^2 u_\varepsilon\}\big)=0$ in $L^p_{loc}(I_{\vec \nu},\field{R}^{d})$
and $\lim_{\varepsilon\to 0^+}\big\{(v_\varepsilon-u_\varepsilon)/\varepsilon\big\}=0$ in
$L^{p}_{loc}(I_{\vec \nu},\field{R}^k)$.
\end{itemize}
Then we fix $t\in(c_0,1/2)$, and since
\begin{multline}\label{ggfghjjhfhfjfhjjkhhjhjhjjhhhknhkk}
\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon} F\Big(\varepsilon\,\vec
P\cdot\{\nabla^2 v_{\varepsilon}\},\,\varepsilon\,\vec Q\cdot\{\nabla
\varphi_{\varepsilon}\},\,\nabla v_{\varepsilon},\,\varphi_{\varepsilon}\Big)\,dx\geq\\
\varliminf_{\varepsilon\to 0^+}\int_{\{x\in\field{R}^N :\,|x_j|<t\,\forall j=1,\ldots
N\}}\frac{1}{\varepsilon} F\Big(\varepsilon\,\vec P\cdot\{\nabla^2 v_{\varepsilon}\},\,\varepsilon\,\vec
Q\cdot\{\nabla \varphi_{\varepsilon}\},\,\nabla
v_{\varepsilon},\,\varphi_{\varepsilon}\Big)\,dx\,,
\end{multline}
in a similar way, as we did before in \er{cfgfgguhhjhujhgh} and
\er{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjhhhjhjjhjhjgjghg},
changing variables $y:=x/(2t)$ in the integral on the r.h.s. of
\er{ggfghjjhfhfjfhjjkhhjhjhjjhhhknhkk} and using
\er{ggfghjjhfhfjfhj} (which we have already proved in the case of
complete $L^p$ convergence) gives
\begin{multline}\label{ggfghjjhfhfjfhjjkhhjhjhjjh}
\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon} F\Big(\varepsilon\,\vec
P\cdot\{\nabla^2 v_{\varepsilon}\},\,\varepsilon\,\vec Q\cdot\{\nabla
\varphi_{\varepsilon}\},\,\nabla v_{\varepsilon},\,\varphi_{\varepsilon}\Big)\,dx\geq
(2t)^{N-1} E_{per}\Big(V^+,\varphi^+,V^-,\varphi^-,\vec\nu\Big)\,.
\end{multline}
Therefore, since $t\in(c_0,1/2)$ was arbitrary, passing to the limit as
$t\uparrow(1/2)$ in \er{ggfghjjhfhfjfhjjkhhjhjhjjh} gives the
desired result.
\begin{comment*}
Set $\bar u_{\varepsilon,t}(y):=u_{\varepsilon,t}(y+t\vec\nu)=u_{\varepsilon,t}(y_1+t,y')$,
where $y'\in\field{R}^{N-1}$ and $(y_1,y')=y$. First of all it is useful to
note that
\begin{equation}\label{L2009deftvu2}
\bar
u_{\varepsilon,t}(y):=l(y_1/\varepsilon)\,v_\varepsilon(y_1+t,y')+\big(1-l(y_1/\varepsilon)\big)\,u_\varepsilon(y_1+t,y')\,.
\end{equation}
Since $0\leq F(a,b,c,d)-F(0,b,c,d)\leq C|a|^{p_1}$ and
$F(0,b,c,d)\leq C (|b|^{p_2}+|c|^{p_2}+1)$ for every $(a,b,c,d)$,
and since $F\big(0,\nabla v,v,f\big)=0$ on $\Omega$, there exists $\bar
C>0$ such that
\begin{multline*}
\int_{\{ x\in\Omega:\,t<x_1<t+\varepsilon\}}\frac{1}{\varepsilon} F\big(\varepsilon\nabla^2
u_{\varepsilon,t},\,\nabla u_{\varepsilon,t},\,u_{\varepsilon,t},\,f\big)\,dx\leq\\ \frac{\bar
C}{\varepsilon}\int_{\{ x\in\field{R}^N:\,t<x_1<t+\varepsilon\}}\Big(|\varepsilon\nabla^2
u_{\varepsilon,t}|^{p_1}+|\nabla u_{\varepsilon,t}-\nabla
v|^{p_2}\\+|u_{\varepsilon,t}-v|^{p_2}+|\nabla u_{\varepsilon,t}-\nabla
v|+|u_{\varepsilon,t}-v|\Big)\,dx\\=\frac{\bar C}{\varepsilon}\int_{\{
y\in\field{R}^N:\,0<y_1<\varepsilon\}}\Big(\big|\varepsilon\nabla^2 \bar
u_{\varepsilon,t}(y)\big|^{p_1}+\big|\nabla \bar u_{\varepsilon,t}(y)-\nabla
v(y_1+t,y')\big|^{p_2}+\\ \big|\bar
u_{\varepsilon,t}(y)-v(y_1+t,y')\big|^{p_2}+\big|\nabla \bar
u_{\varepsilon,t}(y)-\nabla v(y_1+t,y')\big|+\big|\bar
u_{\varepsilon,t}(y)-v(y_1+t,y')\big|\Big)\,dy \,.
\end{multline*}
Therefore,
\begin{multline}\label{L2009gjjhjh1}
\int_\field{R}\int_{\{ x\in\Omega:\,t<x_1<t+\varepsilon\}}\frac{1}{\varepsilon} F\big(\varepsilon\nabla^2
u_{\varepsilon,t},\,\nabla u_{\varepsilon,t},\,u_{\varepsilon,t},\,f\big)\,dxdt\leq\\
\frac{\bar C}{\varepsilon}\int_\field{R}\int_{\{
y\in\field{R}^N:\,0<y_1<\varepsilon\}}\Big(\big|\varepsilon\nabla^2 \bar
u_{\varepsilon,t}(y)\big|^{p_1}+\big|\nabla \bar u_{\varepsilon,t}(y)-\nabla
v(y_1+t,y')\big|^{p_2}+\\\big|\bar
u_{\varepsilon,t}(y)-v(y_1+t,y')\big|^{p_2}+\big|\nabla \bar
u_{\varepsilon,t}(y)-\nabla v(y_1+t,y')\big|+\big|\bar
u_{\varepsilon,t}(y)-v(y_1+t,y')\big|\Big)\,dydt\\=\frac{\bar
C}{\varepsilon}\int_\field{R}\int_0^\varepsilon\int_{\field{R}^{N-1}}\Big(\big|\varepsilon\nabla^2 \bar
u_{\varepsilon,t}(s,y')\big|^{p_1}+\big|\nabla \bar u_{\varepsilon,t}(s,y')-\nabla
v(s+t,y')\big|^{p_2}+\\\big|\bar
u_{\varepsilon,t}(s,y')-v(s+t,y')\big|^{p_2}+\big|\nabla \bar
u_{\varepsilon,t}(s,y')-\nabla v(s+t,y')\big|+\big|\bar
u_{\varepsilon,t}(s,y')-v(s+t,y')\big|\Big)\,dy'dsdt\,.
\end{multline}
However, by \er{L2009deftvu2} we have
\begin{equation*}
\bar
u_{\varepsilon,t}(s,y')=l(s/\varepsilon)\,v_\varepsilon(s+t,y')+\big(1-l(s/\varepsilon)\big)\,u_\varepsilon(s+t,y')\,.
\end{equation*}
Therefore, by \er{L2009gjjhjh1} we obtain
\begin{multline}\label{L2009gjjhjh2}
\int_\field{R}\int_{\{ x\in\Omega:\,t<x_1<t+\varepsilon\}}\frac{1}{\varepsilon} F\big(\varepsilon\nabla^2
u_{\varepsilon,t},\,\nabla u_{\varepsilon,t},\,u_{\varepsilon,t},\,f\big)\,dxdt\leq\\
\frac{C_1}{\varepsilon}\int_0^\varepsilon\int_{\field{R}^{N}}\Big(\big|\varepsilon\nabla^2
u_{\varepsilon}(y_1+s,y')\big|^{p_1}+ \big|\nabla u_{\varepsilon}(y_1+s,y')-\nabla
v(s+y_1,y')\big|^{p_2}+\\ \big|\big(
u_{\varepsilon}(y_1+s,y')-v(s+y_1,y')\big)/\varepsilon\big|^{p_2}+\big|\nabla
u_{\varepsilon}(y_1+s,y')-\nabla v(s+y_1,y')\big|+\\
\big|\big(u_{\varepsilon}(y_1+s,y')-v(s+y_1,y')\big)/\varepsilon\big|\Big)\,dyds+
\frac{C_1}{\varepsilon}\int_0^\varepsilon\int_{\field{R}^{N}}\Big(\big|\varepsilon\nabla^2
v_{\varepsilon}(y_1+s,y')\big|^{p_1}+\\ \big|\nabla v_{\varepsilon}(y_1+s,y')-\nabla
v(s+y_1,y')\big|^{p_2}+ \big|\big(
v_{\varepsilon}(y_1+s,y')-v(s+y_1,y')\big)/\varepsilon\big|^{p_2}+\\ \big|\nabla
v_{\varepsilon}(y_1+s,y')-\nabla
v(s+y_1,y')\big|+\big|\big(v_{\varepsilon}(y_1+s,y')-v(s+y_1,y')\big)/\varepsilon\big|\Big)\,dyds\\=
C_1\int_{\field{R}^{N}}\Big(\big|\varepsilon\nabla^2 u_{\varepsilon}(y)\big|^{p_1}+
\big|\nabla u_{\varepsilon}(y)-\nabla v(y)\big|^{p_2}+\big|\big(
u_{\varepsilon}(y)-v(y)\big)/\varepsilon\big|^{p_2}+\big|\nabla
u_{\varepsilon}(y)-\nabla v(y)\big|+\\
\big|\big(u_{\varepsilon}(y)-v(y)\big)/\varepsilon\big|\Big)\,dy+
C_1\int_{\field{R}^{N}}\Big(\big|\varepsilon\nabla^2 v_{\varepsilon}(y)\big|^{p_1}+
\big|\nabla v_{\varepsilon}(y)-\nabla v(y)\big|^{p_2}+
\big|\big( v_{\varepsilon}(y)-v(y)\big)/\varepsilon\big|^{p_2}+\\
\big|\nabla v_{\varepsilon}(y)-\nabla
v(y)\big|+\big|\big(v_{\varepsilon}(y)-v(y)\big)/\varepsilon\big|\Big)\,dy\to
0\quad\text{as }\varepsilon\to 0\,.
\end{multline}
The last equality here we deduce by the definition of the set $B(v,p)$
(Definition ???). So we get \er{L2009small1}.
Next let $\{\varepsilon'_n\}$ be a subsequence of
$\{\varepsilon_n\}$. Then by \er{L2009small1} we deduce that there exists a
further subsequence of $\{\varepsilon'_n\}$, which we denote by $\{\varepsilon''_n\}$,
such that
\begin{equation}\label{L2009small2}
\lim_{n\to \infty}\int_{\{
x\in\Omega:\,t<x_1<t+\varepsilon''_n\}}\frac{1}{\varepsilon''_n} F\big(\varepsilon''_n\nabla^2
u_{\varepsilon''_n,t},\,\nabla
u_{\varepsilon''_n,t},\,u_{\varepsilon''_n,t},\,f\big)\,dx=0\quad\text{for a.e.
}t\in\field{R}\,.
\end{equation}
Then for such $t$, using \er{L2009hhffff12} and \er{L2009eqgl1}, we
get
\begin{multline}\label{L2009hhffff323}
\bar\mu(\field{R}^N)=\lim_{n\to +\infty}\int_\Omega\frac{1}{\varepsilon_n}
F\big(\varepsilon_n\nabla^2 v_{\varepsilon_n},\,\nabla
v_{\varepsilon_n},\,v_{\varepsilon_n},\,f\big)\,dx\leq\\ \varliminf_{n\to
+\infty}\int_\Omega\frac{1}{\varepsilon''_n} F\big({\varepsilon''_n}\nabla^2
u_{\varepsilon''_n,t},\,\nabla u_{\varepsilon''_n,t},\,u_{\varepsilon''_n,t},\,f\big)\,dx\leq\\
\varlimsup_{n\to +\infty}\int_{\Omega_{t,\vec\nu}}\frac{1}{\varepsilon''_n}
F\big({\varepsilon''_n}\nabla^2 u_{\varepsilon''_n},\,\nabla
u_{\varepsilon''_n},\,u_{\varepsilon''_n},\,f\big)\,dx\\+\varlimsup_{n\to
+\infty}\int_{\Omega\setminus\Omega_{t+\varepsilon''_n,\vec\nu}}\frac{1}{\varepsilon''_n}
F\big({\varepsilon''_n}\nabla^2 v_{\varepsilon''_n},\,\nabla
v_{\varepsilon''_n},\,v_{\varepsilon''_n},\,f\big)\,dx\\+\lim_{n\to \infty}\int_{\{
x\in\Omega:\,t<x_1<t+\varepsilon''_n\}}\frac{1}{\varepsilon''_n} F\big(\varepsilon''_n\nabla^2
u_{\varepsilon''_n,t},\,\nabla u_{\varepsilon''_n,t},\,u_{\varepsilon''_n,t},\,f\big)\,dx\leq
\\
\varlimsup_{n\to +\infty}\int_{\Omega_{t,\vec\nu}}\frac{1}{\varepsilon'_n}
F\big({\varepsilon'_n}\nabla^2 u_{\varepsilon'_n},\,\nabla
u_{\varepsilon'_n},\,u_{\varepsilon'_n},\,f\big)\,dx+\varlimsup_{n\to
+\infty}\int_{\Omega\setminus\Omega_{t,\vec\nu}}\frac{1}{\varepsilon''_n}
F\big({\varepsilon''_n}\nabla^2 v_{\varepsilon''_n},\,\nabla
v_{\varepsilon''_n},\,v_{\varepsilon''_n},\,f\big)\,dx\,.
\end{multline}
So
\begin{multline}\label{L2009hhffff3234}
\varliminf_{n\to +\infty}\int_{\Omega_{t,\vec\nu}}\frac{1}{\varepsilon_n}
F\big(\varepsilon_n\nabla^2 v_{\varepsilon_n},\,\nabla
v_{\varepsilon_n},\,v_{\varepsilon_n},\,f\big)\,dx\leq\varlimsup_{n\to
+\infty}\int_{\Omega_{t,\vec\nu}}\frac{1}{\varepsilon'_n}
F\big({\varepsilon'_n}\nabla^2 u_{\varepsilon'_n},\,\nabla
u_{\varepsilon'_n},\,u_{\varepsilon'_n},\,f\big)\,dx\,.
\end{multline}
However, for a.e. $t\in\field{R}$ we have $\bar\mu(\{x\in\field{R}^N:\,
x\cdot\vec\nu=t\})=0$. Therefore, we get ???.
\end{comment*}
\end{proof}
By the same method we can prove the following more general result.
\begin{proposition}\label{L2009.02kkk}
Let $n\in\mathbb{N}$, $\vec P_j\in\mathcal{L}(\field{R}^{k\times N\times
N^j},\field{R}^{d_j})$ and $\vec Q_j\in\mathcal{L}(\field{R}^{m\times
N^j},\field{R}^{q_j})$ be linear operators for all $j=1,2,\ldots n$ and
let $F\in C^1\big(\{\field{R}^{d_n}\times\field{R}^{q_n}\}\times\ldots\times
\{\field{R}^{d_{2}}\times\field{R}^{q_{2}}\}\times
\{\field{R}^{d_1}\times\field{R}^{q_1}\}\times\field{R}^{k\times N}\times\field{R}^m\,,\field{R}\big)$
be such that $F\geq 0$ and there exist $C>0$ and $p\geq 1$
satisfying
\begin{multline*}
0\leq F\Big(\{a_1,b_1\},\{a_2,b_2\},\ldots,\{a_n,b_n\},c,d\Big)\leq
C\bigg(\sum_{j=1}^{n}|a_j|^{p}+\sum_{j=1}^{n}|b_j|^{p}+|c|^p+|d|^p+1\bigg)\\
\text{for
every}\;\;\Big(\{a_1,b_1\},\{a_2,b_2\},\ldots,\{a_n,b_n\},c,d\Big)
\,.
\end{multline*}
Furthermore, let $\vec k\in\field{R}^k$, $\vec\nu\in S^{N-1}$,
$\varphi^+,\varphi^-\in\field{R}^m$ and $V^+,V^-\in \field{R}^{k\times N}$ be such
that
$V^+-V^-=\vec k\otimes\vec\nu$ and
$F\big(0,0,\ldots,0,V^+,\varphi^+\big)=F\big(0,0,\ldots,0,V^-,\varphi^-\big)=0$.
Set $\varphi(x)\in L^\infty(\field{R}^N,\field{R}^m)$ and $v(x)\in Lip(\field{R}^N,\field{R}^k)$ by
\begin{equation}\label{ghgghjhjkdfhgkkk}
\varphi(x):=\begin{cases}
\varphi^+\quad\text{if}\;\;x\cdot\vec\nu>0\,,\\
\varphi^-\quad\text{if}\;\;x\cdot\vec\nu<0\,,
\end{cases}\quad\quad
v(x):=\begin{cases}
V^-\cdot x+(x\cdot\vec\nu)\vec k\quad\text{if}\;\;x\cdot\vec\nu\geq 0\,,\\
V^-\cdot x\quad\text{if}\;\;x\cdot\vec\nu<0\,.
\end{cases}
\end{equation}
Next
let $\big\{v_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset W^{(n+1),p}(I_{\vec
\nu},\field{R}^k)$ and $\big\{\varphi_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset
W^{n,p}(I_{\vec \nu},\field{R}^m)$ be such that $\,\lim_{\varepsilon\to
0^+}\varphi_\varepsilon=\varphi$ in $L^{p}_{loc}(I_{\vec \nu},\field{R}^m)$,
$\lim_{\varepsilon\to 0^+}v_\varepsilon=v$ in $W^{1,p}_{loc}(I_{\vec \nu},\field{R}^k)$,
$\lim_{\varepsilon\to 0^+}\big(\varepsilon^n\,\vec P_n\cdot\{\nabla^{n+1}
v_\varepsilon\}\big)=0$ in $L^p_{loc}(I_{\vec \nu},\field{R}^{d_n})$ and
$\lim_{\varepsilon\to 0^+}\big(\varepsilon^n\,\vec Q_n\cdot\{\nabla^n
\varphi_\varepsilon\}\big)=0$ in $L^p_{loc}(I_{\vec \nu},\field{R}^{q_n})$, and for
every $j=1,2,\ldots, (n-1)$ we have\\ $\lim_{\varepsilon\to
0^+}\big(\varepsilon^j\,\nabla^{j+1} v_\varepsilon\big)=0$ in $L^p_{loc}(I_{\vec
\nu},\field{R}^{k\times N^{j+1}})$ and $\lim_{\varepsilon\to 0^+}\big(\varepsilon^j\,\nabla^j
\varphi_\varepsilon\big)=0$ in $L^p_{loc}(I_{\vec \nu},\field{R}^{m\times N^j})$,
where, as before, $I_{\vec \nu}:=\{y\in\field{R}^N:\;|y\cdot
\vec\nu_j|<1/2\;\;\;\forall j=1,2\ldots N\}$, where
$\{\vec\nu_1,\vec\nu_2,\ldots,\vec\nu_N\}\subset\field{R}^N$ is an
orthonormal base in $\field{R}^N$ such that $\vec\nu_1:=\vec \nu$. Finally,
assume that
\begin{itemize}
\item either
$\lim_{\varepsilon\to 0^+}\big\{(v_\varepsilon-v)/\varepsilon\big\}=0$ in $L^{p}_{loc}(I_{\vec
\nu},\field{R}^k)$,
\item or there exists a family
$\big\{u_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset C^{n+1}(\field{R}^N,\field{R}^k)$, such that
$\nabla u_\varepsilon(x)\equiv h_\varepsilon(\vec\nu\cdot x)$ for some function $h_\varepsilon$
(i.e.\ $\nabla u_\varepsilon(x)$ depends actually only on the first real
variable in the base $\{\vec\nu_1,\vec\nu_2,\ldots,\vec\nu_N\}$),
$\nabla u_\varepsilon(x)=\nabla v(x)$ if $|\vec\nu\cdot x|>c_0$, where
$0<c_0<1/2$ is a constant, and $\lim_{\varepsilon\to 0^+}u_\varepsilon=v$ in
$W^{1,p}_{loc}(I_{\vec \nu},\field{R}^k)$, $\lim_{\varepsilon\to 0^+}\big(\varepsilon^n\,\vec
P_n\cdot\{\nabla^{n+1} u_\varepsilon\}\big)=0$ in $L^p_{loc}(I_{\vec
\nu},\field{R}^{d_n})$, for every $j=1,2,\ldots, (n-1)$ we have
$\lim_{\varepsilon\to 0^+}\big(\varepsilon^j\,\nabla^{j+1} u_\varepsilon\big)=0$ in
$L^p_{loc}(I_{\vec \nu},\field{R}^{k\times N^{j+1}})$ and $\lim_{\varepsilon\to
0^+}\big\{(v_\varepsilon-u_\varepsilon)/\varepsilon\big\}=0$ in $L^{p}_{loc}(I_{\vec
\nu},\field{R}^k)$.
\end{itemize}
Then
\begin{multline}\label{ggfghjjhfhfjfhjkkk}
\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^n\,\vec P_n\cdot\{\nabla^{n+1} v_{\varepsilon}\},\,\varepsilon^n\,\vec
Q_n\cdot\{\nabla^n \varphi_{\varepsilon}\}\Big\}\,,
\ldots,\,\Big\{\varepsilon\,\vec P_1\cdot\{\nabla^2 v_{\varepsilon}\},\,\varepsilon\,\vec
Q_1\cdot\{\nabla \varphi_{\varepsilon}\}\Big\},\,\nabla
v_{\varepsilon},\,\varphi_{\varepsilon}\Bigg)\,dx\\ \geq
E^{(n)}_{per}\Big(V^+,\varphi^+,V^-,\varphi^-,\vec\nu\Big)\,,
\end{multline}
where
\begin{multline}\label{L2009hhffff12kkk}
E^{(n)}_{per}\Big(V^+,\varphi^+,V^-,\varphi^-,\vec\nu\Big)\;:=\;\\
\inf\Bigg\{\int_{I_{\vec \nu}}\frac{1}{L} F\bigg(\Big\{L^n\,\vec
P_n\cdot\{\nabla^{n+1} \xi\},\,L^n\,\vec Q_n\cdot\{\nabla^n
\zeta\}\Big\},\,\ldots\,\Big\{L\,\vec P_1\cdot\{\nabla^2
\xi\},\,L\,\vec
Q_1\cdot\{\nabla \zeta\}\Big\},\,\nabla \xi,\,\zeta\bigg)\,dx:\\
L\in(0,+\infty)\,,\; \xi\in
\mathcal{S}^{(n)}_1(V^+,V^-,I_{\vec\nu})\,,\;\zeta\in
\mathcal{S}^{(n)}_2(\varphi^+,\varphi^-,I_{\vec\nu})\Bigg\}\,,
\end{multline}
with
\begin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjfgnhfkkk}
\mathcal{S}^{(n)}_1\big(V^+,V^-,I_{\vec\nu}\big):=
\bigg\{\xi\in C^{n+1}(\field{R}^N,\field{R}^k):\;\;\nabla \xi(y)=V^-\;\text{ if }\;y\cdot\vec\nu\leq-1/2,\\
\nabla \xi(y)=V^+\;\text{ if }\; y\cdot\vec\nu\geq 1/2\;\text{ and
}\;\nabla \xi\big(y+\vec\nu_j\big)=\nabla \xi(y)\;\;\forall
j=2,3,\ldots, N\bigg\}\,,
\end{multline}
and
\begin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjddddhdkgkkk}
\mathcal{S}^{(n)}_2\big(\varphi^+,\varphi^-,I_{\vec\nu}\big):=
\bigg\{\zeta\in C^n(\field{R}^N,\field{R}^m):\;\;\zeta(y)=\varphi^-\;\text{ if }\;y\cdot\vec\nu\leq-1/2,\\
\zeta(y)=\varphi^+\;\text{ if }\; y\cdot\vec\nu\geq 1/2\;\text{ and
}\;\zeta\big(y+\vec \nu_j\big)=\zeta(y)\;\;\forall j=2,3,\ldots,
N\bigg\}\,.
\end{multline}
\end{proposition}
As a consequence we have the following theorem.
\begin{theorem}\label{L2009.02kkkjkhkjh}
Let $n\in\mathbb{N}$, $\vec P\in\mathcal{L}(\field{R}^{k\times N\times
N^n},\field{R}^d)$ and $\vec Q\in\mathcal{L}(\field{R}^{m\times N^n},\field{R}^q)$ be
linear operators and
let $F\in C^1\big(\{\field{R}^{d}\times\field{R}^{q}\}\times \{\field{R}^{k\times
N^{n}}\times\field{R}^{m\times N^{n-1}}\}\times\ldots\times \{\field{R}^{k\times
N\times N}\times\field{R}^{m\times N}\}\times\field{R}^{k\times
N}\times\field{R}^m\,,\field{R}\big)$ be such that $F\geq 0$ and there exist $C>0$
and $p> 1$ satisfying
\begin{multline}\label{hgdfvdhvdhfv}
\frac{1}{C}\Big(|A|^p+|B|^p\Big)-C\Big(|c|^p+|d|^p+1\Big)\leq
F\Big(\{A,B\},\{a_1,b_1\},\ldots,\{a_{n-1},b_{n-1}\},c,d\Big)\leq\\
\leq
C\bigg(|A|^p+|B|^p+\sum_{j=1}^{n-1}|a_j|^{p}+\sum_{j=1}^{n-1}|b_j|^{p}+|c|^p+|d|^p+1\bigg)\\
\text{for
every}\;\;\Big(\{A,B\},\{a_1,b_1\},\{a_2,b_2\},\ldots,\{a_{n-1},b_{n-1}\},c,d\Big)
\,.
\end{multline}
Furthermore, let $\vec k\in\field{R}^k$, $\vec\nu\in S^{N-1}$,
$\varphi^+,\varphi^-\in\field{R}^m$ and $V^+,V^-\in \field{R}^{k\times N}$ be such
that
$V^+-V^-=\vec k\otimes\vec\nu$ and
$F\big(0,0,\ldots,0,V^+,\varphi^+\big)=F\big(0,0,\ldots,0,V^-,\varphi^-\big)=0$.
Define $\varphi(x)\in L^\infty(\field{R}^N,\field{R}^m)$ and $v(x)\in Lip(\field{R}^N,\field{R}^k)$ by
\begin{equation}\label{ghgghjhjkdfhgkkkvfggghhhhb}
\varphi(x):=\begin{cases}
\varphi^+\quad\text{if}\;\;x\cdot\vec\nu>0\,,\\
\varphi^-\quad\text{if}\;\;x\cdot\vec\nu<0\,,
\end{cases}\quad\quad
v(x):=\begin{cases}
V^-\cdot x+(x\cdot\vec\nu)\vec k\quad\text{if}\;\;x\cdot\vec\nu\geq 0\,,\\
V^-\cdot x\quad\quad\quad\text{if}\;\;x\cdot\vec\nu<0\,.
\end{cases}
\end{equation}
Next
let $\betaig\{v_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset W^{(n+1),p}(I_{v_\ec
\nu},\varphiield{R}^k)$ and $\betaig\{\varphi_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset
W^{n,p}(I_{v_\ec \nu},\varphiield{R}^m)$ be such that $\,\lim_{\varepsilon\thetao
0^+}\varphi_\varepsilon=\varphi$ in $L^{p}_{loc}(I_{v_\ec \nu},\varphiield{R}^m)$,
$\lim_{\varepsilon\thetao 0^+}v_\varepsilon=v$ in $W^{1,p}_{loc}(I_{v_\ec \nu},\varphiield{R}^k)$,
and $\lim_{\varepsilon\thetao
0^+}\betaig\{(v_\varepsilon-v)/\varepsilon\betaig\}=0$ in $L^{p}_{loc}(I_{v_\ec \nu},\varphiield{R}^k)$,
where, as before, $I_{v_\ec \nu}:=\{y\in\varphiield{R}^N:\;|y\cdot
v_\ec\nu_j|<1/2\;\;\;\varphiorall j=1,2\ldots N\}$ where
$\{v_\ec\nu_1,v_\ec\nu_2,\ldots,v_\ec\nu_N\}\sigmaubset\varphiield{R}^N$ is an
orthonormal base in $\varphiield{R}^N$ such that $v_\ec\nu_1:=v_\ec \nu$. Moreover
assume that,
\betaegin{itemize}
\item
either $v_\ec P=Id$ and $v_\ec Q=Id$ (where $Id$ is the corresponding
identity operator),
\item
or $\,\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{n-1}\,\nabla^{n} v_\varepsilon\betaig)=0$ in
$L^p_{loc}(I_{v_\ec \nu},\varphiield{R}^{k\thetaimes N^{n}})$ and\, $\lim_{\varepsilon\thetao
0^+}\betaig(\varepsilon^{n-1}\,\nabla^{n-1} \varphi_\varepsilon\betaig)=0$\\ in
$L^p_{loc}(I_{v_\ec \nu},\varphiield{R}^{m\thetaimes N^{n-1}})$.
\varepsilonnd{itemize}
Then
\betaegin{multline}\label{ggfghjjhfhfjfhjkkkhjkgghgh}
\varliminf_{\varepsilon\thetao 0^+}\int_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^n\,v_\ec P\cdot\{\nabla^{n+1} v_{\varepsilon}\},\,\varepsilon^n\,v_\ec
Q\cdot\{\nabla^n \varphi_{\varepsilon}\}\Big\},\\ \Big\{\varepsilon^{n-1}\,\nabla^n
v_{\varepsilon},\,\varepsilon^{n-1}\,\nabla^{n-1} \varphi_{\varepsilon}\Big\}\,,
\ldots,\,\Big\{\varepsilon\,\nabla^2 v_{\varepsilon},\,\varepsilon\,\nabla
\varphi_{\varepsilon}\Big\},\,\nabla v_{\varepsilon},\,\varphi_{\varepsilon}\Bigg)\,dx \gammaeq
E^{(n)}_{per}\Big(V^+,\varphi^+,V^-,\varphi^-,v_\ec\nu\Big)\,,
\varepsilonnd{multline}
where
\betaegin{multline}\label{L2009hhffff12kkkhjhjghgh}
E^{(n)}_{per}\Big(V^+,\varphi^+,V^-,\varphi^-,v_\ec\nu\Big)\;:=\;
\inf\Bigg\{\int_{I_{v_\ec \nu}}\varphirac{1}{L}\thetaimes\\ \thetaimes
F\betaigg(\Big\{L^n\,v_\ec P\cdot\{\nabla^{n+1} \xi\},\,L^n\,v_\ec
Q\cdot\{\nabla^n
z_\eta\}\Big\},\,\Big\{L^{n-1}\,\nabla^n \xi,\,L^{n-1}\,\nabla^{n-1} z_\eta\Big\},\,\ldots,\,\Big\{L\,\nabla^2 \xi,\,L\,\nabla z_\eta\Big\},\,\nabla \xi,\,z_\eta\betaigg)\,dx:\\
L\in(0,+\infty)\,,\; \xi\in
\mathcal{S}^{(n)}_1(V^+,V^-,I_{v_\ec\nu})\,,\;z_\eta\in
\mathcal{S}^{(n)}_2(\varphi^+,\varphi^-,I_{v_\ec\nu})\Bigg\}\,,
\varepsilonnd{multline}
with
\betaegin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjfgnhfkkkghg}
\mathcal{S}^{(n)}_1\betaig(V^+,V^-,I_{v_\ec\nu}\betaig):=
\betaigg\{\xi\in C^{n+1}(\varphiield{R}^N,\varphiield{R}^k):\;\;\nabla \xi(y)=V^-\;\thetaext{ if }\;y\cdotv_\ec\nu\leq-1/2,\\
\nabla \xi(y)=V^+\;\thetaext{ if }\; y\cdotv_\ec\nu\gammaeq 1/2\;\thetaext{ and
}\;\nabla \xi\betaig(y+v_\ec\nu_j\betaig)=\nabla \xi(y)\;\;\varphiorall
j=2,3,\ldots, N\betaigg\}\,,
\varepsilonnd{multline}
and
\betaegin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjddddhdkgkkkhggh}
\mathcal{S}^{(n)}_2\betaig(\varphi^+,\varphi^-,I_{v_\ec\nu}\betaig):=
\betaigg\{z_\eta\in C^n(\varphiield{R}^N,\varphiield{R}^m):\;\;z_\eta(y)=\varphi^-\;\thetaext{ if }\;y\cdotv_\ec\nu\leq-1/2,\\
z_\eta(y)=\varphi^+\;\thetaext{ if }\; y\cdotv_\ec\nu\gammaeq 1/2\;\thetaext{ and
}\;z_\eta\betaig(y+v_\ec \nu_j\betaig)=z_\eta(y)\;\;\varphiorall j=2,3,\ldots,
N\betaigg\}\,,
\varepsilonnd{multline}
\varepsilonnd{theorem}
\betaegin{proof}
First of all it is clear that without any loss of generality we may
assume
\betaegin{multline}\label{ggfghjjhfhfjfhjkkkhjkgghghhjhhbbhjjhb}
\varlimsup_{\varepsilon\thetao 0^+}\int_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^n\,v_\ec P\cdot\{\nabla^{n+1} v_{\varepsilon}\},\,\varepsilon^n\,v_\ec
Q\cdot\{\nabla^n \varphi_{\varepsilon}\}\Big\},\\ \Big\{\varepsilon^{n-1}\,\nabla^n
v_{\varepsilon},\,\varepsilon^{n-1}\,\nabla^{n-1} \varphi_{\varepsilon}\Big\}\,,
\ldots,\,\Big\{\varepsilon\,\nabla^2 v_{\varepsilon},\,\varepsilon\,\nabla
\varphi_{\varepsilon}\Big\},\,\nabla v_{\varepsilon},\,\varphi_{\varepsilon}\Bigg)\,dx
<\infty\,.
\varepsilonnd{multline}
Then by \varepsilonr{hgdfvdhvdhfv},
\varepsilonr{ggfghjjhfhfjfhjkkkhjkgghghhjhhbbhjjhb} and the fact that
$\,\lim_{\varepsilon\thetao 0^+}\varphi_\varepsilon=\varphi$ in $L^{p}_{loc}(I_{v_\ec
\nu},\varphiield{R}^m)$ and $\lim_{\varepsilon\thetao 0^+}v_\varepsilon=v$ in $W^{1,p}_{loc}(I_{v_\ec
\nu},\varphiield{R}^k)$ we deduce that $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^n\,v_\ec
P\cdot\{\nabla^{n+1} v_\varepsilon\}\betaig)=0$ in $L^p_{loc}(I_{v_\ec
\nu},\varphiield{R}^{d})$ and $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^n\,v_\ec Q\cdot\{\nabla^n
\varphi_\varepsilon\}\betaig)=0$ in $L^p_{loc}(I_{v_\ec \nu},\varphiield{R}^{q})$. Next
remember we assumed that
\betaegin{itemize}
\item
either $v_\ec P=Id$ and $v_\ec Q=Id$ (where $Id$ is the corresponding
identity operator),
\item
or $\,\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{n-1}\,\nabla^{n} v_\varepsilon\betaig)=0$ in
$L^p_{loc}(I_{v_\ec \nu},\varphiield{R}^{k\thetaimes N^{n}})$ and\, $\lim_{\varepsilon\thetao
0^+}\betaig(\varepsilon^{n-1}\,\nabla^{n-1} \varphi_\varepsilon\betaig)=0$\\ in
$L^p_{loc}(I_{v_\ec \nu},\varphiield{R}^{m\thetaimes N^{n-1}})$.
\varepsilonnd{itemize}
In the first case consider $n_0:=n$ and in the second case consider
$n_0:=n-1$. Thus in any case we have $\lim_{\varepsilon\thetao
0^+}\betaig(\varepsilon^{n_0}\,\nabla^{n_0+1} v_\varepsilon\betaig)=0$ in $L^p_{loc}(I_{v_\ec
\nu},\varphiield{R}^{k\thetaimes N^{n_0+1}})$ and\, $\lim_{\varepsilon\thetao
0^+}\betaig(\varepsilon^{n_0}\,\nabla^{n_0} \varphi_\varepsilon\betaig)=0$ in
$L^p_{loc}(I_{v_\ec \nu},\varphiield{R}^{m\thetaimes N^{n_0}})$. We will prove now
that $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{j}\,\nabla^{j+1} v_\varepsilon\betaig)=0$ in
$L^p_{loc}(I_{v_\ec \nu},\varphiield{R}^{k\thetaimes N^{j+1}})$ and\, $\lim_{\varepsilon\thetao
0^+}\betaig(\varepsilon^{j}\,\nabla^{j} \varphi_\varepsilon\betaig)=0$ in $L^p_{loc}(I_{v_\ec
\nu},\field{R}^{m\times N^{j}})$ for every $j\in\{1,2,\ldots,n_0\}$. Indeed,
fix an arbitrary domain $U\subset\subset I_{\vec \nu}$ with a smooth
boundary. Then clearly
\begin{equation}\label{gfjfhjfgjhfhkdrydsgbnvfjggyhggghfgfgdfdddrrd}
d_\varepsilon:=\int_U\bigg(\big|\varepsilon^{n_0}\,\nabla^{n_0+1}
v_\varepsilon\big|^p+\big|\varepsilon^{n_0}\,\nabla^{n_0} \varphi_\varepsilon\big|^p\bigg)dx\to
0\quad\text{as}\;\;\varepsilon\to 0^+\,.
\end{equation}
Moreover, clearly there exists $\bar C>0$ such that
$\int_{U}\big(|v_\varepsilon|^p+|\varphi_\varepsilon|^p\big)dx\leq \bar C$. On the
other hand, by Theorem 7.28 in \cite{gt} there exists $C_0>0$, which
depends only on $U$, $p$ and $n_0$, such that for every $\sigma(x)\in
W^{n_0,p}(U,\field{R})$ and every $\tau>0$ we have
\begin{equation}\label{gfjfhjfgjhfhkdrydsg}
\big\|\nabla^j\sigma(x)\big\|_{L^p(U)}\leq \tau
\big\|\sigma(x)\big\|_{W^{n,p}(U)}+C_0\tau^{-j/(n-j)}\big\|\sigma(x)\big\|_{L^p(U)}\quad\quad\forall\;
2\leq n\leq n_0\,,\;\;1\leq j<n\,.
\end{equation}
\betaegin{comment*}
compact $K\sigmaubset\sigmaubset I_{v_\ec\nu}$ and consider $\sigmaigma(x)\in
C^\infty_c(I_{v_\ec\nu},\varphiield{R})$ such that $\sigmaigma(x)=1$ for every $x\in
K$. Then define $u_\varepsilon(x):=\sigmaigma(x)\cdot v_\varepsilon(x)$ and
$\psi_\varepsilon(x):=\sigmaigma(x)\cdot \varphi_\varepsilon(x)$ for every $x\in\varphiield{R}^N$.
Then clearly $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{n_0}\,\nabla^{n_0+1}
u_\varepsilon\betaig)=0$ in $L^p(\varphiield{R}^N,\varphiield{R}^{k\thetaimes N^{n_0+1}})$ and
$\,\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{n_0}\,\nabla^{n_0} \psi_\varepsilon\betaig)=0$ in
$L^p(\field{R}^N,\field{R}^{m\times N^{n_0}})$. Moreover, clearly there exists
$C_0>0$ such that $\int_{\field{R}^N}\big(|u_\varepsilon|^p+|\psi_\varepsilon|^p\big)dx\leq
C_0$. Thus, if we denote by $\hat\psi_\varepsilon$ the Fourier transform of
$\psi_\varepsilon$, by $\hat u_\varepsilon$ the Fourier transform of $u_\varepsilon$ and set
$p^*:=p/(p-1)$, we will have
\betaegin{equation}\label{yfguyfuyffiug}
\lim_{\varepsilon\thetao 0}\int_{\varphiield{R}^N}\betaigg(\Big|\varepsilon^{n_0} |z|^{n_0}\hat
u_\varepsilon(z)\Big|^{p^*}+\Big|\varepsilon^{n_0} |z|^{n_0}\hat
\psi_\varepsilon(z)\Big|^{p^*}\betaigg)dz=0\quad\thetaext{and}\quad
\int_{\varphiield{R}^N}\Big(\betaig|\hat u_\varepsilon(z)\betaig|^{p^*}+\betaig|\hat
\psi_\varepsilon(z)\betaig|^{p^*}\Big)dz\leq C\,.
\varepsilonnd{equation}
On the other hand by H\"{o}lder inequality for every
$j\in\{1,2\ldots,n_0-1\}$ we obtain
\betaegin{multline}\label{yfguyfuyffiugjklhhjhhjhjjh}
\int_{\varphiield{R}^N}\betaigg(\Big|\varepsilon^{j} |z|^{j}\hat
u_\varepsilon(z)\Big|^{p^*}+\Big|\varepsilon^{j} |z|^{j}\hat
\psi_\varepsilon(z)\Big|^{p^*}\betaigg)dz\leq\\
2\Bigg\{\int_{\varphiield{R}^N}\betaigg(\Big|\varepsilon^{n_0} |z|^{n_0}\hat
u_\varepsilon(z)\Big|^{p^*}+\Big|\varepsilon^{n_0} |z|^{n_0}\hat
\psi_\varepsilon(z)\Big|^{p^*}\betaigg)dz\Bigg\}^{j/n_0}\cdot\Bigg\{\int_{\varphiield{R}^N}\Big(\betaig|\hat
u_\varepsilon(z)\betaig|^{p^*}+\betaig|\hat
\psi_\varepsilon(z)\betaig|^{p^*}\Big)dz\Bigg\}^{1-j/n_0}\,.
\varepsilonnd{multline}
Then, plugging \varepsilonr{yfguyfuyffiugjklhhjhhjhjjh} into
\varepsilonr{yfguyfuyffiug} we obtain
\betaegin{equation}\label{yfguyfuyffiugjkhhlhlhuuiiiiukkk}
\lim_{\varepsilon\thetao 0}\int_{\varphiield{R}^N}\betaigg(\Big|\varepsilon^{j} |z|^{j}\hat
u_\varepsilon(z)\Big|^{p^*}+\Big|\varepsilon^{j} |z|^{j}\hat
\psi_\varepsilon(z)\Big|^{p^*}\betaigg)dz=0\quad\varphiorall j\in\{1,2\ldots,n_0\}\,.
\varepsilonnd{equation}
Thus, we deduce $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{j}\,\nabla^{j+1}
u_\varepsilon\betaig)=0$ in $L^p(\varphiield{R}^N,\varphiield{R}^{k\thetaimes N^{j+1}})$ and\, $\lim_{\varepsilon\thetao
0^+}\betaig(\varepsilon^{j}\,\nabla^{j} \psi_\varepsilon\betaig)=0$ in $L^p(\varphiield{R}^N,\varphiield{R}^{m\thetaimes
N^{j}})$ for every $j\in\{1,2,\ldots,n_0\}$. Therefore, since
$\sigmaigma(x)=1$ for every $x\in K$ and since $K\sigmaubset\sigmaubset\varphiield{R}^N$ was
chosen arbitrary,
\varepsilonnd{comment*}
Thus, in particular, we deduce from \er{gfjfhjfgjhfhkdrydsg} that
there exists $C_1>0$, which depends only on $U$, $p$ and $n_0$, such
that for every $\sigma(x)\in W^{n_0,p}(U,\field{R})$ and every $\tau\in
(0,1)$ we have
\betaegin{equation}\label{gfjfhjfgjhfhkdrydsghgghgfgffgfgg}
\betaig\|\thetaau^j\nabla^j\sigmaigma(x)\betaig\|_{L^p(U)}\leq
\betaig\|\thetaau^{n_0}\nabla^{n_0}\sigmaigma(x)\betaig\|_{L^p(U)}+C_1\betaig\|\sigmaigma(x)\betaig\|_{L^p(U)}\quad\quad\varphiorall\;
1\leq j<n_0\,.
\varepsilonnd{equation}
Then setting $\thetaau:=\varepsilon\cdot(d_\varepsilon)^{-1/n_0}$, where $d_\varepsilon$ is defined
by \varepsilonr{gfjfhjfgjhfhkdrydsgbnvfjggyhggghfgfgdfdddrrd}, using
\varepsilonr{gfjfhjfgjhfhkdrydsghgghgfgffgfgg} and the fact that
$\int_{U}\betaig(|v_\varepsilon|^p+|\varphi_\varepsilon|^p\betaig)dx\leq \betaar C$ we obtain
\betaegin{equation}\label{gfjfhjfgjhfhkdrydsghgghgfgffgfggjhhgkhhhlllhhljjggjkgkjk}
\betaig\|\varepsilon^j\nabla^j\varphi_\varepsilon(x)\betaig\|_{L^p(U)}+\betaig\|\varepsilon^j\nabla^{j+1}
v_\varepsilon(x)\betaig\|_{L^p(U)}\leq \hat C d_\varepsilon^{j/n_0}\quad\quad\varphiorall\;
1\leq j<n_0\,,
\varepsilonnd{equation}
where $\hat C>0$ does not depend on $\varepsilon$. Thus, using
\er{gfjfhjfgjhfhkdrydsgbnvfjggyhggghfgfgdfdddrrd} we deduce
\betaegin{equation*}
\betaig\|\varepsilon^j\nabla^j\varphi_\varepsilon(x)\betaig\|_{L^p(U)}+\betaig\|\varepsilon^j\nabla^{j+1}
v_\varepsilon(x)\betaig\|_{L^p(U)}\thetao 0\quad\thetaext{as}\;\;\varepsilon\thetao 0^+\quad\varphiorall\;
1\leq j<n_0\,,
\varepsilonnd{equation*}
Therefore, since the domain with a smooth boundary $U\subset\subset
I_{\vec \nu}$ was chosen arbitrarily, we finally deduce that
$\lim_{\varepsilon\to 0^+}\big(\varepsilon^{j}\,\nabla^{j+1} v_\varepsilon\big)=0$ in
$L^p_{loc}(I_{\vec \nu},\field{R}^{k\times N^{j+1}})$ and\, $\lim_{\varepsilon\to
0^+}\big(\varepsilon^{j}\,\nabla^{j} \varphi_\varepsilon\big)=0$ in $L^p_{loc}(I_{\vec
\nu},\field{R}^{m\times N^{j}})$ for every $j\in\{1,2,\ldots,n_0\}$. Then
applying Proposition \ref{L2009.02kkk} completes the proof.
\varepsilonnd{proof}
Next, by combining Theorem \ref{L2009.02kkkjkhkjh} and
Theorem \ref{dehgfrygfrgygen}, we obtain the following result
describing the lower bound for the first order problems.
\betaegin{proposition}\label{dehgfrygfrgygenbgggggggggggggkgkg}
Let $\Omega\sigmaubset\varphiield{R}^N$ be an open set. Furthermore, let $F\in
C\betaig(\varphiield{R}^{m\thetaimes N^n}\thetaimes\varphiield{R}^{m\thetaimes
N^{(n-1)}}\thetaimes\ldots\thetaimes\varphiield{R}^{m\thetaimes N}\thetaimes \varphiield{R}^m,\varphiield{R}\betaig)$, be
such that $F\gammaeq 0$ and there exist $C>0$ and $p>1$ satisfying
\betaegin{multline}\label{hgdfvdhvdhfvjjjjiiiuyyyji}
\varphirac{1}{C}|A|^p-C\Big(|b|^p+1\Big)\leq
F\Big(A,a_1,\ldots,a_{n-1},b\Big) \leq
C\betaigg(|A|^p+\sigmaum_{j=1}^{n-1}|a_j|^{p}+|b|^p+1\betaigg)\\
\thetaext{for every}\;\;\betaig(A,a_1,a_2,\ldots,a_{n-1},b\betaig) \,.
\varepsilonnd{multline}
Next let $\varphi\in L^p(\Omega,\varphiield{R}^m)$ be such that $F\betaig(0,0,\ldots,0,
\varphi(x)\betaig)=0$ for a.e. $x\in\Omega$. Assume also that there exist a
$\mathcal{H}^{N-1}$ $\sigmaigma$-finite Borel set $D\sigmaubset\Omega$ and three
Borel mappings $\,\varphi^+(x):D\thetao\varphiield{R}^m$, $\varphi^-(x):D\thetao\varphiield{R}^m$
and $v_\ec n(x):D\thetao S^{N-1}$ such that for every $x\in D$ we have
\betaegin{multline}\label{L2009surfhh8128odno888jjjjjkkkkkkgenjnhhh}
\lim\limits_{\rho\thetao 0^+}\varphirac{\int_{B_\rho^+(x,v_\ec
n(x))}\betaig|\varphi(y)-\varphi^+(x)\betaig|^p\,dy}
{\mathcal{L}^N\betaig(B_\rho(x)\betaig)}=0\quad\thetaext{and}\quad
\lim\limits_{\rho\thetao 0^+}\varphirac{\int_{B_\rho^-(x,v_\ec
n(x))}\betaig|\varphi(y)-\varphi^-(x)\betaig|^p\,dy}
{\mathcal{L}^N\betaig(B_\rho(x)\betaig)}=0\,.
\varepsilonnd{multline}
Then for every
$\{\varphi_\varepsilon\}_{\varepsilon>0}\sigmaubset\cap W^{n,p}_{loc}(\Omega,\varphiield{R}^m)$ such that
$\varphi_\varepsilon\thetao \varphi$ in $L^p_{loc}(\Omega,\varphiield{R}^m)$ as $\varepsilon\thetao 0^+$, we
will have
\betaegin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkgenhjhhhhj}
\varliminf_{\varepsilon\thetao 0^+}\varphirac{1}{\varepsilon}\int_\Omega F\betaigg(\,\varepsilon^n\nabla^n
\varphi_\varepsilon(x),\,\varepsilon^{n-1}\nabla^{n-1}\varphi_\varepsilon(x),\,\ldots,\,\varepsilon\nabla \varphi_\varepsilon(x),\, \varphi_\varepsilon(x)\betaigg)dx\\
\gammaeq \int_{D}\betaar E^{(n)}_{per}\Big(\varphi^+(x),\varphi^-(x),v_\ec
n(x)\Big)d \mathcal H^{N-1}(x)\,,
\varepsilonnd{multline}
where
\betaegin{multline}\label{L2009hhffff12kkkhjhjghghgvgvggcjhgggh}
\betaar E^{(n)}_{per}\Big(\varphi^+,\varphi^-,v_\ec n\Big)\;:=\;\\
\inf\Bigg\{\int_{I_{v_\ec n}}\varphirac{1}{L} F\betaigg(L^n\,\nabla^n
z_\eta,\,L^{n-1}\,\nabla^{n-1} z_\eta,\,\ldots,\,L\,\nabla
z_\eta,\,z_\eta\betaigg)\,dx:\;\; L\in(0,+\infty)\,,\;z_\eta\in
\mathcal{S}^{(n)}(\varphi^+,\varphi^-,I_{v_\ec n})\Bigg\}\,,
\varepsilonnd{multline}
with
\betaegin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjddddhdkgkkkhgghjhjhjkj}
\mathcal{S}^{(n)}\betaig(\varphi^+,\varphi^-,I_{v_\ec n}\betaig):=
\betaigg\{z_\eta\in C^n(\varphiield{R}^N,\varphiield{R}^m):\;\;z_\eta(y)=\varphi^-\;\thetaext{ if }\;y\cdotv_\ec n\leq-1/2,\\
z_\eta(y)=\varphi^+\;\thetaext{ if }\; y\cdotv_\ec n\gammaeq 1/2\;\thetaext{ and
}\;z_\eta\betaig(y+v_\ec \nu_j\betaig)=z_\eta(y)\;\;\varphiorall j=2,3,\ldots,
N\betaigg\}\,,
\varepsilonnd{multline}
Here $I_{v_\ec n}:=\{y\in\varphiield{R}^N:\;|y\cdot v_\ec\nu_j|<1/2\;\;\;\varphiorall
j=1,2\ldots N\}$ where
$\{v_\ec\nu_1,v_\ec\nu_2,\ldots,v_\ec\nu_N\}\sigmaubset\varphiield{R}^N$ is an
orthonormal base in $\varphiield{R}^N$ such that $v_\ec\nu_1:=v_\ec n$.
\varepsilonnd{proposition}
Thus, by plugging Proposition \ref{dehgfrygfrgygenbgggggggggggggkgkg}
into Theorem \ref{ffgvfgfhthjghgjhg2}, we deduce the $\Gamma$-limit
result for the first order problem.
\betaegin{theorem}\label{dehgfrygfrgygenbgggggggggggggkgkgthtjtf}
Let $\Omega\sigmaubset\varphiield{R}^N$ be an open set. Furthermore, let $F\in
C^1\betaig(\varphiield{R}^{m\thetaimes N^n}\thetaimes\varphiield{R}^{m\thetaimes
N^{(n-1)}}\thetaimes\ldots\thetaimes\varphiield{R}^{m\thetaimes N}\thetaimes \varphiield{R}^m,\varphiield{R}\betaig)$, be
such that $F\gammaeq 0$ and there exist $C>0$ and $p>1$ satisfying
\betaegin{multline}\label{hgdfvdhvdhfvjjjjiiiuyyyjitghujtr}
\varphirac{1}{C}|A|^p-C\Big(|b|^p+1\Big)\leq
F\Big(A,a_1,\ldots,a_{n-1},b\Big) \leq
C\betaigg(|A|^p+\sigmaum_{j=1}^{n-1}|a_j|^{p}+|b|^p+1\betaigg)\\
\thetaext{for every}\;\;\betaig(A,a_1,a_2,\ldots,a_{n-1},b\betaig) \,.
\varepsilonnd{multline}
Next let $\varphi\in BV(\varphiield{R}^N,\varphiield{R}^{m})\cap L^\infty$ be such that $\|D
\varphi\|(\partial\Omegamega)=0$ and $F\betaig(0,0,\ldots,0,
\varphi(x)\betaig)=0$ for a.e. $x\in\Omega$.
Then for every
$\{\varphi_\varepsilon\}_{\varepsilon>0}\sigmaubset W^{n,p}_{loc}(\Omega,\varphiield{R}^m)$ such that
$\varphi_\varepsilon\thetao \varphi$ in $L^p_{loc}(\Omega,\varphiield{R}^m)$ as $\varepsilon\thetao 0^+$, we
will have
\betaegin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkgenhjhhhhjtjurt}
\varliminf_{\varepsilon\thetao 0^+}\varphirac{1}{\varepsilon}\int_\Omega F\betaigg(\,\varepsilon^n\nabla^n
\varphi_\varepsilon(x),\,\varepsilon^{n-1}\nabla^{n-1}\varphi_\varepsilon(x),\,\ldots,\,\varepsilon\nabla \varphi_\varepsilon(x),\, \varphi_\varepsilon(x)\betaigg)dx\\
\gammaeq \int_{\Omega\cap J_\varphi}\betaar
E^{(n)}_{per}\Big(\varphi^+(x),\varphi^-(x),v_\ec \nu(x)\Big)d
\mathcal H^{N-1}(x)\,,
\varepsilonnd{multline}
where
\betaegin{multline}\label{L2009hhffff12kkkhjhjghghgvgvggcjhggghtgjut}
\betaar E^{(n)}_{per}\Big(\varphi^+,\varphi^-,v_\ec \nu\Big)\;:=\;\\
\inf\Bigg\{\int_{I_{v_\ec \nu}}\varphirac{1}{L} F\betaigg(L^n\,\nabla^n
z_\eta,\,L^{n-1}\,\nabla^{n-1} z_\eta,\,\ldots,\,L\,\nabla
z_\eta,\,z_\eta\betaigg)\,dx:\;\; L\in(0,+\infty)\,,\;z_\eta\in
\mathcal{S}^{(n)}(\varphi^+,\varphi^-,I_{v_\ec\nu})\Bigg\}\,,
\varepsilonnd{multline}
with
\betaegin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjddddhdkgkkkhgghjhjhjkjtjytrj}
\mathcal{S}^{(n)}\betaig(\varphi^+,\varphi^-,I_{v_\ec \nu}\betaig):=
\betaigg\{z_\eta\in C^n(\varphiield{R}^N,\varphiield{R}^m):\;\;z_\eta(y)=\varphi^-\;\thetaext{ if }\;y\cdotv_\ec \nu\leq-1/2,\\
z_\eta(y)=\varphi^+\;\thetaext{ if }\; y\cdotv_\ec \nu\gammaeq 1/2\;\thetaext{ and
}\;z_\eta\betaig(y+v_\ec \nu_j\betaig)=z_\eta(y)\;\;\varphiorall j=2,3,\ldots,
N\betaigg\}\,,
\varepsilonnd{multline}
Here $I_{v_\ec \nu}:=\{y\in\varphiield{R}^N:\;|y\cdot v_\ec\nu_j|<1/2\;\;\;\varphiorall
j=1,2\ldots N\}$ where
$\{v_\ec\nu_1,v_\ec\nu_2,\ldots,v_\ec\nu_N\}\sigmaubset\varphiield{R}^N$ is an
orthonormal base in $\varphiield{R}^N$ such that $v_\ec\nu_1:=v_\ec \nu$.
Moreover, there exists a sequence $\{\psi_\varepsilon\}_{\varepsilon>0}\subset
C^\infty(\field{R}^N,\field{R}^m)$ such that
$\int_\Omega\psi_\varepsilon(x)dx=\int_\Omega\varphi(x)dx$, $\psi_\varepsilon\to \varphi$ in
$L^p(\Omega,\field{R}^m)$ as $\varepsilon\to 0^+$ and we have
\betaegin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkgenhjhhhhjtjurtgfhfhfjfjfj}
\lim_{\varepsilon\thetao 0^+}\varphirac{1}{\varepsilon}\int_\Omega F\betaigg(\,\varepsilon^n\nabla^n
\psi_\varepsilon(x),\,\varepsilon^{n-1}\nabla^{n-1}\psi_\varepsilon(x),\,\ldots,\,\varepsilon\nabla \psi_\varepsilon(x),\, \psi_\varepsilon(x)\betaigg)dx\\
= \int_{\Omega\cap J_\varphi}\betaar
E^{(n)}_{per}\Big(\varphi^+(x),\varphi^-(x),v_\ec \nu(x)\Big)d
\mathcal H^{N-1}(x)\,.
\varepsilonnd{multline}
\varepsilonnd{theorem}
\varepsilonnd{comment}
\section{Further estimates for the lower
bound}\label{vdhgvdfgbjfdhgf}
\begin{lemma}\label{L2009.02new}
Let
$\vec Q\in\mathcal{L}(\field{R}^{m\times N},\field{R}^d)$ be a linear operator and
let $F\in C^0(\field{R}^{k}\times\field{R}^d\times\field{R}^m,\field{R})$ be such that $F\geq 0$
and there exist $C>0$, $q\geq 1$ and $p=(p_1,p_2,\ldots,
p_k)\in\field{R}^k$ such that $p_j\geq 1$ for every $j$ and
\betaegin{equation}\label{RsTT}
0\leq F(a,b,c)\leq
C\Big(|a|^{p}+|b|^q+|c|^q+1\Big)\quad\quad\varphiorall\,
(a,b,c)\in\varphiield{R}^{k}\thetaimes\varphiield{R}^d\thetaimes\varphiield{R}^m
\varepsilonnd{equation}
(see Definition \ref{gdhgvdgjkdfgjkhdd}). Next let $v_\ec\nu\in
S^{N-1}$ and let $\betaig\{m_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset
L^{p}_{loc}(I_{v_\ec \nu},\varphiield{R}^k)$ and $m_0(x)\in L^{p}(I_{v_\ec
\nu},\varphiield{R}^k)$ be such that $F\betaig(m_0(x),0,0\betaig)= 0$ for a.e. $x\in
I_{v_\ec \nu}$ and $\,\lim_{\varepsilon\thetao 0^+}m_\varepsilon=m_0$ in
$L^{p}_{loc}(I_{v_\ec \nu},\varphiield{R}^k)$, where, as before, $I_{v_\ec
\nu}:=\{y\in\varphiield{R}^N:\;|y\cdot v_\ec\nu_j|<1/2\;\;\;\varphiorall j=1,
\ldots
N\}$ and $\{v_\ec\nu_1,
\ldots,v_\ec\nu_N\}\sigmaubset\varphiield{R}^N$ is an
orthonormal base in $\varphiield{R}^N$, such that $v_\ec\nu_1:=v_\ec \nu$.
Furthermore, let
$\betaig\{\varphi_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset L^{q}_{loc}(I_{v_\ec
\nu},\varphiield{R}^m)$ be such that
$\,\lim_{\varepsilon\thetao 0^+}\varphi_\varepsilon=0$ in $L^{q}_{loc}(I_{v_\ec
\nu},\varphiield{R}^m)$,
and $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilonv_\ec Q\cdot\nabla\varphi_\varepsilon\betaig)=0$ in
$L^q_{loc}(I_{v_\ec \nu},\varphiield{R}^{d})$.
Then there exist $\betaig\{r_\varepsilon\betaig\}_{0<\varepsilon<1}\sigmaubset(0,1)$
and $\betaig\{\psi_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset
C^{\infty}_{c}(I_{v_\ec \nu},\varphiield{R}^m)$ such that $\lim_{\varepsilon\thetao
0^+}r_\varepsilon=1$,
$\,\lim_{\varepsilon\thetao 0^+}\psi_\varepsilon=0$ in $L^{q}(I_{v_\ec \nu},\varphiield{R}^m)$,
$\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilonv_\ec Q\cdot\nabla \psi_\varepsilon\betaig)=0$ in
$L^q(I_{v_\ec \nu},\varphiield{R}^{d})$,
$\,\lim_{\varepsilon\thetao 0^+}m_{(r_\varepsilon\varepsilon)}\betaig(r_\varepsilon
x\betaig)=m_0(x)$ in $L^{p}(I_{v_\ec \nu},\varphiield{R}^k)$ and
\betaegin{multline}\label{fvyjhfyffhjfghgjkghfff}
\varliminf_{\varepsilon\thetao 0^+}\int\limits_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon}
F\betaigg(m_\varepsilon(x)\,,\,
\varepsilonv_\ec Q\cdot\nabla\varphi_{\varepsilon}(x)\,,\,
\varphi_{\varepsilon}(x)\betaigg)dx \gammaeq \varliminf_{\varepsilon\thetao 0^+}\int\limits_{I_{v_\ec
\nu}}\varphirac{1}{\varepsilon}F\betaigg(m_{(r_\varepsilon\varepsilon)}\betaig(r_\varepsilon x\betaig)\,,\,
\varepsilonv_\ec Q\cdot\nabla\psi_{\varepsilon}(x)\,,\,
\psi_{\varepsilon}(x)\betaigg)dx\,.
\varepsilonnd{multline}
\varepsilonnd{lemma}
\betaegin{proof}
Clearly we may assume
\betaegin{equation}\label{ggfghjjhfhfjfhjhjhjhjfjkdghfdnew}
\varliminf_{\varepsilon\thetao 0^+}\int\limits_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon}
F\betaigg(m_\varepsilon(x)\,,\,
\varepsilonv_\ec Q\cdot\nabla\varphi_{\varepsilon}(x)\,,\,
\varphi_{\varepsilon}(x)\betaigg)dx<+\infty\,,
\varepsilonnd{equation}
otherwise it is trivial. Moreover, without any loss of generality we
may assume that $v_\ec\nu=v_\ec e_1:=(1,
0,\ldots,0)$ and $I_{v_\ec
\nu}=I:=\betaig\{y=(y_1,
\ldots, y_N)\in\varphiield{R}^N:\;|y_j|<1/2\;\;\;\varphiorall j=1,
\ldots, N\betaig\}$.
Furthermore, since, by mollification, we always can approximate
$\varphi_\varepsilon$ by smooth functions, there exist
$\betaig\{\betaar\varphi_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset C^\infty(I_{v_\ec
\nu},\varphiield{R}^m)$, such that
$\,\lim_{\varepsilon\thetao 0^+}\betaar\varphi_\varepsilon=0$ in $L^{q}_{loc}(I_{v_\ec
\nu},\varphiield{R}^m)$,
$\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilonv_\ec Q\cdot\nabla\betaar\varphi_\varepsilon\betaig)=0$ in
$L^q_{loc}(I_{v_\ec \nu},\varphiield{R}^{d})$ and
\betaegin{multline}\label{fughfighdfighfihklhh}
\varphirac{1}{\varepsilon}F\betaigg(m_\varepsilon(x)\,,\,
\varepsilonv_\ec Q\cdot\nabla\betaar \varphi_{\varepsilon}(x)\,,\,
\betaar \varphi_{\varepsilon}(x)\betaigg)- \varphirac{1}{\varepsilon}F\betaigg(m_\varepsilon(x)\,,\,
\varepsilonv_\ec Q\cdot\nabla\varphi_{\varepsilon}(x)\,,\,
\varphi_{\varepsilon}(x)\betaigg)\thetao 0 \;\;\;\thetaext{in}\;\,L^1_{loc}(I_{v_\ec
\nu},\varphiield{R})\;\,\thetaext{as}\;\,\varepsilon\thetao 0^+.
\varepsilonnd{multline}
Next consider $l(t)\in C^\infty(\field{R},\field{R})$ with the properties
\begin{equation}\label{L2009smooth1new}
\begin{cases}l(t)=1 \quad\quad\quad\text{for every }t\in(-\infty,\delta)\,,\\
l(t)\in[0,1] \quad\;\;\text{for every }t\in[\delta,1-\delta]\,,\\
l(t)=0 \quad\quad\quad\text{for every }
t\in(1-\delta,+\infty)\,,\end{cases}
\end{equation}
where $\delta\in(0,1/2)$. Clearly such a function exists.
\betaegin{comment}
Then set
\betaegin{multline}\label{vjhvhjvhjvhjjnjknew}
\psi_\varepsilon(x):=\varphi^-\,+\,l\betaigg(\varphirac{x_1}{\varepsilon}+\varphirac{1}{2}\betaigg)\cdot\betaig(\varphi^+-\varphi^-\betaig)\quad\thetaext{and}\quad
\hat u_\varepsilon(x):=V^-\cdot
x+\varepsilon\int_{-\infty}^{x_1/\varepsilon}l\betaig(s+1/2\betaig)\,ds\,\cdot\,v_\ec k\quad
\varphiorall x\in\varphiield{R}^N\,.
\varepsilonnd{multline}
Thus $\psi_\varepsilon\in C^\infty(\varphiield{R}^N,\varphiield{R}^m)$ and $u_\varepsilon\in
C^\infty(\varphiield{R}^N,\varphiield{R}^k)$ and in particular
\betaegin{align}\label{hjfjffjgkjgkkgjghghnew}
\varepsilon\nabla\psi_\varepsilon(x):=
l'\betaigg(\varphirac{x_1}{\varepsilon}+\varphirac{1}{2}\betaigg)\cdot\betaig(\varphi^+-\varphi^-\betaig)\omegatimesv_\ec
e_1\quad\varphiorall x\in\varphiield{R}^N\,,
\\
\label{vjhvhjvhjvhjjnjkjgghgfjnew}
\nabla \hat
u_\varepsilon(x):=V^-+l\betaigg(\varphirac{x_1}{\varepsilon}+\varphirac{1}{2}\betaigg)\Big(v_\ec
k\omegatimes v_\ec
e_1\Big)=V^-+l\betaigg(\varphirac{x_1}{\varepsilon}+\varphirac{1}{2}\betaigg)\cdot\Big(V^+-V^-\Big)\quad
\varphiorall x\in\varphiield{R}^N\,,\\
\label{fvfgfffhhffffnew}
\varepsilon\nabla^2 \hat
u_\varepsilon(x):=l'\betaigg(\varphirac{x_1}{\varepsilon}+\varphirac{1}{2}\betaigg)\cdot\Big(V^+-V^-\Big)\omegatimesv_\ec
e_1\quad \varphiorall x\in\varphiield{R}^N\,.
\varepsilonnd{align}
Moreover by \varepsilonr{L2009smooth1new}, \varepsilonr{vjhvhjvhjvhjjnjknew} and
\varepsilonr{vjhvhjvhjvhjjnjkjgghgfjnew} we obtain
\betaegin{multline}\label{vjhvhjvhjvhjjnjkgffgjkjhjnew}
\psi_\varepsilon(x)=
\betaegin{cases}
\varphi^-\;\;\thetaext{if}\;\; x_1\leq-\varepsilon/2\,,\\
\varphi^+\;\;\thetaext{if}\;\; x_1\gammaeq\varepsilon/2\,,
\varepsilonnd{cases}
\;\;\nabla \hat u_\varepsilon(x)=\betaegin{cases}V^-\;\;\thetaext{if}\;\; x_1\leq-\varepsilon/2\,,\\
V^+\;\;\thetaext{if}\;\; x_1\gammaeq\varepsilon/2\,,\varepsilonnd{cases}\;\; \hat
u_\varepsilon(x)=v(x)\;\;\thetaext{if}\;\; |x_1|\gammaeq \varepsilon/2\,,
\varepsilonnd{multline}
and by \varepsilonr{hjfjffjgkjgkkgjghghnew} and \varepsilonr{fvfgfffhhffffnew},
\betaegin{equation}\label{vjhvhjvhjvhjjnjkgffgjkjhjfggfffnew}
\nabla\psi_\varepsilon(x)=0\quad\thetaext{if}\quad |x_1|\gammaeq\varepsilon/2\,,
\quad\thetaext{and}\quad \nabla^2 \hat u_\varepsilon(x)= 0\quad\thetaext{if}\quad
|x_1|\gammaeq\varepsilon/2\,.
\varepsilonnd{equation}
Therefore, by \varepsilonr{vjhvhjvhjvhjjnjknew}, \varepsilonr{hjfjffjgkjgkkgjghghnew},
\varepsilonr{vjhvhjvhjvhjjnjkjgghgfjnew}, \varepsilonr{fvfgfffhhffffnew},
\varepsilonr{vjhvhjvhjvhjjnjkgffgjkjhjnew} and
\varepsilonr{vjhvhjvhjvhjjnjkgffgjkjhjfggfffnew} we have $\lim_{\varepsilon\thetao
0^+}\psi_\varepsilon=\varphi$ in $L^{p}(I_{v_\ec \nu},\varphiield{R}^m)$, $\lim_{\varepsilon\thetao
0^+}\hat u_\varepsilon=v$ in $W^{1,p}(I_{v_\ec \nu},\varphiield{R}^k)$, $\lim_{\varepsilon\thetao
0^+}\{\varepsilon\nabla^2 \hat u_\varepsilon\}=0$ in $L^p(I_{v_\ec \nu},\varphiield{R}^{k\thetaimes
N\thetaimes N})$, $\lim_{\varepsilon\thetao 0^+}\{\varepsilon\nabla \psi_\varepsilon\}=0$ in
$L^p(I_{v_\ec \nu},\varphiield{R}^{m\thetaimes N})$ and $\lim_{\varepsilon\thetao 0^+}\betaig\{(\hat
u_\varepsilon-v)/\varepsilon\betaig\}=0$ in $L^{p}(I_{v_\ec \nu},\varphiield{R}^k)$. Thus, if we have
$\lim_{\varepsilon\thetao 0^+}\betaig\{(v_\varepsilon-v)/\varepsilon\betaig\}=0$ in $L^{p}_{loc}(I_{v_\ec
\nu},\varphiield{R}^k)$, then clearly $\lim_{\varepsilon\thetao 0^+}\betaig\{(v_\varepsilon-\hat
u_\varepsilon)/\varepsilon\betaig\}=0$ in $L^{p}_{loc}(I_{v_\ec \nu},\varphiield{R}^k)$ and we set
$u_\varepsilon(x):=\hat u_\varepsilon(x)$. So in any case there exists a family
$\betaig\{u_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset C^2(\varphiield{R}^N,\varphiield{R}^k)$, such that
$\nabla u_\varepsilon(x)\varepsilonquiv h_\varepsilon(v_\ec\nu\cdot x)$ for some function
$h_\varepsilon$, $\nabla u_\varepsilon(x)=\nabla v(x)$ if $|v_\ec\nu\cdot x|>c_0$,
where $0<c_0<1/2$ is a constant, and
\betaegin{equation}\label{gfjguyfygbjhhjgjgghfffgfgnew}
\betaegin{cases}
\lim_{\varepsilon\thetao 0^+}u_\varepsilon=v\quad\thetaext{in}\quad W^{1,p}(I_{v_\ec
\nu},\varphiield{R}^k)\,,\\
\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon\,v_\ec P\cdot\{\nabla^2 u_\varepsilon\}\betaig)=0\quad
\thetaext{in}\quad L^p(I_{v_\ec \nu},\varphiield{R}^{d})\,,
\\
\lim_{\varepsilon\thetao 0^+}\betaig\{(v_\varepsilon-u_\varepsilon)/\varepsilon\betaig\}=0\quad \thetaext{in}\quad
L^{p}(I_{v_\ec \nu},\varphiield{R}^k)\,.
\varepsilonnd{cases}
\varepsilonnd{equation}
\varepsilonnd{comment}
Then for every $0\leq t<1/2$ define
\begin{equation}\label{L2009deftvu1hhhjnew}
\psi_{\varepsilon,t}(x):=\bar\varphi_\varepsilon(x)\times\prod\limits_{j=1}^{N}\bigg(l\big((x_j-t)/\varepsilon\big)\cdot
l\big(-(t+x_j)/\varepsilon\big)\bigg)\quad \quad\forall x\in\field{R}^N\,.
\end{equation}
Then for every $t\in[0,1/2)$ clearly $\psi_{\varepsilon,t}\in
C^\infty(I^1,\varphiield{R}^m)$
where
\betaegin{equation}\label{vhjfyhjgjkgjkghjk}
I^s=\Big\{y=(y_1,
\ldots, y_N)\in\varphiield{R}^N:\;|y_j|<s/2\;\;\;\varphiorall j=1,
\ldots,
N\Big\}\quad\varphiorall s>0\,.
\varepsilonnd{equation}
Moreover, for each such $t\in[0,1/2)$ we have
\begin{equation}\label{L2009eqgl1new}
\begin{cases}
\psi_{\varepsilon,t}(x)=\bar\varphi_\varepsilon(x)
\quad\text{if for every}\;\; j\in\{1,\ldots,N\}\;\;\text{we have}\;\;
|x_j|<t\,,\\
\psi_{\varepsilon,t}(x)=0
\quad\text{if
}|x_j|>t+(1-\delta)\varepsilon\;\;\text{for some}\;\; j\in\{1,\ldots,N\}\,.
\end{cases}
\end{equation}
So, by \eqref{L2009eqgl1new}, for small $\varepsilon>0$ we have $\psi_{\varepsilon,t}\in
C^\infty_c(I^{t+\varepsilon},\mathbb{R}^m)$.
Next we will prove that for every $\tau\in(0,1/2)$ we have
\begin{equation}
\label{L2009small1new} \lim_{\varepsilon\to
0}\int_{0}^{\tau}\,\int_{\cup_{j=1}^{N}\{ x\in
I^{t+\varepsilon}:\,|x_j|>t\}}\frac{1}{\varepsilon}
F\bigg(m_\varepsilon(x)\,,\,
\varepsilon\vec Q\cdot\nabla\psi_{\varepsilon,t}(x)\,,\,
\psi_{\varepsilon,t}(x)\bigg)dx\,dt=0\,.
\end{equation}
Indeed, fix $\tau_0\in(\tau,1/2)$. Then for $0<\varepsilon<(\tau_0-\tau)/2$
we have
\begin{multline}\label{L2009small1hjhjjhhjhhjjioiiounew}
\int_{0}^{\tau}\,\int_{\cup_{j=1}^{N}\{ x\in
I^{t+\varepsilon}:\,|x_j|>t\}}\frac{1}{\varepsilon}
F\bigg(m_\varepsilon(x)\,,\,
\varepsilon\vec Q\cdot\nabla\psi_{\varepsilon,t}(x)\,,\,
\psi_{\varepsilon,t}(x)\bigg)dx\,dt\leq\\
\sum_{j=1}^{N}\int_{0}^{\tau}\,\int_{\{ x\in
I^{t+\varepsilon}:\,|x_j|>t\}}\frac{1}{\varepsilon}
F\bigg(m_\varepsilon(x)\,,\,
\varepsilon\vec Q\cdot\nabla\psi_{\varepsilon,t}(x)\,,\,
\psi_{\varepsilon,t}(x)\bigg)dx\,dt\leq\\
\sum_{j=1}^{N}\frac{1}{\varepsilon}\int_{-\tau}^{\tau}\Bigg\{
\int_{\{x:\, x\in I^{|t|+\varepsilon},\,-\varepsilon<x_j<\varepsilon\}}F\bigg(m_\varepsilon(x+t\vec
e_j)\,,\,
\varepsilon\vec Q\cdot\nabla\psi_{\varepsilon,|t|}(x+t\vec e_j)\,,\,
\psi_{\varepsilon,|t|}(x+t\vec e_j)\bigg)dx\Bigg\}dt\\
\leq \sum_{j=1}^{N}\frac{1}{\varepsilon}\int_{-\varepsilon}^{\varepsilon}\Bigg\{
\int_{I^{\tau+\varepsilon}}F\bigg(m_\varepsilon(x+s\vec e_j)\,,\,
\varepsilon\vec Q\cdot\nabla\psi_{\varepsilon,|x_j|}(x+s\vec e_j)\,,\,
\psi_{\varepsilon,|x_j|}(x+s\vec e_j)\bigg)dx\Bigg\}ds\\
\leq \sum_{j=1}^{N}\frac{1}{\varepsilon}\int_{-\varepsilon}^{\varepsilon}\Bigg\{
\int_{I^{\tau_0}}F\bigg(m_\varepsilon(x)\,,\,
\varepsilon\vec Q\cdot\nabla\psi_{\varepsilon,|x_j-s|}(x)\,,\,
\psi_{\varepsilon,|x_j-s|}(x)\bigg)dx\Bigg\}ds\,.
\end{multline}
Thus changing variables in \eqref{L2009small1hjhjjhhjhhjjioiiounew}
gives
\begin{multline}\label{L2009small1hjhjjhhjhhjjjkljkljklnew}
\int_{0}^{\tau}\,\int_{\cup_{j=1}^{N}\{ x\in
I^{t+\varepsilon}:\,|x_j|>t\}}\frac{1}{\varepsilon}
F\bigg(m_\varepsilon(x)\,,\,
\varepsilon\vec Q\cdot\nabla\psi_{\varepsilon,t}(x)\,,\,
\psi_{\varepsilon,t}(x)\bigg)dx\,dt\\
\leq\sum_{j=1}^{N}\int_{-1}^{1}\Bigg\{
\int_{I^{\tau_0}}F\bigg(m_\varepsilon(x)\,,\,
\varepsilon\vec Q\cdot\nabla\psi_{\varepsilon,|x_j-\varepsilon s|}(x)\,,\,
\psi_{\varepsilon,|x_j-\varepsilon s|}(x)\bigg)dx\Bigg\}ds\,.
\end{multline}
Next,
clearly by \eqref{L2009deftvu1hhhjnew} there exists a constant $C_0>0$
such that for every $j\in\{1,\ldots,N\}$, every $s\in[0,1/2)$ and
every $\varepsilon\in(0,1)$ we have
\begin{equation}\label{ffyfyguihihiuiolkkkkjjjkjkjknew}
\int_{I^{\tau_0}}\bigg(
\Big|\varepsilon\vec Q\cdot\nabla\psi_{\varepsilon,s}(x)\Big|^q+
\Big|\psi_{\varepsilon,s}(x)\Big|^{q}
\bigg)dx \leq C_0\int_{I^{\tau_0}}\Bigg(
\Big|\varepsilon\vec Q\cdot\nabla\bar\varphi_\varepsilon(x)\Big|^q+
\big|\bar\varphi_\varepsilon(x)\big|^{q}
\Bigg)dx.
\end{equation}
In particular, since $0<\tau<\tau_0<1/2$ were chosen arbitrarily, we
deduce that for every $s\in[0,1/2)$ we have
$\,\lim_{\varepsilon\to 0^+}\psi_{\varepsilon,s}=0$ in $L^{q}_{loc}(I_{\vec
\nu},\mathbb{R}^m)$ and
$\lim_{\varepsilon\to 0^+}\big(\varepsilon\vec Q\cdot\nabla\psi_{\varepsilon,s}\big)=0$ in
$L^{q}_{loc}(I_{\vec \nu},\mathbb{R}^d)$.
Next, by \eqref{ffyfyguihihiuiolkkkkjjjkjkjknew} we deduce
\begin{multline}\label{ffyfyguihihiuiolkkknew}
\lim_{\varepsilon\to 0^+}\int_{I^{\tau_0}}\bigg(
\Big|\varepsilon\vec Q\cdot\nabla\psi_{\varepsilon,|x_j-\varepsilon s|}(x)\Big|^q+
\Big|\psi_{\varepsilon,|x_j-\varepsilon s|}(x)\Big|^{q}
\bigg)dx=0 \;\;\text{uniformly in}\;s\in(-1,1)\;\;\forall j=1,\ldots
,N.
\end{multline}
On the other hand we have $\lim_{\varepsilon\to 0^+}m_\varepsilon=m_0$ in
$L^{p}(I^{\tau_0},\mathbb{R}^k)$, $F\big(m_0(x),0,0\big)=0$ for a.e. $x\in
I^{\tau_0}$ and \eqref{RsTT}. Therefore, by \eqref{ffyfyguihihiuiolkkknew},
for every $j=1,\ldots,N$ we deduce
\begin{equation}
\label{L2009small1hjhjjhhjhhjjjkljkljklhjhjhihjnew} \lim_{\varepsilon\to
0^+}\int_{-1}^{1}\Bigg\{
\int_{I^{\tau_0}}F\bigg(m_\varepsilon(x)\,,\,
\varepsilon\vec Q\cdot\nabla\psi_{\varepsilon,|x_j-\varepsilon s|}(x)\,,\,
\psi_{\varepsilon,|x_j-\varepsilon s|}(x)\bigg)dx\Bigg\}ds=0\,.
\end{equation}
Then plugging \eqref{L2009small1hjhjjhhjhhjjjkljkljklhjhjhihjnew} into
\eqref{L2009small1hjhjjhhjhhjjjkljkljklnew} we deduce
\eqref{L2009small1new}.
Next let $\varepsilon_n\downarrow 0$ be such that
\begin{multline}\label{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbbihhbbmnew}
\lim_{n\to +\infty}\int_{I_{\vec \nu}}\frac{1}{\varepsilon_n}
F\bigg(m_{\varepsilon_n}(x)\,,\,
\varepsilon_n\vec Q\cdot\nabla\varphi_{\varepsilon_n}(x)\,,\,
\varphi_{\varepsilon_n}(x)\bigg)dx=\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec
\nu}}\frac{1}{\varepsilon} F\bigg(m_\varepsilon(x)\,,\,
\varepsilon\vec Q\cdot\nabla\varphi_{\varepsilon}(x)\,,\,
\varphi_{\varepsilon}(x)\bigg)dx\,.
\end{multline}
Then, since \eqref{L2009small1new} is valid for every $\tau\in(0,1/2)$,
we can pass to a subsequence, still denoted by $\varepsilon_n\downarrow 0$,
so that for a.e. $t\in(0,1/2)$ we will have
\begin{equation}
\label{L2009small1hjkhhjhjnew}
\lim_{n\to+\infty}\int_{\cup_{j=1}^{N}\{ x\in
I^{t+\varepsilon_n}:\,|x_j|>t\}}\frac{1}{\varepsilon_n}
F\bigg(m_{\varepsilon_n}(x)\,,\,
\varepsilon_n\vec Q\cdot\nabla\psi_{\varepsilon_n,t}(x)\,,\,
\psi_{\varepsilon_n,t}(x)\bigg)dx=0\,.
\end{equation}
Therefore, by \eqref{fughfighdfighfihklhh}, \eqref{L2009eqgl1new} and
\eqref{L2009small1hjkhhjhjnew}, for a.e. $t\in(0,1/2)$ we have
\begin{multline}\label{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjnew}
\lim_{n\to +\infty}\int_{I_{\vec \nu}}\frac{1}{\varepsilon_n}
F\bigg(m_{\varepsilon_n}(x)\,,\,
\varepsilon_n\vec Q\cdot\nabla\varphi_{\varepsilon_n}(x)\,,\,
\varphi_{\varepsilon_n}(x)\bigg)dx \geq \varlimsup_{n\to
+\infty}\int_{I^t}\frac{1}{\varepsilon_n}
F\bigg(m_{\varepsilon_n}(x)\,,\,
\varepsilon_n\vec Q\cdot\nabla\varphi_{\varepsilon_n}(x)\,,\,
\varphi_{\varepsilon_n}(x)\bigg)dx\\= \varlimsup_{n\to
+\infty}\int_{I^t}\frac{1}{\varepsilon_n}
F\bigg(m_{\varepsilon_n}(x)\,,\,
\varepsilon_n\vec Q\cdot\nabla\psi_{\varepsilon_n,t}(x)\,,\,
\psi_{\varepsilon_n,t}(x)\bigg)dx\\= \varlimsup_{n\to
+\infty}\int_{I^{t+\varepsilon_n}}\frac{1}{\varepsilon_n}
F\bigg(m_{\varepsilon_n}(x)\,,\,
\varepsilon_n\vec Q\cdot\nabla\psi_{\varepsilon_n,t}(x)\,,\,
\psi_{\varepsilon_n,t}(x)\bigg)dx\,.
\end{multline}
Thus, by \eqref{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbbihhbbmnew},
\eqref{L2009eqgl1new} and
\eqref{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjnew}, for a.e.
$t\in(0,1/2)$ we have
\begin{multline}\label{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjhhhjhjjnew}
\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon}
F\bigg(m_\varepsilon(x),
\varepsilon\vec Q\cdot\nabla\varphi_{\varepsilon}(x),
\varphi_{\varepsilon}(x)\bigg)dx\geq\varlimsup_{n\to
+\infty}\int_{I^{t+\varepsilon_n}}\frac{1}{\varepsilon_n}
F\bigg(m_{\varepsilon_n}(x),
\varepsilon_n\vec Q\cdot\nabla\psi_{\varepsilon_n,t}(x),
\psi_{\varepsilon_n,t}(x)\bigg)dx.
\end{multline}
Since the last inequality is valid for a.e. $t\in(0,1/2)$, by a
diagonal argument we deduce that there exist new sequences
$t_n\uparrow (1/2)$ and $\varepsilon_n\downarrow 0$ as $n\to +\infty$ such
that $\varepsilon_n+t_n<1/2$,
\begin{equation}\label{vjhghihikhklhjkg}
\lim_{n\to+\infty}\int_{I^{t_n+\varepsilon_n}}\Big|m_{\varepsilon_n}(x)-m_0(x)\Big|^pdx=0\,,
\end{equation}
\begin{equation}
\label{hdfghfighfigh} \lim_{n\to+\infty}\int_{I^{t_n+\varepsilon_n}}\bigg(
\Big|\psi_{\varepsilon_n,t_n}\Big|^{q}+
\Big|\varepsilon_n\vec Q\cdot\nabla\psi_{\varepsilon_n,t_n}\Big|^q
\bigg)dx=0,
\end{equation}
and
\begin{multline}\label{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjnewgffhjhj}
\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon}
F\bigg(m_\varepsilon(x),
\varepsilon\vec Q\cdot\nabla\varphi_{\varepsilon}(x),
\varphi_{\varepsilon}(x)\bigg)dx \geq \lim_{n\to
+\infty}\int_{I^{t_n+\varepsilon_n}}\frac{1}{\varepsilon_n}
F\bigg(m_{\varepsilon_n}(x),
\varepsilon_n\vec Q\cdot\nabla\psi_{\varepsilon_n,t_n}(x),
\psi_{\varepsilon_n,t_n}(x)\bigg)dx.
\end{multline}
On the other hand, defining
$\bar\psi_{n}(x):=\psi_{\varepsilon_n,t_n}\big((2t_n+2\varepsilon_n)x\big)$, we clearly
have $\bar\psi_{n}\in C^\infty_c(I^1,\mathbb{R}^m)$.
Moreover, changing the variable of integration
$z=x/(2t_n+2\varepsilon_n)$ in
\eqref{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjnewgffhjhj} we
finally deduce
\begin{multline}\label{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjnewgffhjhjglgjklg}
\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon}
F\bigg(m_\varepsilon(x)\,,\,
\varepsilon\vec Q\cdot\nabla\varphi_{\varepsilon}(x)\,,\,
\varphi_{\varepsilon}(x)\bigg)dx\geq\\
\lim_{n\to +\infty}\int_{I^{1}}\frac{2(t_n+\varepsilon_n)}{\varepsilon_n}
F\bigg(m_{\varepsilon_n}\big(2(t_n+\varepsilon_n)z\big)\,,\,
\frac{\varepsilon_n}{2(t_n+\varepsilon_n)}\vec Q\cdot\nabla\bar\psi_{n}(z)\,,\,
\bar\psi_{n}(z)\bigg)dz,
\end{multline}
and \eqref{fvyjhfyffhjfghgjkghfff}
follows. Finally, since $m_0\in L^{p}(I_{\vec \nu},\mathbb{R}^k)$, by
\eqref{vjhghihikhklhjkg} we deduce $\,\lim_{n\to
+\infty}m_{(r_{\varepsilon_n}\varepsilon_n)}\big(r_{\varepsilon_n} x\big)=m_0(x)$ in
$L^{p}(I_{\vec \nu},\mathbb{R}^k)$, and by \eqref{hdfghfighfigh} we obtain
$\,\lim_{n\to +\infty}\bar\psi_{n}=0$ in $L^{q}(I_{\vec \nu},\mathbb{R}^m)$
and
$\lim_{n\to +\infty}\big(\varepsilon_n\vec Q\cdot\nabla \bar\psi_{n}\big)=0$
in $L^q(I_{\vec \nu},\mathbb{R}^{d})$.
This completes the proof.
\begin{comment}
Since the last inequality is valid for a.e.
$t\in(0,1/2)$, using \varepsilonr{fvyjhfyffhjfgh} we deduce that there exists
a sequence $t_n\uparrow (1/2)$ as $n\thetao +\infty$ such that
\betaegin{multline}\label{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjhhhjhjjnewgkgjkgg}
\varliminf_{\varepsilon\thetao 0^+}\int\limits_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon}
F\betaigg(m_\varepsilon(x)+\varepsilon\betaig(v_\ec P\cdot\nabla \{v_\ec A\cdot \nabla
v_{\varepsilon}\}\betaig)(x)+\varepsilon\betaig(v_\ec Q\cdot\nabla\{v_\ec
B\cdot\varphi_{\varepsilon}\}\betaig)(x)+\betaig(v_\ec A\cdot\nabla
v_{\varepsilon}\betaig)(x)+\betaig(v_\ec
B\cdot\varphi_{\varepsilon}\betaig)(x)\betaigg)dx\gammaeq\\-\lim_{n\thetao+\infty}\Bigg\{\varlimsup_{\varepsilon\thetao
0^+} \int\limits_{I_{v_\ec \nu}\sigmaetminus I^{t_n}}\varphirac{1}{\varepsilon}
F\betaig(m_{\varepsilon}(x)\betaig)dx\Bigg\}+\varliminf_{n\thetao+\infty}\Bigg\{
\varliminf_{\varepsilon\thetao 0^+}\int_{I_{v_\ec\nu}}\varphirac{1}{\varepsilon}\thetaimes\\
\thetaimes F\betaigg(m_{\varepsilon}(x)+\varepsilon\betaig(v_\ec P\cdot\nabla \{v_\ec A\cdot
\nabla u_{\varepsilon,t_n}\}\betaig)(x)+\varepsilon\betaig(v_\ec Q\cdot\nabla\{v_\ec
B\cdot\psi_{\varepsilon,t_n}\}\betaig)(x)+\betaig(v_\ec A\cdot\nabla
u_{\varepsilon,t_n}\betaig)(x)+\betaig(v_\ec
B\cdot\psi_{\varepsilon,t_n}\betaig)(x)\betaigg)dx\Bigg\}\\=
0+\varliminf_{n\thetao+\infty}\Bigg\{
\varliminf_{\varepsilon\thetao 0^+}\int_{I_{v_\ec\nu}}\varphirac{1}{\varepsilon}\thetaimes\\
\thetaimes F\betaigg(m_{\varepsilon}(x)+\varepsilon\betaig(v_\ec P\cdot\nabla \{v_\ec A\cdot
\nabla u_{\varepsilon,t_n}\}\betaig)(x)+\varepsilon\betaig(v_\ec Q\cdot\nabla\{v_\ec
B\cdot\psi_{\varepsilon,t_n}\}\betaig)(x)+\betaig(v_\ec A\cdot\nabla
u_{\varepsilon,t_n}\betaig)(x)+\betaig(v_\ec
B\cdot\psi_{\varepsilon,t_n}\betaig)(x)\betaigg)dx\Bigg\}\,.
\varepsilonnd{multline}
On the other hand for every we have $\lim_{\varepsilon\thetao
0^+}u_{\varepsilon,t_n}/\varepsilon=0$ in $L^{p}_{loc}(I_{v_\ec \nu},\varphiield{R}^d)$,
$\,\lim_{\varepsilon\thetao 0^+}v_\ec B\cdot\psi_{\varepsilon,t_n}=0$ in
$L^{p}_{loc}(I_{v_\ec \nu},\varphiield{R}^k)$, $\lim_{\varepsilon\thetao 0^+}v_\ec A\cdot
\nabla u_{\varepsilon,t_n}=0$ in $L^{p}_{loc}(I_{v_\ec \nu},\varphiield{R}^k)$,
$\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilonv_\ec P\cdot\nabla\{v_\ec A\cdot\nabla
u_{\varepsilon,t_n}\}\betaig)=0$ in $L^p_{loc}(I_{v_\ec \nu},\varphiield{R}^{k})$,
$\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilonv_\ec Q\cdot\nabla\{v_\ec B\cdot
\psi_{\varepsilon,t_n}\}\betaig)=0$ and $\lim_{\varepsilon\thetao 0^+}\|v_\ec P\|\,\nabla
u_{\varepsilon,t_n}=0$ in $L^{p}_{loc}(I_{v_\ec \nu},\varphiield{R}^{d\thetaimes N})$.
Therefore, taking the appropriate diagonal subsequences of
$\betaig\{\psi_{\varepsilon,t_n}\betaig\}_{\varepsilon\deltaownarrow 0,n\uparrow+\infty}$ and
$\betaig\{u_{\varepsilon,t_n}\betaig\}_{\varepsilon\deltaownarrow 0,n\uparrow+\infty}$ and using
\varepsilonr{ggfghjjhfhfjfhjhjhjhjfjkdghfdnmbguiyuihyuyhukjhhhjhjjnewgkgjkgg}
gives the desired result.
\end{comment}
\end{proof}
By the same method we can prove the following more general result.
\begin{lemma}\label{L2009.02newgen}
Let $n\geq 1$ be a natural number and
let $F\in C^0(\mathbb{R}^{k}\times\mathbb{R}^d\times\mathbb{R}^{m\times
N^{n-1}}\times\ldots\times \mathbb{R}^{m\times N}\times\mathbb{R}^m,\mathbb{R})$ be such
that $F\geq 0$ and there exist $C>0$, $q\geq 1$ and
$p=(p_1,p_2,\ldots, p_k)\in\mathbb{R}^k$ such that $p_j\geq 1$ for every $j$
and
\begin{multline*}
0\leq F\Big(a,b,c_1,\ldots,c_{n-1},d\Big)\leq
C\Big(|a|^{p}+|b|^q+\sum_{j=1}^{n-1}|c_j|^q+|d|^q+1\Big)\\ \forall\,
(a,b,c_1,\ldots,c_{n-1},d)\in\mathbb{R}^{k}\times\mathbb{R}^d\times\mathbb{R}^{m\times
N^{n-1}}\times\ldots\times \mathbb{R}^{m\times N}\times\mathbb{R}^m
\end{multline*}
(see Definition \ref{gdhgvdgjkdfgjkhdd}).
Next let
$\vec Q_{n}\in\mathcal{L}(\mathbb{R}^{m\times N^{n}},\mathbb{R}^d)$ be a linear
operator, $\vec\nu\in S^{N-1}$ and let
$\big\{m_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset L^{p}_{loc}(I_{\vec \nu},\mathbb{R}^k)$
and $m_0(x)\in L^{p}(I_{\vec \nu},\mathbb{R}^k)$ be such that
$F\big(m_0(x),0,0,\ldots,0\big)= 0$ for a.e. $x\in I_{\vec \nu}$ and
$\,\lim_{\varepsilon\to 0^+}m_\varepsilon=m_0$ in $L^{p}_{loc}(I_{\vec \nu},\mathbb{R}^k)$,
where, as before, $I_{\vec \nu}:=\{y\in\mathbb{R}^N:\;|y\cdot
\vec\nu_j|<1/2\;\;\;\forall j=1,\ldots, N\}$ and
$\{\vec\nu_1,\ldots,\vec\nu_N\}\subset\mathbb{R}^N$ is an
orthonormal base in $\mathbb{R}^N$, such that $\vec\nu_1:=\vec \nu$.
Furthermore, let
$\big\{\varphi_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset L^{q}_{loc}(I_{\vec
\nu},\mathbb{R}^m)$
be such that
$\,\lim_{\varepsilon\to 0^+}\varphi_\varepsilon=0$ in $L^{q}_{loc}(I_{\vec
\nu},\mathbb{R}^m)$,
$\lim_{\varepsilon\to 0^+}\big(\varepsilon^{n}\vec Q_{n}\cdot\nabla^{n}
\varphi_\varepsilon\big)=0$ in $L^q_{loc}(I_{\vec \nu},\mathbb{R}^{d})$,
and we have
$\,\lim_{\varepsilon\to 0^+}\big(\varepsilon^{j}\nabla^{j}\varphi_\varepsilon\big)=0$ in
$L^{q}_{loc}(I_{\vec \nu},\mathbb{R}^{m\times N^{j}})$ for every
$j\in\{1,\ldots,n-1\}$.
Then there exist $\big\{r_\varepsilon\big\}_{0<\varepsilon<1}\subset(0,1)$ and
$\big\{\psi_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset C^{\infty}_{c}(I_{\vec
\nu},\mathbb{R}^m)$
such that $\lim_{\varepsilon\to
0^+}r_\varepsilon=1$,
$\,\lim_{\varepsilon\to 0^+}\psi_\varepsilon=0$ in $L^{q}(I_{\vec \nu},\mathbb{R}^m)$,
$\lim_{\varepsilon\to 0^+}\big(\varepsilon^{n}\vec Q_{n}\cdot\nabla^{n}
\psi_\varepsilon\big)=0$ in $L^q(I_{\vec \nu},\mathbb{R}^{d})$,
for every $j=1,\ldots,(n-1)$ we have $\lim_{\varepsilon\to
0^+}\big(\varepsilon^{j}\nabla^{j}\psi_\varepsilon\big)=0$ in $L^{q}(I_{\vec
\nu},\mathbb{R}^{m\times N^{j}})$,
$\,\lim_{\varepsilon\to 0^+}m_{(r_\varepsilon\varepsilon)}\big(r_\varepsilon
x\big)=m_0(x)$ in $L^{p}(I_{\vec \nu},\mathbb{R}^k)$,
and
\begin{multline}\label{fvyjhfyffhjfghgjkghfffgen}
\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon}
F\Bigg(m_\varepsilon(x)\,,\,\varepsilon^n\vec Q_n\cdot\nabla^n\varphi_{\varepsilon}(x)\,,\,
\varepsilon^{n-1}\nabla^{n-1}\varphi_{\varepsilon}(x)\,,\,\ldots\,,\,
\varepsilon\nabla\varphi_{\varepsilon}(x)\,,\,
\varphi_{\varepsilon}(x)
\Bigg)dx\geq\\
\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec
\nu}}\frac{1}{\varepsilon}F\Bigg(m_{r_\varepsilon\varepsilon}\big(r_\varepsilon x\big)\,,\,\varepsilon^n\vec
Q_n\cdot\nabla^n\psi_{\varepsilon}(x)\,,\,
\varepsilon^{n-1}\nabla^{n-1}\psi_{\varepsilon}(x)\,,\,\ldots\,,\,
\varepsilon\nabla\psi_{\varepsilon}(x)\,,\,
\psi_{\varepsilon}(x)
\Bigg)dx\,.
\end{multline}
\end{lemma}
As a consequence of Lemma \ref{L2009.02newgen} we have the following
proposition.
\begin{proposition}\label{L2009.02kkknew}
Let $n_1,n_2\in \mathbb{N}$. Consider the linear operators
$\vec B\in\mathcal{L}(\mathbb{R}^{d\times N},\mathbb{R}^m)$
and $\vec Q\in\mathcal{L}(\mathbb{R}^{k\times N^{n_2}},\mathbb{R}^{l})$, and
let $F\in C^0\big(\{\mathbb{R}^{m\times
N^{n_1}}\times\ldots\times\mathbb{R}^{m\times N}\times\mathbb{R}^{m}\}\times
\{\mathbb{R}^{l}\times\mathbb{R}^{k\times N^{n_2-1}}\times\ldots\times\mathbb{R}^{k\times
N}\times\mathbb{R}^k\}\,,\mathbb{R}\big)$ be such that $F\geq 0$ and there exist
$C>0$ and $p_1\geq 1$, $p_2\geq 1$ satisfying
\begin{multline*}
0\leq F\Big(\{a_1,\ldots, a_{n_1+1}\},\{b_1,\ldots,b_{n_2+1}\}
\Big)\leq
C\bigg(\sum_{j=1}^{n_1+1}|a_j|^{p_1}+\sum_{j=1}^{n_2+1}|b_j|^{p_2}
+1\bigg)\\ \text{for every}\;\; \Big(\{a_1,\ldots,
a_{n_1+1}\},\{b_1,\ldots,b_{n_2+1}\}
\Big).
\end{multline*}
Furthermore, let
$\vec\nu\in S^{N-1}$, $\varphi^+,\varphi^-\in\mathbb{R}^k$
and $W^+,W^-\in \mathbb{R}^{m}$ be such that if we set
\begin{equation}\label{vfyguiguhikjnklklhkukuytou}
\varphi_0(x):=\begin{cases}
\varphi^+\;\;\text{if}\;\;x\cdot\vec\nu>0,\\
\varphi^-\;\;\text{if}\;\;x\cdot\vec\nu<0,
\end{cases}
\;\text{and}\quad\; W_0(x):=\begin{cases}
W^+\;\;\text{if}\;\;x\cdot\vec\nu>0,\\
W^-\;\;\text{if}\;\;x\cdot\vec\nu<0,
\end{cases}
\end{equation}
then
\begin{equation}\label{vfyguiguhikjnklklhbjkbbjk}
F\Big(\big\{0,0,\ldots,W_0(x)\big\},
\big\{0,0,\ldots,\varphi_0(x)\big\}\Big)=0\quad\text{for
a.e.}\;x\in\mathbb{R}^N\,.
\end{equation}
Next
let $\big\{w_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset L^{p_1}_{loc}(I_{\vec
\nu},\mathbb{R}^d)$
and
$\big\{\varphi_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset
W^{n_2,p_2}_{loc}(I_{\vec \nu},\mathbb{R}^k)$ be such that $\{\vec B\cdot
\nabla w_\varepsilon\}\in W^{n_1,p_1}_{loc}(I_{\vec \nu},\mathbb{R}^m)$, $\lim_{\varepsilon\to
0^+}\varphi_\varepsilon=\varphi_0$ in $L^{p_2}_{loc}(I_{\vec \nu},\mathbb{R}^k)$,
$\lim_{\varepsilon\to 0^+}\{\vec B\cdot \nabla w_\varepsilon\}=W_0$ in
$L^{p_1}_{loc}(I_{\vec \nu},\mathbb{R}^m)$,
$\lim_{\varepsilon\to 0^+}\big(\varepsilon^{n_2}\,\vec Q\cdot\{\nabla^{n_2}
\varphi_\varepsilon\}\big)=0$ in $L^{p_2}_{loc}(I_{\vec \nu},\mathbb{R}^{l})$
and for every $j=1,2,\ldots, (n_2-1)$ we have
$\lim_{\varepsilon\to 0^+}\big(\varepsilon^j\,\nabla^j \varphi_\varepsilon\big)=0$ in $L^{
p_2}_{loc}(I_{\vec \nu},\mathbb{R}^{k\times N^j})$.
Here, as before, $I_{\vec \nu}:=\{y\in\mathbb{R}^N:\;|y\cdot
\vec\nu_j|<1/2\;\;\;\forall j=1,\ldots, N\}$ where
$\{\vec\nu_1,\ldots,\vec\nu_N\}\subset\mathbb{R}^N$ is an orthonormal base
in $\mathbb{R}^N$ such that $\vec\nu_1:=\vec \nu$.
\begin{comment}
Finally assume that
there exists a
families $\betaig\{h_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset C^{n+1}(\varphiield{R}^N,\varphiield{R}^k)$
and $\betaig\{\gammaamma_\varepsilon(t)\betaig\}_{0<\varepsilon<1}\sigmaubset
C^{n+1}\betaig((-1/2,1/2),\varphiield{R}^k\betaig)$, such that $v_\ec A\cdot\nabla
h_\varepsilon(x)\varepsilonquiv z_\varepsilon(v_\ec\nu\cdot x)$ for some function $z_\varepsilon$ (i.e.
$v_\ec A\cdot\nabla h_0(x)$ depends actually only on the first real
variable in the base $\{v_\ec\nu_1,v_\ec\nu_2,\ldots,v_\ec\nu_N\}$),
$v_\ec A\cdot\nabla h_\varepsilon(x)=V_0(x)$ if $|v_\ec\nu\cdot x|>c_0$, where
$0<c_0<1/2$ is a constant, $\lim_{\varepsilon\thetao 0^+}\{v_\ec A\cdot \nabla
h_\varepsilon\}=V_0$ in $L^{p_2}(I_{v_\ec \nu},\varphiield{R}^d)$, $\lim_{\varepsilon\thetao
0^+}\betaig\{v_\ec A\cdot \nabla_x \gammaamma_\varepsilon(v_\ec\nu\cdot x)\betaig\}=0$ in
$L^{p_2}_{loc}(I_{v_\ec \nu},\varphiield{R}^d)$, for every $j=1,2,\ldots, n_2$ we
have $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{j}\,v_\ec
P_{j}\cdot\nabla^{j}\betaig\{v_\ec A\cdot\nabla h_\varepsilon\betaig\}\betaig)=0$ in
$L^{p_2}(I_{v_\ec \nu},\varphiield{R}^{d_{j}})$, for every $j=1,2,\ldots, n_2$ we
have $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{j}\,v_\ec
P_{j}\cdot\nabla^{j}\betaig\{v_\ec A\cdot\nabla_x \gammaamma_\varepsilon(v_\ec\nu\cdot
x)\betaig\}\betaig)=0$ in $L^{p_2}_{loc}(I_{v_\ec \nu},\varphiield{R}^{d_{j}})$,
for every $j=1,2,\ldots, n_2$ we have $\lim_{\varepsilon\thetao
0^+}\betaig(\sigmaum_{s=j}^{n_2}\|v_\ec P_s\|\betaig)\varepsilon^{j-1}\nabla^j_x
\betaig(v_\varepsilon(x)-h_\varepsilon(x)-\gammaamma_\varepsilon(v_\ec\nu\cdot x)\betaig)=0$ in $L^{
p_2}_{loc}(I_{v_\ec \nu},\varphiield{R}^{k\thetaimes N^j})$ and
\betaegin{equation}\label{hguiguiguyhuohyiojkhh}
\lim_{\varepsilon\thetao
0^+}\varphirac{1}{\varepsilon}\Big(v_\varepsilon(x)-h_\varepsilon(x)-\gammaamma_\varepsilon(v_\ec\nu\cdot
x)\Big)=0\quad\thetaext{in}\;\;L^{p_2}_{loc}(I_{v_\ec \nu},\varphiield{R}^k).
\varepsilonnd{equation}
\end{comment}
Then, there exist $\big\{\psi_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset
\mathcal{S}^{(n_2)}\big(\varphi^+,\varphi^-,I_{\vec\nu}\big)$
and
$\big\{f_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset
L^{p_1}(I_{\vec \nu},\mathbb{R}^d)$,
where
\begin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjddddhdkgkkknew}
\mathcal{S}^{(n)}\big(\varphi^+,\varphi^-,I_{\vec\nu}\big):=
\bigg\{z_\eta\in
C^n(\mathbb{R}^N,\mathbb{R}^k):\;z_\eta(y)=\varphi_0(y)\;\,\text{if}\;\,|y\cdot\vec\nu|\geq
1/2,\;\,\text{and}\;\,z_\eta\big(y+\vec \nu_j\big)=z_\eta(y)\;\forall
j=2,\ldots, N\bigg\},
\end{multline}
such that $\{\vec B\cdot \nabla f_\varepsilon\}\in W^{n_1,p_1}(I_{\vec
\nu},\mathbb{R}^m)$, $\,\lim_{\varepsilon\to 0^+}\psi_\varepsilon=\varphi_0$ in
$L^{p_2}(I_{\vec \nu},\mathbb{R}^k)$,
$\lim_{\varepsilon\to 0^+}\{\vec B\cdot \nabla f_\varepsilon\}=W_0$ in
$L^{p_1}(I_{\vec \nu},\mathbb{R}^m)$,
$\lim_{\varepsilon\to 0^+}\big(\varepsilon^{n_2}\,\vec Q\cdot\{\nabla^{n_2}
\psi_\varepsilon\}\big)=0$ in $L^{p_2}(I_{\vec \nu},\mathbb{R}^{l})$,
for every $j=1,\ldots, (n_2-1)$ we have $\lim_{\varepsilon\to
0^+}\big(\varepsilon^j\,\nabla^j \psi_\varepsilon\big)=0$ in $L^{p_2}(I_{\vec
\nu},\mathbb{R}^{k\times N^j})$,
and
\begin{multline}\label{ggfghjjhfhfjfhjkkknew}
\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^{n_1}\nabla^{n_1} \{\vec B\cdot\nabla
w_{\varepsilon}\},\ldots,\varepsilon\nabla\{\vec B\cdot\nabla w_{\varepsilon}\},\{\vec
B\cdot\nabla w_{\varepsilon}\}\Big\},
\Big\{\varepsilon^{n_2}\vec Q\cdot\nabla^{n_2}
\varphi_{\varepsilon},\varepsilon^{n_2-1}\nabla^{n_2-1}
\varphi_{\varepsilon},\ldots,\varphi_{\varepsilon}\Big\}\Bigg)dx \geq\\ \varliminf_{\varepsilon\to
0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^{n_1}\nabla^{n_1} \{\vec B\cdot\nabla
f_{\varepsilon}\},\ldots,\varepsilon\nabla\{\vec B\cdot\nabla f_{\varepsilon}\},\{\vec
B\cdot\nabla f_{\varepsilon}\}\Big\},
\Big\{\varepsilon^{n_2}\vec Q\cdot\nabla^{n_2}
\psi_{\varepsilon},\varepsilon^{n_2-1}\nabla^{n_2-1}
\psi_{\varepsilon},\ldots,\psi_{\varepsilon}\Big\}\Bigg)dx.
\end{multline}
\end{proposition}
\begin{proof}
\begin{comment}
Consider an arbitrary function
$\lambda(x)\in\mathcal{S}^{(n)}_2\betaig(\varphi^+,\varphi^-,I_{v_\ec\nu}\betaig)$
and set $\lambda_\varepsilon(x):=\lambda\betaig(x/(s_0\varepsilon)\betaig)$. Then clearly
$\lambda_\varepsilon(x)\thetao \varphi_0(x)$ in $L^p_{loc}(\varphiield{R}^N,\varphiield{R}^m)$. Moreover
clearly $\varepsilon^j\nabla^j\lambda_\varepsilon(x)\thetao 0$ as $\varepsilon\thetao 0^+$ in
$L^p_{loc}(\varphiield{R}^N,\varphiield{R}^{m\thetaimes N^j})$ for every $j\in\{1,2,\ldots,n\}$
and $\varepsilon^j\nabla^j\lambda_\varepsilon(x)$ is bounded in $L^\infty$ for every
$j\in\{0,1,\ldots,n\}$. Next define $m_\varepsilon(x):=s_0\varepsilon
h_0\betaig(x/(s_0\varepsilon)\betaig)+c_\varepsilon$. Then $l_\varepsilon(x):=v_\ec A\cdot \nabla
m_\varepsilon(x)=v_\ec A\cdot \nabla h_0\betaig(x/(s_0\varepsilon)\betaig)$. Thus $v_\ec
A\cdot \nabla m_\varepsilon(x)\thetao V_0(x)$ in $L^p_{loc}(\varphiield{R}^N,\varphiield{R}^d)$. Moreover
clearly $\varepsilon^j\nabla^j \{v_\ec A\cdot\nabla m_\varepsilon\}(x)\thetao 0$ as $\varepsilon\thetao
0^+$ in $L^p_{loc}(\varphiield{R}^N,\varphiield{R}^{d\thetaimes N^j})$ for every
$j\in\{1,2,\ldots,n\}$ and $\varepsilon^j\nabla^j \{v_\ec A\cdot\nabla
m_\varepsilon\}(x)$ is bounded in $L^\infty$ for every
$j\in\{0,1,\ldots,n\}$. Moreover, we clearly have
$\lambda_\varepsilon(x)\in\mathcal{S}^{(n)}_2\betaig(\varphi^+,\varphi^-,I_{v_\ec\nu}\betaig)$
and $m_\varepsilon(x)\in\mathcal{S}^{(n)}_1\betaig(V^+,V^-,I_{v_\ec\nu}\betaig)$ for
$\varepsilon>0$ sufficiently small.
\end{comment}
Consider a function
$\lambda(x)\in\mathcal{S}^{(n_2)}\betaig(\varphi^+,\varphi^-,I_{v_\ec\nu}\betaig)$,
such that $\lambda(x)\varepsilonquiv l_0(v_\ec\nu\cdot x)$ for some function
$l_0$ (i.e. $\lambda(x)$ depends actually only on the first real
variable in the base $\{v_\ec\nu_1,\ldots,v_\ec\nu_N\}$), and set
$\lambda_\varepsilon(x):=\lambda\betaig(x/\varepsilon\betaig)$. Then clearly
$\lambda_\varepsilon(x)\thetao \varphi_0(x)$ in $L^{p_2}_{loc}(\varphiield{R}^N,\varphiield{R}^k)$.
Moreover clearly $\varepsilon^j\nabla^j\lambda_\varepsilon(x)\thetao 0$ as $\varepsilon\thetao 0^+$ in
$L^{p_2}_{loc}(\varphiield{R}^N,\varphiield{R}^{k\thetaimes N^j})$ for every
$j\in\{1,\ldots,n_2\}$ and $\varepsilon^j\nabla^j\lambda_\varepsilon(x)$ is bounded in
$L^\infty$ for every $j\in\{0,1,\ldots,n_2\}$. Next define
$\thetaheta_\varepsilon(x):=\varphi_\varepsilon(x)-\lambda_\varepsilon(x)$
Then
clearly
$\,\lim_{\varepsilon\thetao 0^+}\thetaheta_\varepsilon=0$ in $L^{p_2}_{loc}(I_{v_\ec
\nu},\varphiield{R}^k)$,
$\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{n_2}\,v_\ec Q\cdot\{\nabla^{n_2}
\thetaheta_\varepsilon\}\betaig)=0$ in $L^{p_2}_{loc}(I_{v_\ec \nu},\varphiield{R}^{l})$,
and for every $j=1,\ldots, (n_2-1)$ we have $\lim_{\varepsilon\thetao
0^+}\betaig(\varepsilon^j\,\nabla^j \thetaheta_\varepsilon\betaig)=0$ in $L^{p_2}_{loc}(I_{v_\ec
\nu},\varphiield{R}^{k\thetaimes N^j})$.
Then by Lemma \ref{L2009.02newgen} there exist
$\betaig\{r_\varepsilon\betaig\}_{0<\varepsilon<1}\sigmaubset(0,1)$ and $\betaar\thetaheta_\varepsilon(x)\in
C^\infty_c\betaig(I_{v_\ec\nu},\varphiield{R}^m\betaig)$,
such that $\lim_{\varepsilon\thetao
0^+}r_\varepsilon=1$,
$\,\lim_{\varepsilon\thetao 0^+}\betaar\thetaheta_\varepsilon=0$ in $L^{p_2}(I_{v_\ec \nu},\varphiield{R}^k)$,
$\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{n_2}\,v_\ec Q\cdot\{\nabla^{n_2}
\betaar\thetaheta_\varepsilon\}\betaig)=0$ in $L^{p_2}(I_{v_\ec \nu},\varphiield{R}^{l})$,
for every $j=1,\ldots, (n_2-1)$ we have $\lim_{\varepsilon\thetao
0^+}\betaig(\varepsilon^j\,\nabla^j \betaar\thetaheta_\varepsilon\betaig)=0$ in $L^{p_2}(I_{v_\ec
\nu},\varphiield{R}^{k\thetaimes N^j})$,
and
\betaegin{multline}\label{ggfghjjhfhfjfhjkkknewggkgkggghjjjh}
\varliminf_{\varepsilon\thetao 0^+}\int_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^{n_1}\nabla^{n_1} \{v_\ec B\cdot\nabla
w_{\varepsilon}\},\ldots,\varepsilon\nabla\{v_\ec B\cdot\nabla w_{\varepsilon}\},\{v_\ec
B\cdot\nabla w_{\varepsilon}\}\Big\},
\Big\{\varepsilon^{n_2}v_\ec Q\cdot\nabla^{n_2}
\varphi_{\varepsilon},\varepsilon^{n_2-1}\nabla^{n_2-1}
\varphi_{\varepsilon},\ldots,\varphi_{\varepsilon}\Big\}\Bigg)dx=\\
\varliminf_{\varepsilon\thetao 0^+}\int_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^{n_1}\nabla^{n_1} \{v_\ec B\cdot\nabla
w_{\varepsilon}\},\ldots,\varepsilon\nabla\{v_\ec B\cdot\nabla
w_{\varepsilon}\},\{v_\ec B\cdot\nabla w_{\varepsilon}\}\Big\}\,,\\
\Big\{\varepsilon^{n_2}v_\ec Q\cdot\nabla^{n_2}
\betaig(\lambda_{\varepsilon}+\thetaheta_\varepsilon\betaig),\varepsilon^{n_2-1}\nabla^{n_2-1}
\betaig(\lambda_{\varepsilon}+\thetaheta_\varepsilon\betaig),\ldots,\betaig(\lambda_{\varepsilon}+\thetaheta_\varepsilon\betaig)\Big\}\Bigg)dx\gammaeq
\\
\varliminf_{\varepsilon\thetao 0^+}\int_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon}
F\Bigg(\betaigg\{\varepsilon^{n_1}\nabla^{n_1} \{v_\ec B\cdot\nabla
f_{\varepsilon}\},\ldots,\varepsilon\nabla\{v_\ec B\cdot\nabla
f_{\varepsilon}\},\{v_\ec B\cdot\nabla f_{\varepsilon}\}\betaigg\}\,,\\
\betaigg\{\varepsilon^{n_2}v_\ec Q\cdot\nabla^{n_2}
\betaig(\betaar\lambda_{\varepsilon}+\betaar\thetaheta_\varepsilon\betaig),\varepsilon^{n_2-1}\nabla^{n_2-1}
\betaig(\betaar\lambda_{\varepsilon}+\betaar\thetaheta_\varepsilon\betaig),\ldots,\betaig(\betaar\lambda_{\varepsilon}+\betaar\thetaheta_\varepsilon\betaig)\betaigg\}\Bigg)dx,
\varepsilonnd{multline}
where $f_\varepsilon(x):=w_{r_\varepsilon\varepsilon}(r_\varepsilon x)/r_\varepsilon$
and $\bar\lambda_\varepsilon(x):=\lambda_{r_\varepsilon\varepsilon}(r_\varepsilon x)$. Moreover, by the
same Lemma, $\lim_{\varepsilon\to 0^+}\{\vec B\cdot \nabla f_\varepsilon\}=W_0$ in
$L^{p_1}(I_{\vec \nu},\mathbb{R}^m)$.
On
the other hand, clearly
$\bar\lambda_\varepsilon(x)\in\mathcal{S}^{(n_2)}\big(\varphi^+,\varphi^-,I_{\vec\nu}\big)$
for
$\varepsilon>0$ sufficiently small.
Thus, since $\bar\theta_\varepsilon(x)\in
C^\infty_c\big(I_{\vec\nu},\mathbb{R}^m\big)$,
we have
$\psi_\varepsilon(x)\in\mathcal{S}^{(n_2)}\big(\varphi^+,\varphi^-,I_{\vec\nu}\big)$,
where $\psi_\varepsilon(x)\equiv \bar\lambda_{\varepsilon}(x)+\bar\theta_\varepsilon(x)$ for
every $x\in I_{\vec\nu}$.
So by \eqref{ggfghjjhfhfjfhjkkknewggkgkggghjjjh} we deduce
\eqref{ggfghjjhfhfjfhjkkknew}. On the other hand, since $r_\varepsilon\to 1^-$, we
easily obtain $\lim_{\varepsilon\to 0^+}\psi_\varepsilon=\varphi_0$ in
$L^{p_2}(I_{\vec \nu},\mathbb{R}^k)$,
$\lim_{\varepsilon\to 0^+}\big(\varepsilon^{n_2}\,\vec Q\cdot\{\nabla^{n_2}
\psi_\varepsilon\}\big)=0$ in $L^{p_2}(I_{\vec \nu},\mathbb{R}^{l})$,
and for every $j=1,\ldots, (n_2-1)$ we have $\lim_{\varepsilon\to
0^+}\big(\varepsilon^j\,\nabla^j \psi_\varepsilon\big)=0$ in $L^{p_3}(I_{\vec
\nu},\mathbb{R}^{k\times N^j})$.
This completes the proof.
\end{proof}
Next we have the following simple Lemma.
\begin{lemma}\label{nfjghfighfihjtfohjt}
Let $n_0\in\mathbb{N}$ and let $\big\{\varphi_\varepsilon(x)\big\}_{\varepsilon>0}\subset
W^{n_0,p}_{loc}\big(I_{\vec\nu},\mathbb{R}^m\big)$ be such that
$\varepsilon^{n_0}\nabla^{n_0} \varphi_\varepsilon\to 0$ in
$L^p_{loc}\big(I_{\vec\nu},\mathbb{R}^{m\times N^{n_0}}\big)$ and the
sequence $\big\{\varphi_\varepsilon(x)\big\}_{\varepsilon>0}$ is bounded in
$L^p_{loc}\big(I_{\vec\nu},\mathbb{R}^{m}\big)$, i.e. for every compactly
embedded open set $G\subset\subset I_{\vec\nu}$ there exists a
constant $\bar C:=\bar C(G)>0$ such that $\int_{G}|\varphi_\varepsilon|^p
dx\leq \bar C$. Then for every $j\in\{1,
\ldots,n_0\}$ we have
$\varepsilon^{j}\nabla^{j} \varphi_\varepsilon\to 0$ in
$L^p_{loc}\big(I_{\vec\nu},\mathbb{R}^{m\times N^{j}}\big)$.
\end{lemma}
\begin{proof}
Indeed, fix an arbitrary domain $U\subset\subset I_{\vec \nu}$ with a
smooth boundary. Then clearly
\begin{equation}\label{gfjfhjfgjhfhkdrydsgbnvfjggyhggghfgfgdfdddrrdnewllkk}
d_\varepsilon:=
\big\|\varepsilon^{n_0}\,\nabla^{n_0} \varphi_\varepsilon\big\|_{L^p(U)} \to
0\quad\text{as}\;\;\varepsilon\to 0^+\,.
\end{equation}
Moreover, clearly there exists $\bar C>0$ such that
$\int_{U}|\varphi_\varepsilon|^p dx\leq \bar C$. On the other hand, by
Theorem 7.28 in \cite{gt} there exists $C_0>0$, which depends only
on $U$, $p$ and $n_0$, such that for every $\sigma(x)\in
W^{n_0,p}(U,\mathbb{R}^m)$ and every $\tau>0$ we have
\begin{equation}\label{gfjfhjfgjhfhkdrydsgnewllkk}
\big\|\nabla^j\sigma(x)\big\|_{L^p(U)}\leq \tau
\big\|\sigma(x)\big\|_{W^{n,p}(U)}+C_0\tau^{-j/(n-j)}\big\|\sigma(x)\big\|_{L^p(U)}\quad\quad\forall\;
2\leq n\leq n_0\,,\;\;1\leq j<n\,.
\end{equation}
Thus, in particular, we deduce from \eqref{gfjfhjfgjhfhkdrydsgnewllkk}
that there exists $C_1>0$, which depends only on $U$, $p$ and $n_0$,
such that for every $\sigma(x)\in W^{n_0,p}(U,\mathbb{R}^m)$ and every
$\tau\in (0,1)$ we have
\begin{equation}\label{gfjfhjfgjhfhkdrydsghgghgfgffgfggnewllkk}
\big\|\tau^j\nabla^j\sigma(x)\big\|_{L^p(U)}\leq
\big\|\tau^{n_0}\nabla^{n_0}\sigma(x)\big\|_{L^p(U)}+C_1\big\|\sigma(x)\big\|_{L^p(U)}\quad\quad\forall\;
1\leq j<n_0\,.
\end{equation}
Then, setting $\tau:=\varepsilon\cdot(d_\varepsilon)^{-1/n_0}$, where $d_\varepsilon$ is defined
by \eqref{gfjfhjfgjhfhkdrydsgbnvfjggyhggghfgfgdfdddrrdnewllkk}, using
\eqref{gfjfhjfgjhfhkdrydsghgghgfgffgfggnewllkk} and the fact that
$\int_{U}|\varphi_\varepsilon|^pdx\leq \bar C$, we obtain
\begin{equation}\label{gfjfhjfgjhfhkdrydsghgghgfgffgfggjhhgkhhhlllhhljjggjkgkjknewllkk}
\big\|\varepsilon^j\nabla^j\varphi_\varepsilon(x)\big\|_{L^p(U)}\leq \hat C
d_\varepsilon^{j/n_0}\quad\quad\forall\; 1\leq j<n_0\,,
\end{equation}
where $\hat C>0$ does not depend on $\varepsilon$. Thus, using
\eqref{gfjfhjfgjhfhkdrydsgbnvfjggyhggghfgfgdfdddrrdnewllkk}, we deduce
\begin{equation*}
\big\|\varepsilon^j\nabla^j\varphi_\varepsilon(x)\big\|_{L^p(U)}\to
0\quad\text{as}\;\;\varepsilon\to 0^+\quad\forall\; 1\leq j<n_0\,.
\end{equation*}
Therefore, since the domain with a smooth boundary $U\subset\subset
I_{\vec \nu}$ was chosen arbitrarily, we finally deduce that
$\lim_{\varepsilon\to 0^+}\big(\varepsilon^{j}\,\nabla^{j} \varphi_\varepsilon\big)=0$ in
$L^p_{loc}(I_{\vec \nu},\mathbb{R}^{m\times N^{j}})$ for every
$j\in\{1,
\ldots,n_0\}$. This completes the proof.
\end{proof}
Combining Lemma \ref{nfjghfighfihjtfohjt} with the particular case
of Proposition \ref{L2009.02kkknew}, we get the following Theorem.
\begin{theorem}\label{L2009.02kkkjkhkjhnew}
Let $n_1,n_2\in \mathbb{N}$. Consider the linear operator
$\vec Q\in\mathcal{L}(\mathbb{R}^{m\times N^{n_2}},\mathbb{R}^{l})$, which satisfies
\begin{itemize}
\item
either $\vec Q=Id$
\item
or $n_2=1$.
\end{itemize}
Next,
assume that $F$ is a continuous function, defined on
$$\Big\{\mathbb{R}^{k\times N^{n_1+1}}\times\ldots\times\mathbb{R}^{k\times
N^2}\times\mathbb{R}^{k\times N}\Big\}\times\Big\{\mathbb{R}^{d\times
N^{n_1+1}}\times\ldots\times\mathbb{R}^{d\times N^2}\times\mathbb{R}^{d\times
N}\Big\}\times
\Big\{\mathbb{R}^{l}\times\mathbb{R}^{m\times
N^{n_2-1}}\times\ldots\times\mathbb{R}^{m\times N}\times\mathbb{R}^m\Big\},$$ taking
values in $\mathbb{R}$ and satisfying $F\geq 0$. Moreover, assume that there
exist $C>0$ and $p_1\geq 1$, $p_2\geq 1$ satisfying
\begin{multline}\label{hgdfvdhvdhfvnew}
\frac{|c_1|^{p_2}}{C}\leq F\Big(\{a_1,\ldots,
a_{n_1+1}\},\{b_1,\ldots, b_{n_1+1}\},\{c_1,\ldots,c_{n_2+1}\}
\Big)\leq
C\bigg(\sum_{j=1}^{n_1+1}|a_j|^{p_1}+\sum_{j=1}^{n_1+1}|b_j|^{p_1}+\sum_{j=1}^{n_2+1}|c_j|^{p_2}
+1\bigg)\\ \text{for every}\;\; \Big(\{a_1,\ldots,
a_{n_1+1}\},\{b_1,\ldots,b_{n_1+1}\},\{c_1,\ldots,c_{n_2+1}\}
\Big).
\end{multline}
Furthermore, let
$\vec\nu\in S^{N-1}$, $\varphi^+,\varphi^-\in\mathbb{R}^m$,
$V^+,V^-\in\mathbb{R}^{k\times N}$ and $W^+,W^-\in \mathbb{R}^{d\times N}$ be such
that if we set
\begin{equation*}
\varphi_0(x): =\begin{cases}
\varphi^+\;\;\text{if}\;\;x\cdot\vec\nu>0,\\
\varphi^-\;\;\text{if}\;\;x\cdot\vec\nu<0,
\end{cases}
\; V_0(x): =\begin{cases}
V^+\;\;\text{if}\;\;x\cdot\vec\nu>0,\\
V^-\;\;\text{if}\;\;x\cdot\vec\nu<0,
\end{cases}
\;\text{and}\quad\; W_0(x): =\begin{cases}
W^+\;\;\text{if}\;\;x\cdot\vec\nu>0,\\
W^-\;\;\text{if}\;\;x\cdot\vec\nu<0,
\end{cases}
\end{equation*}
then
\begin{equation*}
F\Big(\big\{0,0,\ldots,V_0(x)\big\}, \big\{0,0,\ldots,W_0(x)\big\},
\big\{0,0,\ldots,\varphi_0(x)\big\}\Big)=0\quad\text{for
a.e.}\;x\in\mathbb{R}^N\,.
\end{equation*}
Next,
consider $\big\{v_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset
W^{n_1+1,p_1}_{loc}(I_{\vec \nu},\mathbb{R}^k)$,
$\big\{m_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset
W^{n_1,p_1}_{loc}(I_{\vec \nu},\mathbb{R}^{d\times N})$
and $\big\{\varphi_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset
W^{n_2,p_2}_{loc}(I_{\vec \nu},\mathbb{R}^m)$, such that $\,\operatorname{div} m_\varepsilon\equiv
0$, $\,\lim_{\varepsilon\to 0^+}\varphi_\varepsilon=\varphi_0$ in
$L^{p_2}_{loc}(I_{\vec \nu},\mathbb{R}^k)$, $\,\lim_{\varepsilon\to 0^+}m_\varepsilon=W_0$ in
$L^{p_1}_{loc}(I_{\vec \nu},\mathbb{R}^{d\times N})$ and $\lim_{\varepsilon\to
0^+}\nabla v_\varepsilon=V_0$ in $W^{1,p_1}_{loc}(I_{\vec \nu},\mathbb{R}^k)$.
Here, as before, $I_{\vec \nu}:=\{y\in\mathbb{R}^N:\;|y\cdot
\vec\nu_j|<1/2\;\;\;\forall j=1,\ldots, N\}$ where
$\{\vec\nu_1,\ldots,\vec\nu_N\}\subset\mathbb{R}^N$ is an orthonormal basis
in $\mathbb{R}^N$ such that $\vec\nu_1:=\vec \nu$.
Then, there exist $\big\{\psi_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset
\mathcal{S}^{(n_2)}\big(\varphi^+,\varphi^-,I_{\vec\nu}\big)$,
$\big\{h_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset
W^{n_1,p_1}(I_{\vec \nu},\mathbb{R}^{d\times N})$
and $\big\{u_\varepsilon(x)\big\}_{0<\varepsilon<1}\subset
W^{n_1+1,p_1}(I_{\vec \nu},\mathbb{R}^k)$,
where
\begin{multline*}
\mathcal{S}^{(n)}\big(\varphi^+,\varphi^-,I_{\vec\nu}\big):=
\bigg\{\zeta\in
C^n(\mathbb{R}^N,\mathbb{R}^m):\;\zeta(y)=\varphi_0(y)\;\,\text{if}\;\,|y\cdot\vec\nu|\geq
1/2,\;\,\text{and}\;\,\zeta\big(y+\vec \nu_j\big)=\zeta(y)\;\forall
j=2,\ldots, N\bigg\},
\end{multline*}
such that $\,\operatorname{div} h_\varepsilon\equiv 0$, $\,\lim_{\varepsilon\to
0^+}\psi_\varepsilon=\varphi_0$ in $L^{p_2}(I_{\vec \nu},\mathbb{R}^m)$,
$\,\lim_{\varepsilon\to 0^+}h_\varepsilon=W_0$ in $L^{p_1}(I_{\vec \nu},\mathbb{R}^{d\times
N})$,
$\lim_{\varepsilon\to 0^+}\nabla u_\varepsilon=V_0$ in $W^{1,p_1}(I_{\vec \nu},\mathbb{R}^k)$,
and
\begin{multline*}
\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^{n_1}\nabla^{n_1+1}v_{\varepsilon},\ldots,\varepsilon\nabla^2
v_{\varepsilon},\nabla v_{\varepsilon}\Big\},\,
\Big\{\varepsilon^{n_1}\nabla^{n_1}m_{\varepsilon},\ldots,\varepsilon\nabla m_{\varepsilon},
m_{\varepsilon}\Big\},\\
\Big\{\varepsilon^{n_2}\vec Q\cdot\nabla^{n_2}
\varphi_{\varepsilon},\varepsilon^{n_2-1}\nabla^{n_2-1}
\varphi_{\varepsilon},\ldots,\varphi_{\varepsilon}\Big\}\Bigg)dx \geq\\ \varliminf_{\varepsilon\to
0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^{n_1}\nabla^{n_1+1}u_{\varepsilon},\ldots,\varepsilon\nabla^2
u_{\varepsilon},\nabla
u_{\varepsilon}\Big\},\,\Big\{\varepsilon^{n_1}\nabla^{n_1}h_{\varepsilon},\ldots,\varepsilon\nabla
h_{\varepsilon}, h_{\varepsilon}\Big\},\\
\Big\{\varepsilon^{n_2}\vec Q\cdot\nabla^{n_2}
\psi_{\varepsilon},\varepsilon^{n_2-1}\nabla^{n_2-1}
\psi_{\varepsilon},\ldots,\psi_{\varepsilon}\Big\}\Bigg)dx.
\end{multline*}
\end{theorem}
\begin{proof}
It is clear that without any loss of generality we may assume
\begin{multline}\label{ggfghjjhfhfjfhjkkkhjkgghghhjhhbbhjjhbnew}
\varliminf_{\varepsilon\to 0^+}\int_{I_{\vec \nu}}\frac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^{n_1}\nabla^{n_1+1}v_{\varepsilon},\ldots,\varepsilon\nabla^2
v_{\varepsilon},\nabla v_{\varepsilon}\Big\},\,
\Big\{\varepsilon^{n_1}\nabla^{n_1}m_{\varepsilon},\ldots,\varepsilon\nabla m_{\varepsilon},
m_{\varepsilon}\Big\},\\
\Big\{\varepsilon^{n_2}\vec Q\cdot\nabla^{n_2}
\varphi_{\varepsilon},\varepsilon^{n_2-1}\nabla^{n_2-1}
\varphi_{\varepsilon},\ldots,\varphi_{\varepsilon}\Big\}\Bigg)dx<+\infty.
\end{multline}
Then, by \eqref{hgdfvdhvdhfvnew} and
\eqref{ggfghjjhfhfjfhjkkkhjkgghghhjhhbbhjjhbnew},
we deduce that $\lim_{\varepsilon\to 0^+}\big(\varepsilon^{n_2}\,\vec
Q\cdot\{\nabla^{n_2} \varphi_\varepsilon\}\big)=0$ in $L^{p_2}_{loc}(I_{\vec
\nu},\mathbb{R}^{l})$. Next, recall that we assumed that
\begin{itemize}
\item
either $\vec Q=Id$
\item
or $n_2=1$.
\end{itemize}
So, in any case, by Lemma \ref{nfjghfighfihjtfohjt} we deduce that
for every $1\leq j<n_2$ we have $\lim_{\varepsilon\to
0^+}\big(\varepsilon^{j}\,\nabla^{j} \varphi_\varepsilon\big)=0$ in
$L^{p_2}_{loc}(I_{\vec \nu},\mathbb{R}^{m\times N^{j}})$.
Thus, applying Proposition \ref{L2009.02kkknew}
completes the proof.
\end{proof}
\begin{comment}
\betaegin{theorem}\label{L2009.02kkkjkhkjhnew}
Let $n,m\in\mathbb{N}$, $v_\ec P\in\mathcal{L}(\varphiield{R}^{k\thetaimes N\thetaimes
N^n},\varphiield{R}^d)$ and $v_\ec Q\in\mathcal{L}(\varphiield{R}^{l\thetaimes N^m},\varphiield{R}^q)$ be
linear operators and
let $F\in C^0\betaig(\{\varphiield{R}^{d}\thetaimes\varphiield{R}^{k\thetaimes
N^{n}}\thetaimes\ldots\thetaimes\varphiield{R}^{k\thetaimes N\thetaimes N}\thetaimes\varphiield{R}^{k\thetaimes
N}\}\thetaimes \{\varphiield{R}^{q}\thetaimes\varphiield{R}^{l\thetaimes
N^{m-1}}\thetaimes\ldots\thetaimes\varphiield{R}^{l\thetaimes N}\thetaimes\varphiield{R}^l\}\,,\varphiield{R}\betaig)$ be
such that $F\gammaeq 0$ and there exist $C>0$ and $p_1,p_2\gammaeq 1$
satisfying
\betaegin{multline}\label{hgdfvdhvdhfvnew}
\varphirac{1}{C}\Big(|A|^{p_1}+|B|^{p_2}\Big)
\leq
F\Big(\{A,a_1,a_2,\ldots a_{n-1},c\},\{B,b_1,b_2,\ldots,b_{m-1},e\}\Big)\leq\\
\leq
C\betaigg(|A|^{p_1}+|B|^{p_2}+\sigmaum_{j=1}^{n-1}|a_j|^{p_1}+\sigmaum_{j=1}^{m-1}|b_j|^{p_2}+|c|^{p_1}+|e|^{p_2}+1\betaigg)\\
\thetaext{for every}\;\;\Big(\{A,a_1,a_2,\ldots
a_{n-1},c\},\{B,b_1,b_2,\ldots,b_{m-1},e\}\Big) \,.
\varepsilonnd{multline}
Furthermore let $v_\ec k\in\varphiield{R}^k$, $v_\ec\nu\in S^{N-1}$,
$\varphi^+,\varphi^-\in\varphiield{R}^l$ and $V^+,V^-\in \varphiield{R}^{k\thetaimes N}$ be such
that
$V^+-V^-=v_\ec k\omegatimesv_\ec\nu$ and
$F\betaig(\{0,\ldots,0,V^+\},\{0,\ldots,0,\varphi^+\}\betaig)=F\betaig(\{0,\ldots,0,V^-\},\{0,\ldots,0,\varphi^-\}\betaig)=0$.
Set $\varphi(x)\in L^\infty(\varphiield{R}^N,\varphiield{R}^l)$ and $v(x):Lip(\varphiield{R}^N,\varphiield{R}^k)$ by
\betaegin{equation}\label{ghgghjhjkdfhgkkkvfggghhhhbnew}
\varphi(x):=\betaegin{cases}
\varphi^+\quad\thetaext{if}\;\;x\cdotv_\ec\nu>0\,,\\
\varphi^-\quad\thetaext{if}\;\;x\cdotv_\ec\nu<0\,,
\varepsilonnd{cases}\quad\quad
v(x):=\betaegin{cases}
V^-\cdot x+(x\cdotv_\ec\nu)v_\ec k\quad\thetaext{if}\;\;x\cdotv_\ec\nu\gammaeq 0\,,\\
V^-\cdot x\quad\quad\quad\thetaext{if}\;\;x\cdotv_\ec\nu<0\,.
\varepsilonnd{cases}
\varepsilonnd{equation}
Next
let $\betaig\{v_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset W^{(n+1),p_1}_{loc}(I_{v_\ec
\nu},\varphiield{R}^k)$ and $\betaig\{\varphi_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset
W^{m,p_2}_{loc}(I_{v_\ec \nu},\varphiield{R}^l)$ be such that $\,\lim_{\varepsilon\thetao
0^+}\varphi_\varepsilon=\varphi$ in $L^{p_2}_{loc}(I_{v_\ec \nu},\varphiield{R}^l)$,
$\lim_{\varepsilon\thetao 0^+}v_\varepsilon=v$ in $W^{1,p_1}_{loc}(I_{v_\ec \nu},\varphiield{R}^k)$,
and $\lim_{\varepsilon\thetao 0^+}\betaig\{(v_\varepsilon-v)/\varepsilon\betaig\}=0$ in
$L^{p_1}_{loc}(I_{v_\ec \nu},\varphiield{R}^k)$, where, as before, $I_{v_\ec
\nu}:=\{y\in\varphiield{R}^N:\;|y\cdot v_\ec\nu_j|<1/2\;\;\;\varphiorall j=1,2\ldots
N\}$ where $\{v_\ec\nu_1,v_\ec\nu_2,\ldots,v_\ec\nu_N\}\sigmaubset\varphiield{R}^N$ is
an orthonormal base in $\varphiield{R}^N$ such that $v_\ec\nu_1:=v_\ec \nu$.
Moreover assume that,
\betaegin{itemize}
\item
either $v_\ec P=Id$ or $n=1$ and
\item
either $v_\ec Q=Id$ or $m=1$.
\varepsilonnd{itemize}
Then
\betaegin{multline}\label{ggfghjjhfhfjfhjkkkhjkgghghnew}
\varliminf_{\varepsilon\thetao 0^+}\int\limits_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^nv_\ec P\cdot\{\nabla^{n+1} v_{\varepsilon}\},
\varepsilon^{n-1}\nabla^n v_{\varepsilon}, \ldots,\varepsilon\nabla^2 v_{\varepsilon},\nabla
v_{\varepsilon}\Big\}, \Big\{ \varepsilon^mv_\ec Q\cdot\{\nabla^m \varphi_{\varepsilon}\},
\varepsilon^{n-1}\nabla^{n-1}\varphi_{\varepsilon}, \ldots,\varepsilon\nabla
\varphi_{\varepsilon},\varphi_{\varepsilon}\Big\}\Bigg)dx\\ \gammaeq
E^{(n,m)}_{per}\Big(V^+,\varphi^+,V^-,\varphi^-,v_\ec\nu\Big).
\varepsilonnd{multline}
where
\betaegin{multline}\label{L2009hhffff12kkknew}
E^{(n,m)}_{per}\Big(V^+,\varphi^+,V^-,\varphi^-,v_\ec\nu\Big)\;:=\;\\
\inf\Bigg\{\int_{I_{v_\ec \nu}}\varphirac{1}{L} F\betaigg(\Big\{L^nv_\ec
P\cdot\{\nabla^{n+1} \xi\},L^{n-1}\nabla^{n} \xi,\ldots,L\nabla^2
\xi,\nabla \xi\Big\},\Big\{ L^mv_\ec Q\cdot\{\nabla^m
z_\eta\},L^{m-1}\nabla^{m-1} z_\eta,\ldots,L\,\nabla z_\eta,z_\eta\Big\}\betaigg)\,dx:\\
L\in(0,+\infty)\,,\; \xi\in
\mathcal{S}^{(n)}_1(V^+,V^-,I_{v_\ec\nu})\,,\;z_\eta\in
\mathcal{S}^{(n)}_2(\varphi^+,\varphi^-,I_{v_\ec\nu})\Bigg\}\,,
\varepsilonnd{multline}
with
\betaegin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjfgnhfkkkghgnew}
\mathcal{S}^{(n)}_1\betaig(V^+,V^-,I_{v_\ec\nu}\betaig):=
\betaigg\{\xi\in C^{n+1}(\varphiield{R}^N,\varphiield{R}^k):\;\;\nabla \xi(y)=V^-\;\thetaext{ if }\;y\cdotv_\ec\nu\leq-1/2,\\
\nabla \xi(y)=V^+\;\thetaext{ if }\; y\cdotv_\ec\nu\gammaeq 1/2\;\thetaext{ and
}\;\nabla \xi\betaig(y+v_\ec\nu_j\betaig)=\nabla \xi(y)\;\;\varphiorall
j=2,3,\ldots, N\betaigg\}\,,
\varepsilonnd{multline}
and
\betaegin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjddddhdkgkkkhgghnew}
\mathcal{S}^{(m)}_2\betaig(\varphi^+,\varphi^-,I_{v_\ec\nu}\betaig):=
\betaigg\{z_\eta\in C^m(\varphiield{R}^N,\varphiield{R}^l):\;\;z_\eta(y)=\varphi^-\;\thetaext{ if }\;y\cdotv_\ec\nu\leq-1/2,\\
z_\eta(y)=\varphi^+\;\thetaext{ if }\; y\cdotv_\ec\nu\gammaeq 1/2\;\thetaext{ and
}\;z_\eta\betaig(y+v_\ec \nu_j\betaig)=z_\eta(y)\;\;\varphiorall j=2,3,\ldots,
N\betaigg\}\,.
\varepsilonnd{multline}
\betaegin{comment*}
exist $\betaig\{u_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset
\mathcal{S}^{(n)}_1\betaig(V^+,V^-,I_{v_\ec\nu}\betaig)$ and
$\betaig\{\psi_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset
\mathcal{S}^{(n)}_2\betaig(\varphi^+,\varphi^-,I_{v_\ec\nu}\betaig)$, where
\betaegin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjfgnhfkkkghgnew}
\mathcal{S}^{(n)}_1\betaig(V^+,V^-,I_{v_\ec\nu}\betaig):=
\betaigg\{\xi\in C^{n+1}(\varphiield{R}^N,\varphiield{R}^k):\;\;\nabla \xi(y)=V^-\;\thetaext{ if }\;y\cdotv_\ec\nu\leq-1/2,\\
\nabla \xi(y)=V^+\;\thetaext{ if }\; y\cdotv_\ec\nu\gammaeq 1/2\;\thetaext{ and
}\;\nabla \xi\betaig(y+v_\ec\nu_j\betaig)=\nabla \xi(y)\;\;\varphiorall
j=2,3,\ldots, N\betaigg\}\,,
\varepsilonnd{multline}
and
\betaegin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjddddhdkgkkkhgghnew}
\mathcal{S}^{(n)}_2\betaig(\varphi^+,\varphi^-,I_{v_\ec\nu}\betaig):=
\betaigg\{z_\eta\in C^m(\varphiield{R}^N,\varphiield{R}^l):\;\;z_\eta(y)=\varphi^-\;\thetaext{ if }\;y\cdotv_\ec\nu\leq-1/2,\\
z_\eta(y)=\varphi^+\;\thetaext{ if }\; y\cdotv_\ec\nu\gammaeq 1/2\;\thetaext{ and
}\;z_\eta\betaig(y+v_\ec \nu_j\betaig)=z_\eta(y)\;\;\varphiorall j=2,3,\ldots,
N\betaigg\}\,,
\varepsilonnd{multline}
such that $\,\lim_{\varepsilon\thetao 0^+}\psi_\varepsilon=\varphi$ in
$L^{p}_{loc}(I_{v_\ec \nu},\varphiield{R}^l)$, $\lim_{\varepsilon\thetao 0^+}u_\varepsilon=v$ in
$W^{1,p}_{loc}(I_{v_\ec \nu},\varphiield{R}^k)$, $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^n\,v_\ec
P\cdot\{\nabla^{n+1} u_\varepsilon\}\betaig)=0$ in $L^p_{loc}(I_{v_\ec
\nu},\varphiield{R}^{d})$, $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^n\,v_\ec Q\cdot\{\nabla^m
\psi_\varepsilon\}\betaig)=0$ in $L^p_{loc}(I_{v_\ec \nu},\varphiield{R}^{q})$
and
\betaegin{multline}\label{ggfghjjhfhfjfhjkkkhjkgghghnew}
\varliminf_{\varepsilon\thetao 0^+}\int\limits_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^nv_\ec P\cdot\{\nabla^{n+1} v_{\varepsilon}\},
\ldots,\varepsilon\nabla^2 v_{\varepsilon},\nabla v_{\varepsilon}\Big\}, \Big\{ \varepsilon^nv_\ec
Q\cdot\{\nabla^n \varphi_{\varepsilon}\},
\ldots,\varepsilon\nabla
\varphi_{\varepsilon},\varphi_{\varepsilon}\Big\}\Bigg)dx\gammaeq\\
\varliminf_{\varepsilon\thetao 0^+}\int\limits_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^nv_\ec P\cdot\{\nabla^{n+1} u_{\varepsilon}\},
\ldots,\varepsilon\nabla^2 u_{\varepsilon},\nabla u_{\varepsilon}\Big\}, \Big\{ \varepsilon^nv_\ec
Q\cdot\{\nabla^n \psi_{\varepsilon}\},
\ldots,\varepsilon\nabla \psi_{\varepsilon},\psi_{\varepsilon}\Big\}\Bigg)dx.
\varepsilonnd{multline}
Moreover, for every $1\leq j<n$ we have $\lim_{\varepsilon\thetao
0^+}\betaig(\varepsilon^{j}\,\nabla^{j+1} u_\varepsilon\betaig)=0$ in $L^p_{loc}(I_{v_\ec
\nu},\varphiield{R}^{k\thetaimes N^{j+1}})$ and for every $1\leq j<m$ we have
$\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{j}\,\nabla^{j} \psi_\varepsilon\betaig)=0$ in
$L^p_{loc}(I_{v_\ec \nu},\varphiield{R}^{l\thetaimes N^{j}})$.
\varepsilonnd{comment*}
\varepsilonnd{theorem}
\betaegin{proof}
First of all without loss of generality we can assume $v_\ec\nu=v_\ec
e_1:=(1,0,\ldots,0)$. Next it is clear that without any loss of
generality we may assume
\betaegin{equation}\label{ggfghjjhfhfjfhjkkkhjkgghghhjhhbbhjjhbnew}
\varlimsup_{\varepsilon\thetao 0^+}\int\limits_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^nv_\ec P\cdot\{\nabla^{n+1}
v_{\varepsilon}\},\ldots,\varepsilon\nabla^2 v_{\varepsilon},\nabla v_{\varepsilon}\Big\}, \Big\{
\varepsilon^mv_\ec Q\cdot\{\nabla^m \varphi_{\varepsilon}\},\ldots,\varepsilon\nabla
\varphi_{\varepsilon},\varphi_{\varepsilon}\Big\}\Bigg)dx <\infty\,.
\varepsilonnd{equation}
Then by \varepsilonr{hgdfvdhvdhfvnew} and
\varepsilonr{ggfghjjhfhfjfhjkkkhjkgghghhjhhbbhjjhbnew}
we deduce that $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^n\,v_\ec P\cdot\{\nabla^{n+1}
v_\varepsilon\}\betaig)=0$ in $L^{p_1}_{loc}(I_{v_\ec \nu},\varphiield{R}^{d})$ and
$\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^n\,v_\ec Q\cdot\{\nabla^n
\varphi_\varepsilon\}\betaig)=0$ in $L^{p_2}_{loc}(I_{v_\ec \nu},\varphiield{R}^{q})$. Next
remember we assumed that
\betaegin{itemize}
\item
either $v_\ec P=Id$ or $n=1$ and
\item
either $v_\ec Q=Id$ or $m=1$.
\varepsilonnd{itemize}
\betaegin{comment*}
In the first case consider $n_0:=n$ and in the second case consider
$n_0:=n-1$. Thus in any case we have $\lim_{\varepsilon\thetao
0^+}\betaig(\varepsilon^{n_0}\,\nabla^{n_0+1} v_\varepsilon\betaig)=0$ in $L^p_{loc}(I_{v_\ec
\nu},\varphiield{R}^{k\thetaimes N^{n_0+1}})$ and\, $\lim_{\varepsilon\thetao
0^+}\betaig(\varepsilon^{n_0}\,\nabla^{n_0} \varphi_\varepsilon\betaig)=0$ in
$L^p_{loc}(I_{v_\ec \nu},\varphiield{R}^{m\thetaimes N^{n_0}})$. We will prove now
that $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{j}\,\nabla^{j+1} v_\varepsilon\betaig)=0$ in
$L^p_{loc}(I_{v_\ec \nu},\varphiield{R}^{k\thetaimes N^{j+1}})$ and\, $\lim_{\varepsilon\thetao
0^+}\betaig(\varepsilon^{j}\,\nabla^{j} \varphi_\varepsilon\betaig)=0$ in $L^p_{loc}(I_{v_\ec
\nu},\varphiield{R}^{m\thetaimes N^{j}})$ for every $j\in\{1,2,\ldots,n_0\}$. Indeed
fix an arbitrary domain $U\sigmaubset\sigmaubset I_{v_\ec \nu}$ with a smooth
boundary. Then clearly
\betaegin{equation}\label{gfjfhjfgjhfhkdrydsgbnvfjggyhggghfgfgdfdddrrdnew}
d_e:=\int_U\betaigg(\betaig|\varepsilon^{n_0}\,\nabla^{n_0+1}
v_\varepsilon\betaig|^p+\betaig|\varepsilon^{n_0}\,\nabla^{n_0} \varphi_\varepsilon\betaig|^p\betaigg)\thetao
0\quad\thetaext{as}\;\;\varepsilon\thetao 0^+\,.
\varepsilonnd{equation}
Moreover clearly there exists $\betaar C>0$ such that
$\int_{U}\betaig(|v_\varepsilon|^p+|\varphi_\varepsilon|^p\betaig)dx\leq \betaar C$. From the
other hand by Theorem 7.28 in \cite{gt} there exists $C_0>0$, which
depends only on $U$ $p$ and $n_0$, such that for every $\sigmaigma(x)\in
W^{n_0,p}(U,\varphiield{R})$ and every $\thetaau>0$ we have
\betaegin{equation}\label{gfjfhjfgjhfhkdrydsgnew}
\betaig\|\nabla^j\sigmaigma(x)\betaig\|_{L^p(U)}\leq \thetaau
\betaig\|\sigmaigma(x)\betaig\|_{W^{n,p}(U)}+C_0\thetaau^{-j/(n-j)}\betaig\|\sigmaigma(x)\betaig\|_{L^p(U)}\quad\quad\varphiorall\;
2\leq n\leq n_0\,,\;\;1\leq j<n\,.
\varepsilonnd{equation}
Thus in particular we deduce from \varepsilonr{gfjfhjfgjhfhkdrydsgnew} that
there exists $C_1>0$, which depends only on $U$ $p$ and $n_0$, such
that for every $\sigmaigma(x)\in W^{n_0,p}(U,\varphiield{R})$ and every $\thetaau\in
(0,1)$ we have
\betaegin{equation}\label{gfjfhjfgjhfhkdrydsghgghgfgffgfggnew}
\betaig\|\thetaau^j\nabla^j\sigmaigma(x)\betaig\|_{L^p(U)}\leq
\betaig\|\thetaau^{n_0}\nabla^{n_0}\sigmaigma(x)\betaig\|_{L^p(U)}+C_1\betaig\|\sigmaigma(x)\betaig\|_{L^p(U)}\quad\quad\varphiorall\;
1\leq j<n_0\,.
\varepsilonnd{equation}
Then setting $\thetaau:=\varepsilon\cdot(d_\varepsilon)^{-1/n_0}$, where $d_\varepsilon$ is defined
by \varepsilonr{gfjfhjfgjhfhkdrydsgbnvfjggyhggghfgfgdfdddrrdnew}, using
\varepsilonr{gfjfhjfgjhfhkdrydsghgghgfgffgfggnew} and the fact that
$\int_{U}\betaig(|v_\varepsilon|^p+|\varphi_\varepsilon|^p\betaig)dx\leq \betaar C$ we obtain
\betaegin{equation}\label{gfjfhjfgjhfhkdrydsghgghgfgffgfggjhhgkhhhlllhhljjggjkgkjknew}
\betaig\|\varepsilon^j\nabla^j\varphi_\varepsilon(x)\betaig\|_{L^p(U)}+\betaig\|\varepsilon^j\nabla^{j+1}
v_\varepsilon(x)\betaig\|_{L^p(U)}\leq \hat C d_\varepsilon^{j/n_0}\quad\quad\varphiorall\;
1\leq j<n_0\,,
\varepsilonnd{equation}
where $\hat C>0$ dose not depend on $\varepsilon$. Thus using
\varepsilonr{gfjfhjfgjhfhkdrydsgbnvfjggyhggghfgfgdfdddrrdnew} we deduce
\betaegin{equation*}
\betaig\|\varepsilon^j\nabla^j\varphi_\varepsilon(x)\betaig\|_{L^p(U)}+\betaig\|\varepsilon^j\nabla^{j+1}
v_\varepsilon(x)\betaig\|_{L^p(U)}\thetao 0\quad\thetaext{as}\;\;\varepsilon\thetao 0^+\quad\varphiorall\;
1\leq j<n_0\,,
\varepsilonnd{equation*}
Therefore, since the domain with a smooth boundary $U\sigmaubset\sigmaubset
I_{v_\ec \nu}$ was chosen arbitrary,
compact $K\sigmaubset\sigmaubset I_{v_\ec\nu}$ and consider $\sigmaigma(x)\in
C^\infty_c(I_{v_\ec\nu},\varphiield{R})$ such that $\sigmaigma(x)=1$ for every $x\in
K$. Then define $u_\varepsilon(x):=\sigmaigma(x)\cdot v_\varepsilon(x)$ and
$\psi_\varepsilon(x):=\sigmaigma(x)\cdot \varphi_\varepsilon(x)$ for every $x\in\varphiield{R}^N$.
Then clearly $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{n_0}\,\nabla^{n_0+1}
u_\varepsilon\betaig)=0$ in $L^p(\varphiield{R}^N,\varphiield{R}^{k\thetaimes N^{n_0+1}})$ and
$\,\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{n_0}\,\nabla^{n_0} \psi_\varepsilon\betaig)=0$ in
$L^p(\varphiield{R}^N,\varphiield{R}^{m\thetaimes N^{n_0}})$. Moreover clearly there exists
$C_0>0$ such that $\int_{\varphiield{R}^N}\betaig(|u_\varepsilon|^p+|\psi_\varepsilon|^p\betaig)dx\leq
C_0$. Thus if we denote by $\hat\psi_\varepsilon$ the Transform of Furier of
$\psi_\varepsilon$, by $\hat u_\varepsilon$ the Transform of Furier of $u_\varepsilon$ and
$p^*:=p/(p-1)$, we will have
\betaegin{equation}\label{yfguyfuyffiug}
\lim_{\varepsilon\thetao 0}\int_{\varphiield{R}^N}\betaigg(\Big|\varepsilon^{n_0} |z|^{n_0}\hat
u_\varepsilon(z)\Big|^{p^*}+\Big|\varepsilon^{n_0} |z|^{n_0}\hat
\psi_\varepsilon(z)\Big|^{p^*}\betaigg)dz=0\quad\thetaext{and}\quad
\int_{\varphiield{R}^N}\Big(\betaig|\hat u_\varepsilon(z)\betaig|^{p^*}+\betaig|\hat
\psi_\varepsilon(z)\betaig|^{p^*}\Big)dz\leq C\,.
\varepsilonnd{equation}
On the other hand by H\"{o}lder inequality for every
$j\in\{1,2\ldots,n_0-1\}$ we obtain
\betaegin{multline}\label{yfguyfuyffiugjklhhjhhjhjjh}
\int_{\varphiield{R}^N}\betaigg(\Big|\varepsilon^{j} |z|^{j}\hat
u_\varepsilon(z)\Big|^{p^*}+\Big|\varepsilon^{j} |z|^{j}\hat
\psi_\varepsilon(z)\Big|^{p^*}\betaigg)dz\leq\\
2\Bigg\{\int_{\varphiield{R}^N}\betaigg(\Big|\varepsilon^{n_0} |z|^{n_0}\hat
u_\varepsilon(z)\Big|^{p^*}+\Big|\varepsilon^{n_0} |z|^{n_0}\hat
\psi_\varepsilon(z)\Big|^{p^*}\betaigg)dz\Bigg\}^{j/n_0}\cdot\Bigg\{\int_{\varphiield{R}^N}\Big(\betaig|\hat
u_\varepsilon(z)\betaig|^{p^*}+\betaig|\hat
\psi_\varepsilon(z)\betaig|^{p^*}\Big)dz\Bigg\}^{1-j/n_0}\,.
\varepsilonnd{multline}
Then, plugging \varepsilonr{yfguyfuyffiugjklhhjhhjhjjh} into
\varepsilonr{yfguyfuyffiug} we obtain
\betaegin{equation}\label{yfguyfuyffiugjkhhlhlhuuiiiiukkk}
\lim_{\varepsilon\thetao 0}\int_{\varphiield{R}^N}\betaigg(\Big|\varepsilon^{j} |z|^{j}\hat
u_\varepsilon(z)\Big|^{p^*}+\Big|\varepsilon^{j} |z|^{j}\hat
\psi_\varepsilon(z)\Big|^{p^*}\betaigg)dz=0\quad\varphiorall j\in\{1,2\ldots,n_0\}\,.
\varepsilonnd{equation}
Thus, we deduce $\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{j}\,\nabla^{j+1}
u_\varepsilon\betaig)=0$ in $L^p(\varphiield{R}^N,\varphiield{R}^{k\thetaimes N^{j+1}})$ and\, $\lim_{\varepsilon\thetao
0^+}\betaig(\varepsilon^{j}\,\nabla^{j} \psi_\varepsilon\betaig)=0$ in $L^p(\varphiield{R}^N,\varphiield{R}^{m\thetaimes
N^{j}})$ for every $j\in\{1,2,\ldots,n_0\}$. Therefore, since
$\sigmaigma(x)=1$ for every $x\in K$ and since $K\sigmaubset\sigmaubset\varphiield{R}^N$ was
chosen arbitrary,
\varepsilonnd{comment*}
Thus in any case, by Lemma \ref{nfjghfighfihjtfohjt} we deduce that
for every $1\leq j<n$ we have $\lim_{\varepsilon\thetao
0^+}\betaig(\varepsilon^{j}\,\nabla^{j+1} v_\varepsilon\betaig)=0$ in $L^{p_1}_{loc}(I_{v_\ec
\nu},\varphiield{R}^{k\thetaimes N^{j+1}})$ and for every $1\leq j<m$ we have
$\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^{j}\,\nabla^{j} \varphi_\varepsilon\betaig)=0$ in
$L^{p_2}_{loc}(I_{v_\ec \nu},\varphiield{R}^{l\thetaimes N^{j}})$.
Next consider $l_0(t)\in C^\infty(\varphiield{R},\varphiield{R})$ with the properties
$\int_{-1/2}^{1/2}l_0(s)ds=1/2$ and
\betaegin{equation}\label{L2009smooth1newfff}
\betaegin{cases}l_0(t)=0 \quad\quad\quad\thetaext{for every }t\in(-\infty,-1/2+\deltaelta)\,,\\
l_0(t)\in[0,1] \quad\;\;\thetaext{for every }t\in[-1/2+\deltaelta,1/2-\deltaelta]\,,\\
l_0(t)=1 \quad\quad\quad\thetaext{for every }
t\in(1/2-\deltaelta,+\infty)\,,\varepsilonnd{cases}
\varepsilonnd{equation}
where $\deltaelta\in(0,1/2)$. Clearly such a function exists. Then set
\betaegin{equation}\label{vjhvhjvhjvhjjnjknew}
h_\varepsilon(x):=V^-\cdot
x+\varepsilon\int_{-\infty}^{x_1/\varepsilon}l_0\betaig(s\betaig)\,ds\,\cdot\,v_\ec k\quad
\varphiorall x\in\varphiield{R}^N\,.
\varepsilonnd{equation}
Thus $h_\varepsilon\in C^\infty(\varphiield{R}^N,\varphiield{R}^k)$ and in particular
\betaegin{align}
\label{vjhvhjvhjvhjjnjkjgghgfjnew} \nabla
h_\varepsilon(x):=V^-+l_0\betaigg(\varphirac{x_1}{\varepsilon}\betaigg)\Big(v_\ec k\omegatimes v_\ec
e_1\Big)=V^-+l_0\betaigg(\varphirac{x_1}{\varepsilon}\betaigg)\cdot\Big(V^+-V^-\Big)\quad
\varphiorall x\in\varphiield{R}^N\,,\\
\label{fvfgfffhhffffnew} \varepsilon^j\nabla^{j+1}
h_\varepsilon(x):=l^{(j)}_0\betaigg(\varphirac{x_1}{\varepsilon}\betaigg)\cdot\Big(V^+-V^-\Big)\omegatimesv_\ec
e_1\omegatimes\ldots\omegatimesv_\ec e_1\quad \varphiorall x\in\varphiield{R}^N\;\varphiorall j\gammaeq
1\,.
\varepsilonnd{align}
Moreover by \varepsilonr{L2009smooth1newfff}, \varepsilonr{vjhvhjvhjvhjjnjknew} and
\varepsilonr{vjhvhjvhjvhjjnjkjgghgfjnew} we obtain
\betaegin{equation}\label{vjhvhjvhjvhjjnjkgffgjkjhjnew}
\nabla h_\varepsilon(x)=\betaegin{cases}V^-\;\;\thetaext{if}\;\; x_1\leq-\varepsilon/2\,,\\
V^+\;\;\thetaext{if}\;\; x_1\gammaeq\varepsilon/2\,,\varepsilonnd{cases}\;\;
h_\varepsilon(x)=v(x)\;\;\thetaext{if}\;\; |x_1|\gammaeq \varepsilon/2\,,
\varepsilonnd{equation}
and by \varepsilonr{fvfgfffhhffffnew},
\betaegin{equation}\label{vjhvhjvhjvhjjnjkgffgjkjhjfggfffnew}
\nabla^{j+1} h_\varepsilon(x)= 0\quad\thetaext{if}\quad
|x_1|\gammaeq\varepsilon/2\quad\;\;\varphiorall j\gammaeq 1\,.
\varepsilonnd{equation}
Therefore, by \varepsilonr{vjhvhjvhjvhjjnjknew},
\varepsilonr{vjhvhjvhjvhjjnjkjgghgfjnew}, \varepsilonr{fvfgfffhhffffnew},
\varepsilonr{vjhvhjvhjvhjjnjkgffgjkjhjnew} and
\varepsilonr{vjhvhjvhjvhjjnjkgffgjkjhjfggfffnew} we have $\lim_{\varepsilon\thetao
0^+}h_\varepsilon=v$ in $W^{1,p_1}(I_{v_\ec \nu},\varphiield{R}^k)$, for every $j\gammaeq 1$
we have $\lim_{\varepsilon\thetao 0^+}\{\varepsilon^j\nabla^{j+1} h_\varepsilon\}=0$ in
$L^{p_1}(I_{v_\ec \nu},\varphiield{R}^{k\thetaimes N^{j+1}})$, and $\lim_{\varepsilon\thetao
0^+}\betaig\{(h_\varepsilon-v)/\varepsilon\betaig\}=0$ in $L^{p_1}(I_{v_\ec \nu},\varphiield{R}^k)$.
Thus, since we have $\lim_{\varepsilon\thetao 0^+}\betaig\{(v_\varepsilon-v)/\varepsilon\betaig\}=0$ in
$L^{p_1}_{loc}(I_{v_\ec \nu},\varphiield{R}^k)$, then clearly $\lim_{\varepsilon\thetao
0^+}\betaig\{(v_\varepsilon-h_\varepsilon)/\varepsilon\betaig\}=0$ in $L^{p_1}_{loc}(I_{v_\ec
\nu},\varphiield{R}^k)$. So there exists a family
$\betaig\{h_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset C^\infty(\varphiield{R}^N,\varphiield{R}^k)$, such that
$\nabla h_\varepsilon(x)\varepsilonquiv z_\varepsilon(v_\ec\nu\cdot x)$ for some function
$z_\varepsilon$, $\nabla h_\varepsilon(x)=\nabla v(x)$ if $|v_\ec\nu\cdot x|>c_0$,
where $0<c_0<1/2$ is a constant, and
\betaegin{equation}\label{gfjguyfygbjhhjgjgghfffgfgnew}
\betaegin{cases}
\lim_{\varepsilon\thetao 0^+}h_\varepsilon=v\quad\thetaext{in}\quad W^{1,p_1}(I_{v_\ec
\nu},\varphiield{R}^k)\,,\\
\lim_{\varepsilon\thetao 0^+}\betaig(\varepsilon^j\nabla^{j+1} h_\varepsilon\betaig)=0\quad
\thetaext{in}\quad L^{p_1}(I_{v_\ec \nu},\varphiield{R}^{k\thetaimes
N^{j+1}})\quad\varphiorall j\gammaeq 1\,,
\\
\lim_{\varepsilon\thetao 0^+}\betaig\{(v_\varepsilon-h_\varepsilon)/\varepsilon\betaig\}=0\quad \thetaext{in}\quad
L^{p_1}(I_{v_\ec \nu},\varphiield{R}^k)\,.
\varepsilonnd{cases}
\varepsilonnd{equation}
Then by applying Proposition \ref{L2009.02kkknew} we obtain that
there exist $\betaig\{u_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset
\mathcal{S}^{(n)}_1\betaig(V^+,V^-,I_{v_\ec\nu}\betaig)$ and
$\betaig\{\psi_\varepsilon(x)\betaig\}_{0<\varepsilon<1}\sigmaubset
\mathcal{S}^{(n)}_2\betaig(\varphi^+,\varphi^-,I_{v_\ec\nu}\betaig)$ such
that
\betaegin{multline}\label{ggfghjjhfhfjfhjkkkhjkgghghnewhjgjkghhfg}
\varliminf_{\varepsilon\thetao 0^+}\int\limits_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^nv_\ec P\cdot\{\nabla^{n+1} v_{\varepsilon}\},
\ldots,\varepsilon\nabla^2 v_{\varepsilon},\nabla v_{\varepsilon}\Big\}, \Big\{ \varepsilon^mv_\ec
Q\cdot\{\nabla^m \varphi_{\varepsilon}\},
\ldots,\varepsilon\nabla
\varphi_{\varepsilon},\varphi_{\varepsilon}\Big\}\Bigg)dx\gammaeq\\
\varliminf_{\varepsilon\thetao 0^+}\int\limits_{I_{v_\ec \nu}}\varphirac{1}{\varepsilon}
F\Bigg(\Big\{\varepsilon^nv_\ec P\cdot\{\nabla^{n+1} u_{\varepsilon}\},
\ldots,\varepsilon\nabla^2 u_{\varepsilon},\nabla u_{\varepsilon}\Big\}, \Big\{ \varepsilon^mv_\ec
Q\cdot\{\nabla^m \psi_{\varepsilon}\},
\ldots,\varepsilon\nabla \psi_{\varepsilon},\psi_{\varepsilon}\Big\}\Bigg)dx.
\varepsilonnd{multline}
This completes the proof.
\varepsilonnd{proof}
\varepsilonnd{comment}
Next, by combining Theorem \ref{L2009.02kkkjkhkjhnew} with
Theorem \ref{dehgfrygfrgygenjklhhj}
we obtain the
following result, describing the lower bound for first order
problems.
\begin{proposition}\label{dehgfrygfrgygenbgggggggggggggkgkgnew}
Let $\Omega\subset\mathbb{R}^N$ be an open set. Furthermore, let $F\in
C^0\big(\mathbb{R}^{m\times N^n}\times\mathbb{R}^{m\times
N^{(n-1)}}\times\ldots\times\mathbb{R}^{m\times N}\times
\mathbb{R}^m\times\mathbb{R}^N,\mathbb{R}\big)$ be such that $F\geq 0$ and there exist
$g(x)\in C^0\big(\mathbb{R}^N,(0,+\infty)\big)$ and $p\geq 1$ satisfying
\begin{multline}\label{hgdfvdhvdhfvjjjjiiiuyyyjinew}
\frac{1}{g(x)}|A|^p
\leq F\Big(A,a_1,\ldots,a_{n-1},b,x\Big) \leq
g(x)\bigg(|A|^p+\sum_{j=1}^{n-1}|a_j|^{p}+|b|^p+1\bigg)\quad
\text{for every}\;\;\big(A,a_1,a_2,\ldots,a_{n-1},b,x\big)\,.
\end{multline}
Assume also that for every $x_0\in\Omega$ and every $\tau>0$ there
exists $\alpha>0$ satisfying
\begin{multline}\label{vcjhfjhgjkgkgjgghjfhfhfmjghj}
F\big(a_1,a_2,\ldots, a_n,b,x\big)-F\big(a_1,a_2,\ldots,
a_n,b,x_0\big)\geq -\tau F\big(a_1,a_2,\ldots, a_n,b,x_0\big)\\
\forall\, a_1\in \mathbb{R}^{m\times N^n}\;\ldots\;\forall\,
a_n\in\mathbb{R}^{m\times N}\;\;\forall\, b\in\mathbb{R}^{m}\;\;\forall\,
x\in\mathbb{R}^N\;\;\text{such that}\;\;|x-x_0|<\alpha\,.
\end{multline}
Next let $\varphi\in L^p(\Omega,\mathbb{R}^m)$ be such that $F\big(0,0,\ldots,0,
\varphi(x),x\big)=0$ for a.e. $x\in\Omega$. Assume also that there exist
an $\mathcal{H}^{N-1}$ $\sigma$-finite Borel set $D\subset\Omega$ and
three Borel mappings $\varphi^+(x):D\to\mathbb{R}^m$,
$\varphi^-(x):D\to\mathbb{R}^m$ and $\vec n(x):D\to S^{N-1}$ such that for
every $x\in D$ we have
\begin{equation}\label{L2009surfhh8128odno888jjjjjkkkkkkgenjnhhhnew}
\lim\limits_{\rho\to 0^+}\frac{\int_{B_\rho^+(x,\vec
n(x))}\big|\varphi(y)-\varphi^+(x)\big|^p\,dy}
{\mathcal{L}^N\big(B_\rho(x)\big)}=0\quad\text{and}\quad
\lim\limits_{\rho\to 0^+}\frac{\int_{B_\rho^-(x,\vec
n(x))}\big|\varphi(y)-\varphi^-(x)\big|^p\,dy}
{\mathcal{L}^N\big(B_\rho(x)\big)}=0\,.
\end{equation}
Then for every
$\{\varphi_\varepsilon\}_{\varepsilon>0}\subset W^{n,p}_{loc}(\Omega,\mathbb{R}^m)$ such that
$\varphi_\varepsilon\to \varphi$ in $L^p_{loc}(\Omega,\mathbb{R}^m)$ as $\varepsilon\to 0^+$, we
will have
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkgenhjhhhhjnew}
\varliminf_{\varepsilon\to 0^+}\frac{1}{\varepsilon}\int_\Omega F\bigg(\,\varepsilon^n\nabla^n
\varphi_\varepsilon(x),\,\varepsilon^{n-1}\nabla^{n-1}\varphi_\varepsilon(x),\,\ldots,\,\varepsilon\nabla \varphi_\varepsilon(x),\, \varphi_\varepsilon(x),\,x\bigg)dx\\
\geq \int_{D}\bar E^{(n)}_{per}\Big(\varphi^+(x),\varphi^-(x),\vec
n(x),x\Big)d \mathcal H^{N-1}(x)\,,
\end{multline}
where
\begin{multline}\label{L2009hhffff12kkkhjhjghghgvgvggcjhggghnew}
\bar E^{(n)}_{per}\Big(\varphi^+,\varphi^-,\vec n,x\Big)\;:=\;\\
\inf\Bigg\{\int_{I_{\vec n}}\frac{1}{L} F\bigg(L^n\,\nabla^n
z_\eta(y),\,L^{n-1}\,\nabla^{n-1} z_\eta(y),\,\ldots,\,L\,\nabla
z_\eta(y),\,z_\eta(y),\,x\bigg)\,dy:\;\; L\in(0,+\infty)\,,\;z_\eta\in
\mathcal{S}^{(n)}(\varphi^+,\varphi^-,I_{\vec n})\Bigg\}\,,
\end{multline}
with
\begin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjddddhdkgkkkhgghjhjhjkjnew}
\mathcal{S}^{(n)}\big(\varphi^+,\varphi^-,I_{\vec n}\big):=
\bigg\{z_\eta\in C^n(\mathbb{R}^N,\mathbb{R}^m):\;\;z_\eta(y)=\varphi^-\;\text{ if }\;y\cdot\vec n\leq-1/2,\\
z_\eta(y)=\varphi^+\;\text{ if }\; y\cdot\vec n\geq 1/2\;\text{ and
}\;z_\eta\big(y+\vec \nu_j\big)=z_\eta(y)\;\;\forall j=2,
\ldots,
N\bigg\}\,.
\end{multline}
Here $I_{\vec n}:=\{y\in\mathbb{R}^N:\;|y\cdot \vec\nu_j|<1/2\;\;\;\forall
j=1,2,\ldots, N\}$, where
$\{\vec\nu_1,
\ldots,\vec\nu_N\}\subset\mathbb{R}^N$ is an
orthonormal base in $\mathbb{R}^N$ such that $\vec\nu_1:=\vec n$.
\end{proposition}
Thus, by plugging Proposition
\ref{dehgfrygfrgygenbgggggggggggggkgkgnew} into Theorem
\ref{ffgvfgfhthjghgjhg}
we deduce the $\Gamma$-limit result for the
first order problem.
\begin{theorem}\label{dehgfrygfrgygenbgggggggggggggkgkgthtjtfnew}
Let $\Omega\subset\mathbb{R}^N$ be an open set. Furthermore, let $F\in
C^1\big(\mathbb{R}^{m\times N^n}\times\mathbb{R}^{m\times
N^{(n-1)}}\times\ldots\times\mathbb{R}^{m\times N}\times
\mathbb{R}^m\times\mathbb{R}^N,\mathbb{R}\big)$ be such that $F\geq 0$ and there exist
$g(x)\in C^0\big(\mathbb{R}^N,(0,+\infty)\big)$ and $p\geq 1$ satisfying
\begin{multline}\label{hgdfvdhvdhfvjjjjiiiuyyyjitghujtrnew}
\frac{1}{g(x)}|A|^p
\leq F\Big(A,a_1,\ldots,a_{n-1},b,x\Big) \leq
g(x)\bigg(|A|^p+\sum_{j=1}^{n-1}|a_j|^{p}+|b|^p+1\bigg)\quad
\text{for every}\;\;\big(A,a_1,a_2,\ldots,a_{n-1},b,x\big)\,.
\end{multline}
Assume also that for every $x_0\in\Omega$ and every $\tau>0$ there
exists $\alpha>0$ satisfying
\begin{multline}\label{vcjhfjhgjkgkgjgghjfhfhfmjghjvjhvjhgh}
F\big(a_1,a_2,\ldots, a_n,b,x\big)-F\big(a_1,a_2,\ldots,
a_n,b,x_0\big)\geq -\tau F\big(a_1,a_2,\ldots, a_n,b,x_0\big)\\
\forall\, a_1\in \mathbb{R}^{m\times N^n}\;\ldots\;\forall\,
a_n\in\mathbb{R}^{m\times N}\;\;\forall\, b\in\mathbb{R}^{m}\;\;\forall\,
x\in\mathbb{R}^N\;\;\text{such that}\;\;|x-x_0|<\alpha\,.
\end{multline}
Next let $\varphi\in BV(\mathbb{R}^N,\mathbb{R}^{m})\cap L^\infty$ be such that $\|D
\varphi\|(\partial\Omega)=0$ and $F\big(0,0,\ldots,0,
\varphi(x),x\big)=0$ for a.e. $x\in\Omega$.
Then for every
$\{\varphi_\varepsilon\}_{\varepsilon>0}\subset W^{n,p}_{loc}(\Omega,\mathbb{R}^m)$ such that
$\varphi_\varepsilon\to \varphi$ in $L^p_{loc}(\Omega,\mathbb{R}^m)$ as $\varepsilon\to 0^+$, we
will have
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkgenhjhhhhjtjurtnew}
\varliminf_{\varepsilon\to 0^+}\frac{1}{\varepsilon}\int_\Omega F\bigg(\,\varepsilon^n\nabla^n
\varphi_\varepsilon(x),\,\varepsilon^{n-1}\nabla^{n-1}\varphi_\varepsilon(x),\,\ldots,\,\varepsilon\nabla \varphi_\varepsilon(x),\, \varphi_\varepsilon(x),\,x\bigg)dx\\
\geq \int_{\Omega\cap J_\varphi}\bar
E^{(n)}_{per}\Big(\varphi^+(x),\varphi^-(x),\vec \nu(x),x\Big)d
\mathcal H^{N-1}(x)\,,
\end{multline}
where
\begin{multline}\label{L2009hhffff12kkkhjhjghghgvgvggcjhggghtgjutnew}
\bar E^{(n)}_{per}\Big(\varphi^+,\varphi^-,\vec \nu,x\Big)\;:=\;\\
\inf\Bigg\{\int_{I_{\vec \nu}}\frac{1}{L} F\bigg(L^n\,\nabla^n
z_\eta(y),\,L^{n-1}\,\nabla^{n-1} z_\eta(y),\,\ldots,\,L\,\nabla
z_\eta(y),\,z_\eta(y),\,x\bigg)\,dy:\;\; L\in(0,+\infty)\,,\;z_\eta\in
\mathcal{S}^{(n)}(\varphi^+,\varphi^-,I_{\vec\nu})\Bigg\}\,,
\end{multline}
with
\begin{multline}\label{L2009Ddef2hhhjjjj77788hhhkkkkllkjjjjkkkhhhhffggdddkkkgjhikhhhjjddddhdkgkkkhgghjhjhjkjtjytrjnew}
\mathcal{S}^{(n)}\big(\varphi^+,\varphi^-,I_{\vec \nu}\big):=
\bigg\{z_\eta\in C^n(\mathbb{R}^N,\mathbb{R}^m):\;\;z_\eta(y)=\varphi^-\;\text{ if }\;y\cdot\vec \nu\leq-1/2,\\
z_\eta(y)=\varphi^+\;\text{ if }\; y\cdot\vec \nu\geq 1/2\;\text{ and
}\;z_\eta\big(y+\vec \nu_j\big)=z_\eta(y)\;\;\forall j=2,
\ldots,
N\bigg\}\,.
\end{multline}
Here $I_{\vec \nu}:=\{y\in\mathbb{R}^N:\;|y\cdot \vec\nu_j|<1/2\;\;\;\forall
j=1,
\ldots, N\}$, where
$\{\vec\nu_1,
\ldots,\vec\nu_N\}\subset\mathbb{R}^N$ is an
orthonormal base in $\mathbb{R}^N$ such that $\vec\nu_1:=\vec \nu$.
Moreover, there exists a sequence $\{\psi_\varepsilon\}_{\varepsilon>0}\subset
C^\infty(\mathbb{R}^N,\mathbb{R}^m)$ such that
$\int_\Omega\psi_\varepsilon(x)dx=\int_\Omega\varphi(x)dx$, $\psi_\varepsilon\to \varphi$ in
$L^p(\Omega,\mathbb{R}^m)$ as $\varepsilon\to 0^+$ and we have
\begin{multline}\label{a1a2a3a4a5a6a7s8hhjhjjhjjjjjjkkkkgenhjhhhhjtjurtgfhfhfjfjfjnew}
\lim_{\varepsilon\to 0^+}\frac{1}{\varepsilon}\int_\Omega F\bigg(\,\varepsilon^n\nabla^n
\psi_\varepsilon(x),\,\varepsilon^{n-1}\nabla^{n-1}\psi_\varepsilon(x),\,\ldots,\,\varepsilon\nabla \psi_\varepsilon(x),\, \psi_\varepsilon(x),\,x\bigg)dx\\
= \int_{\Omega\cap J_\varphi}\bar
E^{(n)}_{per}\Big(\varphi^+(x),\varphi^-(x),\vec \nu(x),x\Big)d
\mathcal H^{N-1}(x)\,.
\end{multline}
\end{theorem}
\betaegin{comment}
\alphappendix
\sigmaection{Notations and basic results about $BV$-functions}
\label{sec:pre2}
\betaegin{itemize}
\item For given a real topological linear space $X$ we denote by $X^*$ the dual space (the space of continuous linear functionals from $X$ to $\varphiield{R}$).
\item For given $h\in X$ and $x^*\in X^*$ we denote by $\betaig<h,x^*\betaig>_{X\thetaimes X^*}$ the value in $\varphiield{R}$ of the functional $x^*$ on the vector $h$.
\item For given two normed linear spaces $X$ and $Y$ we denote by $\mathcal{L}(X;Y)$ the linear space of continuous (bounded) linear operators from $X$ to $Y$.
\item For given $A\in\mathcal{L}(X;Y)$ and $h\in X$ we denote by $A\cdot h$ the value in $Y$ of the operator $A$ on the vector $h$.
\item For given two reflexive Banach spaces $X,Y$ and
$S\in\mathcal{L}(X;Y)$ we denote by $S^*\in \mathcal{L}(Y^*;X^*)$
the corresponding adjoint operator, which satisfy
\betaegin{equation*}\betaig<x,S^*\cdot y^*\betaig>_{X\thetaimes X^*}:=\betaig<S\cdot
x,y^*\betaig>_{Y\thetaimes Y^*}\quad\quad\thetaext{for every}\; y^*\in
Y^*\;\thetaext{and}\;x\in X\,.
\varepsilonnd{equation*}
\item Given open set $G\sigmaubset\varphiield{R}^N$ we denote by
$\mathcal{D}(G,\varphiield{R}^d)$ the real topological linear space of
compactly supported $\varphiield{R}^d$-valued test functions i.e.
$C^\infty_c(G,\varphiield{R}^d)$ with the usual topology.
\item
We denote $\mathcal{D}'(G,\varphiield{R}^d):=\betaig\{\mathcal{D}(G,\varphiield{R}^d)\betaig\}^*$
(the space of $\varphiield{R}^d$ valued distributions in $G$).
\item
Given $h\in\mathcal{D}'(G,\varphiield{R}^d)$ and $\deltaelta\in\mathcal{D}(G,\varphiield{R}^d)$
we denote $<\deltaelta,h>:=\betaig<\deltaelta,h\betaig>_{\mathcal{D}(G,\varphiield{R}^d)\thetaimes
\mathcal{D}'(G,\varphiield{R}^d)}$ i.e. the value in $\varphiield{R}$ of the distribution
$h$ on the test function $\deltaelta$.
\item
Given a linear operator $v_\ec A\in\mathcal{L}(\varphiield{R}^d;\varphiield{R}^k)$ and a
distribution $h\in\mathcal{D}'(G,\varphiield{R}^d)$ we denote by $v_\ec A\cdot h$
the distribution in $\mathcal{D}'(G,\varphiield{R}^k)$ defined by
\betaegin{equation*}
<\deltaelta,v_\ec A \cdot h>:=<v_\ec A^*\cdot
\deltaelta,h>\quad\quad\varphiorall\deltaelta\in\mathcal{D}(G,\varphiield{R}^k).
\varepsilonnd{equation*}
\item
Given $h\in\mathcal{D}'(G,\varphiield{R}^d)$ and $\deltaelta\in\mathcal{D}(G,\varphiield{R})$ by
$<\deltaelta,h>$ we denote the vector in $\varphiield{R}^d$ which satisfy
$<\deltaelta,h>\cdot v_\ec e:=<\deltaeltav_\ec e,h>$ for every $v_\ec
e\in\varphiield{R}^d$.
\item
For a $p\thetaimes q$ matrix $A$ with $ij$-th entry $a_{ij}$ we denote
by $|A|=\betaigl(\Sigmaigma_{i=1}^{p}\Sigmaigma_{j=1}^{q}a_{ij}^2\betaigr)^{1/2}$
the Frobenius norm of $A$.
\item For two matrices $A,B\in\varphiield{R}^{p\thetaimes q}$ with $ij$-th entries
$a_{ij}$ and $b_{ij}$ respectively, we write\\
$A:B\,:=\,\sigmaum\limits_{i=1}^{p}\sigmaum\limits_{j=1}^{q}a_{ij}b_{ij}$.
\item For the $p\thetaimes q$ matrix $A$ with
$ij$-th entry $a_{ij}$ and for the $q\thetaimes d$ matrix $B$ with
$ij$-th entry $b_{ij}$ we denote by $AB:=A\cdot B$ their product,
i.e. the $p\thetaimes d$ matrix, with $ij$-th entry
$\sigmaum\limits_{k=1}^{q}a_{ik}b_{kj}$.
\item We identify
the $v_\ec u=(u_1,\ldots,u_q)\in\varphiield{R}^q$ with the $q\thetaimes 1$ matrix $A$
with $i1$-th entry $u_i$, so that for the $p\thetaimes q$ matrix $A$
with $ij$-th entry $a_{ij}$ and for $v_\ec
v=(v_1,v_2,\ldots,v_q)\in\varphiield{R}^q$ we denote by $A\,v_\ec v :=A\cdotv_\ec
v$ the $p$-dimensional vector $v_\ec u=(u_1,\ldots,u_p)\in\varphiield{R}^p$,
given by $u_i=\sigmaum\limits_{k=1}^{q}a_{ik}v_k$ for every $1\leq i\leq
p$.
\item As usual $A^T$ denotes the transpose of the matrix $A$.
\item For
$v_\ec u=(u_1,\ldots,u_p)\in\varphiield{R}^p$ and $v_\ec
v=(v_1,\ldots,v_p)\in\varphiield{R}^p$ we denote by $v_\ec uv_\ec v:=v_\ec
u\cdotv_\ec v:=\sigmaum\limits_{k=1}^{p}u_k v_k$ the standard scalar
product. We also note that $v_\ec uv_\ec v=v_\ec u^Tv_\ec v=v_\ec v^Tv_\ec
u$ as products of matrices.
\item For $v_\ec
u=(u_1,\ldots,u_p)\in\varphiield{R}^p$ and $v_\ec v=(v_1,\ldots,v_q)\in\varphiield{R}^q$ we
denote by $v_\ec u\omegatimesv_\ec v$ the $p\thetaimes q$ matrix with $ij$-th
entry $u_i v_j$ (i.e. $v_\ec u\omegatimesv_\ec v=v_\ec u\,v_\ec v^T$ as
product of matrices).
\item For
any $p\thetaimes q$ matrix $A$ with $ij$-th entry $a_{ij}$ and $v_\ec
v=(v_1,v_2,\ldots,v_d)\in\varphiield{R}^d$ we denote by $A\omegatimesv_\ec v$ the
$p\thetaimes q\thetaimes d$ tensor with $ijk$-th entry $a_{ij}v_k$.
\item
Given a vector valued function
$f(x)=\betaig(f_1(x),\ldots,f_k(x)\betaig):\Omega\thetao\varphiield{R}^k$ ($\Omega\sigmaubset\varphiield{R}^N$) we
denote by $Df$ or by $\nabla_x f$ the $k\thetaimes N$ matrix with
$ij$-th entry $\varphirac{\partial f_i}{\partial x_j}$.
\item
Given a matrix valued function
$F(x):=\{F_{ij}(x)\}:\varphiield{R}^N\thetao\varphiield{R}^{k\thetaimes N}$ ($\Omega\sigmaubset\varphiield{R}^N$) we
denote by $div\,F$ the $\varphiield{R}^k$-valued vector field defined by
$div\,F:=(l_1,\ldots,l_k)$ where
$l_i=\sigmaum\limits_{j=1}^{N}\varphirac{\partial F_{ij}}{\partial x_j}$.
\item Given a
matrix valued function $F(x)=\betaig\{f_{ij}(x)\betaig\}(1\leq i\leq
p,\,1\leq j\leq q):\Omega\thetao\varphiield{R}^{p\thetaimes q}$ ($\Omega\sigmaubset\varphiield{R}^N$) we denote
by $DF$ or by $\nabla_x F$ the $p\thetaimes q\thetaimes N$ tensor with
$ijk$-th entry $\varphirac{\partial f_{ij}}{\partial x_k}$.
\item For every dimension $d$
we denote by $I$ the unit $d\thetaimes d$-matrix and by $O$ the null
$d\thetaimes d$-matrix.
\item Given a vector valued
measure $\mu=(\mu_1,\ldots,\mu_k)$ (where for any $1\leq j\leq k$,
$\mu_j$ is a finite signed measure) we denote by $\|\mu\|(E)$ its
total variation measure of the set $E$.
\item For any $\mu$-measurable function $f$, we define the product measure
$f\cdot\mu$ by: $f\cdot\mu(E)=\int_E f\,d\mu$, for every
$\mu$-measurable set $E$.
\item Throughout this paper we assume that
$\Omega\sigmaubset\varphiield{R}^N$ is an open set.
\varepsilonnd{itemize}
In what follows we present some known results on BV-spaces. We rely mainly on the book \cite{amb}
by Ambrosio, Fusco and Pallara. Other sources are the books
by Hudjaev and Volpert~\cite{vol}, Giusti~\cite{giusti} and Evans and Gariepy~\cite{evans}.
We begin by introducing some notation.
For every $\vec\nu\in S^{N-1}$ (the unit sphere in $\mathbb{R}^N$) and $R>0$
we set
\begin{align}
B_{R}^+(x,\vec\nu)&=\{y\in\mathbb{R}^N\,:\,|y-x|<R,\,
(y-x)\cdot\vec\nu>0\}\,,\label{eq:B+2}\\
B_{R}^-(x,\vec\nu)&=\{y\in\mathbb{R}^N:|y-x|<R,\,
(y-x)\cdot\vec\nu<0\}\,,\label{eq:B-2}\\
H_+(x,\vec\nu)&=\{y\in\mathbb{R}^N:(y-x)\cdot\vec\nu>0\}\,,\label{HN+2}\\
H_-(x,\vec\nu)&=\{y\in\mathbb{R}^N:(y-x)\cdot\vec\nu<0\}\label{HN-2}\\
\intertext{and} H^0_{\vec \nu}&=\{ y\in\mathbb{R}^N\,:\, y\cdot\vec
\nu=0\}\label{HN2}\,.
\end{align}
Next we recall the definition of the space of functions with bounded
variation. In what follows, ${\mathcal L}^N$ denotes the Lebesgue measure in $\mathbb{R}^N$.
\begin{definition}
Let $\Omega$ be a domain in $\mathbb{R}^N$ and let $f\in L^1(\Omega,\mathbb{R}^m)$.
We say that $f\in BV(\Omega,\mathbb{R}^m)$ if
\begin{equation*}
\int_\Omega|Df|:=\sup\bigg\{\int_\Omega\sum\limits_{k=1}^{m}f_k\operatorname{div}\varphi_k\,d\mathcal{L}^N
:\;\varphi_k\in C^1_c(\Omega,\mathbb{R}^N)\;\forall
k,\,\sum\limits_{k=1}^{m}|\varphi_k(x)|^2\leq 1\;\forall
x\in\Omega\bigg\}
\end{equation*}
is finite. In this case we define the BV-norm of $f$ by
$\|f\|_{BV}:=\|f\|_{L^1}+\int_\Omega|D f|$.
\end{definition}
We recall below some basic notions in Geometric Measure Theory (see
\cite{amb}).
\begin{definition}\label{defjac8898782}
Let $\Omega$ be a domain in $\mathbb{R}^N$. Consider a function
$f\in L^1_{loc}(\Omega,\mathbb{R}^m)$ and a point $x\in\Omega$.\\
i) We say that $x$ is a point of {\em approximate continuity} of $f$
if there exists $z\in\mathbb{R}^m$ such that
\[
\lim\limits_{\rho\to 0^+}\frac{\int_{B_\rho(x)}|f(y)-z|\,dy}
{\mathcal{L}^N\big(B_\rho(x)\big)}=0\,.
\]
In this case $z$ is called an {\em approximate limit} of $f$ at $x$
and
we denote $z$ by $\tilde{f}(x)$. The set of points of approximate continuity of
$f$ is denoted by $G_f$.\\
ii) We say that $x$ is an {\em approximate jump point} of $f$ if
there exist $a,b\in\mathbb{R}^m$ and $\vec\nu\in S^{N-1}$ such that $a\neq
b$ and
\begin{equation}\label{aprplmin2}
\lim\limits_{\rho\to
0^+}\frac{\int_{B_\rho^+(x,\vec\nu)}|f(y)-a|\,dy}
{\mathcal{L}^N\big(B_\rho(x)\big)}=0,\quad \lim\limits_{\rho\to
0^+}\frac{\int_{B_\rho^-(x,\vec\nu)}|f(y)-b|\,dy}
{\mathcal{L}^N\big(B_\rho(x)\big)}=0.
\end{equation}
The triple $(a,b,\vec\nu)$, uniquely determined by \eqref{aprplmin2}
up to a permutation of $(a,b)$ and a change of sign of $\vec\nu$, is
denoted by $(f^+(x),f^-(x),\vec\nu_f(x))$. We shall call
$\vec\nu_f(x)$ the {\em approximate jump vector} and we shall
sometimes write simply $\vec\nu(x)$ if the reference to the function
$f$ is clear. The set of approximate jump points is denoted by
$J_f$. A choice of $\vec\nu(x)$ for every $x\in J_f$ (which is
unique up to sign) determines an orientation of $J_f$. At a point of
approximate continuity $x$, we shall use the convention
$f^+(x)=f^-(x)=\tilde f(x)$.
\end{definition}
We recall the following results on BV-functions that we shall use in
the sequel. They are all taken from \cite{amb}. In all of them
$\Omega$ is a domain in $\mathbb{R}^N$ and $f$ belongs to $BV(\Omega,\mathbb{R}^m)$.
\begin{theorem}[Theorems 3.69 and 3.78 from \cite{amb}]\label{petTh2}
$ $ \\i) $\mathcal{H}^{N-1}$-almost every point in
$\Omega\setminus J_f$ is a point of approximate continuity of $f$.\\
ii) The set $J_f$ is a countably $\mathcal{H}^{N-1}$-rectifiable
Borel set, oriented by $\vec\nu(x)$. In other words, $J_f$ is
$\sigma$-finite with respect to $\mathcal{H}^{N-1}$, there exist
countably many $C^1$ hypersurfaces $\{S_k\}^{\infty}_{k=1}$ such
that
$\mathcal{H}^{N-1}\Big(J_f\setminus\bigcup\limits_{k=1}^{\infty}S_k\Big)=0$,
and for $\mathcal{H}^{N-1}$-almost every $x\in J_f\cap S_k$, the
approximate jump vector $\vec\nu(x)$ is normal to $S_k$ at the
point $x$.\\iii) $\big[(f^+-f^-)\otimes\vec\nu_f\big](x)\in
L^1(J_f,d\mathcal{H}^{N-1})$.
\end{theorem}
\begin{theorem}[Theorems 3.92 and 3.78 from \cite{amb}]\label{vtTh2}
The distributional gradient
$D f$ can be decomposed
as a sum of three Borel regular finite matrix-valued measures on
$\Omega$,
\begin{equation*}
D f=D^a f+D^c f+D^j f
\end{equation*}
with
\begin{equation*}
D^a f=(\nabla f)\,\mathcal{L}^N ~\text{ and }~ D^j f=(f^+-f^-)\otimes\vec\nu_f\,
\mathcal{H}^{N-1}\llcorner J_f\,.
\end{equation*}
$D^a$, $D^c$ and $D^j$ are called the absolutely continuous part, the Cantor
part and the jump part of $D f$, respectively, and $\nabla f\in
L^1(\Omega,\mathbb{R}^{m\times N})$ is the approximate differential of $f$.
The three parts are mutually singular to each other. Moreover we
have the
following properties:\\
i) The support of $D^cf$ is concentrated on a set of
$\mathcal{L}^N$-measure zero, but $(D^c f)(B)=0$ for any Borel set
$B\subset\Omega$ which is $\sigma$-finite with respect to
$\mathcal{H}^{N-1}$;\\ii) $[D^a f]\big(f^{-1}(H)\big)=0$ and $[D^c
f]\big(\tilde f^{-1}(H)\big)=0$ for every $H\subset\mathbb{R}^m$ satisfying
$\mathcal{H}^1(H)=0$.
\end{theorem}
\begin{theorem}[Volpert chain rule, Theorems 3.96 and 3.99 from \cite{amb}]\label{trTh2}
Let $\Phi\in C^1(\mathbb{R}^m,\mathbb{R}^q)$ be a Lipschitz function satisfying
$\Phi(0)=0$ if $|\Omega|=\infty$. Then, $v(x)=(\Phi\circ f)(x)$
belongs to $BV(\Omega,\mathbb{R}^q)$ and we have
\begin{equation*}\begin{split}
D^a v = \nabla\Phi(f)\,\nabla f\,\mathcal{L}^N,\; D^c v =
\nabla\Phi(\tilde f)\,D^c f,\; D^j v =
\big[\Phi(f^+)-\Phi(f^-)\big]\otimes\vec\nu_f\,
\mathcal{H}^{N-1}\llcorner J_f\,.
\end{split}
\end{equation*}
\end{theorem}
We also recall that the trace operator $T$ is a continuous map from
$BV(\Omega)$, endowed with the strong topology (or, more generally,
the topology induced by strict convergence), to
$L^1(\partial\Omega,{\mathcal H}^{N-1}\llcorner\partial\Omega)$,
provided that $\Omega$ has a bounded Lipschitz boundary (see
\cite[Theorems 3.87 and 3.88]{amb}).
\betaegin{thebibliography}{66}
\betaibitem{ARS} F.~Alouges, T.~Rivière, S.~Serfaty, {\varepsilonm N\'{e}el and cross-tie wall energies
for planar micromagnetic configurations},
ESAIM Control Optim. Calc. Var. {\betaf 8} (2002), 31--68.
\betaibitem{ambrosio} L.~Ambrosio, {\varepsilonm Metric space valued functions of
bounded variation},
Ann. Scuola Norm. Sup. Pisa Cl. Sci. (4) {\betaf 17} (1990), 439--478.
\betaibitem{adm} L.~Ambrosio, C.~De Lellis and C.~ Mantegazza, {\varepsilonm Line
energies for gradient vector fields in the plane}, Calc. Var. PDE
{\betaf 9 }(1999), 327--355.
\betaibitem{amb} L.~Ambrosio, N.~Fusco and D.~Pallara, Functions of
Bounded Variation and Free Discontinuity Problems, Oxford
Mathematical Monographs. Oxford University Press, New York, 2000.
\betaibitem{ag1} P.~Aviles and Y.~Giga, {\varepsilonm A mathematical problem related to the physical theory of liquid
crystal configurations}, Proc. Centre Math. Anal. Austral. Nat. Univ. {\betaf 12} (1987), 1--16.
\betaibitem{ag2} P.~Aviles and Y.~Giga, {\varepsilonm On lower semicontinuity of a
defect energy obtained by a singular limit of the
Ginzburg-Landau type energy for gradient
fields}, Proc. Roy. Soc. Edinburgh Sect. A {\betaf 129} (1999), 1--17.
\betaibitem{CdL} Sergio Conti and Camillo de Lellis, {\varepsilonm Sharp upper bounds for a variational problem with singular
perturbation}, Math. Ann. {\betaf 338} (2007), no. 1, 119--146.
\bibitem{contiS} Sergio Conti and Ben Schweizer, {\em A sharp-interface limit for a
two-well problem in geometrically linear elasticity}, Arch. Ration.
Mech. Anal. {\bf 179} (2006), no. 3, 413--452.
\bibitem{contiS1} Sergio Conti and Ben Schweizer, {\em Rigidity and Gamma convergence for solid-solid phase
transitions with $SO(2)$-invariance}, Comm. Pure Appl. Math. {\bf 59}
(2006), no. 6, 830--868.
\bibitem{contiFL} S.~Conti, I.~Fonseca and G.~Leoni, {\em A $\Gamma$-convergence
result for the two-gradient theory of phase transitions}, Comm. Pure
Appl. Math. {\bf 55} (2002), 857--936.
\betaibitem{conti} S.~Conti and C.~De Lellis, {\varepsilonm Sharp upper bounds for a variational problem
with singular perturbation}, Math. Ann. 338 (2007), no. 1, 119--146.
\betaibitem{dl} C.~De Lellis, {\varepsilonm An example in the gradient theory of phase
transitions} ESAIM Control Optim. Calc. Var. {\betaf 7} (2002),
285--289 (electronic).
\betaibitem{otto} A.~DeSimone, S.~M\"uller, R.V.~Kohn and F.~Otto, {\varepsilonm A compactness result
in the gradient theory of phase transitions}, Proc. Roy. Soc.
Edinburgh Sect. A {\betaf 131} (2001), 833--844.
\betaibitem{DKMO} A.~DeSimone, S.~M\"uller, R.V.~Kohn and F.~Otto,
{\varepsilonm Recent analytical developments in micromagnetics}, In Giorgio
Bertotti and Isaak Mayergoyz, editors, The Science of Hysteresis,
volume 2, chapter 4, pages 269-381. Elsevier Academic Press, 2005.
\betaibitem{FM} I.~Fonseca and C.~Mantegazza, {\varepsilonm Second order singular perturbation models
for phase transitions}, SIAM J. Math. Anal. 31 (2000), no. 5,
1121--1143 (electronic).
\bibitem{FonP} I.~Fonseca and C.~Popovici, {\em Coupled singular perturbations for phase transitions},
Asymptotic Analysis {\bf 44} (2005), 299--325.
\betaibitem{evansbook} L.C.~Evans, Partial Differential Equations, Graduate
Studies in Mathematics, Vol.~{\betaf 19}, American Mathematical Society, 1998.
\betaibitem{evans} L.C.~Evans and R.F.~Gariepy, Measure Theory and Fine
Properties of Functions, Studies in Advanced Mathematics, CRC Press,
Boca Raton, FL, 1992.
\bibitem{gt} D.~Gilbarg and N.~Trudinger, Elliptic Partial Differential
Equations of Second Order, 2nd ed., Springer-Verlag,
Berlin-Heidelberg, 1983.
\betaibitem{giusti} E.~Giusti, Minimal Surfaces and Functions of Bounded
Variation, Monographs in Mathematics, {\betaf 80}, Birkh{\"a}user Verlag,
Basel, 1984.
\betaibitem{HaS} A.~Hubert and R.~Sch\"{a}fer, Magnetic domains, Springer, 1998.
\betaibitem{jin} W.~Jin and R.V.~Kohn, {\varepsilonm Singular perturbation and
the energy of folds}, J. Nonlinear Sci. {\betaf 10} (2000), 355--390.
\betaibitem{modica} L.~Modica,
{\varepsilonm The gradient theory of phase transitions and the minimal
interface criterion}, Arch. Rational Mech. Anal. {\betaf 98} (1987), 123--142.
\betaibitem{mm1} L.~Modica and S.~Mortola, {\varepsilonm Un esempio di $\Gammaamma
\sigmap{-}$-convergenza},
Boll. Un. Mat. Ital. B {\betaf 14 } (1977), 285--299.
\betaibitem{mm2} L.~Modica and S.~Mortola, {\varepsilonm Il limite nella $\Gammaamma
$-convergenza di una famiglia di funzionali ellittici},
Boll. Un. Mat. Ital. A {\betaf 14} (1977), 526--529.
\betaibitem{polgen} A.~Poliakovsky, {\varepsilonm A general technique to prove upper bounds for
singular perturbation problems}, Journal d'Analyse Mathematique,
{\betaf 104} (2008), no. 1, 247-290.
\betaibitem{P3} A.~Poliakovsky, {\varepsilonm Sharp upper bounds for a singular perturbation
problem related to micromagnetics}, Annali della Scuola Normale
Superiore di Pisa, Classe di Scienze. {\betaf 6} (2007), no. 4,
673--701.
\betaibitem{polmag} A.~Poliakovsky, {\varepsilonm Upper bounds for a class of energies containing a non-local
term}, , ESAIM: Control, Optimization and Calculus of Variations,
{\betaf 16} (2010), 856--886.
\betaibitem{polcras} A.~Poliakovsky, {\varepsilonm A method for establishing upper bounds for singular
perturbation problems}, C. R. Math. Acad. Sci. Paris 341 (2005), no.
2, 97--102.
\betaibitem{pol} A.~Poliakovsky, {\varepsilonm Upper bounds for singular perturbation problems involving gradient
fields}, J.~Eur.~Math.~Soc., {\betaf 9} (2007), 1--43.
\betaibitem{pollift} A.~Poliakovsky, {\varepsilonm On a singular perturbation problem related to optimal lifting
in BV-space}, Calculus of Variations and PDE, {\betaf
28} (2007), 411--426.
\betaibitem{P4} A.~Poliakovsky, {\varepsilonm On a
variational approach to the Method of Vanishing Viscosity for
Conservation Laws}, Advances in Mathematical Sciences and
Applications, {\betaf 18} (2008), no. 2., 429--451.
\betaibitem{PI} A.~Poliakovsky, {\varepsilonm On the $\Gammaamma$-limit of singular perturbation problems with
optimal profiles which are not one-dimensional. Part I: The upper
bound}, preprint
\betaibitem{PII} A.~Poliakovsky, {\varepsilonm On the $\Gammaamma$-limit of singular perturbation problems with
optimal profiles which are not one-dimensional. Part II: The lower
bound}, preprint
\betaibitem{PIII} A.~Poliakovsky, {\varepsilonm On the $\Gammaamma$-limit of singular perturbation problems with
optimal profiles which are not one-dimensional. Part III: The
energies with non local terms}, preprint
\betaibitem{RS1} T. Rivi\`{e}re and S. Serfaty, {\varepsilonm Limiting domain wall energy for a problem
related to micromagnetics}, Comm. Pure Appl. Math., 54 No 3 (2001),
294-338.
\betaibitem{RS2} T. Rivi\`{e}re and S. Serfaty, {\varepsilonm Compactness, kinetic formulation and entropies for a
problem related to mocromagnetics}, Comm. in Partial Differential
Equations 28 (2003), no. 1-2, 249-269.
\betaibitem{sternberg} P.~Sternberg,
{\varepsilonm The effect of a singular perturbation on nonconvex
variational problems}, Arch. Rational Mech. Anal. {\betaf 101} (1988), 209--260.
\betaibitem{vol} A.I.~Volpert and S.I.~Hudjaev, Analysis in Classes of Discontinuous Functions and
Equations of Mathematical Physics, Martinus Nijhoff Publishers,
Dordrecht, 1985.
\end{comment}
\begin{comment}
\alphappendix
\sigmaection{Appendix: Notations and basic results about $BV$-functions}
\label{sec:pre2}
\betaegin{itemize}
\item For given a real topological linear space $X$ we denote by $X^*$ the dual space (the space of continuous linear functionals from $X$ to $\varphiield{R}$).
\item For given $h\in X$ and $x^*\in X^*$ we denote by $\betaig<h,x^*\betaig>_{X\thetaimes X^*}$ the value in $\varphiield{R}$ of the functional $x^*$ on the vector $h$.
\item For given two normed linear spaces $X$ and $Y$ we denote by $\mathcal{L}(X;Y)$ the linear space of continuous (bounded) linear operators from $X$ to $Y$.
\item For given $A\in\mathcal{L}(X;Y)$ and $h\in X$ we denote by $A\cdot h$ the value in $Y$ of the operator $A$ on the vector $h$.
\item For given two reflexive Banach spaces $X,Y$ and
$S\in\mathcal{L}(X;Y)$ we denote by $S^*\in \mathcal{L}(Y^*;X^*)$
the corresponding adjoint operator, which satisfies
\betaegin{equation*}\betaig<x,S^*\cdot y^*\betaig>_{X\thetaimes X^*}:=\betaig<S\cdot
x,y^*\betaig>_{Y\thetaimes Y^*}\quad\quad\thetaext{for every}\; y^*\in
Y^*\;\thetaext{and}\;x\in X\,.
\varepsilonnd{equation*}
\item Given open set $G\sigmaubset\varphiield{R}^N$ we denote by
$\mathcal{D}(G,\varphiield{R}^d)$ the real topological linear space of
compactly supported $\varphiield{R}^d$-valued test functions i.e.
$C^\infty_c(G,\varphiield{R}^d)$ with the usual topology.
\item
We denote $\mathcal{D}'(G,\varphiield{R}^d):=\betaig\{\mathcal{D}(G,\varphiield{R}^d)\betaig\}^*$
(the space of $\varphiield{R}^d$ valued distributions in $G$).
\item
Given $h\in\mathcal{D}'(G,\varphiield{R}^d)$ and $\deltaelta\in\mathcal{D}(G,\varphiield{R}^d)$
we denote $<\deltaelta,h>:=\betaig<\deltaelta,h\betaig>_{\mathcal{D}(G,\varphiield{R}^d)\thetaimes
\mathcal{D}'(G,\varphiield{R}^d)}$ i.e. the value in $\varphiield{R}$ of the distribution
$h$ on the test function $\deltaelta$.
\item
Given a linear operator $v_\ec A\in\mathcal{L}(\varphiield{R}^d;\varphiield{R}^k)$ and a
distribution $h\in\mathcal{D}'(G,\varphiield{R}^d)$ we denote by $v_\ec A\cdot h$
the distribution in $\mathcal{D}'(G,\varphiield{R}^k)$ defined by
\betaegin{equation*}
<\deltaelta,v_\ec A \cdot h>:=<v_\ec A^*\cdot
\deltaelta,h>\quad\quad\varphiorall\deltaelta\in\mathcal{D}(G,\varphiield{R}^k).
\varepsilonnd{equation*}
\item
Given $h\in\mathcal{D}'(G,\varphiield{R}^d)$ and $\deltaelta\in\mathcal{D}(G,\varphiield{R})$ by
$<\deltaelta,h>$ we denote the vector in $\varphiield{R}^d$ which satisfy
$<\deltaelta,h>\cdot v_\ec e:=<\deltaeltav_\ec e,h>$ for every $v_\ec
e\in\varphiield{R}^d$.
\item
For a $p\thetaimes q$ matrix $A$ with $ij$-th entry $a_{ij}$ we denote
by $|A|=\betaigl(\Sigmaigma_{i=1}^{p}\Sigmaigma_{j=1}^{q}a_{ij}^2\betaigr)^{1/2}$
the Frobenius norm of $A$.
\item For two matrices $A,B\in\varphiield{R}^{p\thetaimes q}$ with $ij$-th entries
$a_{ij}$ and $b_{ij}$ respectively, we write\\
$A:B\,:=\,\sigmaum\limits_{i=1}^{p}\sigmaum\limits_{j=1}^{q}a_{ij}b_{ij}$.
\item For a $p\thetaimes q$ matrix $A$ with
$ij$-th entry $a_{ij}$ and for a $q\thetaimes d$ matrix $B$ with $ij$-th
entry $b_{ij}$ we denote by $AB:=A\cdot B$ their product, i.e. the
$p\thetaimes d$ matrix, with $ij$-th entry
$\sigmaum\limits_{k=1}^{q}a_{ik}b_{kj}$.
\item We identify
a $v_\ec u=(u_1,\ldots,u_q)\in\varphiield{R}^q$ with the $q\thetaimes 1$ matrix
having $i1$-th entry $u_i$, so that for a $p\thetaimes q$ matrix $A$
with $ij$-th entry $a_{ij}$ and for $v_\ec
v=(v_1,v_2,\ldots,v_q)\in\varphiield{R}^q$ we denote by $A\,v_\ec v :=A\cdotv_\ec
v$ the $p$-dimensional vector $v_\ec u=(u_1,\ldots,u_p)\in\varphiield{R}^p$,
given by $u_i=\sigmaum\limits_{k=1}^{q}a_{ik}v_k$ for every $1\leq i\leq
p$.
\item As usual $A^T$ denotes the transpose of the matrix $A$.
\item For
$v_\ec u=(u_1,\ldots,u_p)\in\varphiield{R}^p$ and $v_\ec
v=(v_1,\ldots,v_p)\in\varphiield{R}^p$ we denote by $v_\ec uv_\ec v:=v_\ec
u\cdotv_\ec v:=\sigmaum\limits_{k=1}^{p}u_k v_k$ the standard scalar
product. We also note that $v_\ec uv_\ec v=v_\ec u^Tv_\ec v=v_\ec v^Tv_\ec
u$ as products of matrices.
\item For $v_\ec
u=(u_1,\ldots,u_p)\in\varphiield{R}^p$ and $v_\ec v=(v_1,\ldots,v_q)\in\varphiield{R}^q$ we
denote by $v_\ec u\omegatimesv_\ec v$ the $p\thetaimes q$ matrix with $ij$-th
entry $u_i v_j$ (i.e. $v_\ec u\omegatimesv_\ec v=v_\ec u\,v_\ec v^T$ as
product of matrices).
\item For
any $p\thetaimes q$ matrix $A$ with $ij$-th entry $a_{ij}$ and $v_\ec
v=(v_1,v_2,\ldots,v_d)\in\varphiield{R}^d$ we denote by $A\omegatimesv_\ec v$ the
$p\thetaimes q\thetaimes d$ tensor with $ijk$-th entry $a_{ij}v_k$.
\item
Given a vector valued function
$f(x)=\betaig(f_1(x),\ldots,f_k(x)\betaig):\Omega\thetao\varphiield{R}^k$ ($\Omega\sigmaubset\varphiield{R}^N$) we
denote by $Df$ or by $\nabla_x f$ the $k\thetaimes N$ matrix with
$ij$-th entry $\varphirac{\partial f_i}{\partial x_j}$.
\item
Given a matrix valued function
$F(x):=\{F_{ij}(x)\}:\varphiield{R}^N\thetao\varphiield{R}^{k\thetaimes N}$ ($\Omega\sigmaubset\varphiield{R}^N$) we
denote by $div\,F$ the $\varphiield{R}^k$-valued vector field defined by
$div\,F:=(l_1,\ldots,l_k)$ where
$l_i=\sigmaum\limits_{j=1}^{N}\varphirac{\partial F_{ij}}{\partial x_j}$.
\item Given a
matrix valued function $F(x)=\betaig\{f_{ij}(x)\betaig\}(1\leq i\leq
p,\,1\leq j\leq q):\Omega\thetao\varphiield{R}^{p\thetaimes q}$ ($\Omega\sigmaubset\varphiield{R}^N$) we denote
by $DF$ or by $\nabla_x F$ the $p\thetaimes q\thetaimes N$ tensor with
$ijk$-th entry $\varphirac{\partial f_{ij}}{\partial x_k}$.
\item For every dimension $d$
we denote by $I$ the unit $d\thetaimes d$-matrix and by $O$ the null
$d\thetaimes d$-matrix.
\item Given a vector valued
measure $\mu=(\mu_1,\ldots,\mu_k)$ (where for any $1\leq j\leq k$,
$\mu_j$ is a finite signed measure) we denote by $\|\mu\|(E)$ its
total variation measure of the set $E$.
\item For any $\mu$-measurable function $f$, we define the product measure
$f\cdot\mu$ by: $f\cdot\mu(E)=\int_E f\,d\mu$, for every
$\mu$-measurable set $E$.
\item Throughout this paper we assume that
$\Omega\sigmaubset\varphiield{R}^N$ is an open set.
\varepsilonnd{itemize}
In what follows we present some known results on BV-spaces. We rely mainly on the book \cite{amb}
by Ambrosio, Fusco and Pallara. Other sources are the books
by Hudjaev and Volpert~\cite{vol}, Giusti~\cite{giusti} and Evans and Gariepy~\cite{evans}.
We begin by introducing some notation.
For every $v_\ec\nu\in S^{N-1}$ (the unit sphere in $\varphiield{R}^N$) and $R>0$
we set
\betaegin{align}
B_{R}^+(x,v_\ec\nu)&=\{y\in\varphiield{R}^N\,:\,|y-x|<R,\,
(y-x)\cdotv_\ec\nu>0\}\,,\label{eq:B+}\\
B_{R}^-(x,v_\ec\nu)&=\{y\in\varphiield{R}^N:|y-x|<R,\,
(y-x)\cdotv_\ec\nu<0\}\,,\label{eq:B-}\\
H_+(x,v_\ec\nu)&=\{y\in\varphiield{R}^N:(y-x)\cdotv_\ec\nu>0\}\,,\label{HN+}\\
H_-(x,v_\ec\nu)&=\{y\in\varphiield{R}^N:(y-x)\cdotv_\ec\nu<0\}\label{HN-}\\
\intertext{and} H^0_{v_\ec \nu}&=\{ y\in\varphiield{R}^N\,:\, y\cdotv_\ec
\nu=0\}\label{HN}\,.
\varepsilonnd{align}
Next we recall the definition of the space of functions with bounded
variation. In what follows, ${\mathcal L}^N$ denotes the Lebesgue measure in $\varphiield{R}^N$.
\betaegin{definition}
Let $\Omegamega$ be a domain in $\varphiield{R}^N$ and let $f\in L^1(\Omegamega,\varphiield{R}^m)$.
We say that $f\in BV(\Omegamega,\varphiield{R}^m)$ if
\betaegin{equation*}
\int_\Omegamega|Df|:=\sigmaup\betaigg\{\int_\Omegamega\sigmaum\limits_{k=1}^{m}f_k\Deltaiv\,\varphi_k\,d\mathcal{L}^N
:\;\varphi_k\in C^1_c(\Omegamega,\varphiield{R}^N)\;\varphiorall
k,\,\sigmaum\limits_{k=1}^{m}|\varphi_k(x)|^2\leq 1\;\varphiorall
x\in\Omegamega\betaigg\}
\varepsilonnd{equation*}
is finite. In this case we define the BV-norm of $f$ by
$\|f\|_{BV}:=\|f\|_{L^1}+\int_\Omegamega|D f|$.
\varepsilonnd{definition}
We recall below some basic notions in Geometric Measure Theory (see
\cite{amb}).
\betaegin{definition}\label{defjac889878}
Let $\Omegamega$ be a domain in $\varphiield{R}^N$. Consider a function
$f\in L^1_{loc}(\Omegamega,\varphiield{R}^m)$ and a point $x\in\Omegamega$.\\
i) We say that $x$ is a point of {\varepsilonm approximate continuity} of $f$
if there exists $z\in\varphiield{R}^m$ such that
$$\lim\limits_{\rho\thetao 0^+}\varphirac{\int_{B_\rho(x)}|f(y)-z|\,dy}
{\mathcal{L}^N\betaig(B_\rho(x)\betaig)}=0\,.
$$
In this case $z$ is called an {\varepsilonm approximate limit} of $f$ at $x$
and
we denote $z$ by $\thetailde{f}(x)$. The set of points of approximate continuity of
$f$ is denoted by $G_f$.\\
ii) We say that $x$ is an {\varepsilonm approximate jump point} of $f$ if
there exist $a,b\in\varphiield{R}^m$ and $v_\ec\nu\in S^{N-1}$ such that $a\neq
b$ and \betaegin{equation}\label{aprplmin} \lim\limits_{\rho\thetao
0^+}\varphirac{\int_{B_\rho^+(x,v_\ec\nu)}|f(y)-a|\,dy}
{\mathcal{L}^N\betaig(B_\rho(x)\betaig)}=0,\quad \lim\limits_{\rho\thetao
0^+}\varphirac{\int_{B_\rho^-(x,v_\ec\nu)}|f(y)-b|\,dy}
{\mathcal{L}^N\betaig(B_\rho(x)\betaig)}=0.
\varepsilonnd{equation}
The triple $(a,b,v_\ec\nu)$, uniquely determined by \varepsilonqref{aprplmin}
up to a permutation of $(a,b)$ and a change of sign of $v_\ec\nu$, is
denoted by $(f^+(x),f^-(x),v_\ec\nu_f(x))$. We shall call
$v_\ec\nu_f(x)$ the {\varepsilonm approximate jump vector} and we shall
sometimes write simply $v_\ec\nu(x)$ if the reference to the function
$f$ is clear. The set of approximate jump points is denoted by
$J_f$. A choice of $v_\ec\nu(x)$ for every $x\in J_f$ (which is
unique up to sign) determines an orientation of $J_f$. At a point of
approximate continuity $x$, we shall use the convention
$f^+(x)=f^-(x)=\thetailde f(x)$.
\varepsilonnd{definition}
We recall the following results on BV-functions that we shall use in
the sequel. They are all taken from \cite{amb}. In all of them
$\Omegamega$ is a domain in $\varphiield{R}^N$ and $f$ belongs to $BV(\Omegamega,\varphiield{R}^m)$.
\betaegin{theorem}[Theorems 3.69 and 3.78 from \cite{amb}]\label{petTh}
$ $ \\i) $\mathcal{H}^{N-1}$-almost every point in
$\Omegamega\sigmaetminus J_f$ is a point of approximate continuity of $f$.\\
ii) The set $J_f$ is a countably $\mathcal{H}^{N-1}$-rectifiable
Borel set, oriented by $v_\ec\nu(x)$. In other words, $J_f$ is
$\sigmaigma$-finite with respect to $\mathcal{H}^{N-1}$, there exist
countably many $C^1$ hypersurfaces $\{S_k\}^{\infty}_{k=1}$ such
that
$\mathcal{H}^{N-1}\Big(J_f\sigmaetminus\betaigcup\limits_{k=1}^{\infty}S_k\Big)=0$,
and for $\mathcal{H}^{N-1}$-almost every $x\in J_f\cap S_k$, the
approximate jump vector $v_\ec\nu(x)$ is normal to $S_k$ at the
point $x$.\\iii) $\betaig[(f^+-f^-)\omegatimesv_\ec\nu_f\betaig](x)\in
L^1(J_f,d\mathcal{H}^{N-1})$.
\varepsilonnd{theorem}
\betaegin{theorem}[Theorems 3.92 and 3.78 from \cite{amb}]\label{vtTh}
The distributional gradient
$D f$ can be decomposed
as a sum of three Borel regular finite matrix-valued measures on
$\Omegamega$,
\betaegin{equation*}
D f=D^a f+D^c f+D^j f
\varepsilonnd{equation*}
with
\betaegin{equation*}
D^a f=(\nabla f)\,\mathcal{L}^N ~\thetaext{ and }~ D^j f=(f^+-f^-)\omegatimesv_\ec\nu_f
\mathcal{H}^{N-1}\llcorner J_f\,.
\varepsilonnd{equation*}
$D^a$, $D^c$ and $D^j$ are called absolutely continuous part, Cantor
and jump part of $D f$, respectively, and $\nabla f\in
L^1(\Omegamega,\varphiield{R}^{m\thetaimes N})$ is the approximate differential of $f$.
The three parts are mutually singular to each other. Moreover we
have the
following properties:\\
i) The support of $D^cf$ is concentrated on a set of
$\mathcal{L}^N$-measure zero, but $(D^c f) (B)=0$ for any Borel set
$B\sigmaubset\Omegamega$ which is $\sigmaigma$-finite with respect to
$\mathcal{H}^{N-1}$;\\ii) $[D^a f]\betaig(f^{-1}(H)\betaig)=0$ and $[D^c
f]\betaig(\thetailde f^{-1}(H)\betaig)=0$ for every $H\sigmaubset\varphiield{R}^m$ satisfying
$\mathcal{H}^1(H)=0$.
\varepsilonnd{theorem}
\betaegin{theorem}[Volpert chain rule, Theorems 3.96 and 3.99 from \cite{amb}]\label{trTh}
Let $\Phi\in C^1(\varphiield{R}^m,\varphiield{R}^q)$ be a Lipschitz function satisfying
$\Phi(0)=0$ if $|\Omegamega|=\infty$. Then, $v(x)=(\Phi\circ f)(x)$
belongs to $BV(\Omegamega,\varphiield{R}^q)$ and we have
\betaegin{equation*}\betaegin{split}
D^a v = \nabla\Phi(f)\,\nabla f\,\mathcal{L}^N,\; D^c v =
\nabla\Phi(\thetailde f)\,D^c f,\; D^j v =
\betaig[\Phi(f^+)-\Phi(f^-)\betaig]\omegatimesv_\ec\nu_f\,
\mathcal{H}^{N-1}\llcorner J_f\,.
\varepsilonnd{split}
\varepsilonnd{equation*}
\varepsilonnd{theorem}
We also recall that the trace operator $T$ is a continuous map from
$BV(\Omegamega)$, endowed with the strong topology (or more generally,
the topology induced by strict convergence), to
$L^1(\partial\Omegamega,{\mathcal H}^{N-1}\llcorner\partial\Omegamega)$,
provided that $\Omegamega$ has a bounded Lipschitz boundary (see
\cite[Theorems 3.87 and 3.88]{amb}).
\end{comment}
\appendix
\section{Notations and basic results about $BV$-functions}
\label{sec:pre}
\noindent$\bullet$ For a given real topological linear space $X$ we
denote by $X^*$ the dual space (the space of continuous linear
functionals from $X$ to $\field{R}$).
\noindent$\bullet$ For given $h\in X$ and $x^*\in X^*$ we denote by
$\big<h,x^*\big>_{X\times X^*}$ the value in $\field{R}$ of the functional
$x^*$ on the vector $h$.
\noindent$\bullet$ For two given normed linear spaces $X$ and $Y$ we
denote by $\mathcal{L}(X;Y)$ the linear space of continuous
(bounded) linear operators from $X$ to $Y$.
\noindent$\bullet$ For given $A\in\mathcal{L}(X;Y)$ and $h\in X$ we
denote by $A\cdot h$ the value in $Y$ of the operator $A$ on the
vector $h$.
\noindent$\bullet$ For two given reflexive Banach spaces $X,Y$ and
$S\in\mathcal{L}(X;Y)$ we denote by $S^*\in \mathcal{L}(Y^*;X^*)$
the corresponding adjoint operator, which satisfies
\begin{equation*}\big<x,S^*\cdot y^*\big>_{X\times X^*}:=\big<S\cdot
x,y^*\big>_{Y\times Y^*}\quad\quad\text{for every}\; y^*\in
Y^*\;\text{and}\;x\in X\,.
\end{equation*}
\noindent$\bullet$ Given an open set $G\subset\field{R}^N$ we denote by
$\mathcal{D}(G,\field{R}^d)$ the real topological linear space of
compactly supported $\field{R}^d$-valued test functions, i.e.
$C^\infty_c(G,\field{R}^d)$ with the usual topology.
\noindent$\bullet$ Denote
$\mathcal{D}'(G,\field{R}^d):=\big\{\mathcal{D}(G,\field{R}^d)\big\}^*$ (the space
of $\field{R}^d$-valued distributions in $G$).
\noindent$\bullet$ Given $h\in\mathcal{D}'(G,\field{R}^d)$ and
$\delta\in\mathcal{D}(G,\field{R}^d)$ we denote the value in $\field{R}$ of the
distribution $h$ on the test function $\delta$ by
$<\delta,h>:=\big<\delta,h\big>_{\mathcal{D}(G,\field{R}^d)\times
\mathcal{D}'(G,\field{R}^d)}$.
\noindent$\bullet$ Given a linear operator $\vec
A\in\mathcal{L}(\field{R}^d;\field{R}^k)$ and a distribution
$h\in\mathcal{D}'(G,\field{R}^d)$ we denote by $\vec A\cdot h$ the
distribution in $\mathcal{D}'(G,\field{R}^k)$ defined by
\begin{equation*}
<\delta,\vec A \cdot h>:=<\vec A^*\cdot
\delta,h>\quad\quad\forall\delta\in\mathcal{D}(G,\field{R}^k).
\end{equation*}
\noindent$\bullet$ Given $h\in\mathcal{D}'(G,\field{R}^d)$ and
$\delta\in\mathcal{D}(G,\field{R})$ by $<\delta,h>$ we denote the vector in
$\field{R}^d$ which satisfies $<\delta,h>\cdot \vec e:=<\delta\vec e,h>$ for
every $\vec e\in\field{R}^d$.
\noindent$\bullet$ For a $p\times q$ matrix $A$ with $ij$-th entry
$a_{ij}$ and for a $q\times d$ matrix $B$ with $ij$-th entry
$b_{ij}$ we denote by $AB:=A\cdot B$ their product, i.e. the
$p\times d$ matrix, with $ij$-th entry
$\sum\limits_{k=1}^{q}a_{ik}b_{kj}$.
\noindent$\bullet$ We identify a $\vec u=(u_1,\ldots,u_q)\in\field{R}^q$
with the $q\times 1$ matrix having $i1$-th entry $u_i$, so that for
a $p\times q$ matrix $A$ with $ij$-th entry $a_{ij}$ and for $\vec
v=(v_1,v_2,\ldots,v_q)\in\field{R}^q$ we denote by $A\,\vec v :=A\cdot\vec
v$ the $p$-dimensional vector $\vec u=(u_1,\ldots,u_p)\in\field{R}^p$,
given by $u_i=\sum\limits_{k=1}^{q}a_{ik}v_k$ for every $1\leq i\leq
p$.
\noindent$\bullet$ As usual, $A^T$ denotes the transpose of the
matrix $A$.
\noindent$\bullet$ For $\vec u=(u_1,\ldots,u_p)\in\field{R}^p$ and $\vec
v=(v_1,\ldots,v_p)\in\field{R}^p$ we denote by $\vec u\vec v:=\vec
u\cdot\vec v:=\sum\limits_{k=1}^{p}u_k v_k$ the standard scalar
product. We also note that $\vec u\vec v=\vec u^T\vec v=\vec v^T\vec
u$ as products of matrices.
\noindent$\bullet$ For $\vec u=(u_1,\ldots,u_p)\in\field{R}^p$ and $\vec
v=(v_1,\ldots,v_q)\in\field{R}^q$ we denote by $\vec u\otimes\vec v$ the
$p\times q$ matrix with $ij$-th entry $u_i v_j$.
\noindent$\bullet$ For any $p\times q$ matrix $A$ with $ij$-th entry
$a_{ij}$ and $\vec v=(v_1,v_2,\ldots,v_d)\in\field{R}^d$ we denote by
$A\otimes\vec v$ the $p\times q\times d$ tensor with $ijk$-th entry
$a_{ij}v_k$.
\noindent$\bullet$ Given a vector valued function
$f(x)=\big(f_1(x),\ldots,f_k(x)\big):\Omega\to\field{R}^k$ ($\Omega\subset\field{R}^N$) we
denote by $Df$ or by $\nabla_x f$ the $k\times N$ matrix with
$ij$-th entry $\frac{\partial f_i}{\partial x_j}$.
\noindent$\bullet$ Given a matrix valued function
$F(x):=\{F_{ij}(x)\}:\field{R}^N\to\field{R}^{k\times N}$ ($\Omega\subset\field{R}^N$), we
denote $div\,F:=(l_1,\ldots,l_k)\in\field{R}^k$, where
$l_i=\sum\limits_{j=1}^{N}\frac{\partial F_{ij}}{\partial x_j}$.
\noindent$\bullet$ Given a matrix valued function
$F(x)=\big\{f_{ij}(x)\big\}(1\leq i\leq p,\,1\leq j\leq
q):\Omega\to\field{R}^{p\times q}$ ($\Omega\subset\field{R}^N$) we denote by $DF$ or by
$\nabla_x F$ the $p\times q\times N$ tensor with $ijk$-th entry
$\frac{\partial f_{ij}}{\partial x_k}$.
\noindent$\bullet$ Given a vector measure $\mu=(\mu_1,\ldots,\mu_k)$
(where, for every $j=1,\ldots,k$, $\mu_j$ is a finite signed measure) we
denote by $\|\mu\|(E)$ the total variation of $\mu$ on the set $E$.
\noindent$\bullet$ For any $\mu$-measurable function $f$, we define
the product measure $f\cdot\mu$ by: $f\cdot\mu(E)=\int_E f\,d\mu$,
for every $\mu$-measurable set $E$.
In what follows we present some known results on BV-spaces. We rely mainly on the book \cite{amb}
by Ambrosio, Fusco and Pallara. We begin by introducing some
notation. For every $\vec\nu\in S^{N-1}$ (the unit sphere in $\field{R}^N$)
and $R>0$ we set
\begin{align}
B_{R}^+(x,\vec\nu)&=\{y\in\field{R}^N\,:\,|y-x|<R,\,
(y-x)\cdot\vec\nu>0\}\,,\label{eq:B+}\\
B_{R}^-(x,\vec\nu)&=\{y\in\field{R}^N:|y-x|<R,\,
(y-x)\cdot\vec\nu<0\}\,.\label{eq:B-}
\end{align}
\begin{definition}
Let $\Omega$ be a domain in $\field{R}^N$ and let $f\in L^1(\Omega,\field{R}^m)$.
We say that $f\in BV(\Omega,\field{R}^m)$ if the following quantity is
finite:
\begin{equation*}
\int_\Omega|Df|:= \sup\bigg\{\int_\Omega f\cdot\Div\varphi \,dx :\,\varphi\in
C^1_c(\Omega,\field{R}^{m\times N}),\;|\varphi(x)|\leq 1\;\forall x \bigg\}.
\end{equation*}
\end{definition}
\begin{definition}\label{defjac889878}
Let $\Omega$ be a domain in $\field{R}^N$. Consider a function
$f\in L^1_{loc}(\Omega,\field{R}^m)$ and a point $x\in\Omega$.\\
i) We say that $x$ is an {\em approximate continuity point} of $f$
if there exists $z\in\field{R}^m$ such that
$$\lim\limits_{\rho\to
0^+}\frac{\int_{B_\rho(x)}|f(y)-z|\,dy} {\rho^N}=0.$$
In this case
we denote $z$ by $\tilde{f}(x)$. The set of approximate continuity
points of
$f$ is denoted by $G_f$.\\
ii) We say that $x$ is an {\em approximate jump point} of $f$ if
there exist $a,b\in\field{R}^m$ and $\vec\nu\in S^{N-1}$ such that $a\neq
b$ and
\begin{equation*}
\lim\limits_{\rho\to
0^+}\frac{\int_{B_\rho^+(x,\vec\nu)}|f(y)-a|\,dy}
{\mathcal{L}^N\big(B_\rho(x)\big)}=0,\quad \lim\limits_{\rho\to
0^+}\frac{\int_{B_\rho^-(x,\vec\nu)}|f(y)-b|\,dy}
{\mathcal{L}^N\big(B_\rho(x)\big)}=0.
\end{equation*}
The triple $(a,b,\vec\nu)$, uniquely determined, up to a permutation
of $(a,b)$ and a change of sign of $\vec\nu$, is denoted by
$(f^+(x),f^-(x),\vec\nu_f(x))$. We shall call $\vec\nu_f(x)$ the
{\em approximate jump vector} and we shall sometimes write simply
$\vec\nu(x)$ if the reference to the function $f$ is clear. The set
of approximate jump points is denoted by $J_f$. A choice of
$\vec\nu(x)$ for every $x\in J_f$ determines an orientation of
$J_f$. At an approximate continuity point $x$, we shall use the
convention $f^+(x)=f^-(x)=\tilde f(x)$.
\end{definition}
\begin{theorem}[Theorems 3.69 and 3.78 from \cite{amb}]\label{petTh}
Consider an open set $\Omega\subset\field{R}^N$ and $f\in BV(\Omega,\field{R}^m)$.
Then:\\
\noindent i) $\mathcal{H}^{N-1}$-a.e. point in
$\Omega\setminus J_f$ is a point of approximate continuity of $f$.\\
\noindent ii) The set $J_f$ is a countably
$\mathcal{H}^{N-1}$-rectifiable Borel set, oriented by
$\vec\nu(x)$. I.e., $J_f$ is $\sigma$-finite with respect to
$\mathcal{H}^{N-1}$, there exist countably many $C^1$ hypersurfaces
$\{S_k\}^{\infty}_{k=1}$ such that
$\mathcal{H}^{N-1}\Big(J_f\setminus\bigcup\limits_{k=1}^{\infty}S_k\Big)=0$,
and for $\mathcal{H}^{N-1}$-a.e. $x\in J_f\cap S_k$, the approximate
jump vector $\vec\nu(x)$ is normal to $S_k$ at the point $x$.
\\ \noindent iii)
$\big[(f^+-f^-)\otimes\vec\nu_f\big](x)\in
L^1(J_f,d\mathcal{H}^{N-1})$.
\end{theorem}
\begin{comment}
\begin{thebibliography}{66}
\betaibitem{ARS} F.~Alouges, T.~Rivière, S.~Serfaty, {\varepsilonm N\'{e}el and cross-tie wall energies
for planar micromagnetic configurations},
ESAIM Control Optim. Calc. Var. {\betaf 8} (2002), 31--68.
\betaibitem{ambrosio} L.~Ambrosio, {\varepsilonm Metric space valued functions of
bounded variation},
Ann. Scuola Norm. Sup. Pisa Cl. Sci. (4) {\betaf 17} (1990), 439--478.
\betaibitem{adm} L.~Ambrosio, C.~De Lellis and C.~ Mantegazza, {\varepsilonm Line
energies for gradient vector fields in the plane}, Calc. Var. PDE
{\betaf 9 }(1999), 327--355.
\betaibitem{amb} L.~Ambrosio, N.~Fusco and D.~Pallara, Functions of
Bounded Variation and Free Discontinuity Problems, Oxford
Mathematical Monographs. Oxford University Press, New York, 2000.
\betaibitem{ag1} P.~Aviles and Y.~Giga, {\varepsilonm A mathematical problem related to the physical theory of liquid
crystal configurations}, Proc. Centre Math. Anal. Austral. Nat. Univ. {\betaf 12} (1987), 1--16.
\betaibitem{ag2} P.~Aviles and Y.~Giga, {\varepsilonm On lower semicontinuity of a
defect energy obtained by a singular limit of the
Ginzburg-Landau type energy for gradient
fields}, Proc. Roy. Soc. Edinburgh Sect. A {\betaf 129} (1999), 1--17.
\betaibitem{CdL} Sergio Conti and Camillo de Lellis, {\varepsilonm Sharp upper bounds for a variational problem with singular
perturbation}, Math. Ann. {\betaf 338} (2007), no. 1, 119--146.
\betaibitem{contiS} Sergio Conti and Ben Schweizer {\varepsilonm A sharp-interface limit for a
two-well problem in geometrically linear elasticity}. Arch. Ration.
Mech. Anal. 179 (2006), no. 3, 413--452.
\betaibitem{contiS1} Sergio Conti and Ben Schweizer {\varepsilonm Rigidity and Gamma convergence for solid-solid phase
transitions with $SO(2)$-invariance}, Comm. Pure Appl. Math. 59
(2006), no. 6, 830--868.
\betaibitem{contiFL} S. Conti, I. Fonseca, G. Leoni {\varepsilonm A $\Gammaamma$-convergence
result for the two-gradient theory of phase transitions}, Comm. Pure
Appl. Math. {\betaf 55} (2002), pp. 857-936.
\betaibitem{conti} S.~Conti and C.~De Lellis, {\varepsilonm Sharp upper bounds for a variational problem
with singular perturbation}, Math. Ann. 338 (2007), no. 1, 119--146.
\betaibitem{dl} C.~De Lellis, {\varepsilonm An example in the gradient theory of phase
transitions} ESAIM Control Optim. Calc. Var. {\betaf 7} (2002),
285--289 (electronic).
\betaibitem{otto} A.~DeSimone, S.~M\"uller, R.V.~Kohn and F.~Otto, {\varepsilonm A compactness result
in the gradient theory of phase transitions}, Proc. Roy. Soc.
Edinburgh Sect. A {\betaf 131} (2001), 833--844.
\betaibitem{DKMO} A.~DeSimone, S.~M\"uller, R.V.~Kohn and F.~Otto,
{\varepsilonm Recent analytical developments in micromagnetics}, In Giorgio
Bertotti and Isaak Mayergoyz, editors, The Science of Hysteresis,
volume 2, chapter 4, pages 269-381. Elsevier Academic Press, 2005.
\betaibitem{FM} I.~Fonseca and C.~Mantegazza, {\varepsilonm Second order singular perturbation models
for phase transitions}, SIAM J. Math. Anal. 31 (2000), no. 5,
1121--1143 (electronic).
\betaibitem{FonP} I.~Fonseca, C.~Popovici, {\varepsilonm Coupled singular perturbations for phase transitions},
Assymptotic Analisis {\betaf 44} (2005), 299-325.
\betaibitem{evansbook} L.C.~Evans, Partial Differential Equations, Graduate
Studies in Mathematics, Vol.~{\betaf 19}, American Mathematical Society, 1998.
\betaibitem{evans} L.C.~Evans and R.F.~Gariepy, Measure Theory and Fine
Properties of Functions, Studies in Advanced Mathematics, CRC Press,
Boca Raton, FL, 1992.
\betaibitem{gt} D.~Gilbarg and N.~Trudinger, Elliptic Partial Differential
Equations of Elliptic Type, 2nd ed., Springer-Verlag,
Berlin-Heidelberg, 1983.
\betaibitem{giusti} E.~Giusti, Minimal Surfaces and Functions of Bounded
Variation, Monographs in Mathematics, {\betaf 80}, Birkh{\"a}user Verlag,
Basel, 1984.
\betaibitem{HaS} A.~Hubert and R.~Sch\"{a}fer, Magnetic domains, Springer, 1998.
\betaibitem{jin} W.~Jin and R.V.~Kohn, {\varepsilonm Singular perturbation and
the energy of folds}, J. Nonlinear Sci. {\betaf 10} (2000), 355--390.
\betaibitem{CDFO} C.~De Lellis, F.~Otto {\varepsilonm Structure of entropy solutions to the eikonal equation}
J. Eur. Math. Soc. {\betaf 5}, (2003), 107--145.
\betaibitem{modica} L.~Modica,
{\varepsilonm The gradient theory of phase transitions and the minimal
interface criterion}, Arch. Rational Mech. Anal. {\betaf 98} (1987), 123--142.
\betaibitem{mm1} L.~Modica and S.~Mortola, {\varepsilonm Un esempio di $\Gammaamma
\sigmap{-}$-convergenza},
Boll. Un. Mat. Ital. B {\betaf 14 } (1977), 285--299.
\betaibitem{mm2} L.~Modica and S.~Mortola, {\varepsilonm Il limite nella $\Gammaamma
$-convergenza di una famiglia di funzionali ellittici},
Boll. Un. Mat. Ital. A {\betaf 14} (1977), 526--529.
\betaibitem{polgen} A.~Poliakovsky, {\varepsilonm A general technique to prove upper bounds for
singular perturbation problems}, Journal d'Analyse Mathematique,
{\betaf 104} (2008), no. 1, 247-290.
\betaibitem{P3} A.~Poliakovsky, {\varepsilonm Sharp upper bounds for a singular perturbation
problem related to micromagnetics}, Annali della Scuola Normale
Superiore di Pisa, Classe di Scienze. {\betaf 6} (2007), no. 4,
673--701.
\betaibitem{polmag} A.~Poliakovsky, {\varepsilonm Upper bounds for a class of energies containing a non-local
term}, , ESAIM: Control, Optimization and Calculus of Variations,
{\betaf 16} (2010), 856--886.
\betaibitem{polcras} A.~Poliakovsky, {\varepsilonm A method for establishing upper bounds for singular
perturbation problems}, C. R. Math. Acad. Sci. Paris 341 (2005), no.
2, 97--102.
\betaibitem{pol} A.~Poliakovsky, {\varepsilonm Upper bounds for singular perturbation problems involving gradient
fields}, J.~Eur.~Math.~Soc., {\betaf 9} (2007), 1--43.
\betaibitem{pollift} A.~Poliakovsky, {\varepsilonm On a singular perturbation problem related to optimal lifting
in BV-space}, Calculus of Variations and PDE, {\betaf
28} (2007), 411--426.
\betaibitem{P4} A.~Poliakovsky, {\varepsilonm On a
variational approach to the Method of Vanishing Viscosity for
Conservation Laws}, Advances in Mathematical Sciences and
Applications, {\betaf 18} (2008), no. 2., 429--451.
\betaibitem{PI} A.~Poliakovsky, {\varepsilonm On the $\Gammaamma$-limit of singular perturbation problems with
optimal profiles which are not one-dimensional. Part I: The upper
bound}, preprint, http://arxiv.org/abs/1112.2305
\betaibitem{PIII} A.~Poliakovsky, {\varepsilonm On the $\Gammaamma$-limit of singular perturbation problems with
optimal profiles which are not one-dimensional. Part III: The
energies with non local terms}, preprint,
http://arxiv.org/abs/1112.2971
\betaibitem{RS1} T. Rivi\`{e}re and S. Serfaty, {\varepsilonm Limiting domain wall energy for a problem
related to micromagnetics}, Comm. Pure Appl. Math., 54 No 3 (2001),
294-338.
\betaibitem{RS2} T. Rivi\`{e}re and S. Serfaty, {\varepsilonm Compactness, kinetic formulation and entropies for a
problem related to mocromagnetics}, Comm. in Partial Differential
Equations 28 (2003), no. 1-2, 249-269.
\betaibitem{sternberg} P.~Sternberg,
{\varepsilonm The effect of a singular perturbation on nonconvex
variational problems}, Arch. Rational Mech. Anal. {\betaf 101} (1988), 209--260.
\betaibitem{vol} A.I.~Volpert and S.I.~Hudjaev, Analysis in Classes of Discontinuous Functions and
Equations of Mathematical Physics, Martinus Nijhoff Publishers,
Dordrecht, 1985.
\end{thebibliography}
\end{comment}
\begin{thebibliography}{66}
\bibitem{ambrosio} L.~Ambrosio, {\em Metric space valued functions of
bounded variation},
Ann. Scuola Norm. Sup. Pisa Cl. Sci. (4) {\bf 17} (1990), 439--478.
\bibitem{adm} L.~Ambrosio, C.~De Lellis and C.~Mantegazza, {\em Line
energies for gradient vector fields in the plane}, Calc. Var. PDE
{\bf 9} (1999), 327--355.
\bibitem{amb} L.~Ambrosio, N.~Fusco and D.~Pallara, Functions of
Bounded Variation and Free Discontinuity Problems, Oxford
Mathematical Monographs. Oxford University Press, New York, 2000.
\bibitem{ag1} P.~Aviles and Y.~Giga, {\em A mathematical problem related to the physical theory of liquid
crystal configurations}, Proc. Centre Math. Anal. Austral. Nat. Univ. {\bf 12} (1987), 1--16.
\bibitem{ag2} P.~Aviles and Y.~Giga, {\em On lower semicontinuity of a
defect energy obtained by a singular limit of the
Ginzburg-Landau type energy for gradient
fields}, Proc. Roy. Soc. Edinburgh Sect. A {\bf 129} (1999), 1--17.
\bibitem{CdL} Sergio Conti, Camillo de Lellis, {\em Sharp upper bounds for a variational problem with singular
perturbation}, Math. Ann. {\bf 338} (2007), no. 1, 119--146.
\bibitem{contiS} Sergio Conti, Ben Schweizer, {\em A sharp-interface limit for a
two-well problem in geometrically linear elasticity}. Arch. Ration.
Mech. Anal. 179 (2006), no. 3, 413--452.
\bibitem{contiS1} Sergio Conti, Ben Schweizer, {\em Rigidity and Gamma convergence for solid-solid phase
transitions with $SO(2)$-invariance}, Comm. Pure Appl. Math. 59
(2006), no. 6, 830--868.
\bibitem{contiFL} S. Conti, I. Fonseca, G. Leoni, {\em A $\Gamma$-convergence
result for the two-gradient theory of phase transitions}, Comm. Pure
Appl. Math. {\bf 55} (2002), pp. 857-936.
\bibitem{otto} A.~DeSimone, S.~M\"uller, R.V.~Kohn and F.~Otto, {\em A compactness result
in the gradient theory of phase transitions}, Proc. Roy. Soc.
Edinburgh Sect. A {\bf 131} (2001), 833--844.
\bibitem{FM} I.~Fonseca and C.~Mantegazza, {\em Second order singular perturbation models
for phase transitions}, SIAM J. Math. Anal. 31 (2000), no. 5,
1121--1143 (electronic).
\bibitem{FonP} I.~Fonseca, C.~Popovici, {\em Coupled singular perturbations for phase transitions},
Asymptotic Analysis {\bf 44} (2005), 299-325.
\bibitem{gt} D.~Gilbarg and N.~Trudinger, Elliptic Partial Differential
Equations of Second Order, 2nd ed., Springer-Verlag,
Berlin-Heidelberg, 1983.
\bibitem{CDFO} C.~De Lellis, F.~Otto, {\em Structure of entropy solutions to the eikonal equation},
J. Eur. Math. Soc. {\bf 5} (2003), 107--145.
\bibitem{modica} L.~Modica,
{\em The gradient theory of phase transitions and the minimal
interface criterion}, Arch. Rational Mech. Anal. {\bf 98} (1987), 123--142.
\bibitem{mm1} L.~Modica and S.~Mortola, {\em Un esempio di $\Gamma
\sp{-}$-convergenza},
Boll. Un. Mat. Ital. B {\bf 14} (1977), 285--299.
\bibitem{mm2} L.~Modica and S.~Mortola, {\em Il limite nella $\Gamma
$-convergenza di una famiglia di funzionali ellittici},
Boll. Un. Mat. Ital. A {\bf 14} (1977), 526--529.
\bibitem{polgen} A.~Poliakovsky, {\em A general technique to prove upper bounds for
singular perturbation problems}, Journal d'Analyse Mathematique,
{\bf 104} (2008), no. 1, 247-290.
\bibitem{P3} A.~Poliakovsky, {\em Sharp upper bounds for a singular perturbation
problem related to micromagnetics}, Annali della Scuola Normale
Superiore di Pisa, Classe di Scienze. {\bf 6} (2007), no. 4,
673--701.
\bibitem{polmag} A.~Poliakovsky, {\em Upper bounds for a class of energies containing a non-local
term}, ESAIM: Control, Optimization and Calculus of Variations,
{\bf 16} (2010), 856--886.
\bibitem{polcras} A.~Poliakovsky, {\em A method for establishing upper bounds for singular
perturbation problems}, C. R. Math. Acad. Sci. Paris 341 (2005), no.
2, 97--102.
\bibitem{pol} A.~Poliakovsky, {\em Upper bounds for singular perturbation problems involving gradient
fields}, J.~Eur.~Math.~Soc., {\bf 9} (2007), 1--43.
\bibitem{pollift} A.~Poliakovsky, {\em On a singular perturbation problem related to optimal lifting
in BV-space}, Calculus of Variations and PDE, {\bf
28} (2007), 411--426.
\bibitem{P4} A.~Poliakovsky, {\em On a
variational approach to the Method of Vanishing Viscosity for
Conservation Laws}, Advances in Mathematical Sciences and
Applications, {\bf 18} (2008), no. 2, 429--451.
\bibitem{PI} A.~Poliakovsky, {\em On the $\Gamma$-limit of singular perturbation problems with
optimal profiles which are not one-dimensional. Part I: The upper
bound}, preprint, http://arxiv.org/abs/1112.2305
\bibitem{PIII} A.~Poliakovsky, {\em On the $\Gamma$-limit of singular perturbation problems with
optimal profiles which are not one-dimensional. Part III: The
energies with non local terms}, preprint,
http://arxiv.org/abs/1112.2971
\bibitem{RS1} T. Rivi\`{e}re and S. Serfaty, {\em Limiting domain wall energy for a problem
related to micromagnetics}, Comm. Pure Appl. Math., 54 No 3 (2001),
294-338.
\bibitem{RS2} T. Rivi\`{e}re and S. Serfaty, {\em Compactness, kinetic formulation and entropies for a
problem related to micromagnetics}, Comm. in Partial Differential
Equations 28 (2003), no. 1-2, 249-269.
\bibitem{sternberg} P.~Sternberg,
{\em The effect of a singular perturbation on nonconvex
variational problems}, Arch. Rational Mech. Anal. {\bf 101} (1988), 209--260.
\end{thebibliography}
\end{document}
\begin{document}
\begin{abstract}
We study the distribution of the zeroes of the zeta functions of the family of Artin-Schreier covers of the projective line over $\mathbb{F}_q$ when $q$ is fixed and the genus goes to infinity. We consider both the global and the mesoscopic regimes, proving that when the genus goes to infinity, the number of zeroes with angles in a prescribed non-trivial subinterval of $[-\pi,\pi)$ has a standard Gaussian distribution (when properly normalized).
\end{abstract}
\keywords{Artin-Schreier covers, finite fields, distribution of zeroes of $L$-functions of curves.}
\subjclass[2010]{Primary 11G20; Secondary 11M50, 14G15}
\title{Distribution of zeta zeroes of Artin--Schreier covers}
\section{Introduction}
Recently there has been a great deal of interest in statistics for numbers of rational points on
curves over finite fields, where the curve varies in a certain family but is always defined over
a fixed finite field. This is in contrast to situations studied using Deligne's equidistribution theorem \cite{deligne, deligne2}, which requires the size of the finite field to go to infinity, and which tends to produce
statistics related to random matrices in certain monodromy groups. When one fixes the base field, one instead tends to encounter discrete probabilities, typically sums of
independent identically distributed random variables. The first result in this direction
is the work of Kurlberg and Rudnick for hyperelliptic curves \cite{kr};
other cases considered include cyclic $p$-fold covers of the projective line \cite{bdfl1, bdfl2} (for a slightly different approach see \cite{x2}),
plane curves \cite{bdfl3}, complete intersections in projective spaces \cite{bk},
and general trigonal curves \cite{wood}.
The number of rational points on a curve over a finite field is determined by the zeta function,
and statistical properties of the number of points may be interpreted as properties of the
coefficients of the zeta function.
A related but somewhat deeper question is to consider statistical properties of \textit{zeroes}
of the zeta function. In the case of hyperelliptic curves, these properties were studied by
Faifman and Rudnick \cite{fr}. A related family was studied in \cite{x1}.
In this paper, we make similar considerations for
the family of \emph{Artin-Schreier covers of $\mathbb P^1$}; this family is interesting because the
characteristic of the base field plays a more central role in the definition than in any of the other
families mentioned so far. The Artin-Schreier construction is special because it cannot be obtained by base-change from a family of schemes over $\mathbb Z.$ Since Artin-Schreier covers are cyclic covers of $\mathbb{P}^1$, one obtains a direct link between their zeta functions and certain exponential sums; while this is also the case for cyclic $p$-fold covers in characteristics other than $p$, the Artin-Schreier case admits a much more precise analysis. One example of how to exploit this additional precision is the work of Rojas-Leon and Wan \cite{rw} refining the Weil bound for Artin-Schreier curves.
To explain our results in more detail, we introduce some notation.
Fix an odd prime $p$ and a finite field $\mathbb F_q$ of characteristic $p.$ Each polynomial $f \in \mathbb F_q[X]$ whose degree $d$ is not divisible by $p$ defines an Artin-Schreier cover $C_f$ of $\mathbb P^1$ with affine model
\begin{equation}\label{ascurve} Y^p-Y=f(X).\end{equation}
Since $f$ is a polynomial rather than a more general rational function, $C_f$ has $p$-rank $0.$ For more details about the structure of the moduli space of Artin-Schreier curves and its $p$-rank strata, see \cite{pz}. The Riemann-Hurwitz formula implies that the genus of the above curve is $g= (d-1)(p-1)/2.$ As usual, the Weil zeta function of $C_f$ has the form
\[Z_{C_f} (u) = \frac{P_{C_f}(u)}{(1-u)(1-qu)}.\] Here $P_{C_f}(u)$ is a polynomial of degree $2g=(d -1)(p-1)$ which factors as
\begin{equation}\label{product}P_{C_f}(u) = \prod_{\psi \neq 1} L(u, f, \psi),\end{equation}
where the product is taken over the non-trivial additive characters $\psi$ of $\mathbb F_p$ and $L(u,f,\psi)$ are certain $L$-functions (see \eqref{Euler-product} for the formula).
Computing the distribution of the zeroes of the zeta functions $Z_{C_f}(u)$ as $C_f$ runs over the $\mathbb{F}_q$-points of the moduli space $\mathcal{AS}_{g,0}$
of Artin-Schreier covers of genus $g$ and $p$-rank $0$ amounts to computing the distribution of the zeroes
of $\prod_{j=1}^{p-1}L(u,f,\psi^j)$ for a fixed non-trivial additive character $\psi$ as $f$ runs over polynomials of degree $d$. In fact, going over each $\mathbb F_q$-point of the moduli space $\mathcal{AS}_{g,0}$ once is equivalent to letting $f$ vary over the set $\mathcal{F}'_d$ of polynomials of degree
$d$ containing no non-constant terms of degree divisible by $p$, as such terms can always be eliminated
in a unique way without changing the resulting Artin-Schreier cover.
Some statistics for the zeroes in the family of Artin-Schreier covers were considered in the
recent work of Entin \cite{entin}, who employs the methods of Kurlberg and Rudnick \cite{kr} to study the variation of the number of points on
such a family, then translates the results into information about zeroes.
In the present work, we consider the global and mesoscopic regime, as was done by Faifman and Rudnick \cite{fr} for the
family of hyperelliptic curves.
More precisely, we write \begin{equation}\label{factorization-of-L}
L(u,f,\psi) = \prod_{j=1}^{d-1} (1-\alpha_{j}(f, \psi)u),
\end{equation}
\noindent where $\alpha_j(f, \psi) = \sqrt{q} e^{2 \pi i \theta_j(f, \psi)}$ and $\theta_j(f, \psi) \in [-1/2, 1/2)$.
We study the statistics of the set of angles $\{\theta_j(f, \psi)\}$ as $f$ varies. For an interval $\mathcal{I} \subset[-1/2,1/2),$ let
\[N_\mathcal{I}(f, \psi) := \#\{1\leq j \leq d-1:\,\theta_j(f, \psi) \in \mathcal{I}\},\]
\[
N_{\mathcal{I}}(f,\psi, \bar\psi):=N_\mathcal{I}(f, \psi)+ N_\mathcal{I}(f,\bar \psi),
\]
and
\[
N_\mathcal{I}(C_f):=\sum_{j=1}^{p-1}N_\mathcal{I}(f,\psi^j).
\]
We show that the number of zeroes with angle in a prescribed non-trivial subinterval $\mathcal I$
is asymptotic to $2g|\mathcal I|$ (Theorem \ref{boundN_I}), has variance asymptotic to $\frac{2(p-1)}{\pi^2} \log(g|\mathcal I|)$ and properly normalized has a Gaussian distribution.
\begin{thm}\label{mainthm} Fix a finite field $\mathbb F_q$ of characteristic $p$. Let $\mathcal{F}_d'$ be the family of polynomials defined in \eqref{ourfamily}. Then for any real numbers $a<b$ and $0<|\mathcal{I}|<1$ either fixed or
$|\mathcal{I}|\rightarrow 0$ while $d |\mathcal{I}|\rightarrow \infty$,
\[\lim_{d \rightarrow \infty} \mathrm{Prob}_{\mathcal{F}_d'}\left(a < \frac{N_\mathcal{I}(C_f)-(d-1)(p-1)|\mathcal{I}|}{\sqrt{\frac{2(p-1)}{\pi^2}\log(d|\mathcal{I}|)}} < b\right)=\frac{1}{\sqrt{2\pi}} \int_a^b e^{-x^2/2} dx.\]
\end{thm}
As noted earlier, this result can also be stated in terms of the $\mathbb{F}_q$-points
of $\mathcal{AS}_{g,0}$.
\begin{cor}\label{cor} Fix a finite field $\mathbb F_q$ of characteristic $p$. Then for any real numbers $a<b$ and $0<|\mathcal{I}|<1$ either fixed or
$|\mathcal{I}|\rightarrow 0$ while $g |\mathcal{I}|\rightarrow \infty$,
\[\lim_{g \rightarrow \infty} \mathrm{Prob}_{\mathcal{AS}_{g,0}(\mathbb{F}_q)}\left(a < \frac{N_\mathcal{I}(C_f)-2g|\mathcal{I}|}{\sqrt{\frac{2(p-1)}{\pi^2}\log\left(g|\mathcal{I}|\right)}} < b\right)=\frac{1}{\sqrt{2\pi}} \int_a^b e^{-x^2/2} dx.\]
\end{cor}
Theorem \ref{mainthm} is obtained by computing the
normalized moments of certain approximations of
\\${N_\mathcal{I}(C_f)-(p-1)(d-1)|\mathcal{I}|}$ given by Beurling-Selberg polynomials
to verify that they fit the Gaussian moments.
Our results are compatible with the following result for the distribution of zeroes of the $L$-functions $L(u,f, \psi)$ and $L(u, f, \bar \psi).$
\begin{prop}\label{prop}
Fix a finite field $\mathbb F_q$ of characteristic $p$. Then for any real numbers $a<b$ and $0<|\mathcal{I}|<1$ either fixed or
$|\mathcal{I}|\rightarrow 0$ while $d |\mathcal{I}|\rightarrow \infty$,
\[\lim_{d \rightarrow \infty} \mathrm{Prob}_{\mathcal{F}_d'}\left(a < \frac{N_{\mathcal{I}}(f,\psi, \bar\psi)-2(d-1)|\mathcal{I}|}{\sqrt{\frac{4}{\pi^2}\log(d|\mathcal{I}|)}} < b\right)=\frac{1}{\sqrt{2\pi}} \int_a^b e^{-x^2/2} dx.\]
\end{prop}
\begin{rem}
Analogous results hold for $N_{\mathcal I}(f,\psi)$ as long as the interval $\mathcal I$ is symmetric.
\end{rem}
Notice that Proposition \ref{prop} is compatible with the philosophy of Katz and Sarnak, which predicts that when
$q \rightarrow \infty$, the distribution of $N_{\mathcal{I}}(C_f)$ is the same as the distribution of
$\hat{N}_{\mathcal{I}}(U)$, the number of eigenvalues of a $2g \times 2g$ matrix $U$ in the monodromy group
of $C_f$ chosen uniformly at random with respect to the Haar measure. The monodromy groups of Artin-Schreier covers are computed by Katz in
\cite{Katz1, Katz2}. In the large matrix limit, which corresponds to the limit as
$d \rightarrow \infty$ for the family of Artin-Schreier covers because $g = (p-1)(d-1)/2$, the statistics
on $\hat{N}_{\mathcal{I}}(U)$ have been found to have Gaussian fluctuations in various ensembles of
random matrices.
\subsection{Outline of the article}
This article is set up as follows. We begin by reviewing basic Artin-Schreier theory in Section \ref{a-s}. In Section \ref{explicit} we prove two explicit formulas for the zeroes of $L(u, f, \psi)$ which we will need later to compute the moments. In Section \ref{distrzeros} we prove a result about the number of zeroes of the zeta function for a fixed Artin-Schreier cover of $\mathbb P^1$. In Section \ref{trigapprox} we recall some facts on Beurling-Selberg polynomials and use them to prove some technical statements about their coefficients. A certain sum of these trigonometric polynomials approximate the characteristic function of the interval $\mathcal I$.
We use the explicit formula to reduce the problem of studying this sum of Beurling-Selberg polynomials to a problem about sums of characters of traces of a polynomial $f$ evaluated at elements in extensions of $\mathbb F_q$. In Sections \ref{1mom}, \ref{2mom} and \ref{3mom} we analyze the first, second and third
moments of this sum. These moments tell us the expectation and
variance of the distribution. In Section \ref{genmom} we compute the general moments of our approximating function and conclude that it has a standard Gaussian limiting distribution as the degree $d$ of $f$ goes to infinity for $\mathcal I$ either fixed or in the mesoscopic regime. Finally, in Section \ref{proof} we conclude the proof of Theorem \ref{mainthm} by proving that under normalization $N_{\mathcal I}(C_f)-(d-1)(p-1)|\mathcal I|$ converges in mean square and hence distribution to our approximating function.
\section{Basic Artin-Schreier theory}\label{a-s}
We now recall some more facts about Artin-Schreier covers. For each integer $n\geq 1$, denote by $\tr_n: \mathbb F_{q^n} \to \mathbb F_p$ the absolute trace map (not the trace to $\mathbb F_q$). For each polynomial $g \in \mathbb F_q[X]$ and non-trivial additive character $\psi$ of $\mathbb F_p$, set
\[
S_n(g, \psi) = \sum_{x\in \mathbb F_{q^n}} \psi(\tr_n(g(x))).
\]
The $L$-functions that appear in \eqref{product} are given by
\begin{equation}\label{Euler-product}
L(u,f,\psi) = \exp\left(\sum_{n=1}^{\infty} S_n(f, \psi) \frac{u^n}{n}\right) = \prod_P \left(1-\psi_f(P)u^{\deg P}\right)^{-1},
\end{equation}
where the product is taken over monic irreducible polynomials in $\mathbb F_q[X].$ In fact, throughout this paper $P$ will denote such a polynomial and, if $n=\deg P$ we have
\[\psi_f(P) = \sum_{\alpha \in \mathbb F_{q^n} \atop P(\alpha)=0} \psi(f(\alpha))= \psi(\tr_n(f(\alpha))) \textrm{ for any root $\alpha$ of $P$.}\]
To see that the exponential is equal to the product over primes in \eqref{Euler-product}, one has to write the exponential as an Euler product over the closed points of $\mathbb A^1.$ Namely, if we denote by $\mathcal S_n$ the set of closed points of $\mathbb A^1$ of degree $n,$ we can write
\begin{eqnarray*}
L(u,f,\psi) &= & \exp\left(\sum_{n=1}^{\infty} S_n(f, \psi) \frac{u^n}{n}\right)\\
&=& \exp\left(\sum_{n=1}^\infty \sum_{x \in \mathcal S_n} \sum_{k=1}^\infty \psi(\tr_{kn}(f(x))) \frac{u^{kn}}{k} \right).
\end{eqnarray*}
The denominator of the fraction is $k$, not $kn,$ because each closed point $x \in \mathcal S_n$ produces $n$ rational points of $\mathbb F_{q^n}.$ Thus,
\begin{eqnarray*}L(u,f,\psi) &=& \prod_{n=1}^\infty \prod_{x \in \mathcal S_n} \exp\left(\sum_{k=1}^\infty \frac{\big(\psi(\tr_{n}(f(x)))u^n\big)^k}{k} \right)\\
&=& \prod_{n=1}^\infty \prod_{x \in \mathcal S_n} (1-\psi(\tr_{n}(f(x)))u^n)^{-1}\\
&= &\prod_{x \textrm{ closed point of } \mathbb A^1} (1-\psi(\tr_{\deg x}(f(x)))u^{\deg x})^{-1},
\end{eqnarray*}
\noindent which is exactly the product over primes that appears in \eqref{Euler-product}.
Note that for the trivial character $\psi=1$, the same formula gives \[L(u,f,1)= Z_{\mathbb A^1}(u)= \frac{1}{1-qu}.\]
The factor at infinity is then given by \[\psi_f(\infty) = \begin{cases} 1 & \psi = 1, \\
0 & \psi \neq 1. \end{cases}\]
Therefore we have
\[Z_{C_f}(u) = \prod_{\psi} L^*(u,f,\psi),\]
where $L^*(u,f,\psi)$ are the completed $L$-functions,
\[L^*(u,f,\psi) = \prod_{v} \left(1-\psi_f(P_v) u^{\deg P_v}\right)^{-1}.\] Here the product is taken over all places $v$ of $\mathbb F_q(X).$
From now on we will fix a non-trivial additive character $\psi$ of $\mathbb F_p$ given by a certain choice $\zeta$ of a primitive $p$th root of unity in $\mathbb C.$ Then, all the other non-trivial characters of $\mathbb F_p$ are of the form $\sigma \circ \psi$ where $\sigma$ is an automorphism of the cyclotomic field $\mathbb Q(\zeta).$ The reciprocals of zeroes of the $L(u,f, \sigma\circ \psi)$ are exactly the Galois conjugates $\sigma(\alpha_j(f,\psi)),$ $1\leq j \leq d-1,$ of the reciprocals of the roots of $L(u, f, \psi).$
In order to compute the distribution of the zeroes of the Weil zeta functions $Z_{C_f}$ as $C_f$ runs over
$\mathcal AS_{g,0}(\mathbb F_q)$ we are going to compute the distribution of the angles $\theta_j(f, \psi), \theta_j(f, \bar \psi), 1 \leq j \leq d-1,$ for our specific choice of the additive character $\psi,$ as $f$ runs through $\mathcal F_d',$ where $g=(d-1)(p-1)/2.$
Since the roots of $L(u, f, \psi)$ and $L(u, f, \bar{\psi})$ are conjugate, it suffices to work with symmetric intervals. The distribution of the roots of the whole zeta function is then obtained by combining the $(p-1)/2$ distributions for the various choices of $\psi$.
As discussed in the introduction, we will consider $\mathbb F_q$-points of the moduli space $\mathcal{AS}_{g,0}$ of Artin-Schreier covers of $p$-rank $0.$ A cover consists of an Artin-Schreier curve for which we fix an automorphism of order $p$ and an isomorphism between the quotient and $\mathbb P^1.$ We also choose the ramification divisor to be $D=(\infty).$ Thus the one branch point of our $p$-rank $0$ covers is at infinity.
Concretely, we consider, up to $\mathbb F_q$-isomorphism, pairs of curves with affine model $C_f: Y^p - Y = f(X)$ with $f(X)$ a polynomial of degree $d = 2g/(p-1)+1$ not divisible by $p$ together with the automorphism $Y \mapsto Y+1.$
Using the $\mathbb F_q$-isomorphism $(X,Y) \mapsto (X, Y+aX^k)$, we get that $C_f$ is isomorphic to $C_g$ where
$g(X) = f(X) + aX^k - a^p X^{kp}$. By using this isomorphism, we are reduced to considering the Artin-Schreier curves with model
$C_f: Y^p-Y = f(X)$ where $f(X)$ is an element of the family $\mathcal F_d'$ defined in the introduction as
\begin{eqnarray} \label{ourfamily}
\mathcal F_d'=\left\{ a_dX^d + a_{d-1}X^{d-1} + \dots + a_0\in \mathbb F_q[X] : a_d \in \mathbb F_q^*, a_{pk}=0, 1 \leq k \leq \left\lfloor\frac{d}{p}\right\rfloor \right\}.
\end{eqnarray}
Except for the isomorphisms described above, no two such affine models are isomorphic. Therefore considering all affine
models $Y^p - Y = f(X)$ with $f(X) \in \mathcal F_d'$ is equivalent to considering
all the $\mathbb F_q$- points of the moduli space $\mathcal{AS}_{g,0}.$ For more details on this one-to-one correspondence between our family and $\mathcal{AS}_{g,0} (\mathbb F_q),$ see \cite[Proposition 3.6]{pz}.
In \cite{entin}, the author is considering a slightly different family by
also allowing twists, i.e. isomorphism over
$\mathbb F_{q^p}$. This amounts to the models $C_f: Y^p-Y = f(X),$ with $f(X) \in \mathcal F_d''$, where
\[
\mathcal F_d''=\left\{ a_dX^d + a_{d-1}X^{d-1} + \dots + a_0\in \mathbb F_q[X] : a_d \in \mathbb F_q^*, a_{pk}=0,
0 \leq k \leq\left\lfloor\frac{d}{p}\right\rfloor \right\}. \]
Finally, we will denote by
\begin{eqnarray*}
\mathcal F_d=\left\{ a_dX^d + a_{d-1}X^{d-1} + \dots + a_0\in \mathbb F_q[X] : a_d \in \mathbb F_q^* \right\},
\end{eqnarray*} \noindent the set of all polynomials of degree $d$ in $\mathbb F_q[X].$
We will also need the map $\mu: \mathcal{F}_d\rightarrow \mathcal{F}_d'$ defined by
\begin{equation}
\label{map-mu} \mu\left(\sum_{i=0}^d a_i X^i\right)= a_0+\sum_{{i=1}\atop{i \neq kp, k \geq 1}}^d \left( \sum_{j=0}^{\left\lfloor \log_p (d/i)\right\rfloor} a_{ip^j}^{p^{-j}}\right) X^i.
\end{equation} This map is $q^{\left\lfloor\frac{d}{p}\right\rfloor}$-to-one and preserves the
trace of $f(\alpha)$, which will allow us to work with $\mathcal F_d$ instead of $\mathcal F'_d$ when taking averages.
\subsection{Remark on the number of points}
For $d$ large enough, the elements of $\mathcal F_d'$ have the same chance as any random polynomial of degree $d$ in $\mathbb F_q[X]$ to take a given value in some extension of $\mathbb F_q.$
Thus, if $p \nmid n,$ as soon as $d - \left\lfloor d/p \right\rfloor >q^n,$ the distribution of $\{\#C_f(\mathbb F_{q^n}): f \in \mathcal F_d'\}$ is given by a sum of i.i.d. random variables, one variable for each closed point of $\mathbb P^1$ of degree $e\mid n.$ As long as we stay away from the point at infinity where $f(X)$ has a pole, the fiber above each closed point $x$ of $\mathbb P^1$ contains $pe$ rational points on the Artin-Schreier cover $C_f$ if $x$ happens to be in the kernel of the absolute trace map $\tr_{n}:\mathbb F_{q^n} \to \mathbb F_p,$ and no points otherwise. Hence each random variable in the sum takes the value $pe$ with probability $1/p$ and $0$ with probability $1-1/p.$ The average number of points is then $1+q^n,$ the constant $1$ coming from the point at infinity where the polynomial $f(X)$ has a pole and the fiber above it contains just $1$ point.
If $p\mid n,$ the average is higher because there are certain points of $\mathbb P^1$ of degree $e$ for which the fiber is forced to have $pe$ points (i.e. the points of degree $ e \mid \frac{n}{p}$). One adjusts the computation accordingly and obtains that the average number in $C_f(\mathbb F_{q^n})$ is now $1+q^n + (p-1)q^{n/p}.$ This is the essential reason behind Entin's result on the matter \cite[Theorem 4]{entin}, except that his count does not take into account the point at infinity.
\section{Explicit Formulas}\label{explicit}
Let $K$ be a positive integer, $e(\theta) = e^{2\pi i \theta}$ and let $h(\theta) = \sum_{{|k|}\leq K} a_k e(k\theta)$ be a trigonometric polynomial.
Then the coefficients $a_k$ are given by the Fourier transform
$$a_k = \widehat{h}(k) = \int_{-1/2}^{1/2} h(\theta) e (- k \theta) d\theta.$$
We prove in this section two explicit formulas for $L(u,f,\psi)$, written as an exponential of a sum or as a product
over primes as in (\ref{Euler-product}). The first explicit formula (Lemma \ref{Explicit-Formula}) will be used to compute
the moments over the family ${\mathcal{F}}_d'$, and the second explicit formula (Lemma \ref{Explicit-formula-relevant}) will be used to
prove a result about the number of zeroes for a fixed $C_f$ (see Section \ref{distrzeros}).
\begin{lem}\label{Explicit-Formula}
Let $h(\theta) = \sum_{{|k|}\leq K}\widehat{h}(k)e(k\theta)$ be a trigonometric polynomial. Let $\theta_j(f, \psi)$ be
the eigenangles of the $L$-function $L(u,f,\psi)$.
Then we have
\begin{equation}
\sum_{j=1}^{d-1}h(\theta_j(f, \psi)) = (d-1)\widehat{h}(0) - \sum_{k=1}^{K}\frac{\widehat{h}(k)S_k(f,\psi) + \widehat{h}(-k)S_k(f,\overline{\psi})}{q^{k/2}}.
\end{equation}
\end{lem}
\begin{proof}
Recall from above that
$$L(u,f,\psi) = \exp\left(\sum_{n=1}^{\infty}S_n(f,\psi)\frac{u^n}{n}\right) = \prod_{j=1}^{d-1}(1- \alpha_j(f,\psi)u).$$
Taking logarithmic derivatives, we have
$$\frac{d}{du}\sum_{j=1}^{d-1}\log(1- \alpha_j(f,\psi)u) = \frac{d}{du}\sum_{n=1}^{\infty}S_n(f,\psi)\frac{u^n}{n}.$$
Multiplying both sides by $u,$ we get
$$\sum_{j=1}^{d-1}\frac{-\alpha_j(f,\psi)u}{1-\alpha_j(f,\psi)u} = \sum_{n=1}^{\infty}S_n(f,\psi)u^n,$$
that is,
$$-\sum_{j=1}^{d-1}\sum_{n=1}^{\infty}(\alpha_j(f,\psi)u)^n = \sum_{n=1}^{\infty}S_n(f,\psi)u^n.$$
Comparing coefficients,
$$-\sum_{j=1}^{d-1}(\alpha_j(f,\psi))^n = S_n(f,\psi).$$
Thus, for $n>0,$ we get
\begin{equation} \label{ngeq0} -\sum_{j=1}^{d-1}e^{2\pi i n \theta_j(f, \psi)} = \frac{S_n(f,\psi)}{q^{n/2}}.\end{equation}
For $n<0,$ taking complex conjugates, we have by (\ref{factorization-of-L}) and (\ref{ngeq0})
\begin{eqnarray*}
-\sum_{j=1}^{d-1}e^{2\pi i n \theta_j(f, \psi)}
&=& -\overline{\sum_{j=1}^{d-1}e^{2\pi i |n|\theta_j(f, \psi)}}
=-\overline{\sum_{j=1}^{d-1}\frac{\alpha_j(f, \psi)^{|n|}}{q^{|n|/2}}}\\
&=&\overline{\frac{S_{|n|}(f,\psi)}{q^{|n|/2}}}
= \frac{S_{|n|}(f,\overline{\psi})}{q^{|n|/2}} = \frac{S_{|n|}(f,\psi^{-1})}{q^{|n|/2}}.
\end{eqnarray*}
Thus,
\begin{eqnarray*}
\sum_{j=1}^{d-1}h(\theta_j(f, \psi))
&=&\sum_{j=1}^{d-1}\sum_{k=-K}^K\widehat{h}(k)e(k\theta_j(f, \psi))\\
&=&(d-1)\widehat{h}(0)+\sum_{j=1}^{d-1}\sum_{k=1}^K\widehat{h}(k)e(k\theta_j(f, \psi)) +\sum_{j=1}^{d-1}\sum_{k = -K}^{-1}\widehat{h}(k)e(k\theta_j(f, \psi))\\
&=&(d-1)\widehat{h}(0) - \sum_{k=1}^K\widehat{h}(k)\left(\frac{S_k(f,\psi)}{q^{k/2}}\right) - \sum_{k = -K}^{-1}\widehat{h}(k)\left(\frac{S_{-k}(f,\overline{\psi})}{q^{-k/2}}\right)\\
&=&(d-1)\widehat{h}(0) - \sum_{k=1}^K\frac{\widehat{h}(k)S_k(f,\psi) + \widehat{h}(-k)S_k(f,\overline{\psi})}{q^{k/2}}.
\end{eqnarray*}
\end{proof}
\begin{lem}\label{Explicit-formula-relevant} Let $\theta_j(f, \psi)$ be
the eigenangles of the $L$-function $L(u,f,\psi)$.
Then for any $n\geq 1,$
$$ -\sum_{j=1}^{d-1}e^{2\pi i n \theta_j(f, \psi)} = \sum_{\deg (M) = n}\frac{\Lambda(M)\psi_f(M)}{q^{n/2}}$$
where $M$ runs over monic polynomials in $\mathbb F_q[X],$
\[\Lambda(M) = \begin{cases}
\deg P&\textrm{ if } M = P^k\,\textrm{for some }k\geq 1\textrm{ and } P \textrm{ irreducible},\\ 0&\textrm { otherwise},
\end{cases}\]
and $\psi_f(P^k) = \psi_f(P)^k.$
\end{lem}
\begin{proof}
Comparing equations (\ref{Euler-product}) and (\ref{factorization-of-L}), we have
$$\prod_{j=1}^{d-1}(1 - \alpha_j(f, \psi) u) = \prod_P(1 - \psi_f(P)u^{\deg P})^{-1},$$
where the product on the right hand side is taken over monic irreducible polynomials in $\mathbb F_q[X].$
Taking logarithmic derivatives and multiplying by $u,$ we deduce that
$$-\sum_{j=1}^{d-1}\sum_{n=1}^{\infty}(\alpha_j(f, \psi) u )^n = \sum_M\Lambda(M)u^{\deg M}\psi_f(M).$$
Comparing the coefficients of $u^n,$ we get
$$-\sum_{j=1}^{d-1}\alpha_j(f, \psi)^n = \sum_{\deg(M) = n}\Lambda(M)\psi_f(M),$$
and the result follows by dividing both sides by $q^{n/2}$.
\end{proof}
\section{The distribution of zeroes of $L(u,f,\psi)$} \label{distrzeros}
In this section we use the Erd\H{o}s-Tur\'{a}n inequality (see \cite{M}, Corollary 1.1) to prove a result on the number of eigenangles $\theta_j(f, \psi)$ in an interval ${\mathcal{I}}$
for a fixed $L$-function $L(u,f,\psi)$.
\begin{thm}\label{Erdos-Turan}\text{[P. Erd\H{o}s, P. Tur\'{a}n]}
Let $x_1,x_2,\dots,x_N$ be real numbers lying in the unit interval $[-1/2,1/2).$ For any interval $\mathcal{I} \subseteq [-1/2,1/2),$
let $A(\mathcal{I},N,\{x_n\})$ denote the number of
elements from the above set in $\mathcal{I}$. Let $|\mathcal{I}|$ denote the length of the interval. There exist absolute constants $B_1$ and $B_2$ such that for any $K\geq 1,$
$$|A(\mathcal{I},N,\{x_n\}) - N |\mathcal{I}|| \leq \frac{B_1N}{K+1} + B_2\sum_{k=1}^K\frac{1}{k}\left|\sum_{n=1}^{N}e^{2\pi i k x_n}\right|.$$
\end{thm}
We now prove the following theorem, which is the analogue of Proposition 5.1 in \cite{fr}.
\begin{thm}\label{boundN_I}
For any $\mathcal{I} \subseteq [-1/2,1/2),$ let $N_\mathcal{I}(f, \psi) := \#\{1\leq j \leq d-1:\,\theta_j(f, \psi) \in \mathcal{I}\}.$ Then
$$N_\mathcal{I}(f,\psi) = (d-1) |\mathcal{I}| +O\left(\frac{d}{\log d}\right).$$
\end{thm}
\begin{proof}
By the Erd\H{o}s-Tur\'{a}n inequality and Lemma \ref{Explicit-formula-relevant}, we have
\begin{eqnarray*}
|N_\mathcal{I}(f,\psi) - (d-1) |\mathcal{I}| |
&\ll&\frac{d}{K} + \sum_{k=1}^K\frac{1}{k}\left|\sum_{\deg M = k}\frac{\Lambda(M)\psi_f(M)}{q^{k/2}}\right|\\
&\ll& \frac{d}{K} + \sum_{k=1}^K\frac{1}{q^{k/2}}\sum_{M = P^a,\,a\geq 1 \atop{\deg M = k}}1.
\end{eqnarray*}
Applying the function-field analogue of the prime number theorem, the above expression is
$\ll \displaystyle \frac{d}{K} + \frac{q^{K/2}}{K}.$ Choosing $K = \left[\frac{\log d}{\log q}\right],$ we deduce the theorem.
\end{proof}
\section{Beurling-Selberg functions} \label{trigapprox}
By the functional equation, the conjugate of a root of $Z_{C_f}(u)$ is also a root so we can restrict to considering symmetric intervals. Let $0<\beta<1$ and set $\mathcal I = [-\beta/2, \beta/2] \subset [-1/2,1/2)$. We are going to
approximate the characteristic function of ${\mathcal{I}}$, $\chi_{\mathcal{I}}$, with Beurling-Selberg polynomials $I_K^\pm$.
We will use the following properties of the coefficients of Beurling-Selberg polynomials (see \cite{M}, ch 1.2).
\begin{itemize}
\item[{\bf (a)}] The $I^\pm_K$ are trigonometric polynomials of degree $\leq K$,
i.e., $$I_K^{\pm}(x) = \sum_{|k| \leq K} \widehat{I}_K^{\pm}(k) e(k x).$$
\item[{\bf (b)}] The Beurling-Selberg polynomials bound the characteristic function from below and above: \[I_K^- \leq \chi_{\mathcal{I}}\leq I_K^+.\]
\item[{\bf (c)}] The integral of Beurling-Selberg polynomials is close to the length of the interval: \[\int_{-1/2}^{1/2} I_K^\pm(x) dx =\int_{-1/2}^{1/2} \chi_{\mathcal{I}}(x) dx \pm \frac{1}{K+1}.\]
\item[{\bf (d)}] The $I^\pm_K$ are even (since we are taking the interval $\mathcal{I}$
to be symmetric about the origin). It then follows that the Fourier coefficients
are also even, i.e. $\widehat{I}_K^{\pm}(-k) = \widehat{I}_K^{\pm}(k)$ for
$|k| \leq K$.
\item[{\bf (e)}] The nonzero Fourier coefficients are also close to those of the characteristic function:
\[|\widehat{I}_K^\pm (k) - \widehat{\chi}_{
\mathcal{I}}(k) | \leq \frac{1}{K+1}
\quad \Longrightarrow \quad \widehat{I}^\pm_K(k)=\frac{\sin (\pi k|\mathcal{I}|)}{\pi k} + O
\left( \frac{1}{K+1}\right), \quad k \geq 1.
\]
This implies the following bound:
\[|\widehat{I}_K^\pm (k)| \leq \frac{1}{K+1} +\min \left \{|\mathcal{I}|, \frac{\pi}{|k|}\right \}, \quad 0<|k|\leq K;\]
\end{itemize}
\begin{prop}(Proposition 4.1, \cite{fr}) \label{propFR} For $K\geq 1$ such that $K|\mathcal{I}|>1$, we have
\begin{eqnarray*}
\sum_{k \geq 1} \widehat{I}_K^\pm (2k)&=&O(1),\\
\sum_{k \geq 1} \widehat{I}_K^\pm (k)^2 k&=&\frac{1}{2\pi^2} \log (K|\mathcal{I}|) +O(1),\\
\sum_{k \geq 1} \widehat{I}_K^+ (k)\widehat{I}_K^- (k) k&=&\frac{1}{2\pi^2} \log (K|\mathcal{I}|) +O(1).\\
\end{eqnarray*}
\end{prop}
Note that for a given $K$ these sums are actually finite, since the Beurling-Selberg polynomials $I_K^\pm$ have degree at most $K$.
\begin{proof} The first two statements are proven in Proposition 4.1 of \cite{fr}.
Since $$\widehat{I}_K^{\pm}(k) = \frac{\sin (\pi k|\mathcal{I}|)}{\pi k} + O
\left( \frac{1}{K}\right),$$
holds for both $\widehat{I}_K^{+}(k)$ and $\widehat{I}_K^{-}(k),$ the third statement follows by exactly the same proof as the second statement.
\end{proof}
We will also need the following estimates.
\begin{prop} \label{propmanysums} For $\alpha_1,\dots, \alpha_r, \gamma_1,\dots, \gamma_r>0$, and $\beta_1,\dots,\beta_r \in \mathbb{R}$, we have
\[\sum_{k_1,\dots,k_r \geq 1} {\widehat{I}_K^\pm(k_1)}^{\alpha_1} \dots {\widehat{I}_K^\pm(k_r)}^{\alpha_r}k_1^{\beta_1}\dots k_r^{\beta_r}q^{-\gamma_1k_1-\dots-\gamma_r k_r}=O(1).\]
For $\alpha_1,\alpha_2,\gamma >0$, and $\beta \in \mathbb{R}$,
\[\sum_{k\geq1} {\widehat{I}_K^\pm(k)}^{\alpha_1} {\widehat{I}_K^\pm(2k)}^{\alpha_2} k^\beta q^{-\gamma k}=O(1).\]
\end{prop}
\begin{proof} Since $\left|\widehat{I}_K^\pm(k)\right|\leq \frac{1}{K+1}+\min\left\{|\mathcal{I}|, \frac{\pi}{|k|}\right\}$, we obtain
\begin{eqnarray*}
&&\left|\sum_{k_1,\dots,k_r \geq 1} {\widehat{I}_K^\pm(k_1)}^{\alpha_1} \dots {\widehat{I}_K^\pm(k_r)}^{\alpha_r}k_1^{\beta_1}\dots k_r^{\beta_r}q^{-\gamma_1k_1-\dots-\gamma_r k_r}\right|
\ll \sum_{k_1,\dots,k_r\geq 1} k_1^{\beta_1}\dots k_r^{\beta_r}q^{-\gamma_1k_1-\dots-\gamma_r k_r}.
\end{eqnarray*}
Since $\sum_{k\geq 1} k^\beta q^{-\gamma k}=O(1)$ for $q>1$ and $\gamma>0$, we get that the right hand side above is also equal to $O(1).$
The second equation is a particular form of the more general equation established above.
\end{proof}
\section{First Moment} \label{1mom}
Recall that $N_{\mathcal I} (f,\psi)$ denotes the number of angles $\theta_j(f, \psi)$ of the zeroes of the $L$-function $L(u,f,\psi)$ in the interval $\mathcal I \subset [-1/2, 1/2)$ of length $0<|\mathcal I |<1.$
From now on, for a function $\phi:\mathcal{F}_d' \rightarrow\mathbb C$, we denote its average by \[\left<\phi(f)\right>:=\frac{1}{|\mathcal{F}_d'|}
\sum_{f \in \mathcal{F}_d'}\phi(f).\]
We want to compute the first moment
\begin{eqnarray*}
\left< N_{\mathcal I}(f,\psi) \right> = \frac{1}{|\mathcal{F}_d'|}
\sum_{f \in \mathcal{F}_d'} N_{\mathcal I}(f,\psi) .
\end{eqnarray*}
We will do so by proving the following result.
\begin{thm} \label{averageET} As $d \rightarrow \infty$,
\begin{eqnarray*}
\left< N_{\mathcal I}(f,\psi) - (d-1) |\mathcal{I}| \right> = O(1).
\end{eqnarray*}
\end{thm}
\begin{rem} Recall that in Theorem \ref{boundN_I} we showed that
$$N_{\mathcal I}(f,\psi) - (d-1) |\mathcal{I}| = O\left(\frac{d}{\log d}\right).$$ Theorem \ref{averageET}, on the other hand, gives us a far better estimate for the average of $\left< N_{\mathcal I}(f,\psi) - (d-1) |\mathcal{I}| \right>$ than we could have derived from Theorem \ref{boundN_I}.
\end{rem}
For the proof of Theorem \ref{averageET}, we will use the Beurling-Selberg approximation of the characteristic function of the interval $\mathcal I.$
By property {\bf (b)} of the Beurling-Selberg polynomials,
$$
\sum_{j=1}^{d-1} I_K^{-}(\theta_j(f,\psi)) \leq N_\mathcal{I}(f, \psi) \leq \sum_{j=1}^{d-1} I_K^{+}(\theta_j(f,\psi)) .
$$
With the explicit formula of Lemma \ref{Explicit-Formula} and property {\bf (c)},
we write
\begin{eqnarray*}
\sum_{j=1}^{d-1} I_K^{\pm}(\theta_j(f,\psi)) &=& (d-1) |\mathcal{I}| - S^{\pm}(K, f, \psi) \pm \frac{d-1}{K+1} \\
\end{eqnarray*}
where
\begin{eqnarray} \label{defSK}
S^{\pm}(K, f,\psi) := \sum_{k=1}^K \frac{\widehat{I}^\pm_K(k)S_k(f, \psi)+\widehat{I}^\pm_K(-k)S_k(f, \bar{\psi})}{q^{k/2}} . \end{eqnarray}
This gives
\begin{eqnarray} \label{T-estimate}
- S^{-}(K, f,\psi) -\frac{d-1}{K+1} \leq N_\mathcal{I}(f, \psi) - (d-1)|\mathcal{I}| \leq - S^{+}(K, f,\psi) + \frac{d-1}{K+1}.
\end{eqnarray}
In order to complete the proof it remains to estimate $\langle S^{\pm}(K, f, \psi) \rangle.$ We will need the following results from \cite{entin}.
As we remarked in Section \ref{a-s},
we are using a slightly different description for the family of Artin-Schreier covers since we do not allow twists.
Because of that, our results are slightly simpler than those stated in \cite{entin}. We have also modified the original notation so that it fits the generalization that we pursue in the next sections.
\begin{lem} (\cite{entin}, Lemma 5.2)\label{lem:avg}Let $h$ be an integer, $p\nmid h$. Assume $k<d$ and $\alpha \in \mathbb F_{q^k}.$ Then
\[\left< \psi(h \tr_k f(\alpha))\right> = \begin{cases} 1 & p\mid k, \, \alpha \in \mathbb F_{q^{k/p}},\\
0 & \textrm{otherwise.}
\end{cases} \]
\end{lem}
\begin{proof}
If $p \mid k$ and $\alpha \in \mathbb F_{q^{k/p}}$ then $\tr_k(f(\alpha))=p \tr_{\frac{k}{p}}(f(\alpha))=0$
so $\left< \psi(h\tr_k f(\alpha))\right>=1$. For the remaining case we first note that the average is the same if
we average over the family $\mathcal F_d$ of degree $d$ polynomials (without the condition $a_{pk}=0$).
This is due to the existence of the map $\mu$ defined by (\ref{map-mu}).
Denote by $u$ the degree of $\alpha$ over $\mathbb F_q$. Since $u\leq k<d$ the map
\[
\tau : \mathcal F_d\rightarrow \mathbb F_{q^{u}}
\]
defined by $\tau(f)=f(\alpha)$ is $(q-1)q^{d-u}$-to-one. Thus as $f$ ranges over $\mathcal F_d$, $f(\alpha)$ takes each value
in $\mathbb F_{q^u}$ an equal number of times. Since $p\nmid \frac{k}{u}$, $\tr_k(f(\alpha))=\frac{k}{u}\tr_u(f(\alpha))$ also takes every value in $\mathbb F_p$ the same number of times as $f$ ranges over $\mathcal F_d$ and the same is true for $h\tr_k(f(\alpha))$. Thus each $p$th root of unity occurs the same number of times in $\psi(h\tr_k(f(\alpha)))$ as $f$ ranges over $\mathcal F_d$ and so the average is $0$.
\end{proof}
The lemma has the following consequence.
\begin{cor} (\cite{entin}, Corollary 5.3) \label{cor:moment1}
Let $h$ be an integer, $p\nmid h$. Assume $k <d$ and set
\[M^{k,1,h}_{1,d}:= \left<q^{-k/2} \sum_{\alpha \in \mathbb F_{q^k}} \psi(h \tr_k f(\alpha))\right>.\] Then \[M^{k,1,h}_{1,d}= e_{p,k}q^{-(1/2-1/p)k},\]
where
\[e_{p,k}=\begin{cases} 0 & p\nmid k, \\
1 & p\mid k. \end{cases}\]
\end{cor}
We also denote
\[M^{k,-1,h}_{1,d}:= \left<q^{-k/2} \sum_{\alpha \in \mathbb F_{q^k}} \psi(- h \tr_k f(\alpha))\right>.\]
Clearly, $M^{k,-1,h}_{1,d}=\overline{M^{k,1,h}_{1,d}}.$
Notice that changing $h$ allows us to vary the character from $\psi$ to $\psi^h$. This will be useful later.
\begin{proof}(Theorem \ref{averageET})
We have that
\begin{eqnarray*}
\left< S^{\pm}(K, f, \psi) \right> &=& \sum_{k=1}^K \frac{\widehat{I}^\pm_K(k)\left<S_k(f,\psi) \right>+\widehat{I}^\pm_K(-k)\left<S_k(f,\bar{\psi})\right>}{q^{k/2}}\\
&=& \sum_{k=1}^K \widehat{I}^\pm_K(k)M^{k,1,1}_{1,d}+\widehat{I}^\pm_K(-k)M^{k,-1,1}_{1,d}\\
&= & 2\sum_{k=1}^K \widehat{I}^\pm_K(k)e_{p,k}q^{-(1/2-1/p)k}\\
\end{eqnarray*}
and the result follows from property {\bf (e)} and \eqref{T-estimate} taking $K=cd$ with $c<1$.
\end{proof}
\begin{rem} \label{Chantal's favorite nonconstant} We denote by
\[C(K):=\sum_{k=1}^K \widehat{I}^\pm_K(k)e_{p,k}q^{-(1/2-1/p)k}\]
and
\[
C:=\sum_{k=1}^\infty \frac{\sin(\pi k |\mathcal I|)}{\pi k} e_{p,k} q^{-(1/2-1/p)k}.
\]
These terms will reappear in the computation of the higher moments. Note that, since $p>2,$ the above infinite series converges absolutely. By Proposition \ref{propmanysums}, $C(K)=O(1)$. By property {\bf (e)} of the Beurling-Selberg polynomials, $C=C(K) + O(1/K)$.
\end{rem}
\section{Second moment} \label{2mom}
Let
\begin{eqnarray}
S^\pm(K,C_f)=\sum_{h=1}^{p-1}S^{\pm}(K, f, \psi^h),
\end{eqnarray}
where $S^{\pm}(K, f, \psi)$ is defined in \eqref{defSK}.
In the next sections, we are computing the moments of $S^{\pm}(K, C_f)$. We show that they fit the Gaussian moments when properly normalized
(Theorem \ref{thm:sumisgaussian}). We will then use this result to show that
$$ \frac{N_{\mathcal{I}}(C_f)
- (p-1)(d-1) |\mathcal{I}|}{\sqrt{\frac{2(p-1)}{\pi^2} \log(d |\mathcal{I}|)}}$$
converges to a normal distribution as $d \rightarrow \infty$ since
it converges in mean square to $$\frac{S^{\pm}(K, C_f)}{{\sqrt{\frac{2(p-1)}{\pi^2} \log(d |\mathcal{I}|)}}}.$$
The following lemma is a generalization of Lemma 6.2 in \cite{entin}, that also takes into account the difference in our family of Artin-Schreier covers.
Recall that $\psi^j(\alpha)=\psi(j\alpha)$ for $\alpha \in \mathbb F_p$. We have the following
\begin{lem} \label{lemmaindependence}
Fix $h_1, h_2$ such that $p\nmid h_1 h_2$ and let $e_1, e_2 \in \{-1,1 \}$. Assume $k_1,k_2 > 0$, $k_1+k_2 < d$. Let $\alpha_1 \in \mathbb F_{q^{k_1}}$, $\alpha_2 \in \mathbb F_{q^{k_2}}$
with monic minimal polynomials $g_1,g_2$ of degrees $u_1, u_2$ over $\mathbb F_q$ respectively.
We have
\begin{eqnarray*}
\left< \psi(e_1 h_1\tr_{k_1} f(\alpha_1)+e_2h_2\tr_{k_2} f(\alpha_2)) \right> &=&\begin{cases}
1, & \textrm{$g_1=g_2$, $p \mid \frac{{e_1 h_1 k_1}+{e_2 h_2 k_2}}{u_1}$, $p \nmid \frac{k_1k_2}{u_1u_2}$} \\
& \textrm{or $p\mid \left(\frac{k_1}{u_1}, \frac{k_2}{u_2}\right)$}; \\
0, & \textrm{otherwise.} \end{cases}
\end{eqnarray*}
\end{lem}
\begin{proof}
If $p \mid \frac{k_2}{u_2}$ then $\tr_{k_2} f(\alpha_2)=p\tr_{{\frac{k_2}{p}}}f(\alpha_2)=0$, so
\[
\left< \psi(e_1 h_1\tr_{k_1} f(\alpha_1) +e_2 h_2 \tr_{k_2} f(\alpha_2)) \right> =\left< \psi( e_1 h_1 \tr_{k_1} f(\alpha_1)) \right>.
\]
\]
By Lemma \ref{lem:avg}, this equals $0$ if $p \nmid \frac{k_1}{u_1}$ and $1$ if $p\mid \frac{k_1}{u_1}$ as $p \nmid e_1h_1$.
The only remaining case is when $p \nmid \frac{k_1k_2}{u_1u_2}$. We first suppose that $g_1 \neq g_2$.
We note that we will have the same value if we average over $\mathcal F_d$ rather than $\mathcal F_d'$ due to the existence of the map $\mu$ defined by (\ref{map-mu}). Since $u_1+u_2\leq {k_1}+{k_2} <d$, the map
\[
\tau : \mathcal F_d \rightarrow \mathbb F_q[X]/(g_1g_2) \simeq \mathbb F_{q^{u_1}}\times \mathbb F_{q^{u_2}}
\]
is exactly $(q-1)q^{d-u_1-u_2}$-to-one. Hence as $f$ ranges over $\mathcal F_d$, $(f(\alpha_1), f(\alpha_2))$ takes every value in $\mathbb F_{q^{u_1}}\times \mathbb F_{q^{u_2}}$ the same number of times. Now, since $p\nmid \frac{{e_1 h_1 k_1}}{u_1}$ and $p \nmid \frac{{e_2 h_2 k_2}}{u_2}$,
\[(e_1h_1\tr_{k_1} f(\alpha_1), e_2h_2\tr_{k_2} f(\alpha_2))=\left(\frac{{e_1h_1k_1}}{u_1}\tr_{u_1}(f(\alpha_1)), \frac{{e_2h_2k_2}}{u_2}\tr_{u_2}(f(\alpha_2))\right)\] also takes every value in $\mathbb F_p\times \mathbb F_p$
the same number of times as $f$ ranges over $\mathcal F_d$. Then
\begin{eqnarray*}
&&\psi\left(e_1h_1\tr_{{k_1}}(f(\alpha_1))+e_2h_2 \tr_{{k_2}} (f(\alpha_2))\right) \\&=&\psi\left(e_1h_1\frac{{k_1}}{u_1}\tr_{u_1}(f(\alpha_1))+e_2h_2\frac{{k_2}}{u_2} \tr_{u_2} (f(\alpha_2))\right)
\end{eqnarray*}
assumes every $p$th root of unity equally many times as we average over $\mathcal F_d$ and so the average is $0$.
If $g_1=g_2$, then $\alpha_1$ and $\alpha_2$ are conjugates over $\mathbb F_q$ and so are $f(\alpha_1)$ and $f(\alpha_2).$ Then
$\tr_{u_1} f(\alpha_1)=\tr_{u_1} f(\alpha_2)$. This implies
\begin{eqnarray*}
e_1h_1\tr_{k_1} f(\alpha_1)+e_2h_2\tr_{k_2} f(\alpha_2) &=& e_1h_1\frac{{k_1}}{u_1}\tr_{u_1} f(\alpha_1) +e_2h_2 \frac{{k_2}}{u_1}\tr_{u_1} f(\alpha_2) \\ &=& \frac{{e_1h_1k_1}+e_2h_2 {k_2}}{u_1} \tr_{u_1} f(\alpha_1),
\end{eqnarray*} which is
zero when $p\mid \frac{e_1h_1{k_1}+e_2h_2{k_2}}{u_1}$. If $p$ does not divide $\frac{e_1h_1{k_1}+e_2h_2{k_2}}{u_1}$ then
\[\left< \psi(e_1h_1\tr_{k_1} f(\alpha_1)+e_2h_2 \tr_{k_2} f(\alpha_2)) \right> =
\left< \psi\left(\frac{{e_1h_1k_1}+e_2h_2 {k_2}}{u_1} \tr_{u_1} f(\alpha_1)\right) \right> =0\] by Lemma \ref{lem:avg}.
\end{proof}
For positive integers $k_1,k_2,h_1,h_2$ with $p\nmid h_1h_2$ and $e_1, e_2 \in \{ -1, 1\}$, let
\begin{eqnarray*}
M_{2,d}^{(k_1,k_2),(e_1,e_2),(h_1,h_2)} &:=& \left< q^{-(k_1+k_2)/2} \sum_{{\alpha_1 \in \mathbb F_{q^{k_1}}}\atop{\alpha_2 \in \mathbb F_{q^{k_2}}}}\psi(e_1h_1\tr_{k_1} f(\alpha_1)+e_2h_2 \tr_{k_2} f(\alpha_2)) \right> \\
&=& q^{-(k_1+k_2)/2} \sum_{{\alpha_1 \in \mathbb F_{q^{k_1}}}\atop{\alpha_2 \in \mathbb F_{q^{k_2}}}}
\left< \psi(e_1h_1\tr_{k_1} f(\alpha_1) +e_2h_2 \tr_{k_2} f(\alpha_2)) \right> .
\end{eqnarray*}
Then we have the following analogue of Theorem 8 in \cite{entin}.
\begin{thm}\label{Mcovariance} Assume ${k_1} \geq {k_2} > 0$ and ${k_1}+{k_2} < d$. Let $0<h_1, h_2 \leq (p-1)/2$.
Then
\begin{eqnarray*}
M_{2,d}^{({k_1},{k_2}),(e_1,e_2),(h_1,h_2)}
&=&\delta_{{k_1},2{k_2}} O \left({k_1} q^{-{k_2}/2}\right) + O \left({k_1} q^{-{k_2}/2-{k_1}/6} + q^{-(1/2 - 1/p)({k_1}+{k_2})}\right)\\
&&+
\begin{cases}
\delta_{{k_1},{k_2}} {k_1} \left(1+O\left(q^{-{k_1}/2}\right)\right), & (e_1,e_2)=(1,-1), h_1=h_2,\\
0, & \text{ otherwise,}
\end{cases}
\end{eqnarray*}
where
\begin{eqnarray*}
\delta_{{k_1},{k_2}} = \begin{cases} 1, & {k_1}={k_2}, \\0, & {k_1} \neq {k_2}. \end{cases}
\end{eqnarray*}
\end{thm}
Before we proceed with the proof, we would like to make a few remarks. In the instances when we apply this result, we will choose $K=cd$, for $0<c<1/2$, and therefore ${k_1}, {k_2}\leq K$ will imply
that $k_1 + k_2 < d$, and will be able to
apply Theorem \ref{Mcovariance} for all values of $k_1, k_2$ under consideration.
Also note that the condition $k_1\geq k_2>0$ does not restrict the validity of the statement, since
$M_{2,d}^{({k_2},{k_1}),(1,-1),(h_1,h_2)}=\overline{M_{2,d}^{({k_1},{k_2}),(1,-1), (h_2,h_1)}}$.
\begin{proof} From Lemma \ref{lemmaindependence},
\begin{eqnarray*}
&&M_{2,d}^{({k_1},{k_2}),(e_1,e_2),(h_1,h_2)} = q^{-({k_1}+{k_2})/2}\left(e_{p,e_1h_1{k_1}+e_2h_2{k_2}}\sum_{{{m\mid ({k_1},{k_2})}\atop{mp\nmid {k_1},{k_2}}}\atop{mp \mid (e_1h_1{k_1}+e_2 h_2{k_2})}} \pi(m)m^2 +e_{p,{k_1}}e_{p,{k_2}}q^{({k_1}+{k_2})/p}\right),
\end{eqnarray*}
where $\pi(m)$ denotes the number of monic irreducible polynomials of degree $m$ over $\mathbb F_q[X]$. The prime number theorem for function fields (see \cite{rosen}, Theorem 2.2) states that $\pi(m) =\frac{q^m}{m}+O\left(\frac{q^{m/2}}{m}\right).$
When ${k_1}={k_2}$, the conditions on the summation indices become $m\mid {k_1}$, $mp\nmid {k_1}$, and $mp\mid (e_1h_1+e_2h_2){k_1}$, a contradiction unless $p\mid (e_1h_1+e_2h_2)$. Due to the range in which the $h_1, h_2$ take values, this can only happen when $e_1=-e_2$ and $h_1=h_2$.
In this case, one gets
\[\sum_{{m\mid {k_1}}\atop{mp\nmid {k_1}}} \pi(m)m^2 = {k_1}q^{k_1}+O\left({k_1}q^{{k_1}/2}\right).\]
On the other hand, when ${k_1}=2{k_2}$, one gets
\[\sum_{{{m\mid {k_2}}\atop{mp\nmid {k_2}}}\atop{mp \mid (2e_1h_1+e_2h_2){k_2}}} \pi(m)m^2=O({k_2}q^{k_2})=O\left({k_1}q^{{k_1}/2}\right).\]
Finally, if ${k_1}>{k_2}$ but ${k_1}\not = 2{k_2}$, we have $({k_1},{k_2})\leq {k_1}/3$ and
\[\sum_{{{m\mid ({k_1},{k_2})}\atop{mp\nmid {k_1},{k_2}}}\atop{mp \mid (e_1h_1{k_1}+e_2h_2{k_2})}} \pi(m)m^2=O\left({k_1} q^{{k_1}/3}\right).\]
This concludes the proof of the theorem.
\end{proof}
Finally, we are able to compute the covariances.
\begin{thm}\label{covariance}Let $h_1, h_2$ be integers such that $0< h_1, h_2\leq (p-1)/2$. Then for any $K$ with $\max\{1,1/|\mathcal I|\}<K<d/2$,
\begin{eqnarray*}
\left< S^\pm(K, f, \psi^{h_1}) S^\pm(K, f, \psi^{h_2})\right> &=&\left< S^\pm(K, f, \psi^{h_1}) S^\mp(K, f, \psi^{h_2}) \right>
=\begin{cases}
\displaystyle \frac{1}{\pi^2} \log (K |\mathcal{I}|)+ O\left(1\right), & h_1=h_2
\\
&\\
O\left(1\right), & h_1\neq h_2.
\end{cases}
\end{eqnarray*}
\end{thm}
\begin{proof}
By definition,
\begin{eqnarray*}
&&\left< S^\pm(K, f, \psi^{h_1}) S^\pm(K, f, \psi^{h_2}) \right> \\
&&= \sum_{{k_1},{k_2} =1}^K \widehat{I}_K^\pm({k_1}) \widehat{I}_K^\pm({k_2}) M_{2,d}^{({k_1},{k_2}),(1,1),(h_1,h_2)} +
\widehat{I}_K^\pm({k_1}) \widehat{I}_K^\pm(-{k_2}) M_{2,d}^{({k_1},{k_2}),(1,-1),(h_1,h_2)} \\
&& \;\;\;\; + \widehat{I}_K^\pm(-{k_1}) \widehat{I}_K^\pm({k_2}) M_{2,d}^{({k_1},{k_2}),(-1,1),(h_1,h_2)}+\widehat{I}_K^\pm(-{k_1}) \widehat{I}_K^\pm(-{k_2}) M_{2,d}^{({k_1},{k_2}),(-1,-1),(h_1,h_2)}.
\end{eqnarray*}
Then, by repeated use of Theorem \ref{Mcovariance}
and
Proposition \ref{propmanysums}, the summation over $k_1, k_2$ is $O(1)$ if $h_1\neq h_2$. If $h_1=h_2$ then
\begin{eqnarray*}
\left< S^\pm(K, f, \psi^{h_1}) ^2 \right> &=& 2 \sum_{{k_1}=1}^K \widehat{I}_K^\pm({k_1})\widehat{I}_K^\pm({-k_1}) {k_1} + O(1)
= 2 \sum_{{k_1}\geq 1} \widehat{I}_K^\pm({k_1})^2 {k_1}
+ O ( 1 ) \\
&=& \frac{1}{\pi^2} \log(K |\mathcal{I}|) + O(1)
\end{eqnarray*}
by applying Proposition \ref{propFR}.
The proof for $\left< S^\pm(K, f, \psi^{h_1}) S^\mp(K, f, \psi^{h_2}) \right>$ follows along exactly the same lines.
\end{proof}
\begin{cor}\label{cor:2ndmoment}
For any $K$ with $\max\{1,1/|\mathcal I|\}<K<d/2$,
\[
\langle S^\pm(K,C_f)^2 \rangle=\langle S^+(K,C_f)S^-(K,C_f)\rangle=\frac{2(p-1)}{\pi^2}\log(K|\mathcal I|) + O(1).
\]
\end{cor}
\begin{proof}
First we note that
\begin{eqnarray*}
\langle S^\pm(K,C_f)^2\rangle= \sum_{h_1,h_2=1}^{p-1} \left\langle S^{\pm}(K, f, \psi^{h_1}) S^{\pm}(K, f, \psi^{h_2})\right\rangle.
\end{eqnarray*}
Notice that by Theorem \ref{covariance}, the mixed average contributes $\frac{1}{\pi^2}\log(K|\mathcal I|)+O(1)$ for each term where $h_1=h_2$ or $h_1=p-h_2$. The proof for $\langle S^+(K,C_f)S^-(K,C_f)\rangle$ is identical.
\end{proof}
\section{Third moment} \label{3mom}
Let $k_1, k_2, k_3$ be positive integers, $e_1, e_2, e_3$ take values $\pm 1$, and $h_1,h_2,h_3$ be integers such that $p\nmid h_i$. Denote ${\bf k}=(k_1,k_2,k_3)$, ${\bf e}=(e_1,e_2,e_3)$, and ${\bf h}=(h_1,h_2,h_3)$.
For every ${\bm \alpha}=(\alpha_1,\alpha_2,\alpha_3) \in \mathbb F_{q^{k_1}} \times \mathbb F_{q^{k_2}} \times \mathbb F_{q^{k_3}} $, set
$$
m_{3,d}^{{\bf k}, {\bf e}, {\bf h}}({\bm \alpha}) =
\left< \psi(e_1 h_1 \tr_{k_1}f(\alpha_1) + e_2 h_2 \tr_{k_2}f(\alpha_2) +
e_3 h_3 \tr_{k_{3}} f(\alpha_3)) \right>,
$$
and
$$M_{3, d}^{{\bf k}, {\bf e}, {\bf h}}=
\sum_{{\alpha_i \in \mathbb F_{q^{k_i}}} \atop {i=1,2,3}}
q^{-(k_1 + k_2 + k_3)/2} m_{3,d}^{{\bf k}, {\bf e}, {\bf h}}({\bm \alpha}).$$
In an analogous manner to Section \ref{2mom}, one can prove the following.
\begin{lem}\label{lemm3}
Let $p\nmid h_1h_2h_3$ and let $e_1, e_2, e_3 \in \{-1, 1\}$. Assume $k_1, k_2, k_3 > 0$ and $k_1+k_2+k_3 < d$. For $i=1,2,3$, let $\alpha_i$ be an element of $\mathbb F_{q^{k_i}}$
with minimal polynomial $g_i$ over $\mathbb F_q$ of degree $u_i.$
We have $m_{3,d}^{{\bf k}, {\bf e}, {\bf h}}({\bm \alpha})=1$ in any of the following cases
\begin{itemize}
\item $g_1=g_2=g_3, p \mid \frac{(e_1h_1k_1+e_2h_2k_2+e_3h_3k_3)}{u_1}, p\nmid\frac {k_1k_2k_3}{u_1u_2u_3}$.
\item $g_{j_1}=g_{j_2}, p \mid \frac{(e_{j_1}h_{j_1}k_{j_1}+e_{j_2}h_{j_2}k_{j_2})}{ u_{j_1}}, p \nmid \frac{k_{j_1}k_{j_2}}{u_{j_1}u_{j_2}}, p \mid\frac{k_{j_3}}{u_{j_3}}$ , where $(j_1,j_2,j_3)$ is any permutation of $(1,2,3)$.
\item $p \mid \frac{k_i}{u_i}, i=1,2,3$.
\end{itemize}
Otherwise $m_{3,d}^{{\bf k}, {\bf e}, {\bf h}}({\bm \alpha})=0$.
\end{lem}
\begin{thm}\label{thm3} Assume $k_1 \geq k_2 \geq k_3 > 0$ and $k_1 + k_2 + k_3 < d$. Then
\begin{eqnarray*}
&&M_{3, d}^{{\bf k}, {\bf e}, {\bf h}}\\ &=&M_{1,d}^{k_1,e_1, h_1}M_{2,d}^{(k_2,k_3),(e_2,e_3), (h_2,h_3)}+M_{1,d}^{k_2,e_2, h_2}M_{2, d}^{(k_1,k_3),(e_1,e_3), (h_1,h_3)}\\ &&+M_{1,d}^{k_3,e_3, h_3}M_{2, d}^{(k_1,k_2),(e_1,e_2),(h_1,h_2)} -2M_{1,d}^{k_1,e_1,h_1}M_{1,d}^{k_2,e_2,h_2}M_{1,d}^{k_3,e_3,h_3}\\
&&+O\left(\delta_{k_1,k_2,k_3} k_1^2q^{-k_1/2}+\delta_{k_1,k_2,2k_3} k_1^2 q^{-3k_1/4} + \delta_{k_1,2k_2,2k_3} k_1^2 q^{-k_1/2}+k_1^2 q^{-k_1/6-k_2-k_3} \right)\\
&=& e_{p,k_1}q^{-(1/2-1/p)k_1}M_{2,d}^{(k_2,k_3),(e_2,e_3),(h_2,h_3)}+e_{p,k_2}q^{-(1/2-1/p)k_2}M_{2,d}^{(k_1,k_3),(e_1,e_3),(h_1,h_3)}\\
&&+e_{p,k_3}q^{-(1/2-1/p)k_3}M_{2, d}^{(k_1,k_2),(e_1,e_2),(h_1,h_2)}+O \left(\delta_{k_1,k_2,k_3} k_1^2q^{-k_1/2}+\delta_{k_1,k_2,2k_3} k_1^2 q^{-3k_1/4}\right) \\
&&+ O\left( \delta_{k_1,2k_2,2k_3} k_1^2 q^{-k_1/2}+k_1^2 q^{-k_1/6-k_2-k_3} +q^{-(1/2 - 1/p)(k_1+k_2+k_3)} \right).\\
\end{eqnarray*}
\end{thm}
\begin{proof}
We can use induction in the same way as we used it in the proof of Lemma \ref{lemm3}. The only new term to be considered is given by the case $g_1=g_2=g_3$ and $p u_1 \mid (e_1h_1k_1+e_2h_2k_2+e_3h_3k_3)$.
This term yields
\[ q^{-(k_1+k_2+k_3)/2}e_{p,e_1h_1k_1+e_2h_2k_2+e_3h_3k_3}\sum_{{{m\mid (k_1,k_2,k_3)}\atop{mp\nmid k_1,k_2,k_3}}\atop{mp \mid (e_1h_1k_1+e_2h_2k_2+e_3h_3k_3)}} \pi(m)m^3. \\\]
Suppose that $k_1\geq k_2\geq k_3$. If $k_1=k_3$, we have
\[\sum_{{{m\mid k_1}\atop{mp\nmid k_1}}\atop{mp \mid (e_1h_1+e_2h_2+e_3h_3)k_1}}\pi(m)m^3 = O\left(k_1^2q^{k_1}\right).\]
If $k_1=2k_3$, $k_2=k_1$ or $k_2=k_3$, we have
\[\sum_{{{m\mid (k_1,k_2,k_3)}\atop{mp\nmid k_3}}\atop{mp \mid (e_1h_1k_1+e_2h_2k_2+e_3h_3k_3)}} \pi(m)m^3=O\left(k_1^2q^{k_1/2}\right). \]
Finally, for the other cases,
\[\sum_{{{m\mid (k_1,k_2,k_3)}\atop{mp\nmid k_1,k_2,k_3}}\atop{mp \mid (e_1h_1k_1+e_2h_2k_2+e_3h_3k_3)}} \pi(m)m^3=O\left(k_1^2q^{k_1/3}\right). \]
\end{proof}
\begin{thm} Let $0< h_1, h_2, h_3 \leq (p-1)/2$. For any $K$ with $\max\{1, 1/|\mathcal I|\} <K< d/3$,
\begin{eqnarray*}
&&\left< S^\pm(K, f, \psi^{h_1}) S^\pm(K, f, \psi^{h_2})S^\pm(K, f, \psi^{h_3})\right>
\\&=& \begin{cases}\frac{3C}{\pi^2}\log(K|\mathcal{I}|)+O\left(1\right) & h_1=h_2= h_3,\\
\frac{C}{\pi^2}\log(K|\mathcal{I}|)+O\left(1\right) & h_{j_1}=h_{j_2}\not = h_{j_3}, \, (j_1,j_2,j_3)\, \text{a permutation of}\, (1,2,3),\\
O(1) & h_i \,\text{distinct}.
\end{cases}
\end{eqnarray*}
where $C$ is the constant defined in Remark $\ref{Chantal's favorite nonconstant}$.
\end{thm}
\begin{cor}
For any $K$ with $\max\{1, 1/|\mathcal I|\} <K< d/3$,
\[
\langle S^\pm(K,C_f)^3\rangle=\frac{6C(p-1)^2}{\pi^2}\log(K|\mathcal I|)+O(1).
\]
\end{cor}
\section{General Moments}\label{genmom}
Let $n, k_1, \dots, k_n$ be positive integers, let $e_1, \dots, e_n$ take values $\pm 1$ and let $h_1, \dots, h_n$ be integers such that $p\nmid h_i$, $1\leq i\leq n$.
Let ${\bf k} = (k_1, \dots, k_n)$, ${\bf e} = (e_1, \dots, e_n)$ and ${\bf h}=(h_1,\dots, h_n)$.
Let $\alpha_i \in \mathbb F_{q^{k_i}}$, $1 \leq i \leq n$, and let ${\bm \alpha}=(\alpha_1,\dots,\alpha_n)$. We define
$$
m_{n, d}^{{\bf k}, {\bf e}, {\bf h}}({\bm \alpha}) =
\left< \psi(e_1 h_1 \tr_{k_1}f(\alpha_1) + \dots +
e_nh_n \tr_{k_{n}} f(\alpha_n)) \right>
$$
and
$$M_{n, d}^{{\bf k}, {\bf e}, {\bf h}} =
\sum_{{\alpha_i \in \mathbb F_{q^{k_i}}} \atop {i=1, \dots, n}}
q^{-(k_1 + \dots + k_n)/2} m_{n, d}^{{\bf k}, {\bf e}, {\bf h}}({\bm \alpha}).$$
We are computing in this section the general moments
$$\left< S^\pm(K, f, \psi)^n \right>
= \sum_{k_1, \dots, k_n=1}^K \sum_{e_1, \dots, e_n = \pm 1}
\widehat{I}_K^{\pm}(e_1 k_1) \dots \widehat{I}_K^{\pm}(e_n k_n) M_{n,d}^{{\bf k}, {\bf e}, {\bf h}}
$$
and
\[
\left< S^\pm(K, f, \psi^{h_1})\dots S^\pm(K, f, \psi^{h_n}) \right>= \sum_{k_1, \dots, k_n=1}^K \sum_{e_j = \pm 1 , \atop {1 \leq j \leq n}}
\widehat{I}_K^{\pm}(e_1 k_1) \dots \widehat{I}_K^{\pm}(e_n k_n) M_{n,d}^{{\bf k}, {\bf e}, {\bf h}}.
\]
\begin{lem} \label{generalcase} Assume $k_1, \dots, k_n > 0$, $k_1 + \dots + k_n < d$.
Let $g_1, \dots, g_s$ of degree $u_1, \dots, u_s$ respectively be all the distinct minimal polynomials over $\mathbb F_q$ of
$\alpha_1, \dots, \alpha_n$ (we allow the possibility that some $\alpha_i$'s are conjugate to each other, thus $s\leq n$), and let
$$\epsilon_i = \frac{1}{u_i} \sum_{\alpha_j \in R(g_i)} {k_j} e_j h_j, \;\; 1 \leq i \leq s,$$
where $R(g)$ is the set of roots of $g$.
Then
$$m_{n, d}^{{\bf k}, {\bf e}, {\bf h}}({\bm \alpha}) = \left\{ \begin{array}{ll} 1 & \mbox{if $p \mid \epsilon_i$ for $1 \leq i \leq s$}, \\
0 & \mbox{otherwise}. \end{array} \right.$$
\end{lem}
\begin{proof} As before, we can take the average over the family $\mathcal{F}_d$ of polynomials of degree $d$ without the
condition that $a_{kp} = 0$ for $1 \leq k \leq d/p$.
Renumbering, suppose that $\alpha_i$ has minimal polynomial $g_i$ for $1 \leq i \leq s$.
Since $\sum_{i=1}^s u_i \leq \sum_{i=1}^s k_i < d$, the map
\begin{eqnarray*}
\tau: \mathcal{F}_d \rightarrow \mathbb F_q[X]/(g_1 \dots g_s) \simeq \mathbb F_{q^{u_1}}\times \dots
\times \mathbb F_{q^{u_s}}
\end{eqnarray*}
is exactly $(q-1) q^{d - (u_1+ \dots +u_s)}$-to-one, and as $f$ ranges over $\mathcal{F}_d$,
$\left( f(\alpha_1), \dots , f(\alpha_s) \right)$ takes every value in $\mathbb F_{q^{u_1}} \times \dots
\times \mathbb F_{q^{u_s}}$ the same number of times. Now, the product
$\left( \tr_{u_1} f(\alpha_1), \dots, \tr_{u_s}f(\alpha_s) \right)$ also takes every value in $(\mathbb F_p)^s$ the same number of times as $f$ ranges over $\mathcal{F}_d$, and the same holds for any linear combination
$$\gamma_1 \tr_{u_1} f(\alpha_1) + \dots + \gamma_s \tr_{u_s}f(\alpha_s),
$$
unless $p$ divides every $\gamma_i$. This shows that each $p$th root of unity occurs
as many times as
$$\psi \left( \gamma_1 \tr_{u_1} f(\alpha_1) + \dots + \gamma_s \tr_{u_s}f(\alpha_s) \right)$$
when $p$ does not divide all the $\gamma_i$.
We now determine the coefficients $\gamma_i$ for
$$m_{n, d}^{{\bf k}, {\bf e}, {\bf h}}({\bm \alpha}) =
\sum_{f \in \mathcal{F}_d} \psi \left( e_1 h_1 \tr_{k_1} f(\alpha_1) + \dots + e_nh_n \tr_{k_n}f(\alpha_n) \right).$$
Recall that $\tr_{k_i}f(\alpha_i)=\frac{k_i}{u_i} \tr_{u_i} f(\alpha_i)$ for $i=1, \dots, s$.
Let
$$\epsilon_i = \frac{1}{u_i} \sum_{\alpha_j \in R(g_i)} e_j h_j k_j, \;\; 1 \leq i \leq s.$$
Then $\gamma_i=\epsilon_i$, i.e.,
$$m_{n, d}^{{\bf k}, {\bf e}, {\bf h}}({\bm \alpha}) =
\sum_{f \in \mathcal{F}_d} \psi \left( \epsilon_1 \tr_{u_1} f(\alpha_1) + \dots + \epsilon_s \tr_{u_s}f(\alpha_s) \right),$$
which implies that $m_{n, d}^{{\bf k}, {\bf e}, {\bf h}}({\bm \alpha})$ takes the value 1 if $p \mid \epsilon_i$ for $1 \leq i \leq s$, and 0 otherwise.
\end{proof}
Recall that $\pi(m)$ denotes the number of monic irreducible polynomials of degree $m$ in $\mathbb F_q[X]$.
\begin{lem} \label{generalcase2}
Assume $k_1, \dots, k_n > 0$, $k_1 + \dots + k_n < d$. Then
$M_{n, d}^{{\bf k}, {\bf e}, {\bf h}}$ is bounded by a sum of terms made of products of elementary terms of the
type
\[q^{-(j_1+\dots+j_r)/2} \sum_{{{m\mid (j_1,\dots,j_r)}\atop{mp\mid \sum_{i=1}^r e_i h_i j_i}}} \pi(m) m^r \]
where the indices $j_1, \dots, j_r$ of the elementary terms appearing in each product are in bijection with $k_1, \dots, k_n$.
Let $N_{n, d}^{{\bf k}, {\bf e}, {\bf h}}$ be the sum of the terms made exclusively of products of elementary terms
$$q^{-(j_1+j_2)/2} \sum_{{{m\mid (j_1,j_2)}\atop{mp\mid e_1h_1 j_1+ e_2h_2 j_2}}} \pi(m) m^2.
$$
If $n$ is odd, these terms will also be multiplied by an elementary term $$e_{p,j} q^{-j/2} \sum_{{{m\mid j}\atop{mp\mid e j}}} \pi(m) m
= e_{p,j} \sum_{m \mid \frac{j}{p}} \pi(m)m = e_{p,j} \# \mathbb F_{q^{j/p}} = e_{p,j} q^{j/p}.$$
Let $E_{n, d}^{{\bf k}, {\bf e}, {\bf h}}$ be the sum of all the other terms appearing in $M_{n, d}^{{\bf k}, {\bf e}, {\bf h}}$.
Then, $M_{n, d}^{{\bf k}, {\bf e}, {\bf h}} = N_{n, d}^{{\bf k}, {\bf e}, {\bf h}} + O \left( E_{n, d}^{{\bf k}, {\bf e}, {\bf h}} \right).$
\end{lem}
\begin{proof} We first remark that the number of $(\alpha_1, \dots, \alpha_t) \in
\mathbb F_{q^{k_1}} \times \dots \times \mathbb F_{q^{k_t}}$ which are conjugate over $\mathbb F_q$
is
$$\sum_{m \mid (k_1, \dots, k_t)} \pi(m) m^t.$$
Using Lemma \ref{generalcase},
we then have to count the contribution
coming from the ${\bm \alpha} = (\alpha_1, \dots, \alpha_n)$ such that $p \mid \epsilon_i$ for $1 \leq i \leq s$.
Let $\mathcal{P}$ be the set of partitions of $\{1, \dots, n\}$ into $s$ subsets $T_1, \dots, T_s$. Let $k(T_j)$ be the gcd of the $k_i$ such that $i \in T_j$ and let $s(T_j)=\sum_{i\in T_j} e_i h_i k_i$.
Then, for any such partition, the number of ${\bm \alpha} = (\alpha_1, \dots, \alpha_n) \in \mathbb F_{q^{k_1}}
\times \dots \times \mathbb F_{q^{k_n}}$
such that $\alpha_i$ is a root of $g_j$ when $i \in T_j$ is less than or equal to
$$
\sum_{{{m\mid k(T_1)}\atop{mp \mid s(T_1)}}} \pi(m)m^{|T_1|} \dots
\sum_{{{m\mid k(T_s)}\atop{mp \mid s(T_s)}}} \pi(m)m^{|T_s|} .
$$
This proves the first statement of the lemma. We remark that the above count is an over-count, as it may also count polynomials $g_1, \dots, g_s$ which are not distinct. For example, the number of $(\alpha_1, \alpha_2, \alpha_3, \alpha_4) \in \mathbb F_{q^{j_1}} \times \dots \times \mathbb F_{q^{j_4}}$ with minimal polynomials $g_1=g_2, g_3=g_4$ and $g_1 \neq g_3$
is
\begin{eqnarray*} &&q^{-(j_1+\dots+j_4)/2}\sum_{{{m\mid (j_1,j_2)}\atop{mp\mid e_1 h_1 j_1 + e_2 h_2 j_2}}} \pi(m) m^2 \sum_{{{m\mid (j_3,j_4)}\atop{mp\mid e_3 h_3 j_3 + e_4 h_4 j_4}}} \pi(m) m^2 - q^{-(j_1+\dots+j_4)/2} \sum_{{{m\mid (j_1,\dots,j_4)}\atop{mp\mid e_1 h_1 j_1 + \dots + e_4 h_4 j_4}}} \pi(m) m^4,
\end{eqnarray*}
which can be written as a term in $N_{n, d}^{{\bf k}, {\bf e}, {\bf h}}$ and a term in $E_{n, d}^{{\bf k}, {\bf e}, {\bf h}}$. The general case
is similar. Suppose that $n=2\ell$ is even. Then, using inclusion-exclusion, the number of $(\alpha_1, \dots, \alpha_n)
\in \mathbb F_{q^{k_1}} \times \dots \times \mathbb F_{q^{k_n}}$
such that $\alpha_i$ and $\alpha_{\ell+i}$ have minimal polynomial $g_i$, and all the $g_i$ are distinct can be written as
\begin{eqnarray*}
&&q^{-(k_1+\dots+k_{2\ell})/2} \left( \sum_{{{m\mid (k_1,k_{\ell+1})\atop{mp\mid e_1 h_1 k_1 + e_{\ell+1} h_{\ell+1}k_{\ell+1}}}}} \pi(m) m^2 \dots
\sum_{{{m\mid (k_{\ell},k_{2 \ell})}\atop{mp\mid e_\ell h_\ell k_\ell + e_{2 \ell} h_{2\ell}k_{2\ell}}}} \pi(m) m^2 \right) + S(k_1, \dots, k_n)\\
\end{eqnarray*}
\noindent where $S(k_1, \dots, k_n)$ is a sum of terms in $E_{n, d}^{{\bf k}, {\bf e}, {\bf h}}$.
The case of $n=2\ell+1$ follows similarly, taking into account
that one has to multiply by the factor $e_{p,k_n} q^{-k_n/2} \sum_{{{m\mid k_n}\atop{mp\mid e k_n}}} \pi(m) m$.
\end{proof}
We now compute
\[
\left< S^\pm(K, f, \psi^{h_1})\dots S^\pm(K, f, \psi^{h_n}) \right>=\sum_{{k_1, \dots, k_n =1}\atop{e_1, \dots, e_n = \pm 1}}^K
I_K^{\pm}(e_1 k_1) \dots I_K^{\pm}(e_n k_n) M_{n,d}^{{\bf k}, {\bf e}, {\bf h}}.
\]
We will use $K = cd$ where $0 < c < 1/n$. Then, $k_i \leq K$ implies that $k_1 + \dots + k_n <d$, and we
can apply the lemmas above.
Using Lemma \ref{generalcase2}, we have to compute sums of the
type
\begin{eqnarray} \sum_{k=1}^K \widehat{I}_K^{\pm}(k) q^{-(1/2 - 1/p)k} = C(K)= O(1),
\end{eqnarray}
and for $r \geq 2$
\begin{eqnarray*} \sum_{k_1,\dots,k_r=1}^K\widehat{I}_K^\pm(e_1 k_1) \dots \widehat{I}_K^\pm(e_r k_r)q^{-(k_1+\dots+k_r)/2}\sum_{{m\mid (k_1,\dots,k_r)}\atop{mp \mid \sum_{i=1}^r e_i h_ik_i}} \pi(m)m^r.
\end{eqnarray*}
If $r=2$, we have when $p \mid e_1 h_1 k_1 + e_2 h_2 k_2$
\begin{eqnarray} \label{bigones}
&&\sum_{k_1, k_2=1}^K\widehat{I}_K^\pm(e_1 k_1) \widehat{I}_K^\pm(e_2 k_2)
q^{-(k_1+k_2)/2}\sum_{{m\mid (k_1,k_2)}\atop{mp \mid (e_1h_1k_1+e_2h_2k_2)}} \pi(m)m^2 \nonumber \\&&=\left \{\begin{array}{ll} \frac{1}{2 \pi^2} \log{\left(K |\mathcal{I}|\right)} +O(1)&e_1h_1+e_2h_2\equiv 0\, \mathrm{mod}\, p,\\\\O(1) & \text{otherwise}\end{array}\right.
\end{eqnarray}
as we computed in the proof of Theorems \ref{Mcovariance} and \ref{covariance}. (In those theorems we
had the extra condition $mp \nmid k_1,k_2$ in the sum, but those additional terms only add an $O(1)$ to the
final sum, and we can ignore them.)
For the other terms, we have
\begin{lem} \label{rbig} Let $r>2$, then
\[S:=\sum_{k_1,\dots,k_r=1}^K\widehat{I}_K^\pm(k_1) \dots \widehat{I}_K^\pm(k_r)q^{-(k_1+\dots+k_r)/2}\sum_{{m\mid (k_1,\dots,k_r)}\atop{mp\nmid (k_1,\dots,k_r)}} \pi(m)m^r=O(1)\]
\end{lem}
\begin{proof}
Suppose for the moment that $k_1\geq\dots\geq k_r$. If $k_1=k_r$, we have
\[\sum_{{m\mid (k_1,\dots,k_r)}\atop{mp\nmid (k_1,\dots,k_r)}} \pi(m)m^r=O\left(k_1^r q^{k_1} \right).\]
If $k_1=2k_r$, and all the other $k_i$ are equal to $k_1$ or $k_r$, we have
\[\sum_{{m\mid (k_1,\dots,k_r)}\atop{mp\nmid (k_1,\dots,k_r)}} \pi(m)m^r=O\left(k_1^r q^{k_1/2} \right).\]
In all the other cases,
\[\sum_{{m\mid (k_1,\dots,k_r)}\atop{mp\nmid (k_1,\dots,k_r)}} \pi(m)m^r=O\left(k_1^r q^{k_1/3} \right).\]
Putting things together, we get
\begin{eqnarray*}S&\ll &\sum_{k=1}^K\widehat{I}_K^\pm(k)^r k^rq^{-(r-2)k/2}+\sum_{\ell=1}^{r-1}\sum_{k=1}^K\widehat{I}_K^\pm(2k)^\ell \widehat{I}_K^\pm(k)^{r-\ell}k^rq^{(1-r/2-\ell/2)k}\\
&& +\sum_{k_1,\dots,k_r=1}^K\widehat{I}_K^\pm(k_1) \dots \widehat{I}_K^\pm(k_r)k_1^rq^{-k_1/6-(k_2+\dots+k_r)/2}\\&\ll& 1\end{eqnarray*}
by Proposition \ref{propmanysums}.
\end{proof}
\begin{thm}\label{thm:Smoments}
For any $K$ with $\max\{1, 1/|\mathcal I|\} <K< d/n$
\[\left< S^\pm(K, f, \psi)^{n} \right>= \left\{\begin{array}{ll}\frac{(2 \ell)!}{\ell! (2\pi^2)^\ell} \log^\ell(K|\mathcal{I}|) \left(1+O\left(\log^{-1}(K|\mathcal{I}|)\right)\right) & n=2\ell,\\ \\
C\frac{(2\ell+1)!}{\ell! (2\pi^2)^{\ell}}\log^\ell(K|\mathcal{I}|)\left(1+O\left({\log^{-1}\left(K|\mathcal{I}|\right)}\right)\right) & n=2\ell+1, \end{array}\right.\]
where $C$ is defined in Remark $\ref{Chantal's favorite nonconstant}$.
\end{thm}
\begin{proof}
By Lemmas \ref{generalcase2} and \ref{rbig}, we observe that the leading term in $S^{\pm}(K, f, \psi)^n$
will come from the contributions $N_{n, d}^{{\bf k}, {\bf e}}$. By equation (\ref{bigones}), if $n=2\ell$, the leading terms are of the form
$$\left( \frac{1}{2\pi^2} \log{\left( K|\mathcal{I}| \right)} \right)^\ell$$
and if $n=2\ell+1$, the leading terms are of the
form
$$
C \left( \frac{1}{2\pi^2} \log{\left(K |\mathcal{I}|\right)} \right)^\ell.
$$
The final coefficient is obtained by counting the numbers of ways to choose the $\ell$ (or $\ell+1$) coefficients $k_i's$ with positive sign ($e_i=1$) and to pair them with those with negative sign ($e_j=-1$).
\end{proof}
As $S^\pm(K, f, \psi)=S^\pm(K,f,\bar\psi)$, it is sufficient to study the
sum of $S^\pm(K, f, \psi^j)$ for $j$ up to $(p-1)/2$ rather than $p-1$.
We let
\[
\delta_n(C)=\begin{cases} 1 & n=2\ell
\\
C & n=2\ell+1.
\end{cases}
\]
\begin{thm}\label{thm:genmoments}
Let $\ell=\lfloor \frac{n}{2}\rfloor$. Let $0< h_1,\dots, h_n\leq (p-1)/2$. Then for any $K$ with $\max\{1, 1/|\mathcal I|\} <K< d/n$,
\begin{eqnarray*}&&\left< S^\pm(K, f, \psi^{h_1})\dots S^\pm(K, f, \psi^{h_n}) \right>\\ &=& \delta_n(C)\frac{\Delta(h_1,\dots,h_n)}{(2\pi^2)^\ell} \log^\ell(K|\mathcal{I}|) \left(1+O\left(\log^{-1}(K|\mathcal{I}|)\right)\right) \end{eqnarray*}
where $C$ is defined in Remark $\ref{Chantal's favorite nonconstant}$ and
\begin{equation*}\Delta(h_1,\dots,h_{n})=\# \{(e_1,\dots,e_{n})\in \{-1,1\}^n, \sigma \in \mathbb{S}_{n} \mid e_1h_{\sigma(1)}+e_2h_{\sigma(2)}\equiv \dots \equiv e_{2\ell-1}h_{\sigma(2\ell-1)}+e_{2\ell}h_{\sigma(2\ell)}\equiv 0 \, \mathrm{mod}\, p \}\end{equation*}
where $\mathbb{S}_{n}$ denotes the permutations of the set of $n$ elements.
\end{thm}
\begin{proof}
By Lemmas \ref{generalcase2} and \ref{rbig}, we observe that the leading term in the product $S^\pm(K, f, \psi^{h_1})\dots S^\pm(K, f, \psi^{h_n})$
will come from the contributions $N_{n, d}^{{\bf k}, {\bf e}, {\bf h}}$. By Theorem \ref{covariance}, if $n=2\ell$, the leading terms are of the form
$$\left( \frac{1}{2\pi^2} \log{\left( K|\mathcal{I}| \right)} \right)^\ell$$
and if $n=2\ell+1$, the leading terms are of the
form
$$
C \left( \frac{1}{2\pi^2} \log{\left(K |\mathcal{I}|\right)} \right)^\ell.
$$
The final coefficient is obtained by counting the numbers of ways to choose the $\ell$ (or $\ell+1$) coefficients $k_i$ with positive sign ($e_i=1$) and to pair them with $k_j$ with negative sign ($e_j=-1$) in such a way that
$p$ divides $e_ih_i+e_jh_j$.
\end{proof}
We note that if $n=2\ell$,
\begin{equation}\label{combinatorics}
\sum_{h_1, \dots, h_n=1}^{(p-1)/2} \Delta(h_1,\dots,h_n)=\frac{(p-1)^\ell (2 \ell)!}{2^\ell\ell! }.\end{equation}
There are $\frac{(2\ell)!}{\ell!2^\ell}$ ways of choosing pairs $\{e_i,e_j\}$ (because the order does not count inside the pair). For each pair either $e_i$ or $e_j$ can be negative and the other one positive so there are a total $2^\ell$ choices for the signs. Finally, for each pair there are $((p-1)/2)$ possible values for $h_i$ and this determines $h_j$.
\begin{rem} A consequence of Theorem \ref{thm:genmoments} is that the moments are given by sums of products of covariances, exactly in the same way as the moments of a multivariate normal distribution. Moreover, the generating function of the moments converges due to \eqref{combinatorics}. Therefore, our random variables are jointly normal. Since the variables are uncorrelated (cf.~Theorem \ref{covariance}), it follows that our random variables are independent.
\end{rem}
Recall that
\[
S^\pm(K,C_f)=\sum_{j=1}^{p-1}S^{\pm}(K, f, \psi^j).
\]
\begin{thm}\label{thm:sumisgaussian}
Assume that $K=d/\log\log(d|\mathcal I|)$, $d \rightarrow \infty$ and either $0<|\mathcal{I}|<1$ is fixed or $|\mathcal{I}| \rightarrow 0$
while $d|\mathcal{I}| \rightarrow \infty$.
Then
\[
\frac{S^\pm(K, C_f)}{\sqrt{\frac {2(p-1)}{\pi^2}\log(d|\mathcal{I}|)}}
\]
has a standard Gaussian limiting distribution when $d \rightarrow \infty$.
\end{thm}
\begin{proof}
First we compute the moments and then we normalize them.
Let $\ell=\lfloor \frac{n}{2}\rfloor$. We note
that with our choice of $K$ we have
\[
\frac{\log (K|\mathcal I|)}{ \log (d|\mathcal I |)}=1- \frac{\log\log\log(d|\mathcal I|)}{\log (d|\mathcal I |)}.\]
Therefore, we can replace $\log (K|\mathcal I|)$ by $\log (d|\mathcal I |)$ in our formulas.
Recall that $S^\pm(K, f, \psi^j)=S^\pm(K, f, \psi^{p-j})$, then
\begin{eqnarray*}
S^\pm(K,C_f)^n=\left(2\sum_{j=1}^{(p-1)/2}S^\pm(K, f, \psi^j) \right)^n=2^n\sum_{j_1, \dots, j_n=1}^{(p-1)/2}S^\pm(K,f,\psi^{j_1})\dots S^\pm(K,f,\psi^{j_n}).
\end{eqnarray*}
Therefore, we can compute the moment
\begin{eqnarray*}
\left\langle S^\pm(K, C_f)^n \right\rangle
&=&2^n\sum_{j_1, \dots, j_n=1}^{(p-1)/2}\langle S^\pm(K,f,\psi^{j_1})\dots S^\pm(K,f,\psi^{j_n})\rangle
\end{eqnarray*}
and then by Theorem \ref{thm:genmoments} this is asymptotic to
\begin{eqnarray*}
&&\frac{2^n\delta_{n}(C)}{(2\pi^2)^\ell}\log^\ell(d|\mathcal{I}|)\sum_{j_1, \dots, j_n=1}^{(p-1)/2} \Delta(j_1,\dots,j_n).
\end{eqnarray*}
Finally we use equation \eqref{combinatorics} to conclude that when $n=2\ell$,
\[
\left\langle S^\pm(K, C_f)^n \right\rangle=\frac{2^n(p-1)^\ell (2 \ell)!}{2^\ell \ell! (2\pi^2)^\ell}\log^\ell(d|\mathcal{I}|)=\frac{(2\ell)!}{\ell!\pi^{2\ell}} (p-1)^\ell\log^\ell(d|\mathcal{I}|)
\]
and the variance is asymptotic to $\frac{2(p-1)}{\pi^2}\log(d|\mathcal{I}|)$.
Hence the normalized moment converges to $0$ for $n$ odd and for $n$ even,
\[\lim_{d\rightarrow \infty} \frac{\left\langle S^\pm(K, C_f)^{2\ell} \right\rangle}{\left( \sqrt{\frac{2(p-1)}{\pi^2} \log (d|\mathcal I|)}\right)^{2\ell}}
= \frac{(2\ell)!}{\ell!2^\ell}.\]
\end{proof}
\section{Proof of main theorem}\label{proof}
We prove in this section that
$$ \frac{N_{\mathcal{I}}(C_f)
- 2g |\mathcal{I}|}{\sqrt{(2(p-1)/\pi^2) \log(d |\mathcal{I}|)}}$$
converges in mean square to
$$\frac{S^{\pm}(K, C_f)}{{\sqrt{(2(p-1)/\pi^2) \log(d |\mathcal{I}|)}}}.$$
Then, using Theorem \ref{thm:sumisgaussian}, we get the result of Theorem \ref{mainthm}
since convergence in mean square implies convergence in distribution.
\begin{lem} Assume that $K =d/\log \log(d|\mathcal{I}|)$, $d\rightarrow \infty$ and either $0<|\mathcal{I}|<1$ is fixed or
$|\mathcal{I}|\rightarrow 0$ while $d |\mathcal{I}|\rightarrow \infty$. Then
\[\left< \left| \frac{N_\mathcal{I}(C_f) - (d-1)(p-1) |\mathcal{I}| +S^{\pm}(K,C_f)}{\sqrt{(2(p-1)/\pi^2) \log (d |\mathcal{I}|)}}\right|^2 \right>\rightarrow 0\]
\end{lem}
\begin{proof}
From equation (\ref{T-estimate}) from Section \ref{1mom}, using the Beurling-Selberg polynomials and the explicit formula (Lemma \ref{Explicit-Formula}),
we deduce that
\begin{eqnarray*}
\frac{-(p-1)(d-1)}{K+1} &\leq& N_\mathcal{I}(C_f) - (p-1)(d-1) |\mathcal{I}| +S^-(K, C_f) \\& \leq &S^-(K, C_f)
- S^+(K,C_f) +\frac{(p-1)(d-1)}{K+1} \end{eqnarray*}
and
\begin{eqnarray*}
\frac{-(p-1)(d-1)}{K+1} &\leq& -N_\mathcal{I}(C_f) + (p-1)(d-1) |\mathcal{I}| -S^+(K, C_f) \\ &\leq& S^-(K, C_f)
- S^+(K,C_f) + \frac{(p-1)(d-1)}{K+1}.\ \end{eqnarray*}
Using these two inequalities to bound the absolute value of the central term, we obtain
\begin{eqnarray*}
&&\left< \left ( N_\mathcal{I}(C_f) - (p-1)(d-1) |\mathcal{I}| + S^{\pm}(K, C_f) \right)^2 \right>\\
&\leq& \max \left \{ \left(\frac{(p-1)(d-1)}{K+1} \right)^2, \left< \left(S^{-}(K, C_f) - S^{+}(K,C_f) +\frac{(p-1)(d-1)}{K+1}\right)^2 \right> \right\} \\
&&\leq \left(\frac{(p-1)(d-1)}{K+1} \right)^2 \\&+& \max \left \{ 0, \left< \left( S^{-}(K, C_f) - S^{+}(K, C_f) \right)^2 \right>
+ 2 \frac{(p-1)(d-1)}{K+1} \left< S^{-}(K, C_f) - S^{+}(K, C_f) \right> \right\}.
\end{eqnarray*}
Now using the estimate in the proof of Theorem \ref{averageET}, we have that
\begin{eqnarray*}
\left\langle S^{-}(K, C_f) - S^{+}(K, C_f) \right\rangle &=& \left< S^{-}(K, C_f) \right> -
\left< S^{+}(K, C_f) \right> =O(1).
\end{eqnarray*}
For the remaining term we note that
\begin{eqnarray*}
&&\left\langle \left(S^{-}(K, C_f) - S^{+}(K, C_f)\right)^2\right \rangle
\\
&=&\left\langle \left(S^{-}(K, C_f)\right)^2\right\rangle + \left\langle \left(S^{+}(K, C_f)\right)^2\right\rangle-2\left\langle \sum_{j_1, j_2=1}^{p-1} S^{-}(K, f, \psi^{j_1}) S^{+}(K, f, \psi^{j_2})\right\rangle.
\end{eqnarray*}
By Corollary \ref{cor:2ndmoment}, this equals
\[\frac{4(p-1)}{\pi^2}\log(d|\mathcal I|)+O(1)-\frac{4(p-1)}{\pi^2}\log(d|\mathcal I|)+O(1)=O(1).\]
Therefore,
\[\left< \left ( N_\mathcal{I}(C_f )- (p-1)(d-1) |\mathcal{I}| +S^{\pm}(K, C_f) \right)^2 \right>=O\left(\left(\frac{(p-1)(d-1)}{K+1}\right)^2\right)\]
and
\[\left \langle \left( \frac{N_\mathcal{I}(C_f) - (p-1)(d-1) |\mathcal{I}|+S^\pm(K, C_f)}{\sqrt{(2(p-1)/\pi^2) \log(d|\mathcal{I}|)} }\right)^2 \right \rangle \rightarrow 0\]
when $d$ tends to infinity and $K =d/\log \log(d|\mathcal{I}|)$.
\end{proof}
\begin{rem}
Proposition \ref{prop} is proved in a similar way. For this, one uses Theorem \ref{thm:Smoments} to examine the moments of
\[
\frac{S^\pm(K, f, \psi)+S^\pm(K, f, \bar{\psi})}{\sqrt{\frac {4}{\pi^2}\log(d|\mathcal{I}|)}}=\frac{2S^\pm(K, f, \psi)}{\sqrt{\frac {4}{\pi^2}\log(d|\mathcal{I}|)}}.
\]
\end{rem}
\section*{Acknowledgments}
The authors would like to thank Ze\'ev Rudnick for many useful discussions
while preparing this paper. The authors are also grateful to Louis-Pierre Arguin, Andrew Granville and Rachel Pries for helpful conversations related to this work. The first, third and fifth named authors thank the Centre de Recherche Math\'ematique (CRM) and the Mathematical Sciences Research Institute (MSRI) for their hospitality.
This work was supported by the National Science Foundation of U.S. [DMS-1201446 to B. F.],
the Simons Foundation [\#244988 to A. B.] the UCSD Hellman Fellows Program
[2012-2013 Hellman Fellowship to A. B.], the Natural Sciences and Engineering Research Council
of Canada [Discovery Grant 155635-2008 to C. D., 355412-2008 to M. L.] and the Fonds de recherche du Qu\'ebec - Nature et technologies [144987 to M. L., 166534 to C. D. and M. L.]
\end{document} |
\begin{document}
\title{The regularity of quotient paratopological groups}
\author{Taras Banakh}
\address{Department of Mathematics, Ivan Franko Lviv National University,
Universytetska 1, Lviv, 79000, Ukraine}
\email{[email protected]}
\author{Alex Ravsky}
\address{Department of Functional Analysis, Pidstryhach Institute for Applied Problems of Mechanics and Mathematics
National Academy of Sciences of Ukraine,
Naukova 2-b, Lviv, 79060, Ukraine}
\email{[email protected]}
\keywords{paratopological group, quotient paratopological group,
group reflexion, regularity}
\subjclass{22A15, 54H10, 54H11}
\begin{abstract}
Let $H$ be a closed subgroup of a regular abelian paratopological
group $G$. The group reflexion $G^\flat$ of $G$ is the group $G$
endowed with the strongest group topology, weaker that the
original topology of $G$. We show that the quotient $G/H$ is
Hausdorff (and regular) if $H$ is closed (and locally compact) in
$G^\flat$. On the other hand, we construct an example of a regular
abelian paratopological group $G$ containing a closed discrete
subgroup $H$ such that the quotient $G/H$ is Hausdorff but not
regular.\end{abstract}
\maketitle
In this paper we study the properties of the quotients of
paratopological groups by their normal subgroups.
By a paratopological group $G$ we understand a group $G$ endowed
with a topology $\tau$ making the group operation continuous, see \cite{ST}. If,
in addition, the operation of taking inverse is continuous, then
the paratopological group $(G,\tau)$ is a topological group. A
standard example of a paratopological group failing to be a
topological group is the Sorgenfrey line ${\mathbb{L}}$, that is the real
line ${\mathbb{R}}$ endowed with the Sorgenfrey topology (generated by the
base consisting of half-intervals $[a,b)$, $a<b$).
Let $(G,\tau)$ be a paratopological group and $H\subset G$ be a
closed normal subgroup of $G$. Then the quotient group $G/H$
endowed with the quotient topology is a paratopological group, see
\cite{Ra}. Like in the case of topological groups, the quotient
homomorphism $\pi:G\to G/H$ is open. If the subgroup $H\subset G$
is compact, then the quotient $G/H$ is Hausdorff (and regular)
provided so is the group $G$, see \cite{Ra}. The compactness of
$H$ in this result cannot be replaced by the local compactness as
the following simple example shows.
\begin{example}\label{ex1} The subgroup $H=\{(-x,x):x\in{\mathbb{Q}}\}$ is closed and discrete
in the square $G={\mathbb{L}}^2$ of the Sorgenfrey line ${\mathbb{L}}$. Nonetheless,
the quotient group $G/H$ fails to be Hausdorff: for any irrational
$x$ the coset $(-x,x)+H$ cannot be separated from zero $(0,0)+H$.
\end{example}
A necessary and sufficient condition for the quotient $G/H$ to be
Hausdorff is the closedness of $H$ in the topology of group
reflexion $G^\flat$ of $G$.
By {\em the group reflexion} $G^\flat=(G,\tau^\flat)$ of a
paratopological group $(G,\tau)$ we understand the group $G$
endowed with the strongest topology $\tau^\flat\subset\tau$
turning $G$ into a topological group. This topology admits a
categorial description: $\tau^\flat$ is a unique topology on $G$
such that\begin{itemize}
\item $(G,\tau^\flat)$ is a topological group; \item the identity
homomorphism ${\operatorname{id}}:(G,\tau)\to(G,\tau^\flat)$ is continuous; \item
for each continuous group homomorphism $h:G\to H$ into a
topological group $H$ the homomorphism $h\circ {\operatorname{id}}^{-1}:G^\flat\to
H$ is continuous.
\end{itemize}
Observe that the group reflexion of the Sorgenfrey line ${\mathbb{L}}$ is
the usual real line ${\mathbb{R}}$.
For so-called 2-oscillating paratopological groups $(G,\tau)$ the
topology $\tau^\flat$ admits a very simple description: its base
at the origin $e$ of $G$ consists of the sets $UU^{-1}$, where $U$
runs over open neighborhoods of $e$ in $G$. Following \cite{BR} we
define a paratopological group $G$ to be {\em 2-oscillating} if
for each neighborhood $U\subset G$ of the origin $e$ there is
another neighborhood $V\subset G$ of $e$ such that $V^{-1}V\subset
UU^{-1}$. The class of 2-oscillating paratopological groups is
quite wide: it contains all abelian (more generally all nilpotent)
as well as saturated paratopological groups. Following I.Guran we
call a paratopological group {\em saturated} if for each
neighborhood $U$ of the origin in $G$ its inverse $U^{-1}$ has
non-empty interior in $G$.
Given a subset $A$ of a paratopological group $(G,\tau)$ we can
talk of its properties in the topology $\tau^\flat$. In
particular, we shall say that a subset $A\subset G$ is {\em
$\flat$-closed} in $G$ if it is closed in the topology
$\tau^\flat$. Also with help of the group reflexion many helpful
properties of paratopological groups can be defined.
A paratopological group $G$ is called
\begin{itemize}
\item {\em $\flat$-separated} if the topology $\tau^\flat$ is
Hausdorff;
\item {\em $\flat$-regular} if it has a neighborhood
base at the origin, consisting of $\flat$-closed sets;
\item {\em $\flat$-compact} if $G^\flat$ is compact.
\end{itemize}
It is clear that each $\flat$-separated (and $\flat$-regular)
paratopological group is functionally Hausdorff (and regular).
Conversely, each Hausdorff (resp. regular) 2-oscillating group is
$\flat$-separated (resp. $\flat$-regular), see \cite{BR}. On the
other hand, there are examples of (nonabelian) Hausdorff
paratopological groups $G$ which are not $\flat$-separated, see
\cite{Ra}, \cite{BR}. The simplest example of a $\flat$-compact
non-compact paratopological group is the Sorgenfrey circle
$\{z\in\mathbb C:|z|=1\}$ endowed with the topology generated by
the base consisting of ``half-intervals"
$\{e^{i\varphi}:\varphi\in[a,b)\}$, $a<b$.
Now we are able to state our principal positive result.
\begin{theorem}\label{main1} Let $H$ be a normal subgroup of a
$\flat$-separated paratopological group $G$. Then the quotient
paratopological group $G/H$ is
\begin{enumerate}
\item $\flat$-separated if and only if $H$ is closed in
$G^\flat$;
\item $\flat$-regular if $G$ is $\flat$-regular and
the set $H$ is locally compact in $G^\flat$.
\end{enumerate}
\end{theorem}
\begin{proof} Let $\pi:G\to G/H$ denote the quotient homomorphism.
1. If $H$ is closed in $G^\flat$ then $G^\flat/H$ is Hausdorff as
a quotient of a Hausdorff topological group $G^\flat$. Since the
identity homomorphism $G/H\to G^\flat/H$ is continuous, the
paratopological group $G/H$ is $\flat$-separated.
Now assume conversely that the paratopological group $G/H$ is
$\flat$-separated. Since the quotient map $\pi^\flat:G^\flat\to
(G/H)^\flat$ is continuous its kernel $H$ is closed in $G^\flat$.
2. Assume that $G$ is $\flat$-regular and $H$ is locally compact
in $G^\flat$. It follows that $H$ is closed in $G^\flat$ (this is so
because the subgroup $H\subset G^\flat$, being locally compact, is
complete). Then there is a closed neighborhood $W_1\subset
G^\flat$ of the neutral element $e$ such that the intersection
$W_1\cap H$ is compact in $G^\flat$. Take any closed neighborhood
$W_2\subset G^\flat$ of $e$ such that $W_2^{-1}W_2\subset W_1$. We
claim that $W_2\cap gH$ is compact for each $g\in G$. This is
trivial if $W_2\cap gH$ is empty. If not, then $gh=w$ for some
$h\in H$ and $w\in W_2$. Hence $W_2\cap gH\subset W_2\cap
wh^{-1}H=W_2\cap wH=w(w^{-1}W_2\cap H)\subset w(W_2^{-1}W_2\cap
H)\subset w(W_1\cap H)$ and the closed subset $W_2\cap gH$ of $G$
lies in the compact subset $w(W_1\cap H)$ of $G$. Consequently,
$W_2\cap gH$ is compact for any $g\in G$. Let $W_3\subset G^\flat$
be a neighborhood of $e$ such that $W_3^{-1}W_3\subset W_2$.
To prove the $\flat$-regularity of the quotient group $G/H$, given
any neighborhood $U\subset G$ of $e$ it suffices to find a
neighborhood $V\subset U$ of $e$ such that $\pi(V)$ is
$\flat$-closed in $G/H$. By the $\flat$-regularity of $G$, we can
find a $\flat$-closed neighborhood $V\subset U\cap W_3$. We claim
that $\pi(V)$ is $\flat$-closed in $G/H$. Since the identity map
$(G/H)^\flat\to G^\flat/H$ is continuous, it suffices to verify
that $\pi(V)$ is closed in the topological group $G^\flat/H$.
Take any point $gH\notin\pi(V)$ of $G^\flat/H$. It follows from
$gH\cap V=\emptyset$ and the compactness of the set $W_2\cap gH$
that there is an open neighborhood $W_4\subset W_3$ of $e$ in
$G^\flat$ such that $W_4(W_2\cap gH)\cap V=\emptyset$. We claim
that $W_4z\cap V=\emptyset$ for any $z\in gH$. Assuming the
converse, find a point $v\in W_4z\cap V$. It follows that $z\notin
W_2$. On the other hand, $z\in W_4^{-1}v\subset W_4^{-1}V\subset
W_2$. This contradiction shows that $W_4gH\cap V=\emptyset$ and thus
$\pi(W_4g)$ is a neighborhood of $gH$ in $G^\flat/H$, disjoint
with $\pi(V)$.
\end{proof}
\begin{corollary} If $H$ is a $\flat$-compact normal subgroup of a $\flat$-regular
paratopological group $G$, then the quotient paratopological group
$G/H$ is $\flat$-regular.
\end{corollary}
\begin{proof} It follows that the identity inclusion
$H^\flat\to G^\flat$ is continuous and thus $H$ is compact in
$G^\flat$. Applying the preceding theorem, we conclude that the
quotient group $G/H$ is $\flat$-regular.
\end{proof}
\begin{remark} It is interesting to compare the latter corollary with a result
of \cite{Ra} asserting that the quotient $G/H$ of a Hausdorff
(regular) paratopological group $G$ by a compact normal subgroup
$H\subset G$ is Hausdorff (regular).
\end{remark}
Since for a 2-oscillating paratopological group $G$ the Hausdorff
property (the regularity) of $G$ is equivalent to the
$\flat$-separatedness (the $\flat$-regularity),
Theorem~\ref{main1} implies
\begin{corollary}\label{cor1} Let $H$ be a normal subgroup of a
Hausdorff 2-oscillating paratopological group $G$. Then the
quotient paratopological group $G/H$ is
\begin{enumerate}
\item Hausdorff if $H$ is closed in
$G^\flat$;
\item regular if $G$ is regular and
the set $H$ is locally compact in $G^\flat$.
\end{enumerate}
\end{corollary}
Example~\ref{ex1} supplies us with a locally compact closed
subgroup $H$ of a $\flat$-regular paratopological group $G={\mathbb{L}}^2$
such that the quotient $G/H$ is not Hausdorff. Next, we construct
a $\flat$-regular abelian paratopological group $G$ containing a
locally compact $\flat$-closed subgroup $H$ such that the quotient
is Hausdorff but not regular. This will show that in
Theorem~\ref{main1} and Corollary~\ref{cor1} the local compactness
of $H$ in $G^\flat$ cannot be replaced by the local compactness
plus $\flat$-closedness of $H$ in $G$.
Our construction is based on the notion of a {\em cone topology}
(see the paper~\cite{Ra4} of the second author).
Let $G$ be a topological group and $S\subset G$ be a closed
subsemigroup of $G$, containing the neutral element $e\in G$. The
{\em cone topology} $\tau_S$ on $G$ consists of sets $U\subset G$
such that for each $x\in U$ there is an open neighborhood
$W\subset G$ of $e$ such that $x(W\cap S)\subset U$. It is clear
that the group $G$ endowed with the cone topology $\tau_S$ is a
regular paratopological group and its neighborhood base at $e$
consists of the sets $W\cap S$, where $W$ is a neighborhood of $e$
in $G$. Moreover, the paratopological group $(G,\tau_S)$ is
saturated if $e$ is a cluster point of the interior of $S$ in $G$.
In the latter case the paratopological group $(G,\tau_S)$ is
2-oscillating and thus $\flat$-regular, see \cite[Theorem 3]{BR}.
In the following example using the cone topology we construct a
saturated regular paratopological group $G$ containing a
$\flat$-closed discrete subgroup $H$ with non-regular quotient
$G/H$.
\begin{example} Consider the group ${\mathbb{Q}}^3$ endowed with the usual (Euclidean)
topology. A subsemigroup $S$ of ${\mathbb{Q}}^3$ is called a {\em cone} in
${\mathbb{Q}}^3$ if $q\cdot \vec x\in S$ for any non-negative $q\in{\mathbb{Q}}$ and
any vector $\vec x\in S$.
Fix a sequence $(z_n)$ of rational numbers such that
$0<\sqrt{2}-z_n<2^{-n}$ for all $n$ and let $S\subset {\mathbb{Q}}^3$ be
the smallest closed cone containing the vectors $(1,0,0)$ and
$(\frac1n,1,z_n)$ for all $n$. Let $\tau_S$ be the cone topology
on the group ${\mathbb{Q}}^3$ determined by $S$. Since the origin of
${\mathbb{Q}}^3$ is a cluster point of the interior of $S$, the
paratopological group $G=({\mathbb{Q}}^3,\tau_S)$ is saturated and
$\flat$-regular. Moreover, its group reflexion coincides with
${\mathbb{Q}}^3$.
Now consider the $\flat$-closed subgroup $H=\{(0,0,q):q\in{\mathbb{Q}}\}$
of the group $G$. Since $H\cap S=\{(0,0,0)\}$, the subgroup $H$ is
discrete (and thus locally compact) in $G$. On the other hand $H$
fails to be locally compact in ${\mathbb{Q}}^3$, the group reflexion of
$G$.
We claim that the quotient group $G/H$ is not regular. Let
$\pi:G\to G/H$ denote the quotient homomorphism. We can identify
$G/H$ with ${\mathbb{Q}}^2$ endowed with a suitable topology.
Let us show that $(0,1)\notin\pi(S)$. Assuming the converse we
would find $x\in{\mathbb{Q}}$ such that $(0,1,x)\in S$. It follows from the
definition of $S$ that $x\ge0$ and there is a sequence $(\vec
x_i)$ converging to $(0,1,x)$ such that
$$\vec x_i=\sum_n\lambda_{i,n}(n^{-1},1,z_n)+\lambda_i(1,0,0)$$
where all $\lambda_i,\lambda_{in}\ge 0$ and almost all of them
vanish. Taking into account that $\{\vec x_i\}$ converges to
$(0,1,x)$ we conclude that
\begin{itemize}
\item $\lambda_i\to0$ as $i\to\infty$;
\item
$\lambda_{in}\underset{i\to\infty}\longrightarrow0$ for every $n$;
\item $\sum_n\lambda_{in}$ tends to $1$ as $i\to\infty$.
\end{itemize}
Let $\varepsilon>0$. Then
$\exists N_1(\forall n>N_1)\{|z_n-\sqrt{2}|<\varepsilon\}$,
$\exists N_2(\forall i>N_2)(\forall n\le
N_1)\{\lambda_{in}<\varepsilon/N_1\}$ and
$\exists N_3(\forall i>N_3)\{|\sum_n \lambda_{in}-1|<\varepsilon\}$.
Put $N=\max\{N_2,N_3\}$. Let $i>N$. Then
$$|\sqrt{2}-\sum_n\lambda_{in}z_n|\le
|\sqrt{2}-\sum_n\lambda_{in}\sqrt{2}|+ |\sum_{n\le
N_1}\lambda_{in}(\sqrt{2}-z_n)|+|\sum_{n>N_1}\lambda_{in}(\sqrt{2}-z_n)|\le$$
$$\varepsilon\sqrt{2}+\varepsilon+\sum_{n>N_1}\lambda_{in}\varepsilon\le
\varepsilon(\sqrt{2}+1+1+\varepsilon).$$
So $x=\sqrt{2}$ which is impossible. This contradiction shows that
$(0,1)\notin\pi(S)$ and thus $(0,\frac1n)\notin\pi(S)$ for all
$n\in{\mathbb{N}}$ (since $S$ is a cone).
It remains to prove that for each neighborhood $V\subset {\mathbb{Q}}^3$ of
the origin we get $\overline{\pi(V\cap S)}\not\subset\pi(S)$,
where the closure is taken in $G/H$. This will follow as soon as
we show that $(0,\frac1m)\in\overline{\pi(V\cap S)}$ for some $m$.
Since $V$ is a (usual) neighborhood of $(0,0,0)$ in ${\mathbb{Q}}^3$, there
is $m\in{\mathbb{N}}$ such that $\frac1m(\frac1n,1,z_n)\in V$ for all
$n\in{\mathbb{N}}$. Then $\frac1m(\frac1n,1)\in\pi(V\cap S)$ for all
$n\in{\mathbb{N}}$. Observe that the sequence $\{(\frac1{nm},\frac1m)\}_n$
converges to $(0,\frac1m)$ in $G/H$ since for each neighborhood
$W\subset{\mathbb{Q}}^3$ of $(0,0,0)$ the difference
$(\frac1{nm},\frac1m)-(0,\frac1m)=(\frac1{nm},0)$ belongs to
$\pi(W\cap S)$ for all sufficiently large $n$. Therefore
$(0,\frac1m)\in\overline{\pi(V\cap S)}\not\subset
\pi(S)\not\ni(0,\frac1m)$, which means that $G/H$ is not regular.
\end{example}
As far as we know, in the submitted version of the paper~\cite{XieLiTu}
Li-Hong Xie, Piyu Li, and Jin-Ji Tu proved that if $\mathcal P$ is one of the following properties
$\{T_1,T_2,T_3,\mathit{regular}\}$, a paratopological group $G$ has the property $\mathcal P$,
and $H$ is a compact normal subgroup of the group $G$, then the quotient group $G/H$
has the property $\mathcal P$ too. But the case when $\mathcal P=T_0$ was remarked as unknown.
We fill this gap here.
\begin{proposition} Let $H$ be a compact normal subgroup of a $T_0$ paratopological group $G$.
Then the quotient group $G/H$ is $T_0$ too.
\end{proposition}
\begin{proof} Let $\mathcal B$ be the family of all open neighborhoods of the unit
of the group $G$ and $\mathcal B'$ be the family of all open neighborhoods of the unit
of the group $G/H$.
Let $\pi:G\to G/H$ be the quotient map.
Let $S=\bigcap_{U\in\mathcal B} U$ and $S'=\bigcap_{U'\in\mathcal B'} U'$.
Then $S'\subset \bigcap_{U\in\mathcal B}\pi(UH)\subset \pi(\bigcap_{U\in\mathcal B}UH)$.
Let $x\in \bigcap_{U\in\mathcal B}UH$ be an arbitrary point and $U\in\mathcal B$ be an
arbitrary neighborhood. There exists a neighborhood $V\in\mathcal B$ such that $V^2\subset U$.
Then $U^{-1}x\supset \overline{V^{-1}x}\supset V^{-1}x$. So $U^{-1}x\cap H\supset
\overline{V^{-1}x}\cap H\ne\emptyset$. Since the set $H$ is compact there exists
point $y\in \bigcap_{U\in\mathcal B}(\overline{U^{-1}x}\cap H)=\bigcap_{U\in\mathcal B}(U^{-1}x\cap H)$.
So $x\in Sy\subset SH$. Hence $S'\subset\pi(SH)$ and
$S'\cap S'^{-1}\subset\pi(SH)\cap \pi(SH)^{-1} \subset\pi(SH\cap S^{-1}H)$.
Let $x\in SH\cap S^{-1}H$ be an arbitrary point. Then there exist elements $s_1,s_2\in S$ and
$h_1,h_2\in H$ such that $x=s_1h_1=s_2^{-1}h_2$. Then $s_2s_1=h_2h_1^{-1}\in S\cap H$.
But since $H$ is a compact paratopological group, by Lemma 5.4 from~\cite{Ra3}, $H$ is
a topological group. Since $H$ is a $T_0$ topological group the space $H$ is $T_1$ (in fact,
$T_{31/2}$), so $H\cap S=H\cap \bigcap_{U\in\mathcal B} U=\{e\}$. Thus $s_2s_1=h_2h_1^{-1}=e$,
so $s_2=s_1^{-1}$ and $h_2=h_1$. Then $xh_1^{-1}\in S\cap S^{-1}=\{e\}$. Hence $x\in H$.
At last, $S'\cap S'^{-1}\subset \pi(SH\cap S^{-1}H)\subset \pi(H)=\{e\}$ and thus
the group $G/H$ is $T_0$.
\end{proof}
\end{document} |
\begin{document}
\title{Cocycle Conjugacy of Free Bogoljubov Actions of $\mathbb{R}$}
\author{Joshua Keneda and Dimitri Shlyakhtenko}
\thanks{Research supported by NSF grant DMS-1762360.}
\email{[email protected], [email protected]}
\address{Department of Mathematics, UCLA, Los Angeles, CA 90095}
\address{Mathematics and Engineering Department, South Plains College, Levelland, TX 79336}
\begin{abstract}
We show that Bogoljubov actions of $\mathbb{R}$ on the free group
factor $L(\mathbb{F}_{\infty})$ associated to sums of
infinite multiplicity trivial and certain mixing representations are cocycle conjugate
if and only if the underlying representations are conjugate.
\end{abstract}
\maketitle
\section{Introduction}
Recall that two actions $\beta_{t}$, $\gamma_{t}$ of $\mathbb{R}$
on a von Neumann algebra $M$ are said to be \emph{conjugate} if $\beta_{t}=\alpha\circ\gamma_{t}\circ\alpha^{-1}$
for some automorphism $\alpha$ of $M$. The actions are said to be \emph{cocycle conjugate} if there exists a strongly continuous one-parameter
family of unitaries $u_{t}\in M$ and an automorphism $\alpha$ of
$M$ so that
\[
\beta_{t}(x)=\alpha(\textrm{Ad}_{u_{t}}(\gamma_{t}(\alpha^{-1}(x)))),\qquad\forall x\in M;
\]
in other words, $\beta_{t}$ and $\textrm{Ad}_{u_{t}}\circ\gamma_{t}$
are conjugate. Cocycle conjugacy is clearly a weaker notion of equivalence
than conjugacy.
A consequence of cocycle conjugacy is an isomorphism between the crossed
product von Neumann algebras $M\rtimes_{\beta}\mathbb{R}$ and $M\rtimes_{\gamma}\mathbb{R}$.
This isomorphism takes $M$ to $M$ and sends the unitary $U_{t}\in L(\mathbb{R})\subset M\rtimes_{\beta}\mathbb{R}$
implementing the automorphism $\beta_{t}$ to the unitary $u_{t}V_{t}$,
where $V_{t}\in M\rtimes_{\gamma}\mathbb{R}$ is the implementing unitary
for $\gamma_{t}$.
An important class of automorphisms of the free group factor $L(\mathbb{F}_{\infty})$
are so-called \emph{free Bogoljubov automorphisms}, which are defined
using Voiculescu's free Gaussian functor. As a starting point, we
write $L(\mathbb{F}_{\infty})=W^{*}(S_{1},S_{2},\dots)$ where $S_{j}$
are an infinite free semicircular system. The closure in the $L^{2}$
norm of their real-linear span is an infinite dimensional real Hilbert
space. Voiculescu proved that any automorphism of that Hilbert space
extends to an automorphism of $L(\mathbb{F}_{\infty})$. In particular,
any representation of $\mathbb{R}$ on an infinite dimensional Hilbert
space canonically gives an action of $\mathbb{R}$ on $L(\mathbb{F}_{\infty})$.
Motivated by the approach in \cite{Classification}, we prove the following theorem,
which states that for a large class of Bogoljubov automorphisms, cocycle
conjugacy and conjugacy are equivalent to conjugacy of the underlying
representations and thus gives a classification of such automorphisms
up to cocycle conjugacy.
\begin{thm*}
\label{thm:SameSpectrum-1}Let $\pi_{1},\pi_{1}'$ be two mixing
orthogonal representations of $\mathbb{R}$, and assume that $\pi_{1}\otimes\pi_{1}\cong\pi_{1}$,
$\pi_{1}'\otimes\pi_{1}'\cong\pi_{1}'$. Denote by $\mathbf{1}$ the
trivial representation of $\mathbb{R}$. Let
\[
\pi=(\mathbf{1}\oplus\pi_{1})^{\oplus\infty},\qquad\pi'=(\mathbf{1}\oplus\pi_{1}')^{\oplus\infty},
\]
and let $\alpha$ (resp.,
$\alpha'$) be the associated free Bogoljubov actions of $\mathbb{R}$ on $L(\mathbb{F}_{\infty})$.
Then $\alpha$ and $\alpha'$ are cocycle conjugate iff the representations
$\pi^{\oplus\infty}$ and $(\pi')^{\oplus\infty}$ are conjugate,
i.e.
\[
\pi^{\oplus\infty}=V(\pi')^{\oplus\infty}V^{-1}
\]
for some orthogonal isomorphism $V$ of the underlying real Hilbert
spaces.
\end{thm*}
It is worth noting that the conjugacy class of a representation of
$\mathbb{R}$ on a real Hilbert space is determined by the measure
class of its spectral measure (a measure on $\mathbb{R}$ satisfying
$\mu(-X)=\mu(X)$ for all Borel sets $X$) and a multiplicity function
which is measurable with respect to that class (for the purposes of
our Theorem, we may assume that this multiplicity function is identically
infinite). Our Theorem then states that, for Bogoljubov actions
satisfying the hypothesis of the Theorem, cocycle conjugacy occurs
if and only if these measure classes are the same.
\section{Preliminaries on conjugacy of automorphisms}
\subsection{Crossed products.}
If $M$ is a type II$_{1}$ factor and $\alpha_{t}:\mathbb{R}\to\textrm{Aut}(M)$
is a one-parameter group of automorphisms, the \emph{crossed product}
$M\rtimes_{\alpha}\mathbb{R}$ is of type II$_{\infty}$ with a canonical
trace $Tr$. Furthermore, the crossed product construction produces
in a canonical way a distinguished copy of the group algebra $L(\mathbb{R})$
inside the crossed product algebra. We denote this copy by $L_{\alpha}(\mathbb{R})$.
The relative commutant $L_{\alpha}(\mathbb{R})'\cap M\rtimes_{\alpha}\mathbb{R}$
is generated by $L_{\alpha}(\mathbb{R})$ and the fixed point algebra
$M^{\alpha}$.
\subsection{Conjugacy of actions.}
Recall that if $\beta_{t}$ and $\gamma_{t}$ are cocycle conjugate,
each choice of a cocycle conjugacy produces an isomorphism $\Pi_{\gamma,\beta}$
of the crossed product algebras $M\rtimes_{\beta}\mathbb{R}$ and
$M\rtimes_{\gamma}\mathbb{R}$. Note that $\Pi_{\gamma,\beta}$ does
\emph{not} necessarily map $L_{\beta}(\mathbb{R})$ to $L_{\gamma}(\mathbb{R})$.
In fact, as we shall see, this is rarely the case even if we compare
the image $\Pi_{\gamma,\beta}(L_{\beta}(\mathbb{R}))$ with $L_{\gamma}(\mathbb{R})$
up to a weaker notion of equivalence, $\prec_{M\rtimes_{\gamma}\mathbb{R}}$
which was introduced by Popa in the framework of his deformation-rigidity
theory. Indeed, in parallel to Theorem 3.1 in \cite{Classification},
we show that, very roughly, conjugacy of $\Pi_{\gamma,\beta}(L_{\beta}(\mathbb{R}))$
and $L_{\gamma}(\mathbb{R})$ inside the crossed product is essentially
equivalent (up to compressing by projections) to \emph{conjugacy}
of the actual actions by an inner automorphism of $M$.
\begin{thm}
\label{embedding} Let $M$ be a tracial von Neumann algebra with
a fixed faithful normal trace $\tau$. Suppose $\alpha,\beta:\mathbb{R}\rightarrow\text{Aut}(M)$
are two trace-preserving actions of $\mathbb{R}$ on $M$ which are
cocycle conjugate, and suppose that the only finite-dimensional $\alpha$-invariant
subspaces of $L^{2}(M)$ are those on which $\alpha$ acts trivially.
Fix any $q\in M^{\beta}$ a nonzero projection. The following are
equivalent:
(a) There exists a nonzero projection $r\in L_{\beta}(\mathbb{R})$
such that
\[
\Pi_{\alpha,\beta}(L_{\beta}(\mathbb{R})qr)\prec_{M\rtimes_{\alpha}\mathbb{R}}L_{\alpha}(\mathbb{R})
\]
(b) There exists a nonzero partial isometry $v\in M$ such that $v^{*}v\in qM^{\beta}q$,
$vv^{*}\in M^{\alpha}$, and for all $x\in M$,
\[
\alpha_{t}(vxv^{*})=v\beta_{t}(x)v^{*}.
\]
\end{thm}
\begin{proof}
To see that (a) implies (b), take $r$ as in (a), so that $\Pi_{\alpha,\beta}(L_{\beta}(\mathbb{R})qr)\prec_{M\rtimes_{\alpha}\mathbb{R}}L_{\alpha}(\mathbb{R})$,
and take $w_{t}\in M$ with $\text{Ad }w_{t}\circ\alpha_{t}=\beta_{t}$.
First, we claim that there's a $\delta>0$ for which there exist $x_{1},...,x_{k}\in qM$
with
\[
\sum_{i,j=1}^{k}|\tau(x_{i}^{*}w_{t}\alpha_{t}(x_{j}))|^{2}\ge\delta
\]
for all $t$. Suppose for a contradiction that no such $\delta$ exists.
Then we can find a net $(t_{i})_{i\in I}$ such that
\[
\lim_{i}\tau(x^{*}w_{t_{i}}\alpha_{t_{i}}(y))=0
\]
for any $x,y\in qM$.
But then for any $p,p'$ finite trace projections in $L_{\alpha}(\mathbb{R})$,
$s,s'\in\mathbb{R}$, and $x,y\in M$, we have (in the 2-norm from
the trace on $M\rtimes_{\alpha}\mathbb{R}$):
\begin{align*}
\|E_{L_{\alpha}(\mathbb{R})}(p\lambda_{\alpha}(s)^{*}x^{*}\Pi_{\alpha,\beta}(\lambda_{\beta}(t_{i})q)y\lambda_{\alpha}(s')p')\|_{2} & =\|\lambda_{\alpha}(s)^{*}pE_{L_{\alpha}(\mathbb{R})}(x^{*}q\Pi_{\alpha,\beta}(\lambda_{\beta}(t_{i})qy)p'\lambda_{\alpha}(s'))\|_{2}\\
& =\|pE_{L_{\alpha}(\mathbb{R})}((qx)^{*}w_{t_{i}}\alpha_{t_{i}}(qy))p'\lambda_\alpha(s'+t_{i})\|_{2}\\
& =\|E_{L_{\alpha}(\mathbb{R})}((qx)^{*}w_{t_{i}}\alpha_{t_{i}}(qy))pp'\|_{2}\rightarrow0,
\end{align*}
where the last equality follows from the fact that $(qx)^{*}w_{t_{i}}\alpha_{t_{i}}(qy)\in M$,
so
\[
E_{L_{\alpha}(\mathbb{R})}((qx)^{*}w_{t_{i}}\alpha_{t_{i}}(qy))=\tau((qx)^{*}w_{t_{i}}\alpha_{t_{i}}(qy)),
\]
and the latter term goes to zero by supposition for any $x,y\in M$.
Now note that linear combinations of terms of the form $x\lambda_{\alpha}(s)p$
(resp. $y\lambda_{\beta}(s')p'$) as above are dense in $L^{2}(M\rtimes_{\alpha}\mathbb{R},Tr),$
so by approximating $\Pi_{\alpha,\beta}(r)a$, (resp. $\Pi_{\alpha,\beta}(r)b$)
with such sums for any $a,b\in M\rtimes_{\alpha}\mathbb{R}$, it follows
from the above estimate that
\[
\|E_{L_{\alpha}(\mathbb{R})}(a^{*}\Pi_{\alpha,\beta}(\lambda_{\beta}(t_{i})qr)b)\|_{2}\rightarrow0.
\]
But this contradicts $\Pi_{\alpha,\beta}(L_{\beta}(\mathbb{R})qr)\prec_{M\rtimes_{\alpha}\mathbb{R}}L_{\alpha}(\mathbb{R})$,
so the $\delta>0$ of our above claim exists.
We can thus find $\delta>0$, $x_{1},...,x_{k}\in qM$ such that $\sum_{i,j=1}^{k}|\tau(x_{i}^{*}w_{t}\alpha_{t}(x_{j}))|^{2}\ge\delta$
for all $t$.
Let us now consider the space $B(L^{2}(M))$ of bounded operators on
$L^{2}(M,\tau)$ and the rank-one orthogonal projection $e_{\tau}$
onto $1\in L^{2}(M,\tau)$. We can identify $(B(L^{2}(M)),e_{\tau})$
with the basic construction $\langle M, e_\tau \rangle$ for $\mathbb{C}\subset M$, so that $e_{\tau}$
is the Jones projection. Let $\hat{\tau}$ be the usual trace on $B(L^{2}(M))$,
satisfying $\hat{\tau}(xe_{\tau}y)=\tau(xy)$ for all $x,y\in M$.
Finally, for a finite-rank operator $Q=\sum_{i}y_{i}e_{\tau}z_{i}^{*}$
with $y_{i},z_{i}\in M$, let
\[
T_{M}(Q)=\sum y_{i}z_{i}^{*}\in M.
\]
Then $T_{M}$ extends to a normal operator-valued weight from the
basic construction to $M$ satisfying $\hat{\tau}=\tau\circ T_{M}$
(i.e. $T_{M}$ is the pull-down map).
Consider now the positive element
\[
X=\sum_{i=1}^{k}x_{i}e_{\tau}x_{i}^{*},
\]
together with the following normal positive linear functional on $\langle M,e_{\tau}\rangle$:
\[
\psi(T)=\sum_{i=1}^{k}\hat{\tau}(e_{\tau}x_{i}^{*}Tx_{i}e_{\tau}).
\]
Note that $T_{M}(X)=\sum_{i=1}^{k}x_{i}x_{i}^{*}\in M$, so in particular
$\|T_{M}(X)\|<\infty$.
For every $t\in\mathbb{R}$, we have:
\begin{align*}
\psi(\beta_{t}(X)) & =\sum_{i,j}\hat{\tau}(e_{\tau}x_{i}^{*}w_{t}\alpha_{t}(x_{j})e_{\tau}\alpha_{t}(x_{j})^{*}w_{t}^{*}x_{i}e_{\tau})\\
 & =\sum_{i,j}|\tau(x_{i}^{*}w_{t}\alpha_{t}(x_{j}))|^{2}\ge\delta>0.
\end{align*}
Now consider $K$, the ultraweak closure of the convex hull of $\{\beta_{t}(X):t\in\mathbb{R}\}$
inside $q\langle M,e_{\tau}\rangle q$. Note that by normality of
$\psi$, $\psi(x)\ge\delta$ for any $x\in K$.
Since $K$ is convex and $\|\cdot\|_{2}$-closed, there exists a unique
$X_{0}\in K$ of minimal 2-norm. But since the 2-norm is invariant
under $\beta$, we must have that $\|\beta_{t}(X_{0})\|_{2}=\|X_{0}\|_{2}$
for all $t$, so by uniqueness of the minimizer, $X_{0}$ is itself
fixed by the extended $\beta$ action (and nonzero since $\psi(X_{0})\ge\delta$).
Also, by ultraweak lower semicontinuity of $T_{M}$, we know that
$\|T_{M}(X_{0})\|\le\|T_{M}(X)\|<\infty$.
Take a nonzero spectral projection $e$ of $X_{0}$. Then $e$ is
still $\beta$-invariant and satisfies $\|T_{M}(e)\|<\infty$. But
this means that $\hat{\tau}(e)=\tau(T_{M}(e))<\infty$, so $e$ must
be a finite rank projection, since $\hat{\tau}$ corresponds to the
usual trace $Tr$ on the trace-class operators in $B(L^{2}(M),\tau)$.
Now since $e_{\tau}$ has central support 1 in $\langle M,e_{\tau}\rangle$
(and because $e_{\tau}$ is minimal), we have that there exists $V$
a partial isometry in $\langle M,e_{\tau}\rangle$ such that $V^{*}V=f\le e$
and $VV^{*}=e_{\tau}$. We remark that $f$ remains $\beta$-invariant,
since $e$ was finite rank, and our finite-dimensional invariant subspaces
are all fixed by the action. Note also that $e\le q$ (since $X_{0}\in q\langle M,e_{\tau}\rangle q$),
so that $V=Vq=e_{\tau}V$.
Applying the pull-down lemma, we see that:
\[
V=e_{\tau}V=e_{\tau}(T_{M}(e_{\tau}V))=e_{\tau}T_{M}(V).
\]
Set $v=T_{M}(V)$ and note that because $\|T_{M}(V^{*}V)\| \le \|T_{M}(e)\|<\infty$,
we have $v\in M$, and $V=e_{\tau}v$.
Since $e_{\tau}\langle M,e_{\tau}\rangle e_{\tau}=\mathbb{C}e_{\tau}$,
and since $V$ is left-supported by $e_{\tau}$, we have that for
each $t$ there exists a $\lambda_{t}\in\mathbb{C}$ such that $\lambda_{t}e_{\tau}=Vw_{t}{\alpha_{t}}(V^{*})$.
Note that since $Vw_{t}{\alpha_{t}}(V^{*})(Vw_{t}{\alpha_{t}}(V^{*}))^{*}=Vw_{t}{\alpha_{t}}(V^{*}V)w_{t}^{*}V^{*}=V{\beta_{t}}(e)V^{*}=VV^{*}=e_{\tau}$,
the last equality of the previous sentence implies that $\lambda_{t}\overline{\lambda_{t}}=1$.
We also have:
\begin{align*}
e_{\tau}\lambda_{t}\alpha_{t}(v) & =\lambda_{t}e_{\tau}\alpha_t(v)=\lambda_{t}e_{\tau}{\alpha_t}(V)\\
& =Vw_{t}{\alpha_{t}}(V^{*}V)=V{\beta_{t}}(e)w_{t}=Vw_{t}\\
& =e_{\tau}vw_{t}.
\end{align*}
Thus, applying the pull-down map, we have that $\lambda_{t}\alpha_{t}(v)=vw_{t}$,
and, replacing $v$ by its polar part if necessary, we've found a
partial isometry in $M$, conjugation by which intertwines the actions.
We have for any $x\in M$:
\begin{align*}
\alpha_{t}(vxv^{*})=\alpha_{t}(v)\alpha_{t}(x)\alpha_{t}(v^{*})=\overline{\lambda_{t}}vw_{t}\alpha_{t}(x)w_{t}^{*}v^{*}\lambda_{t}=v\beta_{t}(x)v^{*}.
\end{align*}
Furthermore, with some applications of $\alpha_{t}(v)=\overline{\lambda_{t}}vw_{t}$,
we see that
\[
\beta_{t}(v^{*}v)=w_{t}\alpha_{t}(v^{*}v)w_{t}^{*}=w_{t}(w_{t}^{*}v^{*}\lambda_{t})(\overline{\lambda_{t}}vw_{t})w_{t}^{*}=v^{*}v,
\]
and
\[
\alpha_{t}(vv^{*})=(\overline{\lambda_{t}}vw_{t})(w_{t}^{*}v^{*}\lambda_{t})=vv^{*},
\]
so we've found the promised intertwiner.
Conversely, assume that we have $v\in M$ satisfying $v^{*}v\in qM^{\beta}q$,
$vv^{*}\in M^{\alpha}$, and $\alpha_{t}(vxv^{*})=v\beta_{t}(x)v^{*}$
for all $x\in M$. Take $w_{t}\in M$ with $\text{Ad }w_{t}\circ\alpha_{t}=\beta_{t}$.
Then, as above, we have $vw_{t}=\lambda_{t}\alpha_{t}(v)$, for some
$\lambda_{t}\in \mathbb{T}$. Multiplying both sides by $\overline{\lambda_{t}}$
and absorbing this factor into $w_{t}$, we may assume without loss
of generality that $\lambda_{t}=1$ for all $t$, so we have $vw_{t}=\alpha_{t}(v)$.
Now let $\lambda_{t}^{\alpha}$ (resp., $\lambda_{t}^{\beta}$) denote
the canonical unitaries that implement the respective actions on $M$
in the crossed product $M\rtimes_{\alpha}\mathbb{R}$ (resp., $M\rtimes_{\beta}\mathbb{R}$).
Then the relation $vw_{t}=\alpha_{t}(v)$ implies $v\Pi_{\alpha,\beta}(\lambda_{t}^{\beta})=\lambda_{t}^{\alpha}v$.
Furthermore, for any finite trace projection $r\in L_{\beta}(\mathbb{R})$,
we have $v\Pi_{\alpha,\beta}(qr)=vq\Pi_{\alpha,\beta}(r)=v\Pi_{\alpha,\beta}(r)\neq0$,
so $v^{*}$ is a partial isometry that witnesses $\Pi_{\alpha,\beta}(L_{\beta}(\mathbb{R})qr)\prec_{M\rtimes_{\alpha}\mathbb{R}}L_{\alpha}(\mathbb{R})$
(e.g. see condition (4) of Theorem F.12 in \cite{BO}). Thus, (b) implies (a).
\end{proof}
\section{Cocycle conjugacy of Bogoljubov Automorphisms}
\subsection{Free Bogoljubov automorphisms.}
Let $\pi$ be an orthogonal representation of $\mathbb{R}$ on a real
Hilbert space $H_{\mathbb{R}}$. Recall that Voiculescu's free Gaussian
functor associates to $H_{\mathbb{R}}$ a von Neumann algebra
\[
\Phi(H_{\mathbb{R}})=\{s(h):h\in H_{\mathbb{R}}\}''\cong L(\mathbb{F}_{\dim H_{\mathbb{R}}})
\]
where $s(h)=\ell(h)+\ell(h)^{*}$ and $\ell(h)$ is the creation operator
$\xi\mapsto h\otimes\xi$ acting on the full Fock space $\mathscr{F}(H)=\bigoplus_{n\geq0}(H_{\mathbb{R}}\otimes_{\mathbb{R}}\mathbb{C})^{\otimes n}$.
Denoting by $\Omega$ the unit basis vector of $(H_{\mathbb{R}}\otimes_{\mathbb{R}}\mathbb{C})^{\otimes0}=\mathbb{C}$,
it is well-known that the vector-state
\[
\tau(\cdot)=\langle\Omega,\cdot\Omega\rangle
\]
defines a faithful trace-state on $\Phi(H_{\mathbb{R}})$. Furthermore,
$\mathbb{R}$ acts on $\mathscr{F}(H)$ by unitary transformations
$U_{t}=\bigoplus_{n\geq0}(\pi\otimes1)^{\otimes n}$, and conjugation
by $U_{t}$ leaves $\Phi(H_{\mathbb{R}})$ globally invariant thus
defining a strongly continuous one-parameter family of \emph{free
Bogoljubov automorphisms}
\[
t\mapsto\alpha_{t}\in\textrm{Aut}(\Phi(H_{\mathbb{R}})).
\]
Note that if $\pi$ is such that $\pi\otimes\pi$ and $\pi$ are conjugate (as
representations of $\mathbb{R}$), then the representation
$U_{t}$ is conjugate to $\mathbf{1}\oplus\pi^{\oplus\infty}$.
A complete invariant for the orthogonal representation $\pi$ consists
of the absolute continuity class $\mathscr{C}_{\pi}$ of a measure
$\mu$ on $\mathbb{R}$ satisfying the symmetry condition $\mu(B)=\mu(-B)$
for all $\mu$-measurable sets $B$ and a $\mu$-measurable multiplicity
function $n:\mathbb{R}\to\mathbb{N}\cup\{+\infty\}$ satisfying $n(x)=n(-x)$
almost surely in $x$. In particular, assuming that $\pi\cong\pi\otimes\pi$
(i.e., that for some (hence any) probability measure $\mu\in\mathscr{C}_{\pi}$
that generates $\mathscr{C}_{\pi}$, $\mu*\mu$ and $\mu$ are mutually absolutely continuous), then the measure class $\mathscr{C}_{\pi}$
is an invariant of $\alpha$ up to conjugacy (since it can be recovered
from $U_{t}$, the unitary representation induced by $\alpha_{t}$
on $L^{2}(\Phi(H_{\mathbb{R}}))$).
Recall that the representation $\pi$ is said to be \emph{mixing}
if for all $\xi,\eta\in H_{\mathbb{R}}$, $\lim_{|t|\to\infty}\langle\xi,\pi(t)\eta\rangle\to0$.
This is equivalent to saying that for some (hence any) probability
measure $\mu\in\mathscr{C}_{\pi}$ that generates $\mathscr{C}_{\pi}$,
the Fourier transform satisfies $\hat{\mu}(t)\to0$ whenever $t\to\pm\infty$.
\subsection{Operator-valued semicircular systems.\label{subsec:OpValuSemSys}}
The crossed product $\Phi(H_{\mathbb{R}})\rtimes_{\alpha}\mathbb{R}$
has a description in terms of so-called operator-valued semicircular
systems (see \cite[Examples 2.8, 5.2]{A-valued}). Decompose $\pi=\bigoplus_{i\in I}\pi_{i}$
into cyclic representations $\pi_{i}$ with cyclic vectors $\xi_{i}$.
Let $\mu_{i}$ be the measure with Fourier transform $t\mapsto\langle\xi_{i},\pi_{i}(t)\xi_{i}\rangle$,
and denote by $\eta_{i}:L^{\infty}(\mathbb{R})\to L^{\infty}(\mathbb{R})$
the completely positive map given by
\[
\eta_{i}(f)(x)=\int f(y)d\mu_{i}(x-y).
\]
Then \cite[Proposition 2.18]{A-valued} shows that $\Phi(H_{\mathbb{R}})\rtimes_{\alpha}\mathbb{R}=W^{*}(L(\mathbb{R}),S_{i}:i\in I)$
where $S_{i}$ are free with amalgamation over $L(\mathbb{R})\cong L^{\infty}(\mathbb{R})$
and each $S_{i}$ is an $L^{\infty}(\mathbb{R})$-valued semicircular
system with covariance $\eta_{i}$.
Operator-valued semicircular variables can be associated to any normal
self-adjoint completely positive map on $L(\mathbb{R})\cong L^{\infty}(\mathbb{R})$.
In particular, given any measure $K$ on $\mathbb{R}^{2}$ satisfying
$\pi_{x}K,\pi_{y}K\prec\textrm{Lebesgue measure}$ and $dK(x,y)=dK(y,x)$
(here $\pi_{x},\pi_{y}$ are projections on the two coordinate axes),
we can construct an $L^{\infty}(\mathbb{R})$-valued semicircular
variable $S=S_{K}$ with covariance
\[
\eta(f)(x)=\int f(y)dK(x,y).
\]
If $K'$ is absolutely continuous with respect to $K$, then $W^{*}(L^{\infty}(\mathbb{R}),S_{K'})\subset W^{*}(L^{\infty}(\mathbb{R}),S_{K})$;
if $K=\sum K_{j}$ with $K_{j}$ disjoint, then $S_{K_{j}}$ are free
with amalgamation over $L^{\infty}(\mathbb{R}).$
The algebra $W^{*}(L^{\infty}(\mathbb{R}),S_{K})$ is denoted $\Phi(L^{\infty}(\mathbb{R}),\eta)$.
For our choices of $\eta$ it is semifinite and $L^{\infty}(\mathbb{R})$
is in the range of a conditional expectation.
If $I\subset\mathbb{R}$ is a finite interval and $K$ is a measure
on $I^{2}$ satisfying $\pi_{x}K,\pi_{y}K\prec\textrm{Lebesgue measure}$
and $dK(x,y)=dK(y,x)$, then one can in a similar way associate a
completely positive map to $K$ and consider an $L^{\infty}(I)$-semicircular
variable $S_{K}$. This time, composition of the conditional expectation
onto $L^{\infty}(I)$ with integration with respect to (rescaled)
Lebesgue measure on $L^{\infty}(I)$ gives rise to a normal faithful
trace on the algebra $\Phi(L^{\infty}(I),\eta)=W^{*}(L^{\infty}(I),S_{K})$.
We call measures $K$ (on $\mathbb{R}^{2}$ or on the square of some
finite interval $I$) satisfying the conditions $\pi_{x}K,\pi_{y}K\prec\textrm{Lebesgue measure}$
and $dK(x,y)=dK(y,x)$ \emph{kernel measures.}
\subsection{Solidity of certain algebras generated by operator-valued semicircular
systems.}
Let $\eta$ be a normal self-adjoint completely positive map defined
on the von Neumann algebra $A = L^{\infty}(\mathbb{T})$ (with Haar measure on $\mathbb{T}$). By \cite[Corollary 4.2]{Thin}, if the $A,A$-bimodule associated to $\eta$
is mixing (see Def. 2.2 of that paper for a definition), then
$\Phi(A,\eta)$ is strongly solid, and in particular, solid: the relative
commutant of any diffuse abelian von Neumann subalgebra of $\Phi(A,\eta)$
is amenable. As noted in \cite[Proposition 7.3.4]{Thin} and its surrounding remarks, if $\mu$ is a measure on $\mathbb{T}$
so that its Fourier transform $\hat{\mu}$ satisfies $\lim_{n\to\pm\infty}\hat{\mu}(n)=0$
(i.e., $\mu$ is a measure associated to a mixing representation),
then the bimodule associated to the completely positive map $\eta:f\mapsto f*\mu$
is mixing. We record the following lemma, whose proof is straightforward
from the definition of mixing bimodules:
\begin{lem}
Suppose that $H,H'$ are mixing $A,A$-bimodules, $p_{0}\in A$
is a nonzero projection, and $K\subset H$ is an $A,A$-submodule.
Then:
(i) $H\oplus H'$ is mixing; (ii) $K$ is mixing; (iii) $p_{0}Hp_{0}$ is mixing as a $p_{0}A,p_{0}A$-bimodule.
\end{lem}
We now make use of this lemma.
\begin{lem}
\label{lem:ACTrick}Let $(K_{j}:j\in J)$ be a family of kernel measures
on $\mathbb{R}^{2}$ and let $\eta_{j}$ be the associated completely
positive maps on $L^{\infty}(\mathbb{R})$.
Assume that each $K_{j}$ can be written as a sum of measures $K_{j}=\sum_{i\in S(j)}K_{j}^{(i)}$
with $K_{j}^{(i)}$ disjoint, and so that $K_{j}^{(i)}$ is supported
on the square $I_{j}^{(i)}\times I_{j}^{(i)}$ for a finite interval
$I_{j}^{(i)}$. Finally, suppose that there exist measures $\hat{K}_{j}^{(i)}$
on $I_{j}^{(i)}\times I_{j}^{(i)}$, so that $K_{j}^{(i)}$ is absolutely
continuous with respect to $\hat{K}_{j}^{(i)}$ and so that the associated
completely positive map
\[
\hat{\eta}_{j}^{(i)}:f\mapsto(x\mapsto\int f(y)d\hat{K}_{j}^{(i)}(x,y))
\]
defines a mixing $L^{\infty}(I_{j}^{(i)})$-bimodule.
Let $X_{j}$ be $\eta_{j}$-semicircular variables over $L^{\infty}(\mathbb{R})$,
and assume that $X_{j}$ are free with amalgamation over $L^{\infty}(\mathbb{R})$.
Then the semifinite von Neumann algebra $M=W^{*}(L^{\infty}(\mathbb{R}),X_{j}:j\in J)$
is solid, in the sense that if $A\subset M$ is any diffuse abelian
von Neumann subalgebra generated by its finite projections, then $A'\cap M$
is amenable.
\end{lem}
\begin{proof}
Denote by $\hat{X}_{j}^{(i)}$ the $\hat{\eta}_{j}^{(i)}$-semicircular
family, and assume that $\hat{X}_{j}^{(i)}$ are free with amalgamation
over $L^{\infty}(\mathbb{R})$. Let $\hat{M}=W^{*}(L^{\infty}(\mathbb{R}),\{\hat{X}_{j}^{(i)}:j\in J,i\in S(j)\})$.
Since $K_{j}=\sum_{i\in S(j)}K_{j}^{(i)}$ is a disjoint sum and $K_{j}^{(i)}$
is absolutely continuous with respect to $\hat{K}_{j}^{(i)}$, we conclude
that $M\subset\hat{M}$ and moreover $M$ is in the range of a conditional
expectation from $\hat{M}$. Thus it is sufficient to prove that $\hat{M}$
is solid.
By freeness with amalgamation, we know that $\hat{M}$ is the amalgamated
free product of the algebras $\hat{M}_{j}^{(i)}=W^{*}(L^{\infty}(\mathbb{R}),\hat{X}_{j}^{(i)})$.
Thus by \cite[Theorem 4.4]{Rigidity}, if $B\subset\hat{M}$ is an abelian algebra generated
by its finite projections and $B'\cap\hat{M}$ is non-amenable, then
$B\prec_{\hat{M}}\hat{M}_{j}^{(i)}$ for some $j\in J$ and $i\in S(j)$
and moreover it follows that $\hat{M}_{j}^{(i)}$ is not solid. But
\[
\hat{M}_{j}^{(i)}\cong L^{\infty}(\mathbb{R}\setminus I_{j}^{(i)})\oplus W^{*}(L^{\infty}(I_{j}^{(i)}),\hat{X}_{j}^{(i)})
\]
and the (finite) von Neumann algebra $W^{*}(L^{\infty}(I_{j}^{(i)}),\hat{X}_{j}^{(i)})$
is solid by \cite[Corollary 4.2]{Thin}, which is a contradiction.
\end{proof}
\begin{cor}
\label{cor:Solid}Suppose that $\pi$ is a mixing orthogonal
representation of $\mathbb{R}$ on a real Hilbert space $H_{\mathbb{R}}$,
and let $\alpha$ be the free Bogoljubov action on $\Phi(H_{\mathbb{R}})$
associated to $\pi$. Then the semi-finite von Neumann algebra $M=\Phi(H_{\mathbb{R}})\rtimes_{\alpha}\mathbb{R}$
is solid: if $B\subset M$ is a diffuse abelian subalgebra generated
by its finite projections, then $B'\cap M$ is amenable.
\end{cor}
\begin{proof}
Our goal is to apply Lemma \ref{lem:ACTrick}. Fix any decomposition
of $\pi$ into cyclic representations $(\pi_{j}:j\in J)$ with associated
cyclic vectors $\xi_{j}$ in such a way that the spectrum of $\pi_{j}(t)$
is contained in the set $\exp(iI_{j}t)$ for a finite subinterval
$I_{j}\subset\mathbb{R}$. Let us fix integers $n_{j}$ so that $I_{j}\subset[-n_{j},n_{j}]$. Selecting a possibly different
set of cyclic vectors and subrepresentations $\pi_j(t)$, we may assume that $\pi(t)=\bigoplus_j \pi_j(t)$, and
that the spectrum of the infinitesimal generator of $\pi_j$ is contained in $I_j$.
Denote by $\mu_{j}$ the measures with Fourier transform
\[
\hat{\mu}_{j}(t)=\langle\xi_{j},\pi(t)\xi_{j}\rangle.
\]
By assumption that $\pi$ is mixing, $\lim_{t\to\pm\infty}\hat{\mu}_{j}(t)=0$.
Moreover, by construction, the support of $\mu_{j}$ is contained
in $[-n_{j},n_{j}]$.
Let $\eta_{j}:L^{\infty}(\mathbb{R})\to L^{\infty}(\mathbb{R})$ be
the completely positive map given by convolution with $\mu_{j}$.
Then $\eta_{j}$ has an associated kernel measure $K_{j}$ given by
$dK_{j}(x,y)=d\mu_{j}(x-y)$.
Let $K_{j}^{(i)}$ denote the restriction of $K_{j}$ to the region
$[-4n_{j}+i,4n_{j}+i]\times[-4n_{j}+i,4n_{j}+i] \setminus [-4n_j + i-1, 4n_j + i-1] \times [-4n_j + i-1, 4n_j + i -1 ] $, $i\in\mathbb{Z}$, and let $\hat{K}_{j}^{(i)}$ be the restriction of $K_j$ to $[-4n_{j}+i,4n_{j}+i]\times[-4n_{j}+i,4n_{j}+i]$. If we identify
$[-4n_{j}+i,4n_{j}+i]$ with the circle, then the completely positive
map associated to $\hat{K}_{j}^{(i)}$ is given by convolution with
the measure $\mu'_{j}$ whose Fourier transform is given by $k\mapsto\hat{\mu}_{j}(k/8n_{j})$;
since $\lim_{t\to\pm\infty}\hat{\mu}_{j}(t)=0$, it follows that $\lim_{k\to\pm\infty}\hat{\mu}'_{j}(k)=0$,
so that the hypothesis of Lemma \ref{lem:ACTrick} is satisfied.
\end{proof}
\subsection{Cocycle conjugacy.}
We are now ready to prove the main result of this paper:
\begin{thm}
\label{thm:SameSpectrum}Let $\pi_{1},\pi_{1}'$ be two mixing
orthogonal representations of $\mathbb{R}$, and assume that $\pi_{1}\otimes\pi_{1}\cong\pi_{1}$,
$\pi_{1}'\otimes\pi_{1}'\cong\pi_{1}'$. Denote by $\mathbf{1}$ the
trivial representation of $\mathbb{R}$. Let
\[
\pi=(\mathbf{1}\oplus\pi_{1})^{\oplus\infty},\qquad\pi'=(\mathbf{1}\oplus\pi_{1}')^{\oplus\infty},
\]
and let $\alpha$ (resp.,
$\alpha'$) be the corresponding free Bogoljubov actions of $\mathbb{R}$ on $L(\mathbb{F}_{\infty})$.
Then $\alpha$ and $\alpha'$ are cocycle conjugate iff $\mathscr{C}_{\pi_{1}}=\mathscr{C}_{\pi_{1}'}$.
\end{thm}
\begin{proof}
If $\mathscr{C}_{\pi_{1}}=\mathscr{C}_{\pi_{1}'}$, then $\pi$ and
$\pi'$ are conjugate representations and the associated Bogoljubov
actions are conjugate; thus it is the opposite direction that needs
to be proved. The proof will be broken into several steps.
If $H_{\pi}$ is the representation space of $\pi$, then $H_{\pi}\cong H_{0}\oplus H_{1}$
corresponding to the decomposition $\pi=\mathbf{1}^{\oplus\infty}\oplus\pi_{1}^{\oplus\infty}$.
Let $N=\Phi(H_{\pi})\cong L(\mathbb{F}_{\infty})$. Then $N$ decomposes
as a free product $N\cong\Phi(H_{0})*\Phi(H_{1})$; moreover, the
free Bogoljubov action is also a free product $\alpha=\mathbf{1}*\alpha_{1}$.
Note that the subalgebra $L(\mathbb{F}_{\infty})\cong\Phi(H_{0})\subset N$
is fixed by the action $\alpha$. In particular, the crossed product
$M=N\rtimes_{\alpha}\mathbb{R}$ decomposes as a free product:
\[
M=N\rtimes_{\alpha}\mathbb{R}\cong(\Phi(H_{0})\otimes L(\mathbb{R}))*_{L(\mathbb{R})}(\Phi(H_{1})\rtimes_{\alpha_{1}}\mathbb{R}).
\]
Let us assume that $\alpha$ and $\alpha'$ are cocycle conjugate.
Denote by $A=L_{\alpha}(\mathbb{R})\subset M$. Then $N\rtimes_{\alpha}\mathbb{R}\cong N\rtimes_{\alpha'}\mathbb{R}$
and thus (up to this identification, which we fix once and for all) also $L_{\alpha'}(\mathbb{R})\subset M$. Thus $A'\cap M\supset\Phi(H_{0})\cong L(\mathbb{F}_{\infty})$,
so that $A'\cap M$ is non-amenable. Exactly the same argument implies
that $L_{\alpha'}(\mathbb{R})'\cap M$ is non-amenable.
By \cite[Theorem 4.4]{Rigidity}, it follows from the amalgamated free product decomposition
of $M$ that $L_{\alpha'}(\mathbb{R})\prec_{M}\Phi(H_{0})\otimes L(\mathbb{R})$
or $L_{\alpha'}(\mathbb{R})\prec_{M}\Phi(H_{1})\rtimes_{\alpha_1}\mathbb{R}$.
But the latter is impossible by Corollary \ref{cor:Solid}, since
$\alpha_{1}$ comes from a mixing representation $\pi_{1}$.
Thus it must be that $L_{\alpha'}(\mathbb{R})\prec_{M}\text{\ensuremath{\Phi(H_{0})\otimes L(\mathbb{R})}}\cong L(\mathbb{F}_{\infty})\otimes L(\mathbb{R})$
and thus $L_{\alpha'}(\mathbb{R})\prec_{M}L_{\alpha}(\mathbb{R})$.
By Theorem \ref{embedding}, $L_{\alpha'}(\mathbb{R})\prec_{M}L_{\alpha}(\mathbb{R})$
implies that there exists a nonzero partial isometry $v\in N$ such that
$v^{*}v\in N^{\alpha'}$, $vv^{*}\in N^{\alpha}$, and for all $x\in N$,
$\alpha_{t}(vxv^{*})=v\alpha'_{t}(x)v^{*}.$
Let $p=vv^{*}\in N^\alpha$, and denote by $\hat{\alpha}$
the restriction of $\alpha$ to $pNp$.
Let $H=H_{0}\oplus H_{1}$ be as above. By replacing $p\in\Phi(H_{0})$
with a subprojection and modifying $v$, we may assume that $\tau(p)=1/n$ for some $n$. Then we can find partial isometries
$v_{i}\in\Phi(H_{0})$, $i \in \{1,...,n\}$, such that $v_{i}v_{i}^{*}=p$
for all $i$ and $\sum_{i}v_{i}^{*}v_{i}=1.$
Let $\{s(h):h\in H_{1}\}$ be a semicircular family of generators
for $\Phi(H_{1})$. Then $N$ is generated by $\Phi(H_{0})\cup\{s(h):h\in H_{1}\}$,
so that $pNp$ is generated by $p\Phi(H_{0})p$ and $\{v_{i}s(h)v_{j}^{*}:1\leq i,j\leq n,h\in H_{1}\}$
\cite[Lemma 5.2.1]{FRV}.
For $i,j\in \{1,...,n\}$ and $h\in H_{1}$, denote $S_{ij}(h)=\operatorname{Re}(n^{1/2}v_{i}s(h)v_{j}^{*})$,
$S'_{ij}(h)=\operatorname{Im}(n^{1/2}v_{i}s(h)v_{j}^{*})$. The
normalization is chosen so that in the compressed $W^{*}$-probability
space $(pNp,n\tau|_{pNp})$ these elements form a semicircular
family \cite[Prop. 5.1.7]{FRV}. So, all together, $pNp$ is generated
$*$-freely by $p\Phi(H_{0})p$ and the semicircular family $\{S_{ij}(h):h\in H_{1},1\leq i,j\leq n\}\cup\{S'_{ij}(h):h\in H_{1},1\leq i<j\leq n\}$.
The action of the restriction $\hat{\alpha}_{t}$ of $\alpha_{t}$
to $pNp$ is given, on these generators, as follows: $\hat{\alpha}_{t}(x)=x$
for $x\in p\Phi(H_{0})p$; $\hat{\alpha}_{t}(S_{ij}(h))=S_{ij}(\pi_t|_{H_{1}}(h))$,
$\hat{\alpha}_{t}(S'_{ij}(h))=S_{ij}'(\pi_t|_{H_{1}}(h))$. From this we see
that $\hat{\alpha}_{t}$ is once again a Bogoljubov automorphism but
corresponding to the representation $\mathbf{1}^{\oplus\infty}\oplus(\pi_{1}^{\oplus\infty})^{\oplus n^{2}}\cong\pi$.
Since by assumption $\pi_{1}\cong\pi_{1}\otimes\pi_{1}$, also $\pi\cong\pi\otimes\pi$
and so conjugacy of $\hat{\alpha}_{t}$ and $\hat{\alpha}'_{t}$ implies
equality of measure classes $\mathscr{C}_{\pi}$ and $\mathscr{C}_{\pi'}$
and thus of $\mathscr{C}_{\pi_{1}}$ and $\mathscr{C}_{\pi_{1}'}$.
\end{proof}
\begin{bibdiv}
\begin{biblist}
\bib{Classification}{article}{
title={Classification of a Family of Non-Almost Periodic Free Araki-Woods Factors},
author={Houdayer, Cyril},
author={Shlyakhtenko, Dimitri},
author={Vaes, Stefaan},
journal={Journal of the European Mathematical Society},
year={to appear},
eprint={arXiv:1605.06057 [math.OA]}
}
\bib{BO}{book}{
title={C*-algebras and Finite-dimensional Approximations},
author={Brown, Nathanial Patrick},
author = {Ozawa, Narutaka},
volume={88},
year={2008},
publisher={American Mathematical Society}
}
\bib{A-valued}{article}{
title={A-valued Semicircular Systems},
author={Shlyakhtenko, Dimitri},
journal={Journal of Functional Analysis},
volume={166},
number={1},
pages={1--47},
year={1999},
publisher={Academic Press}
}
\bib{Thin}{article}{
title={Thin {II$_1$} factors with no Cartan subalgebras},
author={Krogager, Anna Sofie},
author={Vaes, Stefaan},
journal={Kyoto Journal of Mathematics},
volume={59},
number={4},
pages={815-867},
year={2019},
eprint={arXiv:1611.02138 [math.OA]}
}
\bib{Rigidity}{article}{
title={Rigidity of Free Product von Neumann Algebras},
author={Houdayer, Cyril},
author={Ueda, Yoshimichi},
journal={Compositio Mathematica},
volume={152},
number={12},
pages={2461--2492},
year={2016},
publisher={London Mathematical Society},
eprint={arXiv:1507.02157 [math.OA]}
}
\bib{FRV}{book}{
title={Free Random Variables},
author={Voiculescu, Dan-Virgil},
author={Dykema, Ken},
author={Nica, Alexandru},
number={1},
year={1992},
publisher={American Mathematical Society}
}
\end{biblist}
\end{bibdiv}
\end{document} |
\begin{document}
\title
{Markov Infinitely-Divisible Stationary\\ Time-Reversible Integer-Valued
Processes}
\author{Robert L. Wolpert$^{a*}$~\& Lawrence D. Brown$^b$\\
{ $^a$Department of Statistical Science}\\
{ Duke University, Durham NC 27708-0251, USA}\\
{ $^b$Deceased, 2017-02-21}\\ {}}
\date{\today}
\maketitle
\renewcommand{\abstractname}{Summary}
\begin{abstract}
We prove a complete class theorem that characterizes \emph{all}
stationary time reversible Markov processes whose finite dimensional
marginal distributions (of all orders) are infinitely divisible. Aside
from two degenerate cases (iid and constant), in both discrete and
continuous time every such process with full support is a branching
process with Poisson or Negative Binomial marginal univariate
distributions and a specific bivariate distribution at pairs of times.
As a corollary, we prove that every nondegenerate stationary integer-valued
process constructed by the Markov thinning process fails to have
infinitely divisible multivariate marginal distributions, except for the
Poisson. These results offer guidance to anyone modeling integer-valued
Markov data exhibiting autocorrelation.
\par
\textbf{Key Words:}
Decomposable; Markov branching process; negative binomial; negative
trinomial; time reversible.
\end{abstract}
\section{Introduction}\label{s:intro}
Many applications feature autocorrelated count data $X_t$ at discrete times
$t$. A number of authors have constructed and studied stationary stochastic
processes $X_t$ whose one-dimensional marginal distributions come from an
arbitrary infinitely-divisible distribution family $\{\mu^{\theta}\}$, such
as the Poisson $\Po(\theta)$ or negative binomial $\NB(\theta,p)$, and that
are ``$\AR1$-like'' in the sense that their autocorrelation function is
${\textsf{Corr}}[X_s,X_t] =\rho^{|s-t|}$ for some $\rho\in (0,1)$ \citep {Lewi:1983,
Lewi:McKe:Hugu:1989, McKe:1988, AlOs:Alza:1987, Joe:1996}. The most common
approach is to build a time-reversible Markov process using \emph{thinning},
in which the process at any two consecutive times may be written in the form
\[ X_{t-1}=\xi_t+\eta_t \qquad X_t=\xi_t+\zeta_t \] with $\xi_t$, $\eta_t$,
and $\zeta_t$ all independent and from the same infinitely-divisible family
(see \Sec{ss:thin} below for details). A second construction of a stationary
time-reversible process with the same one-dimensional marginal distributions
and autocorrelation function, with the feature that its finite-dimensional
marginal distributions of all orders are infinitely-divisible, is to set
$X_t:={\mathcal{N}}(G_t)$ for a \emph {random measure} ${\mathcal{N}}$
\iftrue
on some measure space $(E,{\mathcal{E}},m)$ that assigns independent
infinitely-divisible random variables ${\mathcal{N}}(A_i)\sim\mu^{\theta_i}$ to
disjoint sets $A_i\in{\mathcal{E}}$ of measure $\theta_i=m(A_i)$, and a family of sets
$\{G_t\}\subset{\mathcal{E}}$ whose intersections have measure $m\big(G_s\cap
G_t\big)=\theta \rho^{|s-t|}$ \else on some space that assigns independent
infinitely-divisible random variables ${\mathcal{N}}(A_i)\sim\mu^{|A_i|}$ to disjoint
sets $A_i$ of measure $|A_i|$, and a family of sets $\{G_t\}$ whose
intersections have measure $|G_s\cap G_t| =\theta \rho^{|s-t|}$ \fi (see
\Sec{ss:meas}).
For the normal distribution $X_t\sim\No(\mu,\sigma^2)$, these two
constructions both yield the usual Gaussian $\AR1$ process. The two
constructions also yield identical processes for the Poisson $X_t\sim
\Po(\theta)$ distribution, but they differ for all other nonnegative
integer-valued infinitely-divisible distributions. For each nonnegative
integer-valued infinitely-divisible marginal distribution except the Poisson,
the process constructed by thinning does not have infinitely-divisible
marginal distributions of all orders (Thm{t:thin}, \Sec{ss:thm}), and the
process constructed using random measures does not have the Markov property
(Thm{t:meas}, \Sec{ss:thm}). Thus none of these is completely satisfactory
for modeling autocorrelated count data with heavier tails than the Poisson
distribution.
In the present manuscript we construct and characterize every process that is
Markov, infinitely-divisible, stationary, and time-reversible with
non-negative integer values. The formal characterization is contained in the
statement of Thm{t:struc} in \Sec {ss:thm}, which follows necessary
definitions and the investigation of special cases needed to establish the
general result.
\subsection{Thinning Process}\label{ss:thin}
Any univariate infinitely-divisible (ID) distribution $\mu(dx)$ on ${\mathbb{R}}^1$
is $\mu^1$ for some convolution semigroup $\{\mu^\theta:~\theta\ge0\}$ and,
for $0<\theta<\infty$ and $0<\rho<1$, determines uniquely a ``thinning
distribution'' $\mu^\theta_\rho(dy\mid x)$ of $Y$ conditional on the sum
$X=Y+Z$ of independent $Y\sim\mu^{\rho\theta}$ and $Z\sim\mu^{(1{-}\rho)\theta}$.
This thinning distribution determines a unique stationary time-reversible
Markov process with one-step transition probability distribution given by the
convolution
\[ \P[X_{t+1}\in A\mid {\mathcal{F}}_t ]
= \int_{\xi+\zeta\in A}
\mu^{(1{-}\rho)\theta}(d\zeta)~\mu^\theta_\rho (d\xi\mid X_t)
\]
for Borel sets $A\subset{\mathbb{R}}$, where ${\mathcal{F}}_t=\sigma\{X_s:~s\le t\}$ is the
minimal filtration. By induction the auto-correlation is ${\textsf{Corr}} (X_s,X_t)
=\rho^{|s-t|}$ for square-integrable $X_t$. The process can be constructed
beginning at any $t_0\in{\mathbb{Z}}$ by setting
\begin{subequations}\label{e:thin}
\begin{align}
X_{t_0}&\sim \mu^\theta(dx)\label{e:thin1}\\
\xi_t &\sim \mu^\theta_\rho (d\xi\mid x) \text{ with }
x=\begin{cases} X_{t-1}& \text{if }t>t_0\\
X_{t+1}&\text{if } t<t_0\end{cases}\label{e:thin2}\\
X_t &:= \xi_t+\zeta_t\qquad\text{for }
\zeta_t\sim \mu^{\theta(1{-}\rho)}(d\zeta).\label{e:thin3}
\end{align}
\end{subequations}
Time-reversibility and hence the lack of dependence of this definition on the
choice of $t_0$ follows from the argument presented in the proof of
Thm{t:thin} in \Sec{ss:thm} below.
\subsubsection{Thinning Example 1: Poisson}\label{sss:pth}
For Poisson-distributed $X_t\sim\mu^\theta=\Po(\theta)$ with mean $\theta>0$,
for example, the thinning recursion step for $0<\rho<1$ and $t>t_0$ can be
written
\begin{align*}
X_{t} &= \xi_t + \zeta_t \text{\quad for independent:}\\
\xi_t &\sim \Bi\big(X_{t-1}, \rho\big), \qquad
\zeta_t \sim \Po\big(\theta(1{-}\rho)\big)
\end{align*}
and hence the joint generating function at two consecutive times is
\begin{alignat*}3
\phi(s,z) &= {\textsf{E}}\Big[ s^{X_{t-1}} z^{X_t}\Big] &
&= \exp\Big\{(s+z-2)\theta(1{-}\rho) + (s\,z-1)\theta\rho\Big\}
.
\end{alignat*}
This was called the ``Poisson $\mathrm{AR}(1)$ Process'' by
McKenzie \citep{McKe:1985} and has been studied by many other
authors since its introduction.
\subsubsection{Thinning Example 2: Negative Binomial}
In the thinning process applied to the Negative Binomial $X_t\sim \mu^\theta
=\NB(\theta,p)$ distribution with mean $\theta(1{-}p)/p$, recursion for $t>t_0$
takes the form
\begin{align}
X_{t} &= \xi_t + \zeta_t \text{\quad for independent:}\notag\\
\xi_t &\sim {\mathsf{BB}}\big(X_{t-1};~ \theta\rho,~\theta(1{-}\rho)\big), \qquad
\zeta_t \sim \NB\big(\theta(1{-}\rho),~p\big)\notag
\intertext{for beta-binomial distributed $\xi_t \sim {\mathsf{BB}}
(n;\alpha,\beta)$ \citep[see] [\S2.2] {John:Kemp:Kotz:2005} with
$n=X_{t-1}$, $\alpha=\theta \rho$, and $\beta= \theta (1{-}\rho)$, and
negative binomial $\zeta_t \sim \NB\big(\theta (1{-}\rho),p\big)$. Thus the
joint generating function is}
\phi(s,z) &= {\textsf{E}}\Big[ s^{X_{t-1}} z^{X_t}\Big]\notag\\
&= p^{\theta(2-\rho)} (1-q\,s)^{-\theta(1{-}\rho)}\, (1-q\,z)^{-\theta(1{-}\rho)}\,
(1-q\,s\,z)^{-\theta \rho}.\label{e:phi-sz-nbt}
\end{align}
From this one can compute the conditional generating function
\[
\phi(z\mid x)={\textsf{E}}\bet{z^{X_{t}}\mid X_{t-1}=x}
= \cet{\frac{p}{1-qz}}^{\theta(1{-}\rho)}~
{}_2F_1(\theta\rho, -x; \theta; 1-z)
\]
where $_2F_1(a,b;c;z)$ denotes Gauss' hypergeometric function \citep [\S15]
{Abra:Steg:1964} and, from this (for comparison below),
\begin{align}
\P[ X_{t-1}=0, X_{t+1}=0\mid X_t=2] &=
[p^{\theta(1{-}\rho)}~{}_2F_1(\theta\rho, -2; \theta; 1)]^2\notag\\
&= \bet{p^{\theta(1{-}\rho)} (1{-}\rho)}^2
\bet{\frac{1+\theta(1{-}\rho)}{1+\theta}}^2.\label{e:nbt020}
\end{align}
This process, as we will see below in Thm{t:thin}, is Markov, stationary,
and time-reversible, with infinitely-divisible one-dimensional marginal
distributions $X_t\sim\NB(\theta,p)$, but the joint marginal distributions at
three or more consecutive times are not ID. It appears to have been
introduced by Joe
\citep[\textit{p.}\thinspace665] {Joe:1996}.
\subsection{Random Measure Process}\label{ss:meas}
Another approach to the construction of processes with specified univariate
marginal stationary distribution $\mu^\theta(dx)$ is to set $X_t := {\mathcal{N}}(G_t)$
for a \emph {random measure} ${\mathcal{N}}$ and a class of sets $\set{G_t}$,
as in \citep [\S3.3, 4.4] {Wolp:Taqq:2005}. We begin with a countably
additive random measure ${\mathcal{N}}(dx\,dy)$ that assigns independent random
variables ${\mathcal{N}}(A_i)\sim\mu^{|A_i|}$ to disjoint Borel sets $A_i\in{\mathcal{B}}
({\mathbb{R}}^2)$ of finite area $|A_i|$ (this is possible by the Kolmogorov
consistency conditions), and a collection of sets
\[ G_t :=\set{(x,y):~x\in{\mathbb{R}}, ~0 \le y < \theta\lambda~e^{-2\lambda
|t-x|}}\]
\begin{figure}
\centering
% TODO(review): the graphic for this figure (an \includegraphics or picture
% of the sets $G_t$) appears to have been lost in transcription; restore it.
\caption{The sets $G_t$, whose pairwise intersections satisfy
$|G_s\cap G_t| = \theta e^{-\lambda|s-t|}$.}
\label{f:Gt}
\end{figure}
(shown in \Fig{f:Gt}) whose intersections satisfy $|G_s\cap G_t| =
\theta e^{-\lambda|s-t|}$. For $t\in{\mathbb{Z}}$, set
\iffalse
\begin{subequations}\label{e:meas}
\begin{align}
X_t &:= {\mathcal{N}}(G_t)\label{e:meas.X}
\intertext{for the set}
G_t &:=\set{(x,y):~x\in{\mathbb{R}}, ~0 \le y < \theta\lambda~e^{-2\lambda
|t-x|}}\label{e:meas.G}
\end{align}\end{subequations}
\else
\begin{equation}\label{e:meas}
X_t := {\mathcal{N}}(G_t).
\end{equation}
\fi
For any $n$ times $t_1<t_2<\dots<t_n$ the sets $\set {G_{t_i}}$ partition
${\mathbb{R}}^2$ into $n(n+1)/2$ sets of finite area (and one with infinite area,
$(\cup G_{t_i})^c$), so each $X_{t_i}$ can be written as the sum of some
subset of $n(n+1)/2$ independent random variables. In particular, any $n=2$
variables $X_s$ and $X_t$ can be written as
\[ X_s={\mathcal{N}}(G_s\backslash G_t)+{\mathcal{N}}(G_s \cap G_t),\qquad
X_t={\mathcal{N}}(G_t\backslash G_s)+{\mathcal{N}}(G_s \cap G_t)
\]
just as in the thinning approach, so both 1-dimensional and 2-dimensional
marginal distributions for the random measure process coincide with those
for the thinning process of \Sec{ss:thin}.
Evidently the process $X_t$ constructed from this random measure is
stationary, time-reversible and infinitely divisible in the strong sense that
all finite-dimensional marginal distributions are ID. Although the 1- and
2-dimensional marginal distributions of this process coincide with those of
the thinning process, the $k$-dimensional marginals may differ for $k\ge3$,
so this process cannot be Markov. We will see in Thm{t:meas} below that the
only nonnegative integer-valued distribution for which it is Markov is the
Poisson.
\subsubsection{Random Measure Example 1: Poisson}\label{sss:prm}
The conditional distribution of $X_{t_n}={\mathcal{N}}(G_{t_n})$ given $\{X_{t_j}:
~j<n\}$ can be written as the sum of $n$ independent terms, $(n-1)$ of them
with binomial distributions (all with the same probability parameter
$p=\rho^{|t_n-t_{n-1}|}$, and with size parameters that sum to $X_{t_{n-1}}$)
and one with a Poisson distribution (with mean $\theta(1-\rho^
{|t_n-t_{n-1}|})$). It follows by induction that the random-measure Poisson
process is identical in distribution to the thinning Poisson process of
\Sec{sss:pth}.
\subsubsection{Random Measure Example 2: Negative Binomial}
The random variables $X_1$, $X_2$, $X_3$ for the random measure process built
on the Negative Binomial distribution $X_t\sim\NB(\theta,p)$ with
autocorrelation $\rho\in(0,1)$ can be written as sums
\[ X_1 = \zeta_1+\zeta_{12}+\zeta_{123} \qquad
X_2 = \zeta_2+\zeta_{12}+\zeta_{23}+\zeta_{123} \qquad
X_3 = \zeta_3+\zeta_{23}+\zeta_{123}
\]
of six independent negative binomial random variables
$\zeta_s\sim\NB(\theta_s, p)$ with shape parameters
\[ \theta_1=\theta_3=\theta(1{-}\rho),\qquad
\theta_2=\theta(1{-}\rho)^2,\qquad
\theta_{12}=\theta_{23}=\theta\rho(1{-}\rho),\qquad
\theta_{123}=\theta\rho^2
\]
(each $\zeta_s={\mathcal{N}}\big(\cap_{t\in s} G_t\big)$ and $\theta_s=|\cap_{t\in s}
G_t|$ in \Fig{f:Gt}). It follows that the conditional probability
\begin{align}
\P[ X_1=0,~X_3=0\mid X_2=2]
&= \P[\zeta_1=\zeta_{12}=\zeta_{123}=\zeta_{23}=\zeta_3=0 \mid
\zeta_2+\zeta_{12}+\zeta_{23}+\zeta_{123}=2]\notag\\
&=\frac{\P[\zeta_2=2, \text{ all other } \zeta_s=0]} {\P[X_2=2]}\notag\\
&= \bet{p^{\theta(1{-}\rho)}(1{-}\rho)}^2{\frac{1+\theta(1{-}\rho)^2}{1+\theta}}\label{e:nbrm020}
\end{align}
differs from that of the thinning negative binomial process in {\textsf{E}}qn{e:nbt020}
for all $\theta>0$ and $\rho>0$. Thus this process is stationary,
time-reversible, and has infinitely-divisible marginal distributions of all
orders, but it cannot be Markov since its 2-dimensional marginal
distributions coincide with those of the Markov thinning process but its
3-dimensional marginal distributions do not.
In \Sec {s:2sol} of this paper we characterize every discrete-time process
that is Markov, Infinitely-divisible, Stationary, and Time-reversible with
non-negative Integer values (\emph{MISTI} for short). In \Sec {s:simm} we
first present the necessary definitions and preliminary results; in \Sec
{s:cont} we extend the results to continuous time, with discussion in \Sec
{s:disc}.
\section{MISTI Processes}\label{s:simm}
A real-valued stochastic process $X_t$ indexed by $t\in{\mathbb{Z}}$ is
\emph{stationary} if each finite-dimensional marginal distribution
\[ {\mu_\sT}(B):=\P\bet{ X_T\in B} \]
\begin{subequations}\label{e:props}
satisfies
\begin{equation}
{\mu_\sT}(B)=\mu_{s+{\scriptscriptstyle T}}(B)\label{e:stat}
\end{equation}
for each set $T\subset{\mathbb{Z}}$ of cardinality $|T|<\infty$, Borel set
$B\in{\mathcal{B}} ({\mathbb{R}}^{|T|})$, and $s\in{\mathbb{Z}}$, where as usual ``$s+T$'' denotes
$\{(s+t):~t\in T\}$. A stationary process is \emph{time-reversible} if also
\begin{equation}
{\mu_\sT}(B)=\mu_{-{\scriptscriptstyle T}}(B)\label{e:trev}
\end{equation}
(where ``$-T$'' is $\{-t:~t\in T\}$) and \emph{Markov} if for every
$t\in{\mathbb{Z}}$ and finite $T\subset\{s\in{\mathbb{Z}}:~s\ge t\}$,
\begin{equation}
\P[ X_T\in B\mid {\mathcal{F}}_t] = \P[ X_T\in B\mid X_t] \label{e:mark}
\end{equation}
for all $B\in{\mathcal{B}} ({\mathbb{R}}^{|T|})$, where ${\mathcal{F}}_t:=\sigma\{X_s:~s\le t\}$. The
process $X_t$ is Infinitely Divisible (ID) or, more specifically,
\emph{multivariate} infinitely divisible (MVID) if each ${\mu_\sT}$ is the $n$-fold
convolution of some other distribution $\mu_{\scriptscriptstyle T}^{(1/n)}$ for each $n\in{\mathbb{N}}$.
This is more restrictive than requiring only that the one-dimensional
marginal distributions be ID and, for integer-valued processes that satisfy
\begin{equation}
{\mu_\sT}({\mathbb{Z}}^{|T|})=1, \label{e:integ}
\end{equation}
it is equivalent by the L\'evy-Khinchine formula \cite
[\textit{p.}\thinspace74] {roge:will:2000a} to the condition that each ${\mu_\sT}$ have
characteristic function of the form
\begin{equation}
\int_{{\mathbb{R}}^{|T|}} e^{i\omega'x}\,{\mu_\sT}(dx)
= \exp\set{\int_{{\mathbb{Z}}^{|T|}} \big(e^{i\omega'u}-1\big)\,
\nu_{\scriptscriptstyle T}(du)},\qquad \omega\in{\mathbb{R}}^{|T|} \label{e:mvid}
\end{equation}
\end{subequations}
for some finite measure $\nu_{\scriptscriptstyle T}$ on ${\mathcal{B}} ({\mathbb{Z}}^{|T|})$. Call a process $X_t$
or its distributions ${\mu_\sT}(du)$ \emph{MISTI} if it is
Markov,
nonnegative Integer-valued,
Stationary,
Time-reversible,
and
Infinitely divisible,
\textit{i.e.{}}, satisfies {\textsf{E}}qss {e:stat} {e:mvid}. We now turn to the problem of
characterizing all MISTI distributions.
\subsection{Three-dimensional Marginals}\label{ss:3dim}
By stationarity and the Markov property all MISTI finite-dimensional
distributions ${\mu_\sT}(du)$ are determined completely by the marginal
distribution for $X_t$ at two consecutive times; to exploit the MVID property
we will study the three-dimensional marginal distribution for $X_t$ at any
set $T$ of ${|T|}=3$ consecutive times--- say, $T=\{1,2,3\}$. By {\textsf{E}}qn{e:mvid}
we can represent $X_{\{1,2,3\}}$ in the form
\[
X_1 = \sum i \NNN i++\qquad
X_2 = \sum j \NNN +j+\qquad
X_3 = \sum k \NNN ++k
\]
for independent Poisson-distributed random variables
\[ \NNN ijk \mathrel{\mathop{\sim}\limits^{\mathrm{ind}}} \Po(\lll ijk) \] with means $\lll ijk := \nu(\{(i,j,k)\})$;
here and hereafter, a subscript ``$+$'' indicates summation over the entire
range of that index--- ${\mathbb{N}}_0=\set{0,1,2,\dots}$ for $\set{\NNN ijk}$ and
$\set{\lll ijk}$, ${\mathbb{N}}=\set{1,2,\dots}$ for $\set{\th j}$. The sums $\th
j:=\lll+j+$ for $j\ge1$ characterize the univariate marginal distribution of
each $X_t$--- for example, through the probability generating function (pgf)
\[ \varphi(\zz2) := {\textsf{E}}[ \zz2^{X_t} ]
= \exp\left[\sum\nolimits_{j\ge1} \big(\zz2^j-1\big)\th j\right].
\]
To avoid trivial technicalities we will assume that $0 < \P[X_t=1]=\varphi'(0)
=\th1e^{-\th+}$, \textit{i.e.{}}, $\th1>0$. Now set $\rr i:=\lll i1+/\th1$, and for
later use define functions:
\begin{equation}\label{e:pP}
\psi_j(\zz1,\zz3) := \sum_{i,k\ge0} \zz1^i\zz3^k\lll ijk\qquad\qquad
p(\zz1) := \psi_1(s,1)/\th1=\sum_{i\ge0} \zz1^i \,\rr i\qquad\qquad
P(\zz2) := \sum_{j\ge1} \zz2^j \,\th j.
\end{equation}
Since $\rr i$ and $\th j$ are nonnegative and summable (by {\textsf{E}}qns
{e:integ}{e:mvid}), $p(\zz1)$ and $P(\zz2)$ are analytic on the open unit
ball ${\mathbb{U}} \subset {\mathbb{C}}$ and continuous on its closure. Similarly, since $\lll
ijk$ is summable, each $\psi_j(\zz1,\zz3)$ is analytic on ${\mathbb{U}}^2$ and
continuous on its closure. Note $\psi_j(1,1)=\th j$, $p(0)=\rr0$ and
$p(1)=1$, while $P(0)=0$ and $P(1)=\th+$; also $\varphi(\zz2) =
\exp\set{P(\zz2)-\th+}$. Each $\psi_j(s,t)=\psi_j(t,s)$ is symmetric by
{\textsf{E}}qn{e:trev}, as are the conditional probability generating functions:
\begin{equation}\notag
\vpz z := {\textsf{E}}\big[\zz1^{X_1}\zz3^{X_3}\mid X_2=z\big].
\end{equation}
\subsubsection{Conditioning on $X_2=0$}\label{sss:x2=0}
By the Markov property {\textsf{E}}qn{e:mark}, $X_1$ and $X_3$ must be conditionally
independent given $X_2$, so the conditional probability generating function
must factor:
\begin{align}
\vpz0 &:= {\textsf{E}}\big[\zz1^{X_1}\zz3^{X_3}\mid X_2=0\big]
= {\textsf{E}}\big[\zz1^{\sum_{i\ge0} i\NNN i0+}~
\zz3^{\sum_{k\ge0} k\NNN+0k}\big]\notag\\
&= \exp\Big\{\sum_{i,k\ge0}
\big(\zz1^i\zz3^k-1\big)\lll i0k\Big\}\notag\\
&\equiv \varphi(\zz1,1\mid0)~ \varphi(1,\zz3\mid0).\label{e:fac0}
\intertext{Taking logarithms,}
{\sum\big(\zz1^i\zz3^k-1\big)\lll i0k}
&\equiv {\sum\big(\zz1^i-1\big)\lll i0k}
+{\sum\big(\zz3^k-1\big)\lll i0k}\notag
\intertext{or, for all $\zz1$ and $\zz3$ in the unit ball in ${\mathbb{C}}$,}
0&\equiv\sum(\zz1^i-1)(\zz3^k-1\big)\lll i0k.\label{e:x2=0}
\intertext{Thus $\lll i0k=0$ whenever both $i>0$ and $k>0$ and, by symmetry,}
\varphi(1,z\mid0)=\varphi(z,1\mid0)
&=\exp\set{\sum\nolimits_{i\ge0} (z^i-1)\lll i00}.\notag
\end{align}
\subsubsection{Conditioning on $X_2=1$}\label{sss:x2=1}
Similarly
\begin{align}
\vpz1 := {\textsf{E}}\big[\zz1^{X_1}\zz3^{X_3}\mid X_2=1\big]
&= {\textsf{E}}\big[\zz1^{\sum_{i\ge0} i(\NNN i0+ + \NNN i1+)}\quad
\zz3^{\sum_{k\ge0} k(\NNN +0k + \NNN+1k)} \mid
\NNN+1+=1\big]\notag\\
&= \vpz0 {\textsf{E}}\big[\zz1^{\sum_{i\ge0} i\NNN i1+}\quad
\zz3^{\sum_{k\ge0} k\NNN +1k} \mid
\NNN+1+=1\big]\notag\\
&= \vpz0 \set{\sum_{i,k\ge0} \zz1^i\zz3^k\,\big[\lll i1k/\lll
+1+\big]}\notag
\intertext{since $\set{\NNN i1k}$ is conditionally multinomial given $\NNN
+1+$ and independent of $\set{\NNN i0k}$. By the Markov property
this too must factor, as
$\varphi(\zz1,\zz3\mid1)=\varphi(\zz1,1\mid1)
\,\varphi(1,\zz3\mid1)$, so by {\textsf{E}}qn{e:fac0}}
\th1 \set{\sum\nolimits_{i,k\ge0} \zz1^i\zz3^k\lll i1k}
&= \set{\sum\nolimits_{i\ge0} \zz1^i \lll i1+}
\set{\sum\nolimits_{k\ge0} \zz3^k \lll +1k}\notag
\intertext{or, since $\lll i1k=\lll k1i$ by {\textsf{E}}qns{e:trev}{e:pP},}
\psi_1(\zz1,\zz3) &:= \sum\nolimits_{i,k\ge0} \zz1^i\zz3^k \lll i1k
= \th1 p(\zz1)\,p(\zz3),\notag\\
\vpz1 &= \vpz0\, p(\zz1)\, p(\zz3).\notag
\end{align}
\subsubsection{Conditioning on $X_2=2$}\label{sss:x2=2}
The event $\set{X_2=2}$ for $X_2:=\sum_{j\ge1} j\NNN+j+$ can happen in two ways:
either $\NNN+1+ =2$ and each $\NNN+j+=0$ for $j\ge2$, or $\NNN+2+=1$ and
$\NNN+j+=0$ for $j=1$ and $j\ge3$, with $\NNN+0+$ unrestricted in each case.
These two events have probabilities $(\th1^2/2)e^{-\th+}$ and
$(\th2)e^{-\th+}$, respectively, so the joint generating function for
$\{X_1,X_3\}$ given $X_2=2$ is
\begin{align}
\vpz2 &:= {\textsf{E}}\big[\zz1^{X_1}\zz3^{X_3}\mid X_2=2\big]\notag\\
&= {\textsf{E}}\big[\zz1^{\sum_{i\ge0} i(\NNN i0+ + \NNN i1+ + \NNN i2+)}\quad
\zz3^{\sum_{k\ge0} k(\NNN +0k + \NNN +1k + \NNN +2k)}
\mid \NNN+1++2\NNN+2+=2\big]\notag\\
&= \vpz0\set{
\frac{\th1^2/2}{\th1^2/2+\th2}
\bet{\sum_{i,k\ge0} \zz1^i\zz3^k\lll i1k/\lll+1+}^2 +
\frac{\th2}{\th1^2/2+\th2}
\bet{\sum_{i,k\ge0} \zz1^i\zz3^k\lll i2k/\lll+2+} }\notag\\
&= \frac{\vpz0}{\th1^2/2+\th2}\set{
\frac{\th1^2}{2}
\bet{\sum_{i,k\ge0} \zz1^i\zz3^k\lll i1k/\th1}^2 +
\th2 \bet{\sum_{i,k\ge0} \zz1^i\zz3^k\lll i2k/\th2} }\notag\\
&= \frac{\vpz0}{\th1^2/2+\th2}~
\set{\frac{\th1^2}2 p(\zz1)^2 p(\zz3)^2 +
\psi_2(s,t)}.\label{e:x2=2}
\end{align}
In view of {\textsf{E}}qn{e:fac0}, this will factor in the form $\varphi(\zz1,\zz3\mid2)=
\varphi(\zz1,1\mid2)\, \varphi(1,\zz3\mid2)$ as required by Markov property
{\textsf{E}}qn{e:mark} if and only if for all $s,t$ in the unit ball:
\begin{align}
\bet{\frac{\th1^2}{2}+\th2}
\bet{\frac{\th1^2}2 p(\zz1)^2 p(\zz3)^2 + \psi_2(s,t)}
&= \bet{\frac{\th1^2}2 p(\zz1)^2 + \psi_2(s,1)}
\bet{\frac{\th1^2}2 p(\zz3)^2 + \psi_2(1,t)} \notag
\end{align}
or
\begin{multline}
\frac{\th1^2}2 \Bet{\th2 p(s)^2 p(t)^2 - p(s)^2 \psi_2(1,t)
- \psi_2(s,1) p(t)^2 + \psi_2(s,t)} \\
= \Bet{ \psi_2(s,1)\,\psi_2(1,t)-\th2\psi_2(s,t) }. \notag
\end{multline}
To satisfy the ID requirement of {\textsf{E}}qn {e:mvid}, this must hold with each $\th
j$ replaced by $\th j/n$ for each integer $n\in{\mathbb{N}}$. Since the left and
right sides are homogeneous in $\theta$ of degrees $3$ and $2$ respectively,
this will only happen if each square-bracketed term vanishes identically,
\textit{i.e.{}}, if
\begin{align}
\th2 \psi_2(s,t) &\equiv \psi_2(s,1)\psi_2(1,t)\notag
\intertext{and}
0 &=
\th2 \Bet{\th2 p(s)^2 p(t)^2 - p(s)^2 \psi_2(1,t)
- \psi_2(s,1) p(t)^2} + \psi_2(s,1) \psi_2(1,t)\notag\\
&= \bet{\th2 p(s)^2-\psi_2(s,1)} \bet{\th2 p(t)^2-\psi_2(1,t)},\notag
\intertext{so}
\psi_2(s,t) & := \sum_{i,k\ge0} \zz1^i\zz3^k\lll i2k
= \th2 p(s)^2\,p(t)^2,\notag\\
\vpz2 &= \vpz0\, p(\zz1)^2 p(\zz3)^2.\notag
\end{align}
\subsubsection{Conditioning on $X_2=j$}\label{sss:x2=j}
The same argument applied recursively, using the Markov property for each
$j\ge1$ in succession, leads to:
\begin{multline} \notag
\Bet{\frac{\th1^j}{j!}+\dots+\th1\th{j-1}}
\Bet{\th j p(s)^j p(t)^j - p(s)^j\psi_j(1,t)
-\psi_j(s,1)p(t)^j + \psi_j(s,t)}\\
= \Bet{\psi_j(s,1)\psi_j(1,t)- \th j \psi_j(s,t)}
\end{multline}
so
\begin{equation}
\psi_j(s,t) := \sum_{i,k\ge0} \zz1^i\zz3^k\lll ijk
= \th j\, p(\zz1)^j p(\zz3)^j,\qquad j\ge1\label{e:key}
\end{equation}
and consequently
\begin{align}
\vpz j &= {\textsf{E}}\big[\zz1^{X_1}\zz3^{X_3}\mid X_2=j\big]
= \bet{\varphi(\zz1,1\mid0)\,
p(\zz1)^j} ~
\bet{\varphi(1,\zz3\mid0)\,
p(\zz3)^j}.\notag
\end{align}
Conditionally on $\set{X_2=j}$, $X_1$ and $X_3$ are distributed
independently, each as the sum of $j$ independent random variables with
generating function $p(s)$, plus one with generating function
$\varphi(s,1\mid0)$--- so $X_t$ is a branching process \citep {Harr:1963}
whose unconditional three-dimensional marginal distributions have generating
function:
\begin{align}
\varphi(\zz1,\zz2,\zz3) &:= {\textsf{E}}\big[\zz1^{X_1}\zz2^{X_2}\zz3^{X_3}\big]\notag\\
&= \vpz0 \sum_{j\ge0} \zz2^j p(\zz1)^j p(\zz3)^j \P[X_2=j]\notag\\
&= \vpz0 {\textsf{E}}\left[ z p(\zz1) p(\zz3)\right]^{X_2}\notag\\
&= \vpz0 \varphi\big( z p(\zz1) p(\zz3)\big)\notag\\
&= \vpz0 \exp\big[P\big( z p(\zz1) p(\zz3)\big)-\th+\big].\label{e:gen3}
\end{align}
See \Secs {ss:pops} {s:disc} for further development of this branching
process representation.
\subsection{Stationarity}\label{ss:sta}
Without loss of generality we may take $\lll 000 = 0$. By {\textsf{E}}qn{e:key} with
$\zz1=0$ and $\zz3=1$ we have $\lll 0j+=\th j\rr0^j$; by {\textsf{E}}qn{e:x2=0} we have
$\lll i00 = \lll i0+$. By time-reversibility we conclude that $\lll i00 = 0$
for $i=0$ and, for $i\ge1$,
\begin{equation}
\lll i00 = \th i \rr0^i \label{e:i00}.
\end{equation}
Now we can evaluate
\[
\varphi(\zz1,\zz3\mid0) = \exp\set{P(\zz1\,\rr0)+P(\zz3\,\rr0)-2P(\rr0)}
\]
and, from this and {\textsf{E}}qn{e:gen3}, evaluate the joint generating function for
$X_{\{1,2,3\}}$ as:
\begin{align}
\varphi(\zz1,\zz2,\zz3)
&=\exp\set{P\big( z\, p(\zz1) p(\zz3)\big)-\th+
+P(\zz1\,\rr0)+P(\zz3\,\rr0)-2P(\rr0)}, \label{e:phi}
\intertext{and so that for $X_{\{1,2\}}$ as:}
\varphi(\zz1,\zz2,1)
&=\exp\set{P\big( z\, p(\zz1)\big)-\th+
+ P(\zz1\,\rr0)-P(\rr0)}. \label{e:x1x2}
\end{align}
Now consider {\textsf{E}}qn{e:key} with $\zz3=1$,
\begin{align}
\sum_{i\ge0} \zz1^i\lll ij+ &= \th j\, p(\zz1)^j.\label{e:sz}
\intertext{It follows first for $j=1$ and then for $i=1$ that}
\lll i1+ &= \th1 \rr i& i&\ge1\notag\\
\lll 1j+ &= \th j [ j \rr0^{j-1} \rr1 ]&j&\ge1\notag
\intertext{so again by time reversibility with $i=j$, since $\th1>0$, we have}
\rr j &= \th j [j\, \rr0^{j-1} \rr1]/ \th1\qquad j\ge1. \label{e:rj}
\end{align}
Thus $\rr0$, $\rr1$, and $\{\th j\}$ determine all the $\{\rr j\}$ and so all
the $\{\lll ijk\}$ by {\textsf{E}}qns {e:key} {e:i00} and hence the joint distribution
of $\{X_t\}$.
Now consider {\textsf{E}}qn {e:sz} first for $j=2$ and then $i=2$:
\begin{align}
\sum_{i\ge0} \zz1^i \lll ij+ &= \th j\,\left[\sum\nolimits_{i\ge0} \zz1^i \rr
i\right]^j\notag \\
\lll i2+ &= \th2 \sum_{k=0}^i \rr k \rr {i-k} & i&\ge2\notag\\
\lll 2j+ &= \th j \left[
j \rr0^{j-1} \rr2 +
\binom{j}{2} \rr0^{j-2} \rr1^2 \right] & j&\ge2\notag
\end{align}
Equating these for $i=j\ge2$ (by time-reversibility) and applying {\textsf{E}}qn{e:rj}
for $0< k<i$ (the cases $k=0$ and $k=i$ need to be handled separately),
\begin{equation} \label{e:thi}
\rr0^{i-2} \rr1^2 \bet{
\th2 \sum_{0<k<i}\th k\th{i-k} k(i-k)-\th i\frac{i(i-1)}2 \th1^2}=0.
\end{equation}
\section{The Solutions}\label{s:2sol}
{\textsf{E}}qn{e:thi} holds for all $i\ge2$ if $\rr0=0$ or $\rr1=0$, leaving $\rr j=0$
by {\textsf{E}}qn{e:rj} for all $j\ge2$, hence $\rr0+\rr1=1$ and $\set{\th j}$ is
restricted only by the conditions $\th1>0$ and $\th+<\infty$.
\subsection{The Constant Case}\label{ss:con}
The case $\rr0=0$ leads to $\rr1=1$ and $\rr j=0$ for all $j\ne1$, so
$p(z)\equiv z$. By {\textsf{E}}qn{e:phi} the joint pgf is
\[ \varphi(\zz1,\zz2,\zz3)
=\exp\set{P( \zz1\,\zz2\,\zz3)-\th+}, \]
so $X_1=X_2=X_3$ and all $\{X_t\}$ are identical, with an arbitrary ID
distribution.
\subsection{The IID Case}\label{ss:iid}
The case $\rr1=0$ leads to $\rr0=1$ and $\rr j=0$ for all $j\ne0$ so
$p(z)\equiv1$ and
\[ \varphi(\zz1,\zz2,\zz3) =\exp\set{P(\zz1)+P(\zz2)+P(\zz3)-3\th+} \]
by {\textsf{E}}qn{e:phi}, making all $\{X_t\}$ independent, with identical but
arbitrary ID distributions.
\subsection{The Poisson Case}\label{ss:poi}
Aside from these two degenerate cases, we may assume $\rr0>0$ and $\rr1>0$,
and (by {\textsf{E}}qn{e:rj}) rewrite {\textsf{E}}qn{e:thi} in the form:
\begin{align}
\rr i&= \frac{\rr2}{\rr1^2(i-1)}\sum_{k=1}^{i-1}
\rr k \rr{i-k},\quad i\ge2, \notag
\intertext{whose unique solution for all integers $i\ge1$ (by induction) is}
\rr i&= \rr1(\rr2/\rr1)^{i-1}.\label{e:geo}
\end{align}
If $\rr2=0$, then again $\rr i=0$ for all $i\ge2$ but, by {\textsf{E}}qn{e:rj}, $\th
j=0$ for all $j\ge2$; thus $P(z)=\th1 z$ so each $X_t\sim\Po(\th1)$ has a
Poisson marginal distribution with mean $\th1=\th+$. In this case
$\rr0+\rr1=1$, $p(z)=\rr0+\rr1z$, and the two-dimensional marginals (by
{\textsf{E}}qn{e:x1x2}) of $X_1$, $X_2$ have joint pgf
\begin{align}
\varphi(\zz1,\zz2)
&= \exp\set{P\big( z\, p(\zz1)\big)-\th+
+P(\zz1\,\rr0)-P(\rr0)}\label{e:phi-sz-p}\\
&= \exp\set{\th1\rr0(s+z-2)+\th1\rr1(sz-1)},\notag
\end{align}
the bivariate Poisson distribution \citep [\S\thinspace37.2]
{John:Kotz:Bala:1997}, so $X_t$ is the familiar ``Poisson $\mathrm{AR}(1)$
Process'' of
McKenzie \citep{McKe:1985,McKe:1988} (with autocorrelation $\rho=\rr1$)
considered in \Sec {sss:pth}. Its connection with Markov branching
processes was recognized earlier
\citep{Steu:Verv:Wolf:1983}.
By {\textsf{E}}qn{e:phi-sz-p} the conditional distribution of $X_{t+1}$, given ${\mathcal{F}}_t
:=\sigma\set{ X_s:~s\le t}$, is that of the sum of $X_t$ independent
Bernoulli random variables with pgf $p(s)$ and a Poisson innovation term with
pgf $\exp\{P(\rr0 s)-P(\rr0)\}$,
so the Markov process $X_t$ may be written recursively starting at any $t_0$ as
\begin{align*}
X_{t_0} &\sim \Po(\th+)\\
X_{t} &= \xi_t+\zeta_t,\text{\quad where }
\xi_t \sim \Bi(X_{t-1}, \rr1) \text{ and }
\zeta_t \sim \Po(\th t\rr0)\\
\end{align*}
(all independent) for $t> t_0$, the thinning construction of \Sec{ss:thin}.
\subsection{The Negative Binomial case}\label{ss:nb}
Finally if $\rr0>0$, $\rr1>0$, and $\rr2>0$, then (by {\textsf{E}}qn{e:geo}) $\rr i=
\rr1(q\rr0)^{i-1}$ for $i\ge1$ and hence (by {\textsf{E}}qn{e:rj}) $\th j=\alpha q^j/j$
for $j\ge1$ with $q:=(1-\rr0-\rr1)/\rr0(1-\rr0)$ and $\alpha:= \th1/q$. The
condition $\th+<\infty$ entails $q<1$ and $\th+=-\alpha\log(1{-}q)$. The
1-marginal distribution is $X_t\sim\NB(\alpha,p)$ with $p:=(1{-}q)$, and the
functions $P(\cdot)$ and $p(\cdot)$ are $P(z)=-\alpha \log(1-qz)$,
$p(s)=\rr0+\rr1 s/(1-q\rr0 s)$, so the joint pgf for the 2-marginal
distribution of $X_1,X_2$ is
\begin{align}
\varphi(\zz1,\zz2)
&= \exp\set{P\big( z\, p(\zz1)\big)-\th+ +P(\zz1\,\rr0)-P(\rr0)}\notag\\
&= p^{2\alpha}[(1-q\rho)-q(1{-}\rho)(\zz1+\zz2)
+q(q-\rho)\zz1\zz2]^{-\alpha}\label{e:phi-sz-nbb}
\end{align}
with one-step autocorrelation $\rho:=(1{-}\rr0)^2/\rr1$. This bivariate
distribution was introduced as the ``compound correlated bivariate
Poisson''\citep{Edwa:Gurl:1961}, but we prefer to call it the Branching
Negative Binomial distribution. In the branching formulation $X_{t}$ may
be viewed as the sum of $X_{t-1}$ iid random variables with pgf $p(s)=\rr0
+\rr1 s/(1-q\rr0 s)$ and one with pgf $\exp\set{P(s\rr0)-P(\rr0)}
=(1-q\rr0)^\alpha (1-q\rr0\,s)^{-\alpha}$. The first of these may be
viewed as $Y_t$ plus a random variable with the $\NB(Y_t,1{-}q\rr0)$
distribution, for $Y_t\sim\Bi(X_{t-1},1-\rr0)$, and the second has the
$\NB(\alpha,1{-}q\rr0)$ distribution, so a recursive updating scheme
beginning with $X_{t_0} \sim \NB(\alpha,p)$ is:
\[ X_t = Y_t+\zeta_t,\text{\quad where }
Y_t \sim \Bi(X_{t-1}, ~1{-}\rr0)\text{ and }
\zeta_t\sim \NB(\alpha+Y_t,~1{-}q\rr0).
\]
In the special case of $\rho=q$ the joint pgf simplifies to $\varphi
(\zz1,\zz2) = p^\alpha[1+q(1-\zz1-\zz2)]^{-\alpha}$ and the joint
distribution of $X_1,X_2$ reduces to the negative trinomial distribution
\citep [Ch.~36] {John:Kotz:Bala:1997} with pmf
\begin{align*}
\P[X_1=i,X_2=j] &=
\frac {\Gamma(\alpha+i+j)} {\Gamma(\alpha)~i!~j!}
\cet{\frac{1-q}{1+q}}^\alpha\,\cet{\frac{q}{1+q}}^{i+j}
\end{align*}
and simple recursion $X_t\mid X_{t-1}\sim\NB\big(\alpha+X_{t-1},
~\frac1{1+q}\big)$.
\subsection{Results}\label{ss:thm}
We have just proved:
\begin{thm}\label{t:struc}
Let $\set{X_t}$ be a Markov process indexed by $t\in{\mathbb{Z}}$ taking values in
the non-negative integers ${\mathbb{N}}_0$ that is stationary, time-reversible, has
infinitely-divisible marginal distributions of all finite orders, and
satisfies $\P[X_t=1]>0$. Then $\set{X_t}$ is one of four processes:
\begin{enumerate}
\item \label{i:const}
$X_t\equiv X_0\sim\mu_0(dx)$ for an arbitrary ID distribution $\mu_0$
on ${\mathbb{N}}_0$ with $\mu_0(\{1\})>0$;
\item \label{i:iid}
$X_t\mathrel{\mathop{\sim}\limits^{\mathrm{iid}}} \mu_0(dx)$ for an arbitrary ID distribution $\mu_0$ on
${\mathbb{N}}_0$ with $\mu_0(\{1\})>0$;
\item \label{i:po}
For some $\theta>0$ and $0<\rho<1$,
$X_t\sim\Po(\theta)$ with bivariate joint generating function
\[
{\textsf{E}}\left[\zz1^{X_1}~\zz2^{X_2}\right]
= \exp\set{\theta (1{-}\rho)(\zz1-1) +
\theta (1{-}\rho)(\zz2-1) +
\theta \rho(\zz1\zz2-1) }
\]
and hence correlation ${\textsf{Corr}}(X_s,X_t)= \rho^{|s-t|}$ and recursive update
\[ X_t = \xi_t+\zeta_t,\text{\quad where }
\xi_t \sim \Bi(X_{t-1}, ~\rho)\text{ and }
\zeta_t\sim \Po\big(\theta(1{-}\rho)\big);
\]
\item \label{i:nb}
For some $\alpha>0$, $0<p<1$, and $0<\rho<1$, $X_t\sim\NB(\alpha,p)$, with
bivariate joint generating function
\[
{\textsf{E}}\left[\zz1^{X_1}~\zz2^{X_2}\right] =
p^{2\alpha}[(1-q\rho)-q(1{-}\rho)(s+z)+q(q-\rho)sz]^{-\alpha}
\]
where $q=1{-}p$, and hence correlation ${\textsf{Corr}}(X_s,X_t)= \rho^{|s-t|}$ and
recursive update
\[ X_t = Y_t+\zeta_t,\text{\quad where }
Y_t \sim \Bi\big(X_{t-1}, ~\rho\,p/(1-\rho q)\big)\text{ and }
\zeta_t\sim \NB\big(\alpha+Y_t,~p/(1-\rho q)\big).
\]
\end{enumerate}
\end{thm}
Note that the limiting cases of autocorrelation $\rho=1$ and $\rho=0$ in
cases \ref{i:po} and \ref{i:nb} are subsumed by the degenerate cases
\ref{i:const} and \ref{i:iid}, respectively.
From this theorem follows:
\begin{thm}\label{t:thin}
Let $\set{\mu^\theta:~\theta\ge0}$ be an ID semigroup of probability
distributions on the nonnegative integers ${\mathbb{N}}_0$ with $\mu^\theta
(\{1\})>0$. Fix $\theta>0$ and $0<\rho<1$ and let $\set{X_t}$ be the
``thinning process'' of {\textsf{E}}qn{e:thin} in \Sec{ss:thin} with the
representation
\begin{equation}\label{e:rev}
X_{t-1}=\xi_t+\eta_t \qquad X_t=\xi_t+\zeta_t
\end{equation}
for each $t\in{\mathbb{Z}}$ with independent
\[ \xi_t\sim\mu^{\rho\theta}(d\xi)\qquad
\eta_t\sim\mu^{(1{-}\rho)\theta}(d\eta)\qquad
\zeta_t\sim\mu^{(1{-}\rho)\theta}(d\zeta).\]
Then $X_t$ is Markov, stationary, time-reversible, and nonnegative integer
valued, but it does not have infinitely-divisible marginal distributions
of all orders unless $\{\mu^\theta\}$ is the Poisson family.
\end{thm}
\proof By construction $X_t$ is obviously Markov and stationary. The joint
distribution of the process at consecutive times is symmetric (see {\textsf{E}}qn {e:rev})
since the marginal and conditional pmfs
\[ p(x) := \mu^\theta(\{x\}),\qquad q(y\mid x) :=
\frac{\sum_z \mu^{\rho\theta}(\{z\})
~\mu^{(1{-}\rho)\theta}(\{x-z\})
~\mu^{(1{-}\rho)\theta}(\{y-z\})
{\mu^{\theta}(\{x\})}
\]
of $X_t$ and $X_t\mid X_{t-1}$ satisfy the symmetric relation
\[ p(x)~q(y\mid x) = q(x\mid y)~ p(y). \]
Applying this inductively, for any $s<t$ and any $\{x_s,\cdots,x_t\}\subset
{\mathbb{N}}_0$ we find
\begin{align*}
\P[X_s=x_s,\cdots,X_t=x_t]
&= p(x_s)\hspace{4mm} q(x_{s+1}\mid x_s) \hspace{1mm}
q(x_{s+2}\mid x_{s+1})\cdots q(x_t\mid x_{t-1})\\
&= q(x_s\mid x_{s+1}) p(x_{s+1}) \hspace{1mm}
q(x_{s+2}\mid x_{s+1}) \cdots q(x_t\mid x_{t-1})\\
&=\cdots\\
&= q(x_s\mid x_{s+1}) q(x_{s+1}\mid x_{s+2})\cdots
q(x_{t-1}\mid x_t)\hspace{1mm} p(x_t),
\end{align*}
and so the distribution of $X_t$ is time-reversible. Now suppose that it is
also ID. Then by Thm{t:struc} it must be one of the four specified
processes: constant, iid, branching Poisson, or branching negative binomial.
Since $\rho<1$ it cannot be the constant $\set{X_t\equiv X_0}$ process; since
$\rho>0$ it cannot be the independent $\set{X_t\mathrel{\mathop{\sim}\limits^{\mathrm{iid}}}\mu^\theta(dx)}$ process.
The joint generating function $\phi(s,z)$ at two consecutive times for the
negative binomial thinning process, given in {\textsf{E}}qn~\ref{e:phi-sz-nbt}, differs
from that for the negative binomial branching process, given in
{\textsf{E}}qn~\ref{e:phi-sz-nbb}. The only remaining option is the Poisson branching process
of \Sec {sss:pth}.
\endproof
\begin{thm}\label{t:meas}
Let $\set{\mu^\theta:~\theta\ge0}$ be an ID semigroup of probability
distributions on the nonnegative integers ${\mathbb{N}}_0$ with $\mu^\theta
(\{1\})>0$. Fix $\theta>0$ and $0<\rho<1$ and let $\set{X_t}$ be the
``random measure process'' of {\textsf{E}}qn~\ref{e:meas} in \Sec{ss:meas}. Then $X_t$ is
ID, stationary, time-reversible, and nonnegative integer valued, but it is
not a Markov process unless $\{\mu^\theta\}$ is the Poisson family.
\end{thm}
\proof By construction $X_t$ is ID, stationary, and time-reversible; suppose
that it is also Markov. Then by Thm~\ref{t:struc} it must be one of the four
specified processes: constant, iid, branching Poisson, or branching negative
binomial.
Since $\rho<1$ it cannot be the constant $\set{X_t\equiv X_0}$ process; since
$\rho>0$ it cannot be the independent $\set{X_t\mathrel{\mathop{\sim}\limits^{\mathrm{iid}}}\mu^\theta(dx)}$ process.
The joint generating function $\phi(s,z)$ at two consecutive times for the
negative binomial random measure process coincides with that for the negative
binomial thinning process, given in {\textsf{E}}qn~\ref{e:phi-sz-nbt}, and differs from
that for the negative binomial branching process, given in
{\textsf{E}}qn~\ref{e:phi-sz-nbb}. The only remaining option is the Poisson branching process
of \Sec {sss:pth}.
\endproof
\section{Continuous Time}\label{s:cont}
Now consider ${\mathbb{N}}_0$-valued time-reversible stationary Markov processes
indexed by continuous time $t\in{\mathbb{R}}$. The restriction of any such process
to $t\in{\mathbb{Z}}$ will still be Markov, hence MISTI, so there can be at most two
non-trivial ones--- one with univariate Poisson marginal distributions, and
one with univariate Negative Binomial distributions. Both do in fact exist.
\subsection{Continuous-Time Poisson Branching Process}\label{ss:ctpoi}
Fix $\theta>0$ and $\lambda>0$ and construct a nonnegative integer-valued
Markov process with generator
\begin{subequations}\label{e:GenPoi}
\begin{align}
{\mathfrak{A}} f(x) &= \frac\partial{\partial s}
{\textsf{E}}[ f(X_s)-f(X_t)\mid X_t=x ] \Big|_{s=t}\notag\\
&= \lambda\theta \big[f(x+1)-f(x)\big]
+ \lambda x \big[f(x-1)-f(x)\big]\label{e:GenPoi.a}
\intertext{or, less precisely but more intuitively, for all $i,j\in{\mathbb{N}}_0$
and $\epsilon>0$,}
\P\big[X_{t+\epsilon}=i\mid X_t=j\big] &= o(\epsilon) +
\begin{cases}
\epsilon\lambda\theta &i=j+1\\
1-\epsilon\lambda(\theta+j) &i=j\\
\epsilon\lambda j &i=j-1\\
\end{cases}\label{e:GenPoi.b}
\end{align}
\end{subequations}
$X_t$ could be described as a linear death process with immigration. In
\Sec{ss:marg} we verify that its univariate marginal distribution and
autocorrelation are
\begin{align*}
X_t& \sim\Po(\theta)\\
{\textsf{Corr}}(X_s,X_t) &= e^{-\lambda|s-t|},
\end{align*}
and its restriction to integer times $t\in{\mathbb{Z}}$ is precisely the process
described in \Sec{s:2sol} item 3, with one-step autocorrelation
$\rho=e^{-\lambda}$.
\subsection{Continuous-Time Negative Binomial Branching Process}\label{ss:ctnb}
Now fix $\theta>0$, $\lambda>0$, and $0<p<1$ and construct a nonnegative
integer-valued Markov process with generator
\begin{subequations}\label{e:GenNB}
\begin{align}
{\mathfrak{A}} f(x) &= \frac\partial{\partial s}
{\textsf{E}}[ f(X_s)-f(X_t)\mid X_t=x ] \Big|_{s=t}\notag\\
&= \frac{\lambda(\alpha+x)(1{-}p)}p \big[f(x+1)-f(x)\big]
+ \frac{\lambda x}p \big[f(x-1)-f(x)\big]\label{e:GenNB.a}
\intertext{or, for all $i,j\in{\mathbb{N}}_0$ and $\epsilon>0$,}
\P\big[X_{t+\epsilon}=i\mid X_t=j\big] &= o(\epsilon) +
\begin{cases}
\epsilon\lambda(\alpha+j)(1{-}p)/p &i=j+1\\
1-\epsilon\lambda[(\alpha+j)(1{-}p)+j]/p &i=j\\
\epsilon \lambda j/p &i=j-1,
\end{cases}\label{e:GenNB.b}
\end{align}
\end{subequations}
so $X_t$ is a linear birth-death process with immigration. The univariate
marginal distribution and autocorrelation (see \Sec{ss:marg}) are now
\begin{align*}
X_t& \sim \NB(\alpha,p)\\
{\textsf{Corr}}(X_s,X_t) &= e^{-\lambda|s-t|},
\end{align*}
and its restriction to integer times $t\in{\mathbb{Z}}$ is precisely the process
described in \Sec{s:2sol} item 4, with autocorrelation $\rho =e^{-\lambda}$.
\subsection{Markov Branching (Linear Birth/Death) Processes }\label{ss:pops}
The process $X_t$ of \Sec{ss:ctpoi} can also be described as the size of a
population at time $t$ if individuals arrive in a Poisson stream with rate
$\lambda\theta$ and die or depart independently after exponential holding
times with rate $\lambda$; as such, it is a continuous-time Markov branching
process.
Similarly, that of \Sec{ss:ctnb} can be described as the size of a population
at time $t$ if individuals arrive in a Poisson stream with rate
$\lambda\alpha (1{-}p)/p$, give birth (introducing one new individual)
independently at rate $\lambda (1{-}p)/p$, and die or depart at rate
$\lambda/p$. In the limit as $p\to1$ and $\alpha\to\infty$ with
$\alpha(1{-}p)\to\theta$ this will converge in distribution to the Poisson
example of \Sec{ss:ctpoi}.
\subsection{Marginal Distributions}\label{ss:marg}
Here we verify that the Poisson and Negative Binomial distributions are the
univariate marginal stationary distributions for the Markov chains with
generators ${\mathfrak{A}}$ given in
{\textsf{E}}qn~\ref{e:GenPoi} and {\textsf{E}}qn~\ref{e:GenNB}, respectively.
Denote by $\pi^0_i=\P[X_t=i]$ the pmf for $X_t$ and by
$\pi^\epsilon_i=\P[X_{t+\epsilon}=i]$ that for $X_{t+\epsilon}$, and by
$\varphi_0(s)={\textsf{E}}[s^{X_t}]$ and $\varphi_\epsilon(s)={\textsf{E}}[s^{X_{t+\epsilon}}]$ their
generating functions. The stationarity requirement that $\varphi_0(s)\equiv
\varphi_\epsilon(s)$ will determine $\varphi(s)$ and hence $\{\pi_i\}$ uniquely.
\subsubsection{Poisson}\label{sss:ctPo}
From {\textsf{E}}qn~\ref{e:GenPoi.b} for $\epsilon>0$ we have
\begin{alignat}{5}
\pi^\epsilon_i &= \epsilon\lambda\theta \pi^0_{i-1}&
&+ [1-\epsilon\lambda(\theta+i)] \pi^0_{i}&
&+\epsilon\lambda(i+1) \pi^0_{i+1}&
&+ o(\epsilon). \notag
\intertext{Multiplying by $s^i$ and summing, we get:}
\varphi_\epsilon(s)
&= \epsilon\lambda\theta s \sum_{i\ge1} s^{i-1} \pi^0_{i-1}&
&+ [1-\epsilon\lambda\theta] \varphi_0(s)
- \epsilon\lambda s \sum_{i\ge0} i s^{i-1} \pi^0_{i}&
&+ \epsilon\lambda \sum_{i\ge0} (i+1)s^i \pi^0_{i+1}&
&+ o(\epsilon) \notag\\
&= \epsilon\lambda\theta s \varphi_0(s) &
&+ [1-\epsilon\lambda\theta] \varphi_0(s)
- \epsilon\lambda s \varphi_0'(s)&
&+ \epsilon\lambda \varphi_0'(s) &
&+ o(\epsilon) \notag
\end{alignat}
so
\begin{align*}
\varphi_\epsilon(s)-\varphi_0(s)
&= \epsilon\lambda(s-1)\bet{\theta\varphi_0(s)-\varphi_0'(s)}+ o(\epsilon)
\intertext{and stationarity ($\varphi_0(s)\equiv\varphi_\epsilon(s)$) entails
$\lambda=0$ or $\varphi_0'(s)/\varphi_0(s) \equiv \theta$, so
$\log\varphi_0(s) \equiv (s-1)\theta$ and:}
\varphi_0(s) &= \exp\set{(s-1)\theta}
\end{align*}
so $X_t\sim\Po(\theta)$ is the unique stationary distribution.
\subsubsection{Negative Binomial}\label{sss:ctNB}
From {\textsf{E}}qn~\ref{e:GenNB.b} for $\epsilon>0$ we have
\begin{align*}
\pi^\epsilon_i &= (\epsilon\lambda(1{-}p)/p)(\alpha+i-1)~\pi^0_{i-1}
+ \{1-(\epsilon\lambda/p)[(\alpha+i)(1{-}p)+i]\}~ \pi^0_{i}
+ (\epsilon\lambda/p)(i+1)~ \pi^0_{i+1}+ o(\epsilon) \\
\varphi_\epsilon(s)
&= (\epsilon\lambda(1{-}p)/p)\alpha~ s\varphi_0(s) +
(\epsilon\lambda(1{-}p)/p)~ s^2\varphi_0'(s)\\
&+ \varphi_0(s) - (\epsilon\lambda(1{-}p)/p) \alpha\varphi_0(s)
- (\epsilon\lambda/p)((1{-}p)+1)~s\varphi_0'(s)\\
&+ (\epsilon\lambda/p)~\varphi_0'(s)+ o(\epsilon) \\
\varphi_\epsilon(s)-\varphi_0(s)
&=(\epsilon\lambda/p)~\set{
\varphi_0(s)~\alpha(1{-}p) (s-1)
+ \varphi_0'(s)~[(1{-}p) s^2 -((1{-}p)+1)s +1]}+ o(\epsilon) \\
&=(\epsilon\lambda/p)(s-1)~\set{
\varphi_0(s)~\alpha(1{-}p)
+ \varphi_0'(s)~((1{-}p) s-1)}+ o(\epsilon)
\end{align*}
\end{align*}
so either $\lambda=0$ (the trivial case where $X_t\equiv X$) or $\lambda>0$ and:
\begin{align*}
\varphi_0'(s)/\varphi_0(s) &= \alpha(1{-}p) (1-(1{-}p) s)^{-1}\\
\log \varphi_0(s) &= -\alpha\log (1-(1{-}p) s)+\alpha\log(p)\\
\varphi_0(s) &= p^\alpha (1-(1{-}p) s)^{-\alpha}
\end{align*}
and $X_t\sim\NB(\alpha,p)$ is the unique stationary distribution.
\subsubsection{Alternate Proof}\label{sss:alt}
A detailed-balance argument \citep[\textit{p.}\thinspace105]{Hoel:Port:Ston:1972} shows that
the stationary distribution $\pi_i:=\P[X_t=i]$ for linear birth/death chains
is proportional to
\begin{align*}
\pi_i &\propto \prod_{0\le j<i} \frac{\beta_{j}}{\delta_{j+1}}\\
\intertext{where $\beta_j$ and $\delta_j$ are the birth and death rates
when $X_t=j$, respectively. For the Poisson case, from {\textsf{E}}qn~\ref{e:GenPoi.b}
this is} \pi_i &\propto \prod_{0\le j<i} \frac{\lambda\theta}{\lambda
(j+1)} = \theta^i/i!, \intertext{so $X_t\sim\Po(\theta)$, while for the
Negative Binomial case from {\textsf{E}}qn~\ref{e:GenNB.b} we have} \pi_i &\propto
\prod_{0\le j<i} \frac{\lambda(\alpha+j)(1{-}p)/p} {\lambda (j+1)/p} =
\frac{\Gamma(\alpha+i)}{\Gamma(\alpha)\,i!}~ (1{-}p)^i,
\end{align*}
so $X_t\sim\NB(\alpha,p)$. In each case the proportionality constant is
$\pi_0=\P[X_t=0]$: $\pi_0=e^{-\theta}$ for the Poisson case, and
$\pi_0=p^\alpha$ for the negative binomial.
\subsubsection{Autocorrelation}\label{sss:auto}
Aside from the two trivial (iid and constant) cases, MISTI processes have
finite $p$th moments for all $p<\infty$ and, in particular, have finite
variance and well-defined autocorrelation. It follows by the Markov property
and induction that the autocorrelation must be of the form
\[ {\textsf{Corr}}[X_s,X_t] = \rho^{|t-s|} \] for some $\rho\in[-1,1]$. In both the
Poisson and negative binomial cases the one-step autocorrelation $\rho$ is
nonnegative; without loss of generality we may take $0<\rho<1$.
\hide{
From {\textsf{E}}qn{e:GenNB.b} the conditional expectation of $X_{t+\epsilon}\newcommand{\hide}[1]{}$ for $\epsilon}\newcommand{\hide}[1]{>0$
given ${\mathcal{F}}_t$ is
\begin{align*}
{\textsf{E}}[X_{t+\epsilon}\newcommand{\hide}[1]{} \mid X_t] &= X_t(1-\epsilon}\newcommand{\hide}[1]{\lambda)
+ \epsilon}\newcommand{\hide}[1]{\lambda\alpha(1{-}p)/p +o(\epsilon}\newcommand{\hide}[1]{)
\intertext{and hence for $t>0$ and $\epsilon}\newcommand{\hide}[1]{>0$ the autocorrelation
$\rho(t)={\textsf{Corr}}(X_0,X_t)$ satisfies}
\rho(t+\epsilon}\newcommand{\hide}[1]{)&= {\textsf{Corr}}[X_0,X_{t+\epsilon}\newcommand{\hide}[1]{}]\\
&= \rho(t) [1-\epsilon}\newcommand{\hide}[1]{\lambda] + o(\epsilon}\newcommand{\hide}[1]{)
\intertext{so $\rho'(t) = -\lambda\rho(t)$, $\rho(t)=e^{-\lambda t}$ for
$t\ge0$ and, by symmetry,}
{\textsf{Corr}}[X_s, X_t] &= e^{-\lambda|t-s|}
\end{align*}
as claimed. A similar argument beginning with {\textsf{E}}qn{e:GenPoi.b} shows
${\textsf{E}}[X_{t+\epsilon}\newcommand{\hide}[1]{}\mid X_t] = X_t(1-\epsilon}\newcommand{\hide}[1]{\lambda) + \epsilon}\newcommand{\hide}[1]{\lambda\theta+o(\epsilon}\newcommand{\hide}[1]{)$ and
hence $\rho(t)=e^{-\lambda|t|}$ for the Poisson example of
\Sec{sss:ctPo}.
}
\section{Discussion}\label{s:disc}
The condition $\mu^\theta(\{1\})>0$ introduced in \Sec{ss:3dim} to avoid
trivial technicalities is equivalent to a requirement that the support
$\mathop{\mathrm{spt}}(\mu^\theta) ={\mathbb{N}}_0$ be all of the nonnegative integers. Without this
condition, for any MISTI process $X_t$ and any integer $k\in{\mathbb{N}}$ the process
$Y_t=k\,X_t$ would also be MISTI, leading to a wide range of essentially
equivalent processes.
The branching approach of \Sec {ss:pops} could be used to generate a wider
class of continuous-time stationary Markov processes with ID marginal
distributions \citep {Verv:1979, Steu:Verv:Wolf:1983}. If families of size
$k\ge1$ immigrate independently in Poisson streams at rate $\lambda_k$, with
$\sum_{k\ge1} \lambda_k\log k<\infty$, and if individuals (after independent
exponential waiting times) either die (at rate $\delta>0$) or give birth to
some number $j\ge1$ of progeny (at rate $\beta_j\ge0$), respectively, with
$\delta> \sum_{j\ge1} j\,\beta_j$, then the population size $X_t$ at time $t$
will be a Markov, infinitely-divisible, stationary process with nonnegative
integer values. Unlike the MISTI processes, these may have infinite $p$th
moments if $\sum_{k\ge1} \lambda_k k^p=\infty$ for some $p>0$ and, in
particular, may not have finite means, variances, or autocorrelations.
Unless $\lambda_k =0$ and $\beta_j=0$ for all $k,j>1$, however, these will
not be time-reversible, and hence not MISTI. Decreases in population size
are always of unit size (necessary for the Markov property to hold), while
increases might be of size $k>1$ (if immigrating family sizes exceed one) or
$j>1$ (if multiple births occur).
\section*{Acknowledgments}
The authors would like to thank
Xuefeng Li,
Avi Mandelbaum,
Yosef Rinott,
Larry Shepp,
and
Henry Wynn
for helpful conversations.
This work was supported in part by National
Science Foundation grants
DMS--1228317 and
DMS-2015382
and National Aeronautics and Space Administration Applied Information Science
Research Program grant NNX09AK60G.
Larry Brown is sorely missed, both for his deep intellect and his
delightful charm, generosity, and humanity.
\hide{
\section{Leftovers}\label{s:left}
\begin{itemize}
\item The requirement $\th1>0$ (made just above {\textsf{E}}qn{e:pP}) probably isn't
necessary... if not, set $m\equiv\min\{i:~\th i>0\}$ and define $\rr j :=
\lll jm+/\th m$ and I expect we can still characterize all the MISTI's. My
guess is that they will be the same two degenerate ones, plus $m$ times the
Po$^*$ and $\mathsf{NT}(\alpha,p)$ solutions (for example, if $\th1=0$ and
$\th2>0$, then I expect all $X_t$ to be even, and for $X_t/2$ to be one of
our solutions), but I haven't checked that. The key will be showing that
if $\th m>0$ and $\th n>0$ then ${\textsf{E}}[\zz1^{X_1}\,\zz3^{X_3}\mid X_2=
\mathsf{lcm}(m,n)]$ factors properly in two ways--- so that the
conditional distribution of $X_3$ given $X_2=j$ won't depend on what mix
of $m$'s and $n$'s comprise $j$, and hence won't depend on $X_1$.
\end{itemize}
}
\ifBib
\else
\newcommand{\noopsort}[1]{}
\fi
\par
\centerline{\begin{tabular}{l@{\qquad}l}
Robert L. Wolpert &Lawrence D. Brown\\
Department of Statistical Science &Department of Statistics\\
Duke University &Wharton School, University of Pennsylvania\\
Durham, NC 27708-0251 USA &Philadelphia, PA 19104 USA\\
\texttt{[email protected]}&(Deceased)\\
\url{http://www.stat.duke.edu/~rlw/}\\
\end{tabular}}
\end{document} |
\begin{document}
\author{Robert Alicki}
\thanks{These authors contributed equally to this work.}
\affiliation{Institute of Theoretical Physics and Astrophysics, University of Gdansk, Wita Stwosza 57, 80-952 Gdansk, Poland}
\author{David Gelbwaser-Klimovsky}
\thanks{These authors contributed equally to this work.}
\affiliation{Department of Chemistry and Chemical Biology, Harvard University,
Cambridge, MA 02138}
\title{Non-equilibrium quantum heat machines }
\begin{abstract}
Standard heat machines (engine, heat pump, refrigerator) are composed of a system (``working fluid'') coupled to at least two equilibrium baths at different temperatures and periodically driven by an external device (piston or rotor) sometimes called a work reservoir. The aim of this paper is to go beyond this scheme by considering environments which are stationary but cannot be decomposed into a few baths at thermal equilibrium. Such situations are important, for example in solar cells, chemical machines in biology, various realizations of laser cooling or nanoscopic machines
driven by laser radiation. We classify non-equilibrium baths depending on their thermodynamic behavior and show that the efficiency of heat machines operating under their influence is limited by a generalized Carnot bound.
\end{abstract}
\maketitle
\section{Introduction}
Quantum systems are rarely completely isolated from their environment, whose influence, positive or negative, should be considered. The theory of open quantum system was developed \cite{Davies:1974,Lindblad76,AlickiLendi:2006,HuelgaRivas:2012,spohn2007irreversible} to achieve this goal and in particular open the way to the study of quantum heat machines, such as engines and refrigerators \cite{gelbwaser2015thermodynamics,kosloff2013quantum,segal2006molecular,esposito2010quantum,Levy:2012,correa2013performance}. Those models generally assume the interaction between a system and one or two environments, in thermal equilibrium, thereby termed heat baths. The efficiency of these machines is limited by the Carnot bound, requiring at least two baths at different temperatures in order to extract work.
\par
Nevertheless, there are many examples in nature where the environment is not in thermal equilibrium, such as sunlight, continuous laser radiation, biological cells, etc. Our goal is to establish maximum efficiency bounds, as well as to determine the output (work power or cooling power) of quantum heat machines operating with non-equilibrium baths. Because the bath is not at thermal equilibrium, the second law does not demand the presence of a second bath for work extraction. Our aim is to go beyond a simple situation when an environment is a collection of independent heat baths with different temperatures which, in principle can be also treated as a single non-equilibrium bath.
\par
We study a micro or mesoscopic externally driven quantum system, the working fluid, coupled to a large environment. On the relevant time-scale, which is longer than the scales of driving and of microscopic irreversible processes, the basic parameters of the reservoir are constant, hence the reference state of the environment is a stationary state. Such non-equlibrium stationary systems are well known in macroscopic thermodynamics and usually described in terms of local equilibrium with space-dependent temperature, density, pressure, fluid velocity etc \cite{kondepudi2014modern}.
\par
However, there are important situations where non-equilibrium character of the environment state is not related to spatial non-homogeneity but rather to some internal properties of its state. For example, sunlight at the Earth surface is a rather homogeneous environment, the shape of its spectrum roughly corresponds to the Planck distribution at the Sun surface temperature $T_s$, but the photon density is much lower than the equilibrium one, and the absorption in the atmosphere creates ``holes'' in the spectrum.
\par
Another example is a laser radiation \cite{scully1997quantum} in a continuous wave operation mode, which for the idealized single mode situation can be treated as a single quantum oscillator or even a classical monochromatic wave, while for multimode case with strong phase diffusion it acts on optically active centers like a non-equilibrium bath. Biological machines also provide examples of systems coupled to non-equilibrium baths either of the chemical nature or consisting of photons or different types of excitons.
In the next Section we briefly review the theory of quantum heat machines operating with equilibrium baths and consider the simplest case, where the working fluid is a two-level system (TLS). In Section III the non-equilibrium TLS heat machine is analyzed. The notion of ``local temperatures'' is introduced. Different examples of non-equilibrium baths are given and they are classified according to their effects on the heat machine operation. Finally, in Section IV a general theory for non-equilibrium quantum heat machines composed of a general quantum working fluid is given, and it is shown that their maximum efficiency is limited by a Carnot-like bound.
\section{Standard equilibrium heat machines}
As a first step and reference point we review the main results of
quantum heat machines operating in contact with two equilibrium baths.
The machine is composed of a periodic modulated working fluid, the system, which is
permanently coupled to the hot (cold) bath at temperature $T_{H(C)}$,
and follows a continuous cyclic evolution \cite{Gelbwaser:2013,SzczygielskiGelbwaserAlicki:2013}. The total Hamiltonian is
\begin{equation}
H_{tot}(t) = H_{S}(t)+\sum_{i= H,C} \bigl(S_{i}\otimes B_{i}+H_{B_{i}}\bigr) ,
\label{eq:hamtot}
\end{equation}
where $H_{B_{i}}$ is the $i$-bath free Hamiltonian, $B_{i}$ its interaction
operator and $S_{i}$ is a system operator. The system Hamiltonian fulfills
periodicity condition $H_{S}(t)=H_{S}(t+\frac{2\pi}{\Omega})$,
$\Omega$ being the modulation angular frequency.
To illustrate our approach, we choose the simplest realization as an example, nevertheless the same analysis may be applied to more complex models. We
assume the working fluid is a TLS whose frequency is modulated,
$H_{S}(t)=(\omega_{o}+\omega(t))\frac{\sigma_{Z}}{2}$. The modulation
may be decomposed into a Fourier series $e^{-i\int_{0}^{t}\omega(t')dt'}=\sum_{q}\xi_{q}e^{-iq\Omega t}$
leading to the Floquet expansion of the Lindblad operator \cite{SzczygielskiGelbwaserAlicki:2013}.
For a detailed derivation we refer the reader to \cite{Szczygielski:2014}. In the interaction
picture the evolution equation of the working fluid density matrix, $\rho$, is given by the quantum Markovian master equation of Lindblad-Gorini-Kossakowaski-Sudarshan type \cite{GKS76,Lindblad76}, which for diagonal matrix elements (${\rho}_{gg(ee)}$, ground (excited) state population) yields the rate equation
\[
\dot{\rho}_{ee}=\sum_{q\in \mathbb{Z}}\sum_{i= H,C} \left(-P_{q}G^{i}(\omega_{q})\rho_{ee}+P_{q}G^{i}(-\omega_{q})\rho_{gg} \right),
\]
where $\omega_q=\omega_0+q\Omega$, $P_{q}=||\xi_{q}||^{2}$ are the harmonic strengths satisfying the normalization condition $\sum_{q\in \mathbb{Z}}P_{q} = 1$. The bath
coupling spectrum is defined as $G^{i}(\omega)=\int_{-\infty}^{\infty}e^{it\omega}\langle B^{i}(t)B^{i}(0)\rangle dt$
and measures the interaction strength between the TLS and the bath mode $\omega$.
If the bath is at thermal equilibrium, it fulfills the Kubo-Martin-Schwinger (KMS) condition (we put $\hbar=1$ and $k_B=1$) \cite{kubo1957statistical,martin1959theory}
:
\begin{equation}
\frac{G^{i}(-\omega)}{G^{i}(\omega)}=e^{-\omega/T_{i}},\label{eq:kms}
\end{equation}
where $T_{i}$ is the bath temperature. If the TLS interacts only with the $i$-bath and is not being modulated it will equilibrate to this temperature, i.e., $\rho_{ee}/\rho_{gg}=e^{-\omega_{0}/T_i}$.
We are interested in the thermodynamic behavior at steady state (or
limit cycle) where any transient effect vanishes. The steady state
heat currents from the cold and hot bath, respectively, and the power supplied by the source of modulation (work reservoir) are given by the expressions
\begin{gather}
\bar{J}_{C(H)}=\sum_{q \in \mathbb{Z}}\frac{\omega_{q}P_{q}}{w+1}\left(G^{C(H)}(-\omega_{q})-G^{C(H)}(\omega_{q})w\right), \notag\\
\bar{P}=-\bar{J}_{H}-\bar{J}_{C},
\end{gather}
\\
where $w$ is the steady state population rate
\[
w=\left(\frac{\rho_{ee}}{\rho_{gg}}\right)^{SS}=\frac{\sum_{q \in \mathbb{Z}}\sum_{i = H,C}P_{q}G^{i}(-\omega_{q})}{\sum_{q \in \mathbb{Z}}\sum_{i = H,C}P_{q}G^{i}(\omega_{q})}
\]
and the sign convention is that extracted power is negative.
As expected from the Second Law, it has been shown \cite{Gelbwaser:2013,Gelbwaser:2013a} that work extraction requires at
least two equilibrium baths at different temperatures. Nevertheless, this does not apply to non-equilibrium baths. In the following sections
we address this scenario and find the conditions needed for extracting work from a single non-equilibrum bath.
\section{How to introduce the formalism of non-equilibrium baths}
We consider the case of a TLS coupled to a \textit{single} stationary non-equilibrium bath \cite{kondepudi2014modern} through the interaction Hamiltonian $H_{int}=S\otimes B$. Stationarity requires the bath state to be diagonal in the bath free Hamiltonian basis. Thermal states are just particular cases of this kind of states. Stationarity is essential to ensure that the bath two-times auto-correlations depend only on time difference.
In order to describe non-thermal but stationary baths, we generalize the KMS condition \eqref{eq:kms}, by introducing
``local temperature''
\begin{gather}
e^{-\omega/T_B(\omega)} \equiv \frac{G(-\omega)}{G(\omega)},
\label{eq:nokms}
\end{gather}
where $B$ is the bath interaction operator. Local temperatures depend on $B$, the frequency and the state of the bath. Only for a thermal equilibrium bath $T_B(\omega) = T$ independently of $B$ and $\omega$.
In the next section we show some examples of non-equilibrium baths and calculate their effective local temperature.
\subsection{Harmonic oscillator baths}
Baths like electromagnetic radiation or vibration modes of a material are just a collection of independent quantum oscillators with quasi-continuous spectrum of frequencies \cite{breuer2002theory}. Baths could also be composed of fermions, e.g., spin-baths, but even then by suitable transformations (e.g. Holstein-Primakoff Hamiltonian) can often be approximated by bosonic baths. The bosonic bath free Hamiltonian is given by
\begin{equation}
H_B = \sum_{k} \omega_k\, b_k^+ b_k , \quad [b_k , b_l^+] = \delta_{kl}.
\label{harmonic_bath}
\end{equation}
In most applications the bath operator that couples to the system is linear in creation and annihilation operators
\begin{equation}
B= \sum_{k} \bigl\{ g_k b_k + \bar{g}_k b_k^+\bigr\}.
\label{harmonic_bath1}
\end{equation}
The state of the bath is assumed to be stationary and hence diagonal in the particle number basis.
Then, the coupling spectrum yields
\begin{equation}
G(\omega) = \left\{ \begin{array}{ll}
\sum_{k} |g_k |^2 ( n_k + 1)\delta(\omega_k - \omega), & \omega > 0 \\
\sum_{k} |g_k |^2 n_k \delta(\omega_k - \omega), & \omega < 0
\end{array}\right\},
\label{harmonic_spec}
\end{equation}
where $n_k = \mathrm{Tr}(\rho_B b_k^+ b_k)$ is the k-mode population, the upper (lower) line in \eqref{harmonic_spec} is the emission (absorption) rate. The local temperature is given by
\begin{equation}
T_B(\omega) = \frac{\omega}{\ln\bigl[ \frac{G(\omega)}{G(-\omega)} \bigr]}
\label{linear_temp}
\end{equation}
or
\begin{gather}
e^{-\omega/T_B(\omega)} = \frac{n(\omega)}{n(\omega) +1},
\label{eq:nokms1}
\end{gather}
where $n(\omega)= \frac{\sum_{k} |g_k |^2 n_k \delta(\omega_k - \omega)}{\sum_{k} |g_k |^2 \delta(\omega_k - \omega)}$ denotes the average population number for the frequency $\omega$.
As we show below, the frequency dependence of the local temperature determines whether work can be extracted from the single non-equilibrium bath.
\subsubsection{Sunlight}
The Sun is a thermal source, emitting thermal radiation at $T_s= 6000$~K. Due to geometrical considerations \cite{wurfel2009physics,alicki2015solar}, just a small fraction of the emitted photons reach the Earth, reducing the effective mode population and thereby $n(\omega) = \lambda \bigl[e^{\omega/T_s} -1\bigr]^{-1}$, where $\lambda= 2.5\times 10^{-5}$ is a geometric factor equal to the angle subtended by the Sun seen from the Earth. Effectively, sunlight on Earth is out of equilibrium, and systems with different frequencies will ``equilibrate'' to different temperatures. Fig.~\ref{fig:C} shows the equilibration temperature, $\rho_{ee}/\rho_{gg}=e^{- \omega/T_B(\omega)}$, as a function of the TLS frequency. Moreover, the atmosphere acts like a filter and produces a more complicated shape of $n(\omega)$ with many ``holes''.
\begin{figure}
\caption{(Color online) Local temperature of a TLS coupled to sunlight. The continuous line corresponds to the case of thermal radiation at the temperature of Sun surface $T_s \simeq 6000$~K, while the dashed line to sunlight out of equilibrium due to a geometrical factor, $\lambda= 2.5\times 10^{-5}$.}
\label{fig:C}
\end{figure}
\subsubsection{Multimode laser radiation}
A multimode laser radiation in a continuous wave operation mode may be modeled as a bath at the phase average, $\bar{\rho}_B$, of the multimode coherent state $\rho_B$,
\begin{equation}
\rho_B=U_{z} |\mathrm{vac}\rangle\langle \mathrm{vac}| U_{z}^{\dagger}, \quad |\mathrm{vac}\rangle\langle \mathrm{vac}|= \bigotimes_k |0_k\rangle\langle 0_k|,
\label{coherent}
\end{equation}
where $U_{z}^{\dagger}$ is the displacement operator
\begin{equation}
U_{z}^{\dagger} b_k U_{z} = b_k - z(\omega_k).
\label{coherent1}
\end{equation}
This non-equilibrium bath is obtained by displacing a thermal equilibrium bath at zero temperature and performing a phase averaging.
The phase average of the bath state, or diagonality in the photon occupation number basis, is required in order for the bath to be stationary.
The local temperature for this bath is equal to
\begin{equation}
T_B(\omega) = \frac{\omega }{\ln\bigl(1 + |z(\omega)|^{-2}\bigr)} .
\label{coherent2}
\end{equation}
For large $|z(\omega)|$ this yields
\begin{equation}
T_B(\omega) \simeq \omega |z(\omega)|^{2} .
\label{coherent3}
\end{equation}
Therefore, for a constant shift, $z$, for all the modes, the local temperature is a linearly increasing function of the frequency.
\subsubsection{Squeezed thermal bath}
The state of a stationary squeezed thermal bath is the phase average \cite{abah2014efficiency,rossnagel2014nanoscale}, $\bar{\rho}_B$, of the following density matrix
\begin{equation}
\rho_B = Z^{-1}S_{r}\, e^{- H_B/T_{eq}}\, S_{r}^{\dagger},
\label{squeezed}
\end{equation}
where $S_r$ is the squeezing unitary operator defined by
\begin{equation}
S_{r}^{\dagger} b_k S_{r} = \cosh(r(\omega)) b_k + \sinh(r(\omega)) b_k^+ .
\label{squeezed1}
\end{equation}
Its local temperature is given by the expression
\begin{equation}
T_B(\omega) = \frac{\omega }{\ln\bigl\{1 + \bigl[n_{T_{eq}}(\omega) + (2 n_{T_{eq}}(\omega) +1)\sinh^2(r(\omega))\bigr]^{-1}\bigr\}} ,
\label{squeezed2}
\end{equation}
where $n_{T_{eq}}(\omega) = [e^{\omega/T_{eq}} -1]^{-1} $, is the mode population without squeezing. For large $r$ \eqref{squeezed2} reduces to
\begin{equation}
T_B(\omega) \simeq \frac{\omega }{4} (2n_{T_{eq}}(\omega) +1) e^{2r} .
\label{squeezed3}
\end{equation}
\subsection{Classification of non equilibrium baths}
The functional dependence of $T_B(\omega)$ can be used to classify
non-equilibrium baths. It depends on the bath state, coupling to the system and frequency. In this section we consider
a heat engine composed of a TLS coupled to a single non-equilibrium
bath and find the required conditions for work extraction. The dynamics of the total system is governed by the Hamiltonian
\begin{equation}
H_{tot}(t) = (\omega_{o}+\omega(t))\frac{\sigma_{Z}}{2}+ S\otimes B + H_{B}.
\label{eq:hamtot2}
\end{equation}
The steady heat current is a sum of contributions corresponding to all harmonics of $\Omega$ (see section IV for a general case)
\begin{equation}
\bar{J}=\sum_{q \in \mathbb{Z}} \frac{\omega_{q}P_{q}}{w+1}\left(G(-\omega_{q})-G(\omega_{q})w\right) = -\bar{P},
\label{eq:curstd}
\end{equation}
with the TLS steady state population ratio
\begin{equation}
w=\left(\frac{\rho_{ee}}{\rho_{gg}}\right)^{SS}=\frac{\sum_{q \in \mathbb{Z}}P_{q}G(-\omega_{q})}{\sum_{q \in \mathbb{Z}}P_{q}G(\omega_{q})}.
\label{eq:w}
\end{equation}
\subsubsection{Non-equilibrium but passive }
Using Eqs.~\eqref{eq:nokms}, \eqref{eq:curstd} and \eqref{eq:w} we can write the expression for the power supplied to the system as
\begin{equation}
\bar{P}= z^{-1}\sum_{\{q_{1}>q_{2}\in \mathbb{Z}\}}{(q_{1}-q_{2})\Omega P_{q_{1}}P_{q_{2}}G(\omega_{q_1})G(\omega_{q_2})}\left(e^{-\omega_{q_2}/T_B(\omega_{q_2})}-e^{-\omega_{q_1}/T_B(\omega_{q_1})}\right),
\label{eq:ppasive}
\end{equation}
where
\begin{equation}
z =\sum_{q \in \mathbb{Z}}P_{q}G(\omega_{q})\bigl[ 1+ e^{-\omega_{q}/T_B(\omega_{q})}\bigr] .
\label{eq:z}
\end{equation}
A sufficient condition
\begin{equation}
\omega_{q_2}/T_B(\omega_{q_2})>\omega_{q_1}/T_B(\omega_{q_1}), \quad \mathrm{for} \quad \{q_{1}>q_{2}\},
\label{eq:workcond}
\end{equation}
assures that $\bar{P} < 0$ and hence the engine extracts work from the bath.
\par
We define the \textit{passivity function} as $f(\omega) \equiv \frac{d}{d\omega} \left(\omega/T_B(\omega) \right)$. If for all frequencies $f(\omega)>0$, no work can be extracted. We term such couplings to bath \textit{passive} (in analogy to passive states \cite{pusz1978passive,lenard1978thermodynamical,Gelbwaser:2013a,gelbwaser2014heat} which do not allow
for work extraction). Previous examples, based on linear coupling to a bosonic bath, are passive if we consider only frequency-independent deformations of a thermal bath (constant filtering, displacement or squeezing). From Eq.~\eqref{eq:ppasive} we deduce that work
extracted from a single non-equilibrium bath depends on the coupling spectrum shape and requires the passivity function $f(\omega)$
to be negative in some range of frequencies.
\subsubsection{Two equilibrium baths as a single non-equilibrium bath}
As a first example of a non passive bath, we consider the standard quantum heat engine where the working fluid interacts
with two baths at equilibrium. We model it as a quantum heat machine with a single non-equilibrium bath. For this we need
to consider the spectrum as a sum of two baths spectra. Therefore,
the local temperature of the composed bath satisfies the following relation
\[
e^{-\omega/T_B(\omega)}=\frac{e^{-\omega/T_h}G^{h}(\omega)}{G^{h}(\omega)+G^{c}(\omega)}+\frac{e^{-\omega/T_c}G^{c}(\omega)}{G^{h}(\omega)+G^{c}(\omega)}=e^{-\omega/T_h}(1-m(\omega))+e^{-\omega/T_c}m(\omega),\quad 0\leq m(\omega)\leq 1 ,
\]
where we use the fact that both baths are in equilibrium and the standard
KMS condition holds. The effective Boltzmann factor is a weighted average of
the two Boltzmann factors, where the weights depend on how strongly the
working fluid is coupled to each bath at the given frequency. Therefore, $T_h\geq T_B(\omega)\geq T_c$. For the sake of simplicity, we assume that the bath coupling spectrum overlaps with only two
harmonic frequencies ($\omega_{q_{1}}>\omega_{q_{2}}$).
As shown in \cite{Gelbwaser:2013} this condition is required in order to achieve high efficiency.
Work extraction requires the hot bath being coupled more strongly
to the high frequency mode ($G^{h}(\omega_{q_1})\gg G^{c}(\omega_{q_1})$)
and the opposite for the low frequency mode ($G^{h}(\omega_{q_2})\ll G^{c}(\omega_{q_2})$) \cite{Gelbwaser:2013}.
The efficiency of the engine is given by the Carnot-type formula
\[
\eta=1-\frac{T_B(\omega_{q_2})}{T_B(\omega_{q_1})}\leq1-\frac{T_c}{T_h}.
\]
The extreme case, where the engine reaches the maximum efficiency,
the Carnot bound, is when the baths are spectrally separated, $T_B(\omega_{q_1})\simeq T_{h}$
and $T_B(\omega_{q_2})\simeq T_{c}$.
\subsubsection{Non-equilibrium and non-passive bosonic bath}
As shown above, frequency independent deformation of a thermal bosonic bath with linear coupling, creates passive baths. Therefore, work extraction requires the use of \textit{selective} filters. In order to not contradict the second law of thermodynamics, which
forbids work extraction from a single thermal bath, this selective
filter should involve the presence of another bath, a non-equilibrium
process or a hidden work injection.
Assume a single bosonic bath at the equilibrium temperature $T_{eq}$ and linearly coupled to the system. If a selective filter, $\lambda(\omega)$, is applied, the local temperature satisfies
\begin{equation}
e^{-\omega/T_B(\omega)}=\frac{\lambda(\omega)n(\omega)}{\lambda(\omega) n(\omega)+1}.
\end{equation}
What are the conditions required for this filter to allow work
extraction?
\par
Consider again the bath coupling spectrum which overlaps with only two
harmonic frequencies $\omega_{q_1}, \omega_{q_2}$ with $q_1 > q_2$. As shown in \cite{Gelbwaser:2013} this condition is required in order to achieve high efficiency and due to \eqref{eq:ppasive} the power is given by
\begin{equation}
\bar{P}= z^{-1}(q_{1}-q_{2})\Omega P_{q_{1}}P_{q_{2}}G(\omega_{q_1})G(\omega_{q_2})\left(e^{-\omega_{q_2}/T_B(\omega_{q_2})}-e^{-\omega_{q_1}/T_B(\omega_{q_1})}\right).
\label{eq:pnopasive}
\end{equation}
Assume that we reduce the population of the mode $\omega_{q_2}$
($\lambda(\omega_{q_2})<1$), and we do not filter the other
mode ($\lambda(\omega_{q_1})=1$). In order to allow work extraction, the filter should satisfy the following condition
\[
\lambda(\omega_{q_2})<e^{\frac{(q_{2}-q_{1})}{2T_{eq}}\Omega}\frac{\sinh(\frac{\omega_{q_2}}{2T_{eq}})}{\sinh(\frac{\omega_{q_1}}{2T_{eq}})}<1 .
\]
As paradoxical as it may sound, by reducing a specific
mode population, we can extract work from a single thermal bath! The filtering lowers the effective temperature $T_B(\omega_{q_2})$, reducing the excitations that are emitted to this mode, and ``saving'' energy which is ultimately transformed into work.
The efficiency of this machine is bounded by a generalized Carnot
limit
\[
\eta=\frac{-\bar{P}}{\bar{J}_{H}}\leq1-\frac{T_B(\omega_{q_2})}{T_{eq}},
\]
where $T_{eq}$ and $T_B(\omega_{q_2})$ play the
role of the effective hot and cold bath temperature respectively.
\subsubsection{Deviation from equilibrium of engineered bath}
Using the selective filtering introduced in the last section, we can engineer a non-equilibrium bath from an equilibrium one, and characterize its deviation from equilibrium by the parameter
\[
D=1-\lambda(\omega_{q_2}).
\]
For the equilibrium bath $D=0$, and when we start reducing
population of the modes with frequencies around $\omega_{q_2}$, the bath will go away from equilibrium
producing some non-equilibrium effects, like frequency dependent equilibration
temperature. Nevertheless work extraction will be possible only when the bath is far enough from equilibrium, i.e.
\begin{equation}
D>n(\omega_{q_1})(e^{\omega_{q_1}/T_{eq}}-e^{\omega_{q_2}/T_{eq}}).
\label{eq:distwork}
\end{equation}
When the deviation from equilibrium increases, the local temperature of
the lower frequency mode reduces and the efficiency of the quantum heat engine rises.
For $D<0$ the bath is also out of equilibrium. But, in this case
instead of reducing the mode population, it is being increased by some
external mechanism (for example, selective concentration of light). The equilibration temperature of the system will
depend on the frequency, but for $D<0$, work cannot be extracted.
Again, paradoxically, the increase of energy in
``incorrect modes'' reduces the possibility of work extraction.
The bath is taken away from thermal equilibrium, but in the ``opposite direction''
to that leading to work extraction.
\section{General theory for non-equilibrium quantum heat machines}
The model based on the TLS and studied above is an example of a large class of open quantum systems with physical Hamiltonian $H_S(t) = H_S(t +\tau)$ under the assumption that the perturbation frequency $\Omega = 2\pi/\tau$ is comparable to or higher than the relevant Bohr frequencies. Such fast driving is typically provided by a strong coherent laser field and appears in the thermodynamical approach to the theory of lasers \cite{scully1997quantum,scovil1959three,geva1996quantum,boukobza2007three}, various types of laser cooling \cite{phillips1998nobel,gelbwaser2015laser}, optomechanical devices \cite{aspelmeyer2014cavity}, etc.
Potential applications include new light harvesting systems, both of biological nature or man-made devices. For this class of systems a consistent theory can be developed which includes the general case of stationary nonequilibrium environment characterized by local temperatures. The laws of thermodynamics can be derived and Carnot-type bounds are obtained.
\subsection{Master equations}
We begin with a presentation of the canonical construction of the Markovian generator for an open system weakly coupled to a stationary, but generally nonequilibrium, environment.
The system is assumed to be ``small" and described by the periodic in time physical Hamiltonian $H_S(t) = H_S(t +\tau)$ under the assumption that the perturbation frequency $\Omega = 2\pi/\tau$ is comparable to or higher than the relevant temporal Bohr frequencies. We assume that the Hamiltonian of the system already contains all Lamb-like shifts induced by interaction with environment \cite{AlickiLendi:2006,HuelgaRivas:2012,Szczygielski:2014}.\\
The system bath-interaction is parametrized as
\begin{equation}
H_{\mathrm{int}} = \sum_{\alpha } S_{\alpha} \otimes B_{\alpha},
\label{Hint}
\end{equation}
where $S_{\alpha}$ and $B_{\alpha}$ are hermitian operators of system and bath, respectively. The environment (bath) is assumed to be a large quantum system with practically continuous Hamiltonian spectrum and a proper behavior of multi-time correlation functions of relevant observables. The initial state of the bath is stationary, therefore is invariant with respect to the free dynamics of the bath and satisfies $\langle B_{\alpha}\rangle_B =0$, where $\langle\cdots\rangle_B$ denotes the average over the bath state. We assume also, for simplicity, that the cross-correlations between $B_{\alpha}$ and $B_{\beta}$ vanish for $\alpha\neq\beta$.
\par
Applying the Floquet theory one obtains the following decomposition of the associated unitary propagator
\begin{equation}\label{eq_PropagatorResolution}
U(t) = \mathbb{T} \exp\Bigl\{-i\int_0^t H_S(s) ds\Bigr\} = P(t) e^{-i\bar{H}t},
\end{equation}
where $P(t) = P(t+\tau)$ is a family of periodic unitaries and $\bar{H}$ is the \emph{averaged Hamiltonian} satisfying
\begin{equation}\label{eq_FloquetOperatorGeneral}
U(\tau) = e^{-i\bar{H}\tau} .
\end{equation}
The \emph{Floquet operator} $U(\tau)$, and the averaged Hamiltonian $\bar{H}$ possess common eigenvectors $\{\phi_{k}\}$, i.e.,
\begin{equation}\label{eq_Floqueteigen}
\bar{H} \phi_{k} = \bar{\epsilon}_k \phi_{k} ,\quad U(\tau)\phi_{k} = e^{-i\bar{\epsilon}_{k}\tau}\phi_{k},
\end{equation}
where $\{\bar{\epsilon}_{k}\}$ are called \emph{quasi-energies} of the system. These properties imply a particular form of the Fourier decomposition
\begin{equation}\label{eq_SoperatorExpansion}
S_{\alpha}(t) \equiv U(t)^{\dagger} S_{\alpha} U(t) = \sum_{\{\bar{\omega}_q\}}S_{\alpha}(\bar{\omega}_q) e^{-it\bar{\omega}_q}, \\
\end{equation}
where
\begin{equation}\label{quasiBohr}
\{\bar{\omega}_q\} = \{ \bar{\omega} + q\Omega,\ q\in\mathbb{Z}\}\, ;\quad \{\bar{\omega}\} = \{\bar{\epsilon}_k - \bar{\epsilon}_l\},
\end{equation}
i.e., it is a set of all sums of the \emph{relevant Bohr quasi-frequencies} and all multiplicities of the modulation frequency. By $\{\bar{\omega}_q\}_+$ we denote the subset of $\{\bar{\omega}_q\}$ with non-negative relevant Bohr quasi-frequencies $\bar{\omega}$.
\par
The operators $S_{\alpha}(\bar{\omega}_q)$ are subject to relations
\begin{align}\label{SrelationsP}
S_{\alpha}(\bar{\omega}_q)^{\dagger} &= S_{\alpha}(-\bar{\omega}_q), \nonumber\\ [\bar{H}, S_{\alpha}(\bar{\omega}_q)] &= -\bar{\omega} S_{\alpha}(\bar{\omega}_q).
\end{align}
Physically, the harmonics $ q\Omega$ correspond to the energy quanta which are exchanged with the external periodic driving.\\
Repeating the construction of the weak coupling generator in the interaction picture one obtains
\begin{equation}\label{decomposition1}
\mathcal{L} = \sum_{\alpha}\sum_{\{\bar{\omega}_q \}_+}\mathcal{L}^{\alpha}_{\bar{\omega}_q},
\end{equation}
with a single term defined as
\begin{align}\label{eq_LindbladFast}
\mathcal{L}^{\alpha}_{\bar{\omega}_q}\rho = &\frac{1}{2}\Bigl\{G_{\alpha}(\bar{\omega}_q) \Bigl( [S_{\alpha}(\bar{\omega}_q), \rho S_{\alpha}(\bar{\omega}_q)^{\dagger}] +
[S_{\alpha}(\bar{\omega}_q) \rho, S_{\alpha}(\bar{\omega}_q)^{\dagger}] \Bigr) \nonumber\\
& +G_{\alpha}(-\bar{\omega}_q)\Bigl( [S_{\alpha}(\bar{\omega}_q)^{\dagger}, \rho S_{\alpha}(\bar{\omega}_q)] +
[S_{\alpha}(\bar{\omega}_q)^{\dagger} \rho, S_{\alpha}(\bar{\omega}_q)] \Bigr)\Bigr\}.
\end{align}
Using \eqref{SrelationsP} one can show that $\mathcal{L}^{\alpha}_{\bar{\omega}_q}$ commutes with $-i[\bar{H},\cdot]$ and possesses a Gibbs-like stationary state
\begin{equation}\label{Gibbst}
\bar{\rho}^{\alpha}_{\bar{\omega}_q}= Z^{-1} e^{- (\bar{\omega}_q/\bar{\omega})\bar{H}/T_{\alpha}(\bar{\omega}_q)},
\end{equation}
with the local temperature $ T_{\alpha}(\bar{\omega}_ q)$ corresponding to the \emph{coupling channel} $(\alpha,\bar{\omega}_q)$. The ``renormalizing" term ${\bar{\omega}_q}/{\bar{\omega}}$ in front of $\bar{H}$ in the Gibbs-like state takes into account the total energy exchange including $q\Omega$ corresponding to external driving device. The properties \eqref{SrelationsP} imply that the generator \eqref{eq_LindbladFast} transforms independently the diagonal and off-diagonal elements of $\rho$ computed in the eigenbasis of $\bar{H}$.
\par
The MME in the \emph{Schroedinger picture} possesses the following structure
\begin{equation}\label{eq_MME_Schroedinger}
\frac{d\rho_{sch}(t)}{dt} = -i \comm{H_S(t)}{\rho_{sch}(t)} + (\mathcal{U}(t) \mathcal{L} \, \, \mathcal{U}(t)^{\dagger}) \rho_{sch}(t),
\end{equation}
where $\mathcal{U}(t) \rho = U(t) \rho U(t)^{\dagger}$. A very useful factorization property for the solution of \eqref{eq_MME_Schroedinger} holds
\begin{equation}\label{eq_MME_Schroedingersol}
\rho_{sch}(t) = \mathcal{U}(t) e^{t\mathcal{L}} \rho_{sch}(0),
\end{equation}
which allows to discuss separately the decoherence/dissipation effects described by $\mathcal{L}$ and the unitary evolution $\mathcal{U}(t)$.
\subsection{The Laws of Thermodynamics}
The structure of MME's derived above and the introduced notion of local temperatures allow to formulate the first and second law of thermodynamics in terms of energy, work, heat and entropy balance. The basic tool
is the following inequality valid for any LGKS generator $\mathcal{L}$ with a stationary state $\bar{\rho}$ \cite{spohn2007irreversible} and arbitrary $\rho$
\begin{equation}
\mathrm{Tr}\bigl(\mathcal{L}\rho[\ln \rho - \ln \bar{\rho}]\bigr) \leq 0.
\label{spohn}
\end{equation}
This inequality is used to show positivity of entropy production under the assumption that the physical entropy of the system $S(t)$ is identified with the von Neumann entropy of its state
\begin{equation}
S(t)= -\mathrm{Tr}\bigl(\rho(t)\ln\rho(t)\bigr) .
\label{vNeumann}
\end{equation}
\subsubsection{Entropy balance and local heat currents}
In the case of fast driving there is no obvious definition of the temporal internal energy of the system because a fast exchange of energy quanta $q\Omega$ between system and the external source of driving makes a temporal partition of energy between both systems ambiguous. The situation is different for the entropy balance because the entropy change is due to irreversible processes which are slow under the weak system-environment coupling assumption. The weak coupling scheme yields the coarse-grained in time effective dynamics described by the MME
\eqref{eq_MME_Schroedinger}, which suggests the following definition of the heat current $J_{\alpha}(\bar{\omega}_q)$ supplied to the system by the coupling channel $(\alpha, \bar{\omega}_q)$ and involving the averaged Hamiltonian (multiplied by ${\bar{\omega}_q}/{\bar{\omega}}$)
\begin{equation}
J^{\alpha}_{\bar{\omega}_q}(t) =\frac{\bar{\omega}_q}{\bar{\omega}} \mathrm{Tr}\bigl(\bar{H}\mathcal{L}^{\alpha}_{\bar{\omega}_q}\rho(t)\bigr),
\label{heat current}
\end{equation}
where $\rho(t)$ is the system density matrix in the interaction picture and according to \eqref{eq_MME_Schroedinger} given by
\begin{equation}\label{eq_MME_Schroedinger1}
\rho(t) = e^{t\mathcal{L}} \rho(0).
\end{equation}
These definitions allow us to formulate the Second Law, which is again a consequence of \eqref{spohn} applied to each single coupling channel
\begin{equation}
\frac{d}{dt}S(t) - \sum_{\alpha}\sum_{\{\bar{\omega}_q \}_+}\frac{J^{\alpha}_{\bar{\omega}_q}(t)}{T_{\alpha}(\bar{\omega}_q)} =
\sum_{\alpha}\sum_{\{\bar{\omega}_q \}_+}\sigma^{\alpha}_{\bar{\omega}_q}(t)\geq 0,
\label{SIIlaw_fast}
\end{equation}
where $\sigma^{\alpha}_{\bar{\omega}_q}(t)\geq 0 $ is an entropy production caused by a single coupling channel $(\alpha, \bar{\omega}_q) $ given by
\begin{equation}
\sigma^{\alpha}_{\bar{\omega}_q}(t) = \mathrm{Tr}\bigl(\mathcal{L}^{\alpha}_{\bar{\omega}_q}\rho(t)[\ln \rho(t) - \ln \bar{\rho}^{\alpha}_{\bar{\omega}_q}]\bigr)\geq 0.
\label{Senprod_fast}
\end{equation}
For the case of environment composed of several independent heat baths the equation \eqref{SIIlaw_fast} reduces to the standard form of the Second Law for open systems with usual temperatures.
\subsubsection{Steady state regime }
Under natural ergodic conditions and due to \eqref{eq_MME_Schroedinger} any initial state tends to a limit cycle (or fixed point in particular cases), i.e.,
\begin{equation}
\rho(t) \to \bar{\rho}(t) = U(t) \bar{\rho}\, U(t)^{\dagger}= \bar{\rho}(t+\tau) , \quad\mathrm{where} \quad \mathcal{L}\bar{\rho} = 0.
\label{limit}
\end{equation}
Then the entropy $S(\bar{\rho}(t))$ and the heat currents given by
\begin{equation}
\bar{J}^{\alpha}_{\bar{\omega}_q} =\frac{\bar{\omega}_q}{\bar{\omega}} \mathrm{Tr}\bigl(\bar{H}\mathcal{L}^{\alpha}_{\bar{\omega}_q}\bar{\rho}\bigr),
\label{heat current_st}
\end{equation}
become constants leading to the following form of the Second Law
\begin{equation}
\sum_{\alpha}\sum_{\{\bar{\omega}_q \}_+}\frac{\bar{J}^{\alpha}_{\bar{\omega}_q} }{T_{\alpha}(\bar{\omega}_q)} \leq 0.
\label{SIIlaw_fast1}
\end{equation}
The averaged internal energy of the system is constant in the limit cycle, and hence we can use the total energy conservation to write the First Law in the form
\begin{equation}
\bar{P}= -\sum_{\alpha}\sum_{\{\bar{\omega}_q \}_+}{\bar{J}^{\alpha}_{\bar{\omega}_q} },
\label{Ilaw_fast1}
\end{equation}
where $\bar{P}$ is the stationary power and if it is negative, it is supplied to the source of external driving.
\par
\textbf{Remark}\\
Because each $\mathcal{L}^{\alpha}_{\bar{\omega}_q}$ transforms diagonal (in $\bar{H}$ basis) elements of the density matrix into diagonal ones, the stationary state $\bar{\rho}$ is diagonal and hence the expressions for the stationary local heat currents and power involve only diagonal elements and the ``classical'' transition probabilities between them.
\subsection{ Carnot bound at steady state}
In the steady state regime the incoming and outgoing heat currents can be defined as follows
\begin{equation}
\bar{J}^{(+)} = \sum_{\{\alpha,\{\omega_q\}_+ ; {\bar{J}}^{\alpha}_{\omega_q} > 0\}}{\bar{J}}^{\alpha}_{\omega_q},\quad \bar{J}^{(-)} = \sum_{\{\alpha,\{\omega_q\}_+ ; {\bar{J}}^{\alpha}_{\omega_q} < 0\}}\bigl[-{\bar{J}}^{\alpha}_{\omega_q}\bigr].
\label{inout}
\end{equation}
We can introduce also effective ``hot/cold bath temperatures'' by averaging the inverse local temperatures with the weights proportional to incoming/outgoing heat currents
\begin{equation}
\frac{1}{T^{(+)}}= \sum_{\{\alpha,\{\omega_q\}_+ ; {\bar{J}}^{\alpha}_{\omega_q} > 0\}}\Bigl[\frac{{\bar{J}}^{\alpha}_{\omega_q}}{\bar{J}^{(+)}}\Bigr]\frac{1}{T_{\alpha}(\omega_q)}, \quad \frac{1}{T^{(-)}}= \sum_{\{\alpha,\{\omega_q\}_+ ; {\bar{J}}^{\alpha}_{\omega_q} < 0\}}\Bigl[\frac{{\bar{J}}^{\alpha}_{\omega_q}}{\bar{J}^{(-)}}\Bigr]\frac{1}{T_{\alpha}(\omega_q)}.
\label{hot_cold}
\end{equation}
Combining now \eqref{SIIlaw_fast1} - \eqref{hot_cold} and the standard notion of an efficiency of a heat engine one obtains the generalized Carnot bound
\begin{equation}
\eta = \frac{-\bar{P}}{\bar{J}^{(+)}}\leq 1-\frac{T^{(-)}}{T^{(+)}},
\label{efficiency}
\end{equation}
which again coincides with the standard one in the case of environment composed of two heat baths.
\section{Conclusions}
We showed that quantum machines weakly coupled to a single non-equilibrium stationary environment, and subject to fast periodic driving by work reservoirs, can be described by the thermodynamical principles and bounds which are very similar to the standard ones if only the proper definitions of the basic notions are used. In particular the notion of local temperature which depends not only on the state of environment but also on the form of system-environment coupling is crucial.
The developed non-equilibrium theoretical framework may also be used to describe the standard heat engine model, which operates under the interaction with two thermal baths. These can be effectively described as a single non-equilibrium bath. Therefore, we show that standard heat engines are just particular examples of non-equilibrium heat machines.
Starting from a bosonic thermal bath we showed how to obtain non-equilibrium baths by using different filters. Displacement or squeezing operations may also be used.
We found out that such non-equilibrium baths may be divided into two different types: (i)\textit{passive}, which equilibrate systems to different temperatures depending on their frequency but cannot drive heat engines, such baths can be obtained by frequency independent transformations of equilibrium ones; (ii) \textit{non-passive}, which in addition allow work extraction from a single bath and, in the case of bosonic reservoirs, can be engineered by frequency dependent transformations of equilibrium states. They are also farther away from equilibrium than passive baths.
A case where non-equilibrium bath is not stationary but, for example, is also perturbed by an external periodic driving is another interesting topic with possible applications.
A natural example is a spin-1/2 coupled to a spin-bath, both periodically perturbed by external magnetic field. It seems that the theory presented above can be extended to these cases as well.
\end{document}
\begin{document}
\begin{abstract}
We show that the exponent of distribution of the sequence of squarefree numbers in arithmetic progressions of prime modulus is $\geq 2/3 + 1/57$, improving a result of Prachar from 1958. Our main tool is an upper bound for certain bilinear sums of exponential sums which resemble Kloosterman sums, going beyond what can be obtained by the Polya-Vinogradov completion method.
\end{abstract}
\keywords{arithmetic progressions, exponential sums, exponent of distribution, squarefree numbers}
\subjclass[2010]{Primary 11N37; Secondary 11L05}
\maketitle
\section{Introduction and statement of results}
\subsection{Squarefree numbers in arithmetic progressions}
Let $\mu$ denote the M\"obius function, \textit{i.e.}~$\mu$ is the multiplicative function such that for every prime number $p$ and every positive integer $\alpha$, one has,
$$
\mu(p^{\alpha})=
\begin{cases}
-1,\,\text{if }\alpha=1,\\
\;\;\;0,\,\text{otherwise}.
\end{cases}
$$
We remark that $\mu^2(n)=1$ if $n$ is squarefree and $\mu^2(n)=0$ otherwise. In this paper we are concerned with the distribution of squarefree numbers in arithmetic progressions. By the above discussion, this is equivalent to studying the distribution of the $\mu^2$ function in arithmetic progressions.
In this direction, a result of Prachar \citep{prachar1958kleinste}, subsequently improved by Hooley \citep{hooley1975note} says that
\begin{equation}\label{Hoo-sqf}
\sum_{\substack{n\leq X\\n \equiv a\!\!\!\!\pmod q}}\mu^2(n) = \frac{1}{\varphi(q)}\sum_{\substack{n\leq X\\(n,q)=1}}\mu^2(n) + O\left(\frac{X^{1/2}}{q^{1/2}} + q^{1/2+\varepsilon}\right).
\end{equation}
It follows from the asymptotic formula \eqref{Hoo-sqf} that the sequence of squarefree numbers $\leq X$ is well distributed in arithmetic progressions modulo $q$ whenever
\begin{equation}\label{q<X23}
q\leq X^{2/3-\varepsilon},
\end{equation}
for some fixed positive $\varepsilon$. Even though it is largely believed that one should be able to replace $2/3$ by $1$, this constant has resisted any improvement since Prachar \citep{prachar1958kleinste}.
In \citep{nunes2015conjectures}, we were able to show a slight improvement, meaning that we proved that one can replace \eqref{q<X23} by $q\leq X^{2/3}(\log X)^{\delta}$, where $\delta$ is some small (but fixed) constant. The technique there was based on non-trivial upper bounds for exponential sums by Bourgain and Garaev. These upper bounds show cancellation in very short sums but the upper bound is only better than the trivial by some small power of the logarithm of the length of the sum, this is the reason for the rather modest improvement in \citep{nunes2015conjectures}.
Our main result proves that one can replace $2/3$ by $13/19 = 2/3 + 1/57$ in \eqref{q<X23}. Precisely, we have
\begin{thm}\label{2/3}
Let $\varepsilon>0$ and $A >0$. Then, uniformly for $X\geq 2$, integers $a$ and prime numbers $q$ coprime with $a$ satisfying
$$
q\leq X^{\frac{13}{19}-\varepsilon},
$$
we have
$$
\sum_{\substack{n\leq X\\n \equiv a\!\!\!\!\pmod q}}\mu^2(n) = \frac{1}{\varphi(q)}\sum_{\substack{n\leq X\\(n,q)=1}}\mu^2(n) + O\left(\frac{X}{q(\log X)^A}\right).
$$
In other terms, the value $\Theta=\frac{13}{19}$ is an exponent of distribution for the characteristic function of the sequence of squarefree numbers $\mu^2$ restricted to prime moduli.
\end{thm}
We believe it is helpful to compare this result with \citep[Theorem 1.1]{fouvry2015exponent} on the level of distribution of the ternary divisor function. In \citep{fouvry2015exponent}, one can see that Poisson summation and a straightforward application of the Deligne bound for two-dimensional Kloosterman sums would already give that the ternary divisor function on integers up to $X$ is well distributed in arithmetic progressions modulo $q\leq X^{1/2-\varepsilon}$. Improving the constant $1/2$ requires a way to get further cancellation than what comes from the Deligne bound and this is done by means of estimates of bilinear sums of Kloosterman sums.
In our case one sees that using only the Weil bound \eqref{Weil}, one can retrieve Hooley's result \eqref{Hoo-sqf} and again the way to get further cancellation is by means of estimates for sums of exponential sums. In the present case, the estimate needed is exactly that of Theorem \ref{expsum1} below.
In the following we discuss these sums of exponential sums from a general perspective before specializing to the case that interests us here.
\subsection{Sums of exponential sums}
Upper bounds for exponential sums play a major role in modern analytic number theory. The classical Weil bound for one-variable exponential sums states that for any prime number $q$, and any rational function $f\in \mathbb{Z}(X)$ satisfying some mild conditions, we have the upper bound
\begin{equation}\label{WeilKloo}
\sideset{}{^{\ast}}\sum_{x\!\!\!\!\pmod q}e_q(f(x))\ll q^{1/2},
\end{equation}
where the implied constant depends only on the number of roots and poles of $f$. Throughout the article, $e_q(x):=e^{2i\pi x/q}$, the $\ast$ means that we only sum over the $x$ that are not poles of $f$, and finally, $\bar x$ denotes the multiplicative inverse of $x$ modulo $q$.
A much deeper result of Deligne provides similar upper bounds for sums in several variables. Many problems in analytic number theory are reduced to obtaining estimates for exponential sums that follow directly from the Weil or the Deligne bound. However, in some problems, a straightforward application of these fails to give the desired result. One way of getting by is to take advantage of some extra summation that may be offered by the problem.
This is at the heart of a recent series of papers by Fouvry, Kowalski and Michel (\citep{fouvry2014algebraic}, \citep{fouvry2015algebraic}, \citep{fouvry2015exponent}, etc.). For instance, in \citep{fouvry2014algebraic} they prove upper bounds for sums such as
\begin{equation}\label{fkm1}
\sum_{M/2<m\leq M}\sum_{N/2<n\leq N}K(mn),
\end{equation}
where $K$ is a general \textit{algebraic trace function of bounded conductor} (see \citep{fouvry2014algebraic} for a precise statement and some examples). For instance, their result applies for hyper-Kloosterman sums, i.e.\ for $K(t)=\operatorname{Kl}_k(t)$, where
\begin{equation}\label{h-kloo}
\operatorname{Kl}_k(t):=
q^{-\frac{k-1}{2}}\underset{u_1\cdots u_k=t}{\sum\ldots\sum}e_q(u_1+\ldots+u_k),\text{ if }t\neq 0.
\end{equation}
We remark that the Deligne bound $|\operatorname{Kl}_k(t)|\leq k$ is already highly non-trivial and the upper bound from \citep{fouvry2014algebraic} says that we can get even further cancellation when averaging as in \eqref{fkm1}. We also mention that their results apply for functions such as $K_1(t)$ and $K_2(t)$ in definition \eqref{KK} below.
The upper bounds in \citep{fouvry2014algebraic} are non-trivial as soon as $MN\geq q^{3/4+\varepsilon}$. In particular one can take $M=N=q^{\theta}$ with $\theta<1/2$. This is an important threshold, since in general, a much simpler method, using orthogonality of characters, can give non-trivial upper bounds by only taking advantage of one of the sums. This method is usually called the completion method. See \citep{fouvry2015short} for discussions on this method and for some examples where one can go beyond this threshold for one-dimensional sums.
Sometimes one even needs to consider more general bilinear sums:
\begin{equation}\label{kms}
\sum_{m}\sum_{n}\alpha_m\beta_nK(mn),
\end{equation}
where $\bm{\alpha}=(\alpha_m)_m$ and $\bm{\beta}=(\beta_n)_n$ are sequences of complex numbers supported in $[M/2,M]$ and $[N/2,N]$ respectively. Note that the sum in \eqref{fkm1} corresponds to the sequences $\alpha_m=\bm{1}_{[M/2,M]}(m)$ and $\beta_n=\bm{1}_{[N/2,N]}(n)$, where, for $A\subset \mathbb{R}$, $\bm{1}_A$ denotes its characteristic function.
In \citep{blomer2014moments} and \citep{kowalski2015bilinear}, sums such as those in \eqref{kms} are studied in the case where $K(t)$ is a hyper-Kloosterman sum.
In this paper, we are led to study the following type of bilinear sums:
\begin{equation}\label{smooth-mn2}
\sum_{M/2<m\leq M}\sum_{N/2<n\leq N}K(mn^2).
\end{equation}
Here, again, our interest lies in ranges where $M,N\leq q^{\theta}$ for some $\theta<1/2$.
Notice that the sums in \eqref{smooth-mn2}, like those in \eqref{fkm1}, are \textit{smooth}, meaning that there are no annoying terms $\alpha_m$ or $\beta_n$. It is natural to think that the techniques of \citep{fouvry2014algebraic} could be adapted to our situation. Unfortunately this is not the case, at least not in a straightforward manner. The technique in \citep{fouvry2014algebraic} uses the spectral theory of modular forms and the fact that the divisor function
$$
d(t):=\sum_{mn=t}1
$$
has an interpretation in terms of Fourier coefficients of certain Eisenstein series. Due to the lack of interpretation in terms of modular forms for the function $d_{1,2}(t):=\sum_{mn^2=t}1$, we are not able to transpose the methods of \citep{fouvry2014algebraic} to our case. Instead we will follow the methods in \citep[Section 5]{blomer2014moments}, which are in turn inspired by those of \citep{fouvry1998certaines}.
We are now ready to state our main estimate on sums of exponential sums, but first we must define the $K$-functions in which we are interested. For a prime number $q$ and integers $m$ and $n$, we let
\begin{equation}\label{Smnq}
S(m,n;q):=\sideset{}{{}^{\ast}}\sum_{u\!\!\!\!\pmod q}e_q(m{\bar u}^2+nu).
\end{equation}
If $m$ is coprime with $q$, we have the Weil bound:
\begin{equation}\label{Weil}
|S(m,n;q)|\leq 3q^{1/2}.
\end{equation}
For a fixed prime number $q$ and integers $a$ and $b$ coprime to $q$, we introduce the normalized sums
\begin{equation}\label{KK}
K_1(t):=q^{-1/2}S(a,bt;q)\text{ and }K_2(t):=q^{-1/2}S(at,b;q),
\end{equation}
where $S(m,n;q)$ is as in \eqref{Smnq}. As far as the notation is concerned, we forget about the dependency on $a$, $b$ and $q$.
We prove the following:
\begin{thm}\label{expsum1}
Let $q$ be a prime number. Let $M, N\geq 1$ be such that
$$
1\leq M\leq N^2,\,\,N<q,\,\,MN^2< q^2.
$$
Let ${\bm{\alpha}}=(\alpha_m)_{m\leq M}$ be a sequence of complex numbers bounded by 1, and let $\mathcal{N}\subset [1,q-1]$ be an interval of length $N$. Finally, let $a$ and $b$ be coprime with $q$ and let $K_2(t)$ be given by \eqref{KK}. Then for any $\varepsilon>0$, we have
$$
\sum_{m\leq M}\sum_{n\in \mathcal{N}}\alpha_mK_2(mn^2)\ll q^{\varepsilon}\|\bm{\alpha}\|_1^{1/2}\|\bm{\alpha}\|_2^{1/2}M^{1/4}N\left(\frac{M^3N^6}{q^4}\right)^{-1/16},
$$
where the implied constant depends on $\varepsilon$, and where
$$
\|\bm{\alpha}\|_1=\sum_{m}|\alpha_m|\text{ and }\|\bm{\alpha}\|_2=\left(\sum_{m}|\alpha_m|^2\right)^{1/2}.
$$
\end{thm}
This can be thought of as an inhomogeneous version of \citep[Inequality (5.3)]{blomer2014moments} or \citep[Theorem 1.3]{kowalski2015bilinear}, where $K(mn)$ is replaced by $K(mn^2)$.
The proof of Theorem \ref{expsum1} will be intertwined with that of
\begin{thm}\label{expsum2}
Let $q$ be a prime number. Let $M, N\geq 1$ be such that
$$
1\leq M\leq N^2,\,\,N<q,\,\,MN< q^{3/2}.
$$
Let ${\bm{\alpha}}=(\alpha_m)_{m\leq M}$ be a sequence of complex numbers bounded by 1, and let $\mathcal{N}\subset [1,q-1]$ be an interval of length $N$. Finally, let $a$ and $b$ be coprime with $q$ and let $K_1(t)$ be given by \eqref{KK}. Then for any $\varepsilon>0$, we have
$$
\sum_{m\leq M}\sum_{n\in \mathcal{N}}\alpha_mK_1(mn)\ll q^{\varepsilon}\|\bm{\alpha}\|_1^{1/2}\|\bm{\alpha}\|_2^{1/2}M^{1/4}N\left(\frac{M^2N^5}{q^3}\right)^{-1/12},
$$
where the implied constant depends on $\varepsilon$.
\end{thm}
Notice that this is exactly \citep[Theorem 1.3]{kowalski2015bilinear} for our modified Kloosterman sum in \eqref{KK}.
To appreciate the strength of Theorems \ref{expsum1} and \ref{expsum2}, let us assume $\alpha_m=1$ for every $m\leq M$. In this case, the bound $\ll MN$ follows directly from \eqref{Weil}. The upper bound from Theorem \ref{expsum1} (respectively \ref{expsum2}) improves on this bound, for instance, when $M=N=q^{\theta}$ with $\theta>4/9$ (respectively $\theta>3/7$). The remarkable feature is that both $4/9$ and $3/7$ are smaller than $1/2$, meaning that our methods go beyond what can be obtained by the completion method.
\section*{Structure of the article}
In the next section we make some algebraic considerations that will be useful when verifying the necessary conditions to apply a result of Hooley (see Lemma \ref{Hoo} below). These results are mostly about when certain rational functions can be written as the square of another rational function. These considerations are a bit tedious but rather elementary and are mainly based on the partial fraction decomposition for rational functions.
The third section is dedicated to bounding bilinear sums. In particular, we prove Theorems \ref{expsum1} and \ref{expsum2}. Our approach is inspired by those in \citep{fouvry1998certaines} and \citep{blomer2014moments}. Indeed, the argument in \citep{blomer2014moments} adapts here almost straightforwardly. The only extra difficulty that comes up is that in our case we need to guarantee that certain rational functions are not squares, at which point we recur to the results from Section \ref{algebraic}.
Finally, Section \ref{proofof23} is dedicated to the proof of Theorem \ref{2/3}. The main ingredient here is, as we mentioned, Theorem \ref{expsum1}, but before we can use it, some preparation is necessary. The first thing we need is a bilinear structure for $\mu^2$. This is given by the classical formula \eqref{mu-decomp}. It turns out that the term $\mu(n_2)$ plays no role in studying the problem in Theorem \ref{2/3}, which is what allows for an application of Poisson summation in both variables. Finally, we conclude by applying Theorem \ref{expsum1}.
\section{Algebraic considerations}\label{algebraic}
Let $q$ be an odd prime number and let $\mathbb{F}_q$ be a finite field with $q$ elements that we identify with $\mathbb{Z}/q\mathbb{Z}$ whenever it is convenient. Finally, we fix $\overline{\mathbb{F}_q}$ an algebraic closure of $\mathbb{F}_q$.
The next three lemmas investigate when certain rational functions are squares. The first two are simple and follow almost directly by partial fraction decomposition. The third one is a bit more involved and will be deduced from the previous ones.
\begin{lem}\label{AB-sq}
Let $A,B,\rho_1,\rho_2\in\overline{\mathbb{F}_q}$ be such that $A$ and $B$ are non-zero and $\rho_1$ and $\rho_2$ are distinct. Then the rational function
$$
1+\frac{A}{(X-\rho_1)^2}+\frac{B}{(X-\rho_2)^2}
$$
is a square if and only if $A=B=(\rho_1-\rho_2)^2$.
\end{lem}
\begin{proof}
We start by noticing that
$$
\left(1+\frac{(\rho_1-\rho_2)}{(X-\rho_1)}-\frac{(\rho_1-\rho_2)}{(X-\rho_2)}\right)^2=1+\frac{(\rho_1-\rho_2)^2}{(X-\rho_1)^2}+\frac{(\rho_1-\rho_2)^2}{(X-\rho_2)^2}.
$$
On the other hand, suppose there exists $g(X)\in\overline{\mathbb{F}_q}(X)$ such that
\begin{equation}\label{AB=g2}
1+\frac{A}{(X-\rho_1)^2}+\frac{B}{(X-\rho_2)^2}=g(X)^2.
\end{equation}
We consider the partial fraction decomposition of $g(X)$. It is not difficult to see that the polynomial part of $g(X)$ must be constant and that $\rho_1$ and $\rho_2$ are the only poles of $g(X)$ and both are simple. In other words, we have
$$
g(X)=c_0 +\frac{c_1}{X-\rho_1}+\frac{c_2}{X-\rho_2},
$$
for some $c_0,\,c_1,\,c_2\in \overline{\mathbb{F}_q}$. Using the identity
\begin{equation}\label{easy-id}
\frac{1}{(X-\rho_1)(X-\rho_2)}=\frac{1}{\rho_1-\rho_2}\left(\frac{1}{X-\rho_1}-\frac{1}{X-\rho_2}\right),
\end{equation}
we see that
\begin{multline*}
g(X)^2=c_0^2+\frac{c_1^2}{(X-\rho_1)^2}+\frac{c_2^2}{(X-\rho_2)^2}+2c_1\left(c_0+\frac{c_2}{\rho_1-\rho_2}\right)\frac{1}{X-\rho_1}\\+2c_2\left(c_0-\frac{c_1}{\rho_1-\rho_2}\right)\frac{1}{X-\rho_2}.
\end{multline*}
Comparing it to \eqref{AB=g2}, we see that we must have
\begin{equation}\label{squaresAB}
c_0^2=1,\,c_1^2=A,\,c_2^2=B.
\end{equation}
In particular $c_1,c_2\neq 0$. Furthermore,
$$
c_1\left(c_0+\frac{c_2}{(\rho_1-\rho_2)}\right)=c_2\left(c_0-\frac{c_1}{(\rho_1-\rho_2)}\right)=0,
$$
which implies that $c_1=-c_2=c_0(\rho_1-\rho_2)$. Squaring this relation and comparing it to \eqref{squaresAB} concludes the proof.
\end{proof}
\begin{lem}\label{ABC-sq}
Let $A,B,C,\rho_1,\rho_2,\rho_3\in\overline{\mathbb{F}_q}$ be such that $A$, $B$ and $C$ are non-zero and $\rho_1$, $\rho_2$ and $\rho_3$ are distinct. If the rational fraction
$$
1+\frac{A}{(X-\rho_1)^2}+\frac{B}{(X-\rho_2)^2}+\frac{C}{(X-\rho_3)^2}
$$
is a square, then
$$
\frac{1}{\rho_1-\rho_2}+\frac{1}{\rho_2-\rho_3}+\frac{1}{\rho_3-\rho_1}=0.
$$
\end{lem}
\begin{proof}
Suppose there exists $g(X)\in\overline{\mathbb{F}_q}(X)$ such that
\begin{equation}\label{ABC=g2}
1+\frac{A}{(X-\rho_1)^2}+\frac{B}{(X-\rho_2)^2}+\frac{C}{(X-\rho_3)^2}=g(X)^2.
\end{equation}
We consider the partial fraction decomposition of $g(X)$ as before. We find out that
$$
g(X)=c_0 +\frac{c_1}{X-\rho_1}+\frac{c_2}{X-\rho_2}+\frac{c_3}{X-\rho_3},
$$
for some $c_0,\,c_1,\,c_2,\,c_3\in\overline{\mathbb{F}_q}$. Squaring both sides and using the identity \eqref{easy-id}, we obtain
\begin{multline}\label{squaresABC}
g(X)^2=c_0^2+\frac{c_1^2}{(X-\rho_1)^2}+\frac{c_2^2}{(X-\rho_2)^2}+\frac{c_3^2}{(X-\rho_3)^2}\\
+2c_1\left(c_0+\frac{c_2}{\rho_1-\rho_2}+\frac{c_3}{\rho_1-\rho_3}\right)\frac{1}{X-\rho_1}+2c_2\left(c_0-\frac{c_1}{\rho_1-\rho_2}+\frac{c_3}{\rho_2-\rho_3}\right)\frac{1}{X-\rho_2}\\
+2c_3\left(c_0-\frac{c_1}{\rho_1-\rho_3}-\frac{c_2}{\rho_2-\rho_3}\right)\frac{1}{X-\rho_3}.
\end{multline}
As before, we notice that $c_i\neq 0$, $i=0,1,2,3$. This implies that
$$
\begin{cases}
c_0+\frac{c_2}{\rho_1-\rho_2}+\frac{c_3}{\rho_1-\rho_3}=0,\\
c_0-\frac{c_1}{\rho_1-\rho_2}+\frac{c_3}{\rho_2-\rho_3}=0,\\
c_0-\frac{c_1}{\rho_1-\rho_3}-\frac{c_2}{\rho_2-\rho_3}=0.
\end{cases}
\\
$$
Multiplying these equations by $\frac{1}{\rho_2-\rho_3}$, $\frac{1}{\rho_3-\rho_1}$ and $\frac{1}{\rho_1-\rho_2}$ respectively and adding them up gives the result.
\end{proof}
In the proof of the next lemma, $C$ will always denote a non-zero constant that might be different at each appearance.
\begin{lem}\label{X-sq}
Let $\alpha$ and $\beta$ be elements of $\overline{\mathbb{F}}_q$. Let
$$
f_{\alpha,\beta}(X):=1+\frac{1}{X^2}-\frac{1}{(\alpha X+\beta)^2}-\frac{1}{((1-\alpha)X+(1-\beta))^2}.
$$
Then there exists a set $\mathcal{E}\subset {\overline{\mathbb{F}_q}}^2$ with $|\mathcal{E}|\leq 14$ such that for all $(\alpha,\beta)\in {\overline{\mathbb{F}_q}}^2\backslash \mathcal{E}$, the rational fraction $f_{\alpha,\beta}(X)$ is not a square in $\overline{\mathbb{F}_q}(X)$.
\end{lem}
\begin{proof}
Suppose there exists $g(X)\in\overline{\mathbb{F}_q}(X)$ such that
\begin{equation}\label{f=g2}
f_{\alpha,\beta}(X)=g(X)^2.
\end{equation}
\textbf{First case.} Suppose the polynomials $X$, $L(X)=\alpha X+\beta$ and $\tilde{L}(X)=(1-\alpha)X+(1-\beta)$ are non-constant and pairwise coprime.
In this case, Lemma \ref{ABC-sq} gives
\begin{equation}\label{usingABC}
\frac{\alpha}{\beta}+\frac{\alpha(1-\alpha)}{\alpha-\beta}-\frac{1-\alpha}{1-\beta}=0.
\end{equation}
We consider the partial fraction decomposition of $g(X)$. It is not difficult to see that the polynomial part of $g(X)$ must be constant and that the roots of $X$, $L(X)$ and $\tilde{L}(X)$ are the only poles of $g(X)$ and these poles are simple. In other words, we must have that
$$
g(X)=a +\frac{b}{X}+\frac{c}{L(X)}+\frac{d}{\tilde{L}(X)},
$$
for some $a,b,c,d \in \overline{\mathbb{F}_q}$. This and \eqref{f=g2} give
\begin{multline}\label{numerators}
X^2L(X)^2\tilde{L}(X)^2 +L(X)^2\tilde{L}(X)^2-X^2\tilde{L}(X)^2-X^2L(X)^2=\\
\big(aXL(X)\tilde{L}(X) +bL(X)\tilde{L}(X)+cX\tilde{L}(X)+dXL(X)\big)^2.
\end{multline}
In particular, $a^2=b^2=1$ and $c^2=d^2=-1$.
We remark that
\begin{equation}\label{L1XL}
L(X)-1=X-\tilde{L}(X),
\end{equation}
and since the left-hand side of \eqref{numerators} can be written as
$$
X^2\tilde{L}(X)^2(L(X)^2-1)-L(X)^2(X^2-\tilde{L}(X)^2),
$$
we see that it is divisible by $L(X)-1$. Hence the same holds for the right-hand side.
We notice that (recall \eqref{L1XL})
\begin{multline}
aXL(X)\tilde{L}(X) +bL(X)\tilde{L}(X)+cX\tilde{L}(X)+dXL(X)\equiv\\
(a+c)X^2 +(b+d)X\pmod {L(X)-1}.
\end{multline}
Therefore $L(X)-1$ divides $(a+c)X^2 +(b+d)X$. Since $L(X)-1=X-\tilde{L}(X)$, it follows that $L(X)-1$ is coprime with $X$, and thus $L(X)-1$ divides $(a+c)X +(b+d)$. Finally, since $a^2=1$ and $c^2=-1$, and $q$ is odd, we see that $a+c$ is non-zero. It follows from the above discussion that
$$
\frac{\beta-1}{\alpha}=\frac{b+d}{a+c}.
$$
Interchanging the roles of $L(X)$ and $\tilde{L}(X)$ in the above argument leads to
$$
\frac{-\beta}{1-\alpha}=\frac{b+c}{a+d}.
$$
Since $c^2=d^2=-1$, then either $c=d$, in which case
$$
\frac{\beta-1}{\alpha}=\frac{-\beta}{1-\alpha},
$$
and hence, $\alpha+\beta=1$.
On the other hand, if $c=-d$, then
$$
\frac{\beta-1}{\alpha}\frac{-\beta}{1-\alpha}=\frac{b-c}{a+c}\frac{b+c}{a-c}=1,
$$
in which case $\alpha(1-\alpha)=\beta(1-\beta)$. That is $\alpha=\beta$ or $\alpha+\beta=1$. Notice that $\alpha=\beta$ contradicts the hypothesis that $L(X)$ and $\tilde{L}(X)$ are coprime.
Suppose we have $\alpha+\beta=1$. Then, by \eqref{usingABC}, we see that
$$
\frac{\alpha}{1-\alpha}+\frac{\alpha(1-\alpha)}{2\alpha-1}-\frac{1-\alpha}{\alpha}=0,
$$
which implies $\alpha^4-2\alpha^3+5\alpha^2-4\alpha+1=0$. We put
$$
\mathcal{E}_1=\{(\alpha,1-\alpha)\in \overline{\mathbb{F}_q}^2;\; \alpha^4-2\alpha^3+5\alpha^2-4\alpha+1=0\},
$$
so that if we assume $\mathcal{E}_1\subset \mathcal{E}$, then we are done in this case.
\textbf{Second case.} Suppose now that $X$, $L(X)$, $\tilde{L}(X)$ are not pairwise coprime or one of them is constant.
There are a few cases to consider, namely $\alpha\in\{0,1\}$, $\beta\in\{0,1\}$ and $\alpha=\beta$.
\begin{itemize}
\item If $\alpha=0$.
\end{itemize}
Suppose further that $\beta\neq 0,1,-1$. In this case we have
$$
f_{\alpha,\beta}(X)=\left(1-\frac{1}{\beta^2}\right)\left(1+\frac{\beta^2}{\beta^2-1}\cdot\frac{1}{X^2}-\frac{\beta^2}{\beta^2-1}\cdot\frac{1}{(X+(1-\beta))^2}\right).
$$
But since $q$ is odd, Lemma \ref{AB-sq} implies that $f_{\alpha,\beta}(X)$ is not a square unless $(0,\beta)\in \mathcal{E}_2$, where
$$
\mathcal{E}_2=\{(0,-1),(0,0),(0,1)\}.
$$
\begin{itemize}
\item If $\alpha=1$.
\end{itemize}
Since $f_{\alpha,\beta}=f_{1-\alpha,1-\beta}$, it follows from the previous case that $f_{\alpha,\beta}(X)$ is not a square unless $(1,\beta)\in \mathcal{E}_3$, where
$$
\mathcal{E}_3=\{(1,0),(1,1),(1,2)\}.
$$
\begin{itemize}
\item If $\beta=0$.
\end{itemize}
Suppose further that $\alpha\neq 0,1$. In this case we have
$$
f_{\alpha,\beta}(X)=1+\left(1-\frac{1}{\alpha^2}\right)\frac{1}{X^2}-\frac{1}{(1-\alpha)^2}\cdot\frac{1}{(X-\frac{1}{\alpha-1})^2}.
$$
Lemma \ref{AB-sq} now says that if $f_{\alpha,\beta}$ is a square, then
$$
-\frac{1}{\alpha^2}=-\frac{1}{(1-\alpha)^2}=\frac{1}{(1-\alpha)^2}.
$$
And since $q$ is odd, this is impossible. So $f_{\alpha,\beta}(X)$ is not a square unless $(\alpha,0)
\in \mathcal{E}_4$, where
$$
\mathcal{E}_4=\{(0,0),(1,0)\}.
$$
$$
\begin{itemize}
\item If $\beta=1$.
\end{itemize}
Again, by using the identity $f_{\alpha,\beta}=f_{1-\alpha,1-\beta}$, it follows from the previous case that $f_{\alpha,\beta}(X)$ is not a square unless $(\alpha,1)\in \mathcal{E}_5$, where
$$
\mathcal{E}_5=\{(0,1),(1,1)\}.
$$
\begin{itemize}
\item If $\alpha=\beta$
\end{itemize}
Suppose further that $\alpha\neq 0,1$. In this case we have
$$
f_{\alpha,\beta}(X)=1+\frac{1}{X^2}-\left(\frac{1}{\alpha^2}+\frac{1}{(1-\alpha)^2}\right)\frac{1}{(X+1)^2}.
$$
Once again by Lemma \ref{AB-sq}, we have that $f_{\alpha,\beta}(X)$ is not a square unless
$$
1=-\left(\frac{1}{\alpha^2}+\frac{1}{(1-\alpha)^2}\right),
$$
which implies $\alpha^4-2\alpha^3+3\alpha^2-2\alpha+1=0.$ Thus it follows that $f_{\alpha,\beta}(X)$ is not a square unless $(\alpha,\alpha)\in \mathcal{E}_6$, where
$$
\mathcal{E}_6= \left\{(0,0),(1,1)\right\}\cup\{(\alpha,\alpha)\in \overline{\mathbb{F}_q}^2;\; \alpha^4-2\alpha^3+3\alpha^2-2\alpha+1=0\}.
$$
Then assuming $\mathcal{E}_6 \subset \mathcal{E}$ concludes this case.
Summarizing, putting $\mathcal{E}=\mathcal{E}_1\cup\ldots\cup\mathcal{E}_6$, so that $|\mathcal{E}|\leq 14$, we finish the proof of the lemma.
\end{proof}
We close this section with the following lemma, whose proof is to a large extent an adaptation of the argument in \citep[pages 27-29]{blomer2014moments}.
\begin{lem}\label{UV-sq}
Let $q$ be an odd prime and let $\alpha$, $\beta$, $h$ be elements of $\overline{\mathbb{F}}_q$. Let $F=F_{\alpha,\beta,h}$ be the rational function given by
$$
F(U,V):=\frac{1}{U^2}+\frac{1}{V^2}-\frac{1}{(\alpha U+\beta V +h)^2}-\frac{1}{((1-\alpha)U+(1-\beta)V - h)^2}.
$$
Then there exists a set $\mathcal{E}\subset {\overline{\mathbb{F}_q}}^2$ with $|\mathcal{E}|\leq 14$ such that for all $(\alpha,\beta)\in {\overline{\mathbb{F}_q}}^2\backslash \mathcal{E}$ and every $h \in \overline{\mathbb{F}_q}$, the rational function $F(U,V)$ is \textit{well-defined} and is not \textit{composed}. That is, we cannot write
$$
F=Q\circ P,
$$
where $P(U,V)\in \overline{\mathbb{F}}_q(U,V)$ and $Q(T)\in\overline{\mathbb{F}}_q(T)$ is not a fractional linear transformation.
\end{lem}
\begin{proof}
We start by making the birational change of variables
$$
X=U/V,\;Y=V.
$$
Thus we have
$$
F(XY,Y)=\frac{1}{X^2Y^2}+\frac{1}{Y^2}-\frac{1}{(YL(X)+h)^2}-\frac{1}{(Y\tilde{L}(X)-h)^2},
$$
where we put
$$
L(X)=\alpha X+\beta;\; \tilde{L}(X)=(1-\alpha)X+(1-\beta).
$$
We need to prove that if $(\alpha,\beta)\not\in \mathcal{E}$, then $F(XY,Y)$ cannot be expressed in the form
$$
\frac{Q_1\left(P_1(X,Y)/P_2(X,Y)\right)}{Q_2\left(P_1(X,Y)/P_2(X,Y)\right)},
$$
where $P_1(X,Y),P_2(X,Y)\in\overline{\mathbb{F}}_q[X,Y]$ are coprime polynomials and
$$
Q_1(T)=C\prod_{\lambda}(T-\lambda)^{m(\lambda)},\,Q_2(T)=\prod_{\mu}(T-\mu)^{m(\mu)}
$$
are also coprime. Here the products are taken over the roots of $Q_1$ and $Q_2$ respectively. Moreover $m(\lambda)$ and $m(\mu)$ denote the multiplicities of these roots. Let $q_1=\deg Q_1 = \sum_{\lambda}m(\lambda)$ and $q_2=\deg Q_2 = \sum_{\mu}m(\mu)$. We remark that we can always suppose that $q_1>q_2$. If this is not the case, we simply make the change of variables
$$
T\mapsto \mu_0 + \frac{1}{T'},
$$
where $\mu_0$ is any root of $Q_2(T)$.
We want to prove that $q_1=1$. We have
$$
F(XY,Y)=\frac{C\prod_{\lambda}(P_1(X,Y)-\lambda P_2(X,Y))^{m(\lambda)}}{P_2(X,Y)^{q_1-q_2}\prod_{\mu}(P_1(X,Y)-\mu P_2(X,Y))^{m(\mu)}}=:\frac{\operatorname{NUM}(X,Y)}{\operatorname{DEN}(X,Y)},
$$
with $\operatorname{NUM}(X,Y)$ and $\operatorname{DEN}(X,Y)$ coprime. On the other hand
\begin{equation}
F(XY,Y)=\frac{\operatorname{NUM}'(X,Y)}{\operatorname{DEN}'(X,Y)},
\end{equation}
where
\begin{multline*}
\operatorname{NUM}'(X,Y)=
(YL(X)+h)^2(Y\tilde{L}(X)-h)^2(X^2+1)\\
-X^2Y^2((YL(X)+h)^2+(Y\tilde{L}(X)-h)^2)
\end{multline*}
and
\begin{equation*}
\operatorname{DEN}'(X,Y)=X^2Y^2(YL(X)+h)^2(Y\tilde{L}(X)-h)^2 .
\end{equation*}
In what follows we distinguish two cases.
\textbf{Case I: }$h\neq 0$.\\
Since $X$, $Y$, $YL(X)+h$ and $Y\tilde{L}(X)-h$ are pairwise coprime, then $\operatorname{NUM}'(X,Y)$ and $\operatorname{DEN}'(X,Y)$ are coprime and hence equal $C\cdot\operatorname{NUM}(X,Y)$ and $C\cdot \operatorname{DEN}(X,Y)$ respectively. Comparing the expressions for $\operatorname{DEN}(X,Y)$ and $\operatorname{DEN}'(X,Y)$ we see that either $Y$ divides $P_2$, or it divides $P_1-\mu P_2$ for some $\mu$, a root of $Q_2$. In the second case, up to making a linear change of variables $T\mapsto T+\mu$, we can suppose $\mu=0$, $Y$ divides $P_1$ and $\lambda \neq 0$ for every $\lambda$ which is a root of $Q_1$. In any case we have that
$$
\operatorname{NUM}(X,0)=C\cdot P_1(X,0)^{q_1}\text{ or }C\cdot P_2(X,0)^{q_1}.
$$
But $\operatorname{NUM}(X,0)=C(X^2+1)$, thus $\operatorname{NUM}(X,0)$ only has simple roots. Therefore $q_1=1$. And since $q_2< q_1$, then $q_2=0$.
We proved that when $h\neq 0$, $F(U,V)$ is not composed for any $(\alpha,\beta)\in \overline{\mathbb{F}_q}^2$.
\textbf{Case II:} $h=0$.\\
In this case we have
\begin{equation}\label{h=0}
F(XY,Y)=\frac{L(X)^2\tilde{L}(X)^2(X^2+1)-X^2(L(X)^2+\tilde{L}(X)^2)}{X^2Y^2L(X)^2\tilde{L}(X)^2}.
\end{equation}
Suppose that $F(XY,Y)\neq 0$. Then we see that $\operatorname{NUM}(X,Y)$ must divide the numerator of the right-hand side of \eqref{h=0}. Hence it is independent of $Y$.
\begin{itemize}
\item Suppose $q_2>0$.
\end{itemize}
We notice that $Y$ must divide $\operatorname{DEN}(X,Y)$ and that $\operatorname{DEN}(X,Y)$ divides $X^2Y^2L(X)^2\tilde{L}(X)^2$. Since all the factors in
$$
P_2(X,Y)^{q_1-q_2}\prod_{\mu}\left(P_1(X,Y)-\mu P_2(X,Y)\right)
$$
are coprime, we see that one of them must be divisible by $Y$ and all the others must be independent of $Y$. Now by the same argument as above, we can suppose that $\lambda=0$ is not a root of $Q_1$ and that $P_1$ and $P_2$ are two non-zero polynomials such that one of which is divisible by $Y$ and the other is independent of $Y$. But this is not possible since
\begin{equation}\label{recallNUM}
\operatorname{NUM}(X,Y)=C\prod_{\lambda}(P_1(X,Y)-\lambda P_2(X,Y))^{m(\lambda)},
\end{equation}
and the left-hand side is independent of $Y$ and the right-hand side cannot be.
\begin{itemize}
\item Suppose now that $q_2=0$.
\end{itemize}
This case is more delicate. We have that $Y$ divides $P_2$. The fact that $\operatorname{NUM}(X,Y)$ is independent of $Y$ implies that the same holds for $(P_1(X,Y)-\lambda P_2(X,Y))$ for every $\lambda$ which is a root of $Q_1$. But this implies that $Q_1$ has a unique root. Indeed, if $\lambda\neq \lambda'$, then
$$
(P_1(X,Y)-\lambda P_2(X,Y)) - (P_1(X,Y)-\lambda' P_2(X,Y))=(\lambda-\lambda')P_2(X,Y)
$$
is non-zero and divisible by $Y$ so that it is not possible for both to be independent of $Y$. Therefore, up to making the linear change of variables $T\mapsto T+\lambda$, we have that
\begin{equation}\label{F=power}
F(XY,Y)=\frac{P_1(X,Y)^{q_1}}{P_2(X,Y)^{q_1}}.
\end{equation}
We notice that since $Y\mid P_2$ and $P_2^{q_1}\mid X^2Y^2L(X)^2\tilde{L}(X)^2$, we must have $q_1=1$ or $2$. We only have to rule out the case where $q_1=2$. That is, we need to ensure that
$$
F(XY,Y)= \frac{1}{Y^2}\left(1+\frac{1}{X^2}-\frac{1}{L(X)^2}-\frac{1}{\tilde{L}(X)^2}\right)
$$
is not a square in $\overline{\mathbb{F}_q}(X)$. But Lemma \ref{X-sq} precisely gives a set $\mathcal{E}$ whose cardinality is bounded by $14$ and such that if $(\alpha,\beta)\not\in \mathcal{E}$, then
$$
\left(1+\frac{1}{X^2}-\frac{1}{L(X)^2}-\frac{1}{\tilde{L}(X)^2}\right)
$$
is not a square. Thus the same holds for $F(XY,Y)$, which concludes this case.
Finally, we consider the case where $F(XY,Y)=0$. That is
$$
L(X)^2\tilde{L}(X)^2(X^2+1)-X^2(L(X)^2+\tilde{L}(X)^2)=0.
$$
By simply comparing the coefficients of degree 6 and 0, we see that this is only possible if $(\alpha,\beta)=(0,1)$ or $(1,0)$, both of which belong to the set $\mathcal{E}$ from Lemma \ref{X-sq}. This concludes the proof of Lemma \ref{UV-sq}.
\end{proof}
\section{Bounds for exponential sums}\label{exponentialsums}
In this section we prove the bounds for exponential sums in Theorems \ref{expsum1} and \ref{expsum2}. Let $q$ be an odd prime number. Let $j\geq 1$ be an integer, and let $M$ and $N$ be real numbers such that
$$
1\leq M\leq N^2,\,\,N<q,\,\,MN^j< q^{\frac{j+2}{2}}.
$$
Throughout this section we use the notation $x\sim X$ meaning the inequalities
$$
X/2<x\leq X.
$$
Let $\bm{\alpha}=(\alpha_m)$ be a sequence of complex numbers supported on $m\sim M$. Let $\mathcal{N}$ be an interval of length $N$. Let further $K:\mathbb{Z}\rightarrow \mathbb{C}$ be a bounded periodic function of period $q$.
Finally, we let $S_{K,j}=\mathcal{S}_{K,j}(\bm{\alpha},M,\mathcal{N})$ be given by
$$
\mathcal{S}_{K,j}=\mathcal{S}_{K,j}(\bm{\alpha},M,\mathcal{N}):=\sum_{m\leq M}\sum_{n\in \mathcal{N}}\alpha_m K(mn^j).
$$
A simple application of Cauchy's inequality gives
\begin{equation}\label{onlyWeil}
\mathcal{S}_{K,j}\ll (\|\bm{\alpha}\|_1\|\bm{\alpha}\|_2)^{1/2}M^{1/4}N\|K\|_{\infty},
\end{equation}
where $\|K\|_{\infty}$ denotes the maximum of $|K|$ (recall that $K$ is periodic). In what follows we show how to improve upon this estimate for some specific choices of $K$ and $j$. To do so, we use Vinogradov's ``shift by $ab$'' technique in the following manner. Let $A,B\geq 1$ be such that
\begin{equation}\label{necessary}
AB\leq N,\,\,A^jM< q.
\end{equation}
We have
\begin{align*}
\mathcal{S}_{K,j}&=\frac{1}{AB}\sum_{a\sim A}\sum_{b\sim B}\sum_{m\leq M}\sum_{n+ab\in \mathcal{N}}\alpha_m K(m(n+ab)^j)\\
&=\frac{1}{AB}\sum_{a\sim A}\sum_{b\sim B}\sum_{m\leq M}\sum_{n+ab\in \mathcal{N}}\alpha_m K(a^jm({\bar a}n+b)^j).
\end{align*}
Suppose $\mathcal{N}=[u,u']$ and let $g$ be an infinitely differentiable function supported on $[u-1,u'+1]$ such that $g(x)\geq 1$ for $x\in \mathcal{N}$ and
$$
g^{(\nu)}(x)\ll x^{-\nu},\,\, \nu=0,1,2.
$$
We deduce
\begin{equation}\label{estimatesfourier}
\widehat{g}(y)\ll \min(N, |y|^{-1},|y|^{-2}).
\end{equation}
Following the lines of \citep[p.116]{fouvry1998certaines}, we see that by Fourier inversion, we have that
\begin{align*}
|\mathcal{S}_{K,j}|&\leq \frac{1}{AB}\sum_{a\sim A}\sum_{m\leq M}\sum_{n\in \mathcal{N}'}\left|\alpha_m \sum_{b\sim B}K(a^jm({\bar a}n+b)^j)g(n+ab)\right|\\
&\leq \frac{1}{AB}\sum_{a\sim A}\sum_{m\leq M}\sum_{n\in \mathcal{N}'}\frac{\left|\alpha_m \right|}{a}\int_{\mathbb{R}}\left|\widehat{g}\left( t/a\right)\right|\left|\sum_{b\sim B}K(a^jm({\bar a}n+b)^j)e(-bt)\right|dt.
\end{align*}
Now by \eqref{estimatesfourier} and the upper bound
$$
\int_{\mathbb{R}}\min(N, |y|^{-1},|y|^{-2})dy \ll \log N \leq \log q,
$$
we see that, there exists $t\in \mathbb{R}$ such that
$$
\mathcal{S}_{K,j} \ll \frac{\log q}{AB}\sum_{a\sim A}\sum_{m\leq M}\sum_{n\in \mathcal{N}'}\left|\alpha_m \right| \left|\sum_{b\sim B}K(a^jm({\bar a}n+b)^j)e(-bt)\right|.
$$
We make the change of variables $s=a^jm$ and $r=\overline{a}n$. We obtain (for $\eta_b=e(-bt)$)
$$
\mathcal{S}_{K,j}\ll \frac{\log q}{AB}\sum_{r\!\!\!\!\pmod q}\sum_{s\leq A^jM}\nu(r,s)\left|\sum_{b\sim B}\eta_b K(s(r+b)^j)\right|,
$$
for
$$
\nu(r,s)=\underset{a^jm\equiv s,\,{\bar a}n\equiv r \!\!\!\!\pmod q}{\sum_{a\sim A}\sum_{m\leq M}\sum_{n\in \mathcal{N}'}}|\alpha_m|,
$$
where $\mathcal{N}'$ is an interval containing $\mathcal{N}$ of length $2N$ and $|\eta_b|\leq 1$. Now we see that exactly as in \citep[p.116]{fouvry1998certaines} or \citep[p.26]{blomer2014moments}, we have the inequalities
$$
\sum_{r,s}\nu(r,s)\ll AN\|\bm{\alpha}\|_1\text{ and }\sum_{r,s}\nu(r,s)^2\ll q^{\varepsilon}AN\|\bm{\alpha}\|_2^2.
$$
These bounds combined with another application of H\"older's inequality give
\begin{equation}\label{Holder}
AB\times \mathcal{S}_{K,j}\ll q^{\varepsilon}(AN)^{3/4}(\|\bm{\alpha}\|_1\|\bm{\alpha}\|_2)^{1/2}\left(\sum_{r\!\!\!\!\pmod q}\sum_{s\leq A^jM}\left|\sum_{b\sim B}{\eta}_b K(s(r+b)^j)\right|^4\right)^{1/4}.
\end{equation}
Expanding the fourth power, we see that the double sum over $r$ and $s$ can be written as
$$
\sum_{\bm{b}\in\mathcal{B}}\eta(\bm{b})\Sigma_j(K,\bm{b}),
$$
where $\mathcal{B}$ denotes the set of quadruples $\bm{b}=(b_1,b_2,b_3,b_4)$ such that $b_i\sim B$ for $1\leq i\leq 4$,
$$
\Sigma_j(K;\bm{b}):= \sum_{r\!\!\!\!\pmod q}\sum_{s\leq A^jM}K(s(r+b_1)^j)K(s(r+b_2)^j)\overline{K(s(r+b_3)^j)K(s(r+b_4)^j)},
$$
and the coefficients $\eta(\bm{b})$ satisfy $|\eta(\bm{b})|\leq 1$ for every $\bm{b}\in \mathcal{B}$.
We now proceed to estimate $\Sigma_j(K,\bm{b})$. In most cases we expect a lot of cancellation when we sum over $r$ and $s$ but for certain (diagonal) cases, we cannot expect this to happen (for example when $\{b_1,b_2\}=\{b_3,b_4\}$).
Let $\mathcal{B}^{\Delta}$ be a subset of $\mathcal{B}$ to be specified later and such that $\mathcal{B}^{\Delta}$ contains $\{\bm{b}\in \mathcal{B};\,(b_1,b_3)=(b_2,b_4)\}$. For those $\bm{b}\in \mathcal{B}^{\Delta}$, we do not seek cancellation when we sum over $r$ and $s$. We simply bound everything trivially:
\begin{equation}\label{diagonal}
\sum_{\bm{b}\in\mathcal{B}^{\Delta}}\eta(\bm{b})\Sigma_j(K,\bm{b}) \leq |\mathcal{B}^{\Delta}|\times A^jMq \times\|K\|_{\infty}^4.
\end{equation}
In the non-diagonal case, i.e. $\bm{b}\in \mathcal{B}\backslash \mathcal{B}^{\Delta}$, we complete the sum over $s$ using additive characters. We thus obtain
\begin{equation}\label{completion}
\Sigma_j(K,\bm{b})\ll (\log q)\max_{0\leq h< q}\Sigma_j(K,\bm{b},h),
\end{equation}
where
\begin{equation}\label{Kbh}
\Sigma_j(K;\bm{b},h):= \underset{r,s\!\!\!\!\pmod q}{\sum\sum}\prod_{i=1}^2K(s(r+b_i)^j)\overline{K(s(r+b_{i+2})^j)}e_q(hs).
\end{equation}
In the following we will prove square-root cancellation for most of the $\bm{b}\in\mathcal{B}$.
\begin{prop}\label{completesum}
Let $q$ be an odd prime number. Let $a$ and $b$ be coprime with $q$. Let $K_1$ and $K_2$ be given by \eqref{KK}. With notation as above, there exists a choice of $\mathcal{B}^{\Delta}$ satisfying $|\mathcal{B}^{\Delta}|\ll B^2$ such that for every $\bm{b}\in \mathcal{B}\backslash\mathcal{B}^{\Delta}$ and every $h\in \mathbb{F}_q$, we have the inequalities
\begin{equation}
\Sigma_1(K_1,\bm{b},h)\ll q\text{ and }\Sigma_2(K_2,\bm{b},h)\ll q,
\end{equation}
where the implied constants are absolute.
\end{prop}
\begin{rmk}
At this point it is important to notice that a simpler argument, based solely on the Weil bound for exponential sums over curves could give an upper bound $\ll q^{3/2}$ in the proposition above. However, even with optimal choices for $A$ and $B$ this would fail to give an improvement of \eqref{onlyWeil}.
\end{rmk}
\subsection{Reduction to a two-dimensional exponential sum}
From this point on, we need to specify the exact form of our $K$-function. The approach is slightly different in the two cases of Proposition \ref{completesum}.
\begin{itemize}
\item \textbf{Case $j=1$, $K=K_1$.}
\end{itemize}
We begin by considering the case with $j=1$. We recall that in this case we have
\begin{equation}\label{1stK}
K_1(t)=q^{-1/2}\sideset{}{{}^{\ast}}\sum_{u\!\!\!\!\pmod q}e_q\left(a{\bar u}^2+btu\right),
\end{equation}
where $a$ and $b$ are coprime with $q$. We use definition \eqref{1stK} in formula \eqref{Kbh} and perform the sum over $s$. There are two separate cases according to whether $u_1+u_2-u_3-u_4\neq 0$ or $u_1+u_2-u_3-u_4= 0$. The first part equals
\begin{equation}\label{uuuu0}
q^{-1}\underset{\substack{u_1,u_2,u_3,u_4\in \mathbb{F}_q\\ u_1+u_2-u_3-u_4\neq 0}}{\sum\sum\sum\sideset{}{{}^{\ast}}\sum}e_q\left( a\left({\bar u_1}^2+{\bar u_2}^2-{\bar u_3}^2-{\bar u_4}^2\right) \right)= \left(q|K_1(0)|^4 - \!\!\sum_{r\!\!\!\!\pmod q}|K_1(r)|^4\right)\ll q,
\end{equation}
by the Weil bound \eqref{Weil}. We may now focus on the second part, i.e. when $u_1+u_2-u_3-u_4=0$. We see from \eqref{uuuu0} that in the present case, Proposition \ref{completesum} is equivalent to the upper bound
\begin{equation}\label{notsimplified}
\underset{(u_1,u_2,u_3,u_4)\in W(\mathbb{F}_q)}{\sum\sum\sum\sideset{}{^{\ast}}\sum}e_q\Big(a\left({\bar u_1}^2+{\bar u_2}^2-{\bar u_3}^2-{\bar u_4}^2\right)\Big)\ll q,
\end{equation}
where the variety $W$ is given by the equations
$$
\begin{cases}
u_1+u_2-u_3-u_4=0\\
b_1u_1+b_2u_2-b_3u_3-b_4u_4=-\overline{b}h.
\end{cases}
$$
Assume that $\mathcal{B}^{\Delta}$ contains the set $\{\bm{b}\in \mathcal{B};\,(b_1,b_3)=(b_2,b_4)\}$. Then for every $\bm{b}\in \mathcal{B}\backslash \mathcal{B}^{\Delta}$ we either have $b_1\neq b_2$ or $b_3\neq b_4$. We assume that the second possibility holds. The other case is analogous.
Let
$$
\alpha=\frac{b_1-b_4}{b_3-b_4},\,\beta=\frac{b_2-b_4}{b_3-b_4},\,\lambda=\frac{{\bar b}h}{b_3-b_4}.
$$
Thus we can write the exponential sum on the left-hand side of \eqref{notsimplified} as
$$
\underset{u,v\!\!\!\!\pmod q}{\sum\sideset{}{^{\ast}}\sum}e_q\left(a\left({\overline{u}}^2+{\overline{v}}^2-{\overline{(\ell(u,v)-\lambda)}}^2-{\overline{(\tilde{\ell}(u,v)+\lambda)}}^2\right)\right),
$$
where
$$
\ell(u,v)=\alpha u+\beta v\text{ and }\tilde{\ell}(u,v)=(1-\alpha)u+(1-\beta)v.
$$
By Lemma \ref{Hoo} below, \eqref{notsimplified} will follow if, for instance, we can prove that the variety
\begin{equation}\label{W(t)}
W_{\lambda}(t):=\left\{(u,v)\in \overline{\mathbb{F}_q}^2;\; \left({\overline{u}}^2+{\overline{v}}^2-{\overline{(\ell(u,v)-\lambda)}}^2-{\overline{(\tilde{\ell}(u,v)+\lambda)}}^2\right)=t \right\}
\end{equation}
is an irreducible curve for all but finitely many $t\in \overline{\mathbb{F}_q}$. We argue that for a suitable choice of the set $\mathcal{B}^{\Delta}$ this is implied by Lemma \ref{UV-sq}. Indeed, let $\mathcal{E}$ be the finite set of exceptions given by Lemma \ref{UV-sq}. If
$\mathcal{B}^{\Delta}$ contains all the solutions of the linear system
\begin{equation}\label{system}
\begin{cases}
b_1=\alpha(b_3-b_4)+b_4\\
b_2=\beta(b_3-b_4)+b_4,
\end{cases}
\end{equation}
for every $\alpha,\beta\in \mathcal{E}$, then the rational function
$$
F(U,V):=\frac{1}{U^2}+\frac{1}{V^2}-\frac{1}{(\ell(U,V)-\lambda)^2}-\frac{1}{(\tilde{\ell}(U,V)+\lambda)^2}
$$
cannot be written as $Q\circ P$, where $P$ is a rational function in two variables and $Q$ is a rational function in one variable which is not a fractional linear transformation.
We argue that this implies that $W_{\lambda}(t)$ is an irreducible curve for all but finitely many $t\in\mathbb{F}_q$. Indeed, an argument based on L\"uroth's Theorem implies the desired result (see \citep[Proposition 2.1]{fouvry1998certaines} for details). We conclude this case by invoking the following result of Hooley (see \citep[Theorem 5]{hooley1980exponential}):
\begin{rmk}
Recall that we are considering the case where $b_3\neq b_4$. In order to take care of the case where $b_1\neq b_2$, we must also ask that $\mathcal{B}^{\Delta}$ contains all the solutions of the dual system obtained from \eqref{system} by replacing the roles of $(b_1,b_2)$ and $(b_3,b_4)$. Note that this at most doubles the size of the set of exceptions $\mathcal{B}^{\Delta}$.
\end{rmk}
\begin{lem}\label{Hoo}
Let $q$ be a prime number. Let $f(X_1,X_2,X_3)$ and $g(X_1,X_2,X_3)$ be two rational functions over $\mathbb{F}_q$ such that
\begin{enumerate}[i)]
\item The variety $W(t)$ defined by the equations $f(X_1,X_2,X_3)=t$ and $g(X_1,X_2,X_3)=0$ is generically an absolutely irreducible curve.
\item For every specialisation of $t$ in $\overline{\mathbb{F}_q}$, $W(t)$ is a (possibly reducible) curve.
\end{enumerate}
Then, we have the upper bound
$$
\sideset{}{{}^{\ast}}\sum_{\substack{X_1,X_2,X_3\!\!\!\!\pmod{q}\\g(X_1,X_2,X_3)\equiv 0 \!\!\!\!\pmod q}}e_q\left(f(X_1,X_2,X_3)\right)\ll q,
$$
where the implied constant depends at most on the degrees of the rational fractions $f$ and $g$.
\end{lem}
Lemmas \ref{UV-sq} and \ref{Hoo} now imply the upper bound \eqref{notsimplified} for a suitable choice of $\mathcal{B}^{\Delta}$.
\begin{itemize}
\item \textbf{Case $j=2$, $K=K_2$.}
\end{itemize}
We now turn our attention to the case relevant to Theorem \ref{expsum1}. Let
\begin{equation}\label{2ndK}
K_2(t):=q^{-1/2}\sideset{}{{}^{\ast}}\sum_{u\!\!\!\!\pmod q}e_q\left(at\bar{u}^2+bu\right),
\end{equation}
where $a$ and $b$ are coprime to $q$.
The first thing we notice is that if $t\neq0$, then by a linear change of variables, we have that
$$
K_2(st^2)=q^{-1/2}\sideset{}{{}^{\ast}}\sum_{u\!\!\!\!\pmod q}e_q\left(ab^2s\bar{u}^2+tu\right).
$$
By using it in \eqref{Kbh} and considering the cases where $r+b_i=0$ separately, we see that
\begin{equation}\label{K2develop}
\Sigma_2(K_2,\bm{b},h)=\underset{\substack{u_1,u_2,u_3,u_4\!\!\!\!\pmod q\\\ (u_1,u_2,u_3,u_4)\in V(\mathbb{F}_q)}}{\sum\sum\sum\sideset{}{^{\ast}}\sum}e_q\left(b_1u_1+b_2u_2-b_3u_3-b_4u_4\right) + O(q),
\end{equation}
where $V(\mathbb{F}_q)$ is the surface defined by the equations
$$
\begin{cases}
u_1+u_2-u_3-u_4=0\\ \overline{u_1}^2+\overline{u_2}^2-\overline{u_3}^2-\overline{u_4}^2=-{\overline{ab^2}}h.
\end{cases}
$$
The situation here resembles that of \citep[Theorem 1.1]{Fouvry2001general}, where very general exponential sums are considered. A direct application of their result would give a version of Proposition \ref{completesum} with the weaker bound $|\mathcal{B}^{\Delta}|\ll B^3$ for the set of exceptions.
It should still be possible to obtain Theorem \ref{expsum1} from this weaker bound but some extra work would be necessary.
We adopt a different, more elementary approach reducing to the previous case (\textit{i.e.} $K=K_1$ and $j=1$) that we discuss now.
As in the previous case, we can suppose that $b_3\neq b_4$, the case where $b_1\neq b_2$ being analogous. This allows us to write the sum on right-hand side of \eqref{K2develop} as
$$
\underset{\substack{u_1,u_2,u_3\!\!\!\!\pmod q\\\ \overline{u_1}^2+\overline{u_2}^2-\overline{u_3}^2-\overline{(u_1+u_2-u_3)}^2\equiv-{\overline{ab^2}}h\!\!\!\!\pmod q}}{\sum\sum\sideset{}{^{\ast}}\sum}\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!e_q\left((b_1-b_4)u_1+(b_2-b_4)u_2-(b_3-b_4)u_3\right).
$$
We need to prove that $\Sigma_2(K_2,\bm{b},h)\ll q$. By arguing exactly as before, it suffices to prove that for almost every $t\in\overline{\mathbb{F}_q}$, the variety $W'(t)$ defined by
$$
\begin{cases}
(b_1-b_4)u_1+(b_2-b_4)u_2-(b_3-b_4)u_3=t\\ \overline{u_1}^2+\overline{u_2}^2-\overline{u_3}^2-\overline{(u_1+u_2-u_3)}^2=-{\overline{ab^2}}h.
\end{cases}
$$
is an irreducible curve. Suppose $t\neq 0$. In this case, by making the change of variables $u_i\mapsto tu_i$, $i=1,2,3$ we see that $W'(t)$ is isomorphic to the variety $W''(-{\overline{ab^2}}ht^2)$, where $W''(t)$ is given by
$$
\begin{cases}
(b_1-b_4)u_1+(b_2-b_4)u_2-(b_3-b_4)u_3=1\\ \overline{u_1}^2+\overline{u_2}^2-\overline{u_3}^2-\overline{(u_1+u_2-u_3)}^2=t.
\end{cases}
$$
Let
$$
\alpha=\frac{b_1-b_4}{b_3-b_4},\,\beta=\frac{b_2-b_4}{b_3-b_4},\,\lambda=\frac{1}{b_3-b_4}.
$$
Then by forgetting variable $u_3$, we see that $W''(t)$ is isomorphic to $W_{\lambda}(t)$, where $W_{\lambda}(t)$ is the variety considered in the previous case and given by \eqref{W(t)}. But we already proved that, for every $\bm{b}\in \mathcal{B}\backslash\mathcal{B}^{\Delta}$, $W_{\lambda}(t)$ is an irreducible curve over $\overline{\mathbb{F}_q}$ for all but finitely many $t$. Thus, the inequality
\begin{equation}\label{desired}
\Sigma_2(K_2,\bm{b},h)\ll q
\end{equation}
also follows from Lemma \ref{Hoo} in this case, at least when $h\neq 0$.
Finally, if $h=0$, our goal is to modify the sum $\Sigma_2(K_2,\bm{b},h)$ by a change of variables and recover a case that was already considered before. We start by fixing $\xi$ any non-quadratic residue modulo $q$. Notice that for every $x\in \mathbb{Z}/q\mathbb{Z}$ there exists exactly two solutions to the equation
$$
x=\eta y^2,
$$
with $\eta\in\{1,\xi\}$ and $y\in \mathbb{Z}/q\mathbb{Z}$. With that in mind, we see that
$$
\Sigma_2(K_2;\bm{b},0)=\frac12\sum_{\eta\in\{1,\xi\}}\underset{r,s\!\!\!\!\pmod q}{\sum\sum}\prod_{i=1}^2K_2(\eta s^2(r+b_i)^2)\overline{K_2(\eta s^2(r+b_{i+2})^2)}.
$$
We see from definition \eqref{2ndK}, that whenever $t\neq 0$, we have the identity
$$
K_2(\eta t^2)=K_{1,\eta}(t),
$$
where $K_{1,\eta}$ is given by the right-hand-side of \eqref{1stK} with $a$ replaced by $\eta a$.
By treating the cases where $s(r+b_i)=0$ separately, we have that
\begin{align*}
\Sigma_2(K_2;\bm{b},0)&=\frac12\sum_{\eta\in\{1,\xi\}}\underset{r,s\!\!\!\!\pmod q}{\sum\sum}\prod_{i=1}^2K_{1,\eta}(s(r+b_i))\overline{K_{1,\eta}(s(r+b_{i+2}))}+O(q)\\
&=\frac12\sum_{\eta\in\{1,\xi\}}\Sigma_1(K_{1,\eta},\bm{b},0)+O(q).
\end{align*}
Therefore, \eqref{desired} for $h = 0$ follows from the first case considered above. This concludes the proof of Proposition \ref{completesum}, provided that we can choose $\mathcal{B}^{\Delta}$ so that $|\mathcal{B}^{\Delta}|\ll B^2$.
\subsection{The choice of $\mathcal{B}^{\Delta}$ and proof of Theorems \ref{expsum1} and \ref{expsum2}}
Let
$$
\mathcal{D}=\left\{\bm{b}\in\mathcal{B};\; (b_1,b_3)=(b_2,b_4)\right\}.
$$
Let $\mathcal{E}\subset \overline{\mathbb{F}_q}^2$ be the finite set given by Lemma \ref{UV-sq}. Then for each $(\alpha,\beta)\in \mathcal{E}\cap \mathbb{F}_q^2$, let $\mathcal{S}_{\alpha,\beta}$ be the set of solutions $\bm{b}\in\mathcal{B}$ of the linear system \eqref{system} and $\mathcal{S}_{\alpha,\beta}^{\ast}$ be the set of solutions to the dual system, obtained by replacing the roles of $(b_1,b_2)$ and $(b_3,b_4)$. Notice that
$$
\left|\mathcal{D}\right|,\,\left|\mathcal{S}_{\alpha,\beta}\right|,\,\left|\mathcal{S}_{\alpha,\beta}^{\ast}\right|\leq B^2.
$$
Finally, we put
$$
\mathcal{B}^{\Delta} = \mathcal{D}\cup \displaystyle\bigcup_{(\alpha,\beta)\in \mathcal{E}\cap \mathbb{F}_q^2} \left(\mathcal{S}_{\alpha,\beta} \cup \mathcal{S}_{\alpha,\beta}^{\ast}\right).
$$
Notice that this choice clearly satisfies the inequality
$$
\left|\mathcal{B}^{\Delta}\right|\leq 30B^2.
$$
As we saw, this was the last missing part in the proof of Proposition \ref{completesum}.
We must now put together the bounds for $\Sigma_j(K_j,\bm{b})$ in the cases where $\bm{b}\in \mathcal{B}^{\Delta}$ and $\bm{b}\in\mathcal{B}\backslash \mathcal{B}^{\Delta}$. Combining \eqref{diagonal}, \eqref{completion} and Proposition \ref{completesum}, we obtain
$$
\sum_{\bm{b}\in\mathcal{B}}\eta(\bm{b})\Sigma_j(K_j,\bm{b})\ll A^jB^2Mq+ B^4q\log q.
$$
The inequality \eqref{Holder} now gives
\begin{equation}\label{almostthere}
AB\times S_{K_j,j}\ll q^{\varepsilon}(AN)^{3/4}(\|\bm{\alpha}\|_1\|\bm{\alpha}\|_2)^{1/2}\left( A^jB^2Mq+ B^4q \right)^{1/4}.
\end{equation}
We make the choices
\begin{equation}\label{choices}
A=N^{\frac{2}{j+2}}M^{-\frac{1}{j+2}},\,\,B=N^{\frac{j}{j+2}}M^{\frac{1}{j+2}},
\end{equation}
so that the conditions \eqref{necessary} become equivalent to
$$
M\leq N^2\text{ and }MN^{j}\leq q^{\frac{j+2}{2}},
$$
which are part of the hypotheses in Theorems \ref{expsum1} and \ref{expsum2}. With the choices as in \eqref{choices}, Inequality \eqref{almostthere} becomes
$$
S_{K_j,j}\ll q^{\varepsilon}(\|\bm{\alpha}\|_1\|\bm{\alpha}\|_2)^{1/2}M^{1/4}N\left(\frac{q^{j+2}}{M^{j+1}N^{j+4}}\right)^{\frac{1}{4(j+2)}}\,\,(j=1,2),
$$
which proves both Theorem \ref{expsum1} and Theorem \ref{expsum2}.
\section{Proof of Theorem \ref{2/3}}\label{proofof23}
Let $q$ be a prime number, let $a$ be coprime with $q$ and $X\geq q$. We consider $E=E(X,q,a)$ given by
$$
E:=\sum_{\substack{n\leq X\\n\equiv a \!\!\!\!\pmod q}}\mu^2(n) - \frac{1}{\varphi(q)}\sum_{\substack{n\leq X\\(n,q)=1}}\mu^2(n).
$$
Our goal is to prove that for every $A>0$, we have the inequality $E\ll \frac{X}{q(\log X)^A}$
uniformly for $q\leq X^{13/19-\varepsilon}$, where the implied constant depends at most on $\varepsilon$ and $A$.
We use the classical identity
\begin{equation}\label{mu-decomp}
\mu^2(n)=\underset{\substack{n_1,n_2\geq 1\\n_1n_2^2=n}}{\sum\sum}\mu(n_2),
\end{equation}
giving
$$
E=\sum_{n\leq X^{1/2}}\mu(n)\Delta(X/n^2,q,a{\bar n}^2),
$$
where for every $x\geq 1$, $q$ integer and $a\in \mathbb{Z}/q\mathbb{Z}$,
$$
\Delta(x,q,a):=\sum_{\substack{m\leq x\\m\equiv a \!\!\!\!\pmod q}}1 - \frac{1}{\varphi(q)}\sum_{\substack{m\leq x\\(m,q)=1}}1.
$$
It is clear that for any $x,q,a$, we have
$$
\Delta(x,q,a) \ll 1.
$$
Let $N_0$ be a parameter to be chosen optimally later such that $1\leq N_0\leq X^{1/2}$. The previous inequality shows us that
\begin{equation}\label{N0-out}
E=\sum_{N_0< n\leq X^{1/2}}\mu(n)\Delta(X/n^2,q,a{\bar n}^2) + O(N_0).
\end{equation}
Notice that
\begin{align*}
\frac{1}{\varphi(q)}\sum_{N_0<n\leq X^{1/2}}\mu(n)
\sum_{m\leq X/n^2}1\ll\frac{X}{N_0q},
\end{align*}
since $q$ is a prime number. This and \eqref{N0-out} combined give
\begin{equation}\label{beforedyadic}
|E|\leq \sum_{N_0< n\leq X^{1/2}}\sum_{\substack{m\leq X/n^2\\m\equiv a{\bar n}^2\!\!\!\!\pmod q}}1 + O\left(N_0+\frac{X}{N_0 q}\right).
\end{equation}
We now proceed by means of a dyadic decomposition. Let $V$ be an infinitely differentiable function defined on the real line vanishing outside $[1/2,4]$ and identical to $1$ in $[1,2]$. If we put
\begin{equation}\label{SV}
S_V(M,N;q,a)=\underset{\substack{m,n\\ mn^2\equiv a\!\!\!\!\pmod{q}}}{\sum\sum}V\left(\frac mM\right)V\left(\frac nN\right),
\end{equation}
we deduce from \eqref{beforedyadic} the upper bound
$$
E\ll (\log X)^2\cdot \sup_{M,N}S_V\left(M,N;q,a\right) + N_0 + \frac{X}{N_0 q},
$$
where the supremum is taken over all $M$ and $N$ such that
\begin{equation}\label{cond1}
M,N\geq 1,\, N_0\leq N\leq 2X^{1/2},\,MN^2\leq 8X.
\end{equation}
Let $M_0\geq 1$ be a parameter to be chosen optimally later. Suppose that $M\leq M_0$ and that $M,N$ satisfy the conditions \eqref{cond1}. Then, by the crude estimate
$$
\sum_{n \equiv \alpha\!\!\!\!\pmod{q}}V(\frac{n}{N})\ll \left(\frac{N}{q} + 1\right),
$$
we see that
\begin{align*}
S_V(M,N;q,a)&\ll M\left(\frac{N}{q}+1\right)\\
&\ll \frac{X}{N_0q} + M_0.
\end{align*}
Suppose now that $MN^2\leq q^{\frac{101}{100}}$. In this case, we write $u=mn^2$ so that we obtain the inequality
$$
S_V(M,N;q,a)\ll \sum_{\substack{u\leq 8q^{\frac{101}{100}}\\ u\equiv a\!\!\!\!\pmod{q}}}d(u)\ll q^{\frac{1}{100} + \varepsilon},
$$
where we used the classical bound $d(n)\ll n^{\varepsilon}$ for every $\varepsilon>0$. Putting everything together we see that
\begin{equation}\label{E-sup}
E\ll (\log X)^2\sup_{M,N}S_V\left(M,N;q,a\right) +q^{\frac1{100}+\varepsilon} + M_0 + N_0 + \frac{X}{N_0 q},
\end{equation}
where now the supremum is taken over all $M$ and $N$ satisfying
\begin{equation}\label{cond2}
M\geq M_0,\, N\geq N_0,\,q^{\frac{101}{100}}\leq MN^2\leq 8X.
\end{equation}
In the next subsection, we will use Theorem \ref{expsum1} to estimate $S_V(M,N;q,a)$, but before doing that, we need some preparation. Indeed, we use Poisson summation in both variables and then we separate the contribution coming from the main terms.
\subsection{Double Poisson summation}
Let $q$ be a prime number and $a$ be coprime with $q$. Let $M$ and $N$ be real numbers satisfying \eqref{cond2}. Let $S_V(M,N;q,a)$ be given by \eqref{SV}, then by applying Poisson summation in both variables, we get
$$
S_V(M,N;q,a)=\frac{MN}{q^2}\underset{m,n}{\sum\sum}\widehat{V}\left(\frac{mM}{q}\right)\widehat{V}\left(\frac{nN}{q}\right)\sideset{}{^{\ast}}\sum_{u\!\!\!\!\pmod q}e_q\left(m{\bar u}^2+anu\right).
$$
We first notice that since $V$ is smooth, integrating by parts gives the inequalities
\begin{equation}\label{RapDecay}
\widehat{V}(x)\ll_j (1+|x|)^{-j}, \;x\in \mathbb{R},\, j=0,1,2,\ldots
\end{equation}
Hence, it follows that for every $\varepsilon>0$, the contribution of the terms where $|m|>q^{1+\varepsilon}M^{-1}$ or $|n|>q^{1+\varepsilon}N^{-1}$ is negligible. For instance, we have
\begin{equation}\label{useRapDecay}
S_V(M,N;q,a)=\frac{MN}{q^2}\underset{\substack{|m|\leq q^{1+\varepsilon}M^{-1}\\ |n|\leq q^{1+\varepsilon}N^{-1} }}{\sum\sum}\widehat{V}\left(\frac{mM}{q}\right)\widehat{V}\left(\frac{nN}{q}\right)S(m,an;q) + O(q^{-200}),
\end{equation}
where $S(m,an;q)$ is as defined in \eqref{Smnq}.
The contribution of the terms where $mn=0$ can also be estimated easily by directly computing the exponential sums and using the estimates \eqref{RapDecay} with $j=0$. Indeed, if $0<|m|,|n|<q$, we have the following identities:
\begin{equation}\label{calc}
\begin{cases}
|S(m,0;q)|=q^{1/2},\\
S(0,an;q)=-1,\\
S(0,0;q)=q-1,
\end{cases}
\end{equation}
since the first of these sums is a Gauss sum, the second one is a Ramanujan sum and the last one is a trivial sum.
Suppose $\varepsilon$ satisfies $M_0, N_0\geq q^{\varepsilon}$. We see from \eqref{calc} that one has the upper bound
$$
\underset{\substack{|m|\leq q^{1+\varepsilon}M^{-1},\, |n|\leq q^{1+\varepsilon}N^{-1} \\ mn=0}}{\sum\sum}\widehat{V}\left(\frac{mM}{q}\right)\widehat{V}\left(\frac{nN}{q}\right)S(m,an;q) \ll q^{3/2+\varepsilon}M^{-1} + q^{1+\varepsilon}N^{-1} + q.
$$
By \eqref{useRapDecay}, we see that
\begin{equation}\label{SV=T}
S_V(M,N;q,a)=\frac{MN}{q^2}\mathcal{T} + O\left(q^{\varepsilonilon}\left(\frac{N}{q^{1/2}} + \frac{MN}{q}\right)\right),
\end{equation}
where
\begin{align}\label{T=Teps}
\mathcal{T}&=\sum_{\varepsilon_1=\pm 1}\sum_{\varepsilon_2=\pm 1}\sum_{m=1}^{q^{1+\varepsilon}M^{-1}}\sum_{n=1}^{q^{1+\varepsilon}N^{-1}}\widehat{V}\left(\frac{\varepsilon_1mM}{q}\right)\widehat{V}\left(\frac{\varepsilon_2nN}{q}\right)S(\varepsilon_1m,\varepsilon_2an;q)\\
&=:\sum_{\varepsilon_1=\pm 1}\sum_{\varepsilon_2=\pm 1}\mathcal{T}_{\varepsilon_1,\varepsilon_2},\notag
\end{align}
say. We must now estimate $\mathcal{T}_{\varepsilon_1,\varepsilon_2}$. By integration by parts and the trivial upper bounds
$$
\widehat{V}(x),\,\widehat{V}'(x)\ll 1,
$$
we deduce the inequality
\begin{equation}\label{Teps}
\mathcal{T}_{\varepsilon_1,\varepsilon_2}\ll q^{2\varepsilon}\sup_{M^{\ast},N^{\ast}}\sum_{m=1}^{M^{\ast}}\sum_{n=1}^{N^{\ast}}S(\varepsilon_1m,\varepsilon_2an;q),
\end{equation}
where the supremum is taken over all $M^{\ast}$ and $N^{\ast}$ such that
\begin{equation}\label{cond*}
1\leq M^{\ast}\leq q^{1+\varepsilon}M^{-1},\,\, 1\leq N^{\ast}\leq q^{1+\varepsilon}N^{-1}.
\end{equation}
We are now ready to use Theorem \ref{expsum1}. We prove the following
\begin{prop}\label{majoration}
Let $q$ be a prime number. Let $a$ and $b$ be coprime with $q$. Let $M, N\geq 1$ be such that
$$
M,N<q,\,MN^2< q^2.
$$
Let $S(m,n;q)$ be as in \eqref{Smnq}. Then for any $\varepsilon>0$, we have
$$
\sum_{m\leq M}\sum_{n\leq N}S(am,bn;q)\ll MNq^{1/2+\varepsilon}\left(\frac{M^3N^6}{q^4}\right)^{-1/16} + M^{3/2}q^{1/2+\varepsilon},
$$
where the implied constant only depends on $\varepsilon$.
\end{prop}
\begin{proof}
There are two cases to consider. First, if $M\leq N^2$, the proposition follows from Theorem \ref{expsum1} with $\alpha_m=1$ for every $1\leq m\leq M$, $\mathcal{N}=[1,N]$ and
$$
K(t)=\frac{1}{q^{1/2}}S(at,b;q).
$$
Indeed, for $1\leq n<q$, we have
$$
S(am,bn;q)=S(amn^2,b;q).
$$
On the other hand, if $M>N^2$, a simple application of the Weil bound \eqref{Weil} gives
$$
\sum_{m\leq M}\sum_{n\leq N}S(m,n;q)\ll MNq^{1/2} \leq M^{3/2}q^{1/2}.
$$
This concludes the proof of the proposition.
\end{proof}
We want to apply this proposition to the right-hand side of \eqref{Teps}. In order to do so, we need to be sure that any $M^{\ast},\, N^{\ast}$ satisfying \eqref{cond*} will also satisfy the conditions of Proposition \ref{majoration}. It suffices to have
$$
M,N>q^{\varepsilon},\, MN^2> q^{1+3\varepsilon}.
$$
By \eqref{cond2}, this follows from the assumptions
\begin{equation}\label{lastcond}
M_0,N_0>q^{\varepsilon}\text{ and }\varepsilon<\frac{1}{300}.
\end{equation}
Assume \eqref{lastcond}. Then Proposition \ref{majoration} applied to the right-hand side of \eqref{Teps} gives
$$
\mathcal{T}_{\varepsilon_1,\varepsilon_2}\ll \frac{q^{5/2+\varepsilon}}{MN}\left(\frac{M^3N^6}{q^5}\right)^{1/16} + \frac{q^{2}}{M^{3/2}}.
$$
This together with \eqref{SV=T} and \eqref{T=Teps} gives
$$
S_V(M,N;q,a)\ll q^{\varepsilon}\left((MN^2)^{3/16}q^{3/16} + NM^{-1/2} + Nq^{-1/2} + MNq^{-1}\right).
$$
We now see from \eqref{E-sup} and \eqref{cond2} that we have the inequality
$$
E\ll q^{\varepsilon}\left(X^{3/16}q^{3/16} + \frac{X^{1/2}}{M_0} + \frac{X^{1/2}}{M_0^{1/2}q^{1/2}} + \frac{X}{N_0q} + q^{\frac1{100}} + M_0 + N_0\right).
$$
We make the choices (clearly satisfying \eqref{lastcond})
$$
M_0=X^{1/4}\text{ and }N_0=(X/q)^{1/2},
$$
thus obtaining
$$
E\ll q^{\varepsilon}\left(X^{3/16}q^{3/16} + X^{1/4} + X^{1/2}q^{-1/2} \right).
$$
It is now easy to see that for every $\varepsilon,A>0$, whenever $q\leq X^{13/19-\varepsilon}$ we have
$$
E\ll \frac{X}{q(\log X)^A}.
$$
We are now done proving Theorem \ref{2/3}.
\end{document} |
\begin{document}
\begin{abstract} We give bilateral pointwise estimates for positive solutions of the equation
\begin{equation*}
\left\{ \begin{aligned}
-\triangle u & = \omega u \, \,& & \mbox{in} \, \, \Omega, \quad u \ge 0, \\
u & = f \, \, & &\mbox{on} \, \, \partial \Omega ,
\end{aligned}
\right.
\end{equation*}
in a bounded uniform domain $\Omega\subset \mathbb{R}^n$, where $\omega$ is a locally finite Borel
measure in $\Omega$, and $f\ge 0$ is integrable with respect to harmonic measure $d H^{x}$ on $\partial\Omega$.
We also give sufficient
and matching necessary conditions for the existence of a positive solution in terms of the exponential
integrability of $M^{*} (m \omega)(z)=\int_\Omega M(x, z) m(x)\, d \omega (x)$ on $\partial\Omega$
with respect to $f \, d H^{x_0}$, where $M(x, \cdot)$ is Martin's function
with pole at $x_0\in \Omega, m(x)=\min (1, G(x, x_0))$, and $G$ is Green's function.
These results give bilateral bounds for the harmonic measure associated with the Schr\"{o}dinger operator $-\triangle - \omega $ on $\Omega$,
and in the case $f=1$, a criterion for the existence of the gauge function. Applications to elliptic equations of Riccati type with quadratic growth in the gradient
are given.
\end{abstract}
\maketitle
\eject
\tableofcontents
\section{Introduction}
Let $\Omega \subset \mathbb{R}^n$ ($n \geq 3$) be a nonempty, connected, open set (a domain).
It is called a non-tangentially accessible (NTA) domain if it is bounded, and satisfies both the interior and exterior corkscrew conditions, and the Harnack chain condition (\cite{JK}). For instance, any bounded Lipschitz domain is an NTA domain. The exterior corkscrew condition yields that any NTA domain is regular (in the sense of Wiener).
More generally, a uniform domain
is defined as a bounded domain which
satisfies the interior corkscrew condition and the Harnack chain condition.
Uniform domains satisfy the local (or uniform) boundary Harnack principle (\cite{Aik}; see \cite{An1}, \cite{JK} for Lipschitz and NTA domains). However, they are not necessarily regular.
Our main
results hold for bounded uniform domains, and the regularity of $\Omega$ is not used below.
In \cite{Ken}, a slightly more general version of an NTA domain $\Omega$ is defined as a uniform domain
of class $\mathcal{S}$ (Definition 1.1.20), i.e., satisfying the volume density condition, which ensures that $\Omega$ is a regular domain.
Most of our results, including Theorem \ref{mainufest} and Theorem \ref{gaugecrit} below, hold in this setup for uniformly elliptic operators in divergence form
$\mathcal{L} = \text{div} (A \nabla \cdot)$, with bounded measurable symmetric $A$,
in place of the Laplacian $\triangle$, as in \cite{JK}, p. 138 and \cite{Ken}, Sec. 1.3. The same class of operators $\mathcal{L}$ in uniform domains with Ahlfors regular boundary
can be covered as well (see \cite{Zha}).
In this paper, for simplicity, we consider mostly the case $n\ge 3$. In two dimensions, our results hold if $\Omega$ is a bounded finitely connected domain in $\mathbb{R}^2$, in particular, a bounded Lipschitz domain (see
\cite{CZ}, Theorem 6.23; \cite{Han1}, Remark 3.5).
Let $\omega$ be a locally finite Borel measure on $\Omega$ and let $f$ be a non-negative Borel measurable function on $\partial \Omega$. We consider the equation
\begin{equation}\label{ufeqn}
\left\{ \begin{aligned}
-\triangle u & = \omega u \, \,& & \mbox{in} \, \, \Omega, \quad u \ge 0, \\
u & = f \, \, & &\mbox{on} \, \, \partial \Omega.
\end{aligned}
\right.
\end{equation}
Solutions of \eqref{ufeqn} are understood either in the potential theoretic sense, or $d \omega$-a.e.
The precise definitions are discussed in \S \ref{sec2} below.
In the case of $C^2$ domains, or bounded Lipschitz domains $\Omega$, they
coincide with ``very weak'' solutions in the sense of Brezis (see \cite{BCMR}, \cite{FV2}, \cite{MV}, Sec. 1.2, \cite{MR}).
Let $\Omega \subseteq \mathbb{R}^n$ be a bounded domain with Green's function
$G(x,y)$; then $G$ is symmetric and strictly positive on $\Omega \times \Omega$. For a
Borel measure $\nu$ on $\Omega$,
\begin{equation} \label{Greenop}
G \nu (x) = \int_{\Omega} G(x,y) \, d\nu(y), \qquad x\in\Omega,
\end{equation}
is Green's operator. We call $G \nu$ the Green's potential if $G \nu\not\equiv +\infty$.
For a Borel measurable function $f$ on $\partial \Omega$,
define the harmonic extension $Pf$ of $f$ into $\Omega$ (the generalized solution to the Dirichlet problem) by
\begin{equation} \label{harm-rep}
Pf (x) = \int_{\partial \Omega} f(z) \, dH^x (z), \qquad x \in \Omega,
\end{equation}
where $dH^x$ is the harmonic measure at $x$, if the integral in \eqref{harm-rep} exists.
A solution $u$ to \eqref{ufeqn} satisfies, formally, the equation
\begin{equation} \label{form-sol}
u (x) = G(u \omega) (x) + Pf (x), \qquad x \in \Omega .
\end{equation}
We remark that if $u\not\equiv +\infty$ satisfies \eqref{form-sol},
then it is a superharmonic function in $\Omega$, and $Pf$ is its greatest harmonic minorant.
In particular, $u\in L^1_{loc}(\Omega, dx)\cap L^1_{loc} (\Omega, d\omega)$, and $u< +\infty$ q.e., that is, quasi-everywhere with respect to the Green capacity, see \cite{AG}, \cite{Lan}.
We also consider more general equations with an arbitrary positive harmonic function $h$ in place of $Pf$ (see
\S \ref{sec2} and \S \ref{sec3}), when irregular boundary points may come into play.
For an appropriate function $g$ on $\Omega$, we define
\begin{equation} \label{defT}
T g (x) = G(g \omega)(x) = \int_{\Omega} G(x,y) \, g(y) \, d \omega(y) ,
\end{equation}
for $x \in \Omega$, so that equation \eqref{form-sol} becomes $(I-T)u = Pf$, with formal solution
\begin{equation} \label{ufsolndef}
u_f = \sum_{j=0}^{\infty} T^j (Pf) .
\end{equation}
This \textit{minimal} solution $u_f$ of \eqref{ufeqn} satisfies
\begin{equation} \label{minsolnform}
u_f (x) = G(u_f \omega) (x) + Pf (x), \qquad x \in \Omega ,
\end{equation}
if $u_f\not\equiv +\infty$.
Under conditions which guarantee the finiteness of the right side of equation (\ref{ufsolndef}) (see Theorem \ref{mainufest} and Theorem \ref{gaugecrit}), we will see that $u_f$ defined by (\ref{ufsolndef}) gives a (generalized) solution of (\ref{ufeqn}).
It was shown in \cite{FV3}, Lemma 2.5, that the following are equivalent: for $\beta >0$,
\begin{equation} \label{normTless1}
T \,\, \mbox{is bounded on} \,\, L^2 (\Omega, \omega) \,\, \mbox{with} \,\, \Vert T \Vert = \Vert T \Vert_{L^2 (\Omega, \omega) \rightarrow L^2 (\Omega, \omega)} \le \beta^2
\end{equation}
and
\begin{equation} \label{equivnormTless1}
\Vert \varphi \Vert_{L^2 (\omega)} \leq \beta \, \Vert \nabla \varphi \Vert_{L^2 (dx)}, \,\, \mbox{for all} \,\, \varphi \in C^{\infty}_0 (\Omega) .
\end{equation}
Our results are expressed in terms of the \textit{Martin kernel} $M(x,z)$. In a bounded uniform domain $\Omega\subset\mathbb{R}^n$, the Martin boundary $\triangle$ is homeomorphic to the Euclidean boundary $\partial \Omega$ (\cite{Aik}, Corollary 3; see \cite{HW}, \cite{AG}, for a bounded Lipschitz domain, and \cite{JK}, \cite{Ken}
for an NTA domain.) Martin's kernel, defined with respect to a reference point $x_0 \in \Omega$, is given by
\begin{equation} \label{martin-K}
M(x, z)=\lim_{y\to z, \, \, y\in \Omega} \frac{G(x, y)}{G(x_0, y)}, \quad x \in \Omega, \, \, z \in \partial \Omega,
\end{equation}
where the limit exists, and is a minimal harmonic function in $x \in \Omega$. We will see in \S \ref{sec2} that
\begin{equation} \label{harmmeasiden}
dH^x (z) = M(x,z) \, dH^{x_0} (z) , \qquad (x, z) \in \Omega\times\partial \Omega ,
\end{equation}
for uniform domains (see \cite{HW}, p. 519; \cite{CZ}, p. 137 for Lipschitz domains;
\cite{JK}, pp. 104, 115 for NTA domains). Combining \eqref{harm-rep} and \eqref{harmmeasiden} yields
\begin{equation} \label{pf-rep-martin}
Pf (x) = \int_{\partial \Omega} M(x,z) \, f(z) \, dH^{x_0} (z), \quad x \in \Omega,
\end{equation}
for Borel measurable $f\ge 0$, whenever the integral exists. Hence, \eqref{ufsolndef} yields
\begin{equation} \label{ufrepresent}
u_f (x) = \int_{\partial \Omega} \sum_{j=0}^{\infty} T^j M(\cdot,z) (x)\, f(z) \, dH^{x_0} (z), \quad x \in \Omega.
\end{equation}
We define
\begin{equation} \label{Martin-schro}
\mathcal{M}(x, z)= \sum_{j=0}^{\infty} T^j M(\cdot, z) (x), \qquad (x, z) \in \Omega\times\partial \Omega ,
\end{equation}
and
\begin{equation} \label{harm-schro}
d \mathcal{H}^x(z) =\mathcal{M}(x, z) \, dH^{x_0} (z), \qquad (x, z) \in \Omega\times\partial \Omega.
\end{equation}
Then \eqref{ufrepresent} gives
\begin{equation} \label{ufrepresent2}
\begin{aligned}
u_f(x) &= \int_{\partial \Omega} \mathcal{M}(x,z)\, f(z) \, dH^{x_0} (z) \\ & = \int_{\partial\Omega} f(z) \, d \mathcal{H}^x(z), \qquad x\in \Omega.
\end{aligned}
\end{equation}
Comparing this last equation with equation \eqref{harm-rep}, we see that $ d \mathcal{H}^x$ is harmonic measure for the Schr\"{o}dinger operator $-\triangle -\omega$.
By \eqref{Martin-schro},
\begin{align*} \mathcal{M}(x,z) &= M(x,z) + \sum_{j=1}^{\infty} T^j M (\cdot, z) (x) \\
& = M(x,z) + T \mathcal{M}(\cdot, z)(x)\\
& = M(x,z) + G (\mathcal{M}(\cdot, z) \omega)(x).
\end{align*}
Hence $\mathcal{M}(x, z)$ is a superharmonic function of $x \in \Omega$, and $M(x,z)$ is its greatest harmonic minorant, for every $z\in \partial \Omega$, provided $\mathcal{M}(\cdot,z)\not\equiv\infty$. In fact, $\mathcal{M}(\cdot, z)$ is $\omega$-harmonic, i.e., it satisfies the Schr\"odinger equation
$-\triangle u=\omega \, u$ in $\Omega$.
Notice that $\mathcal{H}^x$ defined by \eqref{harm-schro} is not a probability measure on $\partial \Omega$ unless $\omega=0$. Letting $f\equiv 1$ on $\partial\Omega$, we see by \eqref{ufrepresent2} that $\mathcal{H}^x$ is a finite measure on $\partial \Omega$ if and only if
$u_1(x)<\infty$, where $u_1$ is the so-called gauge function defined by \eqref{gauge-def} below (see Corollary \ref{cor} for conditions under which $u_1<\infty$ $d\omega$-a.e.).
We remark that for the normalized version of $\mathcal{M}(x, z)$ defined by
\[
\widetilde{\mathcal{M}}(x, z)=\frac{\mathcal{M}(x, z)}{\mathcal{M}(x_0, z)}, \qquad (x, z) \in \Omega\times\partial \Omega ,
\]
where $x_0\in \Omega$ is to be chosen so that $\mathcal{M}(x_0, z)<\infty$ for every $z \in \partial \Omega$, we have
\[
d \mathcal{H}^x(z) = \widetilde{\mathcal{M}}(x, z)
\,
d \mathcal{H}^{x_0} (z), \qquad (x, z) \in \Omega\times\partial \Omega ,
\]
which is analogous to \eqref{harmmeasiden}. Obviously, $\widetilde{\mathcal{M}}(x_0, z)=1$, as for the unperturbed Martin's kernel $M(x,z)$. Moreover, \textit{formally} we have
\[
\widetilde{\mathcal{M}}(x, z)=\lim_{y\to z, \, y \in \Omega}\frac{\mathcal{G}(x, y)}{\mathcal{G}(x_0, y)},
\qquad (x, z) \in \Omega\times\partial \Omega ,
\]
where $\mathcal{G}(x, y)$ is the minimal Green's function associated with the
Schr\"{o}dinger operator $-\triangle -\omega$ (see \cite{FNV}). Thus,
$\widetilde{\mathcal{M}}(x, z)$
serves the role of the (normalized) Martin
kernel associated with the Schr\"{o}dinger operator $-\triangle -\omega$.
Nevertheless, we prefer to use the kernel $\mathcal{M}(x, z)$, since it does not exclude the case $\mathcal{M}(x_0, z)=\infty$, and is more convenient in applications. Pointwise estimates of
$\widetilde{\mathcal{M}}(x, z)$ are deduced easily from the estimates
of $\mathcal{M}(x, z)$ discussed below.
Our bilateral estimates of $\mathcal{M}(x, z)$ (see \eqref{upperTM} and \eqref{lowerTM} below)
are stated in terms of exponentials:
\begin{equation} \label{exp-term}
\begin{aligned}
M(x,z) & \, e^{\int_{\Omega} G(x,y) \, \frac{M(y,z)}{M(x,z)} \, d \omega (y)} \le \mathcal{M}(x, z)
\\ & \le M(x,z) \, e^{C\int_{\Omega} G(x,y) \, \frac{M(y,z)}{M(x,z)} \, d \omega (y)} ,
\end{aligned}
\end{equation}
for all $(x, z) \in \Omega\times\partial \Omega$,
with an appropriate constant $C>0$. We remark that
\[
\mathcal{M}(x, z) = U(x, z) \, M(x,z), \qquad (x, z) \in \Omega\times\partial \Omega ,
\]
where
\begin{equation} \label{cond-gauge-def}
U(x, z) = 1+ \frac{1}{M(x,z)}\sum_{j=1}^\infty T^j M(\cdot, z) (x) , \qquad (x, z) \in \Omega\times\partial \Omega ,
\end{equation}
is the so-called \textit{conditional gauge} (\cite{CZ}, Sec. 4.3).
From \eqref{exp-term} it is immediate that
\begin{equation} \label{exp-gauge}
e^{\int_{\Omega} G(x,y) \, \frac{M(y,z)}{M(x,z)} \, d \omega (y)} \le U(x, z)
\le e^{C\int_{\Omega} G(x,y) \, \frac{M(y,z)}{M(x,z)} \, d \omega (y)} ,
\end{equation}
for all $(x, z) \in \Omega\times\partial \Omega$. We emphasize that in the exponents of \eqref{exp-gauge}
we only use the \textit{first term} in the sum on the right-hand side of \eqref{cond-gauge-def}.
A probabilistic definition of the
conditional gauge in the case $d\omega =q \, dx$ ($q \in L^1_{loc} (\Omega)$) is provided by
\[
U(x, z)= {E}_z^{x} \left[ e^{\int_0^{\zeta} q(X_t) \, dt}\right], \qquad (x, z) \in \Omega\times\partial \Omega ,
\]
where $X_t$ is a path of the Brownian motion (properly scaled to
replace $\frac{1}{2} \triangle$ used in the probabilistic literature with $\triangle$) starting at $x$, ${E}_z^{x}$ is the conditional expectation conditioned on the event that $X_t$ exits $\Omega$ at $z\in \partial\Omega$, and $\zeta$ is the
time when $X_t$ first hits $z$. Properties of the conditional gauge for potentials $q$ in Kato's class
in a bounded Lipschitz domain $\Omega$ are discussed in \cite{CZ}, Ch. 7; in particular, $U(x, z)\approx 1$ if $U(x, z)\noindentt\equiv +\infty$.
For general $\omega \ge 0$, we clearly have $U(x, z)\ge 1$, but $U(x, z)$
is no longer uniformly bounded from above, even if $U(x, z) \noindentt\equiv +\infty$ and $||T||<1$. Consequently,
the so-called Conditional Gauge Theorem fails in this setup.
\begin{Thm} \label{mainufest} Let $\Omega\subset \mathbb{R}^n$ be a bounded uniform domain, $\omega$ a locally finite Borel measure on $\Omega$, and $f \geq 0$ a Borel measurable function on $\partial \Omega$.
(A) If $\Vert T \Vert <1$ (equivalently, (\ref{equivnormTless1}) holds with $\beta <1$), then there exists a positive constant $C$ depending only on $\Omega$ and $\Vert T \Vert$ such that
\begin{equation} \label{ptwiseupperbnd}
u_f (x) \leq \int_{\partial \Omega} e^{C \int_{\Omega} G(x,y) \frac{M(y,z)}{M(x,z)} d \omega (y)} f(z) \, dH^x(z), \quad x \in \Omega .
\end{equation}
(B) If $u$ is a positive solution of \eqref{ufeqn}, then $\Vert T \Vert \leq 1$ (equivalently, (\ref{equivnormTless1}) holds for some $\beta \leq 1$) and
\begin{equation} \label{ptwiselowerbnd}
u (x) \geq \int_{\partial \Omega} e^{\int_{\Omega} G(x,y) \frac{M(y,z)}{M(x,z)} d \omega (y)} f(z) \, dH^x(z), \quad x \in \Omega .
\end{equation}
\end{Thm}
In view of \eqref{ufrepresent2}, Theorem \ref{mainufest} gives estimates for the Schr\"{o}dinger harmonic measure $d \mathcal{H}^x$ in terms of the harmonic measure $dH^{x}$ for the Laplacian.
The solution $u_1$ of (\ref{ufeqn}), in the case where $f$ is identically $1$ on $\partial \Omega$, is called the (Feynman-Kac) \textit{gauge}:
\begin{equation} \label{gauge-def}
u_1 = 1 + \sum_{j=1}^{\infty} T^j 1 ,
\end{equation}
provided $u_1\not\equiv +\infty$. An equivalent probabilistic interpretation of the gauge when $d\omega =q(x) \, dx$ ($q \in L^1_{loc} (\Omega)$, $q \ge 0$) is given by (see \cite{CZ}, Sec. 4.3)
\[
u_1(x)= {E}^{x} \left[ e^{\int_0^{\tau_\Omega} q(X_t) \, dt}\right], \qquad x \in \Omega ,
\]
where $X_t$ is the Brownian path (properly scaled as above) starting at $x$, ${E}^{x} $ is the expectation operator, and $\tau_\Omega$ is the exit time from $\Omega$. Notice that $u_1$ given by \eqref{gauge-def} is related
to the conditional gauge $U(x,z)$ defined by \eqref{cond-gauge-def} via
the equation
\[
u_1(x)= \int_{\partial\Omega} U(x, z) \, dH^x(z), \qquad x \in \Omega .
\]
In particular,
\[
\inf_{z \in \partial \Omega} U(x, z) \le u_1(x ) \le\sup_{z \in \partial \Omega} U(x, z), \qquad x \in \Omega .
\]
The following theorem gives sufficient and matching necessary criteria for the existence of $u_f$. For Martin's kernel $M(x,z)$, we define the adjoint operator $M^*$ for a Borel measure $\mu$ on $\Omega$ by
\begin{equation} \label{defMstar}
M^* \mu (z) = \int_{\Omega} M(x,z) \, d \mu (x), \quad \mbox{for} \,\, z \in \partial \Omega.
\end{equation}
The role of $M^*$ in the following theorem is analogous to the role of the balayage operator $P^*$ in \cite{FV2} for $C^{1,1}$ domains $\Omega$, where
all integrals over $\partial \Omega$ are taken with respect to surface area in place of harmonic measure.
\begin{Thm} \label{gaugecrit} Suppose $ \Omega \subset \mathbb{R}^n$ is a bounded uniform domain,
$\omega$ is a locally finite Borel measure on $\Omega$, and $f \geq 0$ ($f$ not a.e. $0$ with respect to harmonic measure) is a Borel measurable function on $\partial \Omega$. Let $x_0 \in \Omega$ be the reference point in the definition of Martin's kernel. Let $m(x) = \min (1, G(x,x_0))$.
(A) There exists $C>0$ ($C$ depending only on $\Omega$ and $\Vert T \Vert$) such that if $\Vert T \Vert <1$ (equivalently, (\ref{equivnormTless1}) holds with $\beta <1$) and
\begin{equation} \label{martincritsuff}
\int_{\partial \Omega} e^{C M^* (m \omega)} \, f \, dH^{x_0} < \infty ,
\end{equation}
then $u_f \in L^1_{loc} (\Omega, dx)$.
(B) If $u_f \in L^1_{loc} (\Omega, dx) $, then $\Vert T \Vert \leq 1$ and
\begin{equation} \label{martincritnec}
\int_{\partial \Omega} e^{M^* (m \omega)} \, f \, dH^{x_0} < \infty .
\end{equation}
\end{Thm}
\noindent {\bf Remark.}
More general results for equation (\ref{form-sol}) with an arbitrary positive harmonic function $h$ in place of $Pf$, in terms of
Martin's representation, are given in Theorem \ref{mainu-harm} and Theorem \ref{u_h-exist} below.
For $C^{1,1}$ domains $\Omega$ and absolutely continuous $\omega$, Theorem \ref{mainufest} and an analogue of Theorem \ref{gaugecrit} were proved in the special case $f=1$ in \cite{FV2}, Theorem 1.2. To see this observation, note that for a $C^{1,1}$ domain, $M(x, z) = P(x,z)/P(x_0, z)$, by \eqref{harmmeasiden}, which shows that inequalities (1.12) and (1.14) in \cite{FV2} follow from Theorem \ref{mainufest} above. To see that (1.10) and (1.13) in \cite{FV2} follow from Theorem \ref{gaugecrit}, choose $x_0$ with dist$(x_0, \partial \Omega) > \delta$, where $0< \delta < \mbox{diam} ( \Omega) /2$, so that $P(x_0, z)$ is equivalent to a constant depending only on $\Omega$. An extension to the case of uniform domains of the criteria in \cite{FV2} for the existence of the nontrivial gauge ($u_1\not\equiv +\infty$) is provided by the following corollary.
\begin{Cor} \label{cor} Suppose $ \Omega \subset \mathbb{R}^n$ is a bounded uniform domain, and $\omega$ is a locally finite Borel measure on $\Omega$. Let $x_0 \in \Omega$ be the reference point in the definition of Martin's kernel, and $m(x) = \min (1, G(x,x_0))$.
(A) There exists $C>0$ ($C$ depending only on $\Omega$ and $\Vert T \Vert$) such that if $\Vert T \Vert <1$ and
\begin{equation} \label{martincritsuff-g}
\int_{\partial \Omega} e^{C M^* (m \omega)} \, dH^{x_0} < \infty ,
\end{equation}
then the gauge $u_1$ is nontrivial.
(B) If the gauge $u_1$ is nontrivial, then $\Vert T \Vert \leq 1$ and
\begin{equation} \label{martincritnec-g}
\int_{\partial \Omega} e^{M^* (m \omega)} \, dH^{x_0} < \infty .
\end{equation}
\end{Cor}
As an application of Corollary \ref{cor}, we consider elliptic equations of Riccati type with quadratic growth in the gradient,
\begin{equation}\label{nonlineareqn-1}
\left\{
\begin{aligned}
-\triangle v & = |\nabla v|\,^2 + \omega \, \, & \mbox{in} \, \, \Omega \\
v & = 0 \quad & \mbox{on} \, \, \partial \Omega
\end{aligned}
\right.
\end{equation}
for locally finite Borel measures $\omega$,
in bounded uniform domains $\Omega \subset \mathbb{R}^n$. Although \eqref{nonlineareqn-1} is formally related to equation \eqref{ufeqn} with $f=1$ by the relation $v = \log u$, it is well-known that this formal relation is not sufficient to guarantee equivalence of the two equations (see \S4). Nevertheless we obtain the following result.
\begin{Thm}\label{riccatithm} Suppose $\Omega \subset \mathbb{R}^n$ is a bounded uniform domain, and $\omega$ is a locally finite Borel measure in $\Omega$.
(A) Suppose $||T||<1$, or equivalently (\ref{equivnormTless1}) holds with $\beta<1$, and (\ref{martincritsuff-g}) holds with a large enough constant $C>0$ (depending only on $\Omega$ and $||T||$). Then $v= \log u_1 \in W^{1,2}_{loc} (\Omega)$ is a weak solution of (\ref{nonlineareqn-1}).
(B) Conversely, if (\ref{nonlineareqn-1}) has a weak solution $v\in W^{1,2}_{loc} (\Omega)$, then
$u=e^v$ is a supersolution to
(\ref{ufeqn}) with $f=1$, i.e., $u\ge G(\omega u) +1$.
Moreover, $||T||\le 1$,
or equivalently (\ref{equivnormTless1})
holds with $\beta = 1$, and (\ref{martincritnec-g}) holds.
\end{Thm}
\noindent{\bf Remarks.} 1. In Theorem \ref{gaugecrit}, $u_f \in L^1_{loc} (\Omega, dx)$ actually yields
$u_f \in L^1(\Omega, m dx)\cap L^1(\Omega, m d \omega)$, or equivalently
$G (u_f \omega) \not\equiv+\infty$.
2. For bounded Lipschitz domains $\Omega$, $u_1$ is a ``very weak'' solution in the sense of \cite{MR}. More precisely,
$u=u_1 -1 $ is a ``very weak'' solution
to $-\triangle u = \omega u + \omega$ with $u=0$ on $\partial \Omega$. Here one can use $\phi_1$ in place
of $m$, where $\phi_1$ is the first eigenfunction of the Dirichlet Laplacian in $\Omega$ (see
\cite{AAC}, Lemma 3.2). Then
$u_1 \in L^1(\Omega, \phi_1 dx)$ and $\int_\Omega \phi_1 \, d \omega<+\infty$.
3. Our main results for uniform domains $\Omega$ are based on the exponential bounds for Green's function $\mathcal{G}(x,y)$ (see Theorem \ref{FNVTheorem} below) obtained in \cite{FNV}. Here $\mathcal{G}(x,y)$ is the kernel of
the operator $(I-T)^{-1}$
defined by \eqref{defGreenSchr}, where $T$ is an integral operator with positive quasi-metric kernel.
The case of $C^{1,1}$ domains $\Omega$ and $d \omega =q \, dx$, where $q\in L^1_{loc}(\Omega, dx)$, was treated earlier in \cite{FV1} for
small $||T||$, and in \cite{FV2} for $||T||<1$.
4. In the special case of Kato class potentials, or more generally, $G$-bounded perturbations $\omega$ for the Schr\"{o}dinger
operator $-\triangle -\omega$, it is known that $\mathcal{G}(x,y)\approx G(x, y)$. In this case,
the gauge $u_1$ exists, and is uniformly bounded, if and only if $||T||<1$ (see \cite{CZ}, \cite{Han1}, \cite{Pin}).
5. For the fractional Schr\"{o}dinger operator $(-\triangle)^{\frac{\alpha}{2}} -\omega$, criteria of the existence of the gauge $u_1$ in the case $0<\alpha<2$ were obtained in
\cite{FV3}. They are quite different from Corollary \ref{cor}
and require no extra boundary restrictions on $\Omega$ like \eqref{martincritsuff-g},
\eqref{martincritnec-g} in the case $\alpha=2$.
\section{Pointwise estimates for $u_f$}\label{sec2}
Recall that the Martin kernel is defined by \eqref{martin-K}. Then
$M(x, z)$ is a H\"older continuous function in $z\in \partial \Omega$ (\cite{Aik}, Theorem 3).
It is worth mentioning that in uniform domains, harmonic measure may vanish on some surface balls, and so the Radon-Nikodym derivative formula $M(x,z)=\frac{dH^x }{dH^{x_0}}(z)$, which holds for NTA domains, is no longer available as a means to recover \eqref{martin-K} at every point $z \in \partial \Omega$. Instead, it can be determined via \eqref{martin-K}, so that \eqref{harmmeasiden} still holds (see \cite{Aik}, p. 122).
In this case, the Martin representation for every nonnegative harmonic function $h$ in $\Omega$ can be expressed
in the form
\begin{equation} \label{martin-rep}
h(x)= \int_{\partial \Omega} M(x,z) \, d \mu_h (z), \qquad x \in \Omega,
\end{equation}
where $\mu_h$ is a finite Borel measure on $\partial \Omega$ uniquely determined by $h$.
The connection between Martin's kernel and harmonic measure in a uniform domain is provided by the equation (see \cite{Aik}, p. 142):
\begin{equation} \label{hm-mu1}
dH^x (z) = M(x,z) \, d \mu_1 (z), \qquad x \in \Omega, \, z \in \partial \Omega.
\end{equation}
Here $\mu_1$ is the representing measure in \eqref{martin-rep} for the function $h \equiv 1$.
Equation \eqref{hm-mu1} can be justified using \cite{AG}, Theorem 9.1.7
(in the special case $h \equiv 1$) for a bounded domain whose Martin boundary $\triangle$ is identified
with $\partial \Omega$. It yields that, for every $f \in C(\partial \Omega)$, its harmonic extension $Pf$
via harmonic measure \eqref{harm-rep} can be represented in the form
\begin{equation} \label{pf-rep}
Pf (x) = \int_{\partial \Omega} M(x,z) \, f(z) \, d \mu_1 (z), \quad x \in \Omega.
\end{equation}
By the uniqueness of the representing measure in \eqref{harm-rep}
for all $f \in C(\partial \Omega)$, it follows that \eqref{hm-mu1} holds.
In particular, since $M(x_0,z) =1$ for all $z \in \partial \Omega$,
letting $x=x_0$ in \eqref{hm-mu1} yields $dH^{x_0}= d \mu_1$,
and consequently \eqref{harmmeasiden} holds.
Let $\Omega$ be a bounded uniform domain in $\mathbb{R}^n$, $\omega$ a
finite Borel measure in $\Omega$, and $f\ge 0$ a Borel measurable function in $\partial \Omega$ integrable with respect to harmonic measure.
We consider solutions $u$ to \eqref{ufeqn} understood in the \textit{potential theoretic} sense.
Namely, a function $u: \Omega \rightarrow [0, +\infty]$ is said to be a solution to \eqref{ufeqn} if
$u$ is \textit{superharmonic} in $\Omega$ ($u\not\equiv+\infty$), and
\begin{equation} \label{u-def}
u(x) = G(u \omega)(x) + Pf (x), \qquad \text{for all} \, \, x \in \Omega ,
\end{equation}
where $Pf$ is the harmonic function defined by \eqref{harm-rep}. Then $Pf$ is the greatest harmonic minorant of $u$, and
$u \in L_{loc}^1(\Omega, \omega)$, so that $u \, d \omega$ is the associated Riesz measure of $u$, where
$-\triangle u = \omega u$
in the distributional sense.
In fact, if
a potential theoretic solution to \eqref{u-def} exists, then $u \in L^1(\Omega, m \omega)$, where $m(x)=\min (1, G(x, x_0))$
for some $x_0 \in \Omega$;
otherwise $G(u \omega)\equiv +\infty$ (see \cite{AG}, Theorem 4.2.4).
We note that all potential theoretic solutions are by definition
lower semicontinuous functions in $\Omega$. For a superharmonic function $u$, it is enough to require that equation \eqref{u-def} holds $dx$-a.e. Moreover, in a bounded uniform domain, any
potential theoretic solution $u\in L^1(\Omega, m dx)$. This is not difficult to see using the estimate
$G (m dx) \le C \, m$ in $\Omega$, which is a consequence of the
so-called $3$-G inequality (see \cite{CZ}, \cite{Han1}, \cite{Han2}, \cite{Pin}). We remark that in $C^2$ domains $Pf$ is the Poisson integral, and
in fact $u\in L^1(\Omega, dx)$ (see \cite{FV2}, \cite{MV}, Theorem 1.2.). The latter
is no longer true for bounded Lipschitz domains (see, e.g., \cite{MR}).
Another useful way to define a solution of \eqref{ufeqn} is to require that \eqref{u-def} hold
$d \omega$-a.e. More precisely, a measurable function $0\le u <+\infty$ $d\omega$-a.e. is said to be a solution
of \eqref{ufeqn} with respect to $\omega$ if
\begin{equation} \label{u-omega}
u = G(u \omega) + Pf \qquad d \omega\text{-a.e.} \, \, \text{in}\,\, \Omega .
\end{equation}
If such a solution exists, then obviously $u \in L^1_{loc} (\Omega, \omega)$, and in fact, as above, $u \in L^1(\Omega, m \omega)$.
We remark that if $f \neq 0$ (with respect to $d H^{x}$), and \eqref{u-omega} has a positive solution in this sense, then $||T||\le 1$ by Schur's lemma,
and consequently \eqref{equivnormTless1} holds for $\beta=1$. It follows that
$\omega (K) \le \text{cap} (K)$ for any compact set $K \subset \Omega$. In particular,
$\omega$ must be absolutely continuous with respect to the Green (or Wiener) capacity, i.e.,
\begin{equation} \label{abs-cap}
\text{cap} (K)=0 \, \Longrightarrow \, \omega (K)=0.
\end{equation}
(See details in \cite{FNV}, \cite{FV2}.)
A connection between these two approaches is provided by the following claim used below. If $u$ is a solution of \eqref{u-omega} (with respect to $\omega$), then there exists a unique superharmonic function $\hat u \ge 0$ in $\Omega$ such that $\hat u=u$ $d \omega$-a.e. in $\Omega$, and
$\hat u \in L^1_{loc} (\Omega, \omega)$ is a potential theoretic solution that satisfies \eqref{u-def}.
Indeed, let $\hat u:= G(u \omega) + Pf $ everywhere in $\Omega$. Then $\hat u= u$ $d \omega$-a.e. by \eqref{u-omega}, $\hat u \in L_{loc}^1(\Omega, \omega)$, and consequently
\[
\hat u(x) =G(u \omega) (x) + Pf (x) = G(\hat u \omega) (x)+ Pf (x) \quad \text{for all} \,\, x \in \Omega .
\]
Clearly, $\hat u$ is superharmonic since $G(u \omega)<+\infty$ $d \omega$-a.e., and hence $G(u \omega)$ is a Green potential, and
$Pf$ is the greatest harmonic minorant of $\hat u$. Thus, $\hat u$ is a potential theoretic solution.
Moreover, such a superharmonic solution $\hat u$ is unique: if $\hat v$ is a superharmonic solution to \eqref{u-def} for which $\hat v = u$ $d \omega$-a.e., it follows that
\[
\hat v = G(\hat v \omega) + Pf =G(u \omega) + Pf = \hat u
\]
everywhere in $\Omega$.
If $\omega$ satisfies \eqref{abs-cap}, then it is enough
to require that $u<+\infty$ and \eqref{u-def} hold q.e. Then $u$ is a solution of \eqref{u-omega}
with respect to $\omega$, and
$\hat u:=G(u \omega) + Pf $ is
a potential theoretic solution to \eqref{ufeqn}, and $\hat u$ is a quasicontinuous representative of $u$, so that
$\hat u = u$ q.e.
From now on, we will not distinguish between a solution $u$ to \eqref{u-omega} understood $d \omega$-a.e., and its superharmonic representative $\hat u= u$ $d \omega$-a.e. which satisfies \eqref{u-def} everywhere in $\Omega$.
In particular, the solution $u_f$ of \eqref{u-def} defined by \eqref{ufsolndef}
\textit{everywhere} in $\Omega$ is a potential theoretic (superharmonic) solution of
\eqref{ufeqn} provided $u_f\not\equiv +\infty$.
Indeed, for $m \in \mathbb{N}$,
\[
\sum_{j=0}^m T^j(Pf) (x) = Pf(x) + T \sum_{j=0}^{m-1} T^j(Pf) (x), \quad \text{for all} \, \, x \in \Omega .
\]
Letting $m \to \infty$, by the monotone convergence theorem we have
\begin{align*}
u_f & := \sum_{j=0}^\infty T^j(Pf)\\ & = Pf + T(\sum_{j=0}^\infty T^j(Pf))\\& = Pf + G(u_f \omega)
\end{align*}
everywhere in $\Omega$.
Clearly, $u_f$ is a superharmonic function provided $u_f \not\equiv+\infty$
in $\Omega$, which occurs if and only if $G (u_f \, \omega)\not\equiv +\infty$ in $\Omega$, or equivalently $u_f \in L^1(\Omega, m \omega)$.
Moreover, $u_f$ is a \textit{minimal} solution since, for every other
solution $u$, we obviously have, for every $m \in \mathbb{N}$,
\[
u=G(u \omega) + Pf = G(u \omega) +\sum_{j=0}^m T^j(Pf) \ge \sum_{j=0}^m T^j(Pf).
\]
Letting $m \rightarrow \infty$, we see that $u \ge u_f$.
\begin{Def} \label{qmkernel} Let $(\Omega, \omega)$ be a measure space. A quasi-metric kernel $K$ is a measurable function $K: \Omega \times \Omega \rightarrow (0, +\infty]$ such that $K$ is symmetric ($K(x,y)=K(y,x)$) and $d= \frac{1}{K}$ satisfies
\[ d(x,y) \leq \kappa (d(x,z) + d(z,y)) \quad \mbox{for all} \quad x,y,z \in \Omega, \]
for some $\kappa >0$, called the \textbf{quasi-metric constant} for $K$.
A measurable function $K: \Omega \times \Omega \rightarrow (0, +\infty]$ is called \textbf{quasi-metrically modifiable} if there exists a measurable function $m: \Omega \rightarrow (0, \infty)$ such that $\tilde{K} (x,y) = \frac{K(x,y)}{m(x)m(y)}$ is a quasi-metric kernel. The function $m$ is called a \textbf{modifier} for $K$.
\end{Def}
We will use the following result, from \cite{FNV}, Corollary 3.5.
\begin{Thm} \label{FNVTheorem} Let $(\Omega, \omega)$ be a measure space. Suppose $K$ is a quasi-metrically modifiable kernel on $\Omega$ with modifier $m$. Let $\kappa$ be the quasi-metric constant for $\frac{K(x,y)}{m(x)m(y)}$. For a non-negative, measurable function $h$ on $\Omega$, define
\[ Th (x) = \int_{\Omega} K(x,y) h(y) \, d \omega (y), \quad \mbox{for} \,\, x \in \Omega. \]
For $j \in \mathbb{N}$, let $T^j$ be the $j^{th}$ iterate of $T$, and let $T^0 h =h$.
(A) If $\Vert T \Vert <1$, then there exists a positive constant $C$, depending only on $\kappa$ and $\Vert T \Vert$, such that
\begin{equation} \label{qmkernelupperbnd}
\sum_{j=0}^{\infty} T^j m (x) \leq m(x) e^{C (Tm(x))/m(x)}, \qquad \text{for all} \, \, x \in \Omega .
\end{equation}
(B) There exists a positive constant $c$, depending only on $\kappa$, such that
\begin{equation} \label{qmkernellowerbnd}
\sum_{j=0}^{\infty} T^j m (x)\geq m(x) e^{c (Tm(x))/m(x)}, \qquad \text{for all} \, \, x \in \Omega .
\end{equation}
\end{Thm}
It is known (\cite{An2}, \cite{Han1}) that in a bounded uniform domain $\Omega$ (in particular, an NTA domain), the Green's kernel $G(x,y)$ is quasi-metrically modifiable, with modifier $m(x) = \min (1, G(x,x_0))$, where $x_0$ is any fixed point in $\Omega$, and the quasi-metric constant of
the modified kernel $G(x,y)/(m (x) \, m(y))$ is independent of $x_0$.
In fact, in a bounded uniform domain $\Omega\subset \mathbb{R}^n$ ($n\ge 3$), the following slightly stronger property (called the strong generalized triangle property) holds (\cite{Han1}, p. 465):
\begin{equation}\label{strong-quasi}
|x_1-x_2| \le |x_1-y| \Longrightarrow \frac{G(x_1, y)}{m(x_1)} \le \kappa \, \frac{G(x_2, y)}{m(x_2)},
\end{equation}
for all $x_1, x_2, y \in \Omega$, where $\kappa$ depends only on $\Omega$. It is known (\cite{Han1}, Corollary 2.8)
that \eqref{strong-quasi} is equivalent to the uniform boundary Harnack principle established for uniform domains in (\cite{Aik}, Theorem 1). By (\ref{strong-quasi}),
\begin{equation}\label{liminf-limsup}
\limsup_{x_1\rightarrow z, \, x_1 \in \Omega} \frac{G(x_1, y)}{m(x_1)} \le \kappa \, \liminf_{x_2\rightarrow z, \, x_2 \in \Omega} \frac{G(x_2, y)}{m(x_2)} ,
\end{equation}
for all $y\in\Omega$ and $z \in \partial \Omega$, where $\kappa$ depends only on $\Omega$, because the condition $|x_1-x_2| \le |x_1-y|$ is satisfied for $x_1$ and $x_2$ sufficiently close to $z$.
We will need the following lemma for punctured quasi-metric spaces due to Hansen and Netuka (\cite{HN}, Proposition 8.1 and Corollary 8.2); it originated in (Pinchover \cite{Pin}, Lemma A.1)
for normed spaces.
\begin{Lemma}\label{hansen} Suppose $d$ is a quasi-metric on a set $\Omega$ with quasi-metric constant $\kappa$. Suppose $x_1 \in \Omega$. Then
\begin{equation}\label{quasi-cond}
\tilde d(x,y) = \frac{d(x,y)}{d(x,x_1) \cdot d(y,x_1)}, \qquad x, y
\in \Omega \setminus\{x_1\},
\end{equation}
is a quasi-metric on $\Omega\setminus\{x_1\}$ with quasi-metric constant
$4 \kappa^2$.
\end{Lemma}
\begin{Lemma} \label{Martinkernelquasimetric} Let $\Omega$ be a bounded uniform domain with Green's function $G(x,y)$. Fix some $x_0 \in \Omega$ and define Martin's kernel $M(x,z)$ for $x \in \Omega$ and $z \in \partial \Omega$ by (\ref{martin-K}). Then for each $z \in \partial \Omega$, the function $\tilde{m}(x) = M(x,z)$ is a quasi-metric modifier for $G$, with quasi-metric constant $\kappa$ independent of $z \in \partial \Omega$.
\end{Lemma}
\begin{proof} Fix $x_0\in \Omega$, $z\in \partial \Omega$. As noted above, $m (x) = \min (1, G(x,x_0))$ is a modifier for $G$, so that
$d(x, y)=\frac{m (x) \, m(y)}{G(x,y)}$ is a quasi-metric on $\Omega$ with positive constant $\kappa$ independent of $x_0$, so that
\[ \frac{m (x) \, m(y)}{G(x,y)} \leq \kappa \left( \frac{m (x) \, m(w)}{G(x,w)} + \frac{m (w) \, m(y)}{G(w,y)} \right) ,\]
for all points $x,y,w \in \Omega$. Suppose $x_1 \in \Omega$ with $x_1 \neq x_0$. Clearly, for $\tilde d$ defined by \eqref{quasi-cond}, we have
\[
\tilde d(x,y) = \frac{1}{m(x_1)^2} \, \frac{G(x, x_1) \, G(y, x_1)}{G(x, y)}, \qquad x, y\in \Omega\setminus\{x_1\} .
\]
Then by Lemma
\ref{hansen} it follows that $\tilde d$ is a quasi-metric on $\Omega\setminus\{x_1\}$ with quasi-metric constant
$4 \kappa^2$. Assuming that $x, y, w \in \Omega\setminus\{x_1\}$, from the inequality
$\tilde d(x,y) \le 4 \kappa^2 [\tilde d(x,w) + \tilde d(y,w)]$,
we deduce
\begin{align*}
\frac{1}{m(x_1)^2} \, \frac{G(x, x_1) \, G(y, x_1)}{G(x, y)} &\le \frac{4 \kappa^2}{m(x_1)^2} \, \\
\times & \left[ \frac{G(x, x_1) \, G(w, x_1)}{G(x, w)} + \frac{G(y, x_1) \, G(w, x_1)}{G(y, w)}
\right].
\end{align*}
Multiplying both sides of the preceding inequality by $\frac{m(x_1)^2}{[G(x_0, x_1)]^2}$ yields
\begin{align*}
& \frac{G(x, x_1) \, G(y, x_1)}{G(x_0, x_1) \, G(x, y) \, G(x_0, x_1)} \le 4 \kappa^2 \, \\
& \times \left[ \frac{G(x, x_1) \, G(w, x_1)}{G(x_0, x_1) \, G(x, w) \, G(x_0, x_1)} + \frac{G(y, x_1) \, G(w, x_1)}{G(x_0, x_1) \, G(y, w) \, G(x_0, x_1)}
\right].
\end{align*}
Letting $x_1 \rightarrow z$, with $x_1 \in \Omega$, we have
\[
\lim_{x_1 \rightarrow z, \, x_1 \in \Omega} \frac{G(x, x_1)}{G(x_0, x_1)}= M(x,z)=\tilde{m} (x), \]
by (\ref{martin-K}), and similarly with $x$ replaced by $y$ or $w$. We obtain
\[ \frac{\tilde{m} (x)\tilde{m}(y)}{G(x,y)} \leq 4 \kappa^2 \left( \frac{\tilde{m} (x)\tilde{m}(w)}{G(x,w)} + \frac{\tilde{m}(w)\tilde{m}(y)}{G(w,y)} \right) . \]
\end{proof}
\begin{proofof} Theorem \ref{mainufest}. By Lemma \ref{Martinkernelquasimetric}, $\tilde{m}(x)=M(x,z)$ is a quasi-metric modifier for $T$, for all $z \in \partial \Omega$, with quasi-metric constant independent of $z$. Hence by part (A) of Theorem \ref{FNVTheorem} with $\tilde m$ in place of $m$, under the assumption that $\Vert T \Vert <1$,
(note that the estimates in Theorem \ref{FNVTheorem} hold everywhere)
\begin{equation}\label{upperTM}
\begin{aligned}
\mathcal{M}(x, z) & = \sum_{j=0}^{\infty} T^j M(\cdot, z) (x) \leq M(x,z) e^{C \, (TM(\cdot,z))(x)/M(x, z)}\\ & = M(x,z) e^{C \int_{\Omega} G(x,y) \frac{M(y,z)}{M(x,z)} d \omega(y)} , \qquad (x, z) \in \Omega\times\partial \Omega ,
\end{aligned}
\end{equation}
with $C$ depending only on $\Omega$ and $||T||$. Substituting this estimate in \eqref{ufrepresent} and using equation (\ref{harmmeasiden}) gives (\ref{ptwiseupperbnd}). This proves part (A) of Theorem \ref{mainufest}.
Suppose now that $u$ is a solution to \eqref{ufeqn}. Assuming without loss of generality that $f \neq 0$ $d H^{x}$-a.e., so that $u \geq Pf>0$
is a positive solution, we see that $T u \leq u$, where $0<u<\infty$ $d \omega$-a.e. Hence, $\Vert T \Vert \leq 1$,
and consequently \eqref{equivnormTless1} holds with $\beta=1$,
by Schur's lemma (see \cite{FNV}, \cite{FV2}). In particular, \eqref{abs-cap} holds.
Since $Pf$ is a positive harmonic function, obviously $Pf\ge c_K>0$ on every compact set $K\subset \Omega$,
and consequently
\begin{equation}\label{F-M-dom}
c_K \, G(\chi_K \omega)\le G(Pf \omega) \le G (u \omega) \le u < \infty \quad d \omega\text{-a.e.}
\end{equation}
This simple observation will be used below.
For the minimal solution $u_f$ to \eqref{ufeqn}
given by \eqref{ufsolndef} we have $u\ge u_f$.
Applying part (B) of Theorem \ref{FNVTheorem} with $\tilde m=M(\cdot, z)$ in place of $m$ gives
\begin{equation}\label{lowerTM}
\begin{aligned}
\mathcal{M}(x, z) & = \sum_{j=0}^{\infty} T^j M(\cdot, z) (x) \geq M(x,z) e^{c \, (TM(\cdot,z))(x)/M(x, z)}\\ & = M(x,z) e^{c \int_{\Omega} G(x,y) \frac{M(y,z)}{M(x,z)} d \omega(y)} ,
\qquad (x, z) \in \Omega\times\partial \Omega ,
\end{aligned}
\end{equation}
with $c$ depending only on $\Omega$.
In fact, we can let $c=1$ in \eqref{lowerTM} if instead of statement (B) of Theorem \ref{FNVTheorem} we use
a recent lower estimate of solutions obtained in \cite{GV2}, Theorem 1.2, with $q=1$,
$\mathfrak{b}=1$, and $h=\tilde m$.
Here $\mathfrak{b}$ is the constant in the so-called weak domination principle, which states that,
for any bounded measurable function $g$ with compact support,
\begin{equation}\label{weak-dom}
G (g \omega)(x)\leq h(x)\ \text{in }\mathrm{supp} (g)\ \ \Longrightarrow \ \
G(g \omega)(x) \leq \mathfrak{b}\ h(x) \, \, \text{in\ }\Omega ,
\end{equation}
where $h$ is a given positive lower semicontinuous function on $\Omega$.
For Green's kernel $G$, this property with $\mathfrak{b}=1$ is a consequence of the classical Maria--Frostman
domination principle (see \cite{Hel}, Theorem 5.4.8), for any positive superharmonic function $h$. We only need to verify that
$G (g \omega)<\infty$ $d \omega$-a.e., which is immediate from \eqref{F-M-dom}. Hence,
\eqref{weak-dom} holds with $\mathfrak{b}=1$, and so
\eqref{lowerTM} holds with $c=1$ by \cite{GV2}, Theorem 1.2.
Consequently, by the same argument as above,
\begin{align*}
u_f (x)& = \int_{\partial \Omega} f(z) \,
\sum_{j=0}^{\infty} T^j M(\cdot, z) (x) \, d H^{x_0}(z)\\ & \geq
\int_{\partial \Omega} e^{\int_{\Omega} G(x,y) \frac{M(y,z)}{M(x,z)} d \omega(y)}\, f(z) \, M(x,z) \, d H^{x_0}(z), \quad \text{for all} \, \, x \in \Omega,
\end{align*}
where $M(x,z) \, dH^{x_0}(z)= d H^{x}(z)$. This yields the lower bound (\ref{ptwiselowerbnd}).
The proof of part (B) of Theorem \ref{mainufest} is complete.
\end{proofof}
We complete this section with an extension of Theorem \ref{mainufest} which covers
solutions of \eqref{form-sol} with an arbitrary positive harmonic function $h$ in place of $Pf$. Such solutions arise naturally, because, if $u$ is a positive superharmonic function in $\Omega$ such that
\begin{equation}\label{u-harm}
-\triangle u = \omega u, \quad u\ge 0, \, \, \mbox{in} \, \, \Omega ,
\end{equation}
and if the greatest harmonic minorant of $u$ is $h>0$, then by the Riesz decomposition theorem,
\begin{equation}\label{u-harm-int}
u= G(u \omega)+ h, \quad u\ge 0, \, \, \mbox{in} \, \, \Omega ,
\end{equation}
where $G(u \omega)\not\equiv +\infty$, and $u d \omega$ is the corresponding Riesz measure, a locally finite Borel measure in $\Omega$.
Given a positive harmonic function $h$ on $\Omega$, we will estimate the minimal solution
\[ u_h = h + \sum_{j=1}^\infty T^j h \]
of \eqref{u-harm-int} and in particular give conditions for $u_h$ to exist, i.e., such that $u_h \not\equiv +\infty$. The proof is based on Martin's representation \eqref{martin-rep}, which takes the place of \eqref{harm-rep} in the proof of Theorem \ref{mainufest}.
\begin{Thm} \label{mainu-harm} Let $\Omega\subset \mathbb{R}^n$ be a bounded uniform domain,
$\omega$ a locally finite Borel measure on $\Omega$, and $h$ a positive harmonic
function in $\Omega$.
(A) If $\Vert T \Vert <1$, then there exists a positive constant $C$ depending only on $\Omega$ and $\Vert T \Vert$ such that
\begin{equation} \label{upperbnd-harm}
u_h (x) \leq \int_{\partial \Omega} e^{C \int_{\Omega} G(x,y) \frac{M(y,z)}{M(x,z)} d \omega (y)} M(x, z) \, d \mu_h(z), \quad x \in \Omega .
\end{equation}
(B) If $u$ is a positive solution of \eqref{u-harm-int}, then $\Vert T \Vert \leq 1$, and
\begin{equation} \label{lowerbnd-harm}
u (x) \geq \int_{\partial \Omega} e^{\int_{\Omega} G(x,y) \frac{M(y,z)}{M(x,z)} d \omega (y)} M(x, z) \,
d \mu_h(z), \quad x \in \Omega .
\end{equation}
\end{Thm}
The proof of Theorem \ref{mainu-harm} is very similar to that of Theorem \ref{mainufest}
above. We only need to integrate both sides of estimates \eqref{upperTM} and \eqref{lowerTM}
over $\partial\Omega$ against $d \mu_h(z)$ in place of $f(z) \, dH^{x_0} (z)$.
\section{Existence criteria for $u_f$}\label{sec3}
We require a few results prior to giving the proof of Theorem \ref{gaugecrit}. The following lemma is well-known (see, for instance, \cite{AG}, Lemma 4.1.8 and Theorem 5.7.4), but we include a proof for the sake of completeness. Recall that $x_0 \in \Omega$ is a fixed reference point and $m(x) = \min (1, G(x,x_0))$.
\begin{Lemma} \label{estGchiK} Let $\Omega \subseteq{\mathbb{R}^n}$ ($n\ge 2$) be a domain
with nontrivial Green's function $G$. Let $K$ be a compact subset of $\Omega$ and let $\chi_K$ be the characteristic function of $K$. There exists a constant $C_K$ depending on $\Omega$, $K$, and the choice of $x_0$, such that
\begin{equation}\label{GchiKest}
G \chi_K (x) \leq C_K \, m (x), \quad x \in \Omega.
\end{equation}
Also, if $|K|>0$, there exists a constant $c_K>0$ depending on $\Omega$, $K$ and $x_0$ such that
\begin{equation}\label{lowerGchiKest}
G \chi_K (x) \geq c_K \, m (x), \quad x \in \Omega.
\end{equation}
\end{Lemma}
\begin{proof} We first prove inequality (\ref{GchiKest}). Suppose $n \geq 3$ (the case $n=2$ is handled in a similar way with obvious modifications). We assume $|K|>0$, else the result is trivial. We also assume that $x_0 \in K$; if not, replacing $K$ with $K \cup \{x_0\}$ does not change $G \chi_K$. We first claim that there exists a constant $C_1(K)$ depending on $K$ and $x_0$ such that
\begin{equation} \label{GchiKbnd1}
G \chi_K (x) \leq C_1 (K) ,
\end{equation}
for all $x \in \Omega$. To prove this claim, we recall the standard fact that $G(x,y) \leq C|x-y|^{2-n}$ for all $x, y \in \Omega$. Let $R$ be the diameter of $K$. Then there exists $y_0 \in K$ such that $K \subseteq \overline{B(y_0, R)}$. If $x \in B(y_0, 2R)$, then $K \subseteq B(x, 3R)$ and
\[ \int_K G(x,y) \, dy \leq \int_{B(x, 3R)} \frac{c}{|x-y|^{n-2}} \, dy \leq c \int_0^{3R} \frac{r^{n-1}}{r^{n-2}} \, dr = c R^2. \]
If $x \notin B(y_0, 2R)$ then $|x-y|^{2-n} \leq R^{2-n}$ for all $y \in K$, so
\[ \int_K G(x,y) \, dy \leq C R^{2-n} |K| \leq c R^2 . \]
Next we claim that there exists a constant $C_2$ depending on $\Omega$, $K$ and $x_0$ such that
\begin{equation} \label{GchiKbnd2}
G \chi_K (x) \leq C_2 \, G(x, x_0) ,
\end{equation}
for all $x \in \Omega$. For this claim, let $U$ be a subdomain of $\Omega$ such that
$x_0\in U$, $K \subseteq U$ and $\overline{U} \subseteq \Omega$.
If $x \in \Omega \setminus U$, then $G(x,y)$ is a positive harmonic function of $y$ in $U$, so by Harnack's inequality (e.g., see \cite{AG}, Corollary 1.4.4), there exists a constant $C(K, U)$ such that $G(x,y) \leq C(K, U) \, G(x, x_0)$ for all $y \in K$. Hence
\[ \int_K G(x,y) \, dy \leq C(K, U) \, |K| \, G(x, x_0).\]
Since a fixed domain $U$ depends only on $x_0$, $K$, and $\Omega$, we can replace $C(K, U)$ with $C(x_0, K, \Omega)$.
On the other hand, suppose $x \in U$. Note that $G(z, x_0)$ is a strictly positive lower semi-continuous function of $z \in \Omega$ and hence $M = \min \{ G(z, x_0): \, z \in \overline{U} \} >0$, where $M$ depends on $\Omega, x_0$ and $U$, hence $K$. Hence by equation (\ref{GchiKbnd1}),
\[ G \chi_K (x) \leq C_1 (K) \leq \frac{C_1(K)}{M} \, G(x, x_0). \]
Since $m(x) = \min (1, G(x, x_0))$, inequalities (\ref{GchiKbnd1}) and (\ref{GchiKbnd2}) imply inequality (\ref{GchiKest}).
To prove inequality (\ref{lowerGchiKest}), let $U$ be as above. For $x \in \Omega \setminus U$, the same application of Harnack's inequality as above gives that $G(x,y) \geq C (x_0, K, \Omega)^{-1} G(x, x_0)$ for all $y \in K$. Hence
\[ \int_K G(x,y) \, dy \geq C(K, \Omega)^{-1} \, |K| \, G(x, x_0) \geq C(K, \Omega)^{-1} |K| \, m(x). \]
Now suppose $x \in \overline{U}$. Note that $G(z,y) $ is a strictly positive lower semi-continuous function of $(z,y)$ in $\Omega\times \Omega$ (see \cite{AG}, Theorem 4.1.9). Hence $C_3 (\overline{U}) = \min \{ G(z,y) \,: \,
(z, y) \in \overline{U}\times \overline{U}\}$ is attained at some point in the compact set $\overline{U}\times \overline{U}$.
In particular, $C_3(\overline{U})>0$. Since $m(x) \leq 1$,
\[ \int_K G(x,y) \, dy \geq C_3 (\overline{U}) \, |K| = C_3 (x_0, K, \Omega) \, m(x) . \]
\end{proof}
\begin{Lemma}\label{low-M-est} Suppose $\Omega \subset \mathbb{R}^n$ ($n\ge 2$) is a bounded uniform domain.
Suppose $x_0\in \Omega$ is a reference point for the
Martin kernel. Then there exists a positive constant $c$ depending only on $x_0$ and
$\Omega$ such that
\begin{equation}\label{martin-low}
M(x, z) \ge c \, m(x), \qquad \text{for all} \, \, (x, z) \in \Omega\times\partial \Omega ,
\end{equation}
where $m(x)=\min (1, G(x, x_0))$.
In particular, if $\omega$ is a locally finite Borel measure in $\Omega$ such that $M^{*} (m \, \omega) \not\equiv+\infty$, then $m \in L^2 (\Omega, \omega)$.
\end{Lemma}
\begin{proof} Fix $z \in \partial \Omega$. Let $B(x_0, r) \subset \Omega$,
where $0<r\le \frac{1}{2} \, \text{dist} \, (x_0, \partial \Omega)$. Since $M(\cdot, z)$ is a positive harmonic
function in $\Omega$, by Harnack's inequality in $B(x_0, 2r)$,
there exists a constant $c>0$ depending only on $x_0$ and $r$ such that $M(x, z) \ge c \, M(x_0, z)$,
for all $x \in B(x_0, r)$ where $M(x_0, z)=1$. Hence,
\begin{equation}\label{m-c}
M(x, z) \geq c >0, \quad \text{for all} \, \, x \in B(x_0, r) .
\end{equation}
For $x \in \Omega\setminus B(x_0, r)$, we argue that by the $3$-G inequality in a
bounded uniform domain ($n\ge 3$),
\[
\frac{G(x, x_0) \, G(x_0, y)}{G(x, y)}\le C \, \left( |x-x_0|^{2-n} + |y-x_0|^{2-n} \right) ,
\]
for all $y \in \Omega$, where $C$ depends only on $\Omega$, see \cite{Han1}. Hence, for $x, y \in \Omega\setminus B(x_0, r)$,
\[
\frac{G(x, y)}{G(x_0, y)}\ge C^{-1} \, \frac{G(x, x_0)}{|x-x_0|^{2-n} + |y-x_0|^{2-n} }\ge \frac{C^{-1}}{2} \, r^{n-2} \, G(x, x_0).
\]
(For $n=2$, an analogue of the $3$-G inequality holds in any bounded domain \cite{Han2}.)
Letting $y \rightarrow z$, where without loss of generality we may assume that $y \in \Omega\setminus B(x_0, r)$, we deduce
\begin{equation}\label{m-g}
M(x, z) \ge \frac{C^{-1}}{2} \, r^{n-2}\, G(x, x_0) , \quad \text{for all} \, \, x \in \Omega\setminus B(x_0, r) .
\end{equation}
Combining estimates \eqref{m-c} and \eqref{m-g}
yields \eqref{martin-low}.
If $\omega$ is a locally finite Borel measure in $\Omega$ such that $M^{*} (m \, \omega)\not\equiv
+\infty$, then for some $z\in \partial \Omega$ by \eqref{martin-low} $\int_\Omega m^2 d \omega
\le c^{-1} M^* (m \omega)(z) <+\infty$, i.e., $m \in L^2(\Omega, \omega)$.
\end{proof}
\begin{Lemma}\label{conv-lemma}
Suppose $\Omega \subset \mathbb{R}^n$ ($n\ge 2$) is a bounded uniform domain.
Suppose $\mu$ is a finite Borel measure with compact support in $\Omega$. Let $z \in \partial \Omega$.
Then
\begin{equation}\label{min-thin2}
\lim_{x \rightarrow z, \, x\in\Omega} \frac{G \mu (x)}{G(x, x_0)} = \int_{\Omega} M(y,z) \, d\mu(y) = M^* \mu (z).
\end{equation}
In addition, if $z$ is a regular point of $\partial \Omega$, then
\begin{equation}\label{min-thin}
\lim_{x \rightarrow z, \, x\in\Omega} \frac{G \mu (x)}{m(x)} = \int_{\Omega} M(y,z) \, d\mu(y) = M^* \mu (z).
\end{equation}
\end{Lemma}
\begin{proof} By \eqref{martin-K}, if $y\in\Omega$ and $x_j\rightarrow z$ ($x_j\in \Omega$), then
\[
\lim_{j \rightarrow \infty} G(y, x_j) / G (x_j, x_0) = M(y,z) .
\]
As in the proof of Lemma \ref{estGchiK}, we denote by $U$ a relatively compact domain in $\Omega$
that contains both $x_0$ and $K$, where here $K$ denotes the (compact) support of $\mu$. Since $x_j \rightarrow z$, where $z\in \partial\Omega$, we have that $x_j \notin \overline{U}$ for $j \ge j_0$. Then $G(y, x_j)$ is a harmonic function of $y\in U$, and for each $j \ge j_0$, by Harnack's inequality,
\[
G(y, x_j) \le C(K, U) \, G(x_0, x_j), \qquad \text{for all} \, \, y \in K .
\]
Since $\mu$ is a finite measure, we obtain \eqref{min-thin2} by the dominated
convergence theorem.
If $z$ is a regular point of $\partial \Omega$, then $G (x_j, x_0)\rightarrow 0$ as $j \rightarrow \infty$,
and consequently $m(x_j)=G (x_j, x_0)$ for $j$ large enough. Hence, \eqref{min-thin} follows from
\eqref{min-thin2}.
\end{proof}
In Lemma \ref{conv-lemma}, $\mu$ is a finite Borel measure with compact support in $\Omega$. We remark that more generally, for $\mu$ only locally finite,
\begin{equation}\label{martin1}
\liminf_{x\rightarrow z, \, \, x\in \Omega} \frac{G \mu(x)}{G(x_0, x)}\ge \int_\Omega M(x, z) \, d \mu(x),
\end{equation}
for $z\in \triangle$ (a Martin boundary point), by Fatou's Lemma. In fact, by \cite{AG}, Theorem 9.2.7, for any Green's potential $G \mu$ and $z\in \triangle_1$
(a Martin boundary point where $\Omega$ is not minimally thin),
\begin{equation}\label{martin2}
\liminf_{x\rightarrow z, \, \, x\in \Omega} \frac{G \mu(x)}{G(x_0, x)}= \int_\Omega M(x, z) \, d \mu(x).
\end{equation}
For uniform domains, $\triangle=\triangle_1=\partial\Omega$, so that \eqref{martin2} holds for all
$z\in \partial\Omega$. We could use this fact in our proof below, but we prefer the more elementary approach in Lemma \ref{conv-lemma}. The compact support restriction can be removed later in the proof by exhausting $\Omega$ with a sequence of nested domains $\Omega_j$, and using the monotone convergence theorem.
\begin{proofof} Theorem \ref{gaugecrit}.
(A) Suppose $\Vert T \Vert <1$ and \eqref{martincritsuff} holds. Define
\begin{equation}\label{defngreenpot}
Gf (x) = \int_{\Omega} G(x,y) f(y) \, dy, \,\,\, x \in \Omega.
\end{equation}
Let $G_1 =G$, and let $G_j (x,y)$ be the kernel of the $j^{th}$ iterate $T^j$ of $T$ defined by (\ref{defT}), so that
\begin{equation} \label{defGj}
T^j h(x) = \int_{\Omega} G_j(x,y) h(y)\, d \omega (y).
\end{equation}
Then $G_j$ in \eqref{defGj} is determined inductively for $j \geq 2$ by
\begin{equation}\label{new-defGj}
G_j (x,y) = \int_{\Omega} G_{j-1} (x, w) G(w,y) \, d\omega(w).
\end{equation}
We define the minimal Green's function associated with the Schr\"{o}dinger operator $-\triangle - \omega$ to be
\begin{equation}\label{defGreenSchr}
\mathcal{G} (x,y) = \sum_{j=1}^{\infty} G_j (x,y), \qquad \text{for all} \, \, x, y \in \Omega.
\end{equation}
The corresponding Green's operator is
\[ \mathcal{G}f(x) = \int_{\Omega} \mathcal{G}(x,y) f(y) \, dy, \quad x \in \Omega .\]
Let
$K$ be a compact set in $\Omega$. Denote by $u_K$ a solution to the equation
\begin{equation}\label{K-eqn}
\left\{ \begin{aligned}
-\triangle u & = \omega u + \chi_K\, \,& & \mbox{in} \, \, \Omega, \quad u \ge 0, \\
u & = 0 \, \, & &\mbox{on} \, \, \partial \Omega .
\end{aligned}
\right.
\end{equation}
In other words,
\begin{equation} \label{eqnforuK}
u_K = G(u_K \omega) + G\chi_K .
\end{equation}
By Lemma \ref{estGchiK}, $G\chi_K (x)\approx m(x)$ in $\Omega$ if
$m(x)=\min(1, G(x, x_0))$.
Without loss of generality we may assume that $m \in L^2 (\Omega, \omega)$; otherwise
$M^*(m \, \omega) \equiv +\infty$ by Lemma \ref{low-M-est}, and
condition \eqref{martincritsuff} is not valid. It follows that $G \chi_K \in L^2 (\Omega, \omega)$.
But $||T||<1$, so that $u_K=(I-T)^{-1} G \chi_K\in L^2(\Omega, \omega)$, and the series in \eqref{defuK} converges in
$L^2(\Omega, \omega)$ (and hence $d \omega$-a.e.). In particular, $G(u_K \omega)\not\equiv \infty$.
From this fact it is immediate that the minimal superharmonic solution to \eqref{eqnforuK} is given by
\begin{equation} \label{defuK}
\begin{aligned}
u_K (x) & := G(u_K \omega) + G\chi_K = (I-T)^{-1} G \chi_K (x) \\& = \sum_{j=0}^{\infty} T^j (G \chi_K) (x)= \int_{K} \mathcal{G} (x,y) \, dy ,
\end{aligned}
\end{equation}
for all $x \in \Omega$.
By equation (\ref{ufsolndef}),
\begin{align*}
u_f (x) & = Pf(x) + \sum_{j=1}^{\infty} T^j (Pf)(x) \\
& = Pf(x)+ \int_{\Omega} \mathcal{G} (x,y) \, Pf (y) \, d\omega(y),
\end{align*}
for all $x \in \Omega$. Integrating both sides of this equation over $K$ with respect to $dx$,
\begin{equation}\label{intKuf}
\begin{aligned}
\int_{K} u_f (x) \, dx & = \int_K Pf(x) \, dx + \int_{K} \int_{\Omega} \mathcal{G} (x,y) \, Pf(y) \, d \omega (y) \, dx \\
& = \int_K Pf(x) \, dx + \int_{\Omega} \int_{K} \mathcal{G} (x,y) \, dx \, Pf (y) \, d\omega(y) \\ & = \int_K Pf(x) \, dx + \int_{\Omega} u_K (y)\, Pf (y) \, d\omega(y) ,
\end{aligned}
\end{equation}
by Fubini's theorem, equation (\ref{defuK}) and the symmetry of $\mathcal{G}$.
The term $\int_K Pf(x) \, dx $ is finite because \eqref{martincritsuff} guarantees that $f$ is integrable with respect to harmonic measure, so $Pf$ is not identically infinite, and so is harmonic. Thus to prove that $u_f \in L^1 (K, dx)$, it suffices to show that $u_K Pf \in L^1 (\Omega, \omega)$.
By \eqref{pf-rep-martin} and Fubini's theorem,
\[
\int_{\Omega} u_K(y) \, Pf(y) \, d\omega(y) = \int_{\partial \Omega} \int_{\Omega} M(y,z) u_K (y) \, d \omega (y) \, f(z) \, dH^{x_0} (z) .
\]
We claim that
\begin{equation} \label{Mu0est}
\int_{\Omega} M(y,z) u_K (y) \, d \omega (y) \leq C_K \, e^{C \, M^* (m\omega) (z)},
\end{equation}
if $z$ is a regular point of $\partial\Omega$. Assuming \eqref{Mu0est} for the moment,
the set of irregular boundary points $E \subset \partial\Omega$
is known to be Borel and polar, i.e., $\text{cap}(E)=0$ (\cite{AG}, Theorem 6.6.8), and consequently
negligible, i.e., of harmonic measure zero (\cite{AG}, Theorem 6.5.5). Therefore \eqref{Mu0est} yields
\begin{equation} \label{u_K-Pf}
\int_{\Omega} u_K(y) \, Pf(y) \, d\omega(y)
\leq C_K \int_{\partial \Omega} e^{C \, M^* (m\omega) (z)} \,
f(z) \, d H^{x_0} (z).
\end{equation}
Hence our assumption \eqref{martincritsuff} guarantees that $u_K \, Pf \in L^1 (\Omega, \omega)$.
To prove (\ref{Mu0est}), let us assume first that $\omega$ is compactly supported. Then as mentioned above after \eqref{eqnforuK}, $u_K \in L^2 (\Omega, \omega)$. Hence, by Cauchy's inequality, $d \mu= u_K \, d \omega$ is a finite compactly supported measure.
By equation \eqref{defuK}, Lemma \ref{estGchiK}, and Theorem \ref{FNVTheorem},
\begin{equation} \label{uKexpest}
u_K (x) \leq C_K \sum_{j=0}^{\infty} T^j m (x) \leq C_K \, m(x) \, e^{C G(m\omega)(x) /m(x) },
\end{equation}
since $Tm= G(m \omega)$. Using the trivial estimate $m(\cdot) \le G(x_0, \cdot)$, followed by \eqref{eqnforuK} and then \eqref{uKexpest},
\begin{equation} \label{GuKomegaest}
\frac{G(u_K\omega)(x)}{G(x, x_0)} \leq \frac{G(u_K\omega)(x)}{m(x)} \leq \frac{u_K(x)}{m(x)} \leq C_K e^{C G(m\omega)(x)/m(x)} ,
\end{equation}
for $x \in \Omega$. Applying \eqref{min-thin2} with $d\mu = u_K d\omega$ and then \eqref{min-thin} with $d\mu = m d\omega$,
\begin{align*}
\int_{\Omega} M(y,z) u_K (y) \, d \omega (y) & = \lim_{x \rightarrow z, x \in \Omega} \frac{G(u_K\omega)(x)}{G(x, x_0)} \\
& \leq \lim_{x \rightarrow z, x \in \Omega} C_K e^{C G(m\omega)(x)/m(x)} \\
& = C_K e^{C \, M^* (m\omega) (z) } ,
\end{align*}
where the regularity of $z \in \partial \Omega$ is used only at the last step. Hence \eqref{Mu0est} is established for compactly supported measures $\omega$.
In the general case, consider an exhaustion $\Omega=\cup_{k=1}^{\infty} \Omega_k$, where $\{\Omega_k\}$ is a family of nested, relatively compact subdomains of $\Omega$. Without loss of generality we may assume that $x_0\in \Omega_k$, for all $k \in \mathbb{N}$.
In $\Omega\times\Omega$, define the iterated Green's kernels $G_j^{(k)} (x,y)$ for $j\in \mathbb{N}$, and $\mathcal{G}^{(k)}(x,y)=\sum_{j=1}^\infty G_j^{(k)} (x,y)$, as in (\ref{new-defGj}), (\ref{defGreenSchr}), except with $\omega$ replaced by $\omega_k$, $k \in \mathbb{N}$. Let $u_K^{(k)} = \mathcal{G}^{(k)} \chi_K$. By repeated use of the monotone convergence theorem, we see that $G_j^{(k)} (x,y)$ increases monotonically as $k \rightarrow \infty$ to $G_j (x,y)$ for each $j$, $\mathcal{G}^{(k)}(x,y)$ increases monotonically to $\mathcal{G} (x,y)$, and $u_K^{(k)}$ increases monotonically to $u_K$. Applying the compact support case gives
\begin{align*}
\int_{\Omega} M(y,z) u_K^{(k)} (y) \ \chi_{\Omega_k}(y) \, d\omega(y) & \leq C_K e^{C \, M^* (m \, \omega_k) (z)} \\ & \leq C_K e^{C \, M^* (m \, \omega) (z)} .
\end{align*}
Then, as $k \rightarrow \infty$, the monotone convergence theorem yields (\ref{Mu0est}).
(B) Suppose $u_f \in L^1_{loc} (\Omega, dx)$, where $f \neq 0$ a.e. relative to harmonic measure, and
\[ u_f = Tu_f + Pf \qquad \,\, \text{on} \, \, \Omega .\]
So $Tu_f \leq u_f$, where $0<u_f<\infty$ $d \omega$-a.e.
It follows by Schur's lemma that $\Vert T \Vert_{L^2 (\omega)\rightarrow L^2 (\omega)} \leq 1$.
It remains to show that \eqref{martincritnec} holds.
We remark that this condition follows immediately from \eqref{ptwiselowerbnd} with
$x=x_0$ provided $u_f(x_0) < \infty$. Since this is not necessarily the case, we proceed as follows.
Choose any compact set $K \subseteq \Omega$ with $|K|>0$.
By Lemma \ref{estGchiK} and Theorem \ref{FNVTheorem},
\begin{equation} \label{lowestuK}
u_K (x) = \sum_{j=0}^{\infty} T^j G\chi_K (x) \geq c_K \sum_{j=0}^{\infty} T^j m (x) \geq c_K m(x) e^{c(Tm(x))/(m(x))},
\end{equation}
for all $x \in \Omega$. In fact, we can let $c=1$ in the preceding
estimate, exactly as in the proof of \eqref{lowerTM} above, by using \cite{GV2}, Theorem 1.2 with $q=1$, $h=m$, and $\mathfrak{b}=1$. Notice
that $m$ is a superharmonic function in $\Omega$, and so the Maria-Frostman domination
principle yields \eqref{weak-dom} with $\mathfrak{b}=1$ and $h=m$.
By inequality \eqref{lowestuK}, equation (\ref{eqnforuK}) and inequality (\ref{GchiKest}),
\begin{equation} \label{Tm-m}
\begin{aligned}
e^{Tm(x)/(m(x))} & \leq c_K^{-1} \frac{u_K(x)}{m(x)} = c_K^{-1} \left( \frac{G(u_K \omega)(x)}{m(x)} + \frac{G\chi_K (x)}{m(x)} \right)
\\ & \leq c_K^{-1} \frac{G(u_K \omega)(x)}{m(x)} + C_K c_K^{-1} .
\end{aligned}
\end{equation}
Let $z \in \partial \Omega$ be a regular point. Applying Lemma \ref{conv-lemma} with $d\mu= m d\omega$ on the left side of (\ref{Tm-m}) (recalling that $Tm= G(m \omega)$), and with $d\mu = u_K \omega$ on the right side, we obtain
\begin{equation} \label{M-claim}
e^{ M^* (m \omega) (z)} \leq c_K^{-1} \, \int_{\Omega} M(y,z) u_K (y) \, d \omega (y) + C_K c_K^{-1},
\end{equation}
if $\omega$ has compact support in $\Omega$. By the same exhaustion process that was used in the opposite direction, (\ref{M-claim}) holds for $\omega$ locally finite in $\Omega$.
Since the set of irregular points in $\partial \Omega$ has harmonic measure $0$, as noted above, we can integrate \eqref{M-claim} over $\partial \Omega$ with respect to $f \, dH^{x_0}$ and apply Fubini's theorem to obtain
\begin{align*}
& \int_{\partial \Omega} e^{M^* (m \omega) (z)} \, f(z) \, dH^{x_0} (z) \\ &
\leq C_1 c_K^{-1} \int_{\Omega} \int_{\partial \Omega} M(y,z) \, f(z) \, dH^{x_0} (z) u_K (y) \, d \omega (y) \\& + C_K c_K^{-1} \int_{\partial \Omega} \, f(z) \, dH^{x_0} (z) \\
& = C_1 c_K^{-1} \int_{\Omega} u_K (y) \, Pf(y) \, d \omega (y) + C_K c_K^{-1} \int_{\partial \Omega} \, f(z) \, dH^{x_0} (z) ,
\end{align*}
using equation \eqref{pf-rep-martin}. Since $u_K \, Pf \in L^1 (\Omega, \omega)$ by \eqref{intKuf}, we have condition \eqref{martincritnec}.
\end{proofof}
\noindent\textbf{Remark.} For part (A) of Theorem \ref{gaugecrit} and Corollary \ref{cor}, if $\Omega$ is a bounded
$C^{1,1}$ domain, or a bounded Lipschitz domain with sufficiently small Lipschitz constant, then $G\chi_{\Omega} \approx m$ (see, for instance, \cite{AAC}, Theorem 1.1 and Remark 1.2(i)).
Hence, $\int_\Omega M(x, z) \, dx\le C$, where $C$ does not depend on $z \in\partial \Omega$. Then one can replace $\chi_K$ above with $\chi_{\Omega}$ and obtain that $u_f \in L^1 (\Omega, dx)$ with
\[ \int_{\Omega} u_f (x) \, dx \leq C \int_{\partial \Omega} f(z) \, dH^{x_0} (z)
+ C \int_{\partial \Omega} e^{CM^* (m \omega) (z)} \, f(z) \, dH^{x_0} (z) . \]
In the same way that Theorem \ref{mainu-harm} generalizes Theorem \ref{mainufest}, there is a complete analogue of Theorem \ref{gaugecrit} for solutions of equation \eqref{u-harm-int}, with an arbitrary positive harmonic function $h$ in place of $Pf$. It gives sufficient and matching necessary conditions for the existence of solutions whose pointwise estimates are provided in Theorem \ref{mainu-harm}. The primary difference in this case is that $\mu_h$ is not necessarily zero on the set of irregular points of $\partial\Omega$. Hence we need to consider
\begin{equation}\label{phi-def}
\begin{aligned}
\varphi(z) & = \liminf_{x \rightarrow z, \, x\in\Omega} \max(1, G(x, x_0)), \\
\psi(z) & = \limsup_{x \rightarrow z, \, x\in\Omega} \, \max(1, G(x, x_0)) ,
\end{aligned}
\end{equation}
for $z \in \partial \Omega$.
Note that $\varphi = \psi =1$ at regular boundary points. The following result is a generalization of Lemma \ref{conv-lemma}, which allows us to control the behavior of $\varphi $ and $\psi$ at irregular points in a uniform domain.
\begin{Lemma}\label{conv-lemma-alt}
Suppose $\Omega \subset \mathbb{R}^n$ is a bounded uniform domain, for $n \geq 2$.
Suppose $\mu$ is a finite Borel measure with compact support in $\Omega$. Let $z \in \partial \Omega$.
Then
\begin{equation}\label{min-thin3a}
1\le \varphi(z) \le \psi(z) \le \kappa \,
\varphi(z) \le \kappa \, C_1 , \qquad z \in \partial \Omega ,
\end{equation}
for constants $\kappa$ and $C_1$, where $\kappa$ depends only on $\Omega$ and $C_1$ depends only on ${\rm dist} (x_0, \partial \Omega)$. Moreover, for all $z \in \partial\Omega$,
\begin{equation} \label{min-thin3}
\begin{aligned}
\limsup_{x \rightarrow z, \, x\in\Omega} \frac{G \mu (x)}{m(x)} = \psi(z) M^*\mu(z)
& \leq \kappa \varphi(z) M^*\mu(z) \\ &= \kappa \liminf_{x \rightarrow z, \, x\in\Omega} \frac{G \mu (x)}{m(x)} .
\end{aligned}
\end{equation}
\end{Lemma}
\begin{proof} The inequalities $1 \leq \varphi (z) \leq \psi(z)$ are trivial. The inequality $\psi (z) \leq \kappa \varphi(z)$ follows from inequality (\ref{liminf-limsup}) with $y=x_0$ and the observation that $\max(1, G(x, x_0)) = G(x, x_0)/m(x)$. Since $x\rightarrow z$,
we may assume that $|x-x_{0} | \geq c_1$ for any $c_1 < \text{dist} \, (x_0, \partial \Omega)$, for $x$ close enough to $z$. Then
\[
G(x, x_0) \le c(n) \, |x-x_0|^{2-n} \le c(n) \, c_1^{2-n} ,
\]
where we suppose again that $n\ge 3$ (the case $n=2$ is treated in a similar way). Hence,
\[
\psi (z) \le C_1=\max \left(1, c(n) \, [\text{dist} \, (x_0, \partial \Omega)]^{2-n}\right) , \quad
\text{for all} \, \, z\in \partial \Omega ,
\]
and consequently \eqref{min-thin3a} holds.
To prove \eqref{min-thin3},
note that by \eqref{min-thin2},
\[ \limsup_{x \rightarrow z, \, x\in\Omega} \frac{G \mu (x)}{m(x)} = \limsup_{x \rightarrow z, \, x\in\Omega} \frac{G(x, x_0)}{m(x)} \, \lim_{x \rightarrow z, \, x\in\Omega} \frac{G \mu (x)}{G(x, x_0)} = \psi(z) M^* \mu (z) \]
and
\[ \liminf_{x \rightarrow z, \, x\in\Omega} \frac{G \mu (x)}{m(x)} =
\liminf_{x \rightarrow z, \, x\in\Omega} \frac{G(x, x_0)}{m(x)} \, \lim_{x \rightarrow z, \, x\in\Omega} \frac{G \mu (x)}{G(x, x_0)} = \varphi(z) M^* \mu (z) . \]
Hence, \eqref{min-thin3} is immediate from \eqref{min-thin3a}.
\end{proof}
\begin{Thm} \label{u_h-exist} Suppose $ \Omega \subset \mathbb{R}^n$ is a bounded uniform domain, $\omega$ is a locally finite Borel measure on $\Omega$, and $h$ is a positive
harmonic function in $\Omega$. Let $x_0 \in \Omega$ be the reference point in the definition of Martin's kernel. Let $m(x) = \min (1, G(x,x_0))$, and let $\mu_h$ be the Martin's representing measure for $h$.
(A) There exists $C>0$ ($C$ depending only on $\Omega$ and $\Vert T \Vert$) such that if $\Vert T \Vert <1$ (equivalently, (\ref{equivnormTless1}) holds with $\beta <1$) and
\begin{equation} \label{martin-suff}
\int_{\partial \Omega} e^{C \, \varphi (z)\, M^* (m \omega)(z)} \, d\mu_h (z)< \infty ,
\end{equation}
then $u_h = \sum_{j=0}^{\infty} T^j h \in L^1_{loc} (\Omega, dx)$ is a positive solution to (\ref{u-harm-int}).
(B) If $u \in L^1_{loc} (\Omega, dx) $ is a positive solution of \eqref{u-harm-int}, then $\Vert T \Vert \leq 1$ and
\begin{equation} \label{martin-nec}
\int_{\partial \Omega} e^{\psi(z) \, M^* (m \omega)(z)} \, d\mu_h(z) < \infty .
\end{equation}
\end{Thm}
\begin{proof}
The proof follows the lines of the proof of Theorem \ref{gaugecrit}, so we only sketch the differences. Let $K \subseteq \Omega$ be compact with $|K|>0$. Replacing $Pf$ with $h$, we obtain
\begin{equation} \label{martin2-alt}
\int_K u_h (x) \, dx = \int_K h (x) \, dx + \int_{\Omega} u_K (y) h(y) \, d\omega (y)\end{equation}
instead of \eqref{intKuf}. Using Martin's representation \eqref{martin-rep} instead of
\eqref{pf-rep-martin},
\begin{equation} \label{uKhdomega}
\int_{\Omega} u_K (y) h(y) \, d\omega (y) = \int_{\partial \Omega} \int_{\Omega} M(y,z) u_K (y) \, d \omega (y) \, d \mu_h (z) .
\end{equation}
For part (A), it suffices to show that $u_K h \in L^1 (\Omega, d\omega)$.
We claim that
\begin{equation} \label{Mu0est-alt}
\int_{\Omega} M(y,z) u_K (y) \, d \omega (y) \leq C_K \, e^{C \, \varphi(z) \, M^* (m\omega) (z)}, \quad z \in \partial \Omega,
\end{equation}
which replaces \eqref{Mu0est}, and completes the proof of (A). To prove \eqref{Mu0est-alt}, we can assume $\omega$ is compactly supported by the exhaustion process above. Choose a sequence of points $x_j$ in $\Omega$ converging to $z$, such that
\[ \lim_{j \rightarrow \infty} \frac{G(m \omega)(x_j)}{m(x_j)} = \liminf_{w \rightarrow z, \, w \in \Omega} \frac{G(m\omega)(w)}{m(w)}. \]
Then by \eqref{min-thin} with $d \mu= u_K d \omega$, \eqref{GuKomegaest}, and \eqref{min-thin3} with $\mu = m \omega$,
\begin{align*}
\int_{\Omega} M(y,z) u_K (y) \, d \omega (y) & = \lim_{j \rightarrow \infty} \frac{G(u_K\omega)(x_j)}{G(x_j, x_0)} \\
& \leq \liminf_{j \rightarrow \infty} C_K e^{C G(m\omega)(x_j)/m(x_j)} \\
& = C_K e^{C \varphi(z) M^* (m\omega) (z) } .
\end{align*}
For part (B), equation \eqref{u-harm-int} and Schur's Lemma show that $\Vert T \Vert \leq 1$, as in Theorem \ref{gaugecrit}. If $u \in L^1_{loc} (\Omega, dx)$, then the minimal solution $u_h$ also belongs to $L^1_{loc} (\Omega, dx)$ (see the remarks before Definition \ref{qmkernel}). We claim that the following analogue of \eqref{M-claim} holds:
\begin{equation} \label{M-claim-alt}
e^{\psi(z) M^* (m \omega) (z)} \leq c_K^{-1} \kappa C_1 \, \int_{\Omega} M(y,z) u_K (y) \, d \omega (y) + C_K c_K^{-1},
\end{equation}
for all $z \in \partial \Omega$, where $C_1\ge 1$ is the constant in \eqref{min-thin3a}, which depends only on $x_0$ and $\Omega$. Assuming this claim, then \eqref{uKhdomega} implies \eqref{martin-nec} since $u_K h \in L^1 (\Omega, d \omega)$ by \eqref{martin2-alt}. To prove \eqref{M-claim-alt}, let $x_j$ be a sequence of points such that
\[ \lim_{j \rightarrow \infty} \frac{G(m \omega)(x_j)}{m(x_j)} = \limsup_{w \rightarrow z, \, w \in \Omega} \frac{G(m\omega)(w)}{m(w)}. \]
By \eqref{min-thin3} with $d\mu = m\, d\omega$, and recalling that $G(m \omega) = Tm$,
\begin{align*}
e^{\psi(z) M^* (m \omega) (z)} & \leq \lim_{j \rightarrow \infty} e^{Tm(x_j)/m(x_j)} \\
& \leq \limsup_{j \rightarrow \infty} c_K^{-1} \frac{G(u_K \omega) (x_j)}{m(x_j)} + C_K c_K^{-1},
\end{align*}
by \eqref{Tm-m} with $c=1$. By \eqref{min-thin3} with $\mu= u_K \omega$,
\[ \limsup_{j \rightarrow \infty} \frac{G(u_K \omega) (x_j)}{m(x_j)}= \psi (z) M^* (u_K \omega) (z) \leq \kappa C_1 M^* (u_K \omega) (z) , \]
which establishes \eqref{M-claim-alt}.
\end{proof}
\section{Nonlinear elliptic equations of Riccati type}\label{riccati}
In this section we treat equation \eqref{nonlineareqn-1}.
The definition of solutions of \eqref{nonlineareqn-1} is consistent with our approach
in the previous sections.
\begin{Def}\label{defveryweakriccati} A nonnegative function $v \in W^{1,2}_{loc} (\Omega) $ is a solution of (\ref{nonlineareqn-1}) if
$v$ is a weak solution in $\Omega$, i.e.,
\begin{equation}\label{weakriccati}
\int_{\Omega} \nabla v \cdot \nabla h \, dx = \int_{\Omega} |\nabla v|^2 h \, dx + \int_{\Omega} h \, d\omega, \,\,\, \mbox{for all} \,\,\, h \in C^\infty_0 (\Omega),
\end{equation}
and $v$ has a superharmonic representative (denoted also by $v$) in $\Omega$ whose
greatest harmonic minorant is the zero function.
\end{Def}
Since $v \in W^{1,2}_{loc} (\Omega) $, it is easy to see that \eqref{weakriccati}
is equivalent to
\begin{equation}\label{ric-eq-1}
- \triangle v = |\nabla v|^2 + \omega \quad \mbox{in} \, \, \, \, D^{\, \prime}(\Omega),
\end{equation}
i.e., $v$ is
a distributional solution in $\Omega$. In other words, by the Riesz decomposition theorem (\cite{AG}, Sec. 4.4),
$|\nabla v|^2 + \omega$ is the Riesz measure associated with
$-\triangle v$, and $v$ satisfies the integral equation
\begin{equation} \label{integralformmeasure}
v = G (|\nabla v|^2 + \omega) \,\, \hbox{in} \,\, \Omega.
\end{equation}
In bounded Lipschitz domains, \eqref{integralformmeasure} is equivalent to $v$ being a very weak solution of \eqref{nonlineareqn-1} in the sense of \cite{MR}.
Via the relation $v=\log u$, solutions $v$ of \eqref{nonlineareqn-1} correspond formally to solutions $u$ of
\eqref{ufeqn} with $f=1$, i.e.,
\begin{equation}\label{dirichlet}
\left\{ \begin{aligned}
-\triangle u & = \omega \, u, \, \, & u > 0 \quad &\mbox{in} \, \, \Omega, \\
u & = 1 \, \, &\mbox{on} \, \, \partial \Omega.
\end{aligned}
\right.
\end{equation}
The minimal solution $u_1$ to \eqref{dirichlet} (the gauge) is given by \eqref{gauge-def}.
Earlier results on \eqref{nonlineareqn-1}
were obtained in \cite{HMV}, where the problem was posed of
finding precise conditions on the boundary behavior of $\omega$ that ensure the existence of solutions.
The precise relation between solutions to \eqref{dirichlet} and \eqref{nonlineareqn-1} is complicated, as discovered by Ferone and Murat (see \cite{FM1}--\cite{FM3} or Remark 4.2 in \cite{FV2}). In the special case of smooth domains and absolutely continuous $\omega$, the problem was studied by the authors in \cite{FV2}, where the condition of the exponential integrability of the balayage of $m \, \omega$ appeared for the first time. In that setup, it was shown that if $u_1$ is the minimal solution of (\ref{dirichlet}), then $v = \log u_1$ is a solution of (\ref{nonlineareqn-1}). However, if $v$ is a solution to (\ref{nonlineareqn-1}) then $u=e^v$ is in general only a supersolution to (\ref{dirichlet}).
In Theorem \ref{riccatithm}, we treat general measures $\omega$ and uniform domains $\Omega$ based on the results of the previous sections. We take this opportunity to give further details on some points in the arguments presented in \cite{FV2}, Sec. 4. We also improve the constant in the exponent of the necessary condition (exponential integrability of the balayage).
\begin{proofof} Theorem \ref{riccatithm}. First suppose that $\Vert T \Vert<1$ and (\ref{martincritsuff-g}) holds with sufficiently large $C>0$. By Corollary~\ref{cor}, the Schr\"odinger equation (\ref{dirichlet}) has a positive solution $ u= 1 + \mathcal{G} \omega$. (This solution was called $u_1$ in the statement of Corollary~\ref{cor}.) Then $u \in L^1_{loc} (\Omega, d\omega)$ and $u$ satisfies the integral equation $u = 1 + G(\omega u)$. Therefore $u: \Omega \to [1, +\infty]$ is defined everywhere as a positive superharmonic function in $\Omega$ and hence is quasi-continuous by the known properties of superharmonic functions.
In particular, the infinity set $E=\{x\in \Omega: \, u(x)=+\infty\}$ has zero capacity, $\text{cap}(E)=0$, and
$u \in W^{1,p}_{loc}(\Omega)$ when $p< \frac{n}{n-1}$. In fact, $u \in W^{1,2}_{loc}(\Omega)$ as shown in \cite{JMV}, Theorem 6.2,
but the proof of this stronger property is more involved, and it will not be used below.
Define $d \mu = -\triangle u = \omega \, u$, where a solution $u \in L^1_{loc}(\Omega, \omega)$
to \eqref{dirichlet}
is understood as in \S \ref{sec2} above. Notice that $u = \frac{d\mu}{d \omega}$
is the Radon--Nikodym derivative defined $d\omega$-a.e.
Let $v=\log u$. Then $0\leq v < +\infty$ $d\omega$-a.e., $v$ is superharmonic in $\Omega$ by Jensen's inequality, and
$v \in W^{1,2}_{loc}(\Omega)$ (see \cite{HKM}, Theorem 7.48; \cite{MZ}, Sec. 2.2).
We claim that
\eqref{weakriccati} holds. We will apply the integration by parts formula
\begin{equation}\label{by-parts}
\int_\Omega g \, d \rho = - \langle g, \triangle r \rangle= \int_\Omega \nabla g \cdot \nabla r \, dx,
\end{equation}
where $g\in W^{1,2}(\Omega)$ is compactly supported and quasi-continuous in $\Omega$, and $\rho = -\triangle r$ where $r \in W^{1,2}_{loc}(\Omega)$ is superharmonic (see, e.g., \cite{MZ}, Theorem 2.39 and Lemma 2.33). This proof would simplify if we could apply (\ref{by-parts}) with $g = \frac h u, \rho = \mu$, and $r=u$, for $h \in C^{\infty}_0 (\Omega)$. However, we do not
use the property $u \in W^{1,2}_{loc}(\Omega)$, so we need an approximation argument. For $k \in \mathbb{N}$, let
\[ u_k = \min (u, \, e^k), \quad v_k=\min (v, \, k), \quad \mbox{and} \quad \mu_k = - \triangle u_k.\]
Clearly $u_k$ and $v_k$ are superharmonic, hence $\mu_k $ is a positive measure. Moreover, $u_k$ and $v_k$ belong to $W^{1,2}_{loc}(\Omega)\bigcap L^\infty(\Omega)$ (see \cite{HKM}, Corollary 7.20).
Let $h\in C^\infty_0(\Omega)$.
We invoke (\ref{by-parts}) with $g= \frac {h}{u_k}, \rho=\mu_k$, and
$r=u_k$. Note that $u_k\ge 1$, $g $ is compactly supported since $h$ is, and $ g \in W^{1,2} (\Omega)$ since $u_k\in W^{1,2}_{loc} (\Omega)$ and $h \in W^{1,\infty}(\Omega)$ is compactly supported. Then by (\ref{by-parts}), we have
\begin{equation}\label{approx-v_k}
\begin{aligned}
\int_\Omega \frac {h}{u_k} \, d \mu_k & = \int_{\Omega} \nabla \left(\frac {h}{u_k}\right) \cdot \nabla u_k \, dx\\
& = \int_\Omega \frac {\nabla h}{u_k} \cdot \nabla u_k \, dx - \int_\Omega \frac {|\nabla u_k|^2}{u_k^2}
h \, dx \\
& = \int_\Omega \nabla h \cdot \nabla v_k \, dx - \int_\Omega |\nabla v_k|^2 \, h \, dx.
\end{aligned}
\end{equation}
As mentioned above, $v \in W^{1,2}_{loc} (\Omega)$, and consequently
$\nabla v_k = \nabla v$ a.e. on $\{ v<k\}$, and $\nabla v_k = 0$ a.e. on $\{ v\ge k\}$ (see \cite{MZ}, Corollary 1.43). Hence,
\begin{equation*}
\begin{aligned}
\lim_{k \rightarrow \infty} \int_\Omega \nabla h \cdot \nabla v_k \, dx &= \int_\Omega \nabla h \cdot \nabla v \, dx , \\
\lim_{k \rightarrow \infty} \int_\Omega |\nabla v_k|^2 \, h \, dx & = \int_\Omega |\nabla v|^2 \, h \, dx
\end{aligned}
\end{equation*}
by the dominated convergence theorem.
Since $u$ is superharmonic, $u$ is lower semi-continuous, so the set $\{ x \in \Omega: u(x) > e^k \} \equiv \{u>e^k\}$ is open, and the measure $\mu_k = - \triangle u_k$ is supported on the closed
set $\{u \le e^k\}$ where $u=u_k$. Hence $u=u_k$ $d \mu_k$-a.e., and
\[
\int_\Omega \frac {h}{u_k} \, d \mu_k=\int_\Omega \frac {h}{u} \, d \mu_k.
\]
We next show that, for any continuous function $h$ with compact support in $\Omega$,
\begin{equation}\label{claim}
\lim_{k\to \infty} \int_{\Omega} \frac{h}{ u} \, d \mu_k = \int_\Omega \frac h u \, d \mu.
\end{equation}
Without loss of generality
we assume here that $h\ge 0$. Otherwise we apply the argument below to $h_{+}$ and
$h_{-}$ separately.
Notice that $u_k \uparrow u$, and consequently $\mu_k \to \mu$ weakly in $\Omega$,
by the weak continuity property (see, for instance, \cite{TW}
in a rather more general setting), i.e.,
\begin{equation*}
\lim_{k\to \infty} \int_\Omega \phi \, d \mu_k = \int_\Omega \phi \, d \mu
\end{equation*}
for all continuous functions $\phi$ with compact support in $\Omega$. It follows (see \cite{Lan}, Lemma 0.1) that
\begin{equation}\label{lsc}
\liminf_{k\to \infty} \int_\Omega \phi \, d \mu_k \ge \int_\Omega \phi \, d \mu
\end{equation}
for all lower semicontinuous functions $\phi$ with compact support in $\Omega$. The function $\frac{h}{ u}$ is obviously upper semicontinuous with compact support, so by \eqref{lsc} applied to $-\frac{h}{ u}$, we deduce
\begin{equation}\label{upper}
\limsup_{k\to \infty} \int_{\Omega} \frac{h}{ u} \, d \mu_k \le \int_\Omega \frac h u \, d \mu.
\end{equation}
To prove an estimate in the opposite direction, we claim that $ \mu_k \ge \mu$ on the closed
set $F_k=\{ x \in \Omega: \, u(x)\le e^k\}$.
It is enough to prove that
\begin{equation}\label{claim-u_k}
\mu_k (K) \ge \mu(K), \quad \text{for every compact set} \, \, K \subset F_k.
\end{equation}
We verify \eqref{claim-u_k} by using another approximation argument based on a version of Lusin's theorem for certain Green potentials (the so-called semibounded
potentials, see \cite{Fug}, Sec. 2.6). Notice that $u = G \mu+1$,
where $d \mu =u \, d \omega$, and $u<\infty$ $d \omega$-a.e., as discussed in
\S \ref{sec2}. Moreover,
$u<\infty$ on $\Omega\setminus\!E$, i.e., outside
the infinity set $E$, which is obviously a Borel set such that $\mu(E)=0$
since $\omega(E)=0$.
This is also a consequence of the fact that $E$ is a set of zero capacity,
and $\omega(E)\le \text{cap}(E)$, which follows immediately from \eqref{equivnormTless1}. In fact, the condition $\mu(E)=0$ is equivalent to absolute
continuity of $\mu$ with respect to capacity, i.e.,
$\text{cap}(K)=0 \Longrightarrow \mu(K)=0$ for all compact sets $K\subset \Omega$.
Consequently (see \cite{Fug}, Theorem 2.6; \cite{Hel}, Theorem 4.6.3), there exists an increasing sequence of compactly supported measures $\mu^j$ such that $u^j=G \mu^j +1\in C(\Omega)$, so that $\mu^j(K) \uparrow \mu(K)$, for every compact set $K\subset\Omega$, and $G \mu^j\uparrow G \mu$ on $\Omega$, as $j\to \infty$. It follows that $u^j\uparrow u$,
and so $\min(u^j, e^k) \uparrow \min(u, e^k)=u_k$ as $j\to \infty$, which yields that the corresponding Riesz measures
$\mu_k^j$ associated with the superharmonic functions $\min(u^j, e^k)$ have the property
$\mu_k^j \to \mu_k$ weakly in $\Omega$ as $j\to \infty$.
Without loss of generality we may assume that actually $u^j(x)<u(x)$ for all $x\in \Omega$. Otherwise
we replace $u^j $ with $\epsilon_j \, u^j$, where $\epsilon_j\uparrow 1$ is a strictly increasing
sequence of positive numbers. Then all the properties of $u^j$ remain true.
Obviously, $F_k\subset G^j_k$ where $G^j_k=\{ x \in \Omega: \, u^j(x)< e^k\}$ is an open set
for every $j, k \in \mathbb{N}$, since $u^j \in C(\Omega)$. Clearly, $u^j=\min(u^j, e^k)$ on $G^j_k$, and so
$\mu^j$ coincides with $\mu_k^j$ on $G^j_k$. In particular, $\mu_k^j(K)=\mu^j(K)$
for every compact set
$K \subseteq F_k \subset G_k^j$.
Since $\mu_k^j \to \mu_k$ weakly, it follows
by \eqref{lsc} applied to the lower semicontinuous function
$-\chi_K$ that
\[
\limsup_{j\to \infty} \mu_k^j(K) \le \mu_k(K).
\]
Hence,
\[
\mu(K)=\lim_{j \to \infty} \mu^j(K) =\limsup_{j\to \infty} \mu_k^j(K) \le \mu_k(K),
\]
which proves \eqref{claim-u_k}. Consequently,
\begin{equation}\label{usc-appl}
\begin{aligned}
\liminf_{k \to \infty} \int_{\Omega} \frac{h}{ u} \, d \mu_k & \ge \liminf_{k \to \infty} \int_{F_k} \frac{h}{ u} \, d \mu_k
\\ & \ge \liminf_{k \to \infty} \int_{F_k} \frac{h}{ u} \, d \mu = \int_{\Omega\setminus E} \frac{h}{ u} \, d \mu,
\end{aligned}
\end{equation}
where $E$ is the infinity set of $u$. As mentioned above, $\mu(E)=0$, so \eqref{usc-appl}
actually yields
\[
\liminf_{k \to \infty} \int_{\Omega} \frac{h}{ u} \, d \mu_k \ge \int_{\Omega} \frac{h}{ u} \, d \mu.
\]
Combining the preceding inequality with \eqref{upper} proves \eqref{claim}.
In fact, $\mu_k$ coincides with $\mu$ on the
set $G_k=\{x\in \Omega: \, \, u(x)<e^k\}$, i.e.,
\begin{equation}\label{claim-great}
\mu_k (K) = \mu(K), \quad \text{for every compact set} \, \, K \subset G_k.
\end{equation}
To prove \eqref{claim-great}, notice that the set $G_k$
is finely open (see \cite{AG}, Sec. 7.1).
Let $U_k=\{x\in \Omega: \, \, u(x)>e^k\}$, and $\lambda=\chi_{U_k} \mu$.
Then clearly $G \lambda\le G \mu=u$ in $\Omega$, and so
$G \lambda <e^k$ on $G_k$. Moreover,
$\lambda (G_k)=0$ since $U_k$ and $G_k$ are disjoint. Hence by \cite{Fug},
Theorem 8.10, $G \lambda$ is finely harmonic on $G_k$.
On the other hand, let
\[
\tilde \mu = \mu_k -\mu|_{F_k},
\]
where $\mu_k$ is supported on the closed set $F_k=\Omega\setminus U_k$. By \eqref{claim-u_k}, $\tilde \mu$ is a nonnegative measure on $\Omega$.
Clearly, $G \tilde \mu \le G \mu_k=u_k\le e^k$ in $\Omega$.
Since $u_k-u=0$ on $G_k$, it follows that
\[
G \tilde \mu=u_k-u + G \lambda
\]
is finely harmonic on $G_k$.
Hence applying
\cite{Fug}, Theorem 8.10 in the opposite direction, we deduce that $\tilde\mu(G_k)=0$, so
$\tilde \mu(K)=\mu_k(K) - \mu(K) =0$
for every compact set $K\subset G_k$. The proof of \eqref{claim-great}
is complete.
As noted above, $u = \frac{d\mu}{d \omega}$
is the Radon--Nikodym derivative defined $d\omega$-a.e., and $\mu(E)=\omega(E)=0$, where $E= \{ x \in \Omega: u(x)=\infty\}$,
hence
\[
\int_\Omega h \, d\omega = \int_\Omega \frac{h}{u} \, d \mu
= \lim_{k \to \infty} \, \int_\Omega \frac {h}{u_k} \, d \mu_k.
\]
Passing to the limit as $k \to \infty$ in \eqref{approx-v_k}, we obtain
\begin{equation*}
\begin{aligned}
\int_\Omega h \, d\omega & = \int_\Omega \nabla h \cdot \nabla v \, dx - \int_\Omega |\nabla v|^2 \, h \, dx ,
\end{aligned}
\end{equation*}
for all $h \in C^\infty_0(\Omega)$,
which justifies equation \eqref{weakriccati}.
By the Riesz decomposition theorem,
\begin{equation}\label{integral-form}
v = G(-\triangle v) + g = G(|\nabla v|^2 +\omega) + g,
\end{equation}
where $g$ is the greatest harmonic minorant of $v$. Since $v \geq 0$, a harmonic minorant of $v$ is $0$, so $g \ge 0$. It follows from (\ref{integral-form}) and the equation $u = G(u\omega) + 1$ that
$$
g \le v=\log u = \log \left (G (u \omega) + 1\right)\le G (u \omega).
$$
Since $G(u \omega)$ is a Green potential, the greatest harmonic minorant of $G (u\omega)$ is $0$, therefore $g=0$. Hence $v$ is a solution of (\ref{nonlineareqn-1}). This completes the proof of Theorem \ref{riccatithm} (A).
Conversely, suppose $v\in W^{1,2}_{loc}(\Omega)$ is a solution of equation (\ref{nonlineareqn-1}), that is, $v = G (|\nabla v|^2 + \omega)$. Then $v \geq 0$ is superharmonic,
$d\nu = |\nabla v|^2 dx + d\omega$ is the corresponding Riesz measure,
and
\eqref{ric-eq-1} holds. Let $v_k = \min\, (v, \, k)$ and $\nu_k = -\triangle v_k$, for $k=1,2, \ldots$. Clearly, $v_k\in W^{1,2}_{loc}(\Omega)\bigcap L^\infty(\Omega)$ is superharmonic.
Next, as in the proof of \eqref{claim-u_k} above, we observe that
$\nu_k \ge \nu$ on the set $F_k=\{x\in \Omega: \, v(x)\le k\}$. To verify this claim, it is enough to check that
\begin{equation}\label{claim-v_k}
\nu_k (K) \ge \nu(K), \quad \text{for every compact set} \, \, K \subseteq F_k.
\end{equation}
The preceding inequality is deduced again using the approximation argument based on \cite{Hel}, Theorem 4.6.3. It requires
the existence of a Borel set $E\subset \Omega$ such that
$G \nu<\infty$ on $\Omega\!\setminus\!E$, and $\nu(E)=0$. Let $E=\{x \in \Omega: \, v(x)=\infty\}$.
Then $E$ is a Borel set and $\text{cap}(E)=0$.
We need to show that
$\nu(E)=0$.
It is known (see \cite{HMV}, Lemma 2.1) that since $v\in W^{1,2}_{loc}(\Omega)$ is a solution to \eqref{ric-eq-1}, then
\[
\int_\Omega h^2 d\nu=\int_\Omega |\nabla v|^2 h^2 dx + \int_\Omega h^2 d\omega \le 4 \int_\Omega |\nabla h|^2 dx,
\]
for all $h \in C^\infty_0(\Omega)$. It follows immediately that $\nu(F)\le 4 \, \text{cap}(F)$
for all compact (and hence Borel) sets $F$. Since $\text{cap}(E)=0$, we see that
$\nu(E)=0$, which completes the proof of \eqref{claim-v_k}.
We remark that actually $\nu_k = \nu$ on $G_k$,
where $G_k=\{x \in \Omega: \, v(x)<k\}$,
exactly as was shown above for $\mu_k = \mu$ on $G_k$ (with $e^k$ in place of $k$). However, we do not need this fact in the remaining part of the proof.
Since $\nabla v = \nabla v_k$ $dx$-a.e. on $F_k$, and $\nabla v_k=0$ $dx$-a.e. outside $F_k$,
it follows from \eqref{claim-v_k} that
\begin{equation}\label{just0}
-\triangle v_k =\nu_k \ge \chi_{F_k} \nu = |\nabla v_k|^2 + \chi_{F_k}\, \omega,
\end{equation}
as measures. In other words,
\begin{equation}\label{just1}
-\triangle v_k=\nu_k=|\nabla v_k|^2 + \chi_{F_k}\, \omega +\lambda_k,
\end{equation}
where $\lambda_k$ is a nonnegative measure in $\Omega$ supported on $F_k$.
In fact, as discussed above, $\lambda_k=0$ outside the set $\{x\in \Omega: \, v(x)=k\}$.
Let $u = e^v \geq 1$, $u_k=e^{v_k}$ and $\mu_k=-\triangle u_k$. Clearly,
$\nabla u_k=\nabla v_k \, e^{v_k}$, so $u_k\in W^{1,2}_{loc}(\Omega)\bigcap L^\infty(\Omega)$.
We claim that
\begin{equation}\label{just2}
\mu_k=-\triangle u_k = -\triangle v_k \, e^{v_k} -|\nabla v_k|^2 \, e^{v_k} \ge 0.
\end{equation}
To prove (\ref{just2}), we use integration by parts
(\ref{by-parts}) with $g=h e^{v_k}$, where $h \in C^\infty_0(\Omega)$, and
$v_k$ in place of $r$:
\begin{equation*}
\begin{aligned}
\int_\Omega h \, e^{v_k} \, d \nu_k & =
\int_\Omega \nabla (h \, e^{v_k}) \cdot \nabla v_k \, dx
\\ & = \int_\Omega e^{v_k} \, \nabla h \cdot \nabla v_k \, dx + \int_\Omega h \, |\nabla v_k|^2 \, e^{v_k} \,dx \\
& = \int_\Omega \, \nabla h \cdot \nabla u_k \, dx + \int_\Omega h \, |\nabla v_k|^2 \, e^{v_k} \,dx
\\
&
= \int_\Omega h \, d \mu_k + \int_\Omega h \, |\nabla v_k|^2 \, e^{v_k} \,dx.
\end{aligned}
\end{equation*}
Hence, first applying \eqref{just2} and then \eqref{just1},
we obtain
\begin{equation*}
\begin{aligned}
\langle h, \mu_k\rangle & =\int_\Omega h \, d \mu_k \\
& = \int_\Omega h \, e^{v_k} \, d \nu_k
- \int_\Omega h \, | \nabla v_k |^2 \, e^{v_k} \, dx \\
& =\int_\Omega h \, e^{v_k} \ \chi_{F_k} \, d \omega + \int_\Omega h \, e^{v_k} \ d \lambda_k \\
& =\int_\Omega h \, e^{v} \ \chi_{F_k} \, d \omega + \int_\Omega h \, e^{v} \ d \lambda_k.
\end{aligned}
\end{equation*}
From the preceding equation it follows that, for all $h\in C^\infty_0(\Omega)$, $h \ge 0$,
\begin{equation}\label{mu_k-G_k}
\langle h, \mu_k\rangle \ge \int_\Omega h \, u \ \chi_{F_k} \, d \omega \ge 0.
\end{equation}
Since $v_k$, and hence $u_k$, is lower semicontinuous,
it follows that $u_k$ is superharmonic in $\Omega$.
Clearly,
$u= \lim
_{k \to +\infty} u_k$ is a superharmonic function in $\Omega$ as the limit of the
increasing sequence of superharmonic functions $u_k$, since $u=e^v\not\equiv\infty$.
Moreover, as mentioned above, the infinity set $E$ on which $u=e^v=\infty$
has zero capacity, and
$\omega(E)\le \nu(E)\le 4 \, \text{cap}(E)$, so
$\omega(E)=0$.
Since $-\triangle u_k=\mu_k \to \mu$ weakly in $\Omega$, where $\mu = - \triangle u$, passing to the limit as
$k \to \infty$ in \eqref{mu_k-G_k} and using the monotone convergence theorem on the right-hand side yields
\[
\langle h, \mu\rangle \ge \int_{\Omega\setminus E} h \, u \, d \omega =\int_{\Omega} h \, u \, d \omega \ge 0.
\]
Hence $u$ is superharmonic, and
\begin{equation}\label{just3}
-\triangle u \ge \omega \, u \quad \text{in} \, \, \Omega
\end{equation}
in the sense of measures.
It follows from (\ref{just3}) that
$\tilde \omega = - \triangle u-\omega u$ is a non-negative measure in $\Omega$, so by the Riesz decomposition theorem
$$
u = G(-\triangle u) + g = G(\omega u) + G \tilde \omega+ g \geq G(\omega u) + g,
$$
where $g$ is the greatest harmonic minorant of $u$. Since $u \ge 1$, i.e.,
$1$ is a harmonic minorant of $u$, it follows that $g \ge 1$, and consequently,
\begin{equation}\label{iter}
u \ge G(\omega u ) + 1 = Tu + 1,
\end{equation}
for $T$ defined by (\ref{defT}). Since $u \ge Tu$, it follows by Schur's test that
$\|T\|_{L^2(\Omega, \omega) \to L^2(\Omega, \omega)} \le 1$, and hence
(\ref{equivnormTless1})
holds with $\beta =1$.
Iterating (\ref{iter}) and taking the limit, we see that
$$
\phi \equiv 1 + \mathcal{G} \omega = 1 + \sum_{j=1}^{\infty} G_j \omega= 1 + \sum_{j=1}^{\infty} T^j 1 \le u < +\infty \, \, \text{a.e.},
$$
and
$$
\phi = G(\omega \phi ) + 1.
$$
Hence $\phi$ is a positive solution of (\ref{dirichlet}). Thus (\ref{martincritsuff-g}) holds by Corollary \ref{cor} (B). This completes the proof of Theorem \ref{riccatithm} (B).
\end{proofof}
\noindent\textbf{Remarks.} 1. As in \cite{FV2} for smooth domains and $\omega \in L^1_{loc} (\Omega)$,
our sufficiency results hold in uniform domains for signed measures $\omega$, if $\omega$ is
replaced with $|\omega|$ both in the spectral conditions (\ref{normTless1}), (\ref{equivnormTless1}), and
conditions (\ref{martincritsuff-g}), (\ref{martincritnec-g}).
2. The lower pointwise estimates of solutions in Theorem \ref{mainufest}(B)
are still true for signed measures $\omega$, under some additional assumptions
(see \cite{GV1}). However, the upper pointwise estimates Theorem \ref{mainufest}(A)
are no longer true in general, unless we replace
$\omega$ with $|\omega|$.
3. It is still unclear under which (precise) additional assumptions on the quadratic form of $\omega$
the main existence results and upper estimates of solutions remain valid. Some results of this type are discussed in \cite{JMV}, but without
the prescribed boundary conditions.
\end{document} |
\begin{document}
\global\long\def\cond#1{\left|#1\right.}
\global\long\def\case#1#2{#1=\left\{ #2\right.}
\global\long\def\flr#1{\left\lfloor #1\right\rfloor }
\global\long\def\ceil#1{\left\lceil #1\right\rceil }
\title{Exchange of Services in Networks: Competition, Cooperation, and Fairness}
\numberofauthors{3}
\author{
\alignauthor
Leonidas Georgiadis\\
\affaddr{Dept. of ECE, AUTH, Greece, [email protected]}\\
\alignauthor
George Iosifidis\\ \affaddr{Dept. of Elec. Eng., and YINS, Yale University, [email protected]}\\
\and
\alignauthor
Leandros Tassiulas\\ \affaddr{Dept. of Elec. Eng., and YINS, Yale University, [email protected]}\\
}
\maketitle
\begin{abstract}
Exchange of services and resources in, or over, networks is attracting nowadays renewed interest. However, despite the broad applicability and the extensive study of such models, e.g., in the context of P2P networks, many fundamental questions regarding their properties and efficiency remain unanswered. We consider such a service exchange model and analyze the users' interactions under three different approaches. First, we study a centrally designed service allocation policy that yields the fair total service each user should receive based on the service it offers to the others. Accordingly, we consider a competitive market where each user determines selfishly its allocation policy so as to maximize the service it receives in return, and a coalitional game model where users are allowed to coordinate their policies. We prove that there is a unique equilibrium exchange allocation for both game theoretic formulations, which also coincides with the central fair service allocation. Furthermore, we characterize its properties in terms of the coalitions that emerge and the equilibrium allocations, and analyze its dependency on the underlying network graph. That servicing policy is the natural reference point to the various mechanisms that are currently proposed to incentivize user participation and improve the efficiency of such networked service (or, resource) exchange markets.
\end{abstract}
\section{Introduction}
\textbf{Motivation}. Today we are witnessing a renewed interest about models for exchanging services and resources in (or, over) networks, that go beyond the well-known peer-to-peer (P2P) file sharing idea. Some examples in communication networks are the WiFi sharing communities \cite{FON}, \cite{Sofia-UPN}, the mobile data sharing applications \cite{opengarden}, the commercial or community mesh networks \cite{confine}, \cite{bewifi}, and various peer-assisted services \cite{ioannidis-peer-assisted}, Fig. \ref{fig:system-model}. \rev{Similar schemes have been studied and implemented for other technological systems as well, e.g., for renewable energy sharing over smart grid \cite{gridmates}, \cite{saad-smart} where users share their energy surpluses with each other.} Finally, there is nowadays a plethora of online platforms, motivated by the sharing economy concept \cite{collaborativeconsumption}, which facilitate the exchange of commodities and services among users who, for example, are co-located, have common interests, etc, \cite{adalbdal}, \cite{getrridapp}, \cite{homeexchange}, \cite{neighborgood}, \cite{swapit}.
In essence, all the above scenarios apply the idea of collaborative consumption \cite{felson-coco-book} of underutilized resources (such as the Internet access) to networks with autonomous and self-interested nodes (or, users). \rev{Whenever a user has some idle resource, he offers it to other users who at that time have excess needs, and benefits in exchange from the resources they offer to him in the future \cite{georgiadis-netgcoop}. The goal is to exploit the nodes' complementarity in resource availability and demand, and increase the benefits for all the participants. Such models capture also more static settings where users have different preferences for the various resources, and exchange them in order to acquire those that are more valuable to them \cite{unver-book}.} There is a broad consensus that these models are of major importance for the economy, society and technological evolution \cite{collaborativeconsumption}, \cite{nytimes-sharing}. However, despite their significance and wide applicability, and although they have been subject to extensive research (e.g., in the context of P2P networks \cite{RJohariToNBilateral2011}, \cite{zhang-proportional}), some very important related questions remain unanswered.
\begin{figure}
\caption{\small{Instances of Service Exchange in Networks: a content sharing network, and an Internet sharing residential/community mesh network.}}
\label{fig:system-model}
\end{figure}
\emph{Definition and Properties of a Fair Exchange Policy.} This is one of the most critical issues in these cooperation schemes. Ideally, from a system design point of view, each user should receive service (or, resource) commensurate to its contribution. However, this is not always possible because there is an underlying graph that prescribes, for each user, the subset of the users it can serve and receive services from\footnote{\small{For example, in mesh networks the graph captures which nodes are within communication range, while in smart grid networks the graph shows which microgrids can exchange energy without significant transfer losses.}}. Additionally, there may be multiple feasible service exchange solutions that differ on the amount of service each user receives. We would prefer to select among them a \emph{fair} outcome that balances the exchanges as much as possible. The existence and the characterization of the properties of such fair policies (e.g., their dependency on the underlying graph) is an important and currently open question.
\emph{Existence and Fairness of Competitive Equilibriums}. Additionally, most often these systems are not controlled by a central entity that can exogenously impose such a fair solution. Instead, each user tries to greedily maximize its own benefit by allocating its idle resource to those users from which it expects to receive more service in return. A first question here is if such a competitive interaction among the nodes admits an equilibrium allocation, where each node cannot unilaterally improve the resource it accumulates. Also, we need to analyze how these equilibriums are affected by the graph structure and the nodes' resources. Finally, it is important to understand if such equilibriums are related to the centrally designed fair policy discussed above.
\emph{Robustness of the Fair Exchange Policy}. The latter question is related to the robustness of the fair policy: when a central designer proposes such a fair policy, is it possible for a user to deviate from it and improve his performance? More interestingly, in many cases it is possible to have a subset of users that deviate from the fair policy by forming a coalition and excluding non-members from bartering. For example, in a WiFi community a subset of users may decide to serve only each other, expecting that this will increase their benefits. Such strategies are very likely to deteriorate the overall system performance. A key challenge is to explore whether the fair policy is robust to such group deviations.
\textbf{Methodology and Contributions}. In order to shed light on these questions, we employ a general model that abstracts all the above scenarios. We consider a set of nodes, where each one has a certain idle resource that it allocates to its neighbors, and unsaturated demand for the resources of others. \rev{The model captures situations where the nodes have complementary resource availability over time, or generic static bartering markets where nodes simply have different preferences for the resources}. Neighborhood relationships are described by a bidirectional connected graph. The \emph{exchange ratio} (or, simply \emph{ratio}) of total received over allocated resource\footnote{\small{The idle resource can be the Internet bandwidth a user shares within a WiFi community during a month, the uploading capacity of a node in a P2P overlay. Similarly, the demand is the average request for additional Internet bandwidth (WiFi), the downloading capacity of a peer node, etc. Hereafter, we will use the term resource and service interchangeably.}} characterizes the performance of each node, as it quantifies the resource that it receives for each resource unit it offers.
From a system point of view, a central designer would prefer to have a vector of exchange ratios where each coordinate, that corresponds to a node, has value equal to one. Often this will not be possible due to the graph exchange constraints and asymmetries in nodes' resource availability. For that cases, the lexicographically maximum (lex-optimal), or max-min, exchange vector is a meaningful performance criterion as it is Pareto optimal and balances the exchanged resources as much as possible \cite{nace-tutorial}.
In the absence of a network controller however, we assume that each node makes greedy myopic allocation decisions so as to maximize the aggregate resource it receives in return. The interactions of the nodes give rise to a competitive market, which however differ from previous similar models \cite{osborne}, \cite{arrow-debreu}, \cite{zhang-proportional} due to the existence of the graph and the absence of side-payments (money) among the nodes (bartering). We introduce the concept of exchange equilibrium that is appropriate for this setting, characterize the equilibrium allocations, and study its relation to the max-min fair policy.
Accordingly, we assume that subset of nodes can coordinate and form coalitions exchanging resources only with each other. A coalitional graph-constrained game with non-transferable utility (NTU) is identified in the above set-up. We focus on the existence and properties of stable equilibrium allocations. Given a certain global allocation, if there is a subset of nodes that when they reallocate their own resources among themselves manage to improve the exchange ratio of \emph{at least one} node in the subset, then they have an incentive to deviate from the global allocation (and hence destabilize it). Therefore, when an allocation is in equilibrium, it should be \emph{strongly stable} and no such subset should exist.
We study the above frameworks, that differ on the assumptions about the system control and the users behavior, and find a surprising connection among them. In particular:
(\textbf{i}) We prove that there is a unique equilibrium exchange ratio vector that is a solution for the competitive market, and lies in the core of the NTU graph-constrained coalitional game, being also strongly stable. This is the max-min fair (lex-optimal) ratio vector. It reveals that a centrally designed meaningful fair solution can be reached by nodes who act independently and selfishly, and it is also robust to group deviations. This finding has many implications for the applicability of such fair policies to decentralized and autonomous graph-constrained systems.
(\textbf{ii}) We show that the equilibrium exhibits rich structure and a number of interesting properties. For example, in the equilibrium allocation there is exchange of resources only among the nodes with the lowest exchange ratios and the nodes with the highest ratios, the nodes with the second lowest ratios with the set of the second highest ratios, and so on. We also study how the exchange ratios are affected by the graph properties, such as the node degree. This latter aspect is particularly important from a network design point of view as it reveals, among others, the impact a link removal or addition has on the equilibrium. Our findings hold for any graph, and therefore they can help a controller to predict or even dictate the exchange equilibrium.
(\textbf{iii}) We provide a polynomial-time algorithm that finds the lex-optimal exchange ratio vector and the resource exchange strategies that lead to it. Hence, it can also be used to find the equilibriums of the respective competitive and coalitional games. This is a highly non-trivial task in such exchange markets, that is further compounded here due to the graph constraints.
The rest of this paper is organized as follows. In Sec. \ref{sec:Model-Notation} we introduce the model, and in Sec. \ref{sec:Central-design} we prove the existence and analyze the properties of the lex-optimal exchange policy. In Sec. \ref{sec:game-theory-frameworks} we define and solve the coalitional and the competitive games. In Sec. \ref{sec:algorithms} we provide a polynomial algorithm for computing the lex-optimal ratios. We present several numerical examples in Sec. \ref{sec:Numerical-Results}. In Sec. \ref{sec:Related} we discuss related works, and conclude in Sec. \ref{sec:Conclusions}. In the Appendix we provide the additional proofs.
\section{Model and Problem Statement} \label{sec:Model-Notation}
We consider a service exchange market that is modeled as an undirected connected graph $G=({\cal N},{\cal E})$ with node and edge set ${\cal N}$ and ${\cal E}$, respectively. Let $\mathcal{N}_{i}=\{j\,:\,(i,j)\in\mathcal{E}\}$ be the set of neighbors of node $i\in{\cal N}$, and $D_i>0$ its idle resource (endowment). Let $d_{ij}\geq0$ be the resource that node $i$ allocates to node $j\in{\cal N}_{i}$. We assume that each non-isolated node allocates all its (idle) resource\footnote{\small{\rev{For example, in P2P overlays, each node allocates all its uplink bandwidth, and in other settings it exchanges resources for which it has zero valuation (e.g., excess food).}}}, i.e.,
\begin{equation}
\sum_{j\in{\cal N}_{i}}d_{ij}=D_{i},\,\,\forall\, i\in{\cal N},\,\,\mathcal{N}_i\neq \emptyset.\label{eq:Allloc}
\end{equation}
A vector $\bm{d}=(d_{ij}) _{(i,j)\in{\cal E}}$ satisfying (\ref{eq:Allloc}) is called ``allocation''. The set of allocations is denoted by $\mathbb{D}$. Note that as long as not all nodes are isolated, i.e., $\mathcal{E}\neq \emptyset$, it holds $\mathbb{D}\neq\emptyset$.
\rev{This model captures either (i) a static setting where each user has a certain amount of a perfectly divisible resource which wishes to trade with other, more valuable to him, resources, or (ii) a dynamic setting where users have at random time instances a single unit of unsplittable excess resource which they allocate to one of their neighbors expecting similar benefits in the future. This latter dynamic setting will become more clear in the sequel.}
A vector $\bm{r}$ of received resources induced by an allocation $d\in\mathbb{D}$ is called feasible. The set of feasible received resource vectors when $\mathcal{E} \neq \emptyset$ is defined as:
\begin{equation}
\mathbb{R}=\big\{ \bm{r}=(r_{i}) _{i\in{\cal N}}:\ r_{i}=\sum_{j\in{\cal N}_{i}}d_{ji},\ i\in{\cal N},\ \bm{d}\in\mathbb{D}\big\}, \label{eq:RateSpace}
\end{equation}
where we adopt the convention that for any isolated node $i$ it is $r_i=0$. In case $\mathcal{E} = \emptyset$, we define $\mathbb{R}=\{\boldsymbol{r}:\ r_i=0, i\in \cal{N}\}$.
Throughout this work, we will be interested in the \emph{exchange ratio} (or, simply \emph{ratio}) vector $\bm{\rho}=(\rho_{i}=r_i/D_i: i\in\mathcal{N})$, where the $i^{th}$ coordinate quantifies the aggregate amount of resource that node $i$ receives per unit of resource that offers to its neighbors. \rev{Notice that, under assumption (\ref{eq:Allloc}), maximizing $\rho_i$ ensures the maximization of $r_i$}. We denote by $\mathbb{P}$ the set of all feasible ratio vectors. In the sequel, we consider three different problem formulations based on the above model.
\subsection{Fairness Framework}
\rev{In this setting, the total allocated resource is always equal to $\sum_{i\in\mathcal{N}}D_i$, and therefore the various allocations differ on how they split this amount across the nodes.} A centrally designed policy for this cooperative setting would ideally allocate to every node $i\in\mathcal{N}$ resource equal to its contribution, i.e., $r_i=D_i$. However, due to the graph that constraints resource exchanges, and the different resource endowments of the nodes, such policies will not be realizable in general. Given this, the designer would prefer to ensure the most balanced allocation.
A suitable method to achieve this goal is to employ the lexicographic optimal (or, lex-optimal) criterion, which has been extensively used for resource allocation and load balancing in communication networks \cite{georgiadislexopt02}, \cite{nace-tutorial}, \cite{boudecfairness07}. This multi-objective optimization method first increases as much as possible the allocated resource to the node with the smallest exchange ratio. Next, if there are many choices, it attempts to increase the resource allocated to the node with the second smallest exchange ratio, and so on. The resulting allocation is max-min fair, thus as balanced as possible. Next we provide the necessary definitions.
\begin{definition}
\textbf{Lexicographical order}. Let $\bm{x}$ and $\bm{y}$ be $N$-dimensional vectors, and $\phi(\bm{x})$ and $\phi(\bm{y})$ the respective $N$-dimensional vectors that are created by sorting the components of $\bm{x}$ and $\bm{y}$ respectively, in non-decreasing order. We say that $\bm{x}$ is lexicographically larger than $\bm{y}$, denoted by $\bm{x}\succ\bm{y}$, if the first non-zero component of the vector $\phi(\bm{x})-\phi(\bm{y})$ is positive. The notation $\bm{x}\succeq\bm{y}$ means that either $\bm{x}\succ\bm{y}$ or, $\bm{x}=\bm{y}$.
\end{definition}
It is easy to see that the set of received resource vectors $\mathbb{R}$ defined in (\ref{eq:RateSpace}) is compact and convex. Hence, as is shown in \cite{boudecfairness07}, there is a unique lex-optimal $\bm{r}^{*}\in\mathbb{R}$ such that, with $\bm{\rho}^{*}=\left(r_{i}^{*}/D_{i}\right)_{i\in{\cal N}}$ and for any $\bm{\rho}=\left(r_{i}/D_{i}\right)_{i\in{\cal N}}\in \mathbb{P}$, it holds $\bm{\rho}^{*}\succeq\bm{\rho},\ \forall\,\bm{r}\in\mathbb{R}$. We are also interested in the respective \emph{lex-optimal} allocations $\bm{d}$, which are those that result in the unique lex-optimal ratio vector. Note that there may be \emph{many} allocations $\bar{\bm{d}}$ for which $\bar{\bm{\rho}}=\bm{\rho}^{*}$, as shown in Fig. \ref{fig:Def2-example}. \emph{Within this framework, we are interested in studying the properties of the lex-optimal exchange ratio vector, and the respective lex-optimal allocations, for any graph $G=(\mathcal{N},\mathcal{E})$ and any endowments $\{D_i\}_{i\in\mathcal{N}}$}.
\begin{figure}
\caption{\small{A 4-node graph with node resources $D_{i}$, illustrating that multiple allocations may achieve the unique lex-optimal exchange ratio vector.}}
\label{fig:Def2-example}
\end{figure}
\subsection{Coalitional Framework}
Before providing the details of this framework, let us introduce some additional notation. We denote by $G_{{\cal S}}=\left({\cal S},{\cal E_{{\cal S}}}\right)$ the subgraph of $G$ induced by a nonempty set of nodes $\mathcal{S}\subseteq\mathcal{N}$, i.e., the graph with node set ${\cal S}$, and edge set the edges $(i,j)\in{\cal E}$, with $i,j\in\mathcal{S}$. By ``allocation on ${\cal S}$'', we mean a vector $\bm{d}_{{\cal S}}=\left\{ d_{ij}\right\} _{(i,j)\in{\cal E}_{{\cal S}}}$ defined on graph ${G}_{\mathcal{S}}=\left(\mathcal{S},\mathcal{E}_{\mathcal{S}}\right)$ satisfying (\ref{eq:Allloc}) (with $\mathcal{N}\leftarrow\mathcal{S}$ and ${\cal E}\leftarrow{\cal E}_{{\cal S}}$). We denote by $\mathbb{D}_{{\cal S}}$ the set of all allocations on ${\cal S}$, and by $\mathbb{R}_{{\cal S}}$ the set of all received resource vectors on ${\cal S}$ which can be obtained by any allocation on $\mathcal{S}$. By definition it is $\mathbb{R}_{\{i\}}=\left\{ 0\right\}$, $\forall\,i\in{\cal N}$.
We assume here that the nodes are able to coordinate with each other; they can form coalitions and deviate from the proposed fair solution if this will ensure higher resources for one or more of them. In game theoretic terms, this behavior leads to a coalitional (or, cooperative) game \cite{myerson-gametheory-book} played by the nodes. Specifically, we call any subset of nodes ${\cal S\subseteq\mathcal{N}}$ a coalition when they allocate their resources only among each other. That is, there is no resource exchange among nodes in $\mathcal{S}$ and nodes in its complement set $\mathcal{S}^{c}=\mathcal{N}-\mathcal{S}$. Hence, the feasible resource vectors that nodes in ${\cal S}$ get, are the vectors of the set $\mathbb{R}_{{\cal S}}$. We also refer to the set $\mathcal{N}$ as the grand coalition. This coalitional game is one with non-transferable utilities, as the received resource vector $\bm{r}$ cannot be split arbitrarily among the nodes, due to the exchange constraints imposed by the graph and the lack of side payments. More formally, we define \cite{myerson-gametheory-book}:
\begin{definition} \textbf{Coalitional Service Exchange Game}. A non-transferable utility (NTU) game in graph form consists of the triplet $<\mathcal{N},G,\{\mathbb{R}_{\mathcal{S}},\ \mathcal{S}\subseteq\mathcal{N}\}>$, where $\mathcal{N}$ is the set of players (nodes) with initial resource endowments $\left\{ D_{i}\right\} _{i\in\mathcal{N}}$, and $G=(\mathcal{N},\mathcal{E})$ is the graph describing the service exchange possibilities among the nodes. Moreover, $\mathbb{R_{{\cal S}}}$, $\,\mathcal{S}\subseteq\mathcal{N}$, is the set of feasible $|\mathcal{S}|$-dimensional vectors of players' received resources $\{\bm{r}_{i}\}_{i\in\mathcal{S}}$, satisfying properties (i) $\mathbb{R}_{\{i\}}=\left\{ 0\right\} ,\,\forall i\in\mathcal{N}$, (ii) $\mathbb{R_{{\cal S}}}$ is closed and bounded.
\end{definition}
Our goal is to study the existence and the properties of self-enforcing allocations. This property is formally captured by the notion of stability for the grand coalition.
\begin{definition} \textbf{Stability}. An allocation $\bm{d}$, and the respective resource vector $\bm{r}$ is called \emph{strongly} stable if for any node set ${\cal S}\subseteq{\cal N}$, there is no allocation $\widehat{\bm{d}}_{{\cal S}}$ on the induced subgraph $G_{{\cal S}}=\left({\cal S},{\cal E_{{\cal S}}}\right)$, such that $\hat{r}_{i}\geq r_{i}$ for all $i\in{\cal S}$, and $\hat{r}_{j}>r_{j}$ for at least one node $j\in{\cal S}$. The allocation is called \emph{weakly} stable if for any node set ${\cal S}\subseteq{\cal N}$ , there is no allocation $\widehat{\bm{d}}_{{\cal S}}$ such that $\hat{r}_{i}>r_{i}$ for all $i\in{\cal S}$.
\end{definition}
Note that strong stability implies weak stability but not the other way around. In particular, the concept of weak stability for the grand coalition is directly related to the concept of the \emph{core} which is formally defined\footnote{\small{With a slight abuse of terminology we refer both to the received resource vectors and the respective allocations as stable.}} \cite{myerson-gametheory-book}:
\begin{definition}\textbf{Core}. Given an NTU coalitional game $<\mathcal{N},G,\{\mathbb{R}_{\mathcal{S}},\ \mathcal{S}\subseteq\mathcal{N}\}>$, the \emph{core} of $\mathbb{R}$ is defined as the subset of $\mathbb{R}$ which consists of all received resource vectors $\bm{r}\in\mathbb{R}$, such that for any possible coalition $\mathcal{S}$ and any allocation $\hat{\bm{d}}\in\mathbb{D}_{\mathcal{S}}$, if $\hat{r}_{i}>r_{i}$, for all $i\in\mathcal{S}$, then $\hat{\bm{r}}\notin\mathbb{R_{{\cal S}}}$.
\end{definition}
In this coalitional framework, we ask the question: \emph{Is there a (weakly or strongly) stable allocation for this service exchange coalitional game, and if yes, what are its properties in terms of allocations and exchange ratios}?
\subsection{Competitive Framework}
Assume now that each node $i\in\mathcal{N}$ is an independent decision maker, devising its allocation vector $\bm{d}_i=\big(d_{ij}\big)_{j\in\mathcal{N}_i}$ so as to maximize the resource $r_i$ it receives. In such a competitive market setting, the nodes are allowed to select any policy that satisfies eq. (\ref{eq:Allloc}), i.e., allocating their entire resource (market clearing condition). \rev{Namely, the solution concept for this market is related to the competitive (or, Walrasian) equilibrium \cite{arrow-debreu}, \cite{ColellWhinstonGreenBook1995}, which has been also applied in communication networks \cite{RJohariToNBilateral2011}, and extended to graphical economies (which exhibit \emph{localities}) \cite{KearnsGraphEcon2004}, \cite{KearnsEconSocial2004}. However, for the problem under consideration, there do not exist explicit price variables (or, price signals), and hence we employ a different equilibrium concept}:
\begin{definition}
\textbf{\emph{Exchange Equilibrium.}} An allocation $\bm{d}$ is an exchange equilibrium, if and only if (iff) for any node $i\in \mathcal{N}$ it holds (i) $d_{ji}=\rho_{i}d_{ij}$ for all $j\in\mathcal{N}_{i}$, and (ii) if $d_{ji}>0$ for some $j\in\mathcal{N}_{i}$ then $\rho_{j}=\min_{k\in\mathcal{N}_{i}}\rho_{k}$.
\end{definition}
In other words, at the equilibrium each node $i$ exchanges services only with its neighbors that trade at the lowest exchange ratio, so as to receive the maximum possible total service. Additionally, all the nodes that interact with $i$ have the same exchange ratio, while there may exist neighbors that do not allocate any resource to it. These latter nodes will certainly have higher exchange ratios, i.e., reciprocate with less resource for each unit of resource they receive. In this context, the question we want to tackle is the following: \emph{Does this game have exchange equilibrium(s), and if so, what are its properties and how does it depend on the graph $G$}?
\section{Lex-optimal Allocations}\label{sec:Central-design}
In this section we study the properties of the lex-optimal vector $\bm{\rho}^{*}$ and the respective allocations. These results hold for any graph $G=(\mathcal{N},\mathcal{E})$, and resource endowments $\{D_i\}_{i\in\mathcal{N}}$. To avoid trivial cases, we assume that there are no isolated nodes in the network\footnote{\small{If a graph $G$ has a set of isolated nodes $\mathcal{I}$ then we set $\rho_i =0$ for all $i\in \mathcal{I}$ and we proceed by considering the graph $G_{\mathcal{N}-\mathcal{I}}$.}}. We give first some notations. We denote the set of neighbors of nodes in a set $\mathcal{S}$, that do not belong to $\mathcal{S}$, by $\mathcal{N}\left(\mathcal{S}\right)=\cup_{i\in\mathcal{S}}\mathcal{N}_{i}-\mathcal{S}$. Given an allocation $\bm{d}$, for each node $i\in{\cal N}$ we define the subset $\mathcal{D}_{i}=\left\{ j\in\mathcal{N}_{i}:\, d_{ij}>0\right\}$ of nodes that receive resource from $i$, and the subset of nodes that don't receive resource ${\cal H}_{i}={\cal N}_{i}-{\cal D}_{i}=\left\{ j\in\mathcal{N}_{i}:\, d_{ij}=0\right\}$. Also, we define the subset $\mathcal{R}_{i}=\left\{ j\in\mathcal{N}_{i}:\, d_{ji}>0\right\}$ of nodes that give resource to $i$.
For a given $\bm{r}$, the set of \emph{different values (levels)} the coordinates of vector $\bm{\rho}$ take, will be denoted by $l_{k},\ k=1,\ldots,K\leq N$, where $l_{1}<l_{2}<\cdots<l_{K}$. The index of the level to which $\rho_{i}$ is equal, is denoted by $k(i)$, i.e., $l_{k(i)}=\rho_{i}$. We call $k(i)$ the ``level of node $i$''. The set of nodes with level $m$ is denoted by $\mathcal{L}_{m}=\left\{ i\in{\cal N}:\ k(i)=m\right\}$. If a subset of nodes $\mathcal{S\subseteq\mathcal{N}}$ has the same level under an allocation $\bm{d}$, then we denote the index of this level by $k\left({\cal S}\right)$. Note that the above quantities depend on the allocation $\bm{d}$; hence, whenever an allocation carries an accent (e.g., $\bar{\bm{d}}$), we will use the same accent for the quantities it induces, whenever applicable.
\textbf{Properties}. An important well-known property of the lex-optimal policy is that it is \emph{Pareto efficient} \cite{ColellWhinstonGreenBook1995}, \cite{nace-tutorial}, i.e., we cannot increase the exchange ratio for one node without decreasing the ratio of another node. The first property of the lex-optimal allocations that we prove is that the neighbors of each node $i\in\mathcal{N}$, that receive non-zero resource from $i$, belong to the same exchange ratio level set\footnote{\small{Recall that this ratio is determined by the total resource each of these nodes receives, i.e., not only from that allocated by node $i$.}}. Moreover, all the neighbors that do not receive resource from $i$, have a higher level index. Specifically:
\begin{lemma}
\label{lem:neighbor}
Let $\bm{d}^{*}$ be a lex-optimal allocation, and let $i\in{\cal N}$. Then all nodes $j\in\mathcal{D}_{i}^{*}$ have the same level $l_{k(\mathcal{D}_{i}^{*})}^{*}$ and hence belong to the same set $\mathcal{L}_{k(\mathcal{D}_{i}^{*})}^{*}$. Moreover, for any node $j\in \mathcal{H}_{i}^{*}$, it is $l_{k(j)}^{*}\geq l_{k(\mathcal{D}_{i}^{*})}^{*}$.
\end{lemma}
\begin{proof}
Consider a lex-optimal allocation $\boldsymbol{\bar{d}}$ and let $j_{1},\ j_{2}$ be such that $\bar{d}_{ij_{1}}>0$, $\bar{d}_{ij_{2}}>0$, but $j_{1}\in{\cal L}_{m}^{*}$ and $j_{2}\in\mathcal{L}_{n}^{*}$ with $m<n$. Recall that the lex-optimal ratio vectors---and hence the respective level sets---are unique, so we use the star ($*$) notation for them. We can then move some resource from $j_{2}$ and give it to $j_{1}$ while ensuring that with the resulting allocation $\widehat{\boldsymbol{d}}$, it is $l_{k(j_{1})}^{*}<\widehat{l}_{k(j_{1})}\leq\widehat{l}_{k(j_{2})}<l_{k(j_{2})}^{*}$. Since the received resources of all other nodes remain the same, it follows that $\widehat{\boldsymbol{r}}\succ\boldsymbol{\bar{r}}=\boldsymbol{r}^{*}$, which is a contradiction, as we assumed that $\boldsymbol{\bar{d}}$ is lex-optimal. Assume next that $\bar{d}_{ij}=0$ for a node $j\in\mathcal{N}_{i}$ for which it holds that $l_{k(j)}^{*}<l_{k(\mathcal{\bar{D}}_{i})}^{*}$. Using a similar argument we arrive at a contradiction again. $\blacksquare$
\end{proof}
\noindent Note that this lemma shows already that lex-optimal allocations have some of the required properties of exchange equilibriums. As will be shown later there are lex-optimal allocations that are in fact exchange equilibriums.
\begin{figure}
\caption{\small{Structure of a graph with $K^{*}=7$ exchange ratio levels under a lex-optimal allocation.}}
\label{fig:Corollary-Example}
\end{figure}
We need some additional notation at this point. Let $\bm{d}\in\mathbb{D}$ and assume $K\geq2$, i.e., the allocation has at least two levels. Define ${\cal Q}_{1}={\cal N}$, and for $K\geq3$:
\begin{align*}
{\cal Q}_{k} & ={\cal N}-\cup_{m=1}^{k-1}\left({\cal L}_{m}\cup{\cal L}_{K-m+1}\right),\ 2\leq k\leq\lceil{K/2\rceil}.
\end{align*}
For example, $\mathcal{Q}_{2}$ consists of the nodes in $\mathcal{N}$ that remain after removing those that belong to the level sets $\mathcal{L}_1$ and $\mathcal{L}_K$. In the sequel, a quantity $X$ referring to induced subgraph $G_{{\cal Q}_{k}}=\left({\cal Q}_{k},{\cal E}_{{\cal Q}_{k}}\right)$ is denoted $X_{{\cal Q}_{k}}$. The next Theorem describes the structure of a lex-optimal allocation.
\begin{theorem}
\label{thm:MainTh0}
If an allocation $\bm{d}^{*}$ is lex-optimal and $K\geq2$, then the following Properties hold:
\begin{enumerate}
\item \label{enu:MainTh0Item1}${\cal L}_{k}^{*}$ is an independent set in graph $G_{{\cal Q}_{k}}$, for $k=1,....,\lfloor{\frac{K}{2}\rfloor}$.
\item \label{enu:MainTh0Item2}${\cal L}_{K-k+1}^{*}={\cal N}_{{\cal Q}_{k}}\left({\cal L}_{k}^{*}\right)$, for $k=1,....,\lfloor{\frac{K}{2}\rfloor}$.
\item \label{enu:MainTh0Item2.1}$l_{k}^{*}l_{K-k+1}^{*}=1$, for $k=1,....,\lfloor{K/2\rfloor}$.
\item \label{enu:MainTh0Item3}$\sum_{i\in{\cal L}_{k}^{*}}r_{i}^{*}=\sum_{i\in{\cal L}_{K-k+1}^{*}}D_{i}$, for $k=1,....,\lfloor{\frac{K}{2}\rfloor}$.
\end{enumerate}
\end{theorem}
Interestingly, sufficiency of this result holds as well:
\begin{theorem}
\label{thm:MainLexSuff}
If an allocation $\bm{d}$ with $K\geq2$ has properties \ref{enu:MainTh0Item1}-\ref{enu:MainTh0Item3} of Theorem \ref{thm:MainTh0}, then it is lex-optimal.
\end{theorem}
Finally, when there is only one level set, it holds:
\begin{theorem}
\label{thm:MainTh0_K1}
If an allocation $\bm{d}^{*}$ is lex-optimal, and $K^{*}=1$, then $l_{1}^{*}=1$. Also, if an allocation $\bm{d}$ has $K=1$, then $l_{1}=1$ and it is lex-optimal.
\end{theorem}
\subsection{Analysis and Discussion}
Let us now discuss the implications of the above theorems. Under a lex-optimal allocation, the nodes are classified in disjoint sets of different levels, in a fashion that depends both on their resource endowments and on the graph $G$. For the discussion below, please refer to Fig. \ref{fig:Corollary-Example}, that presents an example of the structure for $K^{*}=7$ levels. In this graph, we depict with solid lines the physical connections that may exist among the different sets of nodes. Notice that the actual nodes and their detailed connections are not shown.
\uline{Exchange ratios Structure}. According to Property $3$ of Theorem $1$, the exchange ratios have a certain structure. Specifically, the highest level of ratios is inversely proportional to the lowest level of ratios ($l_{7}^{*}=1/l_{1}^{*}$), the second highest exchange ratio level is inversely proportional to the second lowest ratio level ($l_{6}^{*}=1/l_{2}^{*}$), and so on. Additionally, resource exchanges satisfy Property $4$. For example, in Fig. \ref{fig:Corollary-Example} all the nodes of the highest ratio set $\mathcal{L}_{7}^{*}$, allocate their entire resources to the nodes belonging to the lowest level set $\mathcal{L}_{1}^{*}$. Moreover, the latter receive resource only from the nodes in $\mathcal{L}_{7}^{*}$. Similarly, the nodes in level set $\mathcal{L}_{6}^{*}$ allocate all their received resources to nodes in set $\mathcal{L}_{2}^{*}$ which are served only by these former nodes, and so on. Interestingly, when $K^{*}$ is odd, there is one set of nodes, here the set $\mathcal{L}_{4}^{*}$, which exchange resource only with each other.
\uline{Topological Properties}. On the other hand, Properties $1$ and $2$ reveal the \emph{impact of the network topology on the max-min solution}. First, from Property $2$, we can find the possible neighbors for each node, based on the level set it belongs to. For example, when $K^{*}=7$, it holds $\mathcal{L}_{7}^{*}=\mathcal{N}(\mathcal{L}_{1}^{*})$, i.e., the neighbors of nodes in $\mathcal{L}_{1}^{*}$, that do not have ratios $l_{1}^{*}$, belong only to set $\mathcal{L}_{7}^{*}$. Moreover, since Property $1$ states that the set $\mathcal{L}_{1}^{*}$ is independent in the graph $G_{{\cal Q}_{1}}\triangleq G$, we understand that $\mathcal{L}_1^{*}$ nodes have neighbors only in $\mathcal{L}_7^{*}$.
Similarly, it holds that $\mathcal{L}_{6}^{*}=\mathcal{N}_{\mathcal{Q}_{2}}(\mathcal{L}_{2}^{*})$. Hence, the nodes in set $\mathcal{L}_{2}^{*}$ can have links only with nodes in set $\mathcal{L}_{6}^{*}$ and possibly with nodes in $\mathcal{L}_{7}^{*}$ (since the latter do not belong in $G_{\mathcal{Q}_2}$). However, from Lemma \ref{lem:neighbor} it follows that nodes in $\mathcal{L}_{2}^{*}$ exchange resource only with nodes in $\mathcal{L}_{6}^{*}$. With the same reasoning, it is easy to see that nodes in set $\mathcal{L}_{3}^{*}$ can be physically connected with nodes in $\mathcal{L}_{7}^{*}$, $\mathcal{L}_{6}^{*}$ and $\mathcal{L}_{5}^{*}$, but they exchange resource only with nodes in the latter set. Finally, nodes in set $\mathcal{L}_{4}^{*}$ exchange resources only with each other.
These properties reveal how the graph affects the lex-optimal fair solution. For example, by adding a link between two nodes initially belonging to $\mathcal{L}_{1}^{*}$ (which is independent), the lex-optimal solution changes and places these (now connected) nodes to another set. This dependency among the graph structure and the lex-optimal exchange ratio vector will become more evident in the sequel.
\subsection{Proofs of Theorems}
In this subsection we provide the proofs of Theorems \ref{thm:MainTh0_K1} and \ref{thm:MainTh0}, while Theorem \ref{thm:MainLexSuff} is proved in the Appendix.
\uline{\textbf{3.2.1 PROOF of Theorem \ref{thm:MainTh0_K1}}}. Before proceeding with the proof, we provide some additional notation, lemmas and propositions. We denote the sum of received resources that are incoming to, and outgoing from set ${\cal S}\subseteq{\cal N}$, under allocation $\boldsymbol{d}$, as follows:
\begin{equation}
{\rm In}\left({\cal S}\right)=\sum_{i\in{\cal S}}\ \sum_{j\in{\cal N}_{i}\cap{\cal S}^{c}}d_{ji},\,\,\,{\rm Out}\left({\cal S}\right)=\sum_{i\in{\cal S}}\ \sum_{j\in{\cal N}_{i}\cap{\cal S}^{c}}d_{ij}, \label{eq:In-Out-3}
\end{equation}
where $\mathcal{S}^{c}=\mathcal{N}-\mathcal{S}$ is the complement set of $\mathcal{S}$. By definition it is ${\rm In}(\mathcal{N})={\rm Out}(\mathcal{N})=0$, and also:
\begin{equation}
{\rm In}\left({\cal S}\right)={\rm Out}(\mathcal{S}^{c}),\,\,{\rm Out(\mathcal{S})={\rm In(\mathcal{S}^{c})}}\,. \label{eq:InOutN1}
\end{equation}
\begin{lemma}
\label{lem:equality} For any set ${\cal S}\subseteq{\cal N}$ , under any feasible allocation $\boldsymbol{d}\in\mathbb{D}$, it holds
\begin{equation}
\sum_{i\in\mathcal{S}}r_{i}+{\rm Out}\left({\cal S}\right)=\sum_{i\in\mathcal{S}}D_{i}+{\rm In}\left({\cal S}\right).\label{eq:equality}
\end{equation}
Moreover, it is:
\begin{equation}
{\rm Out}\left({\cal S}\right)\leq\sum_{i\in{\cal S}}D_{i}\label{eq:OutIneq}
\end{equation}
with equality holding iff all nodes in ${\cal S}$ give their resource only to nodes outside $\mathcal{S}$. Also, it is:
\begin{equation}
{\rm In}\left({\cal S}\right)\leq\sum_{i\in{\cal S}}r_{i}\label{eq:InIneq-1}
\end{equation}
with equality holding iff all nodes in $\mathcal{S}$ get resource only from nodes outside $\mathcal{S}$.
\end{lemma}
\begin{proof}
Note that for any node set ${\cal S}$, the following holds:
\begin{equation}
\sum_{i\in{\cal S}}\ \sum_{j\in{\cal N}_{i}\cap{\cal S}}d_{ji}=\sum_{i\in{\cal S}}\ \sum_{j\in{\cal N}_{i}\cap{\cal S}}d_{ij}.\label{eq:basic1}
\end{equation}
Also, by definition
\begin{equation}
r_{i}=\sum_{j\in{\cal N}_{i}\cap{\cal S}}d_{ji}+\sum_{j\in{\cal N}_{i}\cap{\cal S}^{c}}d_{ji},\label{eq:basic1-1}
\end{equation}
by feasibility of $\boldsymbol{d}\in\mathbb{D}$,
\begin{equation}
D_{i}=\sum_{j\in{\cal N}_{i}\cap{\cal S}}d_{ij}+\sum_{j\in{\cal N}_{i}\cap{\cal S}^{c}}d_{ij}.\label{eq:basic2}
\end{equation}
Hence we calculate
\begin{align*}
\sum_{i\in{\cal S}}r_{i} & =\sum_{i\in{\cal S}}\ \sum_{j\in{\cal N}_{i}\cap{\cal S}}d_{ji}+\sum_{i\in{\cal S}}\sum_{j\in{\cal N}_{i}\cap{\cal S}^{c}}d_{ji}\ {\rm by\ (}\ref{eq:basic1-1})\\
& =\sum_{i\in{\cal S}}\ \sum_{j\in{\cal N}_{i}\cap{\cal S}}d_{ij}+{\rm In}\left({\cal S}\right)\ \ \ {\rm by\ (\ref{eq:basic1}),\, (\ref{eq:In-Out-3})}\\
& =\sum_{i\in{\cal S}}\ \sum_{j\in{\cal N}_{i}\cap{\cal S}}d_{ij}+\sum_{i\in{\cal S}}\ \sum_{j\in{\cal N}_{i}\cap{\cal S}^{c}}d_{ij}+{\rm In}\left({\cal S}\right)-{\rm Out}\left({\cal S}\right)\\
& =\sum_{i\in\mathcal{S}}D_{i}+{\rm In}\left({\cal S}\right)-{\rm Out}\left({\cal S}\right)\ \ \ {\rm by\ (\ref{eq:basic2})}
\end{align*}
Inequalities (\ref{eq:OutIneq}), (\ref{eq:InIneq-1}) follow directly
from the definitions. $\blacksquare$
\end{proof}
\begin{lemma}
\label{lem:levels} Let $\boldsymbol{d}\in\mathbb{D}$. If $K=1$
then $l_{1}=1$. If $K>1$ , then $l_{1}<1$ and $l_{K}>1$.
\end{lemma}
\begin{proof}
If $K=1$, we have $r_{i}=l_{1}D_{i}$ for all $i\in{\cal N}$. From
Lemma \ref{lem:equality} (applied with ${\cal S}\leftarrow{\cal N}$) we then have $l_{1}\sum_{i\in{\cal N}}D_{i}=\sum_{i\in{\cal N}}D_{i}$, hence $l_{1}=1$. Let now $K>1$. If $l_{1}\geq1$, then since $l_{K}>l_{1}$, we have
\begin{equation}
\sum_{i\in{\cal N}}r_{i} = \sum_{k=1}^{K}l_{k}\sum_{i\in{\cal \mathcal{L}}_{k}}D_{i}> \sum_{i\in{\cal N}}D_{i}
\end{equation}
which contradicts Lemma \ref{lem:equality}, hence $l_{1}<1$. Similarly, it is shown that $l_{K}>1$. $\blacksquare$
\end{proof}
\begin{prop}
\label{prop:LexK1}Let $\bar{\boldsymbol{d}}\in\mathbb{D}.$ If $\bar{K}=1$
then $\bar{\boldsymbol{d}}$ is lex-optimal.
\end{prop}
\begin{proof}
From Lemma \ref{lem:levels}, $\bar{l}_{1}=1$ and hence $\bar{r}_{i}=D_{i},\ i\in{\cal N}$. If there is another allocation $\hat{\boldsymbol{d}}$ such that $\hat{\boldsymbol{r}}\succ\bar{\boldsymbol{r}}$, then it should hold $\hat{r}_{i}\geq\bar{r}_{i}=D_{i}\ \forall i\in{\cal N}$ and $\hat{r}_{j}>\bar{r}_{j}=D_{j}$ for at least one $j\in{\cal N}$. But then, it would be $\sum_{i\in{\cal N}}\hat{r}_{i}>\sum_{i\in{\cal N}}D_{i}$, which contradicts (\ref{eq:equality}) (applied with ${\cal S}\leftarrow{\cal N}$). $\blacksquare$
\end{proof}
Now we are ready to provide the \uline{proof of Theorem \ref{thm:MainTh0_K1}}: From Lemma \ref{lem:levels} we have that for a feasible allocation $\boldsymbol{d}\in\mathbb{D}$, with $K=1$, it is $l_1=1$. From Proposition \ref{prop:LexK1} we also get that this is a lex-optimal allocation. Moreover, since a lex-optimal allocation $\boldsymbol{d}^{*}$ is also feasible, when $K^{*}=1$, it is also $l_{1}^{*}=1$ from Lemma \ref{lem:levels}. $\blacksquare$
\uline{\textbf{3.2.2 PROOF of Theorem \ref{thm:MainTh0}}}. First, we need the following corollary.
\begin{cor}
\label{cor:GiveToK} If under a lex-optimal allocation $\boldsymbol{\bar{d}}$ it holds $k(\mathcal{\bar{D}}_{i})=K^{*}$ for some $i\in\mathcal{N}$, then ${\cal N}_{i}\subseteq\mathcal{L}_{K}^{*}$.\end{cor}
\begin{proof}
Since under a lex-optimal allocation there can be no node with level higher than $l_{K^{*}}$, Lemma \ref{lem:neighbor} is applied with the equality, i.e., $\forall\,j\in{\cal \bar{H}}_{i}$, it holds $k(j)=k(\mathcal{\bar{D}}_{i})=K^{*}$. Since the same also holds by definition for all nodes in $\mathcal{\bar{D}}_{i}$, the result follows. $\blacksquare$
\end{proof}
We introduce some additional definitions and results. Consider a lex-optimal allocation $\boldsymbol{\bar{d}}$ and let ${\cal \bar{Z}}$ be the subset of nodes in ${\cal L}_{K}^{*}$, with
the property: $i\in{\cal \bar{Z}}$ iff $k(\mathcal{\bar{D}}_{i})=K^{*}$. Hence any node $i$ in ${\cal \bar{Z}}\subseteq{\cal L}_{K}^{*}$ gives resource only to nodes in ${\cal L}_{K}^{*}$. The next lemma shows that the set ${\cal \bar{Z}}$ is empty if $K^{*}\geq 2$.
\begin{lemma}
\label{lem:NoGive}Let $\bar{\boldsymbol{d}}$ be a lex-optimal allocation. If $K^{*}\geq2$, then ${\cal \bar{Z}}=\emptyset$, i.e., the nodes in ${\cal L}_{K}^{*}$ give all their resource to nodes outside ${\cal L}_{K}^{*}.$ Hence\footnote{\small{To facilitate the reader, we repeat the notation: $\overline{{\rm Out}}$ and ${\cal \bar{Z}}$ are annotated with the bar symbol since they depend on $\bar{\boldsymbol{d}}$, while the optimal level sets and the received resources are unique and hence annotated with the star symbol. }},
\begin{equation}
\overline{{\rm Out}}\left({\cal L}_{K}^{*}\right)=\sum_{i\in{\cal L}_{K}^{*}}D_{i}.\label{eq:OutZero}
\end{equation}
\end{lemma}
\begin{proof}
According to Corollary \ref{cor:GiveToK} all neighbors of any node $i\in{\cal \bar{Z}}$ are in ${\cal L}_{K}^{*}$. It follows that a node $i$ in ${\cal \bar{Z}}$ gets resource only from nodes in ${\cal \bar{Z}}$: if node $i$ was getting resource from a neighbor node $j\notin{\cal \bar{Z}}$, then since as the previous sentence says $j\in{\cal L}_{K}^{*}$, node $j$ should belong to ${\cal \bar{Z}}$ by definition; which is a contradiction. This implies that ${\rm \overline{In}}\left({\cal \bar{Z}}\right)=0$ and hence according to Lemma \ref{lem:equality}:
\begin{equation}
\sum_{i\in{\cal \bar{Z}}}r_{i}^{*}\leq\sum_{i\in{\cal \bar{Z}}}D_{i}. \label{eq:xx}
\end{equation}
If ${\cal \bar{Z}}\neq\emptyset$, then since $r_{i}^{*}=l_{K}^{*}D_{i}$, $\forall i\in{\cal \bar{Z}},$ we conclude from (\ref{eq:xx}) that $l_{K}^{*}\leq1$, which contradicts Lemma \ref{lem:levels}. Equality (\ref{eq:OutZero}) follows immediately: since ${\cal \bar{Z}}=\emptyset,$ the nodes in ${\cal L}_{K}^{*}$ give all their resource to nodes outside ${\cal L}_{K}^{*}$ and hence (\ref{eq:OutIneq}) applies with equality. $\blacksquare$
\end{proof}
Let $\bar{{\cal G}}$ be the set of nodes from which nodes in ${\cal L}_{K}^{*}$ get resource, i.e., $\bar{{\cal G}}=\{i\in\mathcal{N}:\ k(\bar{\mathcal{D}}_{i})=K^{*}\}$. It holds:
\begin{lemma}
\label{lem:PropOfG}
Let $\bar{\boldsymbol{d}}$ be a lex-optimal allocation and $K^{*}\geq2$. It holds \textup{${\cal L}_{K}^{*}\cap\bar{{\cal G}}=\emptyset$. Moreover,} the set $\bar{{\cal G}}$ is nonempty, independent, it holds ${\cal N}\left(\bar{{\cal G}}\right)={\cal L}_{K}^{*}$, and
\begin{equation}
{\rm \overline{In}}\left({\cal \bar{G}}\cup{\cal L}_{K}^{*}\right)=0.\label{eq:separation}
\end{equation}
\end{lemma}
\begin{proof}
According to Lemma \ref{lem:NoGive}, ${\cal L}_{K}^{*}\cap\bar{{\cal G}}={\cal \bar{Z}}=\emptyset$. Also, according to (\ref{eq:equality}), (\ref{eq:OutZero}), and the definition of $\bar{{\cal G}}$, it is
\[
\sum_{i\in{\cal L}_{K}^{*}}r_{i}^{*}=\overline{{\rm In}}\left({\cal L}_{K}^{*}\right)\leq\sum_{i\in{\cal \bar{G}}}D_{i}.
\]
Since $\sum_{i\in{\cal L}_{K}^{*}}r_{i}^{*}=l_{K}^{*}\sum_{i\in{\cal L}_{K}^{*}}D_{i}>0$, we get ${\cal \bar{G}}\neq\emptyset$. According to Corollary \ref{cor:GiveToK} and the definition of $\bar{{\cal G}},$ it holds ${\cal N}_{i}\subseteq{\cal L}_{K}^{*},\ \forall\,i\in\bar{{\cal G}}$. Since ${\cal L}_{K}^{*}\cap\bar{{\cal G}}=\emptyset$, $\bar{{\cal G}}$ is independent.
To show that ${\cal N}\left(\bar{{\cal G}}\right)={\cal L}_{K}^{*}$ we argue as follows. According to Corollary \ref{cor:GiveToK}, it is ${\cal N}\left(\bar{{\cal G}}\right)\subseteq{\cal L}_{K}^{*}$. Also, if ${\cal L}_{K}^{*}-{\cal N}\left(\bar{{\cal G}}\right)\neq\emptyset$, there would be a node $i\in{\cal L}_{K}^{*}$ not connected to any of the nodes in $\bar{{\cal G}}$; but since by definition of $\bar{{\cal G}}$ node $i$ gets resource only from nodes in $\bar{{\cal G}}$, we would then have $r_{i}^{*}=l_{K}^{*}D_{i}=0$, a contradiction since $l_{K}^{*}>1$ and $D_{i}>0$.
Notice next that, since ${\cal N}\left(\bar{{\cal G}}\right)={\cal L}_{K}^{*}$ and the set $\bar{{\cal G}}$ is independent, all neighbors of nodes in ${\cal \bar{G}}$ are in ${\cal L}_{K}^{*}$, and hence nodes in ${\cal \bar{G}}$ can get resource only from nodes in ${\cal L}_{K}^{*}$. Since by definition nodes in ${\cal L}_{K}^{*}$ get resource only from ${\cal \bar{G}}$, (\ref{eq:separation}) holds. $\blacksquare$
\end{proof}
\begin{lemma}
\label{lem:ProdLess}
Let $\bar{\boldsymbol{d}}$ be a lex-optimal allocation and $K^{*}\geq2$. Let $k_{0}$ be the index of the smallest level in ${\cal \bar{G}}$. Then $l_{K}^{*}l_{k_{0}}^{*}\leq1$. Strict inequality holds if
\begin{enumerate}
\item \textup{either ${\rm \overline{Out}}\left({\cal \bar{G}}\cup{\cal L}_{K}^{*}\right)>0,$}
\item \textup{or ${\rm \overline{Out}}\left({\cal \bar{G}}\cup{\cal L}_{K}^{*}\right)=0$
and ${\cal \bar{G}}-{\cal L}_{k_{0}}^{*}\neq\emptyset.$}
\end{enumerate}
If ${\rm \overline{Out}}\left({\cal \bar{G}}\cup{\cal L}_{K}^{*}\right)=0$ and ${\cal \bar{G}}-{\cal L}_{k_{0}}^{*}=\emptyset$, then $l_{K}^{*}l_{k_{0}}^{*}=1$.
\end{lemma}
\begin{proof}
Since by Lemma \ref{lem:PropOfG} ${\cal \bar{G}}$ is independent, and ${\cal N}\left(\bar{{\cal G}}\right)\subseteq{\cal L}_{K}^{*}$, the nodes in ${\cal \bar{G}}$ can give resource only to nodes in ${\cal L}_{K}^{*}$. Hence only nodes in ${\cal L}_{K}^{*}$ give resource to nodes in $\left({\cal \bar{G}}\cup{\cal L}_{K}^{*}\right)^{c}$, hence:
\begin{align*}
\overline{{\rm Out}}\left({\cal L}_{K}^{*}\right) & ={\rm \overline{In}}\left({\cal \bar{G}}\right)+{\rm \overline{Out}}\left({\cal \bar{G}}\cup{\cal L}_{K}^{*}\right).
\end{align*}
Taking into account (\ref{eq:OutZero}) we conclude:
\begin{equation}
\overline{{\rm In}}\left({\cal \bar{G}}\right)=\sum_{i\in{\cal L}_{K}^{*}}D_{i}-\overline{{\rm Out}}\left(\bar{{\cal G}}\cup{\cal L}_{K}^{*}\right).\label{eq:InIneq}
\end{equation}
Since nodes in ${\cal \bar{G}}$ constitute an independent set it follows:
\begin{equation}
{\rm \overline{Out}}\left({\cal \bar{G}}\right)=\sum_{i\in\bar{{\cal G}}}D_{i}.\label{eq:OutGk}
\end{equation}
From Lemma \ref{lem:equality} applied to set ${\cal \bar{G}}$, and using (\ref{eq:InIneq}-\ref{eq:OutGk}) we get
\begin{align}
&\sum_{i\in{\cal \bar{G}}}r_{i}=\overline{{\rm In}}\left({\cal \bar{G}}\right)=\sum_{i\in{\cal L}_{K}^{*}}D_{i}-\overline{{\rm Out}}\left(\bar{{\cal G}}\cup{\cal L}_{K}^{*}\right),\,\text{or} \nonumber \\
&\sum_{k=k_{0}}^{K^{*}-1}l_{k}^{*}\sum_{i\in{\cal L}_{k}^{*}\cap{\cal \bar{G}}}D_{i}=\sum_{i\in{\cal L}_{K}^{*}}D_{i}-\overline{{\rm Out}}\left(\bar{{\cal G}}\cup{\cal L}_{K}^{*}\right).\label{eq:rel1}
\end{align}
Next, since nodes in ${\cal L}_{K}^{*}$ get resource only from nodes in $\bar{{\cal G}}$ (and all of it) we have
\begin{equation}
l_{K}^{*}\sum_{i\in{\cal L}_{K}^{*}}D_{i}=\sum_{i\in\bar{{\cal G}}}D_{i}\,.\label{eq:rel2}
\end{equation}
Multiplying (\ref{eq:rel1}) and (\ref{eq:rel2}), and rearranging terms:
\begin{equation}
\sum_{k=k_{0}}^{K^{*}-1}l_{K}^{*}l_{k}^{*}\sum_{i\in{\cal L}_{k}^{*}\cap{\cal \bar{G}}}D_{i}=\sum_{i\in\bar{{\cal G}}}D_{i}-\frac{\sum_{i\in\bar{{\cal G}}}D_{i}}{\sum_{i\in{\cal L}_{K}^{*}}D_{i}}\overline{{\rm Out}}\left(\bar{{\cal G}}\cup{\cal L}_{K}^{*}\right).\label{eq:rel3}
\end{equation}
Eq. (\ref{eq:rel3}) implies that $l_{K}^{*}l_{k_{0}}^{*}\leq1$: if $l_{K}^{*}l_{k_{0}}^{*}>1$ then, because it will also hold $l_{K}^{*}l_{k}^{*}>1,\ \forall k\geq k_{0}$, (\ref{eq:rel3}) would not hold.
Now, if $\overline{{\rm Out}}\left(\bar{{\cal G}}\cup{\cal L}_{K}^{*}\right)>0$ then from (\ref{eq:rel3}) we have:
\[
\sum_{k=k_{0}}^{K^{*}-1}l_{K}^{*}l_{k}^{*}\sum_{i\in{\cal L}_{k}^{*}\cap{\cal \bar{G}}}D_{i}<\sum_{i\in\bar{{\cal G}}}D_{i}
\]
and arguing as above we see that necessarily $l_{K}^{*}l_{k_{0}}^{*}<1$. If $\overline{{\rm Out}}\left(\bar{{\cal G}}\cup{\cal L}_{K}^{*}\right)=0$ and ${\cal \bar{G}}-{\cal L}_{k_{0}}^{*}\neq\emptyset$ then again $l_{K}^{*}l_{k_{0}}^{*}<1$. To see this, notice that if $l_{K}^{*}l_{k_{0}}^{*}\geq1$ and ${\cal \bar{G}}-{\cal L}_{k_{0}}^{*}\neq\emptyset$ then it would hold:
\begin{equation}
l_{K}^{*}l_{k}^{*}\sum_{i\in{\cal L}_{k}^{*}\cap{\cal \bar{G}}}D_{i}\geq\sum_{i\in{\cal L}_{k}^{*}\cap{\cal \bar{G}}}D_{i}\ \forall\, k\geq k_{0}.\label{eq:HelpIneq}
\end{equation}
Also, since ${\cal \bar{G}}-{\cal L}_{k_{0}}^{*}\neq\emptyset$, for some $k>k_{0}$ there must be a nonempty set ${\cal L}_{k}^{*}\cap{\cal \bar{G}}$ which implies that the inequality is strict for some $k>k_{0}$. Adding inequalities (\ref{eq:HelpIneq}) we would then get,
\[
\sum_{k=k_{0}}^{K^{*}-1}l_{K}^{*}l_{k}^{*}\sum_{i\in{\cal L}_{k}^{*}\cap{\cal \bar{G}}}D_{i}>\sum_{i\in\bar{{\cal G}}}D_{i},
\]
which contradicts (\ref{eq:rel3}).
Assume finally that $\overline{{\rm Out}}\left(\bar{{\cal G}}\cup{\cal L}_{K}^{*}\right)=0$ and ${\cal \bar{G}}-{\cal L}_{k_{0}}^{*}=\emptyset$. From (\ref{eq:rel3})
we have $l_{K}^{*}l_{k_{0}}^{*}\sum_{i\in\bar{{\cal G}}}D_{i}=\sum_{i\in\bar{{\cal G}}}D_{i}$, and since $\bar{{\cal G}}\neq\emptyset$ implies $\sum_{i\in\bar{{\cal G}}}D_{i}>0$, we get $l_{K}^{*}l_{k_{0}}^{*}=1$. $\blacksquare$
\end{proof}
\begin{lemma}
\label{lem:Independent}If $K^{*}\geq2$, then the set ${\cal L}_{1}^{*}$
is independent.
\end{lemma}
\begin{proof}
Assume that for some pair $i,j\in{\cal L}_{1}^{*}$, it is $(i,j)\in{\cal E}$. Consider the largest set ${\cal C}\subseteq{\cal L}_{1}^{*}$ such that a) ${\cal C}$ contains both $i$ and $j$, b) the induced subgraph of ${\cal C}$ is connected. Therefore, each node in ${\cal C}$ has a neighbor in ${\cal C}$, and hence a neighbor in ${\cal L}_{1}^{*}$. By Lemma \ref{lem:neighbor} we have that under any lex-optimal allocation $\boldsymbol{d},$ it holds $k(\mathcal{D}_{i})=1$ for all $i\in{\cal C}.$ That is, all nodes in ${\cal C}$ give resource only to other nodes in ${\cal C}$, hence, ${\rm Out}({\cal C})=0$. It follows from (\ref{eq:equality}) that $\sum_{i\in{\cal C}}r_{i}^{*}\geq\sum_{i\in{\cal C}}D_{i}$.
But we also have
\begin{eqnarray*}
\sum_{i\in{\cal C}}r_{i}^{*} & = & l_{1}^{*}\sum_{i\in{\cal C}}D_{i}< \sum_{i\in{\cal C}}D_{i},
\end{eqnarray*}
since $l_{1}^{*}<1$ by Lemma \ref{lem:levels}, which is a contradiction. $\blacksquare$
\end{proof}
\begin{lemma}
\textup{\label{lem:AllInG}}Let $\bar{\boldsymbol{d}}$ be a lex-optimal allocation and $K^{*}\geq2$.\textup{ It holds ${\cal L}_{1}^{*}\subseteq\bar{{\cal G}}$. }
\end{lemma}
\begin{proof}
Let $\bar{\mathcal{F}}_{k}\triangleq\left(\bar{{\cal G}}\cup{\cal L}_{K}^{*}\right)^{c}\cap{\cal L}_{k}^{*},\ k=1,...,K^{*}-1.$ It suffices to show that $\bar{\mathcal{F}}_{1}=\emptyset.$ Assume that $\bar{\mathcal{F}}_{1}\neq\emptyset$. Let ${\cal \bar{B}}$ be the set of nodes in $\left(\bar{{\cal G}}\cup{\cal L}_{K}^{*}\right)^{c}$ that are neighbors of nodes in $\bar{\mathcal{F}}_{1}$. The set ${\cal \bar{B}}$ is nonempty because otherwise, since ${\cal L}_{1}^{*}$ (and hence $\bar{\mathcal{F}}_{1}$ ) is an independent set and by Lemma \ref{lem:PropOfG} it is ${\cal N}\left(\bar{{\cal G}}\right)\subseteq{\cal L}_{K}^{*}$, all neighbors of any node in $\bar{\mathcal{F}}_{1}$ would be in ${\cal L}_{K}^{*}$ which implies that $\bar{\mathcal{F}}_{1}\subseteq\bar{{\cal G}}$, a contradiction.
Notice next that (\ref{eq:separation}) and Lemma \ref{lem:neighbor} imply that all nodes in ${\cal \bar{B}}$ give resource only to nodes in $\bar{\mathcal{F}}_{1}$. Hence
\[
l_{1}^{*}\sum_{i\in\mathcal{\bar{F}}_{1}}D_{i}\geq\sum_{i\in\mathcal{\bar{B}}}D_{i}.
\]
Also, since by Lemma \ref{lem:Independent} the set $\bar{\mathcal{F}}_{1}$ is independent, and by (\ref{eq:separation}) nodes in $\bar{\mathcal{F}}_{1}$ do not give resource to nodes in ${\cal L}_{K}^{*}$, all nodes in this set give resource only to nodes in ${\cal \bar{B}}$ and (notice that since ${\cal \bar{B}}\neq\emptyset$, it should hold $K^{*}-1\geq2$),
\[
\sum_{k=2}^{K^{*}-1}l_{k}^{*}\sum_{i\in\bar{\mathcal{F}}_{k}\cap{\cal \bar{B}}}D_{i}\geq\sum_{i\in\mathcal{\bar{F}}_{1}}D_{i}.
\]
Multiplying the last two inequalities and canceling terms:
\[
\sum_{k=2}^{K^{*}-1}l_{1}^{*}l_{k}^{*}\sum_{i\in\bar{\mathcal{F}}_{k}\cap{\cal \bar{B}}}D_{i}\geq\sum_{i\in\mathcal{\bar{B}}}D_{i},
\]
which implies that $l_{1}^{*}l_{K-1}^{*}\geq1$. But $l_{k_{0}}^{*}l_{K}^{*}>l_{1}^{*}l_{K-1}^{*},\ k_{0}\geq1$ and hence $l_{k_{0}}^{*}l_{K}^{*}>1$,
which contradicts Lemma \ref{lem:ProdLess}. $\blacksquare$
\end{proof}
Now we are ready to prove the following proposition.
\begin{prop}
\label{lem:MainLem}Let $\boldsymbol{d}^{*}$ be a lex-optimal allocation and $K^{*}\geq2$. The set ${\cal L}_{1}^{*}$ is independent, ${\cal N}\left({\cal L}_{1}^{*}\right)={\cal L}_{K}^{*}$, and
\begin{align}
&l_{1}^{*}l_{K}^{*}=1 \label{eq:MainLemProd1}\,,\\
&\sum_{i\in{\cal L}_{1}^{*}}r_{i}^{*}=\sum_{i\in{\cal L}_{K}^{*}}D_{i},\label{eq:MainLemL1LK}\\
&{\rm \overline{In}}\left({\cal L}_{1}^{*}\cup{\cal L}_{K}^{*}\right)={\rm \overline{Out}}\left({\cal L}_{1}^{*}\cup{\cal L}_{K}^{*}\right)=0.\label{eq:MainLemInOut0}
\end{align}
\end{prop}
\begin{proof}
By Lemma \ref{lem:neighbor} the nodes in the set ${\cal N}\left({\cal L}_{1}^{*}\right)=\cup_{i\in{\cal L}_{1}^{*}}{\cal N}_{i}-{\cal L}_{1}^{*}=\cup_{i\in{\cal L}_{1}^{*}}{\cal N}_{i}$ (the last equality holds because $\forall i\in{\cal L}_{1}^{*}$, it is ${\cal N}_{i}\cap{\cal L}_{1}^{*}=\emptyset$) give resource only to nodes in ${\cal L}_{1}^{*}$, hence
\begin{equation}
{\rm \overline{In}}\left({\cal L}_{1}^{*}\right)=\sum_{i\in{\cal N}\left({\cal L}_{1}^{*}\right)}D_{i} \,.\nonumber
\end{equation}
Also, since ${\cal L}_{1}^{*}$ is an independent set, its nodes give all their resource to nodes in ${\cal N}\left({\cal L}_{1}^{*}\right)$, hence it is:
\[
{\rm \overline{Out}}\left({\cal L}_{1}^{*}\right)=\sum_{i\in{\cal L}_{1}^{*}}D_{i}\,.
\]
Applying (\ref{eq:equality}) to the set ${\cal L}_{1}^{*},$ we then have
\begin{equation}
\sum_{i\in{\cal L}_{1}^{*}}r_{i}^{*}=l_{1}^{*}\sum_{i\in{\cal L}_{1}^{*}}D_{i}=\sum_{i\in{\cal N}\left({\cal L}_{1}^{*}\right)}D_{i}\label{eq:help2-1}
\end{equation}
On the other hand, since according to Lemma \ref{lem:NoGive} the nodes in ${\cal N}\left({\cal L}_{1}^{*}\right)\subseteq{\cal L}_{K}^{*}$ give all their resource to nodes outside ${\cal L}_{K}^{*}$, according to (\ref{eq:OutIneq}) applied with equality, we get
\begin{equation}
{\rm \overline{Out}}\left({\cal N}\left({\cal L}_{1}^{*}\right)\right)=\sum_{i\in{\cal N}\left({\cal L}_{1}^{*}\right)}D_{i}\,.\label{eq:d1}
\end{equation}
Moreover, ${\cal L}_{1}^{*}$ is an independent set and thus the nodes in ${\cal N}\left({\cal L}_{1}^{*}\right)$ get all the resource from nodes in ${\cal L}_{1}^{*}$. Hence:
\[
{\rm \overline{In}}\left({\cal L}_{K}^{*}\right)\geq\sum_{i\in{\cal N}\left({\cal L}_{1}^{*}\right)}D_{i}.
\]
Applying now (\ref{eq:equality}) to the set ${\cal N}\left({\cal L}_{1}^{*}\right)$ we have:
\begin{equation}
\sum_{i\in{\cal N}\left({\cal L}_{1}^{*}\right)}r_{i}^{*}=l_{K}^{*}\sum_{i\in{\cal N}\left({\cal L}_{1}^{*}\right)}D_{i}\geq\sum_{i\in{\cal L}_{1}^{*}}D_{i}.\label{eq:d2}
\end{equation}
Multiplying (\ref{eq:help2-1}), (\ref{eq:d2}) we get $l_{K}^{*}l_{1}^{*}\geq1$. If ${\rm \overline{Out}}\left({\cal \bar{G}}\cup{\cal L}_{K}^{*}\right)>0$, from Lemma \ref{lem:ProdLess} we have $l_{K}^{*}l_{1}^{*}<1$, i.e., a contradiction. If ${\rm \overline{Out}}\left({\cal \bar{G}}\cup{\cal L}_{K}^{*}\right)=0$ and ${\cal \bar{G}}-{\cal L}_{1}^{*}\neq\emptyset$ then from the same lemma we have $l_{K}^{*}l_{1}^{*}<1$, again a contradiction.
The only case that remains is ${\rm \overline{Out}}\left({\cal \bar{G}}\cup{\cal L}_{K}^{*}\right)=0$ and ${\cal \bar{G}}-{\cal L}_{1}^{*}=\emptyset$ (i.e., ${\cal \bar{G}}\subseteq{\cal L}_{1}^{*}$) which again by the lemma implies $l_{K}^{*}l_{1}^{*}=1.$ Also, Lemma \ref{lem:AllInG} implies ${\cal \bar{G}}={\cal L}_{1}^{*}.$
${\cal N}\left({\cal L}_{1}^{*}\right)={\cal L}_{K}^{*}$ follows from Lemma \ref{lem:PropOfG}, and (\ref{eq:MainLemL1LK}) follows from (\ref{eq:help2-1}). $\blacksquare$
\end{proof}
After providing this last proposition, we can \uline{proceed with the proof for Theorem \ref{thm:MainTh0}}: For $k=1,$ Items \ref{enu:MainTh0Item1}- \ref{enu:MainTh0Item3} follow from Proposition \ref{lem:MainLem}. Hence the theorem is true when $K\in\left\{ 2,3\right\} $. Assume now that $K\geq4$. Since according to Proposition \ref{lem:MainLem} it is ${\rm Out}\left({\cal L}_{1}\cup{\cal L}_{K}\right)={\rm In}\left({\cal L}_{1}\cup{\cal L}_{K}\right)=0$, the restriction of $\boldsymbol{d}$ in ${\cal Q}_{2}$, $\boldsymbol{d}_{{\cal Q}_{2}}=\left\{ d_{ij}\right\} _{(i,j)\in{\cal E}_{{\cal Q}_{2}}}$ is an allocation on the graph with $K-2$ levels. But then $\boldsymbol{d}_{{\cal Q}_{2}}$ must be a lex-optimal allocation in $G_{{\cal Q}_{2}}=({\cal Q}_{2},{\cal E}_{{\cal Q}_{2}})$ since otherwise we could combine an allocation $\hat{\boldsymbol{d}}_{{\cal Q}_{2}}\succ\boldsymbol{d}_{{\cal Q}_{2}}$ with the components of $\boldsymbol{d}$ in ${\cal E}-{\cal E}_{{\cal Q}_{2}}$ and get a lexicographically better allocation on the original graph. Moreover, by construction we have for the lowest level set in $G_{{\cal Q}_{2}}$: ${\cal L}_{{\cal Q}_{2},1}={\cal L}_{2}$ and ${\cal L}_{{\cal Q}_{2},K-2}={\cal L}_{K-1}={\cal L}_{K-2+1}$. Hence properties \ref{enu:MainTh0Item1}- \ref{enu:MainTh0Item3} hold for $k=2$ and we can repeat the same arguments for the graph $G_{{\cal Q}_{2}}$ to deduce inductively the stated properties for all $k=1,\ldots,\flr{K/2}$. $\blacksquare$
\section{Game-theoretic Analysis}\label{sec:game-theory-frameworks}
\subsection{Coalitional Game} \label{sec:coalitional-subsection}
We consider two notions for coalition stability \cite{myerson-gametheory-book}, namely weak and strong stability\footnote{\small{Please note that this service exchange game does not possess the \emph{comprehensive property}, due to the fact that nodes allocate their entire idle resource, and hence we cannot define the inner core and the Shapley values and compare them with our solution. For more details on this, please see \cite{myerson-gametheory-book}.}}. The latter is a more restrictive condition, and preferable as it ensures there is no other allocation that will yield a strictly better payoff \emph{even} for one user. The main result in this context is:
\begin{theorem}
Any lex-optimal allocation $\bm{d}^{*}$ yields a received resource vector $\bm{r}^{*}$, that lies in the core of the NTU service exchange game, and it is strongly stable.
\label{thm:Stability}
\end{theorem}
Therefore, no subset of nodes can deviate and improve the total received resource, \emph{for at least one} of its members, without reducing the total received resource of at least another one of its members. Combining Theorems 1 and 4 we have the following corollary:
\begin{cor}
\label{thm:MainLex}Let $K^{*}\geq2.$
Under any lex-optimal allocation $\bm{d}^{*}$, the respective received resource vector $\bm{r}^{*}$, belongs to the core of the NTU coalitional servicing game, and has the following structure:
\begin{enumerate}
\item The set of nodes ${\cal N}$ is partitioned into disjoint groups ${\cal M}_{1}^{*},...,{\cal M}_{L}^{*},$ where $L=\left\lceil K^{*}/2\right\rceil$, and each group contains nodes with exchange ratios belonging to at most two different levels.
\item There are exactly $\left\lfloor K^{*}/2\right\rfloor $ groups with 2 levels. For group ${\cal M}_{k}^{*},\ 1\leq k\leq\lfloor{K^{*}/2\rfloor}$ it holds ${\cal M}_{k}^{*}={\cal L}_{k}^{*}\cup{\cal L}_{K-k+1}^{*}.$
\item \label{enu:CorItems4}If $K^{*}$ is odd, there is also a group with one ratio level, i.e., $\mathcal{M}_{\lceil{K^{*}/2}\rceil}^{*}=\mathcal{L}_{\lceil{K^{*}/2}\rceil}^{*}$.
\item \label{enu:CorItem5}It holds, $l_{k}^{*}l_{K-k+1}^{*}=1,\ 1\leq k\leq\lfloor{K^{*}/2\rfloor}$ and if $K^{*}$ is odd, the single level group has ratio $l_{\lceil{K^{*}/2\rceil}}^{*}=1.$
\item The set $\cup_{k=1}^{\left\lfloor K^{*}/2\right\rfloor }{\cal {\cal L}}_{k}^{*}$ is independent.
\end{enumerate}
\end{cor}
Finally, if $K^*=1$, there is one group of nodes with $l^*=1$.
\textbf{Analysis and Discussion}. \uline{Existence of Core}. The above results reveal that this coalitional service exchange NTU game has always a non-empty core, for any graph $G$, and any resource endowments $\{D_i\}_{i\in\mathcal{N}}$. Moreover, the core contains all lex-optimal allocations, which are also strongly stable. This is a more demanding condition than the non-emptiness of the core.
\uline{Groups of Nodes}. Within the grand coalition, not all the nodes interact with each other. For example, in Figure \ref{fig:Corollary-Example} where $K^{*}=7$, all the lex-optimal allocations result in $4$ groups (denoted with the dotted circles): $\mathcal{M}_{1}^{*}=\mathcal{L}_{1}^{*}\cup\mathcal{L}_{7}^{*}$, $\mathcal{M}_{2}^{*}=\mathcal{L}_{2}^{*}\cup\mathcal{L}_{6}^{*}$, $\mathcal{M}_{3}^{*}=\mathcal{L}_{3}^{*}\cup\mathcal{L}_{5}^{*}$, and $\mathcal{M}_{4}^{*}=\mathcal{L}_{4}^{*}$. Each group consists of nodes belonging to one or two exchange ratio level sets, and none of them exchange resources with nodes in different groups\footnote{\small{Notice that these groups do not constitute coalitions according to the given definitions, and they are derived by the grand coalition solution.}}. These properties are very useful for network design. For example, for a given network we can predict which nodes will interact in the fair and stable allocation policy, and remove the redundant physical links, which in certain cases induce additional cost \cite{JacksonWolinsky1996}.
\subsection{Competitive Market}
In the competitive market framework, each node acts greedily, without any information about the graph or the other nodes' resources, and allocates its resource so as to maximize the total resource it receives in return. Interestingly, equilibriums always exist in this autonomous and decentralized setting, and lead to lex-optimal exchange ratio vectors:
\begin{theorem}
\label{lem:reccip1}The following hold: \textbf{(i)} There is a lex-optimal allocation $\bm{d}^{*}$ under which each node $i\in\mathcal{N}$ gives resource to its neighbors in proportion to what it gets from them, i.e., $d_{ij}^{*}=d_{ji}^{*}D_{i}/{r_{i}^{*}}$, or $d_{ji}^{*}/d_{ij}^{*}=r_{i}^{*}/D_i=l_{k(i)}^{*},\, j\in \mathcal{N}_{i}$, and the neighbors not receiving resource from $i$ have higher exchange ratio, i.e., $l_{k(j)}^{*}\geq 1/l_{k(i)}^{*}=l_{k({\cal D}_{i})}^{*},\, j\in \mathcal{H}_{i}$. \textbf{(ii)} if the allocation satisfies the above conditions, then the allocation is lexicographically optimal.
\end{theorem}
The proof of the theorem is provided in the Appendix.
\textbf{Analysis and Discussion}. This theorem states that there is a fair lex-optimal allocation, where every node $i\in\mathcal{N}$ serves its neighbors $j\in\mathcal{D}_i$ with a resource $d_{ij}$, so as to have a constant and equal exchange ratio $d_{ij}/d_{ji}$ with all of them. Therefore, the lex-optimal allocation is an exchange equilibrium, and, additionally, any possible exchange equilibrium is also a lex-optimal allocation. In other words, the competitive interactions of rational users embedded in a graph, lead to the same allocation point that a central designer would have selected for such a system.
\uline{Dynamic Model}. \rev{An important aspect to notice is that this framework can capture both models where infinitely divisible resources are exchanged among users with different preferences, and also dynamic settings where users exchange indivisible resources over time, exploiting their diverse resource availability. To make the latter case more clear, consider a dynamic resource exchange system which operates in the continuous time domain}. Every node $i\in\mathcal{N}$ creates service opportunities for its neighbors (or, \emph{tokens}\footnote{\small{These are 0-1 token allocation decisions: whenever a user has an idle resource, e.g., an amount of unused bandwidth or energy, it can allocate it to one of its neighbors.}}) according to a Poisson process with possibly different rate $\lambda_{i}>0$. A meaningful strategy from the perspective of the nodes is the following: each node $i$ allocates a token generated at time $t$ to its neighbor that has, until then, given to $i$ the largest number of service tokens (per received token from $i$).
A rational user, with no information about the graph and the nodes' endowments, is reasonable to expect that this strategy can increase its benefit. Besides, this type of best response policies have been considered before, e.g., for P2P networks \cite{zhang-proportional} where it was shown that they converge to a steady state. In our case however, the scheme is decentralized and totally asynchronous. Interestingly, numerical results in Sec. \ref{sec:Numerical-Results} indicate that such myopic policies do converge to a steady state which moreover satisfies Theorem \ref{lem:reccip1}. This means that this dynamic model has a steady state that asymptotically coincides with the respective static model, and the equilibrium can be found if we set $D_i=\lambda_i$, $\forall\,i\in\mathcal{N}$. This is very important as it reveals that the results of this work do not apply only for the above static models, but also characterize the steady state allocations and equilibriums of more dynamic systems, where idle resources or service opportunities are created and allocated by each node asynchronously, greedily, and with no global network information (i.e., beyond the one hop neighbors).
\section{Lex-optimal Algorithms}\label{sec:algorithms}
In this section, we provide a polynomial (in $|\mathcal{N}|$) time algorithm that finds the lex-optimal allocation, and the respective exchange ratio vectors. The proposed algorithm uses the idea of max-min programming algorithm proposed in \cite{boudecfairness07} and takes advantage of the structure of lex-optimal exchange ratio vector described in Theorem \ref{thm:MainTh0} to improve performance. The algorithm is based on the following result.
\begin{lemma}\label{lem:MaxMin}
Let $\bar{\bm{d}}\in\mathbb{D}$ and $K\geq2$. If the set $\bar{{\cal L}}_{1}$ is independent and
\begin{equation}
\sum_{i\in\bar{{\cal L}}_{1}}\bar{r}_{i}=\sum_{i\in{\cal N}\left(\bar{{\cal L}}_{1}\right)}D_{i},\label{eq:AllFromD}
\end{equation}
then:
\begin{enumerate}
\item \label{enu:ItemH1} For any allocation $\hat{\bm{d}}$ that solves the problem
\begin{equation}
\underset{\bm{d}\in\mathbb{D}}{{\rm maximize}}\min_{j\in{\cal N}}\frac{r_{j}}{D_{j}}\label{eq:maxmmin}
\end{equation}
it holds $\bar{{\cal L}}_{1}\subseteq\widehat{{\cal L}}_{1}$ and $\bar{l}_{1}=\hat{l}_{1}$.
\item \label{enu:ItemH2} The set $\bar{\mathcal{L}}_{1}$ coincides with the respective set of the lex-optimal ratio vector, i.e., $\bar{{\cal L}}_{1}={\cal L}_{1}^{*}$ and $\bar{l}_{1}=l_{1}^{*}$.
\end{enumerate}
\end{lemma}
\begin{proof}
$1)\,$ Since $\hat{\boldsymbol{d}}$ solves (\ref{eq:maxmmin}) it holds $\hat{l}_{1}\geq\bar{l}_{1}$ and hence $\frac{\hat{r}_{i}}{D_{i}}\geq\hat{l}_{1}\geq\bar{l}_{1}=\frac{\bar{r}_{i}}{D_{i}},\ i\in\bar{{\cal L}_{1}}$
i.e.,
\begin{equation}
\hat{r}_{i}\geq\bar{r}_{i},\ \ i\in\bar{{\cal L}}_{1}.\label{eq:ineqrrate}
\end{equation}
We will show next that equality holds in (\ref{eq:ineqrrate}) which implies that $\bar{{\cal L}}_{1}\subseteq\widehat{{\cal L}}_{1}$. To see this notice that if strict inequality holds for at least one $i\in\bar{{\cal L}_{1}}$ then
\begin{equation}
\sum_{i{\cal \in\bar{L}}_{1}}\hat{r}_{i}>\sum_{i\in\bar{{\cal L}}_{1}}\bar{r}_{i}.\label{eq:RateIneq-1}
\end{equation}
But since $\bar{{\cal L}}_{1}$ is an independent set, we have
\begin{equation}
\sum_{i{\cal \in\bar{L}}_{1}}\hat{r}_{i}={\rm \widehat{In}}\left(\bar{{\cal L}}_{1}\right) \leq\sum_{i\in{\cal N}\left(\bar{{\cal L}}_{1}\right)}D_{i}=\sum_{i\in\bar{{\cal L}}_{1}}\bar{r}_{i}
\end{equation}
where the inequality holds by definition of inflow, and the last equality by assumption. This result contradicts (\ref{eq:RateIneq-1}).
$2)\,$ Since any lex-optimal allocation $\hat{\boldsymbol{d}}$ solves (\ref{eq:maxmmin}), we have $\bar{{\cal L}}_{1}\subseteq\hat{{\cal L}}_{1}={\cal L}_{1}^{*}.$
If $\bar{{\cal L}}_{1}$ were a strict subset of $\hat{{\cal L}_{1}}$, then $\bar{\boldsymbol{d}}$ would be lexicographically better that $\hat{\boldsymbol{d}},$ a contradiction. $\blacksquare$
\end{proof}
Algorithm $1$ provides the details. Recall the definition of graphs $G_{{\cal Q}_{k}}=\left({\cal Q}_{k},{\cal E}_{{\cal Q}_{k}}\right)$ used in Theorem \ref{thm:MainTh0}. The number of iterations of the algorithm is equal to the number of sets ${\cal Q}_{k}$, i.e., $\lceil{K^{*}/2}\rceil$. Since each of the level sets contains at least 2 nodes, it holds $K^{*}\leq N/2$, and hence the number of iterations is at most $\lceil{N/4}\rceil$.
In Step \ref{enu:solve_max_min}, $l_{k}^{*}$ is computed as the optimal value of optimization problem (1.1), as ensured by Lemma \ref{lem:MaxMin}.
The latter optimization problem can be transformed to a linear programming problem and hence can be solved in polynomial time. Note that the dimensionality
of the problem is reduced at each iteration. If the conditions in Steps \ref{enu:if--stop} and \ref{enu:is-stop1} are satisfied, then the lex-optimal allocation
has been determined on all links and the algorithm terminates.
The implementation and polynomial complexity of Step \ref{enu:Find_L1} will be discussed shortly. This step determines the set ${\cal L}_{k}^{*}$,
and hence ${\cal L}_{K^{*}-k+1}^{*}={\cal N}\left({\cal L}_{k}^{*}\right)$ and $l_{K^{*}-k+1}^{*}=1/l_{k}^{*}$. Also, at the exit from this step,
the allocated resources of all outgoing links from nodes in ${\cal L}_{K^{*}}^{*}$ to nodes in ${\cal N}-\left({\cal L}_{K^{*}}^{*}\cup{\cal L}_{1}^{*}\right)$,
will be zero and the allocated resources of all outgoing links from nodes in ${\cal L}_{K^{*}}^{*}$ to nodes in ${\cal L}_{1}^{*}$ will be determined.
\begin{algorithm}
\nl $k\leftarrow1$; \\%
\nl \While{1}{
\nl \label{enu:solve_max_min} Find $\bm{\hat{r}}$ and $\hat{\bm{d}}$ solving:
$\underset{\bm{d}\in\mathbb{D}_{{\cal Q}_{k}}}{{\rm maximize}}\min_{j\in{\cal N}_{{\cal Q}_{k}}}\frac{r_{j}}{D_{j}};$ (1.1) \\% \label{eq:maxminopt} \\%
\nl Set $l_{{\cal Q}_{k},1}^{*}$ to the value of the solution to (1.1) \\% (\ref{eq:maxminopt}) \\%
\nl \label{enu:if--stop}\textbf{If} $\left(\hat{r}_{i}/D_{i}=1,\ \forall i\in{\cal Q}_{k}\right)\,$ \textbf{then} $\,K^{*}=k$; \uline{Exit}; \\%
\nl \label{enu:Find_L1}Find the set ${\cal L}_{{\cal Q}_{k},1}^{*}$; \\
\nl Determine set ${\cal L}_{{\cal Q}_{k},2}^{*}={\cal N}\left({\cal L}_{{\cal Q}_{k},1}^{*}\right)$
and level value $l_{{\cal Q}_{k},2}^{*}=1/l_{{\cal Q}_{k},1}^{*}$ \\%
\nl \label{enu:ZeroAlloc} $d_{ji}\leftarrow 0$, $\forall$ $(j,i)$ $i\in{\cal L}_{{\cal Q}_{k},2}^{*}$, $j\in{\cal Q}_{k}$ - $\left({\cal L}_{{\cal Q}_{k},1}^{*}\cup{\cal L}_{{\cal Q}_{k},2}^{*}\right)$; \\%
\nl \label{enu:FindRates} Find lex-optimal allocations on links $\left(i,j\right),\ i\in{\cal L}_{{\cal Q}_{k},1}^{*},\ j\in{\cal L}_{{\cal Q}_{k},2}^{*}$; \\%
\nl \label{enu:is-stop1} \textbf{If} $\left({\cal L}_{{\cal Q}_{k},1}^{*}\cup{\cal L}_{{\cal Q}_{k},2}^{*}={\cal Q}_{k}\right)\,$ \textbf{then} {$\,K^{*}=k;$ \uline{Exit};}\\%
\nl $k\leftarrow k+1$; }
\caption{Finding the Lex-optimal allocation}
\end{algorithm}
Step \ref{enu:ZeroAlloc} sets to zero all allocations of incoming links from nodes in ${\cal Q}_{k}-\left({\cal L}_{{\cal Q}_{k},1}^{*}\cup{\cal L}_{{\cal Q}_{k},2}^{*}\right)$
to nodes in ${\cal L}_{{\cal Q}_{k},2}^{*}$, as is required by Theorem \ref{thm:MainTh0}. Step \ref{enu:FindRates} determines allocations $d_{ij}^{*},\ i\in{\cal L}_{k}^{*},\ j\in{\cal L}_{K^{*}-k+1}^{*}$. Since it is known by Theorem \ref{thm:MainTh0} that
\[
\sum_{j\in{\cal N}_{i}}d_{ji}^{*}=1/l_{k}^{*},\ i\in{\cal L}_{K^{*}-k+1}^{*},
\]
this step is equivalent to finding a feasible solution to a linear programming problem and hence it takes polynomial time to execute.
\begin{algorithm}
\nl ${\cal L}=\widehat{{\cal L}}_{1}$; $r_{ij}=\widehat{r}_{ij}$; /{*} \emph{on exit ${\cal L}={\cal L}_{1}^{*}$} {*}/ \\%
\nl \label{enu:While1}\While{$\exists\,(i,j)$ where $i,j\in{\cal L}$
\textbf{and $d_{ij_{1}}>0$ }for some $j_{1}\in{\cal N}\left({\cal L}\right)$}{
\nl \label{enu:Reallocate-rate}Reallocate resource from link $(i,j_{1})$ to link $(i,j)$ ensuring that with the new allocation $\min\{r_{j}/D_{j},\ r_{j_{1}}/D_{j_{1}}\}>l_{1}^{*}$; \\%
\nl Set ${\cal L}\leftarrow{\cal L}-\{j\}$; } /{*}\emph{on exit the set ${\cal L}$ is independent}{*}/ \\%\\%
\nl \label{enu:While2} \While{$\exists\,(i,j_{1})$, $i\in{\cal N}\left({\cal L}\right),\ j_{1}\in{\cal N}-{\cal L}$ with $r_{ij_{1}}>0,$}{
\nl Reallocate resource from node $j_{1}$ to a node $j$ in ${\cal N}\left(i\right)$ ensuring that with the new allocation $\min\{r_{j}/D_{j},\ r_{j_{1}}/D_{j_{1}}\}>l_{1}^{*}$; \\%
\nl Set ${\cal L}\leftarrow{\cal L}-\{j\}$; \emph{/{*} on exit, set ${\cal L}$ satisfies (\ref{eq:AllFromD}) {*}/}
}
\caption{Finding the set $\mathcal{L}_{1}^{*}$}
\end{algorithm}
It remains to show that Step \ref{enu:Find_L1} has polynomial complexity. According to Lemma \ref{lem:MaxMin}, the solution
to (\ref{eq:maxmmin}) determines $l_{1}^{*}<1$ and in general provides a solution $\widehat{{\cal L}}_{1}$ which is a superset of ${\cal L}_{1}^{*}$.
Furthermore, if by reallocating some of the link resources $\widehat{d}_{ij}$ we are able to create an allocation $\bar{\bm{d}}$ such that (i) the
set $\bar{{\cal L}_{1}}$ is independent, and (ii) the relation $\sum_{i\in\bar{{\cal L}}_{1}}\bar{r}_{i}=\sum_{i\in{\cal N}\left(\bar{{\cal L}}_{1}\right)}D_{i}$ holds,
then it will be ${\cal L}_{1}^{*}=\bar{{\cal L}}_{1}$.
The resource reallocation is described in Algorithm 2. There are two iteration loops. First, starting from the set ${\cal L}=\widehat{{\cal L}}_{1}$, if there
is a link $(i,j)$ such that $i,j\in{\cal L}$ then we select a node $j_{1}\in{\cal N}\left({\cal L}\right)$, with $d_{ij_{1}}>0$, and transfer resource from link $(i,j_{1})$
to the link $(i,j)$. This selection is always possible since otherwise the condition ``for all links $(i,j)$ such that $i,j\in{\cal L}$ there is no node
$j_{1}\in{\cal N}\left({\cal L}\right)$, with $d_{ij_{1}}>0$'' would hold; however, this implies that $l_{1}^{*}=1$ which is excluded because at this point
we have $K^{*}\geq2$. The transfer of resource from $(i,j_{1})$ to link $(i,j)$ in Step \ref{enu:Reallocate-rate} of the algorithm ensures that the received resource
ratios of nodes $j$ and $j_{1}$ are larger than $l_{1}^{*}$ and hence $j$ necessarily does not belong to ${\cal L}_{1}^{*}.$ Hence on exit from the while loop
in Step \ref{enu:While1} the set ${\cal L}$ is independent. However, in order to ensure equality to ${\cal L}_{1}^{*}$ we may need to further modify ${\cal L}$
to ensure that the condition (\ref{eq:AllFromD}) holds. This is done in the second while loop that starts at Step \ref{enu:While2}. Also, at the exit from the
algorithm, as a result of this reallocation process, the allocated resources of all outgoing links from nodes
in ${\cal L}_{K^{*}}^{*}$ to nodes in ${\cal N}-\left({\cal L}_{K^{*}}^{*}\cup{\cal L}_{1}^{*}\right)$ will be zero and, the allocated resources of all
outgoing links from nodes in ${\cal L}_{K^{*}}^{*}$ to nodes in ${\cal L}_{1}^{*}$ will be determined. As is clear from the above description, Algorithm
$1$ and $2$ take polynomial time to execute.
\section{Numerical Examples}\label{sec:Numerical-Results}
In this section, we analyze representative numerical examples to shed light on the above results. Consider first the network of Fig. $\ref{fig:1st-example-6nodes}$ which has $6$ nodes. Solid lines represent the physical connections of each node and dotted arrows indicate resource allocation. Next to each node $i$ we depict its resource endowment. At the lex-optimal point, we have $K^{*}=3$ levels with 3 node sets $\mathcal{L}_{1}^{*}={\{1,6\}},\,\mathcal{L}_{2}^{*}={\{3,4\}},\;\mathcal{L}_{3}^{*}={\{2,5\}}$ which are marked with different colors.
\begin{figure}
\caption{\small{A network with 6 nodes that create 2 groups, each one marked with the dotted-line rectangle. There are 3 different levels of exchange ratios. The color of each node is analogous to its exchange ratio value (increasing from white to black colour). The received resources are $r_{1},\ldots,r_{6}$.}}
\label{fig:1st-example-6nodes}
\end{figure}
Let us now verify the properties that the lex-optimal allocation should have according to Theorem $\ref{thm:MainTh0}$. First, notice that set $\mathcal{L}_{1}^{*}$ is independent in graph $G$. Moreover, all the neighbors of nodes in set $\mathcal{L}_{3}^{*}$, i.e., nodes $2$ and $5$, belong in $\mathcal{L}_{1}^{*}$. Although nodes in $\mathcal{L}_{3}^{*}$ are physically connected, they only allocate resource to nodes in $\mathcal{L}_{1}^{*}$ and it holds $\sum_{i\in\mathcal{L}_{3}^{*}}D_{i}=\sum_{i\in\mathcal{L}_{1}^{*}}r_{i}=20+30$. Moreover, the highest and the lowest levels satisfy the condition $l_{1}^{*}l_{3}^{*}=1$.
Similarly, we can verify that the structure of the lex-optimal allocation satisfies Corollary $\ref{thm:MainLex}$. The nodes are partitioned into $2$ disjoint groups $\mathcal{M}_{1}^{*}=\mathcal{L}_{1}^{*}\cup\mathcal{L}_{3}^{*}$ and $\mathcal{M}_{2}^{*}=\mathcal{L}_{2}^{*}$, each one containing nodes with at most two levels. Also, the nodes in $\mathcal{L}_{1}^{*}$ are connected only to nodes in the set $\mathcal{L}_{3}^{*}$, and it is $l_{2}^{*}=1$. Finally, the conditions of Theorem $\ref{lem:reccip1}$ are satisfied. For example, node $2$ allocates resource only to node $1$, with $l_{k(1)}=1/l_{k(2)}$, and not to node $4$ since it is $l_{k(4)}=1>0.5=l_{k(\mathcal{D}_{2})}$, where $l_{k(\mathcal{D}_{2})}=l_{k(1)}$.
For the example of Fig. $\ref{fig:3rd-example-13nodes}$ we used a network with $13$ nodes that yields $K^{*}=6$ levels, with $l_{1}^{*}=0.25$, $l_{2}^{*}=0.43$, $l_{3}^{*}=0.77$, $l_{4}^{*}=1.3$, $l_{5}^{*}=2.34$, and $l_{6}^{*}=4$. The sets are $\mathcal{L}_{1}^{*}=\{12,\,13\}$, $\mathcal{L}_{2}^{*}=\{4,\,6,\,8,\,10\}$, $\mathcal{L}_{3}^{*}=\{2\}$, $\mathcal{L}_{4}^{*}=\{1\}$, $\mathcal{L}_{5}^{*}=\{3,\,5,\,7,\,9\}$, and $\mathcal{L}_{6}^{*}=\{11\}$. Sets $\mathcal{L}_{1}^{*}$, $\mathcal{L}_{2}^{*}$, and $\mathcal{L}_{3}^{*}$ are independent in graphs $G_{\mathcal{Q}_{1}},$ $G_{\mathcal{Q}_{2}}$, $G_{\mathcal{Q}_{3}}$, and the set $\mathcal{L}_{1}^{*}\cup\mathcal{L}_{2}^{*}\cup\mathcal{L}_{3}^{*}$ is independent in $G$. Moreover, it is $\mathcal{L}_{6}^{*}=\mathcal{N}_{\mathcal{Q}_{1}}(\mathcal{L}_{1}^{*})$, $\mathcal{L}_{5}^{*}=\mathcal{N}_{\mathcal{Q}_{2}}(\mathcal{L}_{2}^{*})$ and $\mathcal{L}_{4}^{*}=\mathcal{N}_{\mathcal{Q}_{3}}(\mathcal{L}_{3}^{*})$, and holds $l_{6}^{*}l_{1}^{*}=l_{5}^{*}l_{2}^{*}=l_{4}^{*}l_{3}^{*}=1$. In this example we have 3 disjoint groups $\mathcal{M}_{1}^{*}=\mathcal{L}_{1}^{*}\cup\mathcal{L}_{6}^{*}$, $\mathcal{M}_{2}^{*}=\mathcal{L}_{2}^{*}\cup\mathcal{L}_{5}^{*}$, and $\mathcal{M}_{3}^{*}=\mathcal{L}_{3}^{*}\cup\mathcal{L}_{4}^{*}$. We see that links $(10,11)$, $(5,11)$, $(1,3)$, $(1,5)$ and $(2,7)$ are redundant and can be removed without affecting the lex-optimal allocation.
\begin{figure}
\caption{\small{A network with $13$ nodes which create $3$ groups. Received resources are $r_{1},\dots,r_{13}$.}}
\label{fig:3rd-example-13nodes}
\end{figure}
\begin{figure}
\caption{\small{A complete graph of $6$ nodes with $1$ coalition and $2$ levels.}}
\label{fig:complete-graph-6nodes}
\end{figure}
Figure $\ref{fig:complete-graph-6nodes}$ (right) depicts a complete graph with $6$ nodes, where node $i=4$ has level $l_{1}^{*}=0.988$ while the other nodes have level $l_{2}^{*}=1.012$. In general for complete graphs, from Property 6 of Corollary $\ref{thm:MainLex}$ and the fact that independent sets in such graphs contain only one node, it follows that lex-optimal allocations may have at most two levels. Moreover a complete graph has two levels iff the resource of node $i_{0}$ with the maximum endowment is larger than the sum of the resources of the rest of the nodes, and it is $\mathcal{L}_{1}=\{i_{0}\}$. On the other hand, for the respective $6$-node ring graph, the lex-optimal solution yields $2$ groups and $4$ levels.
\begin{figure}
\caption{\small{Convergence results for the dynamic and asynchronous best response strategy of nodes, for networks in Fig.~\ref{fig:1st-example-6nodes}.}}
\label{figure:convergence}
\end{figure}
Finally, we show that the naive best response strategy of the nodes in any graph-constrained dynamic resource exchange market, converges to a steady state point. Moreover, the latter coincides with the lex-optimal point of a static market in which every node has an average resource that is equal to the respective token generation rate of the dynamic market, i.e., $\lambda_i=D_i,\,\forall\,i\in\mathcal{N}$. In Figure \ref{figure:convergence} we present the quite fast convergence (each slot corresponds to the creation of a service opportunity) of this scheme for the above four networks, where we see that the system converges to the expected ratio values.
\section{Related Works}\label{sec:Related}
The model we consider is generic and representative for many communication or economic networks. For example, such models arise in graphical economies \cite{KearnsGraphEcon2004}, \cite{KearnsEconSocial2004}, which extend the classical Walrasian equilibrium \cite{ColellWhinstonGreenBook1995} and Arrow--Debreu analysis \cite{arrow-debreu} by imposing graph constraints on the subsets of buyers and sellers that can trade. However, our model does not presume any type of money transfers, i.e., there are no budget constraints (as in typical exchange economies) and the nodes do not value money (as in market games) \cite{osborne}. \rev{Similar bartering models have been studied for housing markets \cite{shapley-scarf} or timeshare exchanges \cite{krishna06}, where the focus has been to prove existence of equilibriums.}
Here, we fully characterize the equilibriums, relate them to the max-min fair solution, and study how they are affected by the graph. We also prove that these exchange equilibriums lie within the core of the respective NTU game. Although this relation is known for market games and the respective coalitional games \cite{osborne}, to the best of our knowledge this is the first result for NTU coalitional graph-constrained games without money. This property is also related to \emph{strong} Nash equilibriums (see \cite{strong-nash} and references therein), for which however there are no general existence results, nor are they appropriate for this competitive framework. Finally, \cite{Herings2000}, and \cite{JacksonWolinsky1996} studied also core solutions of coalitional graph games where the nodes are allowed to create new or sever existing connections. In our model the graph is exogenously given, e.g., based on the location of the nodes.
The problem of enabling cooperation in networks (or, networked systems) is of paramount importance and has been considered in different contexts, such as routing in ad hoc networks \cite{hubaux-coop}, WiFi sharing models \cite{efstathiou}, mesh networks \cite{wu-mesh}, and P2P overlays \cite{RJohariToNBilateral2011}. This is a problem that gains increasing interest in communication networks \cite{Sofia-UPN}, \cite{bewifi}, \cite{FON}, \cite{confine}, and in social and economic networks as well \cite{collaborativeconsumption}, \cite{KearnsGraphEcon2004}, \cite{JacksonWolinsky1996}. Unlike previous works, our model does not presume any kind of infrastructure, e.g., for transaction or reputation systems. Instead, we show numerically that asynchronous best response algorithms, with no information about the graph and resource endowments, converge to a fair and robust (i.e., in the core) exchange equilibrium.
Previous works e.g., \cite{zhang-proportional} have studied similar mechanisms for P2P file sharing systems, without however characterizing its properties and relation to competitive and coalitional equilibriums. The max-min fair criterion is natural for this setting, as it is defined with respect to each user's contribution. Also, while our model is similar to previous works, e.g., see \cite{RJohariToNBilateral2011} and references therein, our analysis provides novel insights for the structure and properties of the resulting equilibriums, and we also propose polynomial-time algorithms for their calculations. These algorithms can be also used for deriving the competitive equilibriums in graphical economies \cite{KearnsEconSocial2004}, \cite{KearnsGraphEcon2004}.
\section{Discussion and Conclusions}\label{sec:Conclusions}
We considered a service (or, resource) exchange model among self-interested nodes embedded in a graph that prescribes their possible interactions. This is a key network model that represents Internet sharing communities \cite{FON}, \cite{Sofia-UPN}, \cite{bewifi}, \cite{confine}, P2P file sharing \cite{RJohariToNBilateral2011}, \cite{ioannidis-peer-assisted}, energy sharing networks \cite{saad-smart}, graphical economies \cite{KearnsGraphEcon2004}, \cite{KearnsEconSocial2004} and many online resource sharing platforms \cite{collaborativeconsumption}, \cite{neighborgood}, \cite{adalbdal}, \cite{swapit}, \cite{homeexchange}. \rev{Such systems can be dynamic where the users share their resource surpluses that they have in different (and diverse) time instances, or static where users having different resource preferences barter with each other so as to acquire the resources they value higher}. Despite the large interest of the research community and the previous contributions for specific related models (e.g., for P2P overlays), the fundamental properties of these systems remain unexplored.
We showed that the max-min fair policy exhibits a very rich structure, and characterized its properties for any given graph and node resource endowments. More importantly, we proved that this policy coincides with the exchange equilibrium of the respective competitive game, and lies in the core of the respective NTU coalitional game. This important result reveals that there is a unifying approach that solves the resource allocation problem for graph-constrained systems (or, economies), for different node behaviors. In other words, we can apply the max-min fair criterion, that has been extensively used for load balancing in centralized communication networks (e.g., see \cite{nace-tutorial} and references therein), to service exchange models with autonomous and selfish nodes.
Finally, our findings contribute to the game theoretic literature since the connection between the competitive equilibrium for this graph-constrained model and the core of the respective NTU coalitional game is a new finding. We also proved the more strict \emph{strong stability} property. A special aspect of our model is that we do not consider side payments (money), not even in the form of budget constraints. This renders the analysis significantly different than most of the previous models \cite{osborne}, \cite{collaborativeconsumption}, \cite{zhang-proportional}, yet very appropriate for the considered problem. We believe that these results open many fascinating directions for future work. Among them, it is important to relax the common assumption of large demand that exceeds resource availability for the users (considered also in \cite{RJohariToNBilateral2011}, \cite{zhang-proportional}, \cite{saad-smart}, \cite{KearnsGraphEcon2004}, \cite{KearnsEconSocial2004}, \cite{myerson-gametheory-book}, \cite{efstathiou}), and provide a formal proof for the convergence of the dynamic asynchronous user interaction model.
\section*{Appendix}
We provide the additional proofs for the theorems and the lemmas.
\subsection*{Proof of Theorem \ref{lem:reccip1}}
We begin with \textbf{(i)}. Let $\bar{\boldsymbol{d}}$ be a lex-optimal allocation. Starting from $\bar{\boldsymbol{d}}$ we will construct $\boldsymbol{d}$ by a sequence of link resource reallocations. Notice that for any allocation $\boldsymbol{\hat{d}}$, it holds:
\begin{equation}
\hat{r}_{i} =\hat{l}_{\hat{k}(i)}D_{i}=\sum_{j\in{\cal N}_{i}}\left(\hat{l}_{\hat{k}(i)}\hat{d}_{ij}\right)=\sum_{j\in{\cal N}_{i}}\hat{d}_{ji}
\end{equation}
where the first equality is by definition of $\hat{l}_{\hat{k}(i)}$, the second by (\ref{eq:Allloc}), and the last by definition of $\hat{r}_{i}$. Hence under any allocation $\hat{\boldsymbol{d}}$ we have the following fact.
\begin{align}
&\hat{l}_{\hat{k}(i)}\hat{d}_{ij_{1}}>\hat{d}_{j_{1}i}\mbox{ for some }j_{1}\in{\cal N}_{i},\nonumber \\
&\mbox{ iff }\hat{l}_{\hat{k}(i)}\hat{d}_{ij_{2}}<\hat{d}_{j_{2}i}\mbox{ for another }j_{2}\in{\cal N}_{i}.\label{eq:iff}
\end{align}
Let ${\cal Y}_{0}$ be the set of links $\left(i,j\right)$ for which it holds $\,\bar{l}_{\bar{k}(i)}\bar{d}_{ij}>\bar{d}_{ji}$. If ${\cal Y}_{0}$
is nonempty, we will show that we can reallocate resource so that under the new allocation $\boldsymbol{d}_{1}$, the resources the nodes receive remain the same, while for at least one of the links $(i,j)$ in ${\cal Y}_{0}$ it holds, $\bar{l}_{\bar{k}(i)}\bar{d}_{ij}=\bar{d}_{ji}$ while the rest of the inequalities still hold in their original direction. Hence the new allocation $\boldsymbol{d}_{1}$ is also lex-optimal, while ${\cal Y}_{1}={\cal Y}_{0}-\{\left(i,j\right)\}$. Proceeding in this manner we will arrive at an allocation $\boldsymbol{d}$ which is still lex-optimal but for which ${\cal Y}=\emptyset$. Based on (\ref{eq:iff}) we will then conclude that the last allocation satisfies
\begin{equation}
d_{ji}^{*}/d_{ij}^{*}=r_{i}^{*}/D_i=l_{k(i)}^{*},\, j\in \mathcal{N}_{i},
\end{equation}
as stated in \textbf{(i)}. Also, let $(i_{1},i_{0})$ be a link such that $\bar{l}_{\bar{k}(i_{1})}\bar{d}_{i_{1}i_{0}}>\bar{d}_{i_{0}i_{1}}\geq0$ (hence $\bar{l}_{\bar{k}(i_{1})}>0$). Then according to (\ref{eq:iff}) there must be a link $(i_{1},i_{2})$ such that $\bar{l}_{\bar{k}(i_{1})}\bar{d}_{i_{1}i_{2}}<\bar{d}_{i_{2}i_{1}}$ or
\[
\frac{1}{\bar{l}_{\bar{k}(i_{1})}}\bar{d}_{i_{2}i_{1}}>\bar{d}_{i_{1}i_{2}}\geq0.
\]
But due to lex optimality of $\boldsymbol{\bar{d}}$, we conclude from Corollary \ref{thm:MainLex} \textcolor{black}{Property \ref{enu:CorItem5}} that $\bar{l}_{\bar{k}(i_{2})}=\left(1/\bar{l}_{\bar{k}(i_{1})}\right)$ and hence the above becomes $\bar{l}_{\bar{k}(i_{2})}\bar{d}_{i_{2}i_{1}}>\bar{d}_{i_{1}i_{2}}$. Repeating this procedure we find a sequence of links $\left(i_{m},i_{m-1}\right),\ m=1,2,..$ for which it holds:
\begin{equation}
\bar{l}_{\bar{k}(i_{m})}\bar{d}_{i_{m}i_{m-1}}>\bar{d}_{i_{m-1}i_{m}}.\label{eq:cycle}
\end{equation}
Since the number of nodes is finite, we will eventually find a simple
(no repeated nodes) cycle that satisfies (\ref{eq:cycle}). For all
nodes $m=1,2,..M$ on this cycle, subtract resource $\delta$ from $\bar{d}_{i_{m}i_{m-1}}$
and increase by $\delta$ the resource $\bar{d}_{i_{m}i_{m+1}}.$ In addition,
we require that the following relation must be satisfied for all nodes
$m$ on the cycle,
\begin{equation}
\bar{l}_{i_{m}}(\bar{d}_{i_{m}i_{m-1}}-\delta)\geq\bar{d}_{i_{m-1}i_{m}}+\delta,\ m=1,2,...,M,\,\text{or}
\end{equation}
\[
0<\delta\leq\frac{\bar{l}_{i_{m}}\bar{d}_{i_{m}i_{m-1}}-\bar{d}_{i_{m-1}i_{m}}}{\bar{l}_{i_{m}}+1},\ m=1,2,...,M
\]
This choice of $\delta$ ensures that the increase-decrease of resource
allocation gives a new allocation and that with the new allocation
inequalities (\ref{eq:cycle}) either still hold in their original
direction or become equalities. Since we have a cycle, this increase-decrease
does not alter the resource the nodes of the cycle get. Since the resources
of the rest of the nodes in the network are not changed, the resulting
allocation is still lex-optimal. We now pick
\[
\delta_{1}=\min_{m}\left\{ \frac{\bar{l}_{i_{m}}\bar{d}_{i_{m}i_{m-1}}-\bar{d}_{i_{m-1}i_{m}}}{\bar{l}_{i_{m}}+1}\right\} >0.
\]
This choice ensures that at least one of the inequalities (\ref{eq:cycle})
become equality for some node in the cycle as desired.
Next we prove \textbf{(ii)}. First, we need a useful result: if an allocation $\boldsymbol{d}$ satisfies $d_{ji}=d_{ij}l_{k(i)},\, j\in \mathcal{N}_{i}$, then for any node $j\in\mathcal{D}_i$ it holds:
\begin{equation}
l_{k(j)}>0,\,\,\text{and}\,\,l_{k(j)}=1/l_{k(i)} \label{eq:itemreceip22}
\end{equation}
To see this, note that since $j\in{\cal D}_{i},$ by definition $d_{ij}>0$, hence $l_{k(j)}=\left(r_{j}/D_{j}\right)\geq\left(d_{ij}/D_{j}\right)>0$ and since by assumption
\begin{equation}
d_{ij}=l_{k(j)}d_{ji},\label{eq:help1}
\end{equation}
it also holds $d_{ji}>0$. Next, since by assumption
\begin{equation}
d_{ji}=l_{k(i)}d_{ij},\label{eq:help2}
\end{equation}
multiplying (\ref{eq:help1}) and (\ref{eq:help2}) and canceling (the nonzero) terms we have $l_{k(i)}l_{k(j)}=1.$ Therefore (\ref{eq:itemreceip22}) holds.
Now, notice first that any node $i$ gives resource to at least one node $j$, hence $d_{ij}>0$. Therefore, it follows from eq. (\ref{eq:itemreceip22}) that $l_{1}>0$. If under allocation $\boldsymbol{d}$ there is only one level, i.e., $K=1$, then by Proposition \ref{prop:LexK1}, it is lex-optimal. Hence we concentrate on the case $K\ge2$. We will show that allocation $\boldsymbol{d}$ satisfies the properties of Theorem \ref{thm:MainTh0} and hence, by Theorem \ref{thm:MainLexSuff}, it is lex-optimal.
Consider first $k=1$. The nodes in ${\cal L}_{1}$ constitute an independent set. To see this note that $d_{ij}=0$ for all links $(i,j)$ with $i,j\in{\cal L}_{1}$ since otherwise (i.e., $d_{ij}>0$) by eq. (\ref{eq:itemreceip22}), and the fact that $l_{k(i)}=l_{k(j)}=l_{1}$ it will follow that $l_{1}=1$, which contradicts Lemma \ref{lem:levels}.
Hence all nodes in ${\cal L}_{1}$ give their resource to nodes in higher layers. Using again eq. (\ref{eq:itemreceip22}), we conclude that all nodes in ${\cal L}_{1}$ give resource to nodes at level with value $1/l_{1}$. Now, if there is a link $(i,j)\in{\cal E}$ with $i,j\in{\cal L}_{1}$ then since $d_{ij}=0$, the condition in \textbf{(i)}, i.e., "the neighbors not receiving resource from $i$ have higher exchange ratio" (which holds according to Lemma \ref{lem:neighbor}), implies that $l_{k(j)}\geq1/l_{1}$ and since $l_{k(j)}=l_{1},$ we conclude $l_{1}\geq1$, a contradiction. Hence Item \ref{enu:MainTh0Item1} of Theorem \ref{thm:MainTh0} holds for $k=1.$
Consider now the nodes in ${\cal N}\left({\cal L}_{1}\right)$. Nodes in ${\cal N}\left({\cal L}_{1}\right)$ give resource only to nodes in ${\cal L}_{1}$. To see this, note that if node $j\in{\cal N}\left({\cal L}_{1}\right)$ were giving resource to a node $i\notin{\cal L}_{1}$, then, since there are neighbors of $j$ in ${\cal L}_{1}$ by a similar reasoning ("the neighbors not receiving resource from $i$ have higher exchange ratio", Lemma \ref{lem:neighbor}), it would hold $l_{1}\geq l_{k(i)}$, i.e., $l_{1}=l_{k(i)}$, which contradicts the fact that $i\notin{\cal L}_{1}$. Since all nodes in ${\cal N}\left({\cal L}_{1}\right)$ give resource to nodes in ${\cal L}_{1}$ it follows from (\ref{eq:itemreceip22}) that all nodes in ${\cal N}\left({\cal L}_{1}\right)$ are at the same level and $l_{k\left({\cal N}\left({\cal L}_{1}\right)\right)}=1/l_{1}$.
We claim now that for any node $i\in{\cal N}-{\cal N}\left({\cal L}_{1}\right)$ it holds, $l_{k(i)}<l_{k\left({\cal N}\left({\cal L}_{1}\right)\right)}$
which implies that ${\cal L}_{K}={\cal N}\left({\cal L}_{1}\right)$ and hence Item \ref{enu:MainTh0Item2} of Theorem \ref{thm:MainTh0} holds for $k=1.$ Indeed, assume that $l_{k(i)}\geq l_{k\left({\cal N}\left({\cal L}_{1}\right)\right)}=1/l_{1}$. Then since node $i$ gives resource to at least another neighbor node $j$, by (\ref{eq:itemreceip22}) we would have $l_{k(j)}=\left(1/l_{k(i)}\right)\leq l_{1}$ hence $l_{k(j)}=l_{1}$, i.e., $j\in{\cal L}_{1}$ which contradicts the fact that $i\in{\cal N}-{\cal N}\left({\cal L}_{1}\right)$.
The fact that Item \ref{enu:MainTh0Item2.1} of Theorem \ref{thm:MainTh0} holds for $k=1,$ follows again from (\ref{eq:itemreceip22}). Also, Item \ref{enu:MainTh0Item3} of Theorem \ref{thm:MainTh0} holds since as shown above, all nodes in ${\cal L}_{K}={\cal N}\left({\cal L}_{1}\right)$ give their resource to nodes in ${\cal L}_{1}$. If $K=2$, the lex-optimality of $\boldsymbol{d}$ follows from Theorem \ref{thm:MainLexSuff}. Consider next $K\geq3$. According to Lemma \ref{lem:InOutZero}, it holds
\[
{\rm In}\left({\cal L}_{k}\cup{\cal L}_{K-k+1}\right)={\rm Out}\left({\cal L}_{k}\cup{\cal L}_{K-k+1}\right)=0,
\]
hence the restriction of $\boldsymbol{d}$ on ${\cal Q}_{2}$, $\boldsymbol{d}_{{\cal Q}_{2}}$ constitutes an allocation on $G_{{\cal Q}_{2}}$ with $K_{{\cal Q}_{2}}=K-2$ levels. Moreover, since no nodes at levels ${\cal L}_{1}$ and ${\cal L}_{K}$ are in ${\cal Q}_{2},$ we have ${\cal L}_{{\cal Q}_{2},1}={\cal L}_{2}$, and ${\cal L}_{{\cal Q}_{2},K_{{\cal Q}_{2}}}={\cal L}_{K-1}$. Also, (\ref{eq:itemreceip22}) and \textbf{(ii)} continue to hold for $\boldsymbol{d}_{{\cal Q}_{2}}$ on $G_{{\cal Q}_{2}}$.
If $K=3,$ then we have $K_{{\cal Q}_{2}}=1,$ hence the corresponding Item of Theorem \ref{thm:MainTh0} holds. If $K\geq4,$ we can apply now the arguments we used for $k=1$ to show that Items \ref{enu:MainTh0Item1}-\ref{enu:MainTh0Item3} hold for $k=2.$ Proceeding iteratively we show that all properties in Theorem \ref{thm:MainTh0} hold for $\boldsymbol{d}$ and hence it is lex-optimal. $\blacksquare$
\subsection*{PROOF of Theorem \ref{thm:MainLexSuff}}
Before proving Theorem \ref{thm:MainLexSuff}, we need the following lemma.
\begin{lemma}
\label{lem:InOutZero}
If an allocation $\boldsymbol{d}$ with $K\geq2$ satisfies Properties \ref{enu:MainTh0Item1}- \ref{enu:MainTh0Item3} of Theorem \ref{thm:MainTh0}, it holds for $k=1,...,\flr{K/2}$:
\begin{equation}
{\rm In}\left({\cal L}_{k}\cup{\cal L}_{K-k+1}\right)={\rm Out}\left({\cal L}_{k}\cup{\cal L}_{K-k+1}\right)=0.\label{eq:InOutZero}
\end{equation}
\end{lemma}
\begin{proof}
Let $k=1$. Since by Property \ref{enu:MainTh0Item1} of Theorem \ref{thm:MainTh0} the set ${\cal L}_{1}$ is independent, we have $\sum_{i\in{\cal L}_{1}}r_{i}={\rm In}\left({\cal L}_{1}\right)$.
Also, since only nodes in ${\cal N}_{Q_{1}}\left({\cal L}_{1}\right)={\cal N}\left({\cal L}_{1}\right)={\cal L}_{K}$ may have links with nodes in $\left({\cal L}_{1}\cup{\cal L}_{K}\right)^{c}$, we have ${\rm Out}\left({\cal L}_{K}\right)={\rm Out}\left({\cal L}_{1}\cup{\cal L}_{K}\right)+{\rm In}\left({\cal L}_{1}\right)$, and hence:
\begin{align*}
&\sum_{i\in{\cal L}_{K}}D_{i} \geq{\rm Out}\left({\cal L}_{K}\right)={\rm Out}\left({\cal L}_{1}\cup{\cal L}_{K}\right)+{\rm In}\left({\cal L}_{1}\right)\\
& ={\rm Out}\left({\cal L}_{1}\cup{\cal L}_{K}\right)+\sum_{i\in{\cal L}_{1}}r_{i}={\rm Out}\left({\cal L}_{1}\cup{\cal L}_{K}\right)+\sum_{i\in{\cal L}_{K}}D_{i},
\end{align*}
where the last equality is due to Property \ref{enu:MainTh0Item3} of Th. \ref{thm:MainTh0}. The last equality implies that ${\rm Out}\left({\cal L}_{1}\cup{\cal L}_{K}\right)=0$ and:
\begin{equation}
{\rm Out}\left({\cal L}_{K}\right)=\sum_{i\in{\cal L}_{K}}D_{i}.\label{eq:InOut1}
\end{equation}
Next, we have for the nodes in ${\cal L}_{K}$:
\begin{equation}
\sum_{i\in{\cal L}_{K}}r_{i}+{\rm Out}\left({\cal L}_{K}\right)=\sum_{i\in{\cal L}_{K}}D_{i}+{\rm In}\left({\cal L}_{K}\right)\label{eq:equality}
\end{equation}
Since by independence of ${\cal L}_{1}$ it holds:
\[
{\rm In}\left({\cal L}_{K}\right)=\sum_{i\in{\cal L}_{1}}D_{i}+{\rm In}\left({\cal L}_{1}\cup{\cal L}_{K}\right),
\]
and taking into account (\ref{eq:InOut1}), we conclude from (\ref{eq:equality}) $\sum_{i\in{\cal L}_{K}}r_{i}=\sum_{i\in{\cal L}_{1}}D_{i}+{\rm In}\left({\cal L}_{1}\cup{\cal L}_{K}\right)$,
or since $r_{i}=l_{K}D_{i},\ i\in{\cal L}_{K}$, it is:
\[
l_{K}\sum_{i\in{\cal L}_{K}}D_{i}=\sum_{i\in{\cal L}_{1}}D_{i}+{\rm In}\left({\cal L}_{1}\cup{\cal L}_{K}\right).
\]
Similarly, from the equality in Property \ref{enu:MainTh0Item3} we have $l_{1}\sum_{i\in{\cal L}_{1}}D_{i}=\sum_{i\in{\cal L}_{K}}D_{i}$. Multiplying the last two equalities and rearranging terms we get:
\begin{equation}
l_{1}l_{K}=1+\frac{{\rm In}\left({\cal L}_{1}\cup{\cal L}_{K}\right)}{\sum_{i\in{\cal L}_{1}}D_{i}}\,.
\end{equation}
But since by Item \ref{enu:MainTh0Item2.1} of Theorem \ref{thm:MainTh0} it holds $l_{1}l_{K}=1,$ we conclude that ${\rm In}\left({\cal L}_{1}\cup{\cal L}_{K}\right)=0$.
Hence, if $K\in\left\{ 2,3\right\} $ the lemma holds. Next, assume $K\geq4$ and observe that since (\ref{eq:InOutZero}) holds for $k=1$, the restriction $\boldsymbol{d}_{{\cal Q}_{2}}$ of $\boldsymbol{d}$ to ${\cal Q}_{2}$ is an allocation on ${\cal Q}_{2}$ with $K-2$ levels and by construction ${\cal L}_{{\cal Q}_{2},1}={\cal L}_{2}$, ${\cal L}_{{\cal Q}_{2},K-2}={\cal L}_{K-1}={\cal L}_{K-2+1}$. Therefore, we can repeat the arguments above for $k=2$ and inductively show that the lemma holds for $k=1,\dots,\flr{K/2}$. $\blacksquare$
\end{proof}
\uline{Proof of Theorem \ref{thm:MainLexSuff}}: For $k=1,$ since by Properties \ref{enu:MainTh0Item1} and \ref{enu:MainTh0Item3} of Theorem \ref{thm:MainTh0} ${\cal L}_{1}$ is an independent set and $\sum_{i\in{\cal L}_{1}}r_{i}=\sum_{i\in{\cal N}\left({\cal L}_{1}\right)}D_{i}$,
it follows from Lemma \ref{lem:MaxMin} that ${\cal L}_{1}^{*}={\cal L}_{1}$ and $l_{1}^{*}=l_{1}$. Also, by Properties \ref{enu:MainTh0Item2}, \ref{enu:MainTh0Item2.1} of Theorem \ref{thm:MainTh0} we have $l_{K}=l_{K^{*}}^{*}$ and ${\cal L}_{K}={\cal N}\left({\cal L}_{1}\right)={\cal N}\left({\cal L}_{1}^{*}\right)={\cal L}_{K^{*}}^{*}$, where the last equality follows from Proposition \ref{lem:MainLem}. If $K=2,$ then since ${\cal N}={\cal L}_{1}\cup{\cal L}_{2}={\cal L}_{1}^{*}\cup{\cal L}_{K^{*}}^{*}$ we have necessarily $K^{*}=2$ and we conclude that $\boldsymbol{d}$ is lex-optimal. Assume now that $K=3$. From Proposition \ref{lem:MainLem} we then have ${\rm In}\left({\cal L}_{1}\cup{\cal L}_{3}\right)={\rm Out}\left({\cal L}_{2}\cup{\cal L}_{3}\right)=0$.
Hence the restriction $\boldsymbol{d}_{{\cal Q}_{2}}$ is an allocation on $G_{{\cal Q}_{2}}$ with $K_{{\cal Q}_{2}}=1.$ It follows by Proposition \ref{prop:LexK1} that $\boldsymbol{d}_{{\cal Q}_{2}}$ is lex-optimal in $G_{{\cal Q}_{2}}$ and $l_{2}=1$. This implies that any lex-optimal allocation on $G_{{\cal Q}_{2}}$ has $K_{{\cal Q}_{2}}^{*}=1$ and $l_{2}^{*}=1$. We then conclude that $K^{*}=3$ and arguing as in the case $K=2$, that $\boldsymbol{d}$ is lex-optimal.
We will use induction to show the Theorem for allocations $\boldsymbol{d}$ with arbitrary $K$. Assume that the theorem holds for allocations with up to $K-1$ levels, $K\geq4$, and let next $K\geq4$. By (\ref{eq:MainLemInOut0}), the vector $\boldsymbol{d}_{{\cal Q}_{2}}$ constitutes an allocation on graph $G_{{\cal Q}_{2}}.$ Since this allocation has $K-2$ levels
we can apply the inductive hypothesis to conclude that the allocation $\boldsymbol{d}_{{\cal Q}_{2}}$ is lex-optimal in $G_{{\cal Q}_{2}}$. But the same holds for the restriction $\hat{\boldsymbol{d}}_{{\cal Q}_{2}}$ to $G_{{\cal Q}_{2}}$, of any lex-optimal allocation $\hat{\boldsymbol{d}}$. By uniqueness of lex optimality we conclude that all levels $l_{k},$ level sets ${\cal L}_{k}$ and received resource $r_{i}$ of $\boldsymbol{d}_{{\cal Q}_{2}}$ for $k=2,...,K-1$ are identical to those of any lex-optimal allocation. It follows that $K=K^{*}$ and we already showed that ${\cal L}_{1}^{*}={\cal L}_{1}$, $l_{1}=l_{1}^{*}$ ${\cal L}_{K}={\cal L}_{K^{*}}^{*},$ and $l_{K}=l_{K^{*}}^{*}.$ The lex-optimality of $\boldsymbol{d}$ follows. $\blacksquare$
\section*{Proof of Theorem 4}
Corollary \ref{thm:MainLex} is a simple consequence of the properties of the lex-optimal policies and can be derived by combining Theorems \ref{thm:MainTh0} and \ref{thm:Stability}. Hence, we only need to focus on the main result of this subsection, i.e., Theorem \ref{thm:Stability}.
\begin{proof}
Based on the results of Theorem \ref{thm:MainTh0} (and using the notation of Corollary \ref{thm:MainLex}), let ${\cal M}_{k}^{*}={\cal L}_{k}^{*}\cup{\cal L}_{K-k+1}^{*},\ k=1,2,...,\flr{K^{*}/2}$ be the formed groups under $\boldsymbol{d}$; if $K^{*}$ is odd, there is also a group ${\cal M}_{\ceil{K^{*}/2}}^{*}={\cal L}_{\ceil{K^{*}/2}}^{*}$. Below it will help to denote ${\cal L}_{1,k}\triangleq{\cal L}_{k}^{*}$
and ${\cal L}_{2,k}\triangleq{\cal L}_{K^{*}-k+1}^{*},$ $1\leq k\leq\flr{K^{*}/2}$ so that ${\cal M}_{k}^{*}={\cal L}_{1,k}\cup{\cal L}_{2,k}$, $1\leq k\leq\flr{K^{*}/2}$. We also define $l_{1,k}\triangleq l_{k}^{*}$ and $l_{2,k}\triangleq l_{K^{*}-k+1}^{*},\ 1\leq k\leq\flr{K^{*}/2}.$ If $K^{*}$ is odd then define ${\cal L}_{1,\ceil{K^{*}/2}}=\emptyset$, ${\cal L}_{2,\ceil{K^{*}/2}}={\cal L}_{\ceil{K^{*}/2}}^{*}$, $l_{1,\ceil{K^{*}/2}}=l_{2,\ceil{K^{*}/2}}=l_{\ceil{K^{*}/2}}^{*}=1$.
Consider an arbitrary \emph{nonempty} set of nodes ${\cal C}$ and define ${\cal C}_{1,k}={\cal C}\cap{\cal L}_{1,k}$, ${\cal C}_{2,k}={\cal C}\cap{\cal L}_{2,k},$ $1\leq k\leq\flr{K^{*}/2}$, and in case $K^{*}$ is odd, ${\cal C}_{2,\ceil{K^{*}/2}}={\cal C}\cap{\cal L}_{\ceil{K^{*}/2}}$. Hence ${\cal C}_{1,k}\cup{\cal C}_{2,k}={\cal M}_{k}^{*}\cap{\cal C}.$ Let $\hat{\boldsymbol{d}}$ be an allocation on this set such that $\hat{r}_{i}\geq r_{i}^{*}$ for all $i\in{\cal C}$ and $\hat{r}_{j_{0}}>r_{j_{0}}^{*}$ for some $j_{0}\in{\cal C}$. Below we argue by contradiction that such a set does not exist. In the case where the induced subgraph contains singletons, the result is trivial.
From Theorem \ref{thm:MainTh0} and specifically the properties of the lex-optimal allocations, we know that the nodes in the set $\mathcal{L}_{k}^{*}$, for $1\leq k\leq K^*/2$ may be connected only to nodes in sets $\mathcal{L}_{K-m+1}^{*}$, with $1\leq m\leq k$. Hence, we have the following properties
\begin{enumerate}
\item \label{enu:Prop1}Nodes in ${\cal L}_{1,k}\ 1\leq k\leq\flr{K^{*}/2}$ may be connected to nodes in ${\cal L}_{2,m},\ 1\leq m\leq k.$
\item \label{enu:Prop2}Nodes in ${\cal L}_{2,k},\ 1\leq k\leq\flr{K^{*}/2}$ may be connected to nodes in all sets ${\cal L}_{2,m},\ 1\leq m\leq\ceil{K^{*}/2}$
and to nodes in the sets ${\cal L}_{1,m},\ k\le m\leq\flr{K^{*}/2}.$
\item \label{enu:Prop3}If $K^{*}$ is odd, then nodes in ${\cal L}_{2,\ceil{K^{*}/2}}$ may be connected to nodes in all sets ${\cal L}_{2,m},\ 1\leq m\leq\ceil{K^{*}/2}$.
\end{enumerate}
Under allocation $\hat{\boldsymbol{d}}$, let $a_{(t,k)}^{(h,m)}$ be the proportion of offered resource by the nodes in ${\cal C}_{t,k}$ (i.e., $\sum_{i\in{\cal C}_{t,k}}D_{i}$) to nodes in ${\cal C}_{h,m}$. From Properties \ref{enu:Prop1} and \ref{enu:Prop2} above we then have for any $k,$ $1\leq k\leq\flr{K^{*}/2}$
\begin{eqnarray}
\sum_{m=1}^{k}a_{(1,k)}^{(2,m)} & = & 1\label{eq:firstSum}\\
\sum_{m=1}^{\ceil{K^{*}/2}}a_{(2,k)}^{(2,m)}+\sum_{m=k}^{\flr{K^{*}/2}}a_{(2,k)}^{(1,m)} & = & 1,\label{eq:secondSum}
\end{eqnarray}
and from Property \ref{enu:Prop3}, if $K^{*}$ is odd,
\begin{equation}
\sum_{m=1}^{\ceil{K^{*}/2}}a_{(2,\ceil{K^{*}/2})}^{(2,m)}=1.\label{eq:ThirdSum}
\end{equation}
Since nodes in ${\cal C}_{1,k},\ 1\leq k\leq\flr{K^{*}/2}$ may be connected and hence get their resource from nodes in ${\cal L}_{2,m},\ 1\leq m\leq k,$ we have, for every $1\leq k\leq\flr{K^{*}/2}$:
\begin{eqnarray}
\sum_{m=1}^{k}a_{(2,m)}^{(1,k)}\sum_{i\in{\cal C}_{2,m}}D_{i} = \sum_{i\in{\cal C}_{1,k}}\hat{r}_{i}\geq l_{1,k}\sum_{i\in{\cal C}_{1,k}}D_{i},\label{eq:FirstIneq}
\end{eqnarray}
with strict inequality holding if $j_{0}\in{\cal C}_{1,k}$ for some $k,\ 1\leq k\leq\flr{K^{*}/2}.$
Similarly, since nodes in ${\cal C}_{2,k},\ 1\leq k\leq\flr{K^{*}/2}$ may be connected and hence get their resource from nodes in ${\cal L}_{2,m},\ 1\leq m\leq\ceil{K^{*}/2}$
and from nodes in the sets ${\cal L}_{1,m},\ k\le m\leq\flr{K^{*}/2}$, it holds:
\begin{eqnarray}
\sum_{m=1}^{\ceil{K^{*}/2}}a_{(2,m)}^{(2,k)}\sum_{i\in{\cal C}_{2,m}}D_{i}+\sum_{m=k}^{\flr{K^{*}/2}}a_{(1,m)}^{(2,k)}\sum_{i\in{\cal C}_{1,m}}D_{i} \nonumber \\
=\sum_{i\in{\cal C}_{2,k}}\hat{r}_{i} \geq l_{2,k}\sum_{i\in{\cal C}_{2,k}}D_{i},\ 1\leq k\leq\flr{K^{*}/2},\label{eq:SecondIneq-1}
\end{eqnarray}
with strict inequality holding if $j_{0}\in{\cal C}_{2,k}$ for some $k,\ 1\leq k\leq\flr{K^{*}/2}.$
For a given $k$, multiplying (\ref{eq:SecondIneq-1}) by $l_{1,k}$, adding (\ref{eq:FirstIneq}), and considering that $l_{1,k}l_{2,k}=l_{k}^{*}l_{K^{*}-k+1}^{*}=1$, we get:
\begin{align}
&\sum_{m=1}^{k}a_{(2,m)}^{(1,k)}\sum_{i\in{\cal C}_{2,m}}D_{i}+l_{1,k}\sum_{m=1}^{\ceil{K^{*}/2}}a_{(2,m)}^{(2,k)}\sum_{i\in{\cal C}_{2,m}}D_{i}\nonumber \\ &+l_{1,k}\sum_{m=k}^{\flr{K^{*}/2}}a_{(1,m)}^{(2,k)}\sum_{i\in{\cal C}_{1,m}}D_{i}\nonumber \\
&\geq l_{1,k}\sum_{i\in{\cal C}_{1,k}}D_{i}+\sum_{i\in{\cal C}_{2,k}}D_{i},\ k=1,...,\flr{K^{*}/2},\label{eq:BasicIneq}
\end{align}
with strict inequality holding if $j_{0}\in{\cal C}_{1,k}\cup{\cal C}_{2,k}$ for some $k,\ 1\leq k\leq\flr{K^{*}/2}.$ Adding the inequalities in (\ref{eq:BasicIneq}) we get
\begin{align}
&\sum_{k=1}^{\flr{K^{*}/2}}\sum_{m=1,}^{k}a_{(2,m)}^{(1,k)}\sum_{i\in{\cal C}_{2,m}}D_{i}\nonumber \\ &+\sum_{k=1}^{\flr{K^{*}/2}}l_{1,k}\sum_{m=1}^{\ceil{K^{*}/2}}a_{(2,m)}^{(2,k)}\sum_{i\in{\cal C}_{2,m}}D_{i} \nonumber \\
&+\sum_{k=1}^{\flr{K^{*}/2}}l_{1,k}\sum_{m=k}^{\flr{K^{*}/2}}a_{(1,m)}^{(2,k)}\sum_{i\in{\cal C}_{1,m}}D_{i}\nonumber \\
&\geq\sum_{k=1}^{\flr{K^{*}/2}}l_{1,k}\sum_{i\in{\cal C}_{1,k}}D_{i}+\sum_{k=1}^{\flr{K^{*}/2}}\sum_{i\in{\cal C}_{2,k}}D_{i},\label{eq:BasicIneq-1}
\end{align}
with strict inequality holding if $j_{0}\in\cup_{k=1}^{\flr{K^{*}/2}}\left({\cal C}_{1,k}\cup{\cal C}_{2,k}\right).$
Note now that:
\begin{align}
&\sum_{k=1}^{\flr{\frac{K^{*}}{2}}}\sum_{m=1}^{k}a_{(2,m)}^{(1,k)}\sum_{i\in{\cal C}_{2,m}}D_{i}=\nonumber \\
&=\sum_{m=1}^{\flr{\frac{K^{*}}{2}}}\big(\sum_{k=m}^{\flr{\frac{K^{*}}{2}}}a_{(2,m)}^{(1,k)}\big)\sum_{i\in{\cal C}_{2,m}}D_{i}\label{eq:interch1}
\end{align}
\begin{align}
&\sum_{k=1}^{\flr{\frac{K^{*}}{2}}}l_{1,k}\sum_{m=k}^{\flr{\frac{K^{*}}{2}}}a_{(1,m)}^{(2,k)}\sum_{i\in{\cal C}_{1,m}}D_{i}=\nonumber \\
&=\sum_{m=1}^{\flr{\frac{K^{*}}{2}}}\big(\sum_{k=1}^{m}l_{1,k}a_{(1,m)}^{(2,k)}\big)\sum_{i\in{\cal C}_{1,m}}D_{i}\label{eq:interch3}
\end{align}
where we have applied the identity
\begin{equation}
\sum_{k=1}^{K}\sum_{m=1}^{k}a_{km}=\sum_{m=1}^{K}\sum_{k=m}^{K}a_{km}
\end{equation}
Also, if $K^{*}$ is even, then since $\flr{K^{*}/2}=\ceil{K^{*}/2}$,
\begin{align}
&\sum_{k=1}^{\flr{\frac{K^{*}}{2}}}l_{1,k}\sum_{m=1}^{\ceil{K^{*}/2}}a_{(2,m)}^{(2,k)}\sum_{i\in{\cal C}_{2,m}}D_{i}= \nonumber \\
&=\sum_{m=1}^{\flr{\frac{K^{*}}{2}}}\big(\sum_{k=1}^{\flr{\frac{K^{*}}{2}}}l_{1,k}a_{(2,m)}^{(2,k)}\big)\sum_{i\in{\cal C}_{2,m}}D_{i},\label{eq:interch2}
\end{align}
while if $K^{*}$ is odd,
\begin{align}
&\sum_{k=1}^{\flr{\frac{K^{*}}{2}}}l_{1,k}\sum_{m=1}^{\ceil{ \frac{K^{*}}{2} }}a_{(2,m)}^{(2,k)}\sum_{i\in{\cal C}_{2,m}}D_{i}=\sum_{m=1}^{\flr{\frac{K^{*}}{2}}}\big(\sum_{k=1}^{\flr{\frac{K^{*}}{2}}}l_{1,k}a_{(2,m)}^{(2,k)}\big)\cdot \nonumber\\ &\cdot \sum_{i\in{\cal C}_{2,m}}D_{i}+\sum_{k=1}^{\flr{\frac{K^{*}}{2}}}l_{1,k}a_{(2,\ceil{ \frac{K^{*}}{2} })}^{(2,k)}\sum_{i\in{\cal C}_{2,\ceil{\frac{K^{*}}{2}}}}D_{i} \label{eq:interch2Odd}
\end{align}
where we applied the identity
\begin{equation}
\sum_{k=1}^{K}\sum_{m=1}^{K+1}a_{km}=\sum_{m=1}^{K}\sum_{k=m}^{K}a_{km}+\sum_{k=1}^{K}a_{k,(K+1)}.
\end{equation}
Assume now that $K^{*}$ is even. Using equalities (\ref{eq:interch1}), (\ref{eq:interch3}), and (\ref{eq:interch2}) in (\ref{eq:BasicIneq-1}), we get:
\begin{align}
&\sum_{m=1}^{\flr{K^{*}/2}}\left(\sum_{k=m}^{\flr{K^{*}/2}}a_{(2,m)}^{(1,k)}+\sum_{k=1}^{\flr{K^{*}/2}}l_{1,k}a_{(2,m)}^{(2,k)}\right)\sum_{i\in{\cal C}_{2,m}}D_{i}\nonumber \\
&+\sum_{m=1}^{\flr{K^{*}/2}}\left(\sum_{k=1}^{m}l_{1,k}a_{(1,m)}^{(2,k)}\right)\sum_{i\in{\cal C}_{1,m}}D_{i} \nonumber \\
&>\sum_{k=1}^{\flr{K^{*}/2}}l_{1,k}\sum_{i\in{\cal C}_{1,k}}D_{i}+\sum_{k=1}^{\flr{K^{*}/2}}\sum_{i\in{\cal C}_{2,k}}D_{i}\label{eq:FinalIneq}
\end{align}
where the inequality is strict since now
\begin{equation}
j_{0}\in{\cal C}=\cup_{k=1}^{\flr{K^{*}/2}}\left({\cal C}_{1,k}\cup{\cal C}_{2,k}\right). \nonumber
\end{equation}
But since $l_{1,k}<1$, $\forall\, k\in[1, \flr{\frac{K^{*}}{2}}]$, we have:
\begin{align}
&\sum_{k=m}^{\flr{K^{*}/2}}a_{(2,m)}^{(1,k)}+\sum_{k=1}^{\flr{K^{*}/2}}l_{1,k}a_{(2,m)}^{(2,k)}\leq \\
&\leq \sum_{k=m}^{\flr{K^{*}/2}}a_{(2,m)}^{(1,k)}+\sum_{k=1}^{\flr{K^{*}/2}}a_{(2,m)}^{(2,k)}=1\ \ {\rm , by\ (\ref{eq:secondSum})} \nonumber
\end{align}
Taking into account that $l_{1,k}<l_{i,k'}$ if $k<k'$, we also have,
\begin{equation}
\sum_{k=1}^{m}l_{1,k}a_{(1,m)}^{(2,k)} \leq l_{1,m} \sum_{k=1}^{m}a_{(1,m)}^{(2,k)}= l_{1,m}\ \ {\rm by\ (\ref{eq:firstSum})}
\end{equation}
Hence, it holds:
\begin{align}
&\sum_{m=1}^{\flr{K^{*}/2}}\left(\sum_{k=m}^{\flr{K^{*}/2}}a_{(2,m)}^{(1,k)}+\sum_{k=1}^{\flr{K^{*}/2}}l_{1,k}\sum_{m=1}^{\flr{K^{*}/2}}a_{(2,m)}^{(2,k)}\right)\sum_{i\in{\cal C}_{2,m}}D_{i}\nonumber \\
&+\sum_{m=1}^{\flr{K^{*}/2}}\left(\sum_{k=1}^{m}l_{1,k}a_{(1,m)}^{(2,k)}\right)\sum_{i\in{\cal C}_{1,m}}D_{i} \nonumber \\
&\leq\sum_{m=1}^{\flr{K^{*}/2}}\sum_{i\in{\cal C}_{2,m}}D_{i}+\sum_{m=1}^{\flr{K^{*}/2}}l_{1,m}\sum_{i\in{\cal C}_{1,m}}D_{i}
\end{align}
which contradicts (\ref{eq:FinalIneq}).
It remains to consider the case that $K^{*}$ is odd. In this case, using equalities (\ref{eq:interch1}), (\ref{eq:interch3}), and (\ref{eq:interch2Odd}) in (\ref{eq:BasicIneq-1}), we obtain,
\begin{align}
&\sum_{m=1}^{\flr{K^{*}/2}}\left(\sum_{k=m}^{\flr{K^{*}/2}}a_{(2,m)}^{(1,k)}+\sum_{k=1}^{\flr{K^{*}/2}}l_{1,k}a_{(2,m)}^{(2,k)}\right)\sum_{i\in{\cal C}_{2,m}}D_{i} \nonumber \\
&+\sum_{m=1}^{\flr{K^{*}/2}}\left(\sum_{k=1}^{m}l_{1,k}a_{(1,m)}^{(2,k)}\right)\sum_{i\in{\cal C}_{1,m}}D_{i}\nonumber \\
&+\sum_{k=1}^{\flr{K^{*}/2}}l_{1,k}a_{(2,\ceil{K^{*}/2})}^{(2,k)}\sum_{i\in{\cal C}_{2,\ceil{K^{*}/2}}}D_{i}\nonumber \\
& \geq\sum_{k=1}^{\flr{K^{*}/2}}l_{1,k}\sum_{i\in{\cal C}_{1,k}}D_{i}+\sum_{k=1}^{\flr{K^{*}/2}}\sum_{i\in{\cal C}_{2,k}}D_{i}\label{eq:FinalIneq-1}
\end{align}
with strict inequality holding if $j_{0}\in\cup_{k=1}^{\flr{K^{*}/2}}\left({\cal C}_{1,k}\cup{\cal C}_{2,k}\right).$
Observe now that since nodes in ${\cal L}_{2,\ceil{K^{*}/2}}$ may be connected to nodes in all sets ${\cal L}_{2,m},\ 1\leq m\leq\ceil{K^{*}/2}$ we have,
\begin{align}
&\sum_{m=1}^{\ceil{K^{*}/2}}a_{(2,m)}^{(2,\ceil{K^{*}/2})}\sum_{i\in{\cal C}_{2,m}}D_{i} \nonumber \\
& =\sum_{i\in{\cal C}_{2,\ceil{K^{*}/2}}}\hat{r}_{i}\geq l_{1,\ceil{K^{*}/2}}\sum_{i\in{\cal C}_{1,\ceil{K^{*}/2}}}D_{i}\nonumber \\
& = \sum_{i\in{\cal C}_{1,\ceil{K^{*}/2}}}D_{i}\,,\mbox{ since \ensuremath{l_{1,\ceil{K^{*}/2}}=1.}}\label{eq:FinalIneq1}
\end{align}
with equality holding if $j_{0}\in{\cal C}_{1,\ceil{K^{*}/2}}.$ Adding (\ref{eq:FinalIneq-1}), (\ref{eq:FinalIneq1}):
\begin{align}
&\sum_{m=1}^{\flr{\frac{K^{*}}{2}}}\left(\sum_{k=m}^{\flr{\frac{K^{*}}{2}}}a_{(2,m)}^{(1,k)}+\sum_{k=1}^{\flr{\frac{K^{*}}{2}}}l_{1,k}a_{(2,m)}^{(2,k)}+a_{(2,m)}^{(1,\ceil{\frac{K^{*}}{2}})}\right)\sum_{i\in{\cal C}_{2,m}}D_{i} \nonumber \\
&+\sum_{m=1}^{\flr{\frac{K^{*}}{2}}}\left(\sum_{k=1}^{m}l_{1,k}a_{(1,m)}^{(2,k)}\right)\sum_{i\in{\cal C}_{1,m}}D_{i}+\sum_{k=1}^{\flr{\frac{K^{*}}{2}}}l_{1,k}a_{(2,\ceil{\frac{K^{*}}{2}})}^{(2,k)}\cdot \nonumber \\
&\cdot \sum_{i\in{\cal C}_{2,\ceil{\frac{K^{*}}{2}}}}D_{i}>\sum_{k=1}^{\flr{\frac{K^{*}}{2}}}l_{1,k}\sum_{i\in{\cal C}_{1,k}}D_{i}+\sum_{k=1}^{\ceil{\frac{K^{*}}{2}}}\sum_{i\in{\cal C}_{2,k}}D_{i}
\end{align}
where the inequality is strict since $j_{0}\in{\cal C}=\cup_{k=1}^{\ceil{K^{*}/2}}\left({\cal C}_{1,k}\cup{\cal C}_{2,k}\right)$. Using again arguments similar to the case $K^{*}$ even, we arrive again at a contradiction. $\blacksquare$
\end{proof}
\end{document} |
\begin{document}
\begin{abstract}
In this paper, we prove a converse theorem for half-integral weight modular forms assuming functional equations for $L$-series with additive twists. This result is an extension of Booker, Farmer, and Lee's result in \cite{booker2022extension} to the half-integral weight setting. Similar to their work, the main result of this paper is obtained as a consequence of the half-integral weight Petersson trace formula.
\end{abstract}
\title{A converse theorem in half-integral weight}
\section{Introduction}
Converse theorems provide sufficient conditions to classify Dirichlet series as $L$-functions. The first result was due to Hamburger (see \cite{hamburger1921riemannsche}) in 1921 which uniquely classified the Riemann zeta function $\zeta(s)$ by its functional equation. Just over 15 years later (see \cite{hecke1936bestimmung}), Hecke extended the work of Hamburger by determining when the coefficients $a_{n}$ of a Dirichlet series $D(s)$ are the Fourier coefficients, suitably normalized, of a cuspform $f$ on the full modular group and hence $D(s) = L(s,f)$. Like Hamburger's result, the essential analytic ingredient in Hecke's converse theorem is the functional equation for $L$-functions associated to modular forms. Later, Weil was able to extend Hecke's converse theorem to modular forms of higher level. In this case, one needs to assume functional equations for $L$-series twisted by Dirichlet characters, namely, $L(s,f \times \chi)$ (see \cite{weil1967bestimmung}). In 2002, Venkatesh was able to prove Hecke's result using additively twisted $L$-series via Voronoi summation (see \cite{venkatesh2002limiting}). Recently, Booker, Farmer, and Lee (in \cite{booker2022extension}) extended Venkatesh's converse theorem to modular forms of arbitrary (integral) weight, level, and character. We prove an analogous result in the half-integral weight setting.
\textbf{Acknowledgements}: The authors would like to thank Min Lee for suggesting the problem and for helpful conversations. The authors would also like to thank Jeff Hoffstein and Junehyuk Jung for additional conversations. The second author was supported by the NSF GRFP.
The paper is structured as follows. In the introduction, we set our notation and give the statement of the main theorem. In the second section, we quote two lemmas that we need to use for the proof of the main theorem. In the third section, we prove the main theorem. Our proof mimics that of \cite{booker2022extension}. In the appendices, we derive the functional equation for the $L$-function of a half-integral weight modular form twisted by an additive character and a variant of a result of Hecke used for classical converse theorems.
Let $f(z)$ be a half-integral weight modular form on $\Gamma_{0}(4N)\backslash\mathbb{H}$ of weight $\lambda$ and character $\chi$. Under any $\gamma = \begin{psmallmatrix} a & b \\ c & d \end{psmallmatrix} \in \Gamma_{0}(4N)$, $f(\gamma z)$ transforms as
\[
f(\gamma z) = \chi(d)\varepsilon_{d}^{-2\lambda}\legendre{c}{d}(cz+d)^{\lambda}f(z),
\]
where $\legendre{c}{d}$ is the modified Jacobi symbol as given in \cite{shimura1973modular} and $\varepsilon_{d} = 1,i$ according to whether $d \equiv 1,3 \pmod{4}$ respectively. The factor $\varepsilon_{d}^{-1}\legendre{c}{d}(cz+d)^{\frac{1}{2}}$ is called the theta multiplier. Setting $e(nz) = e^{2\pi inz}$, $f(z)$ admits a Fourier expansion of the form
\[
f(z)=\sum_{n \ge 1}f_{n}n^{\frac{\lambda-1}{2}}e(nz).
\]
For a rational $\alpha = \frac{a}{c}$, we define the complete additive twist of $f$, $\Lambda_{f}(s,\alpha)$, by
\[
\Lambda_{f}(s,\alpha) = \Gamma_{\mathbb{C}}\left(s+\frac{\lambda-1}{2}\right)\sum_{n \ge 1}\frac{f_{n}e(n\alpha)}{n^{s}},
\]
where we define $\Gamma_{\mathbb{C}}(s) = 2(2\pi)^{-s}\Gamma(s)$. Furthermore, for any $c\in 4N\mathbb{Z}_{>0}$, and $a,\overline{a}\in\mathbb{Z}$ such that $a\overline{a}\equiv 1\pmod{c}$, the completion will satisfy the functional equation (see \cref{append:functional_equation_additive_twist})
\[
\Lambda_f\left(s,\frac{a}{c}\right)=i^\lambda\chi(\overline{a})\varepsilon_{a}^{-2\lambda}\legendre{c}{a}c^{1-2s}\Lambda_f\left(1-s,-\frac{\overline{a}}{c}\right).
\]
These additively twisted functional equations are the main analytic ingredient in the following converse theorem:
\begin{theorem}\label{thm:ConverseTheorem}
Let $N \ge 1$, $\chi$ be a Dirichlet character modulo $4N$, $\{f_n\}_{n\geq 1}$ be a sequence of complex numbers, $\gamma(s)$ be a complex function, and $\omega$ be a nonzero complex number. Given any $\alpha\in \mathbb{Q}$, define the complete additive twist $L$-series
\[
\Lambda_f(s,\alpha)=\gamma(s)\sum_{n\geq 1} \frac{f_n e(n\alpha)}{n^s}.
\]
Suppose the following properties are satisfied:
\begin{enumerate}
\item $\sum_{n\geq 1}f_n n^{-s}$ converges absolutely in the half-plane $\Re(s)>1$.
\item $\gamma(s)=Q^s\prod_{j=1}^r \Gamma(\lambda_j s+\mu_j)$ with $Q,\lambda_j\in\mathbb{R}_{>0}$ and $\mu_j\in\mathbb{C}$ such that $\Re(\mu_j)>-\frac{1}{2}\lambda_j$ and $\sum_{j=1}^r\lambda_j=1$.
\item For every $c\in 4N\mathbb{Z}_{>0}$ and for every pair of integers $a,\overline{a}\in\mathbb{Z}$ such that $a\overline{a}\equiv 1\pmod{c}$, the $L$-series $\Lambda_f\left(s,\frac{a}{c}\right)$ and $\Lambda_f\left(s,-\frac{\overline{a}}{c}\right)$ continue to entire functions of finite order and satisfy the functional equation
\begin{equation}\label{equ:functional_equation_assumption}
\Lambda_{f}\left(s,\frac{a}{c}\right) = \omega\chi(\overline{a})\varepsilon_{a}^{-\delta}\legendre{c}{a}c^{1-2s}\Lambda_{f}\left(1-s,-\frac{\overline{a}}{c}\right),
\end{equation}
for some fixed odd $\delta \pmod{4}$.
\end{enumerate}
Then there exists a half-integer $\lambda$ with $2\lambda \equiv \delta \pmod{4}$ such that if we set
\[
f(z) = \sum_{n\geq 1}f_nn^{\frac{\lambda-1}{2}}e(nz),
\]
then $f(z) \in \mathcal{S}_{\lambda}(\Gamma_{0}(4N),\chi)$.
\end{theorem}
\section{Lemmas}
The proof of \cref{thm:ConverseTheorem} uses the same lemmas as found in \cite{booker2022extension}. We restate the lemmas below without proof. Furthermore, a proof of the first lemma can be found in \cite{booker2022extension} while a proof of the second lemma can be found in \cite{hoffstein2021first}.
\begin{lemma}\label{lemma:GammaFactor}
Let $\gamma(s)$ be as described in \cref{thm:ConverseTheorem}, and suppose that $\gamma(s)$ has poles at all but finitely many nonpositive integers. Then $\gamma(s) = CP(s)H^s\Gamma_{\mathbb{C}}(s)$, where $C,H\in\mathbb{R}_{>0}$ and $P(s)$ is a monic polynomial whose roots are distinct nonpositive integers.
\end{lemma}
\begin{lemma}\label{lemma:SumOfRamanujanSums}
Let $r(n;q)$ be the Ramanujan sum. Then for $n,N>0$ and $\Re(s)>1$, we have
\[
\sum_{\substack{q\geq 1\\ 4N\mid q}}\frac{r(n;q)}{q^{2s}}=\begin{cases}
\frac{\sigma_{1-2s}(n;4N)}{\zeta^{(4N)}(2s)} & n\neq 0,\\
(4N)^{1-2s}\prod_{p\mid 4N}(1-p^{-1})\frac{\zeta(2s-1)}{\zeta^{(4N)}(2s)} & n=0,
\end{cases}
\]
where if $\frac{4N}{\prod_{p\mid 4N}p}\mid n$,
\[
\sigma_s(n;4N)=\prod_{\substack{p\mid n\\ p\nmid 4N}}\frac{p^{(\operatorname{ord}_p(n)+1)s}-1}{p^s-1}\cdot \prod_{p\mid 4N}\left(\frac{(1-p^{s-1})p^{(\operatorname{ord}_p(n)+1)s}-(1-p^{-1})p^{\operatorname{ord}_p(4N)s}}{p^s-1}\right),
\]
and otherwise, $\sigma_s(n;4N)=0$.
\end{lemma}
\section{The Main Argument}
The line of argument in proving \cref{thm:ConverseTheorem} can be reduced to a sequence of steps. First, we use the Petersson trace formula to relate the Dirichlet series $\sum_{n \ge 1}f_{n}n^{-s}$ to a sum of Rankin--Selberg convolutions $L(s,f \times \overline{g})$ where $g$ ranges over an orthonormal basis of eigenforms. Rewriting the geometric side of the trace formula using the functional equations in \cref{equ:functional_equation_assumption}, we will be able to meromorphically continue the Rankin--Selberg convolutions to the region $\Re(s) > \frac{1}{2}$ with a possible pole at $s = 1$. If the weight $\lambda$ (with $2\lambda \equiv \delta \pmod{4}$) is not too small, taking a residue at $s = 1$ will show that each $f_{n}$ is a linear combination of the $n$-th Fourier coefficients of the eigenbasis and our result follows. In the case the weight is small, we reach an obstruction as there is no pole. In this case, we rely on a variant of Hecke's converse theorem in half-integral weight to show that the $f_{n}$ are the Fourier coefficients of a modular form.
Now for the initial setup. Fix a half-integer $\lambda$ and let $\mathcal{H}_{\lambda}(\Gamma_{0}(4N),\chi)$ be an orthonormal basis for the space $\mathcal{S}_{\lambda}(\Gamma_{0}(4N),\chi)$. For $g \in \mathcal{H}_{\lambda}(\Gamma_{0}(4N),\chi)$, denote its Fourier expansion by
\[
g(z) = \sum_{n \ge 1}\rho_{g}(n)n^{\frac{\lambda-1}{2}}e(nz).
\]
For fixed $n,m \ge 1$, the (half-integral weight) Petersson trace formula for weight $\lambda$ is
\[
\frac{\Gamma\left(\lambda-1\right)}{(4\pi)^{\lambda-1}}\sum_{g \in \mathcal{H}_{\lambda}(\Gamma_{0}(4N),\chi)}\rho_{g}(n)\overline{\rho_{g}(m)} = \delta_{n,m}+\sum_{\substack{c \ge 1 \\ 4N \mid c}}\frac{2\pi i^{-\lambda}}{c}J_{\lambda-1}\left(\frac{4\pi\sqrt{nm}}{c}\right)S_{\chi,\lambda}(m,n;c),
\]
where $S_{\chi,\lambda}(m,n;c)$ is the Salie sum with theta multiplier:
\[
S_{\chi,\lambda}(m,n;c) = \sum_{\substack{a \pmod{c} \\ (a,c) = 1}}\chi(a)\legendre{a}{c}\varepsilon_{c}^{2\lambda}e\left(\frac{am+\overline{a}n}{c}\right).
\]
Multiply the spectral side by $\zeta^{(4N)}(2s)\frac{f_{m}}{m^{s}}$, sum over $m$, and define $K_{n}(s,f,\chi)$ to be the result:
\begin{equation}\label{equ:K_function_spectral_side}
\begin{aligned}
K_{n}(s,f,\chi) &= \zeta^{(4N)}(2s)\sum_{m \ge 1}\frac{f_{m}}{m^{s}}\frac{\Gamma\left(\lambda-1\right)}{(4\pi)^{\lambda-1}}\sum_{g \in \mathcal{H}_{\lambda}(\Gamma_{0}(4N),\chi)}\rho_{g}(n)\overline{\rho_{g}(m)} \\
&= \frac{\Gamma\left(\lambda-1\right)}{(4\pi)^{\lambda-1}}\sum_{g \in \mathcal{H}_{\lambda}(\Gamma_{0}(4N),\chi)}\rho_{g}(n)L(s,f \times \overline{g}),
\end{aligned}
\end{equation}
where we set
\[
L(s,f \times \overline{g}) = \zeta^{(4N)}(2s)\sum_{m \ge 1}\frac{f_{m}\overline{\rho_{g}(m)}}{m^{s}}.
\]
As $\sum_{m \ge 1}\frac{f_{m}}{m^{s}}$ is holomorphic for $\Re(s) > 1$ by assumption, $\sum_{m \le X}|f_{m}|^{2} \ll_{\varepsilon} X^{1+\varepsilon}$ for some $\varepsilon > 0$. Since $L(s,g \times \overline{g})$ has a pole at $s = 1$ we have by Landau's theorem an analogous bound $\sum_{m \le X}|\rho_{g}(m)|^{2} \ll_{\varepsilon} X^{1+\varepsilon}$. By Cauchy--Schwarz, these average estimates together imply $\sum_{m \le X}|f_{m}\overline{\rho_{g}(m)}| \ll_{\varepsilon} X^{1+\varepsilon}$ which further implies $L(s,f \times \overline{g})$ is absolutely convergent for $\Re(s) > 1$. In particular, $K_{n}(s,f,\chi)$ is holomorphic for $\Re(s) > 1$.
It will be convenient to set up some notation for a certain integral involved throughout the proof of \cref{thm:ConverseTheorem}. For $\lambda \ge \frac{9}{2}$, $x > 0$, $\Re(s) \in \left(\frac{1}{2},\frac{\lambda-1}{2}\right)$, and $\sigma_{1} \in \left(\frac{1-\lambda}{2},-\Re(s)\right)$, set
\[
F_{\lambda}(s,x) = \frac{1}{2\pi i}\int_{\Re(u) = \sigma_{1}}\frac{\Gamma_{\mathbb{C}}\left(u+\frac{\lambda-1}{2}\right)\gamma\left(1-s-u\right)}{\Gamma_{\mathbb{C}}\left(-u+\frac{\lambda+1}{2}\right)\gamma\left(s+u\right)}x^{u}\,du.
\]
Our bounds for $\Re(s)$ and $\sigma_{1}$ ensure that the contour avoids the poles of the $\Gamma_{\mathbb{C}}$-factor and $\gamma$-factor in the numerator. Moreover, from Stirling's formula we have
\begin{equation}\label{equ:gamma_ratio_estimate}
\frac{\Gamma_{\mathbb{C}}\left(u+\frac{\lambda-1}{2}\right)\gamma\left(1-s-u\right)}{\Gamma_{\mathbb{C}}\left(-u+\frac{\lambda+1}{2}\right)\gamma\left(s+u\right)} \ll |u|^{-2\Re(s)}.
\end{equation}
Indeed, the ratio of the $\Gamma_{\mathbb{C}}$-factors is at most $O(|u|^{-2\Re(u)})$ and the ratio of the $\gamma$-factors is at most $O(|u|^{-2\Re(s+u)})$ because $\sum_{j = 1}^{r}\lambda_{j} = 1$. So \cref{equ:gamma_ratio_estimate} implies
\begin{equation}\label{equ:F_int_bound}
F_{\lambda}(s,x) \ll x^{\sigma_{1}}\frac{1}{2\pi i}\int_{\Re(u) = \sigma_{1}}|u|^{-2\Re(s)}\,du \ll x^{\sigma_{1}}.
\end{equation}
This estimate shows $F_{\lambda}(s,x)$ is absolutely bounded for $\Re(s) > \frac{1}{2}$ and fixed $x$. By manipulating the geometric side of the trace formula, we can show that $K_{n}(s,f,\chi)$ admits meromorphic continuation to the same region.
\begin{proposition}\label{prop:analytic_continuation_of_K_function}
For $\lambda \ge \frac{9}{2}$ with $2\lambda \equiv \delta \pmod{4}$, $K_{n}(s,f,\chi)$ admits meromorphic continuation to the region $\Re(s) > \frac{1}{2}$ with at most a simple pole at $s = 1$.
\end{proposition}
\begin{proof}
Restrict to $\Re(s) > \frac{5}{4}$. Looking at the geometric side of the Petersson trace formula,
\[
K_{n}(s,f,\chi) = \zeta^{(4N)}(2s)\frac{f_{n}}{n^{s}}+\zeta^{(4N)}(2s)\sum_{m \ge 1}\frac{f_{m}}{m^{s}}\sum_{\substack{c \ge 1 \\ 4N \mid c}}\frac{2\pi i^{-\lambda}}{c}J_{\lambda-1}\left(\frac{4\pi\sqrt{nm}}{c}\right)S_{\chi,\lambda}(m,n;c).
\]
Recall the well-known estimates
\[
J_{\lambda-1}(y) \ll \min\left\{y^{\lambda-1},y^{-\frac{1}{2}}\right\} \quad \text{and} \quad S_{\chi, \lambda}(m,n;c) \ll_{m,n,\varepsilon}c^{\frac{1}{2}+\varepsilon}.
\]
Breaking the sum over $m$ and $c$ according to which value dominates, we have
\[
\sum_{m \ll c}\frac{f_{m}}{m^{s}c}J_{\lambda-1}\left(\frac{4\pi\sqrt{nm}}{c}\right)S_{\chi,\lambda}(m,n;c)+\sum_{m \gg c}\frac{f_{m}}{m^{s}c}J_{\lambda-1}\left(\frac{4\pi\sqrt{nm}}{c}\right)S_{\chi,\lambda}(m,n;c).
\]
Applying the $J$-Bessel bound $\ll y^{\lambda-1}$ to the first sum and $\ll y^{-\frac{1}{2}}$ to the second (along with the Weil bound for both sums) the expression above is no larger than $O\left(\sum_{m \ge 1}f_{m}m^{\frac{1}{4}-s}\right)$ which is absolutely convergent for $\Re(s) > \frac{5}{4}$. Therefore we may interchange the sums resulting in
\[
K_{n}(s,f,\chi) = \zeta^{(4N)}(2s)\frac{f_{n}}{n^{s}}+\zeta^{(4N)}(2s)2\pi i^{-\lambda}\sum_{\substack{c \ge 1 \\ 4N \mid c}}\frac{1}{c}\sum_{m \ge 1}\frac{f_{m}S_{\chi,\lambda}(m,n;c)}{m^{s}}J_{\lambda-1}\left(\frac{4\pi\sqrt{nm}}{c}\right).
\]
Now restrict to the region $\Re(s) \in \left(\frac{5}{4},\frac{\lambda-1}{2}\right)$. Recall the Mellin--Barnes integral representation for the $J$-Bessel function which is valid for $\sigma_{0} \in \left(\frac{1-\lambda}{2},0\right)$:
\[
J_{\lambda-1}(4\pi y) = \frac{1}{4\pi^{2}i}\int_{\Re(u) = \sigma_{0}}\frac{\Gamma_{\mathbb{C}}\left(u+\frac{\lambda-1}{2}\right)}{\Gamma_{\mathbb{C}}\left(-u+\frac{\lambda+1}{2}\right)}y^{-2u}\,du.
\]
In particular, for $\sigma_{0} \in (1-\Re(s),0)$ we can interchange the integral and sum over $m$ to obtain
\begin{equation}\label{equ:intermediate_sum_for_c}
\zeta^{(4N)}(2s)\frac{f_{n}}{n^{s}}+\zeta^{(4N)}(2s)i^{-\lambda}\sum_{\substack{c \ge 1 \\ 4N \mid c}}\frac{1}{2\pi i}\int_{\Re(u) = \sigma_{0}}\frac{\Gamma_{\mathbb{C}}\left(u+\frac{\lambda-1}{2}\right)}{\Gamma_{\mathbb{C}}\left(-u+\frac{\lambda+1}{2}\right)}c^{2u-1}\sum_{m \ge 1}\frac{f_{m}S_{\chi,\lambda}(m,n;c)}{m^{s}}(nm)^{-u}\,du.
\end{equation}
Opening up the Salie sum and interchanging it with the sum over
$m$, the outside sum over $c$ is
\[
\sum_{\substack{c \ge 1 \\ 4N \mid c}}\frac{1}{2\pi i}\int_{\Re(u) = \sigma_{0}}\frac{\Gamma_{\mathbb{C}}\left(u+\frac{\lambda-1}{2}\right)}{\Gamma_{\mathbb{C}}\left(-u+\frac{\lambda+1}{2}\right)}c^{2u-1}n^{-u}\sum_{\substack{a \pmod{c} \\ (a,c) = 1}}\chi(a)\varepsilon_{a}^{2\lambda}\legendre{c}{a}e\left(n\frac{\overline{a}}{c}\right)L_{f}\left(s+u,\frac{a}{c}\right)\,du,
\]
where
\[
L_{f}\left(s,\frac{a}{c}\right) = \frac{\Lambda_{f}\left(s,\frac{a}{c}\right)}{\gamma(s)} = \sum_{m \ge 1}\frac{f_{m}e\left(m\frac{a}{c}\right)}{m^{s}}.
\]
The next step is to apply the functional equation. First, shift the contour to $\sigma_{1} \in (-\frac{\lambda-1}{2},-\Re(s))$. We do not pick up residues since the first pole occurs at $u = -\frac{\lambda-1}{2}$ from $\Gamma_{\mathbb{C}}\left(u+\frac{\lambda-1}{2}\right)$. Upon substituting \cref{equ:functional_equation_assumption}, our assumption that $2\lambda \equiv \delta \pmod{4}$ yields
\begin{equation}\label{equ:c_sum_after_fun_eq}
\begin{aligned}
\omega\sum_{\substack{c \ge 1 \\ 4N \mid c}}\frac{1}{c^{2s}}\frac{1}{2\pi i}&\int_{\Re(u) = \sigma_{1}}\frac{\Gamma_{\mathbb{C}}\left(u+\frac{\lambda-1}{2}\right)\gamma(1-s-u)}{\Gamma_{\mathbb{C}}\left(-u+\frac{\lambda+1}{2}\right)\gamma(s+u)}n^{-u} \\
&\cdot \sum_{\substack{a \pmod{c} \\ (a,c) = 1}}e\left(n\frac{\overline{a}}{c}\right)L_{f}\left(1-s-u,-\frac{\overline{a}}{c}\right)\,du.
\end{aligned}
\end{equation}
Now $\Re(1-s-u) > 1$ so that $L_{f}\left(1-s-u,-\frac{\overline{a}}{c}\right) \ll 1$. Expanding the $L$-series as a Dirichlet series and interchanging with the sum over $a$, we have
\begin{equation}\label{equ:L-series_Ramanujan}
\sum_{\substack{a \pmod{c} \\ (a,c) = 1}}e\left(n\frac{\overline{a}}{c}\right)L_{f}\left(1-s-u,-\frac{\overline{a}}{c}\right) = \sum_{m \ge 1}\frac{f_{m}r(n-m;c)}{m^{1-s-u}},
\end{equation}
where $r(n;c)$ is the Ramanujan sum. Inserting \cref{equ:L-series_Ramanujan} into \cref{equ:c_sum_after_fun_eq} and replacing the result with the sum over $c$ in \cref{equ:intermediate_sum_for_c} yields
\begin{align*}
K_{n}(s,f,\chi) &= \zeta^{(4N)}(2s)\frac{f_{n}}{n^{s}}+\zeta^{(4N)}(2s) \\
&\cdot \omega i^{-\lambda}\sum_{\substack{c \ge 1 \\ 4N \mid c}}\frac{1}{c^{2s}}\frac{1}{2\pi i}\int_{\Re(u) = \sigma_{1}}\frac{\Gamma_{\mathbb{C}}\left(u+\frac{\lambda-1}{2}\right)\gamma\left(1-s-u\right)}{\Gamma_{\mathbb{C}}\left(-u+\frac{\lambda+1}{2}\right)\gamma\left(s+u\right)}n^{-u}\sum_{m \ge 1}\frac{f_{m}r(n-m;c)}{m^{1-s-u}}\,du.
\end{align*}
Since $L_{f}\left(1-s-u,-\frac{\overline{a}}{c}\right) \ll 1$, \cref{equ:gamma_ratio_estimate,equ:L-series_Ramanujan} imply that the integrand is absolutely bounded so we may interchange the integral and sum over $m$ to obtain
\[
K_{n}(s,f,\chi) = \zeta^{(4N)}(2s)\frac{f_{n}}{n^{s}}+\zeta^{(4N)}(2s)\omega i^{-\lambda}\sum_{\substack{c \ge 1 \\ 4N \mid c}}\frac{1}{c^{2s}}\sum_{m \ge 1}\frac{f_{m}r(n-m;c)}{m^{1-s}} F_{\lambda}\left(s,\frac{m}{n}\right).
\]
In particular, $F_{\lambda}\left(s,\frac{m}{n}\right) \ll_{n} m^{\sigma_{1}}$ by \cref{equ:F_int_bound}, so \cref{equ:L-series_Ramanujan} implies the sum over $m$ is absolutely bounded. Therefore the sum over $c$ is absolutely bounded too. Interchanging the sums over $c$ and $m$ yields
\[
K_{n}(s,f,\chi) = \zeta^{(4N)}(2s)\frac{f_{n}}{n^{s}}+\zeta^{(4N)}(2s)\omega i^{-\lambda}\sum_{m \ge 1}\frac{f_{m}}{m^{1-s}}\sum_{\substack{c \ge 1 \\ 4N \mid c}}\frac{r(n-m;c)}{c^{2s}} F_{\lambda}\left(s,\frac{m}{n}\right).
\]
Using \cref{lemma:SumOfRamanujanSums}, we can compute the sum over $c$ to obtain
\begin{equation}\label{equ:K_geometric_side_analytic_continuation}
\begin{aligned}
K_{n}(s,f,\chi) = \zeta^{(4N)}(2s)\frac{f_{n}}{n^{s}}&+\omega i^{-\lambda}\frac{f_{n}}{n^{1-s}}\zeta(2s-1)(4N)^{1-2s}\prod_{p \mid 4N}(1-p^{-1})F_{\lambda}(s,1) \\
&+\omega i^{-\lambda}\sum_{\substack{m \ge 1 \\ m \neq n}}\frac{f_{m}\sigma_{1-2s}(n-m;4N)}{m^{1-s}}F_{\lambda}\left(s,\frac{m}{n}\right).
\end{aligned}
\end{equation}
We now argue that \cref{equ:K_geometric_side_analytic_continuation} gives meromorphic continuation to the region $\frac{1}{2} < \Re(s) < -\sigma_{1}$. Indeed, we have already remarked that $F_{\lambda}(s,1)$ is absolutely bounded. As for the sum over $m$, we use the estimates $F_{\lambda}\left(s,\frac{m}{n}\right) \ll_{n} m^{\sigma_{1}}$ by \cref{equ:F_int_bound} and $\sigma_{1-2s}(n-m;N) \ll_{N,n,\varepsilon} m^{\varepsilon}$ for all $m \neq n$ so that
\[
\sum_{\substack{m \ge 1 \\ m \neq n}}\frac{f_{m}\sigma_{1-2s}(n-m;N)}{m^{1-s}}F_{\lambda}\left(s,\frac{m}{n}\right) \ll_{n} \sum_{\substack{m \ge 1 \\ m \neq n}}\frac{f_{m}\sigma_{1-2s}(n-m;N)m^{\sigma_{1}}}{m^{1-s}} \ll_{N,n,\varepsilon} \sum_{\substack{m \ge 1 \\ m \neq n}}\frac{f_{m}}{m^{1-s-\varepsilon-\sigma_{1}}},
\]
and this latter sum converges absolutely for $\Re(s) < -\varepsilon-\sigma_{1}$. Therefore all the terms in the right-hand side of \cref{equ:K_geometric_side_analytic_continuation} are meromorphic for $\frac{1}{2} < \Re(s) < -\sigma_{1}$ with a possible simple pole at $s = 1$ coming from $\zeta(2s-1)$ in the middle term. The meromorphic continuation of $K_{n}(s,f,\chi)$ is now established.
\end{proof}
The second result is that if $F_{\lambda}(1,1)$ vanishes for all sufficiently large $\lambda$ with $2\lambda \equiv \delta \pmod{4}$, then $\gamma(s)$ can be specified explicitly.
\begin{proposition}\label{prop:specify_gamma_factor_if_integral_vanishes}
Let $\gamma(s)$ be given as in \cref{thm:ConverseTheorem} and suppose there is a residue class $\delta \pmod{4}$ such that $F_{\lambda}(1,1) = 0$ for all $\lambda \ge \frac{9}{2}$ with $2\lambda \equiv \delta \pmod{4}$. Then there exists $\nu \in \left\{\frac{1}{2},\frac{3}{2},\frac{5}{2},\frac{7}{2}\right\}$ such that $2\nu \equiv \delta \pmod{4}$ with $\gamma(s) = CH^{s}\Gamma_{\mathbb{C}}\left(s+\frac{\nu-1}{2}\right)$.
\end{proposition}
\begin{proof}
Make the change of variables $u \to \frac{u}{2}$ in $F_{\lambda}(1,1)$ and shift the contour to $\Re(u) = -\frac{5}{2}$ to obtain
\[
F_{\lambda}(1,1) = \frac{1}{2\pi i}\int_{\Re(u) = -\frac{5}{2}}\frac{\Gamma_{\mathbb{C}}\left(\frac{\lambda-1+u}{2}\right)\gamma\left(-\frac{u}{2}\right)}{2\Gamma_{\mathbb{C}}\left(\frac{\lambda+1-u}{2}\right)\gamma\left(1+\frac{u}{2}\right)}\,du = 0,
\]
for all $\lambda \ge \frac{9}{2}$ with $2\lambda \equiv \delta \pmod{4}$. Now consider the function
\[
f_{\lambda}(y) = \frac{\chi_{(0,1)}(y)}{\sqrt{1-y^{2}}}\cos(\lambda\arccos(y)).
\]
Upon changing variables $y \to \cos(y)$, the Mellin transform of $f_{\lambda}(y)$ is (see \cite{zwillinger2007table} \S3.631 \#9):
\[
\widetilde{f_{\lambda}}(s) = \int_{0}^{\infty}f_{\lambda}(y)y^{s-1}\,dy = \frac{\Gamma_{\mathbb{C}}(s)}{2^{s}\Gamma_{\mathbb{C}}\left(\frac{s+\lambda+1}{2}\right)\Gamma_{\mathbb{C}}\left(\frac{s-\lambda+1}{2}\right)},
\]
for $\Re(s) > 0$. Let $\overline{\nu} \in \left\{\frac{1}{2},\frac{3}{2},\frac{5}{2},\frac{7}{2}\right\}$ be such that $2\overline{\nu}+2\lambda \equiv 4 \pmod{8}$. Then, making use of Euler's reflection formula, we have
\begin{align*}
\frac{\Gamma_{\mathbb{C}}\left(\frac{\lambda-1+u}{2}\right)\gamma\left(-\frac{u}{2}\right)}{2\Gamma_{\mathbb{C}}\left(\frac{\lambda+1-u}{2}\right)\gamma\left(1+\frac{u}{2}\right)} &= \frac{\Gamma_{\mathbb{C}}(1-u)}{2^{1-u}\Gamma_{\mathbb{C}}\left(\frac{\lambda+1-u}{2}\right)\Gamma_{\mathbb{C}}\left(\frac{3-\lambda-u}{2}\right)}\frac{2^{-u}\Gamma_{\mathbb{C}}\left(\frac{3-\lambda-u}{2}\right)\Gamma_{\mathbb{C}}\left(\frac{\lambda-1+u}{2}\right)}{\Gamma_{\mathbb{C}}(1-u)}\frac{\gamma\left(-\frac{u}{2}\right)}{\gamma\left(1+\frac{u}{2}\right)} \\
&= \widetilde{f_{\lambda-1}}(1-u)\frac{2^{-u}\Gamma_{\mathbb{C}}\left(\frac{3-\lambda-u}{2}\right)\Gamma_{\mathbb{C}}\left(\frac{\lambda-1+u}{2}\right)}{\Gamma_{\mathbb{C}}(1-u)}\frac{\gamma\left(-\frac{u}{2}\right)}{\gamma\left(1+\frac{u}{2}\right)} \\
&= \widetilde{f_{\lambda-1}}(1-u)\frac{2^{1-u}}{\sin\left(\frac{\pi}{2}(\lambda-1+u)\right)\Gamma_{\mathbb{C}}(1-u)}\frac{\gamma\left(-\frac{u}{2}\right)}{\gamma\left(1+\frac{u}{2}\right)} \\
&= \widetilde{f_{\lambda-1}}(1-u)\frac{2^{1-u}}{\sin\left(\frac{\pi}{2}(u+1-\overline{\nu})\right)\Gamma_{\mathbb{C}}(1-u)}\frac{\gamma\left(-\frac{u}{2}\right)}{\gamma\left(1+\frac{u}{2}\right)} \\
&= \widetilde{f_{\lambda-1}}(1-u)\frac{2^{-u}\Gamma_{\mathbb{C}}\left(\frac{u+1-\overline{\nu}}{2}\right)\Gamma_{\mathbb{C}}\left(\frac{1-u+\overline{\nu}}{2}\right)}{\Gamma_{\mathbb{C}}(1-u)}\frac{\gamma\left(-\frac{u}{2}\right)}{\gamma\left(1+\frac{u}{2}\right)},
\end{align*}
where the fourth equality follows because $2\overline{\nu}+2\lambda \equiv 4 \pmod{8}$. Set
\[
\widetilde{g}(u) = \frac{2^{-u}\Gamma_{\mathbb{C}}\left(\frac{u+1-\overline{\nu}}{2}\right)\Gamma_{\mathbb{C}}\left(\frac{1-u+\overline{\nu}}{2}\right)}{\Gamma_{\mathbb{C}}(1-u)}\frac{\gamma\left(-\frac{u}{2}\right)}{\gamma\left(1+\frac{u}{2}\right)},
\]
so that we can express $F_{\lambda}(1,1)$ as
\[
F_{\lambda}(1,1) = \frac{1}{2\pi i}\int_{\Re(u) = -\frac{5}{2}}\widetilde{f_{\lambda-1}}(1-u)\widetilde{g}(u)\,du = 0.
\]
Now $\omegatilde{g}(u)$ is holomorphic in a strip containing $\mathbb{R}e(u) = -\frac{5}{2}$ since at $u = -\frac{5}{2}$ the gamma factors $\Gamma_{\mathbb{C}}\lambdaeft(\frac{u+1-\overline{\nu}}{2}\right)$, $\Gamma_{\mathbb{C}}\lambdaeft(\frac{1-u+\overline{\nu}}{2}\right)$, and $\gamma\lambdaeft(-\frac{u}{2}\right)$ are all holomorphic (for all $\overline{\nu}$). Moreover, by Stirling's formula we have that $\omegatilde{g}(u) \lambdal |u|^{-\frac{3}{2}}$ and so
\betaegin{equation}\lambdaabel{equ:Mellin_transform_bound_for_g}
\iotant_{\mathbb{R}e(u) = -\frac{5}{2}}\omegatilde{g}(u)y^{-u}\,du \lambdal y^{\frac{5}{2}}\iotant_{\mathbb{R}e(u) = -\frac{5}{2}}|u|^{-\frac{3}{2}}\,du \lambdal y^{\frac{5}{2}}.
\varepsilonnd{equation}
Therefore the Mellin inverse of $\omegatilde{g}(u)$ exsits. In particular,
\betaegin{equation}\lambdaabel{equ:g_as_Mellin_transform}
g(y) = \frac{1}{2\pi i}\iotant_{\mathbb{R}e(u) = -\frac{5}{2}}\omegatilde{g}(u)y^{-u}\,du,
\varepsilonnd{equation}
is a continuous function on $[0,\iotanfty)$. Our goal now is to show that $g$ is identically zero on $[0,1]$. By Fubini's theorem,
\betaegin{equation}\lambdaabel{equ:int_of_f_and_g_is_zero}
0 = \frac{1}{2\pi i}\iotant_{\mathbb{R}e(u) = -\frac{5}{2}}\omegatilde{f_{\lambda-1}}(1-u)\omegatilde{g}(u)\,du = \iotant_{0}^{\iotanfty}f_{\lambda-1}(y)g(y)\,dy.
\varepsilonnd{equation}
Making the change of variables $y = \cos(\theta)$ and noting that $\cos(\theta) = |\cos(\theta)|$ for $0 \lambdae \theta \lambdae \frac{\pi}{2}$, \cref{equ:int_of_f_and_g_is_zero} becomes
\[
\iotant_{0}^{\frac{\pi}{2}}\frac{\cos((\lambda-1)\theta)}{\cos\lambdaeft(\frac{\theta}{2}\right)}\cos\lambdaeft(\frac{\theta}{2}\right)g\lambdaeft(\lambdaeft|\cos(\theta)\right|\right)\,d\theta = 0.
\]
Changing variables $v = \cos(\theta)$, this latter integral is expressible as
\betaegin{equation}\lambdaabel{equ:orthogonality_with_Chebyschev}
\iotant_{-1}^{1}V_{\lambdafloor\lambda-1\rfloor}(v)h(v)\sigmaqrt{\frac{1+v}{1-v}}\,dv = 0,
\varepsilonnd{equation}
where $V_{n}$ is the $n$-th Chebyshev polynomial of the third kind and we set
\[
h(v) = \chi_{[0,1]}(v)\frac{g(v)}{\sigmaqrt{1+v}}.
\]
Suppose $\delta = 1$. Then by \cref{equ:orthogonality_with_Chebyschev}, $h(v)$ is orthogonal to all $V_{n}$ for odd $n \ge 3$. So there is an even function $P(v)$ and a constant $a$ such that $h(v) = av+P(v)$. If $v \in [0,1]$, then $h(v) = 2av$ and therefore
\[
h(v) = \betaegin{cases} 0 & v \iotan [-1,0), \\ 2av & v \iotan [0,1]. \varepsilonnd{cases}
\]
So $g(y) = 2ay\sigmaqrt{1+y}$ for $y \iotan [0,1]$. By \cref{equ:Mellin_transform_bound_for_g,equ:g_as_Mellin_transform}, $g(y) = O(y^{\frac{5}{2}})$ as $y \thetao 0$ so that $a = 0$. Now suppose $\delta = 3$. Then $h(v)$ is orthogonal to all $V_{n}$ for even $n \gammae 4$. So there is an odd function $Q(v)$ and constants $b$ and $c$ such that $h(v) = b+cv^{2}+Q(v)$. If $v \iotan [0,1]$, we have $h(v) = 2(b+cv^{2})$ so that
\[
h(v) = \betaegin{cases} 0 & v \iotan [-1,0), \\ 2(b+cv^{2}) & v \iotan [0,1]. \varepsilonnd{cases}
\]
Then $g(y) = 2(b+cy^{2})\sqrt{1+y}$ for $y \in [0,1]$. Since $\widetilde{g}(u)$ is holomorphic on a strip containing $\mathrm{Re}(u) = -\frac{5}{2}$, shifting the contour in \cref{equ:Mellin_transform_bound_for_g} to $\mathrm{Re}(u) = -\frac{5}{2}-\varepsilon$ for some small $\varepsilon > 0$, \cref{equ:g_as_Mellin_transform} implies $g(y) = O(y^{\frac{5}{2}+\varepsilon})$ as $y \to 0$, so that $b = c = 0$. In either case, it follows that $g$ is identically zero on $[0,1]$. Then the Mellin transform of $g$ takes the form
\[
\omegatilde{g}(u) = \iotant_{1}^{\iotanfty}g(y)y^{u-1}\,dy.
\]
Since $g(y) \lambdal y^{\frac{5}{2}}$, the analytic continuation of $\omegatilde{g}(u)$ to $\mathbb{R}e(u) < -\frac{5}{2}$ follows. In this region, $\frac{\Gamma_{\mathbb{C}}\lambdaeft(\frac{1-u+v}{2}\right)\gamma\lambdaeft(-\frac{u}{2}\right)}{\Gamma_{\mathbb{C}}(1-u)}$ is analytic and nonvanishing. Since $\omegatilde{g}(u)$ is analytic, we conclude $\frac{\Gamma_{\mathbb{C}}\lambdaeft(\frac{u+1-\overline{\nu}}{2}\right)}{\gamma\lambdaeft(1+\frac{u}{2}\right)}$ is analytic for $\mathbb{R}e(u) < -\frac{5}{2}$ too. In this region, the poles of $\Gamma_{\mathbb{C}}\lambdaeft(\frac{u+1-\overline{\nu}}{2}\right)$ occur at $u = \overline{\nu}-1+2n$ for all $n < -\frac{3+2\overline{\nu}}{4}$. Therefore $\omegatilde{\gamma}(s) = \gamma(s+\frac{\overline{\nu}+1}{2})$ has poles at all but finitely many nonpositive integers (determined in \cref{tab:table_for_proof_of_lemma}). Applying \cref{lemma:GammaFactor} we get $\omegatilde{\gamma}(s) = C'P(s)H^{s}\Gamma_{\mathbb{C}}(s)$ for some constants $C',H \iotan \mathbb{R}_{>0}$ and a monic polynomial $P(s)$. In terms of $\gamma(s)$, we have
\[
\gamma(s) = CP\lambdaeft(s-\frac{\overline{\nu}+1}{2}\right)H^{s}\Gamma_{\mathbb{C}}\lambdaeft(s-\frac{\overline{\nu}+1}{2}\right),
\]
where $C = C'H^{-\frac{\overline{\nu}+1}{2}}$. In order to finish the theorem we need $P(s)$ to be such that
\[
P\lambdaeft(s-\frac{\overline{\nu}+1}{2}\right)\Gamma_{\mathbb{C}}\lambdaeft(s-\frac{\overline{\nu}+1}{2}\right) = \Gamma_{\mathbb{C}}\lambdaeft(s+\frac{\nu-1}{2}\right),
\]
for some $\nu \iotan \lambdaeft\{\frac{1}{2},\frac{3}{2},\frac{5}{2},\frac{7}{2}\right\}$ with $2\nu \varepsilonquiv \delta \thetamod{4}$. The condition $\mathbb{R}e(\mu_{j}) > -\frac{1}{2}\lambda_{j}$ implies $\gamma(s)$ cannot have poles in the region $\mathbb{R}e(s) \gammae \frac{1}{2}$. In other words, $\omegatilde{\gamma}(s)$ cannot have poles in the region $\mathbb{R}e(s) \gammae -\frac{\overline{\nu}}{2}$. Therefore $P(s)$ needs to have a zero canceling the possible poles coming from $\Gamma_{\mathbb{C}}(s)$ in $\omegatilde{\gamma}(s) = C'P(s)H^{s}\Gamma_{\mathbb{C}}(s)$. \cref{tab:table_for_proof_of_lemma} computes the possible polynomials $P(s)$ and corresponding weights $\nu$ for a given $\overline{\nu}$:
\betaegin{table}[h]
\betaegin{center}
\caption{}\lambdaabel{tab:table_for_proof_of_lemma}
\betaegin{stabular}[1.5]{|c|c|c|c|c|c|}
\hline
$\overline{\nu}$ & $\delta$ & Poles of $\omegatilde{\gamma}(s)$ & Possible poles of $\omegatilde{\gamma}(s)$ & $P(s)$ & $\nu$ \\
\hline
$\overline{\nu} = \frac{1}{2}$ & 3 & $s = n \lambdae -2$ & $s = -1$ & $s$ or $s(s+1)$ & $\nu = \frac{3}{2}$ or $\nu = \frac{7}{2}$ \\
\hline
$\overline{\nu} = \frac{3}{2}$ & 1 & $s = n \lambdae -2$ & $s = -1$ & $s$ or $s(s+1)$ & $\nu = \frac{1}{2}$ or $\nu = \frac{5}{2}$ \\
\hline
$\overline{\nu} = \frac{5}{2}$ & 3 & $s = n \lambdae -3$ & $s = -2$ & $s(s+1)$ or $s(s+1)(s+2)$ & $\nu = \frac{3}{2}$ or $\nu = \frac{7}{2}$ \\
\hline
$\overline{\nu} = \frac{7}{2}$ & 1 & $s = n \lambdae -3$ & $s = -2$ & $s(s+1)$ or $s(s+1)(s+2)$ & $\nu = \frac{1}{2}$ or $\nu = \frac{5}{2}$ \\
\hline
\varepsilonnd{stabular}
\varepsilonnd{center}
\varepsilonnd{table}
Upon inspection it is clear that for any $\delta$, the possible weights $\nu$ satisfy $2\nu \varepsilonquiv \delta \thetamod{4}$.
\varepsilonnd{proof}
We can now prove \cref{thm:ConverseTheorem}.
\betaegin{proof}[Proof of \cref{thm:ConverseTheorem}]
We divide the proof into two parts based on whether or not $F_{\lambda}(1,1)$ vanishes, since the methods involved are separate:
\betaegin{enumerate}[label=(\roman*)]
\iotatem Suppose $F_{\lambda}(1,1) \neq 0$ for some $\lambda \gammae \frac{9}{2}$ with $2\lambda \varepsilonquiv \delta \thetamod{4}$. Let $d = \deltaim\mathcal{S}_{\lambda}(\Gamma_{0}(4N),\chi)$. Since the $g$ form a basis for $\mathcal{S}_{\lambda}(\Gamma_{0}(4N),\chi)$, we can choose positive integers $n_{1} < n_{2} < \cdots < n_{d}$ such that the $d$ many $d$-dimensional vectors $(\rho_{g}(n_{1}),\lambdadots,\rho_{g}(n_{d}))$ must be linearly independent. For otherwise, some linear combination of the basis vectors $g$ has $d$ many vanishing Fourier coefficients and hence must be identically zero contradicting the linear independence of the $g$. The definition of $K_{n_{i}}(s,f,\chi)$ is a linear combination of the Rankin-Selberg convolutions $L(s,f \thetaimes \overline{g})$. Since the vectors $(\rho_{g}(n_{1}),\lambdadots,\rho_{g}(n_{d}))$ are linearly independent, we can invert this linear relation so that each $L(s,f \thetaimes \overline{g})$ is a linear combination of the $K_{n_{i}}(s,f,\chi)$ for $1 \lambdae i \lambdae d$. By \cref{prop:analytic_continuation_of_K_function}, each $L(s,f \thetaimes \overline{g})$ admits meromorphic continuation to the region $\mathbb{R}e(s) > \frac{1}{2}$ with at most a simple pole at $s = 1$. Taking the residue at $s = 1$ gives
\[
\frac{\Gamma\lambdaeft(\lambda-1\right)}{(4\pi)^{\lambda-1}}\sigmaum_{g \iotan \mathcal{H}_{\lambda}(\Gamma_{0}(4N),\chi)}\rho_{g}(n)\mathbb{R}es_{s = 1}L(s,f \thetaimes \overline{g}) = \omega i^{-\lambda}\frac{1}{2}(4N)^{-1}\prod_{p \mid 4N}(1-p^{-1})F_{\lambda}(1,1)f_{n},
\]
for every $n \gammae 1$. As $F_{\lambda}(1,1) \neq 0$, we may isolate $f_{n}$ and conclude that $f_{n}$ is a linear combination of $n$-th Fourier coefficients of basis elements $g$. Moreover, the coefficients of this linear combination are independent of $n$. Since this linear relation holds for all $n$, $f \iotan \mathcal{S}_{\lambda}(\Gamma_{0}(4N),\chi)$.
\iotatem Suppose $F_{\lambda}(1,1) = 0$ for all $\lambda\gammaeq \frac{9}{2}$ with $2\lambda\varepsilonquiv \delta\thetamod{4}$. Using \cref{prop:specify_gamma_factor_if_integral_vanishes}, we may assume that $\gammaamma(s)=CH^s\Gamma_{\cc}\lambdaeft(s+\frac{\nu-1}{2}\right)$ where $C,H\iotan\rr_{>0}$ and $\nu\iotan\lambdaeft\{\frac{1}{2},\frac{3}{2},\frac{5}{2},\frac{7}{2}\right\}$ with $2\nu\varepsilonquiv\delta\thetamod{4}$. We will show that $H = 1$. Suppose first that $H>1$. Then
\[
F_{\lambda}(1,1) = \frac{1}{2\pi i}\iotant_{\mathbb{R}e(u) = \sigma_{1}} H^{-u-1}\frac{\Gamma_{\cc}\lambdaeft(\frac{\lambda-1+u}{2}\right)\Gamma_{\cc}\lambdaeft(\frac{\nu-1-u}{2}\right)}{2\Gamma_{\cc}\lambdaeft(\frac{\lambda+1-u}{2}\right)\Gamma_{\cc}\lambdaeft(\frac{\nu+1+u}{2}\right)}\,du=0.
\]
Replacing the $\Gamma_{\cc}$-factors with $\Gamma$-factors and clearing nonzero constants, the integral becomes
\betaegin{equation}\lambdaabel{equ:integral_polynomial_is_zero}
\frac{1}{2\pi i}\iotant_{\mathbb{R}e(u) = \sigma_{1}} H^{-u}\frac{\Gamma\lambdaeft(\frac{\lambda-1+u}{2}\right)\Gamma\lambdaeft(\frac{\nu-1-u}{2}\right)}{\Gamma\lambdaeft(\frac{\lambda+1-u}{2}\right)\Gamma\lambdaeft(\frac{\nu+1+u}{2}\right)}\,du=0.
\varepsilonnd{equation}
By Stirling's formula, the integrand in \cref{equ:integral_polynomial_is_zero} is $O(H^{-u}|u|^{-2\mathrm{Re}(u+1)})$. Since $H > 1$, this estimate shows that upon taking $\sigma_{1} \to \infty$ the integrand vanishes. Therefore we may shift the line of integration in \cref{equ:integral_polynomial_is_zero} all the way to the right by taking $\sigma_{1} \to \infty$ at the cost of picking up residues. We obtain the following relation:
\betaegin{equation}\lambdaabel{equ:integral__residue_polynomial_relation}
\sigmaum_{\rho}\mathbb{R}es_{u = \rho}\lambdaeft(H^{-u}\frac{\Gamma\lambdaeft(\frac{\lambda-1+u}{2}\right)\Gamma\lambdaeft(\frac{\nu-1-u}{2}\right)}{2\Gamma\lambdaeft(\frac{\lambda+1-u}{2}\right)\Gamma\lambdaeft(\frac{\nu+1+u}{2}\right)}\right) = 0,
\varepsilonnd{equation}
where we are summing over all poles $\rho$ of the integrand in \cref{equ:integral_polynomial_is_zero} subject to $\mathbb{R}e(\rho) > \sigma_{1} > \frac{1-\lambda}{2}$. For every $\delta$, by \cref{prop:specify_gamma_factor_if_integral_vanishes} there are two possible weights $\nu$. For each such $(\delta,\nu)$ pair we choose one or two weights $\lambda \gammae \frac{9}{2}$. For each triple $(\delta,\nu,\lambda)$, the residue in \cref{equ:integral__residue_polynomial_relation} is an algebraic relation in $H$:
\[
G_{\delta,\nu,\lambda}(H) = 0.
\]
Given $(\delta,\nu)$, $H$ must be a zero of this relation for all $\lambda \ge \frac{9}{2}$ with $2\lambda \equiv \delta \pmod{4}$. \cref{tab:table_for_proof_of_theorem} computes various $G_{\delta,\nu,\lambda}(H)$ and from it we see that for any given tuple $(\delta,\nu)$, the corresponding triples $(\delta,\nu,\lambda)$ only have the root $H = 1$ in common. But this contradicts $H$ being a common zero larger than $1$.
\betaegin{table}[ht]
\betaegin{center}
\caption{}\lambdaabel{tab:table_for_proof_of_theorem}
\centering\renewcommand\cellalign{c}
\sigmaetcellgapes{3pt}\makegapedcells
\betaegin{stabular}[1]{|c|c|c|c|c|}
\hline $\delta$ & $\nu$ & $\lambda$ & $G_{\delta,\nu,\lambda}(H)$ & Values of $H$ \\
\hline $1$ & $\frac{1}{2}$ & $\frac{9}{2}$ & $-\frac{1}{2}H^{1/2}+3H^{-3/2}-\frac{5}{2}H^{-7/2}$ & $H=1,\sigmaqrt{5}$ \\
\hline $1$ & $\frac{1}{2}$ & $\frac{13}{2}$ & $-\frac{1}{4}H^{1/2}+\frac{15}{4}H^{-3/2}-\frac{35}{4}H^{-7/2}+\frac{21}{4}H^{-11/2}$& $H=1,\sigmaqrt{7\pm2\sigmaqrt{7}}$\\
\hline $1$ & $\frac{5}{2}$ & $\frac{9}{2}$ & $-2H^{-3/2}+2H^{-7/2}$ & $H=1$ \\
\hline $3$ & $\frac{3}{2}$ & $\frac{11}{2}$ & $-\frac{3}{2}H^{-1/2}+5H^{-5/2}-\frac{7}{2}H^{-9/2}$ & $H=1,\sigmaqrt{\frac{7}{3}}$ \\
\hline $3$ & $\frac{3}{2}$ & $\frac{15}{2}$ & $-\frac{5}{4}H^{-1/2}+\frac{35}{4}H^{-5/2}-\frac{63}{4}H^{-9/2}+\frac{33}{4}H^{-13/2}$ & $H=1,\sigmaqrt{\frac{1}{5}(15\pm2\sigmaqrt{15})}$\\
\hline $3$ & $\frac{7}{2}$ & $\frac{11}{2}$ & $-2H^{-5/2}+2H^{-9/2}$ & $H=1$\\
\hline
\varepsilonnd{stabular}
\varepsilonnd{center}
\varepsilonnd{table}
Now assume $H \le 1$. In this case, the integrand in \cref{equ:integral_polynomial_is_zero} does not vanish as $\sigma_{1} \to \infty$ and the previous argument cannot be used. Instead, we appeal to a variant of Hecke's converse theorem. By \cref{prop:Hecke_modularity_variant}, $f$ satisfies
\[
f\betaigg\vert\betaegin{pmatrix}aH^2 & -1\\ cH^2 & 0\varepsilonnd{pmatrix}=\omega i^{-\nu}\chi(\overline{a})\varepsilonpsilon_a^{-2\nu}\lambdaegendre{c}{a}f\betaigg\vert\betaegin{pmatrix}
1 & -\overline{a}\\ 0 & c \varepsilonnd{pmatrix},
\]
for $c\in 4N\mathbb{Z}_{>0}$ and $a,\overline{a}\in\mathbb{Z}$ with $a\overline{a}\equiv1\pmod{c}$. We can express this action more conveniently as
\betaegin{equation}\lambdaabel{eq:SlashOperatorModularity}
f\betaigg\vert\betaegin{pmatrix}aH & \frac{a\overline{a}H^2-1}{H}\\ cH & \overline{a}H\varepsilonnd{pmatrix}=\omega i^{-\nu}\chi(\overline{a})\varepsilonpsilon_a^{-2\nu}\lambdaegendre{c}{a}f.
\varepsilonnd{equation}
The argument presented in \cite{booker2022extension} to show that $H = 1$ and $\omega = i^{\nu}$ works verbatim. Thus, the matrices in \cref{eq:SlashOperatorModularity} are of the form
\[
M =\betaegin{pmatrix}
a & \frac{a\overline{a}-1}{c}\\
c & \overline{a}
\varepsilonnd{pmatrix},
\]
and letting $\chi(M)=\chi(\overline{a})$, \cref{eq:SlashOperatorModularity} can be compactly expressed as
\[
f\vert M=\chi(M)\varepsilonpsilon_a^{-2\nu}\lambdaegendre{c}{a}f.
\]
This is equivalent to the modularity condition for half-integral weight forms. The matrices $M$ along with $\betaegin{psmallmatrix}
1 & 0\\0 & 1
\varepsilonnd{psmallmatrix}$ and $\betaegin{psmallmatrix}
1 & 1\\ 0 & 1
\varepsilonnd{psmallmatrix}$ generate $\Gamma_0(4N)$. Therefore, $f\iotan S_\lambdaambda(\Gammaamma_0(4N),\chi)$ as desired.
\varepsilonnd{enumerate}
\varepsilonnd{proof}
\alphappendix
\sigmaection{Functional Equations for Additive Twists}\lambdaabel{append:functional_equation_additive_twist}
Let $f(z)$ be a half-integral weight modular form on $\Gamma_{0}(4N)\backslash\mathbb{H}$ of weight $\lambda$ twisted by a Dirichlet character $\chi$ modulo $4N$. Let $f(z) = \sum_{n \ge 1}f_{n}n^{\frac{\lambda-1}{2}}e(nz)$ be the Fourier expansion of $f$. The following proposition proves the analytic continuation and functional equation of the additive twist $\Lambda_{f}(s,\frac{a}{c})$.
\betaegin{proposition}
For any $c \iotan 4N\mathbb{Z}_{> 0}$ and $a,\overline{a} \iotan \mathbb{Z}$ with $a\overline{a} \varepsilonquiv 1 \thetamod{c}$, $\Lambda_{f}(s,\frac{a}{c})$ and $\Lambda_{f}(s,-\frac{\overline{a}}{c})$ admit analytic continuation to $\mathbb{C}$ and satisfy the functional equation
\betaegin{equation}
\Lambda_{f}\lambdaeft(s,\frac{a}{c}\right) = i^{\lambda}\chi(\overline{a})\varepsilon_{a}^{-2\lambda}\lambdaegendre{c}{a}c^{1-2s}\Lambda_{f}\lambdaeft(1-s,-\frac{\overline{a}}{c}\right).
\varepsilonnd{equation}
\varepsilonnd{proposition}
\betaegin{proof}
Let $\gamma = \betaegin{psmallmatrix} a & b \\ c & d \varepsilonnd{psmallmatrix} \iotan \Gamma_{0}(4N)$. Set $z = -\frac{d}{c}+\frac{i}{tc}$ for $t > 0$ so that $\gamma z = \frac{a}{c}-\frac{t}{ic}$. Then
\betaegin{equation}\lambdaabel{equ:modular_transform_1}
f\lambdaeft(\frac{a}{c}-\frac{t}{ic}\right) = \sigmaum_{n \gammae 1}f_{n}n^{\frac{\lambda-1}{2}}e^{-2\pi n\frac{t}{c}}e\lambdaeft(n\frac{a}{c}\right).
\varepsilonnd{equation}
On the other hand, by modularity
\[
f\lambdaeft(\frac{a}{c}-\frac{t}{ic}\right) = i^{\lambda}\chi(d)\varepsilon_{d}^{-2\lambda}\lambdaegendre{c}{d}\sigmaum_{n \gammae 1}f_{n}n^{\frac{\lambda-1}{2}}e^{-2\pi n\frac{1}{tc}}e\lambdaeft(-n\frac{d}{c}\right)t^{-\lambda}.
\]
As $d \varepsilonquiv \overline{a} \thetamod{c}$, $d \varepsilonquiv a \thetamod{4}$, and the modified Jacobi symbol is quadratic, we can express the previous identity as
\betaegin{equation}\lambdaabel{equ:modular_transform_2}
f\lambdaeft(\frac{a}{c}-\frac{t}{ic}\right) = i^{\lambda}\chi(\overline{a})\varepsilon_{a}^{-2\lambda}\lambdaegendre{c}{a}\sigmaum_{n \gammae 1}f_{n}n^{\frac{\lambda-1}{2}}e^{-2\pi n\frac{1}{tc}}e\lambdaeft(-n\frac{\overline{a}}{c}\right)t^{-\lambda}.
\varepsilonnd{equation}
Taking the Mellin transform of \cref{equ:modular_transform_1} at $s+\frac{\lambda-1}{2}$ in the variable $t$ we obtain
\betaegin{equation}\lambdaabel{equ:additive_twist_mellin_1}
c^{s+\frac{\lambda-1}{2}}\Lambda_{f}\lambdaeft(s,\frac{a}{c}\right) = \iotant_{0}^{\iotanfty}f\lambdaeft(\frac{a}{c}-\frac{t}{ic}\right)t^{s+\frac{\lambda-1}{2}}\,dt.
\varepsilonnd{equation}
Similarly, taking the Mellin transform of \cref{equ:modular_transform_2} yields
\betaegin{equation}\lambdaabel{equ:additive_twist_mellin_2}
i^{\lambda}\chi(\overline{a})\varepsilon_{a}^{-2\lambda}\lambdaegendre{c}{a}c^{(1-s)+\frac{\lambda-1}{2}}\Lambda_{f}\lambdaeft(1-s,-\frac{\overline{a}}{c}\right) = \iotant_{0}^{\iotanfty}f\lambdaeft(\frac{a}{c}-\frac{t}{ic}\right)t^{s+\frac{\lambda-1}{2}}\,dt.
\varepsilonnd{equation}
Equating these two transforms gives the desired functional equation. The analytic continuation follows from the fact that the integrals in \cref{equ:additive_twist_mellin_1,equ:additive_twist_mellin_2} are analytic, by the rapid decay of $f$, together with the functional equation just established.
\varepsilonnd{proof}
\sigmaection{An Extension of Hecke's Modularity Argument}
Let $f(z)$, $\gamma(s)$, and $\Lambda_f\left(s,\frac{a}{c}\right)$ be as in \cref{thm:ConverseTheorem}. The aim of this appendix is to provide a variant of Hecke's modularity relation when $\gamma(s)$ is given explicitly.
\betaegin{proposition}\lambdaabel{prop:Hecke_modularity_variant}
Let $f(z)$, $\gamma(s)$, $\omega$, and $\Lambdaambda_f\lambdaeft(s,\frac{a}{c}\right)$ be as in \cref{thm:ConverseTheorem}. Moreover, suppose that $\gamma(s) = CH^{s}\Gamma_{\mathbb{C}}\lambdaeft(s+\frac{\nu-1}{2}\right)$ as in \cref{prop:specify_gamma_factor_if_integral_vanishes}. Then
\[
f\betaigg\vert\betaegin{pmatrix}aH^2 & -1\\ cH^2 & 0\varepsilonnd{pmatrix}=\omega i^{-\nu}\chi(\overline{a})\varepsilonpsilon_a^{-2\nu}\lambdaegendre{c}{a}f\betaigg\vert\betaegin{pmatrix}
1 & -\overline{a}\\ 0 & c \varepsilonnd{pmatrix}.
\]
\varepsilonnd{proposition}
\betaegin{proof}
Let $s = \sigma+it$ with $ \sigma > \frac{3}{2}$. Then $L_{f}(s,\frac{a}{c}) \lambdal 1$ and by Stirling's formula $\gamma(s) \lambdal (1+t)^{-1}$ uniformly as $t \thetao \iotanfty$. Therefore the Mellin inverse of $\Lambda_{f}\lambdaeft(s,\frac{a}{c}\right)$ exists. The inverse Mellin transform of $\Lambda_{f}\lambdaeft(s,\frac{a}{c}\right)$ at $\frac{1}{cHy}$ in the variable $s$ is
\[
\frac{1}{2\pi i}\iotant_{\mathbb{R}e(s)=\sigma}\lambdaeft(\frac{1}{cHy}\right)^{-s}\Lambdaambda_f\lambdaeft(s,\frac{a}{c}\right)\,ds.
\]
Opening up $\Lambdaambda_f\lambdaeft(s,\frac{a}{c}\right) = \gamma(s)L_{f}\lambdaeft(s,\frac{a}{c}\right)$ and interchanging with the integral gives
\betaegin{equation}\lambdaabel{equ:Hecke_int_1}
2C\lambdaeft(\frac{1}{cH^2y}\right)^{\frac{\nu-1}{2}}\sigmaum_{n\gammaeq 1}f_nn^{\frac{\nu-1}{2}}e\lambdaeft(n\frac{a}{c}\right)\frac{1}{2\pi i}\iotant_{\mathbb{R}e(s)=\sigma}\lambdaeft(\frac{2\pi n}{cH^2y}\right)^{-\lambdaeft(s+\frac{\nu-1}{2}\right)}\Gammaamma\lambdaeft(s+\frac{\nu-1}{2}\right)\,ds.
\varepsilonnd{equation}
The integral in \cref{equ:Hecke_int_1} is the inverse Mellin transform of an exponential function:
\betaegin{equation}\lambdaabel{equ:Hecke_int_2}
\frac{1}{2\pi i}\iotant_{\mathbb{R}e(s)=\sigma}\lambdaeft(\frac{2\pi n}{cH^2y}\right)^{-\lambdaeft(s+\frac{\nu-1}{2}\right)}\Gammaamma\lambdaeft(s+\frac{\nu-1}{2}\right)\,ds = e^{-\frac{2\pi n}{cH^2y}}.
\varepsilonnd{equation}
Substituting \cref{equ:Hecke_int_2} into Expression~(\ref{equ:Hecke_int_1}) and rewriting the resulting Fourier series, we obtain
\betaegin{equation}\lambdaabel{equ:Hecke_modularity_variant_side_1}
2C \lambdaeft(\frac{1}{cH^2y}\right)^{\frac{\nu-1}{2}}f\lambdaeft(\frac{\frac{-1}{H^2yi}+a}{c}\right).
\varepsilonnd{equation}
Now let $\sigma < \frac{1}{2}$. The same estimates for $\Lambda_{f}\left(1-s,\frac{a}{c}\right)$ hold as for $\Lambda_{f}\left(s,\frac{a}{c}\right)$ in the region $\sigma > \frac{3}{2}$, so the inverse Mellin transform of the right-hand side in \cref{equ:functional_equation_assumption} exists. Taking the inverse Mellin transform, and noting that $2\nu \equiv \delta \pmod{4}$, we obtain
\[
\frac{1}{2\pi i}\iotant_{\mathbb{R}e(s)=\sigma}\lambdaeft(\frac{1}{cHy}\right)^{-s}\omega\chi(\overline{a})\varepsilonpsilon_a^{-2\nu}\lambdaegendre{c}{a}c^{1-2s}\Lambdaambda_f\lambdaeft(1-s,-\frac{\overline{a}}{c}\right)\,ds.
\]
Opening up $\Lambda_{f}\lambdaeft(1-s,\frac{a}{c}\right) = \gamma(1-s)L_{f}\lambdaeft(1-s,\frac{a}{c}\right)$ and gathering terms with a power of $1-s$ together gives
\[
2CHy\omega\chi(\overline{a})\varepsilonpsilon_a^{-2\nu}\lambdaegendre{c}{a}\frac{1}{2\pi i}\iotant_{\mathbb{R}e(s)=\sigma}\lambdaeft(\frac{c}{y}\right)^{1-s}(2\pi)^{1-s-\frac{\nu-1}{2}}\Gammaamma\lambdaeft(1-s+\frac{\nu-1}{2}\right)\sigmaum_{n\gammaeq 1}\frac{f_ne\lambdaeft(-n\frac{\overline{a}}{c}\right)}{n^{1-s}}\,ds.
\]
Interchanging the sum and integral, and multiplying and dividing by $\lambdaeft(\frac{y}{c}\right)^{\frac{\nu-1}{2}}$, we arrive at
\betaegin{equation}\lambdaabel{equ:Hecke_int_3}
\betaegin{aligned}
2CHy\omega\chi(\overline{a})\varepsilonpsilon_a^{-2\nu}\lambdaegendre{c}{a}\lambdaeft(\frac{y}{c}\right)^{\frac{\nu-1}{2}}&\sigmaum_{n\gammaeq 1}f_nn^{\frac{\nu-1}{2}}e\lambdaeft(-n\frac{\overline{a}}{c}\right) \\
&\cdot\frac{1}{2\pi i}\iotant_{\mathbb{R}e(s)=\sigma}\lambdaeft(\frac{2\pi ny}{c}\right)^{-\lambdaeft(1-s+\frac{\nu-1}{2}\right)}\Gammaamma\lambdaeft(1-s+\frac{\nu-1}{2}\right)\,ds.
\varepsilonnd{aligned}
\varepsilonnd{equation}
The integral in Expression (\ref{equ:Hecke_int_3}) is the inverse Mellin transform of an exponential function:
\betaegin{equation}\lambdaabel{equ:Hecke_int_4}
\frac{1}{2\pi i}\iotant_{\mathbb{R}e(s)=\sigma}\lambdaeft(\frac{2\pi ny}{c}\right)^{-\lambdaeft(1-s+\frac{\nu-1}{2}\right)}\Gammaamma\lambdaeft(1-s+\frac{\nu-1}{2}\right)\,ds = e^{-\frac{2\pi ny}{c}}.
\varepsilonnd{equation}
Inserting \cref{equ:Hecke_int_4} into Expression (\ref{equ:Hecke_int_3}) and rewriting the resulting Fourier series yields
\betaegin{equation}\lambdaabel{equ:Hecke_modularity_variant_side_2}
2CHy\omega\chi(\overline{a})\varepsilonpsilon_a^{-2\nu}\lambdaegendre{c}{a}\lambdaeft(\frac{y}{c}\right)^{\frac{\nu-1}{2}}f\lambdaeft(\frac{iy-\overline{a}}{c}\right).
\varepsilonnd{equation}
Expressions~(\ref{equ:Hecke_modularity_variant_side_1}) and (\ref{equ:Hecke_modularity_variant_side_2}) are equal since they are inverse Mellin transforms of the two sides of \cref{equ:functional_equation_assumption}. But this equality holds for all $iy$ with $y > 0$ and hence for all $z$ in the upper half-plane by the identity theorem. As a result, we obtain
\[
2C \lambdaeft(\frac{1}{-cH^2iz}\right)^{\frac{\nu-1}{2}}f\lambdaeft(\frac{\frac{-1}{H^2z}+a}{c}\right)=2CH(-iz)\omega\chi(\overline{a})\varepsilonpsilon_a^{-2\nu}\lambdaegendre{c}{a}\lambdaeft(\frac{-iz}{c}\right)^{\frac{\nu-1}{2}}f\lambdaeft(\frac{z-\overline{a}}{c}\right).
\]
Moving all constants to the right-hand side gives the more compact relation
\[
f\lambdaeft(\frac{\frac{-1}{H^2z}+a}{c}\right)
=
\omega(-iHz)^{\nu}\chi(\overline{a})\varepsilonpsilon_a^{-2\nu}\lambdaegendre{c}{a}f\lambdaeft(\frac{z-\overline{a}}{c}\right),
\]
which is equivalent to the statement of the proposition in terms of slash notation.
\varepsilonnd{proof}
\betaibliographystyle{alpha}
\betaibliography{reference}
\varepsilonnd{document} |
\begin{document}
\title{Law of the Iterated Logarithm for the random walk on the infinite percolation cluster}
\small\textsc{Abstract:} We
show that random walks
on the infinite supercritical percolation clusters in $\mathbb{Z}^d$ satisfy the usual Law of the Iterated Logarithm.
The proof combines Barlow's Gaussian heat
kernel estimates and the ergodicity of the random walk
on the environment viewed from the random walker as derived by Berger and Biskup.
\normalsize
\section{Introduction}
Asymptotic properties of random walks in $\mathbb {Z}^d$ are very well-understood. Their convergence to $d$-dimensional Brownian motions and their almost sure behavior (such as the law of the iterated logarithm) have been derived decades ago. A natural question to ask is what happens to random walks on graphs that are in some sense perturbations of $\mathbb{Z}^d$. One of the first examples to consider is to look at the random graph obtained by taking the infinite cluster $\mathcal {C}=\mathcal{C}(\omega)$ of a supercritical percolation process. One ``perturbs'' the original lattice by removing some edges independently. Various large-scale properties of this infinite graph have been studied with techniques such as coarse-graining. One of the most natural questions is to look at random walk on this cluster and to study its behavior.
One can for instance consider the {continuous-time simple random walk} (CTSRW) on $\mathcal{C}$. This is the process $X^{\omega}$ that waits an
exponential time of mean 1 at each vertex $x$ and jumps along one of the open edges $e$ adjacent to $x$, with each edge
chosen with equal probability. This process has been studied in a number of papers. Grimmett, Kesten, and Zhang
(\cite{GrimmettKestenZhang},1993) proved that $X^{\omega}$ is almost surely
recurrent if $d=2$ and transient if $d \geq 3$. Barlow (\cite{Barlow},2004)
proved Gaussian estimates for $X^{\omega}$. An invariance principle in every
dimension has been proved independently by Berger and Biskup in
(\cite{BergerBiskup},2004) and by Mathieu and Piatnitski
(\cite{MathieuPiatnitski},2004). Before that, Sidoravicius and Sznitman proved this result
for $d\geq 4$ (\cite{SidoraviciusSznitman}, 2004).
All these results show that a property that holds for random walk on $\mathbb{Z}^d$ still holds for random walk
on the infinite supercritical
percolation cluster.
It is natural to ask if this is still valid if one looks for instance at almost sure properties of the random walk
(recall that almost sure properties often describe the behavior of the walk at exceptional times).
Our goal in the present note is to show that it is indeed the case for the law of the iterated logarithm (LIL).
\begin{theorem} \label{law of the iterated logarithm}
Consider $d \ge 2$ and suppose that $p>p_c$, where $p_c= p_c (d)$ is the critical bond percolation probability in $\mathbb {Z}^d$.
Then, there exists a positive and finite constant $c(p,d)$, such that for almost all realization of percolation with parameter $p$, for all $x$ in the infinite cluster $\mathcal{C}$, the continuous-time random walk $X^{w}$ started from $x$ satisfies almost surely the following LIL:
$$\limsup_{t\rightarrow \infty} \frac{\left|X_t^{\omega}\right|}{\sqrt{t\log \log t}}=c(p,d).$$
\end{theorem}
Here and throughout the paper, $|x| = |x|_1 = \sum_{j=1}^d |x_j|$ stands for the $L^1$ norm of $x = (x_1, \ldots, x_d) \in {\mathbb Z}^d$.
Our proof can trivially be adapted to other norms (this would just change the value of the constant).
Note also that we are studying almost sure properties of the walk, so that the annealed and quenched statements are identical here (once we say that the constant $c(p,d)$ does not depend on the environment).
The main ingredients of our proof are the Gaussian bounds
derived in Barlow \cite{Barlow} and the ergodicity of Kipnis-Varadhan's \cite {KipnisVaradhan} random walk on the environment as seen from the random walker derived by Berger and Biskup in \cite{BergerBiskup}. These two results have in fact been instrumental in the (much more difficult) derivation of the invariance principle for this random walk.
The paper is organized as follows. In Section 2, we will show that one can find positive and finite $c_1 (p,d)$ and $c_2 (p,d)$ such that almost surely
$$ c_1(p,d) \le \limsup_{t\rightarrow \infty} \frac{\left|X_t^{\omega}\right|}{\sqrt{t\log \log t}} \le c_2(p,d).$$
This will be based on the Gaussian estimates derived in \cite{Barlow}.
The upper bound is an easy application of the Borel-Cantelli Lemma whereas the proof of the lower bound will use the Markov property and the fact that one can apply Gaussian bounds uniformly for $x$ in a ball of sufficiently large radius depending on $t$.
In Section 3, we derive a Zero-one Law for the limit of a discrete analog of the CTSRW. The main ingredient will be the ergodicity of a certain
shift $T$, related to Kipnis-Varadhan's {random walk on the
environment}. It has been proved by Noam Berger and Marek Biskup \cite{BergerBiskup} that this shift $T$ is
ergodic. Translating properties of the random walk in terms of this shift will allow us to
derive a Zero-one law for the limsup in the LIL for this discrete-time random walk.
Finally, in Section 4, we conclude by checking that the time-scales of the discrete-time random walk and of the continuous-time random walk are comparable.
\section{Weak LIL for the continuous random walk}
We will consider Bernoulli bond percolation of parameter $p$ on $\mathbb{Z}^d$ defined on a probability space
($\Omega$,$\mathcal{F}$,$\mathbb{P}_p$). It is well known (Grimmett \cite{grimmett}) that there exists $p_c\in(0,1)$ such
that when $p>p_c$ there is a unique infinite open cluster, that we denote by $\mathcal{C}$. For ${\mathbb P}_p$ almost every
environment $\omega \in \Omega$ and $x \in \mathcal {C}$, we define a CTSRW $X^{\omega}=(X_t^{\omega},t\geq0)$ started from $x$ under the probability measure $\mathbb{P}_{\omega}^x$. \textbf{In the whole paper, we fix $p>p_c$ and $d\geq2$}.
Because of translation-invariance of our problem (and because we are dealing with almost sure properties),
we can restrict ourselves to the case $x=0$ and work with the probability measure
$\tilde{\mathbb{P}}_p=\mathbb{P}_p(.\left|0 \in \mathcal{C}\right.)$ and $X^{\omega}_0=0$.
We will use the notation $\Phi(t)=\sqrt{t\log \log t}$ for all $t>e$.
\medbreak
We now recall Barlow's Gaussian estimates.
The first one uses the
{chemical distance} $d_{\omega}$ (or graph distance) on
$\mathcal{C}$. For every $x$ and $y$ in
$\mathcal{C}$, $d_{\omega}(x,y)$ is the length of the shortest
path between $x$ and $y$ that uses only edges in $\mathcal{C}$. For every integer $n$ and $x\in \mathcal{C}$, $\mathcal{B}_{\omega}(x,n)$ will denote the ball of radius $n$ and of center $x$ for distance $d_{\omega}$.
\begin{proposition} \label{maximum}(Barlow, \cite{Barlow}) There exist two constants $a_1=a_1(p,d)$ and $a_2=a_2(p,d)$ such that for every $\gamma>0$, there exists a finite random variable
$M_{\gamma}$ satisfying for almost every environment
$\omega$:
$$\text{for all } n\geq M_{\gamma}(\omega),\ \mathbb{P}^0_{\omega}(\max_{k\in \left[0,n\right]} d_{\omega} (0,X_k^{\omega}) > \gamma
\Phi(n)) \leq a_1 \exp\left(-a_2\frac{(\gamma\Phi(n))^2}{n}\right).$$
\end{proposition}
Recall that this statement holds for a general class of graphs (see Proposition 3.7 of Barlow \cite{Barlow}); percolation
estimates (see Theorem 2.18 and Lemma 2.19 of Barlow \cite{Barlow}) show that the percolation cluster belongs to this class.
The other result that we will use is the Gaussian bound itself:
\begin{theorem} (Barlow, \cite{Barlow}) \label{Gaussian bound} There exist finite constants $c_1$,..., $c_8$ and $\epsilon>0$ only depending on $p$ and $d$ that satisfy the following property. There exists a random variable $S_0$ with $\tilde{\mathbb{P}}_p(S_0\geq n)\leq c_7\exp(c_8 n^{\epsilon})$ and for almost every environment
$\omega$ such that $0,y\in \mathcal{C},t\geq 1$:
(1) The transition density $p_t^{\omega}(0,y)$ of $X^{\omega}$ satisfies the Gaussian bound
$$c_1t^{-d/2}e^{-c_2{\left|y\right|^2}/{t}} \leq p_t^{\omega}(0,y) \leq c_3t^{-d/2}e^{-c_4{\left|y\right|^2}/{t}}\text{ for }t\geq S_0(\omega)\vee \left|y\right|.$$
(2) $c_5n^d \leq Vol(\mathcal{B}_{\omega}(0,n))\leq c_6 n^d\ \text{for}\ n\geq
S_0(\omega).$
\end{theorem}
Note that, by translation invariance, one can define for each $x \in \mathbb{Z}^d$ a random variable $S_x$
satisfying the analogous conditions (with the same constants $c_1$,...,$c_8$,$\epsilon$), where one just replaces the origin $0$ by $x$ (and therefore replaces $y$ by $x+y$).
Let us remark that there are no uniform Gaussian bounds for every $x,y\in \mathcal{C}$ and every $t>0$ because (almost surely) every finite graph is actually embedded somewhere in the infinite cluster. We can now derive almost sure upper and lower bounds for our limsup.
\begin{proposition} \label{upper bound} \textbf{(Upper bound)} There exists a finite $c_+=c_+(p,d)$ such that for almost
every environment $\omega$,
$$\mathbb{P}^0_{\omega}\ a.s.\ \limsup_{t\rightarrow \infty} \frac{\left|X_t^{\omega}\right|}{\Phi(t)}\leq c_+.$$
\end{proposition}
\textbf{Proof:} Fix $\omega$ an environment containing 0. The proof goes along the same lines as in the Brownian case. Let $\gamma>0$, and define the following events:
$$A_n^{\omega}=\left\{\max_{k \in \left[0,2^n\right]}d_{\omega}(0,X_k^{\omega}) > \gamma \Phi(2^n) \right\}.$$
Proposition \ref{maximum} shows that for all $n$ large enough,
$$\mathbb{P}^0_{\omega}(A_n^{\omega})\leq a_1\exp\left(-a_2\frac{(\gamma\Phi(2^n))^2}{2^n}\right)\leq 2a_1 n^{-a_2\gamma^2}.$$
Provided $\gamma$ is large enough, the Borel-Cantelli Lemma implies that almost surely $A_n^{\omega}$ holds finitely often. Using the fact that $\left|.\right| \leq d_{\omega}(0,.)$, we get that for $n$ large enough, $\max_{k\in[0,2^n]}\left|X_k^{\omega}\right| < \gamma \Phi(2^{n})$. We conclude that for $n$ large enough, $\left|X_n^{\omega}\right| < 2\gamma \Phi(n)$.
\begin{flushright}
$\square$
\end{flushright}
\begin{proposition} \label{lower bound} \textbf{(Lower bound)} There exists a positive $c_-=c_-(p,d)$ such that for almost
every environment $\omega$,
$$\mathbb{P}^0_{\omega}\ \text{a.s.},\ c_-\leq \limsup_{t\rightarrow \infty} \frac{\left|X_t^{\omega}\right|}{\Phi(t)}.$$
\end{proposition}
Let us first present the outline of the proof. Consider $q>1$ and $\gamma>0$ (we will choose their values later). As in the Brownian case, set
$D_{n}^{\omega}=X_{q^n}^{\omega}-X_{q^{n-1}}^{\omega}$. We have $\left|X_{q^n}^{\omega}\right|\geq
\left|D_n^{\omega}\right|-|X_{q^{n-1}}^{\omega}|$. Using the upper bound, we obtain that almost surely, for $n$ large enough:
\begin{align}\left|X_{q^n}^{\omega}\right|&\geq \left|D_n^{\omega}\right|-2c_+\Phi(q^{n-1}).\end{align}Because $\Phi(q^{n-1})\leq q^{-1/2}\Phi(q^n)$, the second term can be chosen much smaller than $\Phi(q^n)$, provided $q$ is large enough. Then, in order to prove the result, it is enough to bound $D_n^{\omega}$ from below. Define the events $C_n^{\omega}=\left\{\left|D_n^{\omega}\right|>\gamma \Phi(q^n)\right\}.$ If these events hold for infinitely many $n$ almost surely, then we are done. We define the $\sigma$-fields $\mathcal{F}_{n}^{\omega}=\sigma(X^{\omega}_k,k\leq q^{n})$. We will apply
the Borel-Cantelli Lemma generalized to dependent events (see Durrett \cite{Durrett}, chapter 4, paragraph 4.3). We therefore need to prove that
$$\mathbb{P}_{\omega}^0\text{ a.s. }\sum_{n\geq 1} \mathbb{E}^0_{\omega}[C_n^{\omega}
\left|\mathcal{F}^{\omega}_{n-1}\right.]=\infty.$$
Using the Markov property and Gaussian bounds, we will be able to
find a lower bound for $\mathbb{E}^0_{\omega}[C_n^{\omega}
\left|\mathcal{F}^{\omega}_{n-1}\right.].$
In order to apply these bounds, we need to control not only $S_0$ (from Theorem \ref{Gaussian bound}) but also $S_x$ for $x=X^{\omega}_{q^{n-1}}$. We first prove that it is indeed possible, using Gaussian estimates and the upper bound.
\begin{lemma} \label{environment}
Let $\gamma>0$. For almost every environment $\omega$, we have, almost surely, $S_{X^{\omega}_{n}}\leq \gamma\Phi(n)$ for $n$ large enough.
\end{lemma}
\textbf{Proof:} Let $\gamma>1$. Define for each integer $n$ the set
$$B_n=\left\{\exists y\in B(0, 2c_+\Phi(n))\text{ s.t. } S_y\geq \gamma\Phi(n)\right\}.$$
where $S_y$ is the random variable of Theorem \ref{Gaussian bound}. The Theorem yields
$$\tilde{\mathbb{P}}_p(B_n)\leq Vol\left(B(0,2c_+\Phi(n))\right) d_1 \exp\left(-d_2(\gamma\Phi(n))^{\epsilon}\right).$$
The right-hand side of the inequality is summable, so that (by Borel-Cantelli) $B_n$ holds for a finite number of $n$ for almost every environment. But almost surely, $\left|X_{n}^{\omega}\right|$ is less than $2c_+\Phi(n)$ for $n$ large enough. Combining these two facts, we obtain the claim.
\begin{flushright}
$\square$
\end{flushright}
$ $\\
\textbf{Proof of Proposition \ref{lower bound}:} Let $q,\gamma>0$ and $\kappa>0$ such that $c_5\kappa^d>c_6+1$. Note that $\kappa$ does not depend on $\gamma$ and $q$. Set $t_n=q^n-q^{n-1}$. By the Markov property, we get for $n\geq1$,$$\mathbb{E}^0_{\omega}(C_n^{\omega} \left|\mathcal{F}^{\omega}_{n-1}\right.)=\mathbb{P}^0_{\omega_n}[\gamma \Phi(q^n)<\left|X_{t_n}^{\omega_n}\right|]\geq \mathbb{P}^0_{\omega_n}[\gamma \Phi(q^n)<\left|X_{t_n}^{\omega_n}\right|<\kappa\gamma \Phi(q^n)]=G_n(\omega_n)$$
where $\omega_n=\tau_{X_{q^{n-1}}^{\omega}}(\omega)$ ($\tau_x$ is the shift defined by $(\tau_x\omega)_y=\omega_{x+y}$) and:$$G_n(\omega)=\mathbb{P}^0_{\omega}\left[\gamma\Phi(q^n)<\left|X^{\omega}_{t_n}\right|<\kappa\gamma\Phi(q^n)\right].$$
The function $G_n$ is well-defined and measurable. If $\mathcal{A}_n(\omega)$ is the annulus $$\mathcal{A}_n(\omega)=\left\{z\in \mathcal{C},\text{ s.t. }\gamma\Phi(q^n)< \left|z\right| <\kappa \gamma \Phi(q^n) \right\},$$ we find by definition of the transition density $G_n(\omega)=\sum_{z\in \mathcal{A}_n(\omega)}p_{t_n}^{\omega}(0,z)$. We deduce:
\begin{align}\mathbb{E}^0_{\omega}(C_n^{\omega} \left|\mathcal{F}^{\omega}_{n-1}\right.)=\sum_{z\in \mathcal{A}_n(\omega_n)}p_{t_n}^{\omega_n}(0,z).\end{align}
Using Lemma \ref{environment}, we know that almost surely there exists $N$ large enough such that for every $n$ larger than $N$, $S_0(\omega_n)=S_{X^{\omega}_{q^{n-1}}}(\omega)\leq \gamma\Phi(q^{n-1})\leq t_n$. For $n\geq N$, one can use Gaussian estimates of Theorem \ref{Gaussian bound} for every $z\in \mathcal{A}_n(\omega_n)$, we get for such a $z$:
\begin{align}p_{t_n}^{\omega_n}(0,z)&\geq c_1t_n^{-d/2}\exp\left(-\frac{c_2(\kappa\gamma\Phi(q^n))^2}{t_n}\right)\geq c_1t_n^{-d/2}n^{-c_2(\kappa\gamma)^2}\end{align}
Using again the same Lemma, Theorem \ref{Gaussian bound} yields that the volume growth property holds for radii larger than $S_0(\omega_n)$. Recalling the definition of $\kappa$, we find:
\begin{align}Vol(\mathcal{A}_n(\omega_n))&\geq (\gamma \Phi(q^n))^{d}\geq \gamma^d t_n^{d/2}\end{align}
Combining (2.3) and (2.4) in (2.2), we obtain that there exists a constant $c>0$ such that almost surely for $n$ large enough:
$$G_n(\omega_n)\geq cn^{-c_2(\kappa\gamma)^2}$$
Provided $\gamma$ is small enough, we can use the generalized Borel-Cantelli Lemma (e.g. \cite{Durrett}). We get that almost surely, there exist infinitely many integers $n$ such that $\left|D_n^{\omega}\right|>\gamma\Phi(q^n)$. If $q>0$ is taken large enough ($\kappa$, $\gamma$ and $c_2$ do not depend on $q$), we can use the inequality (2.1) to prove that almost surely:
$$ \left|X_{q^n}^{\omega}\right|\geq \gamma \Phi(q^n)-2c_+q^{-1/2}\Phi(q^n)>\frac{\gamma}{2}\Phi(q^n)$$
for infinitely many $n$, which is the claim.
\begin{flushright}
$\square$
\end{flushright}
$ $\\
\textbf{Remark 1:} In order to bound the sum in (2.2) from below, Gaussian bounds were not sufficient. Without the volume growth property, the annulus could contain only few elements. Even if the exponential term is not too small (typically of order $n^{-s}$ for $s$ small), the term $t_n^{-d/2}$ (which corresponds to $t^{-d/2}$ for the Brownian motion) could be very small and make the series become summable. The cardinality of the annulus was critical in order to balance out this term.
$ $\\
\textbf{Remark 2:} Our goal was to obtain a result in ${L}^1$ norm. Unfortunately, the
natural distance on graphs is the chemical distance $d_{\omega}(.,.)$. In the bound from below, this does not create any trouble
because of the trivial inequality $\left|x\right|\leq d_{\omega}(0,x)$. But it could happen that the chemical distance is much bigger
than the $L^1$ norm. The proof of Theorem 2.18 in Barlow \cite{Barlow} precisely deals with this issue
thanks to a result by Antal and Pisztora \cite {AntalPisztora} that shows that the chemical
distance on $\mathcal{C}$ and the ${L}^1$ norm are not that different on a supercritical percolation cluster.
\section{Zero-one Law for the blind random walk}
In the present section, we will consider discrete time random walks. We first introduce the two random walks we
will use. Then we recall an ergodicity result proved in \cite{BergerBiskup} and we derive the Zero-one Law.
Our proofs are rather direct applications of the ergodicity statement of \cite{BergerBiskup}.
For each $x\in \mathbb{Z}^d$, let $\tau_x$ be the shift from $\Omega$ in $\Omega$ defined by: $(\tau_x\omega)_y=\omega_{y+x}$. For each $\omega$, let $Y_n^{\omega}$ be the simple random walk (called \textbf{blind random walk}) on $\mathcal{C}$ started at the origin. At each unit of time, the walk picks a neighbor at random and if the corresponding edge is occupied, the walk moves to this neighbor. Otherwise, it does not move. This random walk may seem less natural than the random walk that chooses randomly one of the accessible neighbors and jumps to it, but this blind random walk preserves the uniform measure on $\mathcal {C}$, so that the stationary measure on the environment as seen from the walker turns out to be simpler.
It is well known (cf Kipnis and Varadhan \cite{KipnisVaradhan}) that the Markov chain $(Y_n^{\omega})_{n\geq 0}$ induces a Markov chain on $\Omega$ (the so-called \textbf{Markov chain on the environment}), that can be interpreted as the trajectory of "environment viewed from the perspective of the walk". It is defined as
$$\omega_n ( \cdot) = \omega ( \cdot + Y_n^{\omega})=\tau_{Y_n^{\omega}}\omega(.).$$
One can describe the chain $(\omega_n)$ as follows. At each step $n$, one chooses one of the $2d$ neighbors of the origin at
random and calls it $e$. If the corresponding edge is closed for $\omega_n$, then $\omega_{n+1} = \omega_n$, otherwise $\omega_{n+1} (\cdot) = \tau_e \circ \omega_n$, where $\tau_e \circ \omega (\cdot) = \omega ( \cdot - e)$.
It is straightforward to check that the probability measure $\tilde{\mathbb{P}}_p$ is a reversible and therefore stationary measure for the Markov chain $(\omega_n)$.
This allows us to extend our probability space to $\Xi= \Omega^{\mathbb{Z}}$ (endowed with the product $\sigma$-algebra $\mathcal {H}= \mathcal{F}^{\otimes\mathbb{Z}}$) and to define $\omega_n$ also for negative $n$'s in such a way that
that the family $(\omega_n, n \in \mathbb {Z})$ is stationary. Let $\mu$ denote the probability measure associated to the
Markov chain.
Note that under the measure $\mu$, and for all $n \in \mathbb {Z}$, the law of $(\omega_n, \omega_{n+1}, \ldots)$ is identical
to that of $(\omega_0, \omega_1, \ldots) $. On the other hand, the marginal law of $\omega_0$ (still under $\mu$) is $\tilde{\mathbb{P}}_p$. One then defines $T:\Xi \rightarrow \Xi,\bar{\omega}\mapsto T\bar{\omega}$ to be $(T\bar{\omega})_n=\bar{\omega}_{n+1}$.
\begin{theorem}(Berger, Biskup, \cite {BergerBiskup})
\label{ergodicity}
$T$ is ergodic with respect to $\mu$. In other words, for all $A\in \mathcal{H}$, if $T^{-1}(A) =A$, then $\mu(A)$ is equal to 0 or 1.
\end{theorem}
We refer to the paper of Berger and Biskup \cite{BergerBiskup} for proofs. Define for every $a>0$ and $\omega$ the event: $$A_{\omega}(a)=\left\{\limsup_{n\rightarrow \infty}\frac{\left|Y_n^{\omega}\right|}{\Phi(n)}>a\right\}.$$
Let us now state and prove a consequence of this ergodicity for our law of the iterated logarithm:
\begin{corollary}
\label{loi du tout ou rien} \textbf{(Zero-one Law)} Let $a\geq 0$. The probability of the event
$$B_a=\left\{\mathbb{P}_{\omega}^x\text{ a.s. }A_{\omega}(a)\text{ holds for all }x\in \mathcal{C}\right\}$$ is equal to $0$ or to $1$.
\end{corollary}
\textbf{Proof:}
Our goal is to use the ergodicity of the environment and to note that the considered event corresponds to a $T$-invariant set in $\Xi$.
Let $a \geq 0$ and define the function $F$ on $\Omega$ by:
$$F(\omega)=\mathbb{P}_{\omega}^0(A_{\omega}(a))$$
This function is well-defined and measurable. Let us fix the environment $\omega$ for a little while and
denote $\omega_n=\tau_{Y_n^{\omega}}\omega$. We claim that $(F(\omega_n))_n$ is a martingale with respect to the
filtration $\mathcal{F}_n$ associated to the process $Y_n^{\omega}$. Indeed, the Markov property yields
$$F(\omega_n)=\mathbb{P}_{\omega}^0(A_{\omega}(a)\left|\mathcal{F}_n\right.).$$
This martingale is bounded and therefore converges almost surely as $n \to \infty$.
Moreover, it converges to the indicator function
of $A_{\omega}(a)$ because this event is clearly in
$\mathcal{F}_{\infty}=\sigma (\cup_{n\geq 0}\mathcal{F}_n)$.
By taking the Ces\`aro mean and then integrating with respect to $\omega$ (and using the fact that the probabilities are bounded by $1$), we get that
$$
\tilde{\mathbb{E}}_0\left[
\mathbb{E}_{\omega}^0 \left( \left| \lim_{N\rightarrow \infty} \frac{1}{N} \sum_{n=0}^{N-1}F(\omega_n) - 1_{A_{\omega}(a)} \right| \right) \right] = 0. $$
On the other hand, $F$ can be viewed as a measurable function on $\Xi$. The ergodicity of $\mu$ implies that for $\mu$ almost every
$\bar{\omega}$:
$$\lim_{N\rightarrow \infty} \frac{1}{N} \sum_{n=1}^{N}F(\bar{\omega}_n)= \int Fd\mu$$
Let us recall that $\bar{\omega}$ has the same law under $\mu$ as $(\omega_n)_n$ under
$\tilde{\mathbb{E}}_0[\mathbb{P}_{\omega}^0(.)]$. We deduce that the limit
$1_{A_{\omega}(a)}$ is (up to a set of zero measure) constant. Since it is an indicator function, this means that
either the corresponding event is almost surely true, or almost surely false.
\section {The Law of the Iterated Logarithm}
We can now derive the Law of the Iterated Logarithm. Let us first note that the previous corollary immediately implies that for a fixed $p > p_c$, there exists a constant $c'(p,d) \in [0, \infty]$ such that for almost
every environment $\omega$, the blind random walk satisfies
$$\limsup_{n\rightarrow \infty} \frac{\left|Y_n^{\omega}\right|_1}{\Phi(n)}=c'(p,d)$$
almost surely (just choose $c'(p,d)$ to be the supremum of the set of $a$'s such that the event $B_a$ is almost surely satisfied).
Our next goal is to show that the time scales for the two random walks are comparable. Let $\omega \in \Omega_0$, define the real random variable $(T_n^{\omega})_n$ by $T_0^{\omega}=0$ and
$$T_{n+1}^{\omega}=\inf\left\{t>T_n^{\omega}, X_t^{\omega} \neq X_{T_n^{\omega}}^{\omega}\right\}.$$
Clearly, the Law of Large Numbers implies that for all $\omega \in \Omega_{0}$,
$T_{n+1}^{\omega} \sim n $ almost surely.
Let $\omega \in \Omega_0$, define in the same way the random variable $(U_n^{\omega})$ by $U_0^{\omega}=0$ and
$$U_{n+1}^{\omega}=\inf\left\{p>U_n^{\omega}, Y_p^{\omega} \neq Y_{U_n^{\omega}}^{\omega}\right\}.$$
The $(U_{n+1}^{\omega}-U_n^{\omega})_n$ are not i.i.d. anymore. Conditionally on the environment and on the past up to the
$n$-th jump of $Y^{\omega}$, the law of $U_{n+1}^{\omega}- U_n^{\omega}$ is geometric and depends on the number $I(n)$ of incoming
open edges at $Y^{\omega}_{U_n^{\omega}}$ (its mean is some function $f(I(n))$).
Ergodicity ensures that almost surely and for each $k \le 2d$,
$$ \frac {1}{n} \sum_{j=1}^n 1_{ I(j) = k } \to i(k)$$
where $i(k)$ denotes the $\mu$-probability that $\omega_0$ has $k$ incoming open edges at the origin.
Using the Law of Large Numbers for sums of independent geometric random variables of mean $f(k)$ for each $k$, we get
readily that for almost all $\omega\in \Omega_0$,
$$\mathbb{P}_{\omega}^0\text{ a.s. }U_{n+1}^\omega / n \to \sum_{k=1}^{2d} i(k) f(k)=\alpha_p^{-1}.$$
This last quantity is clearly positive and finite.
We can now conclude the proof of the Law of the Iterated Logarithm for the continuous time random walk.
\medbreak
\textbf{Proof of Theorem \ref{law of the iterated logarithm}:} Consider the natural coupling for which $X_t^{\omega}$ and
$Y_n^{\omega}$ have the same trajectories. More precisely, if we consider the \textbf{myopic random walk} $(Z_n^{\omega})_n$
that jumps at each time, choosing uniformly a neighbor, defined on a probability space $(\Omega_{\omega},\mathcal{F}_{\omega},\mathbb{P}^0_{\omega})$. Assume there exists a family $(T_i)_{i\in\mathbb{Z}_+}$ of i.i.d. exponential random variables with mean $1$, and an independent family $(S^{\omega}_x)_{x\in \mathbb{Z}^d}$ of independent random variables such that $S_x^{\omega}$ is geometric with parameter ${n_x^{\omega}}/{(2d)}$, where $n_x^{\omega}$ is the number of adjacent open edges of $x$ for the configuration $\omega$.
Define $T_p^{\omega}=\sum_{k=0}^{p-1}T_k$ and $n^{\omega}(t)=\sup\left\{p, T_p^{\omega} \leq t\right\}$. Then we can write the continuous time random walk as follows
$$X_t^{\omega}=Z_{n^{\omega}(t)}^{\omega}\ \ \forall t \geq 0.$$
Now, consider $U_p^{\omega}=\sum_{k=0}^{p-1}S_{Z_k^{\omega}}^{\omega}$ and $m^{\omega}(n)=\sup\left\{p, U_p^{\omega} \leq n\right\}$. Then we can write the blind random walk as follows:
$$Y_n^{\omega}=Z_{m^{\omega}(n)}^{\omega}\ \ \forall n \geq 0.$$
Because of the estimates of the time-scales of our two walks, we get that
$$
\limsup_{t \rightarrow \infty}\frac{\left|X_t^{\omega}\right|}{\Phi(t)} = \limsup_{t \rightarrow \infty}\frac{\left|Z_{n^{\omega}(t)}^{\omega}\right|}{\Phi(n^{\omega}(t))}=\limsup_{n \rightarrow \infty}\frac{\left|Z_{n}^{\omega}\right|}{\Phi(n)}$$
and that
$$
\limsup_{n \rightarrow \infty}\frac{\left|Y_n^{\omega}\right|}{\Phi(n)} =\limsup_{n \rightarrow \infty}\frac{\left|Z_{
m^{\omega}(n)}^{\omega}\right|}{\Phi(\alpha_p m^{\omega}(n))}=\frac{1}{\sqrt{\alpha_p}}\limsup_{n \rightarrow \infty}\frac{\left|Z_{n}^{\omega}\right|}{\Phi(n)}.$$
From these two equalities, we deduce that
$$\limsup_{t \rightarrow \infty}\frac{\left|X_t^{\omega}\right|}{\Phi(t)}=\frac{1}{\sqrt{\alpha_p}}\limsup_{n \rightarrow \infty}\frac{\left|Y_n^{\omega}\right|}{\Phi(n)}\text{ a.s.}.$$
The theorem follows readily.
\begin{flushright}
$\square$
\end{flushright}
Note that this also shows that the Law of the Iterated Logarithm holds for the blind and the myopic random walks.
\medbreak
\textbf{Acknowledgements:} This paper was written during my stay at the University of British Columbia, I would like to thank M.T. Barlow,
who first taught me about this question, for his availability and the advice he gave me during my whole stay. I would also
like to thank W. Werner for his careful reading of this paper and his numerous suggestions.
\begin{flushright}
\footnotesize \textsc{Department of Mathematics}
\textsc{University of British Columbia}
\textsc{Vancouver, British Columbia, Canada}
\medbreak
\textsc{DMA, Ecole Normale Sup\'erieure}
\textsc{45 rue d'Ulm, 75230 Paris cedex 05, France}
\textsc{E-mail:} [email protected]\end{flushright}
\end{document} |
\begin{document}
\title{BayesOpt: A Bayesian Optimization Library for Nonlinear Optimization, Experimental Design and Bandits}
\author{Ruben Martinez-Cantin \\
[email protected]}
\maketitle
\begin{abstract}
BayesOpt is a library with state-of-the-art Bayesian optimization methods to solve nonlinear optimization, stochastic bandits or sequential experimental design problems. Bayesian optimization is sample efficient by building a posterior distribution to capture the evidence and prior knowledge for the target function. Built in standard C++, the library is extremely efficient while being portable and flexible. It includes a common interface for C, C++, Python, Matlab and Octave.
\end{abstract}
\section{Introduction}
Bayesian optimization \citep{Mockus1989,Brochu:2010c} is a special case of nonlinear optimization where the algorithm \emph{decides} which point to explore next based on the analysis of a distribution over functions $P(f)$, for example a Gaussian process or other \emph{surrogate model}. The decision is taken based on a certain criterion $\mathcal{C}(\cdot)$ called \emph{acquisition function}. Bayesian optimization has the advantage of having a \emph{memory} of all the observations, encoded in the posterior of the surrogate model $P(f|\mathcal{D})$ (see Figure~\ref{fig:algobasic}). Usually, this posterior distribution is sequentially updated using a nonparametric model. In this setup, each observation improves the knowledge of the function in all the input space, thanks to the spatial correlation (kernel) of the model. Consequently, it requires a lower number of iterations compared to other nonlinear optimization algorithms. However, updating the posterior distribution and maximizing the acquisition function increases the cost per sample. Thus, Bayesian optimization is normally used to optimize \emph{expensive} target functions $f(\cdot)$, which can be multimodal or without closed-form. The quality of the prior and posterior information about the surrogate model is of paramount importance for Bayesian optimization, because it can reduce considerably the number of evaluations to achieve the same performance.
\begin{figure}
\centering
\begin{Algoritmo}[frametitle={Input: target $f(\cdot)$, prior $P(f)$, criterion $\mathcal{C}(\cdot)$, budget $N$ \quad \quad Output: $\mathbf{x}^*$}]
{\scriptsize
\hrulefill\par
Build a dataset $\mathcal{D}$ of points $\mathbf{X} = \mathbf{x}_1 \ldots \mathbf{x}_l$ and its response $\mathbf{y} = y_1 \ldots y_l$ using an initial design.\\
\textbf{While} $i < N$
\begin{itemize}[noitemsep]
\item Update the distribution with all data available $P(f|\mathcal{D}) \propto P(\mathcal{D}|f) P(f)$
\item Select the point $\mathbf{x}_i$ which maximizes the criterion: $\mathbf{x}_i = \arg \max \mathcal{C}(\mathbf{x}|P(f|\mathcal{D}))$. Observe $y_i = f(\mathbf{x}_i)$.
\item Augment the data with the new point and response: $\mathcal{D} \leftarrow \mathcal{D} \cup \{\mathbf{x}_i,y_i\} \quad \quad i \leftarrow i+1$
\end{itemize}}
\end{Algoritmo}
\caption{General algorithm for Bayesian optimization}
\label{fig:algobasic}
\end{figure}
\section{BayesOpt library}
BayesOpt uses a surrogate model of the form: $f(\mathbf{x}) = \phi(\mathbf{x})^T \mathbf{w} + \epsilon(\mathbf{x})$, where we have $\epsilon(\mathbf{x}) \sim \mathcal{NP} \left( 0, \sigma^2_s (\mathbf{K}(\theta) + \sigma^2_n \mathbf{I}) \right)$.
Here, $\mathcal{NP}()$ means a nonparametric process, for example, a Gaussian, Student-t or Mixture of Gaussians process. This model can be considered as a linear regression model $\phi(\mathbf{x})^T\mathbf{w}$ with heteroscedastic perturbation $\epsilon(\mathbf{x})$, as a nonparametric process with nonzero mean function or as a semiparametric model. The library allows the user to define hyperpriors on $\mathbf{w}$, $\sigma_s^2$ and $\theta$. The marginal posterior $P(f|\mathcal{D})$ can be computed in closed form, except for the kernel parameters $\theta$. Thus, BayesOpt allows the use of different posteriors based on empirical Bayes \citep{Santner03} or MCMC \citep{Snoek2012}.
\subsection{Implementation}
\emph{Efficiency} has been one of the main objectives during development. For empirical Bayes (ML or MAP of $\theta$), we found that a combination of global and local derivative free methods such as DIRECT \citep{Jones:1993} and BOBYQA \citep{Powell2009} marginally outperforms gradient based methods in CPU time for optimizing $\theta$, by avoiding the overhead of computing the marginal likelihood derivative.
Also, updating $\theta$ every iteration might be unnecessary or even counterproductive \citep{Bull2011}.
One of the most critical components, in terms of computational cost, is the computation of the inverse of the kernel matrix $\mathbf{K}(\cdot)^{-1}$. We compared different numerical solutions and we found that the \emph{Cholesky decomposition} method outperforms any other method in terms of performance and numerical stability. Furthermore, we can exploit the structure of the Bayesian optimization algorithm in two ways. First, \emph{points arrive sequentially}. Thus, we can do incremental computations of matrices and vectors, except when the kernel parameters $\theta$ are updated. For example, at each iteration, we know that only $n$ new elements will appear in the correlation matrix, i.e.: the correlation of the new point with each of the existing points. The rest of the matrix remains invariant. Thus, instead of computing the whole \emph{Cholesky} decomposition, being $\mathcal{O}(n^3)$ we just add the new row of elements to the triangular matrix, which is $\mathcal{O}(n^2)$. Second, finding the optimal decision at each iteration $\mathbf{x}_i$ requires \emph{multiple queries of the acquisition function from the same posterior} $\mathcal{C}(\mathbf{x}|P(f|\mathcal{D}))$ (see Figure \ref{fig:algobasic}). Also, many terms of the criterion function are independent of the query point $\mathbf{x}$ and can be precomputed. This behavior is not standard in nonparametric models, and to our knowledge, this is the first software for Gaussian processes/Bayesian optimization that exploits the idea of precomputing all terms independent of $\mathbf{x}$.
A comparison of CPU time (single thread) vs accuracy with respect to other open source libraries is represented in Table \ref{fig:time} with respect to two different configurations of BayesOpt. SMAC \citep{HutHooLey11-smac}, HyperOpt \citep{Bergstra2011} and Spearmint \citep{Snoek2012} used the HPOlib \citep{EggFeuBerSnoHooHutLey13} timing system (based on \texttt{runsolver}). DiceOptim \citep{Roustant2012} used R timing system (\texttt{proc.time}). For BayesOpt, standard \texttt{ctime} was used.
\begin{table}\centering
{\scriptsize
\setlength{\tabcolsep}{3pt}
\begin{tabular}{|l|c|c|r|c|c|r|}
\hline
& \multicolumn{3}{c|}{Branin (2D)} & \multicolumn{3}{c|}{Camelback (2D)} \\ \hline
& Gap 50 samp. & Gap 200 samp. & Time 200 s. & Gap 50 samp. & Gap 100 samp. & Time 100 s. \\ \hline
SMAC & 0.19444 (0.195) & 0.06780 (0.059) & 147.3 (1.3) & 0.08534 (0.103) & 0.03772 (0.034) & 70.5 (0.9) \\ \hline
HyperOpt & 0.69499 (0.414) & 0.07507 (0.059) & 23.5 (0.2) & 0.10941 (0.050) & 0.03383 (0.025) & 8.0 (0.09) \\ \hline
Spearmint & 1.48953 (1.468) & \textbf{0.00000} (0.000) & 7530.1 (30.4) & 0.00005 (0.000) & 0.00004 (0.000) & 1674.0 (8.0) \\ \hline
DiceOptim & \textbf{0.00004} (0.000) & 0.00003 (0.000) & 624.3 (35.3) & 0.80861 (0.417) & 0.35811 (0.350) & 215.2 (10.5) \\ \hline
BayesOpt1 & 1.16844 (1.745) & \textbf{0.00000} (0.000) & \textbf{8.6} (0.07) & 0.00852 (0.021) & \textbf{0.00000} (0.000) & \textbf{2.2} (0.2) \\ \hline
BayesOpt2 & 0.04742 (0.116) & \textbf{0.00000} (0.000) & 1802.7 (78.3) & \textbf{0.00000} (0.000) & \textbf{0.00000} (0.000) & 147.8 (1.3) \\ \hline \hline
& \multicolumn{3}{c|}{Hartmann (6D)} & \multicolumn{3}{c|}{Configuration - $\theta$ learning} \\ \hline
& Gap 50 samp. & Gap 200 samp. & Time 200 s. & \multicolumn{3}{c|}{} \\ \hline
SMAC & 1.23130 (0.645) & 0.31628 (0.249) & 155.9 (1.3) & \multicolumn{3}{l|}{Default HPOlib} \\ \hline
HyperOpt & 1.21979 (0.496) & 0.39065 (0.208) & \textbf{33.3} (0.3)& \multicolumn{3}{l|}{Default HPOlib} \\ \hline
Spearmint & 2.13990 (0.659) & 0.59980 (0.866) & 8244.5 (105.8) & \multicolumn{3}{l|}{Def. HPOlib, MCMC (10 particles, 100 burn-in)} \\ \hline
DiceOptim & \textbf{0.06008} (0.063) & 0.06004 (0.063) & 1266.6 (316.4) & \multicolumn{3}{l|}{ML, Genoud 50 pop., 20 gen., 5 wait, 5 burn-in} \\ \hline
BayesOpt1 & 0.06476 (0.047) & \textbf{0.02385} (0.048) & 39.0 (0.04) & \multicolumn{3}{l|}{MAP, DIRECT+BOBYQA every 20 iterations.} \\ \hline
BayesOpt2 & 1.05608 (0.831) & 0.04769 (0.058) & 4093.3 (55.7) & \multicolumn{3}{l|}{MCMC (10 particles, 100 burn-in)} \\ \hline
\end{tabular}}
\caption{Mean (and standard deviation) optimization gap and time (in seconds) for 10 runs for different number of samples (including initial design) to illustrate the convergence of each method. DiceOptim and BayesOpt1 used 5, 5 and 10 points for the initial design, while Spearmint and BayesOpt2 used only 2 points.}
\label{fig:time}
\end{table}
Another main objective has been \emph{flexibility}. The user can easily select among different algorithms, hyperpriors, kernels or mean functions. Currently, the library supports continuous, discrete and categorical optimization. We also provide a method for optimization in high-dimensional spaces \citep{ZiyuWang2013}. The initial set of points (\emph{initial design}, see Figure \ref{fig:algobasic}) can be selected using well known methods such as latin hypercube sampling or Sobol sequences. BayesOpt relies on a factory-like design for many of the components of the optimization process. This way, the components can be selected and combined at runtime while maintaining a simple structure. This has two advantages. First, it is very easy to create new components. For example, a new kernel can be defined by inheriting the abstract kernel or one of the existing kernels. Then, the new kernel is automatically integrated in the library. Second, inspired by the \emph{GPML} toolbox by \cite{Rasmussen2010}, we can easily combine different components, like a linear combination of kernels or multiple criteria. This can be used to optimize a function considering an additional cost for each sample, for example a moving sensor \citep{Marchant2012}. BayesOpt also implements \emph{metacriteria algorithms}, like the bandit algorithm GP-Hedge by \cite{Hoffman2011} that can be used to automatically select the most suitable criteria during the optimization. Examples of these combinations can be found in Section \ref{sec:params}.
The third objective is \emph{correctness}. For example, the library is thread and exception safe, allowing parallelized calls. Numerically delicate parts, such as the GP-Hedge algorithm, have been implemented with variations of the actual algorithm to avoid over- or underflow issues. The library internally uses NLOPT by \cite{Johnson} for the inner optimization loops (optimize criteria, learn kernel parameters, etc.).
The library and the online documentation can be found at:
\centerline{\url{https://bitbucket.org/rmcantin/bayesopt/}}
\subsection{Compatibility}
BayesOpt has been designed to be highly compatible in many platforms and setups. It has been tested and compiled in different operating systems (Linux, Mac OS, Windows), with different compilers (Visual Studio, GCC, Clang, MinGW). The core of the library is written in C++, however, it provides interfaces for C, Python and Matlab/Octave.
\subsection{Using the library}
There is a common API implemented for several languages and programming paradigms. Before running the optimization we need to follow two simple steps:
\subsubsection{Target function definition}
Defining the function that we want to optimize can be achieved in two ways. We can directly send the function (or a pointer) to the optimizer based on a \emph{function template}. For example, in C/C++:
\begin{lstlisting}[language=C]
double my_function (unsigned int n_query, const double *query,
double *gradient, void *func_data);
\end{lstlisting}
The gradient has been included for future compatibility. Python, Matlab and Octave interfaces define a similar template function.
For a more object oriented approach, we can inherit the abstract module and define the virtual methods. Using this approach, we can also include nonlinear constraints in the \emph{checkReachability} method. This is available for C++ and Python. For example, in C++:
\begin{lstlisting}[language=C++]
class MyOptimization: public bayesopt::ContinuousModel {
public:
MyOptimization(size_t dim, bopt_params param): ContinuousModel(dim,param) {}
double evaluateSample(const boost::numeric::ublas::vector<double> &query)
{ // My function here };
bool checkReachability(const boost::numeric::ublas::vector<double> &query)
{ // My constraints here };
};
\end{lstlisting}
\subsubsection{BayesOpt parameters}
\label{sec:params}
The parameters are defined in the \texttt{bopt\_params} struct --or a dictionary in Python--. The details of each parameter can be found in the included documentation. The user can define expressions to combine different functions (kernels, criteria, etc.). All the parameters have a default value, so it is not necessary to define all of them. For example, in Matlab:
\begin{lstlisting}[language=Matlab]
par.surr_name = 'sStudentTProcessNIG';
par.crit_name = 'cHedge(cEI,cLCB,cThompsonSampling)';
par.kernel_name = 'kSum(kMaternISO3,kRQISO)';
par.kernel_hp_mean = [1, 1]; par.kernel_hp_std = [5, 5];
par.l_type = 'L_MCMC';
par.sc_type = 'SC_MAP';
par.n_iterations = 200;
par.epsilon = 0.1;
\end{lstlisting}
\end{document}
\begin{document}
\title{Lebesgue Constants Arising in\\
a Class of Collocation Methods\thanks{
May 6, 2015, revised September 12, 2015.
The authors gratefully acknowledge support by
the Office of Naval Research under grants N00014-11-1-0068 and
N00014-15-1-2048, and by the National Science Foundation under
grants DMS 1522629 and CBET-1404767.}}
\begin{abstract}
Estimates are obtained for the
Lebesgue constants associated with
the Gauss quadrature points on $(-1, +1)$ augmented by the point $-1$
and with the Radau quadrature points on either $(-1, +1]$ or $[-1, +1)$.
It is shown that the Lebesgue constants are $O(\sqrt{N})$,
where $N$ is the number of quadrature points.
These point sets arise in the estimation of the
residual associated with recently developed orthogonal collocation
schemes for optimal control problems.
For problems with smooth solutions,
the estimates for the Lebesgue constants can imply an exponential decay
of the residual in the collocated problem as a function of the number of
quadrature points.
\end{abstract}
\begin{keywords}
Lebesgue constants, Gauss quadrature, Radau quadrature,
collocation methods
\end{keywords}
\pagestyle{myheadings} \thispagestyle{plain}
\markboth{W. W. HAGER, H. HOU, AND A. V. RAO}
{LEBESGUE CONSTANTS}
\section{Introduction}\label{introduction}
Recently, in \cite{ DarbyHagerRao11,DarbyHagerRao10, FrancolinHagerRao13,
GargHagerRao11b, GargHagerRao11a, GargHagerRao10a, PattersonHagerRao14},
a class of methods was developed for solving optimal control problems
using collocation at either Gauss or Radau quadrature points.
In \cite{HagerHouRao15b} and \cite{HagerHouRao15c} an
exponential convergence rate is established for these schemes.
The analysis is based on a bound for the inverse of a linearized operator
associated with the discretized problem, and an
estimate for the residual one gets when substituting the solution to the
continuous problem into the discretized problem.
This paper focuses on the estimation of the residual.
We show that the residual in the sup-norm is bounded by the sup-norm distance
between the derivative of the solution to the continuous problem and
the derivative of the interpolant of the solution.
By Markov's inequality \cite{Markov1916},
this distance can be bounded in terms of the Lebesgue
constant for the point set and the error in best polynomial approximation.
A classic result of Jackson \cite{jackson} gives an estimate for
the error in best approximation.
The Lebesgue constant that we need to analyze corresponds to the
roots of a Jacobi polynomial on $(-1, +1)$
augmented by either $\tau = +1$ or $\tau = -1$.
The effects of the added endpoints were analyzed by
V\'{e}rtesi in \cite{Vertesi81}.
For either the Gauss quadrature points
on $(-1, +1)$ augmented by $\tau = +1$ or the Radau quadrature points on
$(-1, +1]$ or on $[-1, +1)$, the bound given in \cite[Thm. 2.1]{Vertesi81}
for the Lebesgue constants is $O(\log (N) \sqrt{N})$,
where $N$ is the number of quadrature points.
We sharpen this bound to $O(\sqrt{N})$.
To motivate the relevance of the Lebesgue constant to collocation methods,
let us consider the scalar first-order differential equation
\begin{equation} \label{de}
\dot{x}(\tau)=f\left(x(\tau)\right), \quad \tau \in [-1, +1],
\quad x(-1) = x_0,
\end{equation}
where $f : \mathbb{R}\rightarrow\mathbb{R}$.
In a collocation scheme for (\ref{de}),
the solution $x$ to the differential equation
(\ref{de}) is approximated by a polynomial, again denoted $x$,
that is required to satisfy the differential
equation at the collocation points.
Let us consider a scheme based on collocation at the Gauss quadrature
points $-1 < \tau_1 < \tau_2 < \ldots < \tau_N < +1$, the roots of the
Legendre polynomial of degree $N$.
In addition, we introduce the noncollocated point $\tau_0 = -1$.
The discretized problem is to find $x \in \C{P}_{N}$,
the space of polynomials of degree at most $N$, such that
\begin{equation}\label{collocated}
\dot{x}(\tau_k) = f(x(\tau_k)), \quad 1 \le k \le N,
\quad x(-1) = x_0.
\end{equation}
A polynomial of degree at most $N$ is uniquely specified by
$N+1$ parameters such as its coefficients.
The $N$ collocation equations and the boundary condition in (\ref{collocated})
yield $N+1$ equations for the polynomial.
The convergence of a solution of the collocated problem (\ref{collocated})
to a solution of the continuous problem (\ref{de})
ultimately depends on how accurately a polynomial interpolant of a
continuous solution satisfies the discrete equations (\ref{collocated}).
The Lagrange interpolation polynomials for the point set
$\{\tau_0, \tau_1, \ldots , \tau_N\}$ are defined by
\begin{equation}\label{lag}
L_i(\tau)=\prod_{\substack{j=0\\ j\neq i}}^N\frac{\tau-\tau_j}
{\tau_i-\tau_j}, \quad 0 \le i \le N.
\end{equation}
The interpolant $x^N$ of a solution $x$ to (\ref{de}) is given by
\[
x^N (\tau) = \sum_{j=0}^N x (\tau_j) L_j(\tau).
\]
The residual in (\ref{collocated})
associated with a solution of (\ref{de}) is the vector with components
\begin{equation}\label{res}
r_0 = x^N(-1) - x_0, \quad r_k = \dot{x}^N(\tau_k) - f(x^N(\tau_k)), \quad
1 \le k \le N.
\end{equation}
For the Gauss scheme,
$r_0 = 0$ since $x$ satisfies the boundary condition in (\ref{de}).
The potentially nonzero components of the residual are $r_k$, $1 \le k \le N$.
As we show in Section~\ref{residual}, the residual can be bounded
in terms of a Lebesgue constant and the error in best approximation for $x$ and
its derivative.
The Lebesgue constant $\Lambda_N$ relative to the point set
$\{\tau_0, \tau_1, \ldots , \tau_N\}$ is defined by
\begin{equation}\label{ln1}
\Lambda_N=\max \left\{
\sum_{j=0}^N\left|L_j(\tau)\right|: \tau\in[-1,1] \right\} .
\end{equation}
The article \cite{Brutman97} of Brutman gives a comprehensive survey on the
analysis of Lebesgue constants, while the book \cite{Mastroianni08}
of Mastroianni and Milovanovi\'{c} covers more recent results.
The paper is organized as follows.
In Section~\ref{residual}, we show how the Lebesgue constant enters
into the residual associated with the discretized problem (\ref{collocated}).
Section~\ref{szego} summarizes results of Szeg\H{o} used in the analysis.
Section~\ref{gauss+} analyzes the Lebesgue constant for the
Gauss quadrature points augmented by $\tau = -1$,
while Section~\ref{radau+} analyzes Radau quadrature points.
Finally, Section~\ref{tight} examines the tightness of the estimates
for the Lebesgue constants.
{\bf Notation.}
$\mathcal{P}_N$ denotes the space of polynomials of degree at most $N$
and $\|\cdot\|$ denotes the sup-norm on the interval $[-1, +1]$.
The Jacobi polynomial $P_N^{(\alpha, \beta)}(\tau)$,
$N \ge 1$, is an $N$-th degree polynomial, and for fixed $\alpha > -1$ and
$\beta > -1$, the polynomials are orthogonal on the interval $[-1, +1]$
relative to the weight function $(1-\tau)^\alpha(1+\tau)^\beta$.
$P_N$ stands for the Jacobi polynomial $P_N^{(0,0)}$, or equivalently,
the Legendre polynomial of degree $N$.
\section{Analysis of the residual}
\label{residual}
As discussed in the introduction,
a key step in the convergence analysis of collocation schemes
is the estimation of the residual defined in (\ref{res}).
The convergence of a discrete solution to the
solution of the continuous problem ultimately depends on
how quickly the residual approaches 0 as $N$ tends to infinity;
for example, see Theorem~3.1 in \cite{DontchevHager97},
Proposition~5.1 in \cite{Hager99c}, or Theorem~2.1 in \cite{Hager02b}.
Since a solution $x$ of (\ref{de}) satisfies the differential equation
on the interval $[-1, +1]$, it follows that
$\dot{x}(\tau_k) = f(x(\tau_k))$, $1 \le k \le N$.
Hence, the potentially nonzero components of the residual can be expressed
$r_k = \dot{x}^N (\tau_k) - \dot{x}(\tau_k)$, $1 \le k \le N$.
In other words, the size of the residual depends on the difference between
the derivative of the interpolating polynomial at the collocation
points and the derivative of the continuous solution at the collocation points.
Hence, let us consider the general problem of estimating the
difference between the derivative of an interpolating polynomial on the
point set $\tau_0 < \tau_1 < \ldots < \tau_N$ contained in $[-1, +1]$
and the derivative of the original function.
\begin{proposition}\label{L1}
If $x$ is continuously differentiable on $[-1, +1]$, then
\begin{eqnarray}
\left\|\dot{x}-\dot{x}^N\right\|
&\le& \left(1+2N^2\right)
\inf_{q \in \mathcal{P}_{N}}\left\|\dot{x}-\dot{q}\right\| \nonumber \\
&& \quad + N^2(1+\Lambda_N)
\inf_{p \in \mathcal{P}_{N}}\left\|x-p\right\|
\label{diffy}
\end{eqnarray}
where $x^N \in \C{P}_N$ satisfies $x^N(\tau_k) = x(\tau_k)$,
$0 \le k \le N$, and $\Lambda_N$ is the Lebesgue constant relative to
the point set $\{ \tau_0, \tau_1, \ldots, \tau_N \}$.
\end{proposition}
\begin{proof}
Given $p \in \mathcal{P}_N$, the triangle inequality gives
\begin{equation}
\left\|\dot{x}-\dot{x}^N\right\|\leq \|\dot{x}-\dot{p}\|+\left\|\dot{p}
-\dot{x}^N\right\|.\label{dify}
\end{equation}
By Markov's inequality \cite{Markov1916}, we have
\begin{eqnarray}
\left\|\dot{p}-\dot{x}^N\right\|
&\leq& N^2 \left\|p-x^N\right\|=N^2 \left\|\sum_{i=0}^N(p(\tau_i)-x(\tau_i))
L_i(\tau)\right\|\nonumber \\
&\leq & N^2 \Lambda_N\max_{0\leq i\leq N}|p(\tau_i)-x(\tau_i)|
\le N^2\Lambda_N \|p-x\|. \label{qminusy}
\end{eqnarray}
Let $q \in \C{P}_{N}$ with $q(-1) = x(-1)$.
Again, by the triangle and Markov inequalities, we have
\begin{eqnarray}
\|\dot{x}-\dot{p}\| &\le& \|\dot{x} - \dot{q} \| + \|\dot{q} - \dot{p}\| \le
\|\dot{x} - \dot{q} \| + N^2 \|q - p\| \nonumber \\
&\le&
\|\dot{x} - \dot{q} \| + N^2 (\|q - x\| + \|x - p\|). \label{h71}
\end{eqnarray}
By the fundamental theorem of calculus,
\begin{equation}\notag
\left|q(t)-x(t)\right|=\left|\int_{-1}^{t}
\left(\dot{q}(s)-\dot{x}(s)\right)ds\right|\leq \int_{-1}^{t}
\left|\dot{q}(s)-\dot{x}(s)\right|ds\leq 2\|\dot{q}-\dot{x}\|.
\end{equation}
We combine this with (\ref{h71}) to obtain
\begin{equation}\label{h72}
\|\dot{x}-\dot{p}\| \le (1 + 2N^2) \|\dot{x} - \dot{q} \| + N^2 \|x - p\| .
\end{equation}
To complete the proof, combine (\ref{dify}), (\ref{qminusy}), and (\ref{h72})
and exploit the fact that
\[
\left\{\dot{q}: q(-1) = x(-1), \;\; q \in \C{P}_N \right\} =
\left\{\dot{q}: q \in \C{P}_N \right\}.
\]
\end{proof}
An estimate for the right side of \eqref{diffy} follows from results
on best uniform approximation by polynomials, which
originate from work of Jackson \cite{jackson}.
For example, the following result employs an estimate from Rivlin's
book \cite{Rivlin1969}.
\begin{lemma}\label{L2}
If $x$ has $m$ derivatives on $[-1, +1]$ and $N > m$, then
\begin{equation}\label{jackson}
\inf_{p\in \mathcal{P}_N}\|x-p\|\leq
\left( \frac{12}{m+1} \right) \left( \frac{6e}{N} \right)^m
\|x^{(m)}\|,
\end{equation}
where $x^{(m)}$ denotes the $m$-th derivative of $x$.
\end{lemma}
\begin{proof}
It is shown in \cite[Thm. 1.5]{Rivlin1969} that
\begin{equation}\label{yp}
\inf_{p\in \mathcal{P}_N}\left\|x-p\right\|\leq
\left( \frac{6}{m+1} \right) \left( \frac{6e}{N} \right)^m
\omega_m \left(\frac{1}{N-m}\right),
\end{equation}
where $\omega_m$ is the modulus of continuity of $x^{(m)}$.
By the definition of the modulus of continuity, we have
\[
\omega_m\left(\frac{1}{N-m}\right)=\sup\left\{\left|x^{(m)}(\tau_1)
-x^{(m)}(\tau_2)\right|: {\tau_1, \tau_2 \in[-1,1], |\tau_1-\tau_2|
\leq
\frac{1}{N-m}}\right\}.
\]
Since
\[
|x^{(m)}(\tau_1)-x^{(m)}(\tau_2) |\leq 2
\|x^{(m)}\| ,
\]
(\ref{jackson}) follows from (\ref{yp}).
\end{proof}
If $\Lambda_N = O(N)$ and $m \ge 4$, then
Proposition~\ref{L1} and Lemma~\ref{L2} imply that the components
of the residual approach zero as $N$ tends to infinity.
Moreover, if $x$ is infinitely differentiable and
there exists a constant $c$ such that $\|x^{(m)}\| \le c^m$,
then we take $m = N-1$ in Lemma~\ref{L2} to obtain
\[
\inf_{p\in \mathcal{P}_N}\|x-p\|\leq
\left( \frac{2}{ec} \right) \left( \frac{6ec}{N} \right)^N.
\]
Hence, the convergence is extremely fast due to the $1/N^N$ factor.
\section{Some results of Szeg\H{o}}
\label{szego}
We now summarize several results developed by Szeg\H{o} in \cite{Szego1939}
for Jacobi polynomials that are used in the analysis.
The page and equation numbers that follow refer to the 2003 edition
of Szeg\H{o}'s book published by the American Mathematical Society.
First, at the bottom of page 338, Szeg\H{o} makes the following observation:
\begin{theorem}\label{jacobi}
The Lebesgue constant for the roots of the Jacobi polynomial
$P_N^{(\alpha, \beta)}(\tau)$ is $O(N^{0.5+\gamma})$
if $\gamma := \max(\alpha, \beta) > -1/2$,
while it is $O(\log N)$ if $\gamma \le-1/2$.
\end{theorem}
For the Gauss quadrature points, $\alpha = \beta = 0$, $\gamma = 0$,
and $\Lambda_N = O(\sqrt{N})$.
The result that we state as Theorem~\ref{jacobi}
is based on a number of additional properties of Jacobi polynomials
which are useful in our analysis.
The following identity is a direct consequence of the Rodrigues formula
\cite[p. 67]{Szego1939} for $P_N^{(\alpha,\beta)}$.
\begin{proposition}\label{flip}
For any $\alpha$ and $\beta \in \mathbb{R}$, we have
\begin{equation}\label{eq8}
P_N^{(\alpha, \beta)}(\tau)=(-1)^NP_N^{(\beta, \alpha)}(-\tau)
\quad \mbox{for all } \tau \in [-1, +1].
\end{equation}
\end{proposition}
The following proposition provides some bounds for Jacobi polynomials.
\begin{proposition}\label{pro1}
For any $\alpha$ and $\beta \in \mathbb{R}$
and any fixed constant $c_1 > 0$,
we have
\[
P_N^{(\alpha,\beta)}(\cos\theta)=\left\{
\begin{array}{clcccl}
O\left(N^\alpha\right) &\mbox{if } \theta \in
[&0&,& c_1N^{-1} &],\\[.05in]
\theta^{-\alpha-0.5}O\left(N^{-1/2}\right)
&\mbox{if } \theta \in [ &c_1N^{-1} &, & \pi/2 &],\\[.05in]
(\pi-\theta)^{-\beta-0.5}O\left(N^{-1/2}\right)
&\mbox{if } \theta \in [&\pi/2&,& \pi- c_1N^{-1}&],\\[.05in]
O\left(N^\beta\right) &\mbox{if } \theta \in [&\pi- c_1N^{-1}&,& \pi&].
\end{array}
\right.
\]
\end{proposition}
\begin{proof}
The bounds for $\theta \in [0, c_1N^{-1}]$ and for
$\theta \in [c_1N^{-1}, \pi/2]$ appear in \cite[(7.32.5)]{Szego1939}.
If $\theta \in \left[\pi/2, \pi\right]$, then
$\pi-\theta \in \left[0, \pi/2 \right]$ and by \eqref{eq8},
\begin{equation}\label{h1}
P_N^{(\alpha, \beta)}(\cos \theta)=P_N^{(\alpha, \beta)}(-\cos(\pi- \theta))
=(-1)^NP_N^{(\beta, \alpha)}(\cos(\pi- \theta)).
\end{equation}
Hence, for $\theta \in [\pi/2, \pi]$,
the first two estimates in the proposition applied to the right
side of (\ref{h1}) yield the last two estimates.
\end{proof}
The next proposition provides an estimate for the derivative of a
Jacobi polynomial at a zero.
\begin{proposition}\label{pro2}
If $\alpha>-1$ and $\beta>-1$, then there exist constants
$\gamma_2 \ge \gamma_1 > 0$, depending only on $\alpha$ and $\beta$, such that
\[
\gamma_1 i^{-\beta - 1.5} N^{\beta + 2} \le
\left|\dot{P}_N^{(\alpha, \beta)}(\tau_i)\right| \le
\gamma_2 i^{-\beta - 1.5} N^{\beta + 2}
\]
whenever $\tau_i \le 0$ where
$\tau_1 < \tau_2 < \ldots < \tau_N$ are the zeros of $P_N^{(\alpha, \beta)}$
(the smallest zero is indexed first).
Moreover, if $\theta_i \in [0, \pi]$ is defined by
$\cos \theta_i = \tau_i$, then there exist constants
$\gamma_4 \ge \gamma_3 > 0$, depending only on $\alpha$ and $\beta$, such that
\begin{equation}\label{h9}
\gamma_3 \sqrt{N} (\pi - \theta_i)^{-\beta - 1.5} \le
\left|\dot{P}_N^{(\alpha, \beta)}(\tau_i)\right| \le
\gamma_4 \sqrt{N} (\pi -\theta_i)^{-\beta - 1.5}
\end{equation}
whenever $\theta_i \in [\pi/2, \pi]$.
\end{proposition}
\begin{proof}
In \cite[(8.9.2)]{Szego1939}, it is shown that there exist
$\gamma_2 \ge \gamma_1 > 0$, depending only on $\alpha$ and $\beta$, such that
\begin{equation}\label{h7}
\gamma_1 i^{-\beta - 1.5} N^{\beta + 2} \le
\left|\dot{P}_N^{(\beta, \alpha)}(\sigma_i)\right| \le
\gamma_2 i^{-\beta - 1.5} N^{\beta + 2}
\end{equation}
whenever $\sigma_i \ge 0$ where
$\sigma_1 > \sigma_2 > \ldots > \sigma_N$ are the zeros of
$P_N^{(\beta, \alpha)}$ (the largest zero is indexed first).
By Proposition~\ref{flip}, $\tau_i$ is a zero of $P_N^{(\alpha,\beta)}$
if and only if $-\tau_i$ is a zero of $P_N^{(\beta,\alpha)}$.
Hence, the zeros of $P_N^{(\beta,\alpha)}$ are
$-\tau_1 > -\tau_{2} > \ldots > -\tau_N$.
Moreover,
\begin{equation}\label{h7.5}
\dot{P}_N^{(\alpha,\beta)}(\tau) = \pm
\dot{P}_N^{(\beta,\alpha)}(-\tau).
\end{equation}
The bound given in the proposition for
$|\dot{P}_N^{(\alpha,\beta)}(\tau_i)|$ with $\tau_i \le 0$ is exactly the
bound (\ref{h7}) for
$|\dot{P}_N^{(\beta,\alpha)}(\sigma_i)|$ with $\sigma_i \ge 0$.
It is shown in \cite[(8.9.7)]{Szego1939}, that there exist constants
$\gamma_4 \ge \gamma_3 > 0$, depending only on $\alpha$ and $\beta$, such that
\begin{equation}\label{h8}
\gamma_3 \sqrt{N} \phi_i^{-\beta - 1.5} \le
\left|\dot{P}_N^{(\beta, \alpha)}(\sigma_i)\right| \le
\gamma_4 \sqrt{N} \phi_i^{-\beta - 1.5}
\end{equation}
whenever $\phi_i \in [0, \pi/2]$ where $\cos \phi_i = \sigma_i$.
Since $\cos \phi_i = \sigma_i = -\tau_i = \cos (\pi - \theta_i)$,
it follows that $\phi_i = \pi - \theta_i$, and
(\ref{h7.5}) and (\ref{h8}) yield (\ref{h9}).
\end{proof}
\section{Lebesgue constant for Gauss quadrature points augmented by $-1$}
\label{gauss+}
In this section we estimate the Lebesgue constant for
the Gauss quadrature points augmented by $\tau_0 = -1$.
Due to the symmetry of the Gauss quadrature points, the same
estimate holds when the Gauss quadrature points are augmented by $+1$
instead of $-1$.
The Gauss quadrature points are the zeros of the Jacobi polynomial
$P_N^{(0, 0)}(\tau)$, which is abbreviated as $P_N(\tau)$.
By Theorem~\ref{jacobi}, the Lebesgue constant for the Gauss
quadrature points themselves is $O(\sqrt{N})$.
The effect of adding the point $\tau_0 = -1$ to the Gauss quadrature
points is not immediately clear due to the new factor $(1 + \tau_i)$
in the denominator of the Lagrange polynomials;
this factor can approach 0 since roots of $P_N$
approach $-1$ as $N$ tends to infinity.
Nonetheless, with a careful grouping of terms,
Szeg\H{o}'s bound in Theorem~\ref{jacobi}
for the Gauss quadrature points can be extended to handle the new
point $\tau_0 = -1$.
\begin{theorem}\label{gausstheom}
The Lebesgue constant for the point set consisting of the Gauss
quadrature points $-1 < \tau_1 < \tau_2 < \ldots < \tau_N < +1$
$($the zeros of $P_N)$ augmented with $\tau_0 = -1$ is $O(\sqrt{N})$.
\end{theorem}
\begin{proof}
Define
\[l(\tau)=(\tau-\tau_1)(\tau-\tau_2)\dots (\tau-\tau_N),
\quad \mbox{and}\quad L(\tau)=(\tau+1)l(\tau).
\]
The derivative of $L(\tau)$ at $\tau_i$ is
\[
\dot{L}(\tau_i)=l(\tau_i)+(\tau_i+1)\dot{l}(\tau_i)=\left\{
\begin{array}{cl}\displaystyle
l(-1), & i = 0, \\[.1in]
(\tau_i+1)\dot{l}(\tau_i), &i> 0.
\end{array}
\right.
\]
Hence, the Lagrange polynomials $L_i(\tau)$ associated with the
point set $\{\tau_0 , \tau_1, \ldots, \tau_N\}$ can be expressed as
\begin{equation}\label{Li}
L_i(\tau)=\frac{L(\tau)}{\dot{L}(\tau_i)(\tau-\tau_i)}=\left\{
\begin{array}{cl}
l(\tau)/l(-1), &i=0, \\[.1in]
\displaystyle\frac{L(\tau)}{(\tau_i+1)\dot{l}(\tau_i)(\tau-\tau_i)},
& i> 0.
\end{array}
\right.
\end{equation}
Since $P_N$ is a multiple of $l$ (it has the same zeros), it follows that
\[
L_i(\tau)=\left\{
\begin{array}{cl}
P_N(\tau)/P_N(-1), &i=0,\\[.1in]
\displaystyle
\frac{(\tau+1)P_N(\tau)}{(\tau_i+1)\dot{P}_N(\tau_i)(\tau-\tau_i)},
&i > 0.
\end{array}
\right.
\]
By \cite[(7.21.1)]{Szego1939},
$|P_N(\tau)| \le 1$ for all $\tau \in [-1, +1]$, and by
\cite[(4.1.4)]{Szego1939}, $P_N(-1) = (-1)^N$, so $|P_N(-1)| = 1$.
We conclude that $|L_0 (\tau)| \le 1$ for all $\tau \in [-1, +1]$.
Hence, the proof is complete if
\begin{equation}\label{h3}
\max \left\{ \sum_{i=1}^N|L_i(\tau)| : \tau \in[-1, 1] \right\} = O(\sqrt{N}) .
\end{equation}
For any $\tau \in [-1, +1]$, the integers $i \in [1, N]$ are partitioned
into the four disjoint sets
\begin{eqnarray*}
\C{I}_1 &=& \{ i \in [1,N]: \tau_i \ge 0 \}, \\
\C{I}_2 &=& \{ i \in [1,N]: -1 < \tau_i < 0, \; \tau_i > \tau \}, \\
\C{I}_3 &=& \{ i \in [1,N]: -1 < \tau_i < 0, \; \tau_i \le \tau, \;
\tau - \tau_i \le \tau_i + 1 \}, \\
\C{I}_4 &=& \{ i \in [1,N]: -1 < \tau_i < 0, \; \tau_i \le \tau, \;
\tau - \tau_i > \tau_i + 1 \}.
\end{eqnarray*}
Let $\C{I}_{123}$ denote $\C{I}_1 \cup \C{I}_2 \cup \C{I}_3$.
Observe that for any $i \in \C{I}_{123}$ and $\tau \in [-1, +1]$,
$(\tau+1)/(\tau_i + 1) \le 2$.
Consequently, for all $i \in \C{I}_{123}$,
\[
|L_i (\tau)| =
\left| \frac{(\tau+1)P_N(\tau)}{(\tau_i+1)\dot{P}_N(\tau_i)(\tau-\tau_i)}
\right| \le
\frac{2|P_N(\tau)|}{|\dot{P}_N(\tau_i)(\tau-\tau_i)|} .
\]
This bound together with Theorem~\ref{jacobi} imply that
\[
\sum_{i \in \C{I}_{123}} |L_i(\tau)| \le
\sum_{i \in \C{I}_{123}}
\frac{2|P_N(\tau)|}{|\dot{P}_N(\tau_i)(\tau-\tau_i)|} \le
2 \sum_{i=1}^N
\frac{|P_N(\tau)|}{|\dot{P}_N(\tau_i)(\tau-\tau_i)|} = O(\sqrt{N})
\]
since the terms in the final sum are the Lagrange
polynomials for the Gauss quadrature points.
To complete the proof, we need to analyze the terms in (\ref{h3})
associated with the indices in $\C{I}_4$.
These terms are more difficult to analyze since $\tau_i + 1$
in the denominator of $L_i$ could approach 0 while $\tau +1$ in
the numerator remains bounded away from 0.
For $i \in \C{I}_4$, we have
\[
\tau + 1 = (\tau - \tau_i) + (\tau_i + 1) \le 2 (\tau - \tau_i)
\]
since $\tau - \tau_i > \tau_i + 1$.
Hence,
\[
|L_i (\tau)| \le \frac{2|P_N(\tau)|}{|(\tau_i + 1) \dot{P}_N (\tau_i)|} \le
\frac{2}{|(\tau_i + 1) \dot{P}_N (\tau_i)|}
\]
since $|P_N(\tau)| \le 1$ for all $\tau \in [-1, +1]$
by \cite[(7.21.1)]{Szego1939}.
It follows that
\begin{equation}\label{h6}
\sum_{i \in \C{I}_{4}} |L_i(\tau)| \le
\sum_{i \in \C{I}_{4}}
\frac{2}{|(\tau_i + 1) \dot{P}_N (\tau_i)|} \le
\sum_{-1 < \tau_i < 0 }
\frac{2}{|(\tau_i + 1) \dot{P}_N (\tau_i)|} .
\end{equation}
Given $\theta \in [\pi/2, \pi]$, define $\phi = \pi - \theta$.
Observe that
\[
\left|\frac{\phi^2}{1+\cos \theta}\right|
=\frac{\phi^2}{2\cos^2(\theta/2)}
=\frac{2(\phi/2)^2}{\sin^2 (\phi/2)}
\leq \max_{x\in [0, \pi/4]}
\frac{2x^2}{\sin^2 x} =\frac{\pi^2}{4}.
\]
Hence, for $\theta \in [\pi/2, \pi]$, we have
\begin{equation}\label{h5}
1 + \cos \theta \ge \left( \frac{4}{\pi^2} \right) \phi^2 =
\frac{4}{\pi^2} (\pi - \theta)^2 .
\end{equation}
By the bounds \cite[(6.21.5)]{Szego1939} for the roots of the
Jacobi polynomial $P_N^{(\alpha, \beta)}$ when
$\alpha$ and $\beta \in [-0.5, +0.5]$, it follows that
\begin{equation}\label{*}
\left(\frac{2i-1}{2N+1}\right) \pi \leq \pi-\theta_i
\leq \left(\frac{2i}{2N+1}\right) \pi, \quad
1 \le i \le N,
\end{equation}
where $\cos \theta_i = \tau_i$.
This implies the lower bound
\begin{equation}\label{h4}
\pi - \theta_i \ge
\left(\frac{2i-1}{2N+1}\right) \pi \ge
\left( \frac{i}{3N} \right)\pi > \frac{i}{N} .
\end{equation}
We combine (\ref{h5}) and (\ref{h4}) to obtain
\begin{equation}\label{eq3}
1+\tau_i \ge \frac{4}{\pi^2}(\pi-\theta_i)^2\geq\frac{4}{\pi^2}
\left(\frac{i}{N}\right)^2.
\end{equation}
By Proposition~\ref{pro2},
\[
|\dot{P}_N(\cos \theta_i )| \ge \gamma_1 i^{-1.5} N^2.
\]
This lower bound for the derivative and the lower bound (\ref{eq3}) for
the root imply that
\[
\frac{1}{(1+\tau_i)|\dot{P}_N(\tau_{i})|} \le
\left( \frac{\pi^2}{4 \gamma_1} \right) i^{-1/2} .
\]
Hence, we obtain the following bound for the $\C{I}_4$ sum in (\ref{h6}):
\[
\sum_{-1<\tau_i<0}\frac{2}{(1+\tau_i)|\dot{P}_N(\tau_{i})|} \le
\left( \frac{\pi^2}{2 \gamma_1} \right)
\sum_{i = 1}^N i^{-1/2} \le
\left( \frac{\pi^2}{2 \gamma_1} \right)
\int_0^N i^{-1/2} di = O(\sqrt{N}) .
\]
This bound inserted in (\ref{h6}) completes the proof.
\end{proof}
\section{Lebesgue constants for the Radau quadrature points}
\label{radau+}
Next, we estimate the Lebesgue constant for the Radau quadrature scheme.
There are two versions of the Radau quadrature points depending on whether
$\tau_1 = -1$ or $\tau_N = +1$.
Since these two schemes have quadrature points that are the
negatives of one another, the Lebesgue constants are the same.
The analysis is carried out for the case $\tau_N = +1$.
In this case, the Radau quadrature points are the $N-1$ roots of
$P_{N-1}^{(1,0)}$ augmented by $\tau_N = 1$.
Szeg\H{o} shows that the Lebesgue constant for the roots of
$P_{N-1}^{(1,0)}$ is $O(N^{3/2})$.
We show that when the quadrature point $\tau_N = 1$ is included,
the Lebesgue constant drops to $O(\sqrt{N})$.
The analysis requires an estimate for the location of the zeros of
$P_{N-1}^{(1,0)}$.
Our estimate is based on some relatively recent results on
interlacing properties for the zeros of Jacobi polynomials obtained by
Driver, Jordaan, and Mbuyi in \cite{DriverJordaanMbuyi2008}.
Let $\tau_i'$ and $\tau_i''$, $i\geq 1$, be zeros of
$P_{N-1}$ and $P_{N}$ respectively, arranged in increasing order.
Applying \cite[Thm. 2.2]{DriverJordaanMbuyi2008}, we have
\[
\tau_i'' < \tau_i < \tau_{i}' ,
\]
$i = 1, 2, \ldots, N-1$, where
$-1 < \tau_1 < \tau_2 < \ldots < \tau_{N-1} < +1$ are the zeros of
$P_{N-1}^{(1,0)}$.
Let $\theta_i \in [0, \pi]$ be defined by $\cos \theta_i = \tau_i$.
By the estimate (\ref{*}) for the zeros of $P_N$, it follows that
the zeros of $P_{N-1}^{(1,0)}$ have the property that
\begin{equation}\label{zeros}
\left( \frac{2i-1}{2N-1} \right) \pi < \theta_{N-i} <
\left( \frac{2(i+1)}{2N+1} \right) \pi, \quad
1 \le i \le N-1.
\end{equation}
When $i$ is replaced by $N-i$, these bounds become
\begin{equation}\label{zeros*}
\left( \frac{2i-1}{2N+1} \right) \pi < \pi - \theta_{i} <
\left( \frac{2i}{2N-1} \right) \pi, \quad
1 \le i \le N-1.
\end{equation}
Together, (\ref{zeros}) and (\ref{zeros*}) imply that
\begin{equation}\label{phibounds}
\pi - \theta_{i} > i/N \quad \mbox{and} \quad \theta_{N-i} > i/N,
\quad 1 \le i \le N-1;
\end{equation}
moreover, taking into account both the upper and lower bounds, we have
\begin{eqnarray}
\theta_i - \theta_{i+1} &<& \left( \frac{4(i+N)+2N+1}{4N^2 - 1}\right) \pi
\le \left( \frac{10N - 7}{4N^2 - 1} \right) \pi \nonumber \\
&<& \left( \frac{5(2N - 1)}{4N^2 - 1} \right) \pi < \frac{2.5\pi}{N},
\quad 1 \le i \le N-2.
\label{separation}
\end{eqnarray}
Thus, the interlacing properties for the zeros lead to explicit
bounds for the separation of the zeros; for comparison,
Theorem~8.9.1 in \cite{Szego1939} yields $\theta_i - \theta_{i+1} = O(1)/N$,
while (\ref{separation}) yields an explicit constant $2.5\pi$.
These estimates for the zeros of $P_{N-1}^{(1,0)}$ are used
to derive the following result.
\begin{theorem}\label{radau}
The Lebesgue constant for the Radau quadrature points
\[
-1 < \tau_1 < \tau_2 < \ldots < \tau_N = 1
\]
$($the zeros of $P_{N-1}^{(1,0)}$ augmented by $\tau_N = +1)$ is $O(\sqrt{N})$.
\end{theorem}
\begin{proof}
The Lagrange interpolating polynomials $R_i$, $1 \le i \le N$,
associated with the Radau quadrature points are given by
\[
R_i(\tau)= \left( \frac{1-\tau}{1-\tau_i} \right)
\prod_{\substack{j=1\\ j\neq i}}^{N-1}\frac{\tau-\tau_j}
{\tau_i-\tau_j}, \quad 1 \le i \le N-1, \quad
R_N(\tau) =
\prod_{\substack{j=1}}^{N-1}\frac{\tau-\tau_j}
{1-\tau_j}.
\]
Similar to (\ref{Li}), the $R_i$ can be expressed
\begin{equation}\label{Ri}
R_i(\tau)=\left\{
\begin{array}{cl}
\displaystyle\frac{(1-\tau)P_{N-1}^{(1,0)}(\tau)}
{(1-\tau_i)\dot{P}_{N-1}^{(1,0)}(\tau_i)(\tau-\tau_i)},
&i < N, \\[.20in]
\displaystyle{\frac{P_{N-1}^{(1,0)}(\tau)}{P_{N-1}^{(1,0)}(1)}},
&i=N.\\
\end{array}
\right.
\end{equation}
By \cite[(4.1.1)]{Szego1939} and \cite[(7.32.2)]{Szego1939}, we have
\begin{equation}\label{h22}
P_{N-1}^{(1,0)}(1)= N \quad \mbox{and} \quad
|P_{N-1}^{(1,0)}(\tau)|\le N \mbox{ for all } \tau \in [-1, +1] .
\end{equation}
We conclude that $|R_N (\tau)| \le 1$ for all $\tau \in [-1, +1]$.
Hence, the proof is complete if
\begin{equation}\label{h10}
\max \left\{ \sum_{i=1}^{N-1}|R_i(\tau)| : \tau \in [-1, +1] \right\}
=O(\sqrt{N}) .
\end{equation}
Let $\delta > 0$ be a small constant.
Technically, any $\delta$ satisfying $0 < \delta < 1/2$ is
small enough for the analysis.
Szeg\H{o} establishes the following bounds when analyzing the
Lebesgue constants associated with the roots of Jacobi polynomials:
\begin{equation}\label{radaulebesgue}
\sum_{i = 1}^N \left| \frac{P_{N}^{(1,0)}(\tau)}
{\dot{P}_{N}^{(1,0)}(\tau_i)(\tau-\tau_i)} \right| =
\left\{
\begin{array}{ll}
O(\sqrt{N}) & \mbox{if } \tau \in [-1, \delta-1], \\
O(\log N) & \mbox{if } \tau \in [\delta-1 , 1 - \delta], \\
O(N^{3/2}) & \mbox{if } \tau \in [1 - \delta, 1].
\end{array} \right.
\end{equation}
Szeg\H{o} considers the general Jacobi polynomials
$P_N^{(\alpha, \beta)}$ on pages 336--338 of \cite{Szego1939},
while here we only state the results
corresponding to $\alpha = 1$ and $\beta = 0$.
We first show that (\ref{h10}) holds when $\tau \in [-1, 1-\delta]$.
Observe that $(1 - \tau)/(1-\tau_i) \le 4/\delta$
when $\tau_i \le 1 - \delta/2$ and $\tau \in [-1, +1]$.
It follows from (\ref{radaulebesgue}) that
\begin{eqnarray}
\sum_{\tau_i \le 1-\delta/2} |R_i(\tau)| &\le& \left( \frac{4}{\delta} \right)
\sum_{\tau_i \le 1-\delta/2}
\left| \frac{P_{N-1}^{(1,0)}(\tau)}
{\dot{P}_{N-1}^{(1,0)}(\tau_i)(\tau-\tau_i)} \right| \nonumber \\
&=& \left\{ \begin{array}{ll}
O(\sqrt{N}), & \tau \in [-1, \delta-1], \\
O(\log N), & \tau \in [\delta-1, 1 - \delta] .
\end{array} \right. \label{h11}
\end{eqnarray}
When $\tau_i > 1-\delta/2$ and $\tau \in [-1, +1-\delta]$, we have
$|\tau - \tau_i| \ge \delta/2$; hence,
\begin{equation}\label{h12}
\sum_{1 > \tau_i > 1-\delta/2} |R_i(\tau)| \le \left( \frac{4}{\delta} \right)
\sum_{1 > \tau_i > 1-\delta/2}
\left| \frac{P_{N-1}^{(1,0)}(\tau)}
{(\tau_i - 1)\dot{P}_{N-1}^{(1,0)}(\tau_i)} \right| .
\end{equation}
We have the following bounds for the factors on the right side of (\ref{h12}):
\begin{itemize}
\item[(a)]
By Proposition~\ref{pro1},
$|P_{N-1}^{(1,0)} (\tau)| = O(1)$ if $\tau \in [-1, \delta - 1]$ and
$|P_{N-1}^{(1,0)} (\tau)| = O(N^{-1/2})$ if $\tau \in [\delta - 1, 1-\delta]$.
\item[(b)]
By (\ref{h8}),
$|\dot{P}_{N-1}^{(1, 0)}(\tau_i)| \ge
\gamma_3 \theta_i^{-5/2} \sqrt{N-1}$, where $\cos \theta_i = \tau_i \ge 0$.
\item[(c)]
By a Taylor expansion around $\theta = 0$,
\begin{equation}\label{1-cos}
\theta^2/4 \le 1 - \cos \theta \le \theta^2/2, \quad \theta \in [0, \pi/2].
\end{equation}
\end{itemize}
By (b) and the lower bound in (c) at $\theta = \theta_i$, we have
\begin{equation}\label{h100}
(1-\tau_i) |\dot{P}_{N-1}^{(1,0)}(\tau_i)| \ge
0.25 \gamma_3 \theta_i^{-1/2} \sqrt{N-1} .
\end{equation}
We combine this with (a) and (\ref{h12}) to obtain
\[
\sum_{1 > \tau_i > 1-\delta/2} |R_i(\tau)| =
\left\{ \begin{array}{lll}
O(N^{-1/2})\displaystyle\sum_{i = 1}^N \sqrt{\theta_i} &= O(\sqrt{N}),
& \tau \in [-1, \delta - 1], \\
O(N^{-1})\displaystyle\sum_{i = 1}^N \sqrt{\theta_i} &= O(1),
& \tau \in [\delta-1, 1-\delta] ,
\end{array} \right.
\]
since $\theta_i \in [0, \pi]$.
This establishes (\ref{h11}) for all $\tau \in [-1, 1-\delta]$.
To complete the proof of (\ref{h10}), we need to consider
$\tau \in (1-\delta, 1]$.
The analysis becomes more complex since
Szeg\H{o}'s estimate (\ref{radaulebesgue}) is $O(N^{3/2})$ in this region,
while we are trying to establish a much smaller bound in (\ref{h10});
in fact, the bound in this region is $O(\log N)$ as we will show.
For the numerator of $R_i (\tau)$ and
$\tau \in (1-\delta, 1]$,
Proposition~\ref{pro1} and (\ref{1-cos}) yield
\begin{eqnarray}
(1-\tau) |P_{N-1}^{(1,0)} (\tau)| &=&
(1-\cos \theta)|P_{N-1}^{(1,0)}(\cos \theta)| =
\left\{ \begin{array}{ll}
\theta^2 O(N), & \theta \in [0, 1/N], \\
\theta^{1/2} O(N^{-1/2}), & \theta \in [1/N, \pi/2],
\end{array} \right. \nonumber \\
&=&
O(N^{-1/2}). \label{h14}
\end{eqnarray}
Given $\tau \in (1-\delta, 1]$,
let us first focus on those $i$ in (\ref{h10}) for which
$|\tau-\tau_i| \ge \delta$.
In this case, $\tau_i \le 1-\delta$ or
$1-\tau_i \ge \delta$, and (\ref{h14}) gives
\begin{eqnarray}
\sum_{|\tau-\tau_i|\ge\delta}|R_i(\tau)| &=&
\sum_{|\tau-\tau_i|\ge\delta}
\left| \frac{(\tau-1)P_{N-1}^{(1,0)}(\tau)}
{(\tau_i-1)\dot{P}_{N-1}^{(1,0)}(\tau_i)(\tau-\tau_i)} \right| \nonumber \\
&\le& \frac{O(N^{-1/2})}{\delta^2} \sum_{|\tau-\tau_i|\ge\delta}
\frac{1}{|\dot{P}_{N-1}^{(1,0)} (\tau_i)|}. \label{h50}
\end{eqnarray}
The lower bounds (\ref{h9}) and (\ref{h8}) imply that
\begin{equation}\label{h51}
\sum_{|\tau-\tau_i|\ge\delta}|R_i(\tau)| =
O(N^{-1}) \sum_{\tau_i \ge 0} \theta_i^{5/2}
+
O(N^{-1}) \sum_{\tau_i < 0} |\pi - \theta_i|^{3/2} = O(1),
\end{equation}
since the terms in the sums are uniformly bounded and there are at
most $N$ terms.
The next step in the proof of (\ref{h10}) for $\tau \in (1-\delta, 1]$
is to consider those terms corresponding to $|\tau-\tau_i|<\delta$.
Since $\delta$ is small, it follows that both $\tau$ and $\tau_i$ are
near 1, and consequently, $\theta$ and $\theta_i$
are small and nonnegative,
where $\cos \theta = \tau$ and $\cos \theta_i = \tau_i$.
In particular, $0 \le \theta_i \le \pi/2$.
In this case where $\tau_i$ is near $\tau$,
it is important to take into account the fact that
$\tau - \tau_i$ is a divisor of the numerator $P_{N-1}^{(1,0)}(\tau)$.
To begin, we combine the lower bound in (\ref{h8}) and the bounds in
(\ref{1-cos}) to obtain
\begin{equation}\label{h19}
\frac{(1-\tau)}
{(1-\tau_i)|\dot{P}_{N-1}^{(1,0)}(\tau_i)|} \le
\frac{2\theta^2}{\theta_i^2 (\gamma_3 \sqrt{N}) \theta_i^{-5/2}} =
O(N^{-1/2}) \theta^2\sqrt{\theta_i} .
\end{equation}
It follows from (\ref{Ri}) that
\begin{equation}\label{h15}
|R_i(\tau)|= O(N^{-1/2}) \theta^2\sqrt{\theta_i}
\left| \frac{P_{N-1}^{(1,0)}(\tau)}{\tau-\tau_i} \right| .
\end{equation}
The mean value theorem and the formula
\cite[(4.21.7)]{Szego1939} for the derivative of
$P_{N-1}^{(\alpha, \beta)}(\tau)$ in terms of
$P_{N-2}^{(\alpha+1, \beta+1)}(\tau)$ gives the identity
\begin{equation} \label{h17}
\left|\frac{P_{N-1}^{(1,0)}(\tau)}{\tau-\tau_i}\right|
= \left|\frac{P_{N-1}^{(1,0)}(\tau)-P_{N-1}^{(1,0)}
(\tau_i)}{\tau-\tau_i}\right|
=\left(\frac{N+1}{2}\right)\left|P_{N-2}^{(2,1)}(\cos\eta_i)\right|,
\end{equation}
where $\eta_i$ lies between $\theta$ and $\theta_i$.
Together, (\ref{h15}) and (\ref{h17}) imply that
\begin{equation}\label{h20}
|R_i(\tau)| = O(N^{1/2}) \theta^2 \sqrt{\theta_i}
\left|P_{N-2}^{(2,1)}(\cos\eta_i)\right|.
\end{equation}
The estimate (\ref{h20}) is useful when $\tau_i$ is near $\tau$.
When $\tau_i$ is not near $\tau$, we proceed as follows.
Use the identity
\[
\cos\alpha-\cos\beta
=-2\sin\displaystyle{\frac{(\alpha+\beta)}{2}}
\sin\displaystyle{\frac{(\alpha-\beta)}{2}},
\]
to deduce that
\begin{equation}\label{h70}
|\tau - \tau_i| = |\cos \theta - \cos \theta_i| \ge
\frac{2}{\pi^2} \left| \theta^2 - \theta_i^2 \right|
\end{equation}
when $|\theta + \theta_i| \le \pi$, which is satisfied since
both $\theta$ and $\theta_i$ are near 0.
Exploiting this inequality in (\ref{h15}) yields
\begin{equation}\label{h21}
|R_i(\tau)|= O(N^{-1/2}) \theta^2\sqrt{\theta_i}
\left| \frac{P_{N-1}^{(1,0)}(\tau)}{ \theta^2 - \theta_i^2} \right| .
\end{equation}
Recall that we now need to analyze the interval $\tau \in [1-\delta, 1]$
and those $i$ for which $|\tau-\tau_i| < \delta$.
Our analysis works with the variable $\theta \in [0, \pi/2]$,
where $\cos \theta = \tau$.
The interval $\theta \in [0, \pi/2]$ corresponds to
$\tau \in [0,1]$ which covers the target interval $[1-\delta, 1]$ when
$\delta$ is small.
By \cite[(7.32.2)]{Szego1939}, we have
\[
|P_{N-2}^{(2,1)}(\cos\eta_i)| \le N(N-1)/2 .
\]
If $\theta \in [0, c/N]$, where $c$ is a fixed constant independent of $N$,
then it follows from
(\ref{h20}) that $|R_i(\tau)| = O(N^{1/2}) \sqrt{\theta_i}$.
Moreover, if $\theta_i \le 2 \theta \le 2c/N$, then
$|R_i (\tau)| = O(1)$.
By the bounds (\ref{phibounds}),
the number of roots that
satisfy $\theta_{N-i} \le 2c/N$ is at most $2c$, independent of $N$.
On the other hand, if $\theta_i > 2 \theta$, then $\theta< \theta_i/2$ and
\[
\left|\theta_i^2-\theta^2\right|=\theta_i^2-\theta^2\geq
(3/4) \theta_i^2 .
\]
With this substitution in (\ref{h21}), we have
\[
|R_i(\tau)|= O(N^{-1/2}) \theta^2\theta_i^{-3/2}
\left| {P_{N-1}^{(1,0)}(\tau)} \right| .
\]
By (\ref{h22}), $| P_{N-1}^{(1,0)}(\tau)| \le N$.
Hence, if $\theta \in [0, c/N]$, then by (\ref{phibounds}), we have
\begin{eqnarray*}
\sum_{|\tau-\tau_i| < \delta} |R_i(\tau)| &=&
O(N^{-3/2}) \sum_{|\tau-\tau_i| < \delta} \theta_i^{-3/2} =
O(N^{-3/2}) \sum_{i=1}^{N-1} \theta_i^{-3/2} \\
&=& O(N^{-3/2}) \sum_{i=1}^{N-1} \theta_{N-i}^{-3/2} =
O(1) \sum_{i=1}^{N-1} i^{-3/2} = O(1),
\end{eqnarray*}
for all $\theta \in [0, c/N]$.
Finally, suppose that $\theta \in [c/N, \pi/2]$.
By (\ref{separation}) the separation between adjacent zeros
$\theta_i$ and $\theta_{i+1}$ is at most $2.5\pi/N$.
Hence, if $\theta_i$ is within $k$ zeros of $\theta$, then
$\eta_i \ge \theta - \gamma N^{-1}$, $\gamma = 2.5\pi k$.
Here $k \ge 2$ is an arbitrary fixed integer.
By Proposition~\ref{pro1}, we have
\[
\left|P_{N-2}^{(2,1)}(\cos\eta_i)\right| =
(\theta- \gamma N^{-1})^{-5/2}O(N^{-1/2}) .
\]
Choose $c > 2\gamma$.
If $\theta \in [c/N, \pi/2]$,
then $\theta/2 \ge c/(2N) \ge \gamma/N$.
Hence, $\theta- \gamma /N \ge \theta/2$ and
\[
\left|P_{N-2}^{(2,1)}(\cos\eta_i)\right| =
(\theta/2)^{-5/2}O(N^{-1/2}) =
\theta^{-5/2}O(N^{-1/2}) .
\]
Combine this with (\ref{h20}) to obtain
\[
|R_i(\tau)| = O(1) \sqrt{\theta_i/\theta}
\]
when $\theta \in [c/N, \pi/2]$ and $\theta_i$ is within $k$
zeros of $\theta$.
If $\theta_i \le \theta$, then $R_i (\tau) = O(1)$.
If $\theta_i > \theta$ and $\theta_i$ is within $k$ zeros of $\theta$,
then $\theta_i - \theta \le \gamma/N$, and
\[
\theta_i/\theta \le (\theta + \gamma/N)/\theta \le 1 + \gamma/c
\]
when $\theta \in [c/N, \pi/2]$.
Thus $|R_i (\tau)| = O(1)$ when $\theta \in [c/N, \pi/2]$ and
$\theta_i$ is within $k$ zeros of $\theta$.
This analysis of $R_i$ when $\theta_i$ is close to $\theta$ needs to
be complemented with an analysis of $R_i$ when $\theta_i$ is not
close to $\theta$ and $\theta \in [c/N, \pi/2]$.
For $\theta$ in this interval, Proposition~\ref{pro1} yields
$|P_{N-1}^{(1,0)} (\cos \theta)| = \theta^{-3/2}O(N^{-1/2})$.
By (\ref{h21}), we have
\begin{equation}\label{h23}
|R_i (\tau)| = O(N^{-1}) \frac{\sqrt{\theta} \sqrt{\theta_i}}
{|\theta^2 - \theta_i^2|} .
\end{equation}
If $\theta \ge 2\theta_i$, then
$\theta^2 - \theta_i^2 \ge (3/4)\theta^2$ and
\[
|R_i (\tau)| = O(N^{-1}) \theta^{-3/2} \theta_i^{1/2} .
\]
By (\ref{zeros*}), we have
\[
|R_{N-i} (\tau)| = O((N\theta)^{-3/2}) \sqrt{i+1} .
\]
Recall that we are focusing on those $i$ for which $\theta_{N-i} \le \theta/2$.
The lower bound $\theta_{N-i} \ge i/N$ from (\ref{phibounds}) implies that
$i \le N\theta/2$ whenever $\theta_{N-i} \le \theta/2$.
Hence, the set of $i$
satisfying $i \le N\theta$ is a superset of the $i$ that we need to consider,
and we have
\begin{eqnarray*}
\sum_{\theta_i \le \theta/2} |R_i (\tau)| &=&
\sum_{\theta_{N-i} \le \theta/2} |R_{N-i} (\tau)| =
O((N\theta)^{-3/2}) \sum_{i \le N\theta} \sqrt{i+1} \\
&=& O((N\theta)^{-3/2}) (N\theta + 1)^{3/2} = O(1) .
\end{eqnarray*}
On the other hand, if $\theta < 2 \theta_i$, then we have
\[
\frac{\sqrt{\theta} \sqrt{\theta_i}}
{|\theta^2 - \theta_i^2|} =
\frac{\sqrt{\theta} \sqrt{\theta_i}}
{|(\theta - \theta_i)(\theta + \theta_i)|} \le
\frac{\sqrt{\theta} \sqrt{\theta_i}}
{|(\theta - \theta_i)\theta_i|} \le
\frac{\sqrt{2}}
{|\theta - \theta_i|} .
\]
Combine this with (\ref{h23}) to obtain
\[
|R_i (\tau)| = \frac{O(1)}{|N\theta - N\theta_i|} .
\]
Earlier we showed that
$|R_i (\tau)| = O(1)$ for those $i$ where the associated $\theta_i$
is within $k$ zeros of $\theta$.
When $\theta_i$ is more than $k$ zeros away from $\theta$,
we exploit the estimate (\ref{zeros}) for the zeros to deduce that
$|N\theta - N\theta_i|$ behaves like an arithmetic sequence of natural numbers.
Hence, the sum of the $|R_i (\tau)|$ over these natural numbers,
where we avoid the singularity, is bounded by a multiple of $\log N$.
This completes the proof.
\end{proof}
\section{Tightness of estimates}\label{numerical}
\label{tight}
At the bottom of page 110 in \cite{Vertesi81}, V\'{e}rtesi states
some lower bounds for the Lebesgue function.
In the case of the Gauss quadrature points augmented by $\tau_{N+1} = +1$
and the Radau quadrature points with $\tau_N = +1$, the associated
Lebesgue function is of order $\sqrt{N}$ at
$\tau = (\tau_1 + \tau_{2})/2$,
the midpoint between the two smallest quadrature points.
It follows that the $O(\sqrt{N})$ estimates for the Lebesgue constant are tight.
To study the tightness of the estimates,
the Lebesgue constants were evaluated numerically and
fit by curves of the form $a \sqrt{N} + b$, $10 \le N \le 100$
(see Figures~\ref{graphgauss}--\ref{graphradau}).
A fast and accurate method for evaluating the Gauss quadrature points,
which could be extended to the Radau quadrature points,
is given by Hale and Townsend in \cite{HaleTownsend13}.
Figures~\ref{graphgauss}--\ref{graphradau} indicate that
a curve of the form $a \sqrt{N} + b$ is a good fit to the Lebesgue constant.
Another Lebesgue constant which enters into the analysis of
the Radau collocation schemes studied in \cite{HagerHouRao15c} is the
Lebesgue constant for the Radau quadrature points on $(-1, +1]$
augmented by $\tau_0 = -1$.
As given by V\'{e}rtesi in \cite[Thm. 2.1]{Vertesi81}, the
Lebesgue constant is $O(\log N)$.
Trefethen \cite{Trefethen13} points out that the Lebesgue constant
on any point set has the lower bound
\[
\Lambda_N \ge \left( \frac{2}{\pi} \right) \log (N) + 0.52125\ldots ,
\]
due to Erd\H{o}s \cite{Erdos61} and Brutman \cite{Brutman78}.
For comparison, Figure~\ref{graphradau-1} plots this lower bound
along with the computed Lebesgue constant.
When the number of interpolation points ranges between 10 and 100,
the Lebesgue constant for the Radau quadrature
points augmented by the point $-1$ differs from the smallest possible Lebesgue
constant by between 0.70 and 0.84.
\begin{figure}
\caption{Least squares approximation to the Lebesgue constant for
the point set corresponding to the Gauss quadrature points augmented by $-1$
using curves of the form $a\sqrt{N}+b$}
\label{graphgauss}
\end{figure}
\begin{figure}
\caption{Least squares approximation to the Lebesgue constant for
the point set corresponding to the Radau quadrature points using curves of the
form $a\sqrt{N}+b$}
\label{graphradau}
\end{figure}
\begin{figure}
\caption{Least squares approximation to the Lebesgue constant for
the point set corresponding to the Radau quadrature points on
$(-1, +1]$ augmented by $-1$ using curves of the form $a\log N +b$}
\label{graphradau-1}
\end{figure}
\section{Conclusions}
\label{conclusions}
In Gauss and Radau collocation methods for unconstrained control problems
\cite{HagerHouRao15b, HagerHouRao15c},
the error in the solution to the discrete problem is bounded by the
residual for the solution to the continuous problem inserted in the
discrete equations.
In Section~\ref{residual}, we observe that the residual in the sup-norm
is bounded by the distance between the derivative of the continuous
solution interpolant and the derivative of the continuous solution.
Proposition~\ref{L1} bounds this distance in terms of the error in
best approximation and the Lebesgue constant for the point set.
We show that the Lebesgue constant for the point sets associated with
the Gauss and Radau collocation methods is $O(\sqrt{N})$, and
by the plots of Section~\ref{numerical}, the Lebesgue constants are
closely fit by curves of the form $a\sqrt{N}+b$.
\section*{Acknowledgments}
Special thanks to Lloyd N. Trefethen for pointing out Brutman's paper
\cite{Brutman97} and for providing a copy when we had trouble locating
the journal.
Also, we thank a reviewer for pointing out the book \cite{Mastroianni08}
which contains newer results as well as additional references.
\end{document} |
\begin{document}
\maketitle
\begin{abstract} We prove that if a directed multigraph $D$ has at
most $t$ pairwise arc-disjoint directed triangles, then there exists
a set of less than $2t$ arcs in $D$ which meets all directed
triangles in $D$, except in the trivial case $t=0$. This
answers affirmatively a question of Tuza from 1990.
\end{abstract}
\section{Introduction}\label{sec:intro}
In the 1980s, Tuza~\cite{TuzaProc, Tuza} posed the following conjecture about packing
and covering triangles in undirected simple graphs (hereafter called graphs). Given a graph $G$,
let $\nu(G)$ be the maximum size of a family of pairwise edge-disjoint triangles in $G$,
and let $\tau(G)$ be the minimum size of an edge set $X$ such that $G-X$ is triangle-free.
Evidently $\tau(G) \geq \nu(G)$, since we are forced to delete at least one edge from
each triangle in a family of edge-disjoint triangles (and these edges must be distinct),
and on the other hand $\tau(G) \leq 3\nu(G)$, since it suffices to delete \emph{all} edges
from each triangle in a maximal family of edge-disjoint triangles. Tuza conjectured
that in fact $\tau(G) \leq 2\nu(G)$ for every graph $G$. As Tuza
observed, this upper bound is sharp if true, and in particular it is achieved by $K_4$ and $K_5$.
The best general result on Tuza's conjecture is due to
Haxell~\cite{Haxell}, who proved that $\tau(G) \leq 2.87\nu(G)$ for
every graph $G$. Other authors have approached the conjecture by
proving that $\tau(G)\leq 2 \nu(G)$ for all graphs in some given
family. Tuza \cite{Tuza} showed that his conjecture holds for all
planar graphs, and Aparna~Lakshmanan, Bujt\'as, and Tuza~\cite{LBT}
showed that it holds for all 4-colorable graphs. The planar result has
been generalized to graphs without $K_{3,3}$-subdivisions (Krivelevich
\cite{Krivelevich}), and then to graphs with maximum average degree
less than $7$ (Puleo \cite{Puleo}). In the case where $G$ is a
$K_4$-free planar graph, the stronger inequality
$\tau(G) \leq \frac{3}{2}\nu(G)$ was proved by Haxell, Kostochka, and
Thomass\'e~\cite{SashaK4}.
Asymptotic, fractional, and multigraph versions of Tuza's conjecture
have also been considered. Yuster \cite{Yuster} proved that
$\tau(G)\leq (2+o(1))\nu(G)$ when $G$ is a dense graph, and this was
shown to be asymptotically tight by Kahn and Baron
\cite{BarKahn}. Yuster \cite{Yuster} also noted that a combination of
results by Krivelevich \cite{Krivelevich} and Haxell and R\"{o}dl
\cite{HaxRod} implies that for any graph $G$ with $n$ vertices,
$\tau(G)< 2 \nu(G)+o(n^2)$. Two fractional versions of Tuza's
Conjecture were proved by Krivelevich~\cite{Krivelevich}. Chapuy,
DeVos, McDonald, Mohar, and Scheide~\cite{CDMMS} tightened one of
these fractional versions, and considered the natural extension of
Tuza's conjecture to multigraphs. Here by multigraph we mean that
multiple edges are permitted, but not loops (they have no
effect on our problem anyways); the definitions of $\nu$ and
$\tau$ are identical to those given in the simple graph case. In
\cite{CDMMS}, planar multigraphs were shown to satisfy Tuza's
conjecture, and $\tau(G)\leq 2.92\nu(G)$ was shown to hold for all
multigraphs $G$.
When posing his conjecture in \cite{Tuza}, Tuza also discussed the problem of packing and covering \emph{directed} triangles. Here by directed multigraph we shall mean any oriented multigraph; by directed graph we shall mean any directed multigraph without parallel arcs in the same direction (but we allow digons, i.e., a pair of arcs $u \to v$ and $v \to u$). Given a directed multigraph $D$, let $\nu_c(D)$ denote the maximum size of a family of pairwise arc-disjoint directed triangles, and let $\tau_c(D)$
denote the minimum size of an arc set $Y$ such that $D-Y$ has no
directed triangles. Tuza asked: \emph{``Is $\tau_c(D)< 2 \nu_c(D)$ for every digraph $D$?''}. In this paper we answer this affirmatively with the following theorem.
\begin{theorem}\label{thm:main}
If $D$ is a directed multigraph with at least one directed triangle, then $\tau_c(D) < 2\nu_c(D)$.
\end{theorem}
Tuza \cite{Tuza} observed that the rotational 5-tournament $T_5$, pictured in Figure~\ref{fig:5tourn},
satisfies $\tau_c(T_5)/ \nu_c(T_5)= \tfrac{3}{2}$. Our computational efforts have not yielded any examples with a larger ratio for $\tau_c/\nu_c$, and in fact we find the following conjecture plausible.
\begin{conjecture}\label{conj:32}
If $D$ is a directed multigraph, then $\tau_c(D) \leq \frac{3}{2}\nu_c(D)$.
\end{conjecture}
\begin{figure}
\caption{The rotational $5$-tournament $T_5$, with $\tau_c(T_5) = 3$ and $\nu_c(T_5) = 2$.}
\label{fig:5tourn}
\end{figure}
In \cite{TuzaPerfect}, Tuza proved that if $D$ is a planar oriented graph, then $\tau_c(D)=\nu_c(D)$. This topic of packing and covering directed triangles appears not to have caught on in the literature however (in contrast to the undirected analogue), and we hope that Conjecture \ref{conj:32} and Theorem \ref{thm:main} may create interest.
\section{Proof of Theorem~\ref{thm:main}}
The main idea of our proof is based on the reducibility argument in
Puleo~\cite{Puleo}. We use induction on $\sizeof{V(D)}$, with trivial
base case when $\sizeof{V(D)} = 1$. \new{Note that in what follows ``triangle'' always means ``directed triangle''.}
Take any $v \in V(D)$, and define an auxiliary directed multigraph $N$ as
follows: the vertex set of $N$ is the disjoint union of a set
$\{s,t\}$ consisting of designated source and sink vertices, as well
as two sets $W^+$ and $W^-$, where $W^+$ contains a copy $w^+$ of
each vertex $w \in N^+(v)$, and $W^-$ contains a copy $w^-$ of each
vertex $w \in N^-(v)$. (Note that if $w \in N^+(v) \cap N^-(v)$, then
there is a copy of $w$ in \emph{each} of $W^+$ and $W^-$.) Given
vertices $u^+ \in W^+$ and $z^- \in W^-$, we include the arc
$u^+ \to z^-$ in $E(N)$ with the same multiplicity as the arc $u \to z$
in $E(D)$. For each $w^+ \in W^+$, we include the arc $s \to w^+$ in $E(N)$ with
the same multiplicity as the arc $v \to w$, and for each $w^- \in W^-$, we include the arc $w^- \to t$ in
$E(N)$ with the same multiplicity as the arc $w \to v$ in $E(D)$.
Observe that there is a bijection between directed triangles in $D$
containing $v$, and directed $(s,t)$-paths in $N$; triangle
$z \to v \to u \to z$ in $D$ corresponds to directed path $su^+z^-t$
in $N$. Furthermore, two directed triangles in $D$ are arc-disjoint
if and only if the corresponding paths in $N$ are
arc-disjoint. (Whenever two triangles use different parallel arcs,
the corresponding paths have parallel arcs as well.)
Let $\mathcal{P}$ be a maximum-size set of arc-disjoint
$(s,t)$-paths in $N$, say with $|\mathcal{P}|=p$. Let $\mathcal{R}$ be the
corresponding set of pairwise arc-disjoint triangles in $D$, all of
which contain $v$. Each triangle in $\mathcal{R}$ has exactly one arc that
is not incident to $v$; let $\mathcal{R}_v$ be the set consisting of these
$p$ arcs.
Let $X$ be a minimum-size set of arcs in $N$ so that $N-X$ has no
$(s, t)$-paths. By Menger's Theorem, $|X|=|\mathcal{P}|=p$. Note
that in $D$, the set $X$ corresponds to a set $X_D$ of $p$ arcs, and
every triangle incident to $v$ has at least one arc in $X_D$. Let
$C=X_D\cup \mathcal{R}_v$, and observe that $C$ is a triangle arc cover of
every triangle involving $v$ as well as every triangle sharing an
edge with $\mathcal{R}$. We have $|C|\leq 2p$, with equality if and only if
$X_D$ and $\mathcal{R}_v$ are disjoint.
\new{Let $D'=D-v-\mathcal{R}_v$, and suppose first that} $D'$ has at least one directed triangle. By induction, $\tau_c(D')<2\nu_c(D')$. Let $\mathcal{R}'$ be a maximum-size set of arc-disjoint directed triangles in $D'$ and let $C'$ be a minimum-size triangle arc cover in $D'$. By our observations above, note that $C\cup C'$ is a triangle arc cover of $D$, and $\mathcal{R}' \cup \mathcal{R}$ is a set of arc-disjoint triangles in $D$. We get that
\[ |C'\cup C| < 2|\mathcal{R}'|+2p =2|\mathcal{R}\cup\mathcal{R}'|,\]
as desired.
We may now assume that $D'$ has no directed triangles. In this case,
$C$ is a triangle arc cover for $D$ with size at most $2p$. Since
$\mathcal{R}$ is a set of $p$ arc-disjoint triangles in $D$, \new{we may assume that $\tau_c(D) = 2p$. We will show that $\nu_c(D) \geq p+1$}.
\new{There exists a directed triangle $T_0$ in $D$ that is disjoint from $\mathcal{R}_v$, since $|\mathcal{R}_v|=p<\tau_c(D)$. Since $D'$ has no directed triangles, $T_0$ must be incident to $v$; let $e_0$ be the arc of $T_0$ that is not incident to $v$. If $T_0$ has no arcs in common with $\mathcal{R}$, then $\mathcal{R}\cup\{T_0\}$ is our desired triangle packing of size $p+1$. Let $\mathcal{R}^0$ be the set of triangles in $\mathcal{R}$ with at least one arc in common with $T_0$. Since $e_0\not\in\mathcal{R}_v$, we know that $|\mathcal{R}^0|\in\{1, 2\}$. We will show that we can find a set $\mathcal{T}$ of $|\mathcal{R}^0|$ directed triangles so that $(\mathcal{R}- \mathcal{R}^0)\cup\{T_0\}\cup \mathcal{T}$ is a set of $p+1$ arc-disjoint triangles in $D$.}
\new{Let $\mathcal{R}^0_v$ be the subset of $\mathcal{R}_v$ that corresponds to $\mathcal{R}^0$. Consider $D^* = D-v - (\mathcal{R}_v - \mathcal{R}^0_v)$. Note that $D^*$ contains at least one triangle, because if not, the arc set $X_D \cup (\mathcal{R}_v -\mathcal{R}^0_v)$ is a triangle arc cover for $D$. Since $D'$ is triangle-free, every triangle in $D^*$ must contain at least one arc from $\mathcal{R}^0_v$. Hence $|\mathcal{R}^0_v|\in\{1, 2\}$ implies that $\nu_c(D^*)\in\{1, 2\}$. Let $\mathcal{T}$ be a maximum packing of directed triangles in $D^*$.}
\new{We first claim that $|\mathcal{T}|=|\mathcal{R}^0|$. If not, then $\nu_c(D^*)=1$ and $|\mathcal{R}^0|=2$, since every triangle in $\mathcal{T}$ must contain at least one arc from $\mathcal{R}^0_v$. However $\nu_c(D^*) = 1$ implies (by applying the induction hypothesis to $D^*$) that $\tau_c(D^*) = 1$, so there is an arc $f^*$ that covers all directed triangles in $D^*$, and hence $X_D \cup (\mathcal{R}_v - \mathcal{R}^0_v) \cup \{f^*\}$ is a triangle arc cover in $D$ that is smaller than $C$.}
\new{We now complete our proof by showing that $\mathcal{T}$ is arc-disjoint from $(\mathcal{R}- \mathcal{R}^0)\cup\{T_0\}$. Each arc used in this second set of triangles, aside from $e_0$, is either incident to $v$ or from the set $\mathcal{R}_v-\mathcal{R}_v^0$. Given that $\mathcal{T}$ is chosen from $D^*$, we need only worry about $e_0$ appearing in some triangle $T \in \mathcal{T}$.
As observed above, such a $T$ must contain at least one arc from $\mathcal{R}^0_v$, say the arc $e_1$ from the triangle $R_1 \in \mathcal{R}^0$. As $R_1$ and $T_0$ share
an arc incident to $v$, their arcs $e_1$ and $e_0$ either have a common head or a common tail (or both, if they are parallel). Either way, no directed triangle can contain both
of the arcs $e_1$ and $e_0$, and in particular $T$ cannot contain the arc $e_0$.}
\end{document} |
\begin{document}
\begin{spacing}{1.2}
\title{How to account for behavioural states in step-selection analysis: a model comparison}
\begin{abstract}
\noindent
\begin{enumerate}
\item Step-selection models are widely used to study animals' fine-scale habitat selection based on movement data. Resource preferences and movement patterns, however, can depend on the animal's unobserved behavioural states, such as resting or foraging. This is ignored in standard (integrated) step-selection analyses (SSA, iSSA). While different approaches have emerged to account for such states in the analysis, the performance of such approaches and the consequences of ignoring the states in the analysis have rarely been quantified.
\item We evaluated the recent idea of combining hidden Markov chains and iSSA in a single modelling framework. The resulting Markov-switching integrated step-selection analysis (MS-iSSA) allows for a joint estimation of both the underlying behavioural states and the associated state-dependent habitat selection. In an extensive simulation study, we compared the MS-iSSA to both the standard iSSA and a classification-based iSSA (i.e., a two-step approach based on a separate prior state classification). We further compared the three approaches in a case study on fine-scale interactions of simultaneously tracked bank voles (\textit{Myodes glareolus}).
\item The simulation study illustrates that standard iSSAs lead to erroneous conclusions due to both biased estimates and unreliable p-values when ignoring underlying behavioural states. We found the same for iSSAs based on prior state-classifications, as they ignore misclassifications and classification uncertainties. The MS-iSSA, on the other hand, performed well in parameter estimation and decoding of behavioural states. In the bank-vole case study, the MS-iSSA was able to distinguish between an inactive and active state, but results highly varied between individuals.
\item MS-iSSA provides a flexible framework to study state-dependent habitat selection. It defines states on both selection and movement patterns and accounts for uncertainties in the corresponding state process. To facilitate its use, we implemented the MS-iSSA approach in the R package \textit{msissa}.
\end{enumerate}
\end{abstract}
\noindent
{\bf Keywords:} animal movement, fine-scale interactions, habitat selection, hidden Markov models, Markov-switching regression, movement behaviour, state-switching, integrated step-selection analysis
\section{Introduction}\label{Sec_Intro}
Combining animal movement and environmental data, step-selection analysis (SSA) and its extension, the integrated step-selection analysis (iSSA), build a popular framework for studying animals' fine-scale habitat selection, while also taking the movement capacity of the animal into account \citep{for05,for09,avg16,nor21}. Essentially, SSA and iSSA are used to explain the animals' space use based on possible preferences for or avoidance of environmental features, accounting for spatial limitations that the animals' movement process imposes on availability. ISSA has successfully been applied, for example, to analyse elk response to roads \citep{pro17}, to study the effects of artificial nightlight on predator–prey dynamics of cougars and deer \citep{dit21}, and to model space use of Cape vultures in the context of wind energy development \citep{cer23}. Besides conventional habitat use, SSA has also proven suitable for detecting interactions such as avoidance or attraction between simultaneously tracked individuals \citep{schl19}.
For parameter estimation, SSA and iSSA use a conditional logistic regression for case-control designs to compare the characteristics of observed, i.e.\ \textit{used} steps against the covariates of alternative steps \textit{available} at a given time point. In this context, a step is the straight-line segment connecting two consecutive locations sampled at regular time intervals and is usually described by the step length and turning angle, i.e.\ the directional change \citep{for05}. The covariates usually correspond to features of the steps' end point, e.g.\ vegetation or snow cover \citep{str21}, but can also refer to characteristics along the step, e.g.\ the presence of roads on the path \citep{pro17}. What is considered to be available at a given time point depends on the assumptions made about the animals' movement capacities and/or typical, i.e.\ habitat-selection-free movement patterns. This usually translates to assumptions about the animals' step length and turning angle distribution (e.g.\ gamma and von Mises distributions). Tentative parameter estimates for these distributions can be estimated from observed steps. These estimates are biased because movement is restricted by habitat selection. A correction for the tentative parameters can be estimated using iSSA \citep{avg16,fie21}.
While (i)SSAs seem suitable in numerous instances, it has recently been argued that fine-scale habitat selection, resource requirements, and selection-free movement patterns might depend on the animal's behavioural modes such as resting or foraging (illustrated in Figure \ref{fig:HMM}). Ignoring such states in the analysis might thus lead to biased results and misleading conclusions \citep{roe14,sur19}. With telemetry-based location data, however, the underlying behavioural states are usually unobserved. Therefore, it has been suggested to first classify the movement data into different states, e.g.\ based on hidden Markov models (HMMs, \citealp{zuc16}), and to split the step observations accordingly into state-specific data sets, which can then be used to fit state-specific (i)SSAs in a second step \citep{roe14,kar19,pic22}. This two-step approach, hereafter named TS-(i)SSA, accounts for the unobserved state structure and is convenient as it can be based on existing software implementations. It has, however, two major drawbacks. First, the state classification is purely based on movement patterns without considering habitat selection. Thus, habitat selection and selection-independent movement processes can be confounded when defining the states. This can affect the validity of the state classification and can lead to a bias in the estimated movement and selection parameters \citep{pri22}. Second, the uncertainty in the HMM state classification is completely ignored in the follow-up (i)SSA. This can again lead to biased movement and (habitat) selection coefficients, as misclassification can occur. Furthermore, confidence intervals and standard p-values are no longer reliable as the uncertainty of both the HMM parameter estimation and the state classification are not taken into account. Consequently, also the TS-iSSA might lead to biased results and misleading conclusions. How serious this is in practice, however, has rarely been quantified. 
\citet{pri22} evaluated a population-level version of the TS-iSSA in a simulation study and found good classification and prediction performances in the scenarios considered, but also biased parameter estimates. However, as they focused on the population level, they did not provide results on the variation, uncertainty quantification and estimation accuracy of the individually fitted TS-iSSA models.
\begin{figure}
\caption{Illustration of how behavioural states can affect an animal's habitat selection and movement patterns. The state ``foraging'' is related to searching for food such as small insects in an open landscape, while the state ``resting'' is associated with a retreat in its shelter. Usually, the behavioural states are unobserved, thus hidden, and serially correlated. This structure corresponds to the basic dependence structure of a Markov-switching step-selection model.}
\label{fig:HMM}
\end{figure}
The above mentioned problems could be avoided by combining step selection models and hidden Markov chains (as used in HMMs) in a single model to allow for a joint estimation of the underlying state, habitat selection and selection-free movement processes. Similar as \citet{nic17} and \citet{pri22}, we therefore consider a Markov-switching integrated step-selection analysis (MS-iSSA, also called HMM-SSA) which renders a prior state classification unnecessary. All model parameters are jointly estimated using a case-control Markov-switching conditional logistic regression framework. In our implementation, we use a numerical maximum likelihood estimation and constrain all parameters to their natural parameter space to avoid problems in model interpretation (e.g.\ a negative shape parameter for an assumed gamma distribution for step length). For state decoding, we consider the well-known Viterbi algorithm, which computes the most likely state sequence underlying the data given the fitted model.
The aim of this paper is two-fold. First, we provide a broad overview of the MS-iSSA framework by discussing the underlying movement model, its relation to alternative approaches (iSSA, TS-iSSA, HMMs) and, most importantly, its practical implementation, which we further facilitate through release as an R package. Second, we investigate whether and to what extent either the complete neglect of underlying states in the analysis (iSSA) or their incorporation by a prior HMM-based state classification (TS-iSSA) affects the estimation results compared to the MS-iSSA approach. For this, we use an extensive simulation study to compare the estimation and, if applicable, classification performance of iSSAs, TS-iSSAs and MS-iSSAs in three state-switching scenarios. Thereby, we showcase different ways in which behavioural states could influence the animals' movement decisions. A supplementary simulation covers a scenario without underlying state-switching. We further compare the three approaches in a case study on fine-scale interactions of bank voles (\textit{Myodes glareolus}), which are small ground-dwelling rodents. Using a movement data set of synchronously tracked individuals, as analysed in \citet{schl19}, we test whether MS-iSSAs can detect meaningful biological states and whether they provide new insights into interactions, such as attraction, avoidance, or neutrality towards other conspecifics compared to iSSAs.
\section{Methods}\label{Sec2_Methods}
\subsection{Markov-switching step-selection model}\label{Sec2_basics}
We use $\{\mathbf{x}_{0,1},\mathbf{x}_{0,2},\ldots,\mathbf{x}_{0,T}\}$ to denote the sequence of two-dimensional animal locations observed at regular time intervals, which forms the observed movement track. Conditional on the previous location $\mathbf{x}_{0,t-1}$, a step from the current location $\mathbf{x}_{0,t}$ to the next location $\mathbf{x}_{0,t+1}$ is characterised by its step length $l_{0,t+1}$, i.e.\ the straight-line distance between the two consecutive locations, and its turning angle $\alpha_{0,t+1}$, i.e.\ the directional change. The corresponding covariate vector $\mathbf{Z}_{0,t+1}$ stores the feature values of the step, and we use $\mathbf{Z}$ to denote the collection of covariate values for all possible locations in the given area.
In the Markov-switching step-selection model, we assume the observed steps to be driven by an underlying hidden state sequence $\{S_{1},S_{2},\ldots,S_{T}\}$ with $N$ discrete states. Thus, each state variable $S_t$ at time $t$ can take one of $N$ state values ($S_t \in \{1,\ldots,N\}$). These states serve as proxies for the unobserved behavioural modes of the animal that influence its movement and habitat selection (illustrated in Figure \ref{fig:HMM}). We assume the state sequence to be a homogeneous $N$-state Markov chain, characterised by its transition probabilities $\gamma_{ij}=\Pr(S_t=j\mid S_{t-1}=i)$ to switch from state $i$ to state $j$, summarised in the $N \times N$ transition probability matrix $\bm{\Gamma}$, and the initial state distribution $\bm{\delta}$ which contains the probabilities to start in a certain state.
Each state $i$ ($i=1,\ldots,N$) is associated to a state-dependent density $f_i$ generating the next location. Its functional form is similar to the basic step-selection model \citep{for09}, but with movement and habitat selection parameters being state-dependent. Thus, conditional on locations $\mathbf{x}_{0,t-1}$ and $\mathbf{x}_{0,t}$, and covariates $\mathbf{Z}$, the current state $S_t=i$ determines the following distribution for a step to location $\mathbf{x}_{0,t+1}$:
\begin{align*}
f_i(\mathbf{x}_{0,t+1} \mid \mathbf{x}_{0,t}, \mathbf{x}_{0,t-1}, \mathbf{Z}; \boldsymbol{\theta}_i, \boldsymbol{\beta}_i)&=
\cfrac{\xoverbrace{\phi(\mathbf{x}_{0,t+1} \mid \mathbf{x}_{0,t},\mathbf{x}_{0,t-1};\boldsymbol{\theta}_i)}
^{\substack{\text{selection-free}\\\text{movement kernel}}}
\cdot
\overbrace{\omega(\mathbf{Z}_{0,t+1};\boldsymbol{\beta}_i)}
^{\substack{\text{movement-free}\\\text{selection function}}}}
{\underbrace{\int_{\mathbf{\tilde{x}} \in \mathcal{D}}
\phi(\mathbf{\tilde{x}} \mid \mathbf{x}_{0,t},\mathbf{x}_{0,t-1};\boldsymbol{\theta}_i)
\cdot
\omega(\mathbf{\tilde{Z}};\boldsymbol{\beta}_i)
d \mathbf{\tilde{x}}}
_{\text{normalising constant}}}
\end{align*}
The density consists of three components: i) The movement kernel $\phi(\cdot)$ describes the space use in a homogeneous landscape and is usually defined in terms of step length $l_{0,t+1}$ (e.g.\ gamma distribution) and turning angle $\alpha_{0,t+1}$ (e.g.\ von Mises distribution). The corresponding state-dependent parameters for state $i$ are summarised in the movement parameter vector $\boldsymbol{\theta}_i$; ii) The movement kernel is weighted by the movement-free selection function $\omega(\cdot)$, which indicates a possible selection for or against the covariates in $\mathbf{Z}_{0,t+1}$. It is usually assumed to be a log-linear function of the (state-dependent) selection coefficient vector $\boldsymbol{\beta}_i$,
$$\omega(\mathbf{Z}_{0,t+1};\boldsymbol{\beta}_i)=\exp\left( \mathbf{Z}_{0,t+1}^{\top}\boldsymbol{\beta}_i \right),$$
where a positive selection coefficient value indicates preference for, and a negative value avoidance of a corresponding covariate; iii) The integral in the denominator ensures that $f_i$ integrates to one. Usually, it is analytically intractable and must therefore be approximated, for example, using numerical integration methods. We provide an example of state-dependent step-selection densities in a $2$-state scenario in Figure S1 (Supplementary Material).
There are important relations between the Markov-switching step-selection model and two alternative movement models: i) If all states share the same parameters, i.e.\ $\boldsymbol{\theta}_1=\ldots=\boldsymbol{\theta}_N$ and $\boldsymbol{\beta}_1=\ldots=\boldsymbol{\beta}_N$, or if the number of states is set to one, i.e.\ $N=1$, the model reduces to the basic step-selection model without state-switching \citep{for09}; ii) If all selection coefficients are equal to zero, i.e.\ $\boldsymbol{\beta}_1=\ldots=\boldsymbol{\beta}_N=\mathbf{0}$, the model reduces to a basic movement HMM (\citealp{lan12}, \citealp{pat17}) with state-dependent step length and turning angle distributions as implied by the movement kernel $\phi(\cdot)$ but without habitat selection. These relations are very convenient, for example in the context of model comparison and model selection, as they allow the use of standard tests or information criteria to select between these three candidate models.
We can simplify the step-selection density $f_i$ by assuming step length to follow a distribution from the exponential family (with support on non-negative real numbers, e.g.\ a gamma distribution) and turning angle to follow either a uniform or von Mises distribution with fixed mean \citep{avg16,nic17}. In this case, the product of the movement kernel $\phi(\cdot)$ and the exponential selection function $\omega(\cdot)$ is proportional to a single log-linear function of the corresponding model parameters and $f_i$ reduces to:
\begin{align*}
f_i(\mathbf{x}_{0,t+1} \mid \mathbf{x}_{0,t}, \mathbf{x}_{0,t-1}, \mathbf{Z}; \boldsymbol{\theta}_i, \boldsymbol{\beta}_i) &=\myfrac[3pt]{\exp\left(\mathbf{C}_{0,t+1}^{\top} \boldsymbol{\theta}_i + \mathbf{Z}_{0,t+1}^{\top}\boldsymbol{\beta}_i-\log(l_{0,t+1})\right)}
{\displaystyle \int_{\mathbf{\tilde{x}} \in \mathcal{D}}
\exp\left(\mathbf{\tilde{C}}^{\top} \boldsymbol{\theta}_i + \mathbf{\tilde{Z}}^{\top}\boldsymbol{\beta}_i-\log(\tilde{l})\right)
d \mathbf{\tilde{x}}}.
\end{align*}
The vector $\mathbf{C}_{0,t+1}$ can be interpreted as a movement covariate vector that contains different step length and turning angle terms. Its exact form depends on the chosen step length and turning angle distributions (Table S1, see also \citealp{nic17}). For example, for gamma-distributed step length and von-Mises-distributed turning angles with mean zero, we have $\mathbf{C}_{0,t+1} = ( \log(l_{0,t+1}), -l_{0,t+1},\allowbreak \cos(\alpha_{0,t+1}))^{\top}$. The corresponding state-dependent movement coefficient vector is $\boldsymbol{\theta}_i=(k_i-1,r_i,\kappa_i)^{\top}$ with $k_i$ and $r_i$ being the shape and rate parameter of the gamma-distribution belonging to state $i$, respectively, and $\kappa_i$ being the state-dependent concentration parameter of the von-Mises distribution. Thus, in this reduced representation of the step-selection density $f_i$, the parameterisation of the movement kernel might differ from the commonly used parameterisation of the corresponding step and angle distributions (e.g.\ $k_i-1$ instead of $k_i$), but there is a direct relationship between the two (Table S1 and S2). The negative log step length included in the exponential function is necessary to correctly represent the movement kernel in a Cartesian coordinate system.
The reduced form of $f_i$ is very convenient. Justified by the law of large numbers, it allows for a joint parameter estimation of the state, movement and selection parameters based on a Markov-switching conditional logistic regression for case-control designs with $M$ control, i.e.\ available, locations per observed, i.e.\ used, location (\citealp{nic17}, see also \citealt{avg16} for step-selection models without state-switching). This forms the basis for the MS-iSSA.
\subsection{Markov-switching integrated step-selection analysis}\label{Sec2_est}
The MS-iSSA workflow is similar to that of the iSSA. For each observed step, we choose $M$ control steps, e.g.\ using a suitable proposal distribution for step length and turning angle, respectively, and extract the corresponding habitat and movement covariate values. This builds the case-control data set. The model parameters are then estimated using a Markov-switching conditional logistic regression, i.e.\ a conditional logistic regression in which the regression coefficients depend on an underlying latent Markov chain. In our implementation, we use the forward algorithm, which is well known especially in the context of HMMs \citep{zuc16}, to efficiently evaluate the corresponding likelihood. This allows for a numerical maximum likelihood estimation based on standard optimisation procedures such as \textit{nlm} in R \citep{RCore22}. Afterwards, it is possible to decode the states, for example, using the Viterbi algorithm \citep{vit67}, which calculates the most likely sequence of states given the fitted model and the case-control data.
More precisely, for each step from location $\mathbf{x}_{0,t}$ to $\mathbf{x}_{0,t+1}$ ($t=2,\ldots,T-1$), we have a choice set $\tilde{\mathbf{x}}_{t+1}=\{\mathbf{x}_{0,t+1},\mathbf{x}_{1,t+1},\ldots,\mathbf{x}_{M,t+1}\}$ that includes the observed and the $M$ control locations for the end point of the step. Usually, the control locations are randomly drawn from a suitable proposal distribution for step length and turning angle \citep{for09}. However, it is also possible to use a grid or a mesh \citep{arc23}. Here the devil is in the detail, as depending on the sampling procedure, the interpretation of the models' movement coefficients might differ (see Section S2 in the Supplementary Material). The interpretation of the selection coefficients, however, remains unaffected.
In the Markov-switching conditional logistic regression, we model the state-dependent choice probability $p_{0ti}$ of choosing the observed location $\mathbf{x}_{0,t+1}$ from the choice set $\tilde{\mathbf{x}}_{t+1}$ given the current state $S_t=i$, as:
\begin{align*}
p_{0ti}(\mathbf{x}_{0,t+1}|\tilde{\mathbf{x}}_{t+1},\mathbf{C},\mathbf{Z};\boldsymbol{\theta}_i,\boldsymbol{\beta}_i)=
\myfrac[2pt]{\exp\left(\mathbf{C}_{0,t+1}^{\top}\boldsymbol{\theta}_i + \mathbf{Z}_{0,t+1}^\top\boldsymbol{\beta}_i\right)}
{\sum_{m=0}^M
\exp\left(\mathbf{C}_{m,t+1}^{\top}\boldsymbol{\theta}_i + \mathbf{Z}_{m,t+1}^\top\boldsymbol{\beta}_i\right)},
\end{align*}
with $\mathbf{C}_{m,t+1}$ and $\mathbf{Z}_{m,t+1}$ being the movement and habitat covariate vectors belonging to location $\mathbf{x}_{m,t+1}$ for $m=0,\ldots,M$. This case-control step-selection probability is closely related to direct numerical integration, which offers an alternative way to approximate the step-selection density $f_i$. We derive the likelihood of the Markov-switching conditional logistic regression by plugging $p_{0ti}$ ($i=1,\ldots,N$) into the HMM likelihood \citep{zuc16},
\begin{align*}
\mathcal{L}(\boldsymbol{\theta},\boldsymbol{\beta};\tilde{\mathbf{x}}_3,\tilde{\mathbf{x}}_4,\ldots,\tilde{\mathbf{x}}_{T},\mathbf{C},\mathbf{Z})
&= \boldsymbol{\delta}^\top \mathbf{P}(\tilde{\mathbf{x}}_{3}) \bm{\Gamma} \mathbf{P}(\tilde{\mathbf{x}}_{4}) \bm{\Gamma} \cdots \bm{\Gamma} \mathbf{P}(\tilde{\mathbf{x}}_{T}) \bm{1},
\end{align*}
where $\mathbf{P}(\tilde{\mathbf{x}}_{t})=\text{diag}(p_{0t1},\ldots,p_{0tN})$ is a diagonal matrix including the state-dependent step-selection probabilities, $\bm{\Gamma}$ and $\boldsymbol{\delta}$ are the transition probability matrix and the initial distribution of the underlying Markov chain, respectively, and $\mathbf{1}$ is an $N$-dimensional vector of ones. We can then estimate the model parameters using a numerical maximisation of the log-likelihood (for details, see \citealp{zuc16}). In our implementation, we restrict the movement parameters to always remain in their natural parameter space, e.g.\ the shape and rate parameters of the gamma distribution are always greater than zero.
For initialisation, the numerical maximisation requires a set of starting values for the model parameters. To avoid ending up in a local maximum of the log-likelihood, it is necessary to test several sets of starting values, for example by randomly drawing values for each model parameter. We discuss this in more detail in Section S3 in the Supplementary Material.
\subsection{Two-step approach}\label{Sec2_TSiSSA}
The TS-iSSA is based on the same idea as the MS-iSSA. However, the TS-iSSA relies on a \textit{prior} classification of the movement data into different movement states. Thus, in a first step, an $N$-state HMM with state-dependent step length and turning angle distributions as defined for the movement kernel is fitted to the data, e.g.\ using a gamma distribution for step length and a von Mises distribution for turning angles. Then, the Viterbi algorithm is used to assign each observed step to one of the $N$ HMM movement states. Alternatively, local state decoding can be used. In the second step, state-specific (i)SSAs are fitted to the state-specific data using a case-control design and conditional logistic regression (e.g.\ \citealp{roe14,kar19}). The control steps for the state-specific case-control data sets are thereby sampled based on the respective state-dependent HMM step length and turning angle distributions.
\subsection{Simulation Study}\label{Sec_Sim}
We used a simulation study with three state-switching scenarios to evaluate the performance of our MS-iSSA approach and to demonstrate possible consequences of either ignoring the underlying latent states in the traditional iSSA or ignoring the uncertainty of prior state-decoding in the TS-iSSA. For each scenario, we generated movement data from a Markov-switching step-selection model with $2$ states and state transition probabilities $\gamma_{11}=\gamma_{22}=0.9$. A realisation of a Gaussian random field with covariance $\sigma^2=1$ and range $\phi=10$, computed using the function \textit{grf} from the R-package \textit{geoR} \citep{rib22}, served as the habitat covariate $\mathbf{Z}$ (Figure S2). For the movement kernel, we used state-dependent gamma and zero-mean von Mises distributions to model step length and turning angle, respectively (Figure \ref{fig:sim_sdds}).
Table \ref{tab:sim_sen} summarises the movement and selection parameters for each of the three simulation scenarios. Scenario 1 is chosen to represent a typical active-inactive scenario in which the first state (``inactive'' state) is associated with small step lengths, less directed movement and no selection, while the second state (``active'' state) corresponds to larger step lengths, more directed movement and attraction to the landscape feature $\mathbf{Z}$. The second and the third scenarios cover the rather extreme cases in which either the selection or the movement parameters are shared across states: In Scenario 2 (``switching preferences''), the two states only differ in their selection patterns with avoidance of the feature in state 1, and attraction to the feature in state 2. In Scenario 3 (``HMM''), only the movement patterns differ across states while there is no selection for or against the landscape feature in either state. This corresponds to a basic movement HMM (see Section \ref{Sec2_basics}). To check the robustness of the MS-iSSA, in Section S5.2 of the Supplementary Material we additionally cover a fourth scenario without state-switching in which the data are generated based on a standard step-selection model. Furthermore, to check the influence of the spatial variation in the habitat feature on the estimation results, we also considered a second landscape feature map which was a realisation of a Gaussian random field with covariance $\sigma^2=1$ and range $\phi=50$ (Section S5.3 in the Supplementary Material).
\begin{table}[t]
\centering
\footnotesize
\begin{tabular}{ld{2.2}rrrd{2.2}rrr}
\toprule
& \multicolumn{4}{c}{state 1} & \multicolumn{4}{c}{state 2} \\\cmidrule(lr{.75em}){2-5}\cmidrule(lr{.75em}){6-9}
& \multicolumn{1}{c}{select.\ fun.\ } & \multicolumn{3}{c}{movement kernel} & \multicolumn{1}{c}{select.\ fun.\ } & \multicolumn{3}{c}{movement kernel} \\\cmidrule(lr{.75em}){2-2}\cmidrule(lr{.75em}){3-5}\cmidrule(lr{.75em}){6-6}\cmidrule(lr{.75em}){7-9}
scenario & \multicolumn{1}{c}{$\beta_1$} & \multicolumn{1}{c}{$k_1$} & \multicolumn{1}{c}{$r_1$} & \multicolumn{1}{c}{$\kappa_1$} & \multicolumn{1}{c}{$\beta_2$} & \multicolumn{1}{c}{$k_2$} & \multicolumn{1}{c}{$r_2$} & \multicolumn{1}{c}{$\kappa_2$} \\\midrule
1 (active-inactive) & 0.00 & 1.20 & 1.25 & 0.30 & 2.00 & 2.50 & 0.29 & 1.00 \\[0.2em]
2 (switching preferences) & -2.00 & 2.50 & 0.29 & 1.00 & 2.00 & 2.50 & 0.29 & 1.00 \\[0.2em]
3 (HMM) & 0.00 & 1.20 & 1.25 & 0.30 & 0.00 & 2.50 & 0.29 & 1.00 \\
4 (iSSA, Supp.\ Mat.) & 2.00 & 2.50 & 0.29 & 1.00 & \multicolumn{1}{c}{--} & -- & -- & -- \\
\bottomrule
\end{tabular}
\caption{Overview of the underlying Markov-switching step-selection model parameters for each simulation scenario. The selection coefficients $\beta_i$ for state $i$ ($i=1,2$), describe the habitat selection and belong to the selection function of the model. The movement kernel parameter vector $\boldsymbol{\theta}_i$ includes the shape $k_i$ and rate $r_i$ of gamma distribution for step length and the concentration parameter $\kappa_i$ of the von-Mises distribution for turning angles. Scenario 4 does not include any state-switching and is covered in the Supplementary Material.}
\label{tab:sim_sen}
\end{table}
\begin{figure}
\caption{Gamma and von Mises distributions for step length and turning angle, respectively, which are used in the simulation study to form the state-dependent movement kernels of the two states. The corresponding parameters are denoted by $k$ (shape), $r$ (rate) and $\kappa$ (concentration). The distributions of state $2$ are shown in orange. The distributions for state $1$ depend on the simulation scenario: In Scenario 2, both states share the same movement kernel and thus, the distributions in orange are also used for state $1$; in the other two Scenarios 1 and 3, the distributions in blue are used for state $1$.}
\label{fig:sim_sdds}
\end{figure}
In each of the $100$ simulation runs per scenario, we simulated movement paths of length $T=1000$ from the corresponding Markov-switching step-selection model and then applied 2-state MS-iSSAs, 2-state TS-iSSAs and iSSAs to corresponding case-control data sets with $M=20$, $M=100$ and $M=500$ randomly drawn control steps per observed step, respectively. We used different numbers of control locations $M$ to check whether the parameter estimates converge to stable values. For the control steps, we used a uniform distribution for turning angles and a proposal gamma distribution for step length, respectively (Section S2). For model selection purposes, we further estimated the parameters of a $2$-state MS-iSSA with movement but without habitat covariates. This corresponds to a basic movement HMM, but fitted to the same case-control data as the candidate models. All models were implemented in R \citep{RCore22}. For the iSSA, we used the \textit{clogit}-function of the \textit{survival} package \citep{the23}. After parameter estimation, we computed AIC and BIC for model selection \citep{bur02} and the basic p-values of the estimated selection coefficients. For the TS-iSSA and MS-iSSA we further computed the state misclassification rate, i.e.\ the percentage of states that were not correctly classified using the Viterbi algorithm. These metrics were used to evaluate the estimation and classification accuracy of the candidate models, and to assess the performance of standard model selection procedures.
\subsection{Case Study on bank vole interactions}\label{Sec_Case}
To illustrate the use of MS-iSSAs on empirical data, we applied them to movement data of synchronously tracked bank vole individuals (\textit{Myodes glareolus}) as analysed in \citet{schl19}. The data set contains $6$-minute locations of $n=28$ individuals split into $8$ groups, i.e.\ replicates, with $2$ males and $1$-$2$ females each. The individuals within a replicate were synchronously tracked in fenced quadratic outdoor enclosures of $2500\text{m}^2$ for $3-5$ days using collars with small radio telemetry transmitters (1.1 g, BD‐2C, Holohil Systems Ltd., Canada) and a system of automatic receiving units (Sparrow systems, USA). For bank vole individuals tracked under natural conditions, the estimated home range sizes were on average $2029.18\text{m}^2$ with a core area of $549.23\text{m}^2$ \citep{sch19}. Thus, the size of the enclosures allowed the individuals to express their natural movement and space use. Due to daily system maintenance, locations were missing for approximately one hour per day. Otherwise, movement paths were complete. This resulted in $602$–$1,200$ locations per individual split into $3-5$ bursts of around $23$ hours each.
To study interactions between the bank vole individuals, i.e.\ attraction, avoidance or neutral behaviour towards each other, \citet{schl19} applied SSAs to each individual of each replicate, respectively, using occurrence estimates of the conspecifics as covariates. The occurrence estimate of an individual provides a map of the individual's space use during a certain time window, indicating areas of higher and lower probability of occurrence during that time period. It is estimated from the discrete sample of observed locations through kriging \citep{fle16}. To account for the movement of individuals, occurrence estimates are computed using a rolling time window (here $4$ hours).
The analysis focused on interactions between males and females: Males were expected to mainly show attraction towards females, while females could show any of the three behaviours depending on their reproductive state \citep{schl19}. The authors suggested that the relatively large number of non-significant interaction coefficients, especially found for male interactions with females, might be caused by unobserved mixtures of different underlying behavioural modes. Bank voles are polyphasic with resting phases of approximately $3\text{h}$ and active phases of approximately $1\text{h}$ following on each other \citep{mir90}. We therefore applied $2$-state MS-iSSAs to the same data to investigate i) if the state-switching model is capable of detecting meaningful biological states, and ii) if we find different significant selection, i.e.\ interaction coefficients using the state-switching approach.
For each individual, we used a $2$-state MS-iSSA with state-dependent gamma distributions for step length, and uniform distribution for turning angle, respectively. Occurrence estimates of each conspecific within the same replicate were used as covariates for the selection part of the model \citep{schl19}. We did not include a resource covariate, as vegetation was sufficiently homogeneous within enclosures. Thus, with $M=500$ available steps per used step, the corresponding selection covariate vector for individual $k$ at time $t$ and locations $\mathbf{x}_{m,t}$, $m=1,\ldots,500$, was given by $\mathbf{Z}_{k,m,t}=(\{O_{-k,m,t}\})$, where $\{O_{-k,m,t}\}$ denotes the set of occurrence estimates of the respective conspecifics within the same replicate. The corresponding movement covariate vector was $\mathbf{C}_{k,m,t}=(\log(l_{k,m,t}),-l_{k,m,t})$. Parameters were then estimated using a Markov-switching conditional logistic regression with $50$ sets of random starting values to initialise the optimisation (Section \ref{Sec2_est}). For model comparison, we further applied corresponding iSSAs (no state-switching) and HMMs (no selection) to the same, and $2$-state TS-iSSAs (prior state-classification) to a similar case-control data set for each individual.
\section{Results}\label{Sec_Res}
\subsection{Simulation study}\label{Sec_Res_Sim}
\begin{figure}
\caption{Boxplots of the parameter estimates across the $100$ simulation runs for each applied method, simulation scenario and number of control locations $M$, respectively. The rows refer to the estimated selection coefficient (beta), the shape and rate of the gamma-distribution for step length and the concentration parameter (kappa) of the von Mises distribution for turning angle, respectively. The columns refer to the three different simulation scenarios. For each method (iSSA, TS-iSSA and MS-iSSA) and state (state 1: blue, state 2: orange, no state differentiation: black), the three adjacent boxplots refer to the use of $M=20$, $M=100$ and $M=500$ control locations per used location for the parameter estimation. Note that in Scenario 2, the TS-iSSA is naturally not capable of distinguishing between two states as both share the same movement kernel. Thus, there are only results for a single state.}
\label{fig:Sim_res}
\end{figure}
Overall, the number of available steps $M$ only slightly affected the estimation results in this simulation exercise; in particular, the results for $M=100$ and $M=500$ are very similar (Figure \ref{fig:Sim_res}). Thus, the results seem to be stable. The MS-iSSA performed very well across all simulation scenarios and did not produce any evident bias even in the more extreme Scenarios 2 (``switching preferences'') and 3 (``HMM''; Figure \ref{fig:Sim_res}, Tables S3 and S4). The TS-iSSA was able to detect two suitable states in both scenarios with state-dependent movement kernels (Scenarios 1 and 3), although there was a small but evident bias for some parameters, for example, for the selection coefficients in Scenario 1 ($0.18$ in state $1$, $-0.13$ in state $2$ for $M=500$), and for the shape parameter in Scenario 3 ($0.12$ in state $2$ for $M=500$). For Scenario 1 (``active-inactive''), this is also reflected in the rather large percentage of significant selection coefficients across the simulation runs in state 1 ($39-40\%$ at a significance level of $\alpha=0.05$, Table \ref{tab:pv}), although the true coefficient is equal to zero. Thus, in contrast to the MS-iSSA, the p-values of the TS-iSSA are not reliable in this active-inactive setting.
\begin{table}[!t]
\centering
\footnotesize
\begin{tabular}{lccccccc}
\toprule
& & \multicolumn{2}{c}{Scen.\ 1} & \multicolumn{2}{c}{Scen.\ 2} & \multicolumn{2}{c}{Scen.\ 3}\\\cmidrule(lr{.75em}){3-4}\cmidrule(lr{.75em}){5-6}\cmidrule(lr{.75em}){7-8}
\rowcolor{white} model & no.\ cont.\ & $\beta_1=0$ & $\beta_2=2$ & $\beta_1=-2$ & $\beta_2=2$ & $\beta_1=0$ & $\beta_2=0$ \\\midrule
\multirow{3}{*}{iSSA} & 20 & 100 & -- & 57 & -- & 16 & -- \\
& 100 & 100 & -- & 57 & -- & 17 & -- \\
& 500 & 100 & -- & 58 & -- & 17 & -- \\\midrule
\multirow{3}{*}{TS-iSSA} & 20 & 40 & 100 & 58 & -- & 5 & 6 \\
& 100 & 42 & 100 & 57 & -- & 4 & 6 \\
& 500 & 39 & 100 & 57 & -- & 4 & 5 \\\midrule
\multirow{3}{*}{MS-iSSA} & 20 & 2 & 100 & 100 & 100 & 4 & 5 \\
& 100 & 3 & 100 & 100 & 100 & 2 & 5 \\
& 500 & 1 & 100 & 100 & 100 & 5 & 6 \\
\bottomrule
\end{tabular}
\caption{Percentage of simulation runs in which the selection coefficients are estimated to be significantly different from zero at a significance level of $\alpha=0.05$, for each scenario and fitted model, respectively.}
\label{tab:pv}
\end{table}
\begin{table}[h!t]
\centering
\footnotesize
\begin{tabular}{lcccc}
\toprule
& \multicolumn{3}{c}{MS-iSSA} & \\\cmidrule(lr{.75em}){2-4}
\multicolumn{1}{c}{scenario} & 20 & 100 & 500 & HMM \\\midrule
1 (active-inactive) & 4.05 (0.83) & 3.76 (0.82) & \textbf{3.70} (0.79) & 5.93 (1.47)\\[0.2em]
2 (switching preferences) & 2.12 (0.53) & 2.00 (0.52) & \textbf{1.94} (0.50) & 49.01 (4.38)\\[0.2em]
3 (HMM) & 2.49 (0.49) & 2.42 (0.51) & \textbf{2.38} (0.55) & 2.39 (0.53)\\[0.2em]
\bottomrule
\end{tabular}
\caption{Mean misclassification rate with standard deviation in parentheses across the $100$ simulation runs for each scenario and fitted state-switching model, respectively. The misclassification rate is calculated as the percentage of states incorrectly classified using the Viterbi sequence. The lowest misclassification rate for each scenario is highlighted in bold face.}\label{tab:Vit}
\end{table}
The iSSA is by its nature unable to distinguish between the underlying states and thus, did not recover the true underlying parameters in either scenario. Especially in Scenario 2 (``switching preferences''), the iSSA selection coefficients were estimated close to zero and the associated p-values would misleadingly indicate no selection for or against the landscape feature in $42\%-43\%$ of the simulation runs (Table \ref{tab:pv}). Note that the TS-iSSA produced similar results to the iSSA in this scenario, since the inherent HMM classification was not able to distinguish between states that share the same movement kernel, and therefore all steps were classified to belong to the same state.
\begin{table}[h!t]
\centering
\footnotesize
\begin{tabular}[t]{lccccccc}
\toprule
& & \multicolumn{3}{c}{AIC} & \multicolumn{3}{c}{BIC}\\\cmidrule(lr{.75em}){3-5}\cmidrule(lr{.75em}){6-8}
Scenario & no.\ cont.\ & iSSA & HMM & MS-iSSA & iSSA & HMM & MS-iSSA \\\midrule
\multirow{3}{*}{Scen.\ 1} & 20 & 0 & 0 & \textbf{100} & 0 & 0 & \textbf{100} \\
& 100 & 0 & 0 & \textbf{100} & 0 & 0 & \textbf{100} \\
& 500 & 0 & 0 & \textbf{100} & 0 & 0 & \textbf{100} \\\midrule
\multirow{3}{*}{Scen.\ 2} & 20 & 2 & 0 & \textbf{98} & 2 & 0 & \textbf{98} \\
& 100 & 2 & 0 & \textbf{98} & 2 & 0 & \textbf{98} \\
& 500 & 2 & 0 & \textbf{98} & 2 & 0 & \textbf{98} \\\midrule
\multirow{3}{*}{Scen.\ 3} & 20 & 0 & \textbf{88} & 12 & 0 & \textbf{100} & 0 \\
& 100 & 0 & \textbf{91} & 9 & 0 & \textbf{100} & 0 \\
& 500 & 0 & \textbf{86} & 14 & 0 & \textbf{100} & 0 \\
\bottomrule
\end{tabular}
\caption{Percentage of simulation runs in which the three candidate models are selected by either AIC or BIC for each simulation scenario and number of control points used for model fitting, respectively. The cells belonging to the true underlying model are highlighted using bold face.}
\label{tab:ic}
\end{table}
For all three simulation scenarios, the MS-iSSA with $M=500$ available steps achieved the lowest misclassification rate (Table \ref{tab:Vit}). As the data in Scenario 3 were simulated from an HMM, the HMM classification was equally accurate in this scenario. Overall, the MS-iSSA clearly outperformed the other candidate models in its estimation and classification performance in all scenarios.
As the TS-iSSA involves an a-priori HMM classification, it does not provide a proper maximum likelihood value. It is therefore not possible to calculate corresponding AIC or BIC values for model selection. Thus, we only considered iSSAs without state-switching, HMMs without selection (fitted to the same case-control data sets) and MS-iSSAs as candidate models to evaluate information-criteria based model selection in this modelling framework. For Scenario 1 and 2, AIC and BIC performed very well and selected the true underlying model in $100$ and $98\%$ of the simulation runs, respectively (Table \ref{tab:ic}). In Scenario 3 (``HMM''), the AIC tended to select the true HMM model in most of the cases but occasionally selected the more complex MS-iSSA ($9-14\%$ of the runs), while the BIC again selected the correct model in all simulation runs.
Overall, the simulation runs with lower spatial variation in the landscape variable produced similar results (Section S5.3). However, the lower spatial variation reduces the influence of the habitat selection function on space use. Therefore, the variance in the estimates slightly increased, the HMM misclassification rate decreased in Scenario 1, and the MS-iSSA misclassification rate increased in Scenario 2. In the supplementary simulation scenario without state-switching, the MS-iSSA was able to recover the true underlying values in state 1, but produced unusable estimates in state 2 (Section S5.2).
\subsection{Case Study on bank vole interactions}\label{Sec_Res_Case}
For most bank vole individuals, the MS-iSSA approach could reasonably distinguish between two activity levels. State 1 was always associated to shorter step lengths compared to state 2 which could correspond to a rather inactive behaviour (Figure \ref{fig:vole_gamma}; mean of the estimated gamma distribution for step length ranging from $1.43$ to $7.38$ in state 1, and from $8.13$ to $20.85$ in state 2, respectively). According to the Viterbi decoded state sequences, the ``less active'' state 1 was occupied between $15.29\%$ and $66.71\%$ of the observed time period (Table S7), except for male 1 in replicate $4$ which spent $96.60\%$ of the time in state 1 according to its decoded state sequence. It is also the individual with the largest estimated mean step length in both states ($7.38$ in state 1, $20.85$ in state 2). Thus, for this male, interpretation must be taken with care. The TS-iSSA provided mostly similar results for the movement kernel and state classification (Figure S10).
\begin{figure}
\caption{Estimated state-dependent gamma distributions for step length as implied by the fitted $2$-state MS-iSSAs for each individual in replicates $1-8$, respectively. The distributions are weighted by the relative state occupancy frequencies derived from the Viterbi sequence. The gray histograms in the background show the distribution of the observed step lengths.}
\label{fig:vole_gamma}
\end{figure}
\begin{figure}
\caption{Estimated iSSA and MS-iSSA selection coefficients (solid points/triangles) of interaction behaviour between individuals of opposing sexes within the eight replicates (1–8), including $95\%$ confidence intervals (solid lines). Each replicate consisted of two males (male 1 and male 2) and one or two females (female 1 and female 2) such that each
individual could respond to up to two opposite‐sex individuals (dot: response to female/male 1, triangle: response to female/male 2 within a replicate). Non‐significant coefficients (p-values above 0.05) are greyed out. The horizontal dashed line indicates zero (i.e. neutral behaviour); positive coefficients indicate attraction, while negative
coefficients would indicate avoidance.}
\label{fig:vole_coeff}
\end{figure}
For $21$ of the $28$ bank vole individuals, the MS-iSSA results implied neutral behaviour towards conspecifics in state 1 as all selection coefficients were non-significant ($\alpha=0.05$; Figure \ref{fig:vole_coeff} and Figure S9). This matches well with the interpretation of a less active/inactive state. For two individuals, the results indicated avoidance behaviour in state 1. In state 2 (``active state''), most bank voles showed attraction to at least one bank vole of opposite sex as implied by the positive and significant selection coefficients. However, for four males and four females, the coefficients for occurrence of individuals with opposite sex were non-significant in both states. These are mainly the individuals for which the iSSA also implied neutral behaviour (Figure \ref{fig:vole_coeff}). However, for 3 individuals, i.e.\ male 1 in replicate 7, female 1 in replicate 4, and female 1 in replicate 8, the MS-iSSA indicated attraction towards another individual of opposite sex, while the iSSA indicated neutrality. The opposite is true for female 1 in replicate 7 for which only the iSSA indicated attraction. The selection coefficients for occurrence of individuals with same sex usually implied neutral behaviour in state 1, and neutral or attraction behaviour in state 2 (Figure S9).
Overall, the results of the TS-iSSA are in line with the results of the MS-iSSA (Figures S10, S11 and S12), although the implications are slightly different for nine individuals. Regarding information-criteria based model selection, for most bank voles, AIC and BIC pointed to the Markov-switching step-selection model (Table S8). However, for 10 individuals, including half of the female individuals, BIC selected a simpler model, i.e.\ iSSA or HMM. The selection of HMMs mainly corresponded to cases with many non-significant MS-iSSA selection coefficients. The iSSA was preferred by BIC for male 1 in replicate $4$ and female 2 in replicate 8.
\section{Discussion}\label{Sec_Dis}
In this paper, we discussed the relationship between iSSA without underlying behavioural states, the two-step approach TS-iSSA and the joint approach MS-iSSA and compared them in both a simulation and a case study. Thereby, we highlighted possible consequences of either ignoring underlying behavioural states or using a prior HMM-based state classification to take them into account. This provides important implications for the practical application of fine-scale habitat selection analyses.
Combining ideas of iSSAs and HMMs in a single model, MS-iSSAs build a convenient modelling framework to study state-dependent movement and habitat selection based on animal movement data \citep{nic17,pri22}. This makes a prior state classification unnecessary, which, as demonstrated in the simulation study, could otherwise lead to biased estimates and misleading conclusions (see also \citealp{pri22}). In particular, the MS-iSSA accounts for uncertainties in both the latent state and the observation process which allows for further inference, while the TS-iSSA completely ignores the uncertainties in the state decoding. This renders classical p-values of the TS-iSSA invalid.
Moreover, the MS-iSSA can detect states associated to same movement but different selection behaviour (Scenario 2 in the simulation study), which is not possible using a prior classification that ignores the selection patterns. While Scenario 2 (``switching preferences'') might cover a rather extreme case, one could imagine, for example, an underlying hungry and a thirsty state where the animal is searching for either food or water, or an attraction and neutrality/avoidance state where the animal is either attracted to another individual or ignoring/avoiding possible social interactions. Even if the movement patterns might not be completely the same across these states, they might largely overlap and therefore lead to problems and high uncertainties in the prior state decoding of the TS-iSSA. This is contrasted with Scenario 3 (``HMM'') of the simulation study, which does not include any habitat selection; the states are solely associated with different movement kernels. Here, an HMM-based classification is suitable and the TS-iSSA and MS-iSSA perform equally well. Still, the TS-iSSA does not propagate the uncertainties of the state-decoding.
Our analyses further demonstrate that ignoring underlying behavioural states completely by using standard iSSA can strongly corrupt results on selection behaviour. While theoretically expected, a systematic evaluation and quantification of this effect had been lacking. Our study shows that iSSA tends to average out different selection behaviours in different behavioural states. This can lead to simple over- or underestimation of selection strength, keeping the overall direction of selection (i.e., avoidance or attraction) correct. However, it can also lead to more serious problems when selection behaviour has opposing directions in different states. In this case, we found that selection was estimated to be non-significant, which would lead to a strongly erroneous biological conclusion. This result corroborates the surmise that small effect sizes or non-significant results in step-selection analyses may in fact be due to underlying behavioural state switching \citep{schl19} and more generally the caveat that failure to detect an effect does not imply lack of an effect.
MS-iSSAs have successfully been applied to study habitat selection of bison and zebra in encamped and exploratory states \citep{nic17,pri22}, to detect the onset of mule deer migration and to evaluate the behavioural response of bison on the presence of wolves \citep{pri22}. In our case study, we extend the scope of application to fine-scale interactions of simultaneously tracked bank voles. Here, the 2-state MS-iSSA provided a reasonable separation into a rather inactive state mostly associated with neutral behaviour towards the conspecifics, and an active state often associated with attraction behaviour. However, according to the decoded state sequences, the voles spent more time in the active state than expected ($62.73\%$ of the time on average instead of $25.00\%$). For one male bank vole individual, the state-classification within the MS-iSSA was different. Its second state captured only rare observations with large displacement, while the first state accounted for all other observations. Here, the Viterbi sequence assigned over $96\%$ of the observations to state $1$ and the estimated MS-iSSA showed larger mean step lengths in the estimated state-dependent gamma distributions than for all other individuals. Thus, the second state either captured rare events or outlying observations. This demonstrates that similar care is needed when interpreting the MS-iSSA states as for general HMMs in an unsupervised learning context \citep{mcc20}.
In the active state, we generally expected males to look for females, while females might show different interactions with males depending on the reproductive state \citep{schl19}. For example, females in estrous may actively seek out males to generate mating opportunities away from the nest to lower the risk of infanticide \citep{ecc18}. In contrast, females that are not in estrous state might show avoidance or neutrality toward males. In line with \citet{schl19}, for the male bank voles, we found either attraction or neutral behaviour toward the females in the active state. This was, however, also the case for the female responses to male occurrences. While this might reflect the true individual interaction patterns, it might also be an artefact of measurement errors or the fence around the enclosures that limited the space use. Furthermore, some selection coefficient estimates had rather large confidence intervals possibly associated to the small number of observations for the rather complex model structure. This also prevented the use of a $3$-state model that might have been able to differentiate between pure foraging and social interaction states.
With behavioural states being unobserved, it is usually unclear whether they manifest themselves in a given empirical data set. In both the bank-vole and simulation study, we therefore considered information criteria to select between the candidate models iSSA, HMM and MS-iSSA. Especially the BIC performed well in our simulation study. For the TS-iSSA, such likelihood-based criteria cannot be applied as there is no proper joint maximum log-likelihood value for the state and observation process. This is another drawback of the two-step approach. Besides indicating if the inclusion of states or the inclusion of the selection function are appropriate for a given application, information criteria could also be used to select between MS-iSSAs with different covariate sets or generally to select an appropriate number of meaningful biological states $N$. In the context of HMMs, however, the latter has proven difficult, as information criteria, especially the AIC, tend to select overly complex models with a rather large number of states \citep{cel08,poh17}. We expect this to be the case also for MS-iSSAs. Therefore, besides information criteria, the selection of the number of states should further be based on a close inspection of the fitted models, and involve expert knowledge (``pragmatic order selection'', \citealp{poh17}). This is also highlighted in the supplementary simulation scenario which does not include state-switching. Furthermore, future research could focus on the development of appropriate model checking methods for (Markov-switching) step-selection models.
It is important to note that the resolution of the data in time and space can strongly influence the model results and interpretation. Data sets with different resolutions might reflect different state, movement and selection patterns of an animal \citep{may09,ada19}. For example, an individual can exhibit many behaviours during a long time interval, e.g.\ during $24$ hours, and thus, a coarse time resolution might hinder the model from detecting biological states such as resting and foraging or provide only crude state proxies. However, migration modes might be reflected in the data. On the other hand, movement and selection patterns might not directly be expressed in steps at very fine time resolution, e.g.\ based on one location every second \citep{mun21}. Thus, the temporal resolution of the data must match the time scale in which the animal expresses its state, movement and selection patterns of interest. Moreover, if the spatial resolution of a covariate map is too coarse, important habitat features might be overlooked in the analysis \citep{zel17}. Thus, the resolution of the data is a key factor in MS-iSSAs. However, once movement and habitat data are available at a suitable resolution in space and time for a given species and research question at hand, the MS-iSSA approach can flexibly be applied to study fine-scale state-dependent movement and habitat selection. To facilitate its use, the basic MS-iSSA is implemented in the R-package \textit{msissa} available on GitHub \citep{pohle23}.
\section*{Author contributions}
JP, UES and JS conceived the ideas and designed the study. JP implemented the methods with input from UES and JS. JS and JP implemented the R-package. JAE and MD provided the telemetry data and ecological input for the case study. JP led the writing of the manuscript, supported by UES and JS. All authors contributed critically to the drafts and gave final approval for publication.
\section*{Data Availability}
The bank vole data are available from the Dryad Digital Repository: \href{https://doi.org/10.5061/dryad.rt535m8}{https://doi.org/10.5061/dryad.rt535m8} \citep{schl19}.
\end{spacing}
\end{document} |
\begin{document}
\title{On a class of quaternary complex Hadamard matrices}
\begin{abstract}
We introduce a class of regular unit Hadamard matrices whose entries consist of two complex numbers and their conjugates for a total of four complex numbers. We then show that these matrices
are contained in the Bose-Mesner algebra of an association scheme arising from skew Paley matrices.
\end{abstract}
\section{Introduction}
An $n \times n$ matrix $H$ is a \emph{unit Hadamard matrix} if its entries are all complex numbers of modulus 1 and it satisfies $HH^* = nI_n$. If the entries of $H$ are all complex $k^\text{th}$ roots of unity, it is called a \emph{Butson Hadamard matrix}, referred to as a $\mathcal{B}Hnk$, and the particular case of $k=2$ is a \emph{Hadamard matrix}. Following Compton et al.~\cite{Compton2015} we call a Butson or unit Hadamard matrix \textit{unreal} if its entries are strictly in $\mathbb{C} \setminus \mathbb{R}$. A Hadamard matrix $H$ of order $n$ is said to be of \emph{skew type}, if $H=I +W$, where $W$ is a skew symmetric $(0,\pm 1)$-matrix. It follows that $WW^T=(n-1)I_n$. For a thorough examination of unit and Butson Hadamard matrices, we refer the reader to Sz\"oll\H{o}si's PhD thesis~\cite{Szollosi}, and for some fundamental results and applications of Hadamard matrices, we refer the reader to Seberry and Yamada's 1992 survey~\cite{seberrySurvey}.
Given a matrix $A$ of order $n$, let $R_i$ denote the $i$-th row of $A$, $S(R_i)$ the sum of all entries of $R_i$ and $S(A)$, called the \emph{excess of $A$}, the sum of all its entries.
A result of~\cite{Best} implies that for a unit Hadamard matrix of order $n$, $|S(A)| \le n \sqrt n$ and equality occurs if and only if $|S(R_i)|=\sqrt n$ for $1\le i \le n$. A unit Hadamard matrix $A$ of order $n$ is called \emph{regular} if $|S(R_i)|=\sqrt n$ for $1\le i \le n$, see \cite{KS} for details.
In this paper we introduce a recursive method to construct pairs of $(\pm 1)$-matrices satisfying two specific equations. Similar recursive methods were presented in 2005 to obtain symmetric designs and orthogonal designs~\cite{recOrthogonalDesigns, recSymmetricDesigns}.
Assuming the existence of a skew type Hadamard matrix of order $q+1$, we show the pairs of matrices obtained from our recursive method can be used to construct infinite classes of a special type of unit Hadamard matrices of order $q^m$, for each positive integer $m$, which we have dubbed \emph{quaternary unit Hadamard} matrices. In particular, as a corollary we conclude that for each prime power $q \equiv 3 \pmod{4}$, there are infinite classes of unreal $\mathcal{B}H{3^m}{6}$'s, and quaternary unit Hadamard matrices of order $q^m$. Moreover, we will demonstrate that all of the constructed Butson Hadamard matrices and quaternary unit Hadamard matrices are regular, and some of those have multicirculant structure.
Some of the results in this paper are closely related to part of the results in a recent paper by Compton et al.~\cite{Compton2015}, see also \cite{muk}. Among other results, Compton et al. proved the existence of $\mathcal{B}H{3^m}{6}$'s for each integer $m \ge 0$. Herein, we too will construct $\mathcal{B}H{3^m}{6}$'s. However, our matrices are distinguished from those of Compton et al. in that our $\mathcal{B}H{3^m}{6}$'s are regular and multicirculant. In their paper, Compton et al. also showed that a $\mathcal{B}H{n}{6}$ is equivalent to a pair of amicable $(\pm 1)$-matrices satisfying a certain equation, and that this pair of matrices can be used to construct a Hadamard matrix. We have generalized this result in Section~\ref{sect:concepts} by introducing quaternary Hadamard matrices and showing that they are equivalent to a pair of amicable $(\pm 1)$-matrices satisfying an equation analogous to that introduced by Compton et al. Moreover, we will show that the pairs of amicable $(\pm 1)$-matrices equivalent to quaternary unit Hadamard matrices can be used to construct Hadamard matrices. Next, in Section~\ref{sect:result} we will introduce a recursive method to construct such pairs of matrices, and we will use this method to show that for each prime power $q \equiv 3 \pmod{4}$ and integer $m \ge 0$, we can construct infinite classes of unreal $\mathcal{B}H{3^m}{6}$'s and unreal quaternary unit Hadamard matrices of order $q^m$. Finally, in Chapter 4, we introduce an association scheme whose Bose-Mesner algebra contains our quaternary unit Hadamard matrices.
\section{Quaternary Unit Hadamard Matrices}\label{sect:concepts}
\begin{definition}
\label{def:quaternary}
We say that an $n \times n$ unit Hadamard matrix $H$ is \textit{quaternary} if there is a positive integer $m$ such that the entries of $H$ are all in the set $\left\{\pm \frac{1}{\sqrt{m+1}} \pm i \sqrt{\frac{m}{m+1}}, \pm \frac{1}{\sqrt{m+1}} \mp i \sqrt{\frac{m}{m+1}}\right\}$. For short, we refer to such a quaternary unit Hadamard matrix as a $\mathcal{F}UHnm$.
\end{definition}
It is readily verified that any $\mathcal{F}UH{n}{1}$ or $\mathcal{F}UH{n}{3}$ is also a Butson Hadamard matrix.
\begin{lem}
\label{lem:rootsOfUnity}
Let $m$ be a positive integer. Then $\zeta=\frac{1}{\sqrt{m+1}} + i \sqrt{\frac{m}{m+1}}$ is a root of unity if and only if $m=1$ or $m=3$.
\end{lem}
\begin{proof}
If $\zeta$ is a root of unity, then so are $\zeta^2$ and $\bar{\zeta}^2$. Thus $\zeta^2+{\bar{\zeta}}^2=\frac{-2(m-1)}{m+1}$ is an algebraic integer, and hence an integer. This implies that $m=1$ or 3. \end{proof}
The next proposition follows immediately from the previous lemma and the observation that any $\mathcal{F}UH{n}{1}$ or $\mathcal{F}UH{n}{3}$ is also a Butson Hadamard matrix.
\begin{prop}
A $\mathcal{F}UHnm$ is a Butson Hadamard matrix if and only if $m=1$ or $m=3$.
\end{prop}
We now demonstrate that $\mathcal{F}UHnm$'s are equivalent to pairs of $n \times n$ matrices satisfying certain properties. First, however, recall a definition.
\begin{definition}
Two complex matrices $A$ and $B$ are called \textit{amicable} if $AB^* = BA^*$.
\end{definition}
In the reference~\cite{Compton2015}, Compton et al. establish the following result.
\begin{thm}[Compton et al.,~\cite{Compton2015}]
\label{thm:comptonEquivalence}
An unreal $\mathcal{B}H{n}{6}$ is equivalent to a pair of $n \times n$ amicable $(\pm 1)$-matrices $A$ and $B$ satisfying $AA^T+3BB^T = 4nI_n$.
\end{thm}
With little difficulty, this result can be generalized in the following manner. Assume $H$ is a $\mathcal{F}UHnm$.
Then we can write
$$ H = \frac{1}{\sqrt{m+1}} A + i \sqrt{\frac{m}{m+1}} B $$
for some $(\pm 1)$-matrices $A$ and $B$. Therefore,
\begin{align*}
nI_n &= \left(\frac{1}{\sqrt{m+1}} A + i \sqrt{\frac{m}{m+1}} B\right)\left(\frac{1}{\sqrt{m+1}} A + i \sqrt{\frac{m}{m+1}} B\right)^*,
\end{align*}
so
$$ n(m+1)I_n = AA^T + m BB^T + i \sqrt{m}(BA^T-AB^T). $$
Since the right-hand-side must be real, this proves the following generalization of Theorem~\ref{thm:comptonEquivalence}.
\begin{thm}
\label{thm:comptonGen}
A $\mathcal{F}UHnm$ is equivalent to a pair of $n \times n$ amicable $(\pm 1)$-matrices $A$ and $B$ satisfying $AA^T + mBB^T = (m+1)nI_n$.
\end{thm}
\section{A Recursive Method}\label{sect:result}
In this section we will introduce a recursive construction for pairs of matrices satisfying the aforementioned properties.
We use $j_n$ and $J_n$ to denote the $1 \times n$ and $n \times n$ all-ones matrices respectively. Subscripts will be dropped where no ambiguity arises.
Let $q+1$ be the order of a skew type Hadamard matrix $H$. Multiply rows and columns of $H$, if necessary, to get the matrix \[ \left (\begin{matrix} 1 & j\\-j^T & I+Q\end{matrix}\right).\] The $(0,\pm 1)$-matrix $Q=(q_{ij})_{i,j=1}^{q}$, called the \emph{skew symmetric core} of the skew type Hadamard matrix, is
skew symmetric, $J_qQ=QJ_q=0$, and $QQ^T=qI_q-J_q$. For any odd prime power $q$ the
Jacobsthal matrix of order $q$ defined by
$$
q_{ij} = \chi_q(a_i - a_j) \qquad (a_i,a_j\in GF(q)),
$$
where $\chi_q$ denotes the quadratic character in $GF(q)$, enjoys the following important properties:
\begin{enumerate}
\item $Q$ is symmetric if $q \equiv 1 \pmod{4}$ and skew symmetric if $q \equiv 3 \pmod{4}$.
\item $J_qQ=QJ_q=0.$
\item $QQ^T = qI_q-J_q$.
\end{enumerate}
So, Jacobsthal matrices provide many examples of skew symmetric cores.
Let $q$ be the order of a skew symmetric core $Q$. Define the following matrices recursively for each nonnegative integer $m$.
\begin{align}
\label{eqn:JA}
\mathcal{J}^{(q)}_m & = \begin{cases}
J_1 & \text{if } m=0\\
J_q \otimes \mathcal{A}^{(q)}_{m-1} & \text{otherwise}
\end{cases} , &
\mathcal{A}^{(q)}_m & = \begin{cases}
J_1 & \text{if } m=0\\
I_q \otimes \mathcal{J}^{(q)}_{m-1} + Q \otimes \mathcal{A}^{(q)}_{m-1} & \text{otherwise.}
\end{cases}
\end{align}
It should be noted that when no ambiguity arises, for brevity we will drop the superscripts on $\mathcal{J}^{(q)}_m$ and $\mathcal{A}^{(q)}_m$.
It is not hard to prove by induction that $\mathcal{J}^{(q)}_m$ and $\mathcal{A}^{(q)}_m$ are amicable for each nonnegative integer $m$. Indeed, the base case is clear, and using the induction hypothesis together with the fact that $J_qQ=QJ_q=0$, note that
\begin{align*}
\mathcal{J}_{m+1} \mathcal{A}_{m+1}^T &= (J_q \otimes \mathcal{A}_m)(I_q \otimes \mathcal{J}_m + Q \otimes \mathcal{A}_m)^T \\
&= J_q \otimes (\mathcal{A}_m\mathcal{J}_m^T) \\
&= J_q \otimes (\mathcal{J}_m\mathcal{A}_m^T) \\
&= (I_q \otimes \mathcal{J}_m + Q \otimes \mathcal{A}_m)(J_q \otimes \mathcal{A}_m)^T \\
&= \mathcal{A}_{m+1}\mathcal{J}_{m+1}^T.
\end{align*}
It follows that $\mathcal{J}^{(q)}_m$ and $\mathcal{A}^{(q)}_m$ are amicable for each integer $m \ge 0$.
It is also straightforward to prove by induction that
$$\mathcal{J}^{(q)}_m(\mathcal{J}^{(q)}_m)^T + q \mathcal{A}^{(q)}_m(\mathcal{A}^{(q)}_m)^T = q^m(q+1)I_{q^m}$$
whenever $q+1$ is the order of a skew type Hadamard matrix. Again the base case is clear. Using the induction hypothesis together with the facts that $Q$ is skew symmetric, that $QQ^T = qI_q - J_q$, and that $\mathcal{J}^{(q)}_m$ and $\mathcal{A}^{(q)}_m$ are amicable, we obtain
\begin{align*}
\mathcal{J}_{m+1}&\mathcal{J}_{m+1}^T + q \mathcal{A}_{m+1}\mathcal{A}_{m+1}^T \\
&= (J_q \otimes \mathcal{A}_m)(J_q \otimes \mathcal{A}_m^T) + q(I_q \otimes \mathcal{J}_m + Q \otimes \mathcal{A}_m)(I_q \otimes \mathcal{J}_m^T + Q^T \otimes \mathcal{A}_m^T) \\
&= qJ_q \otimes \mathcal{A}_m\mathcal{A}_m^T + qI_q \otimes \mathcal{J}_m\mathcal{J}_m^T - q\, Q \otimes \mathcal{J}_m\mathcal{A}_m^T + q\, Q \otimes \mathcal{A}_m\mathcal{J}_m^T + q \,QQ^T \otimes \mathcal{A}_m\mathcal{A}_m^T \\
&= qJ_q \otimes \mathcal{A}_m\mathcal{A}_m^T + qI_q \otimes \mathcal{J}_m\mathcal{J}_m^T + q(qI_q - J_q) \otimes \mathcal{A}_m\mathcal{A}_m^T \\
&= qI_q \otimes (\mathcal{J}_m\mathcal{J}_m^T + q \mathcal{A}_m\mathcal{A}_m^T) \\
& = q^{m+1}(q+1)I_{q^{m+1}}.
\end{align*}
Therefore, for each skew symmetric core of order $q$ and integer $m \ge 0$, the matrices $\mathcal{J}^{(q)}_m$ and $\mathcal{A}^{(q)}_m$ are a pair of amicable $(\pm 1)$-matrices satisfying
$$\mathcal{J}^{(q)}_m(\mathcal{J}^{(q)}_m)^T + q \mathcal{A}^{(q)}_m(\mathcal{A}^{(q)}_m)^T = q^m(q+1)I_{q^m}.$$
Thus, using the results of Section~\ref{sect:concepts} we obtain a $\mathcal{F}UH{q^m}{q}$. Explicitly, the quaternary unit Hadamard matrices are
\begin{align}
\label{eqn:rec4UH}
\frac{1}{\sqrt{q+1}} \mathcal{J}^{(q)}_m + i \sqrt{\frac{q}{q+1}} \mathcal{A}^{(q)}_m.
\end{align}
In the next subsection, we will show that these quaternary unit Hadamard matrices have some interesting properties. However, first we will take this opportunity to make a brief comment on our recursive method. Notice that our proof that $\mathcal{J}^{(q)}_m$ and $\mathcal{A}^{(q)}_m$ are a pair of amicable $(\pm 1)$-matrices satisfying
$$\mathcal{J}^{(q)}_m(\mathcal{J}^{(q)}_m)^T + q \mathcal{A}^{(q)}_m(\mathcal{A}^{(q)}_m)^T = q^m(q+1)I_{q^m}$$
only relied on the fact that $J_1$ is amicable with itself and that $J_1J_1^T + qJ_1J_1^T = (q+1)I_1$. Therefore, it is straightforward to see that given any pair of $n \times n$ amicable $(\pm 1)$-matrices $X$ and $Y$ satisfying $XX^T + qYY^T=n(q+1)I_n$, where $q$ is the order of a skew symmetric core, the matrices
\begin{align*}
\mathcal{X}_m & = \begin{cases}
X & \text{if } m=0\\
J_q \otimes \mathcal{Y}_{m-1} & \text{otherwise}
\end{cases} , &
\mathcal{Y}_m & = \begin{cases}
Y & \text{if } m=0\\
I_q \otimes \mathcal{X}_{m-1} + Q \otimes \mathcal{Y}_{m-1} & \text{otherwise}
\end{cases}
\end{align*}
are amicable and satisfy
$$
\mathcal{X}_m \mathcal{X}_m^T + q\mathcal{Y}_m \mathcal{Y}_m^T = nq^m(q+1)I_{nq^m}
$$
for each integer $m \ge 0$.
As an application, for the prime power $q \equiv 1 \pmod{4}$, by using the Jacobsthal matrix $Q$ and complex numbers we can get a recursive construction similar to that in Equation~(\ref{eqn:JA}):
\begin{align}
\label{eqn:CD}
\mathcal{C}^{(q)}_m & = \begin{cases}
J_1 & \text{if } m=0\\
J_q \otimes \mathcal{D}^{(q)}_{m-1} & \text{otherwise}
\end{cases} , &
\mathcal{D}^{(q)}_m & = \begin{cases}
J_1 & \text{if } m=0\\
I_q \otimes \mathcal{C}^{(q)}_{m-1} + i \, Q \otimes \mathcal{D}^{(q)}_{m-1} & \text{otherwise}
\end{cases}.
\end{align}
Almost identical proofs to those above show that $\mathcal{C}^{(q)}_m$ and $\mathcal{D}^{(q)}_m$ are always amicable and that
$$
\mathcal{C}^{(q)}_m(\mathcal{C}^{(q)}_m)^* + q\mathcal{D}^{(q)}_m(\mathcal{D}^{(q)}_m)^* = q^m(q+1)I_{q^m}.
$$
Therefore, the following is a unit Hadamard matrix (each entry of the matrix being one of $\pm 1, \pm i$, it is called a \emph{quaternary Hadamard matrix}) for each $m \ge 0$ and prime power $q \equiv 1 \pmod{4}$.
$$
\left(
\begin{array}{cc}
0 & j_q \\
j_q^T & Q
\end{array}
\right)
\otimes \mathcal{D}^{(q)}_m + i \, I_{q+1} \otimes \mathcal{C}^{(q)}_m.
$$
\subsection{An infinite class of quaternary unit Hadamard matrices}
By using the appropriate Jacobsthal matrix $Q$ in the definition of $\mathcal{J}^{(q)}_m$ and $\mathcal{A}^{(q)}_m$, we can ensure that the resulting $\mathcal{F}UH{q^m}{q}$'s have an interesting structure, which we have dubbed \textit{multicirculant}. Before defining this structure, we remind the reader that a circulant matrix with first row $(a_1, \dots, a_n)$ is denoted $\text{circ}(a_1, \dots, a_n)$, and that a block-circulant matrix is a matrix of the form $\text{circ}(A_1, \dots, A_n)$, where the $A_i$ are its blocks.
\begin{definition}
Let $M$ be a matrix of order $n$. If $n=1$, then we call $M$ a multicirculant matrix. If $n>1$, then we call $M$ multicirculant if and only if it is a block-circulant matrix whose blocks are multicirculant matrices.
\end{definition}
Two facts about multicirculant matrices are straightforward to verify and will be used shortly. First, the Kronecker product of two multicirculant matrices is itself a multicirculant matrix. Second, if $A$ and $B$ are two multicirculant $n \times n$ matrices such that all their blocks are of the same dimensions, then $A+B$ is also a multicirculant matrix.
For any odd prime power $q$, it is well known that one can construct a multicirculant Jacobsthal matrix $Q$. When $q \equiv 3 \pmod{4}$, use a multicirculant Jacobsthal matrix to construct $\mathcal{J}^{(q)}_m$ and $\mathcal{A}^{(q)}_m$. Then the two facts listed in the previous paragraph imply that $\mathcal{J}^{(q)}_m$ and $\mathcal{A}^{(q)}_m$ will be multicirculant. It follows that the $\mathcal{F}UH{q^m}{q}$'s in Equation~(\ref{eqn:rec4UH}) are multicirculant.
The $\mathcal{F}UH{q^m}{q}$'s in Equation~(\ref{eqn:rec4UH}) have another interesting property: they have maximal excess. To prove this, we introduce a lemma.
\begin{lem}
\label{lem:excessJA}
The following holds for all integers $m \ge 0$ and prime powers $q \equiv 3 \pmod{4}$.
\begin{enumerate}[(i)]
\item $S(\mathcal{J}^{(q)}_{2m}) = S(\mathcal{A}^{(q)}_{2m}) = q^{3m}$.
\item $S(\mathcal{J}^{(q)}_{2m+1}) = q^{3m+2}$.
\item $S(\mathcal{A}^{(q)}_{2m+1}) = q^{3m+1}$.
\end{enumerate}
\end{lem}
\begin{proof}
First notice that
\begin{equation}
\begin{split}
S(\mathcal{J}_{m}) & = S(J_q \otimes \mathcal{A}_{m-1}) \\
& = S( J_q \otimes (I_q \otimes \mathcal{J}_{m-2} + Q \otimes \mathcal{A}_{m-2})) \\
& = S((J_q \otimes I_q) \otimes \mathcal{J}_{m-2}) + S((J_q \otimes Q) \otimes \mathcal{A}_{m-2}) \\
& = S(J_q)S(I_q)S(\mathcal{J}_{m-2}) + S(J_q)S(Q)S(\mathcal{A}_{m-2}) \\
& = q^3\, S(\mathcal{J}_{m-2})
\end{split}
\label{eq:recJ}
\end{equation}
and
\begin{equation}
\begin{split}
S(\mathcal{A}_{m}) & = S(I_q \otimes \mathcal{J}_{m-1} + Q \otimes \mathcal{A}_{m-1}) \\
& = S( I_q \otimes (J_q \otimes \mathcal{A}_{m-2}) ) + S(Q)S(\mathcal{A}_{m-1}) \\
& = S(J_q)S(I_q)S(\mathcal{A}_{m-2}) \\
& = q^3 \, S(\mathcal{A}_{m-2}).
\end{split}
\label{eq:recA}
\end{equation}
We now prove that $S(\mathcal{J}^{(q)}_{2m}) = S(\mathcal{A}^{(q)}_{2m}) = q^{3m}$ by induction on $m$. For the base case, notice $S(\mathcal{J}_0) = S(\mathcal{A}_0) = 1$. Now suppose $k \ge 0$ and that $S(\mathcal{J}^{(q)}_{2k}) = S(\mathcal{A}^{(q)}_{2k}) = q^{3k}$. Equation~(\ref{eq:recJ}) together with the induction hypothesis implies $S(\mathcal{J}_{2(k+1)}) = q^3 \, S(\mathcal{J}_{2k}) = q^{3(k+1)}$. Similarly, Equation~(\ref{eq:recA}) and the induction hypothesis imply $S(\mathcal{A}_{2(k+1)}) = q^{3(k+1)}$. Thus for all nonnegative integers $m$ we have $S(\mathcal{J}^{(q)}_{2m}) = S(\mathcal{A}^{(q)}_{2m}) = q^{3m}$. It follows that (i) holds. We can prove (ii) and (iii) similarly.
\end{proof}
Lemma~\ref{lem:excessJA} makes it easy to calculate the excess of the $\mathcal{F}UH{q^m}{q}$'s in Equation~(\ref{eqn:rec4UH}). To compute the excess of these matrices, we consider separately the cases when $m$ is odd and even. First, use Lemma~\ref{lem:excessJA} to observe that
\begin{align*}
S\left( \frac{1}{\sqrt{q+1}} \mathcal{J}^{(q)}_{2m} + i \sqrt{\frac{q}{q+1}} \mathcal{A}^{(q)}_{2m}\right)
&= \frac{1}{\sqrt{q+1}} S\left(\mathcal{J}_{2m}\right) + i \sqrt{\frac{q}{q+1}} S\left(\mathcal{A}_{2m}\right) \\
&= \frac{q^{3m}}{\sqrt{q+1}}\left( 1 + i \sqrt{q} \right).
\end{align*}
Therefore,
$$ \left|S\left( \frac{1}{\sqrt{q+1}} \mathcal{J}^{(q)}_{2m} + i \sqrt{\frac{q}{q+1}} \mathcal{A}^{(q)}_{2m}\right)\right| = \left|\frac{q^{3m}}{\sqrt{q+1}}\left( 1 + i \sqrt{q} \right)\right| = q^\frac{3(2m)}{2}. $$
Using a similar computation one can show that
$$ \left|S\left( \frac{1}{\sqrt{q+1}} \mathcal{J}^{(q)}_{2m+1} + i \sqrt{\frac{q}{q+1}} \mathcal{A}^{(q)}_{2m+1}\right)\right| = q^\frac{3(2m+1)}{2}. $$
Therefore, for any $m \ge 0$ we have
$$ \left|S \left( \frac{1}{\sqrt{q+1}} \mathcal{J}^{(q)}_{m} + i \sqrt{\frac{q}{q+1}} \mathcal{A}^{(q)}_{m}\right)\right| = q^\frac{3m}{2}.$$
The excess meets Best's upper bound \cite{Best}, so the matrices are regular.
In summary, we have established the following theorem.
\begin{thm}
Let $q$ be the order of a skew symmetric core. Then for each positive integer $m$, there is a regular $\mathcal{F}UH{q^m}{q}$ with excess $q^\frac{3m}{2}$. Furthermore, if $q$ is an odd prime power $q \equiv 3 \pmod{4}$ the constructed regular quaternary unit Hadamard matrix is multicirculant.
\end{thm}
\section{Association schemes}
There are many relationships between Hadamard matrices and association schemes; see \cite[Theorem 1.8.1]{BCN}, \cite{GC,HT} for Hadamard matrices and \cite{cg10,IM} for unit Hadamard matrices.
In this section we show that the quaternary unit Hadamard matrices are contained in some commutative association scheme.
A \emph{(commutative) association scheme of class $d$}
with vertex set $X$ of size $n$
is a set of non-zero $(0,1)$-matrices $A_0, \ldots, A_d$, which are called {\em adjacency matrices}, with
rows and columns indexed by $X$, such that:
\begin{enumerate}[(i)]
\item $A_0=I_n$.
\item $\sum_{i=0}^d A_i = J_n$.
\item For any $i\in\{0,1,\ldots,d\}$, $A_i^T\in\{A_0,A_1,\ldots,A_d\}$.
\item For any $i,j\in\{0,1,\ldots,d\}$, $A_iA_j=\sum_{k=0}^d p_{ij}^k A_k$
for some $p_{ij}^k$'s.
\item For any $i,j\in\{0,1,\ldots,d\}$, $A_iA_j=A_jA_i$.
\end{enumerate}
The vector space spanned by the $A_i$'s forms a commutative algebra, denoted by $\mathcal{A}$ and is called the \emph{Bose-Mesner algebra} or \emph{adjacency algebra}.~There exists a basis of $\mathcal{A}$ consisting of the primitive idempotents, say $E_0=(1/n)J_n,E_1,\ldots,E_d$.
Since $\{A_0,A_1,\ldots,A_d\}$ and $\{E_0,E_1,\ldots,E_d\}$ are two bases of $\mathcal{A}$, there exists a change-of-basis matrix $P=(P_{ij})_{i,j=0}^d$
such that
\begin{align*}
A_j=\sum_{i=0}^d P_{ij}E_i.
\end{align*}
The matrix $P$
is called the {\em
eigenmatrix}.
Write $Q=A_1-A_2$ for disjoint $(0,1)$-matrices $A_1,A_2$, and let $A_0=I_q$.
Note that $A_1,A_2$ are the adjacency matrices of the doubly regular tournaments on $q$ vertices; see \cite{RB}.
Let $\mathfrak{X}^{(q)}$ be the association scheme with adjacency matrices $A_0,A_1,A_2$.
Then the association scheme has the following eigenmatrix $P$:
\begin{align*}
P&=\left(
\begin{array}{cccccc}
1 & \frac{q-1}{2} & \frac{q-1}{2} \\
1 & \frac{-1+\sqrt{-q}}{2} & \frac{-1-\sqrt{-q}}{2} \\
1 & \frac{-1-\sqrt{-q}}{2} & \frac{-1+\sqrt{-q}}{2}
\end{array}
\right).
\end{align*}
We define $\mathfrak{X}_m^{(q)}$ as the association scheme obtained from the $m$-times tensor products of the adjacency matrices $\mathfrak{X}^{(q)}$.
The adjacency matrices of $\mathfrak{X}_m^{(q)}$ are $A_{i_1}\otimes\cdots\otimes A_{i_m}$, $(i_1,\ldots,i_m)\in\{0,1,2\}^m$.
Letting $E_0,E_1,E_2$ be the primitive idempotents of $\mathfrak{X}^{(q)}$, the primitive idempotents of $\mathfrak{X}_m^{(q)}$ are $E_{i_1}\otimes\cdots\otimes E_{i_m}$, $(i_1,\ldots,i_m)\in\{0,1,2\}^m$.
Note that for suitable ordering of the indices of the adjacency matrices and the primitive idempotents, the eigenmatrix $P_m$ of $\mathfrak{X}_m^{(q)}$ is
\begin{align*}
P_m=P\otimes\cdots\otimes P \quad \text{($m$ factors)}.
\end{align*}
Now we have the following proposition.
\begin{prop}
The quaternary unit Hadamard matrix $\frac{1}{\sqrt{q+1}}\mathcal{J}_m^{(q)}+i\sqrt{\frac{q}{q+1}}\mathcal{A}_m^{(q)}$ is in the Bose-Mesner algebra of $\mathfrak{X}_m^{(q)}$.
\end{prop}
\begin{proof}
We prove by induction that $\mathcal{J}_m^{(q)}$ and $\mathcal{A}_m^{(q)}$ are in the Bose-Mesner algebra of $\mathfrak{X}_m^{(q)}$.
The cases for $m=1,2$ are clear.
Assume the cases $m-1,m-2$ to be true.
The Bose-Mesner algebra of $\mathfrak{X}_m^{(q)}$ contains elements $J_q\otimes A_{i_2}\otimes\cdots \otimes A_{i_m}$ where $A_{i_2}\otimes\cdots \otimes A_{i_m}$ is an adjacency matrix of $\mathfrak{X}_{m-1}^{(q)}$.
Using the induction hypothesis for $m-1$, the Bose-Mesner algebra of $\mathfrak{X}_m^{(q)}$ contains $\mathcal{J}_m^{(q)}=J_q\otimes \mathcal{A}_{m-1}^{(q)}$.
By
\begin{align*}
I_q\otimes \mathcal{J}_{m-1}^{(q)}&=I_q\otimes J_q \otimes \mathcal{A}_{m-2}^{(q)}, \\
Q\otimes \mathcal{A}_{m-1}^{(q)}&=(A_1-A_2)\otimes \mathcal{A}_{m-1}^{(q)},
\end{align*}
and the induction hypothesis for $m-1$ and $m-2$, the Bose-Mesner algebra of $\mathfrak{X}_m^{(q)}$ contains $\mathcal{A}_m^{(q)}=I_q\otimes \mathcal{J}_{m-1}^{(q)}+Q\otimes \mathcal{A}_{m-1}^{(q)}$.
This completes the proof.
\end{proof}
\noindent {\bf Acknowledgments.}
Part of this note is contained in Kai Fender's undergraduate Honours thesis, written under the supervision of Hadi Kharaghani. Hadi Kharaghani is supported in part by an NSERC Discovery Grant and ULRF. Sho Suda is supported by JSPS KAKENHI Grant Number 15K21075. Kai Fender was supported by an NSERC-USRA.
Thanks to the anonymous referees for their invaluable suggestions. The short and elegant proof of Lemma 2.2 is due to a referee; it replaces our longer proof.
\end{document} |
\begin{document}
\begin{abstract}
We solve a class of weighted isoperimetric problems of the form
\[
\min\left\{\int_{\partial E}w e^V\,dx:\int_E e^V\,dx={\rm constant}\right\}
\]
where $w$ and $V$ are suitable functions on $\mathbb R^d$. As a consequence, we prove a comparison result for
the solutions of degenerate elliptic equations.
\end{abstract}
\maketitle
\section{Introduction}
In the celebrated paper \cite{T}, G. Talenti established several comparison results between the solutions of the
Poisson equation with Dirichlet boundary condition (with suitable data $f$ and $E$):
\begin{equation}\label{problematalenti}
-\Delta u=f \mbox{ in $E$},\qquad u=0 \mbox{ on $\partial E$}
\end{equation}
and the solutions of the corresponding problem where $f$ and $E$ are replaced by their spherical
rearrangements (see \cite[Chapter 3]{LL} for the definition and main properties of spherical rearrangement).
Precisely, he proves that if we denote by $v$ the solution
of the
problem with symmetrized data, then the rearrangement $u^*$ of the (unique) solution $u$ of \eqref{problematalenti}
is
pointwise bounded by $v$. Moreover he shows that the $L^q$ norm of $\nabla u$ is bounded, as well, by the $L^q$
norm of $\nablabla v$, for $q\in(0,2]$. The proof of these facts basically relies on two ingredients: the
Hardy-Littlewood-Sobolev
inequality and the isoperimetric inequality (see \cite{amfupa} and \cite{LL} for comprehensive
accounts on the
subjects).
Later on, following such a scheme, many other works have been developed to prove analogous comparison results
related to the solutions of PDEs involving different kind of operators, see for instance
\cite{bbmp1,bbmp2,bbmp3,bcm1,bcm2,bla,blafeopos,cinesi} and the references therein. A recurring idea in these works is, roughly speaking, the following:
the operator considered is usually linked to a sort of {\em weighted perimeter}. Thus initially it is necessary
to
solve a corresponding isoperimetric problem; then
the desired comparison results can be obtained following the ideas contained
in \cite{T}.
\noindent
For example in
\cite{bbmp2} the authors consider a class of weighted perimeters of the form
\[
P_w(E)=\int_{\partial E}w(|x|)\,d\mathcal{H}^{d-1}(x),
\]
where $E$ is a set with Lipschitz boundary and $w:\mathbb R\to[0,\infty)$ a non-negative function, and prove, under
suitable convexity
assumptions on the weight $w$, that the ball centered at the origin is the unique solution of the mixed
isoperimetric problem
\[
\min\{P_w(E):|E|={\rm constant}\}
\]
where $|\cdot|$ denotes the $d$-dimensional Lebesgue measure. As a consequence they prove comparison
results, analogous to those considered by Talenti in \cite{T}, for the solutions
of
\[
-{\rm div}(w^2\nabla u)=f \mbox{ in $E$},\qquad u=0 \mbox{ on $\partial E$}.
\]
\noindent
Recently in \cite{bdr}, L. Brasco, G. De Philippis and the second author proved a quantitative
version of the weighted isoperimetric inequality considered in \cite{bbmp1}. Their proof is achieved
by means of a sort of {\em calibration technique}. One advantage of this technique is that it is adaptable
to other kind of problems, as that of considering other kind of functions in the weighted perimeter (e.g.
Wulff-type weights, see \cite{bf}), or that of considering different measured spaces, as $\mathbb R^d$ endowed with
the Gauss measure.
\noindent
In this paper we consider degenerate elliptic equations with Dirichlet boundary condition of
the form
\begin{equation}\label{problem}
-\mathrm{div}(w^2\,e^{V}\nabla u)=f\,e^{V} \, \mbox{ in $E$},\qquad u=0 \mbox{ on $\partial E$}
\end{equation}
where $w$ and $V$ are two given functions, and we aim to prove
analogous comparison results as those in \cite{T}. The particular form in which is written the measure $e^V$ is
due to the later applications, whose main examples are Gauss-type measures, that is $V(x)=-c|x|^2$. Bearing in
mind this instance, we consider a class of mixed isoperimetric problems of the form
\[
\min\left\{P_{we^V}(E):\int_E e^{V}={\rm constant}\right\}
\]
and prove, by means of a calibration technique reminiscent of that developed in \cite{bdr}, that the solutions,
under suitable
assumptions on $V$ and $w$, are half-spaces, see Proposition \ref{semispazides} and Theorem \ref{brasco0}.
Then, using a suitable concept of rearrangement related to the measures considered,
we
prove, in the Main Theorem in Section \ref{main}, comparison results between the solutions of
\eqref{problem} and the solutions of the same
equation with rearranged data.
\section{Preliminaries on rearrangement inequalities}\label{prerequisiti}
In this section we introduce the main definitions and properties about the concept of symmetrization and
rearrangement we shall make use of.
\noindent
Let $\mu$ be a finite Radon measure on $\mathbb R^d$, a \emph{right rearrangement} with respect to $\mu$
is defined, for any Borel set $A$, as
\[
R_A^\mu=\{(x_1,x')\in\mathbb R\times\mathbb R^{d-1}\,:\,x_1> t_A\},
\]
where $t_A=\inf\left\{t\,: \mu(A)=\mu(\{(x_1,x')\in\mathbb R\times\mathbb R^{d-1}\,:\,x_1> t\})\,\right\}$. Notice that if
$d\mu=fdx$, for some positive and measurable function $f$, then the value of $t$ is uniquely determined.\\
\noindent
Given a non-negative Borel function $f:\mathbb R^d\to[0,+\infty)$, we call \emph{right increasing rearrangement} of $f$
the
function $f^{*\mu}$ given by
\[
f^{*\mu}(x)=\int_0^{+\infty}\chi_{R^\mu_{\{f>t\}}}(x)\,dt
\]
where $\chi_A$ is the characteristic function of the set $A$. As an aside we notice that the right increasing
rearrangement of the characteristic function of a Borel set
$A$ coincides with the characteristic function of $R_A^\mu$. Clearly $f^{*\mu}$ is non-negative, increasing with
respect to
the first variable $x_1$, and constant on the sets $\{(x_1,x')\in\mathbb R\times\mathbb R^{d-1}:x_1=t\}$, for
$t\in\mathbb R$.
Moreover $f$ and $f^{*\mu}$ share the same distribution function:
\[
\mu_f(t):=\mu(\{f>t\})=\mu(\{f^{*\mu}>t\})=\mu_{f^{*\mu}}(t).
\]
We furthermore define $f^{\star\mu}:\mathbb R^+\rightarrow\mathbb R^+$ as the smallest decreasing function satisfying
$f^{\star\mu}(\mu_f(t))\geq t$; in other words
\[
f^{\star\mu}(s)=\inf\{t>0\,:\, \mu_f(t)<s\}.
\]
It is useful to bear in mind that $\{s:f^{\star\mu}(s)>t\}=[0,\mu_f(t)]$ so that by the Layer-Cake Representation
Theorem (see for instance \cite{LL}) we have
\begin{equation}\label{symlc}
\int_0^{\mu(\{x_1>t\})}f^{\star\mu}(s)\,ds=\int_{t}^\infty \mu_f(s)\,ds=\int_{\{x_1>t\}} f^{*\mu}(x)\,dx.
\end{equation}
\noindent
We conclude this section by proving the {\it Hardy-Littlewood}
rearrangement inequality related to the right symmetrization.
\begin{lem}[Hardy-Littlewood rearrangement inequality]\label{hardy}
Let $f$ and $g$ be non-negative Borel functions from $\mathbb R^d$ to $\mathbb R$. Then for any non-negative Borel measure
$\mu$ we have
\[
\int_{\mathbb R^d}f\,g\,d\mu\leq\int_{\mathbb R^d}f^{*\mu}g^{*\mu}d\mu.
\]
\end{lem}
\begin{proof}
We have
\[
\begin{aligned}
\int_{\mathbb R^d}f\,g\,d\mu&=\int_{\mathbb R^d}\int_0^\infty\int_0^\infty \chi_{\{f>t\}}(x)\chi_{\{g>s\}}(x)\,dt\,ds\,d\mu(x)\\
&=\int_0^\infty\int_0^\infty\int_{\mathbb R^d}
\chi_{\{f>t\}\cap \{g>s\}}(x)\,d\mu(x)\,dt\,ds\\
&=\int_0^\infty\int_0^\infty\mu(\{f>t\}\cap\{g>s\})\,dt\,ds
\\
&\le\int_0^\infty\int_0^\infty\min(\mu(\{f>t\}),\,\mu(\{g>s\}))\,dt\,ds
\\&
=\int_0^\infty\int_0^\infty\min(\mu(\{f^{*\mu}>t\}),\,\mu(\{g^{*\mu}>s\}))\,dt\,ds
\\
&=\int_0^\infty\int_0^\infty\mu(\{f^{*\mu}>t\}\cap\{g^{*\mu}>s\})\,dt\,ds=
\int_{\mathbb R^d}f^{*\mu}\,g^{*\mu}\,d\mu,
\end{aligned}
\]
where we used the fact that $\{f^{*\mu}>t\}$ and $\{g^{*\mu}>s\}$ are half-spaces of the form
$\{(x_1,x')\in\mathbb R\times\mathbb R^{d-1}:x_1>r\}$ for some $r\in\mathbb R$ and so
\[
\min(\mu(\{f^{*\mu}>t\}),\,\mu(\{g^{*\mu}>s\}))=\mu(\{f^{*\mu}>t\}\cap\{g^{*\mu}>s\}).
\]
\end{proof}
\begin{remark}
Setting $g=\chi_A$ in Lemma \ref{hardy} and thanks to \eqref{symlc} we get
\begin{equation}\label{hardy2}
\int_A f\,dx\leq \int_{R_A^\mu}f^{*\mu}(x)\,dx=\int_0^{\mu(A)}f^{\star\mu}(s)\,ds.
\end{equation}
\end{remark}
\section{A class of weighted isoperimetric inequalities}
Given a measurable function $V:\mathbb R^d\to \mathbb R$ we denote by $\mu[V]$ the absolutely continuous measure whose
density equals $e^ V$, that is, for any measurable set $E\subset\mathbb R^d$
\[
\mu[V](E)=\int_E e^{V(x)} dx;
\]
in what follows with the scope of simplifying the notation, and if there is no risk of confusion, we will
drop the dependence of $V$, writing $\mu$ instead of $\mu[V]$.
Moreover we will often adopt the notation $x=(x_1,x')\in\mathbb R\times\mathbb R^{d-1}$ and denote by $R_A$ instead of
$R_A^{\mu[V]}$ the right rearrangement of $A$ with respect to the measure $\mu[V]$. Given a Borel {\it
weight} function $w:\mathbb R\to[0,+\infty]$ we define, for any open set $A$ with Lipschitz boundary, the
following concept of {\em weighted perimeter}:
\[
P_{w,V}(A)=\int_{\partial A}w(x_1)e^{V(x)}d\mathcal H^{d-1}(x).
\]
In the following proposition we show that, under suitable conditions on $w$ and $V$, the half-spaces of the form
$\{(x_1,x'):x_1>t\}$ are
the only minimizers of the weighted perimeter among the sets of fixed volume with respect to the measure
$\mu[V]$.
\begin{pro}\label{semispazides}
Let $A\subset\mathbb R^d$ be a set with Lipschitz boundary. Suppose that $w : \mathbb R\to\mathbb R^+$ and
$V:\mathbb R^d\to\mathbb R$ are
$C^1$-regular functions
satisfying the following assumptions:
\begin{itemize}
\item[{\it(i)}] $\mu(A)=\mu(R_A)<+\infty$,
\item[{\it (ii)}] the function $\partial_1 V(x)$ depends only on $x_1$ and $g(x):=-w'(x_1)-w(x_1)\partial_1 V(x)$ is a
non-negative decreasing function on the
real line.
\end{itemize}
Then
\begin{equation}\label{disprincipale}
P_{w,V}(A)\geq P_{w,V}(R_A).
\end{equation}
\end{pro}
\begin{proof}
We start by noticing that if $P_{w,V}(A)=+\infty$ there is nothing to prove. Hence we can suppose that
\begin{equation}\label{perimetrofinito}
P_{w,V}(A)<+\infty.
\end{equation}
Let $e_1=(1,0,\dots,0)\in\mathbb R^d$ and consider the vector field $-e_1w(x_1)e^{V(x)}$. Its divergence is given by
\[
\mathrm{div} (-e_1w(x_1)e^{V(x)})=(-w'(x_1)-w(x_1)\partial_1 V(x))e^{V(x)}=g(x)e^{V(x)}.
\]
By an application of the Divergence Theorem we have
\begin{equation}\label{perimestimate}
\begin{aligned}
\int_Ag(x)d\mu(x)&=\int_{A}\mathrm{div} (-e_1w(x_1)e^{V(x)})dx\\
&=\int_{\partial A}w(x_1)e^{V(x)}\langle\nu_A(x),-e_1\rangle d\mathcal H^{d-1}(x)\\
&\le\int_{\partial A}w(x_1)e^{V(x)}d\mathcal H^{d-1}(x)=P_{w,V}(A),
\end{aligned}
\end{equation}
\noindent
where $\nu_A(x)$ is the outer unit normal to $\partial A$ at $x$.
Let $t_A$ be a real number such that the right half-space $R_A=\{(x_1,x'):x_1\ge t_A\}$ satisfies
$\mu(R_A)=\mu(A)$. Then, since the outer normal of $R_A$ is the constant vector field $-e_1$,
the
inequality in \eqref{perimestimate} turns into an equality
if we replace $A$ with $R_A$. Notice that by condition $(ii)$ and \eqref{perimestimate} we have
\[
P_{w,V}(R_A)=\int_{R_A\setminus A}g\,d\mu+\int_{R_A\cap A}g\, d\mu\le g(t_A)\mu(A)+P_{w,V}(A).
\]
Thanks to assumption $(i)$ and \eqref{perimetrofinito} such quantities are finite and so we get
\[
P_{w,V}(A)-P_{w,V}(R_A)\geq\int_Ag(x)d\mu(x)-\int_{R_A}g(x)d\mu(x).
\]
Since, by definition, $\mu(A)=\mu(R_A)<+\infty$ again by condition $(i)$ we obtain $\mu(A\setminus
R_A)=\mu(R_A\setminus A)<+\infty$.
Thus
\begin{equation}\label{finito}
\begin{aligned}
\int_Ag(x)&d\mu(x)-\int_{R_A}g(x)d\mu(x)=\int_{A\setminus R_A}g(x)d\mu(x)-\int_{R_A\setminus
A}g(x)d\mu(x)\\
&=\int_{A\setminus R_A}(g(x)-g(t_Ae_1))d\mu(x)-\int_{R_A\setminus A}(g(x)-g(t_Ae_1))d\mu(x).
\end{aligned}
\end{equation}
Since every $x\in A\setminus R_A$ (respectively $x\in R_A\setminus A$) satisfies $\langle x,e_1\rangle<t_A$
(respectively $\langle x,e_1\rangle>t_A$), by condition {\it (ii)} we deduce
\begin{equation}\label{quantitativa}
\begin{aligned}
P_{w,V}(A)-P_{w,V}(R_A)&\ge\int_{A\setminus R_A}|g(x)-g(t_Ae_1)|d\mu(x)+\int_{R_A\setminus
A}|g(x)-g(t_Ae_1)|d\mu(x)
\\&=\int_{A\Delta R_A}|g(x)-g(t_Ae_1)|d\mu\geq 0,
\end{aligned}
\end{equation}
where $A\Delta R_A=(A\setminus R_A)\cup (R_A\setminus A)$ stands for the symmetric difference between $A$ and
$R_A$. This concludes the proof.
\end{proof}
\begin{oss}[Necessity of the assumptions]\rm
We stress that the integrability condition $(i)$ is necessary to formulas \eqref{perimestimate} and \eqref{finito}
(and thus to our proof) to
work.
\noindent
Concerning condition $(ii)$, we note that it is needed just for technical reasons.
Nonetheless we stress that our proof offers a slightly stronger inequality than \eqref{disprincipale}. Indeed the
right-hand side of \eqref{quantitativa} may be seen as a modulus of continuity of the $L^1$ distance between $A$
and $R_A$. Thus it would be interesting to understand how much our hypotheses are far from optimality
(compare also with \cite[Remark $2.3$]{bdr}).
\end{oss}
\begin{oss}[Equality cases]\rm
An inspection of the proof of Proposition \ref{semispazides}, and in particular of inequality
\eqref{perimestimate}, shows that if $w>0$, then we have equality in \eqref{disprincipale} only if $A$ is
equal to the half space $R_A$, up to set of
zero $d$-dimensional Lebesgue measure. On the other hand, if the set $\{w=0\}$ has positive Lebesgue measure, we
can not expect any kind of uniqueness for the equality cases of such an inequality.
\end{oss}
\begin{example}\label{esempione}\rm
A non-trivial example fulfilling condition $(ii)$ of Proposition \ref{semispazides} is the
following
\[
V(x_1,x')=-c(x_1|x_1|+|x'|^2),\quad w(x_1)=e^{-ax_1},
\]
with $a,c>0$ constants satisfying $a^2-2c\ge0$. To prove this fact we initially observe that if $x_1\ne0$ such a
condition
is equivalent to require that
\begin{equation}\label{oder}
w''(x_1)+V_1''(x_1)w(x_1)+V_1'(x_1)w'(x_1)\ge0
\end{equation}
which turns out to be equivalent, in our example, to
\[
a^2-2c+2ac|x_1|\ge0.
\]
Then, since $-w'(x_1)-w(x_1)\partial_1 V(x_1)$ is continuous at $x_1=0$,
condition
$(ii)$ is satisfied everywhere.
\end{example}
To transform inequality \eqref{disprincipale} into a well posed isoperimetric problem, it would be more advisable
to eliminate the integrability hypothesis $(i)$ in Proposition \ref{semispazides} by requiring that the measure
$\mu(\mathbb R^d)<+\infty$. This fact, together with ordinary differential inequality required in assumption $(ii)$, is
seldom satisfied.
Hence, to get other instances of functions which fulfill
inequality
\eqref{oder} together with the integrability property {\it (i)} of Proposition
\ref{semispazides} it is worth restricting our attention to the half-space
\[
\mathbb R^d_+=\{(x_1,x')\in\mathbb R\times\mathbb R^{d-1}: x_1>0\}.
\]
As an immediate corollary of Proposition \ref{semispazides} we get that
the solution of the problem
\begin{equation}\label{errore}
\min\left\{P_{w,V}(A): A\subseteq\mathbb R^d_+,\,\,\mu(A)=c, \,\,\partial A\,\,{\rm Lipschitz}\right\}
\end{equation}
\noindent
is given by $R_c=\{x_1\ge t_c\}$ where $t_c$ is such that $\mu(R_c)=c$.\\
\begin{oss}\label{osservazionciona2}\rm
Notice that the non-mixed Gauss case, $w$ constant and $V(x)={-c|x|^2}$, is not covered by our hypotheses.
Nevertheless in this case examples of functions $w$ which satisfy the hypotheses of Proposition
\ref{semispazides} are given by $w(t)=t^{-a}$ with $a\ge1$ or $w(t)=b+e^{-at}$, with $a,b\ge 0$ such that
$a^2-2c(1+b)>0$ (as can be easily seen reasoning as in the previous example). In the latter case at least if
$b=0$ we have that
\[
we^V=e^{a^2/(4c)}\exp{\left(-c\left|x+{\bf e_1}\frac{a}{2c}\right|^2\right)},
\]
where ${\bf e_1}=(1,0,\dots,0)\in\mathbb R^d$,
which can be rephrased\footnote{as suggested us by an anonymous Referee.}
as
the fact that
the solutions of the isoperimetric problem in the half-space $\mathbb R^d_+$ with
(suitable) mixed Gaussian conditions
\[
\min\left\{P_{\gamma_{\sigma,\eta}}(E): \gamma_{\sigma,0}(E)={\rm constant},\,\, E\subseteq \mathbb R^d_+,\,\,\partial E
\,\,{\rm Lipschitz} \right\}
\]
are right-half spaces. Here we
denoted by $\gamma_{\sigma,\eta}$ the normal distribution whose covariance matrix is
$\sigma\,\mathrm{Id}$ and whose mean vector $\eta$ is given
by $\eta=-\frac{a}{2c}{\bf e_1}$. If $b\ne0$ the only change is that the perimeter is weighted by means of the
sum of two Gaussian measures.
We recall that, as pointed out in the Introduction, similar problems related to the Gauss measure are considered in
\cite{bbmp3,bcm1,bla,blafeopos,cinesi}.
\end{oss}
Notice that we defined the perimeter $P_{w,V}$ only for sets with Lipschitz boundary,
but for our later applications it will be useful to have a definition of perimeter which comprehends also less
regular
subsets of $\mathbb R^d$.
A measurable set $A$ is said to have locally finite (Euclidean) perimeter (we refer to \cite{M} for a complete
overview on the subject) if there exists a vector-valued Radon measure $\nu_A$ called \emph{Gauss--Green measure}
of the set $A$ such that, for every $T\in C_c^1(\mathbb R^d;\mathbb R^d)$, it holds true that
\[
\int_A \mathrm{div} T =\int_{\mathbb R^d}\lambdangle T,d\nu_A\rangle.
\]
The perimeter of $A$ is defined in terms of the total variation of the Gauss--Green measure of $A$ as
$P(A)=|\nu_A|(\mathbb R^d)$.
For any set $A$ of locally finite perimeter we then define the \emph{weighted perimeter} $P_{w,V}$ by
\[
P_{w,V}(A)=we^V|\nu_A|(\mathbb R^d).
\]
Since when $A$ has Lipschitz boundary $|\nu_A|=\mathcal H^{d-1}\llcorner \partial A$, the above definition is
coherent
with the one given at the beginning of this section on such sets. \\
\begin{thm}\label{brasco0}
Let $w$ and $V$ be non-negative and $C^1$-regular functions satisfying condition $(ii)$ of Proposition
\ref{semispazides}. Suppose moreover that $\mu(\mathbb R_+^d)<+\infty$; then the problem
\[
\min\left\{P_{w,V}(A): A\subseteq\mathbb R^d_+,\,\,\mu(A)=c\right\}
\]
admits a solution, and this solution coincides with the one of \eqref{errore}.
\end{thm}
\begin{proof}
Let $A$ be a measurable set of locally finite perimeter and suppose, by contradiction, that
$P_{w,V}(A)<P_{w,V}(R_A)$. We start by noticing that $P_{w,V}(R_A)<+\infty$; indeed, recalling
\eqref{perimestimate} we have that
\[
P_{w,V}(R_A)=\int_{R_A}g(x)\,d\mu(x)\le g(0)\mu(A).
\]
By \cite[Theorem II.2.8]{M} we can find a sequence of sets $A_n$ with smooth boundary such that
$\chi_{A_n}\rightarrow\chi_A$ in $L^1_{\mathrm{loc}}(\mathbb R^d)$ and $|\nu_{A_n}| \rightharpoonup ^* |\nu_A|$, where
$\rightharpoonup ^*$ indicates the weak* convergence of Radon measures.
Since $\mu(\mathbb R^d_+)<+\infty$, we also have that
\begin{equation}\label{convvol}
\chi_{A_n}\rightarrow\chi_A \,\,\,\, \mathrm{in}\,\, L^1(\mathbb R^d, \mu)
\end{equation}
and, since $we^V$ is a continuous function,
\begin{equation}\label{convper}
we^V|\nu_{A_n}| \rightharpoonup ^* we^V|\nu_A|.
\end{equation}
Thanks to \eqref{convper} we get
\[
P_{w,V}(A)=\lim_{n\to\infty} P_{w,V}(A_n)\ge \lim_{n\to\infty} P_{w,V}(R_{A_n}).
\]
We are left to show that $\lim_{n\to\infty} P_{w,V}(R_{A_n})=P_{w,V}(R_A)$, but
\[
|P_{w,V}(R_A)-P_{w,V}(R_{A_n})|\le g(0)|\mu(A)-\mu(A_n)|,
\]
and we can conclude thanks to \eqref{convvol} and the fact that $\mu(\mathbb R^d_+)<+\infty$.
\end{proof}
\section{Main result}\label{main}
In this section we consider sets $E\subseteq\mathbb R^d_+$ and we define $d\mu=e^V\,dx$,
$R_E=\{x_1>t_E\}$ where
$t_E\in\mathbb R$ is such that $\mu(R_E)=\mu(E)$ and $f^*=f^{*\mu}$ the
right rearrangement of a function $f$ with respect to $\mu$. In what follows we consider
problems of the form
\begin{equation}\label{brasco}
\left\{ \begin{array}{ll}
-\mathrm{div}(w^2\,e^{V}\nabla u)=f\,e^{V} & \mbox{in $E$}\\
u=0 & \mbox{on $\partial E$}\end{array} \right.
\end{equation}
which must be intended in weak sense. Precisely, a solution of \eqref{brasco} is a function $u\in
H^1_0(e^V,w^2e^V,E)$, defined as the space of functions in $L^2(E,e^V)$ whose weak gradients are in
$L^2(E,w^2e^V)$ and which
vanish on the boundary of $E$ in the trace sense\footnote{which is possible thanks to the regularity of
$w$ and $V$, provided $E$ has Lipschitz boundary.}, and which satisfies
\begin{equation}\label{weakproblem0}
\int_E\langle \nabla u,\nabla \phi\rangle w^2e^V\,dx=\int_E f\,\phi\,e^V\,dx
\end{equation}
for any $\phi\in H^1_0(e^V,w^2e^V,E)$. \\
The main scope of this section is to prove {\em a priori} estimates for the solutions of problem \eqref{brasco}.
For this reason we shall always consider that a solution $u$ exists. Clearly this requirement depends on the
choice of $w$, $V$ and $f$. General instances of such functions for which the existence of a solution for
problem \eqref{brasco} is guaranteed, can be found in \cite{tru} (see also \cite{bla,cinesi,bbmp3,blafeopos}). Here
we limit
ourselves to state that most of the examples considered in Remark \ref{osservazionciona2}, as the {\em
mixed-Gaussian case}
$V(x)=-c|x|^2$, $w(t)=b+e^{-at}$ with $a^2- 2c(1 + b) > 0$ and $b$ strictly
positive, are covered by the cases considered in \cite{tru}, whenever $f\in
L^2(E,e^V)$.
\begin{mt}\label{mainthm}
Suppose that the set $E\subset\mathbb R^d_+=\{(x_1,x'):x_1>0\}$ and the functions $w:[0,+\infty]\to(0,+\infty]$ and
$V:\mathbb R^d \to\mathbb R$ satisfy the hypotheses of Proposition \ref{semispazides}. Consider the two problems
\begin{equation}\label{problemthm}
\left\{ \begin{array}{ll}
-\mathrm{div}(w^2\,e^{V}\nabla u)=f\,e^{V} & \mbox{in $E$}\\
u=0 & \mbox{on $\partial E$}\end{array} \right.
\end{equation}
and
\begin{equation}\label{problemsym}
\left\{ \begin{array}{ll}
-\mathrm{div}(w^2\,e^{V}\nabla v)=f^{*}e^{V} & \mbox{in $R_E$}\\
v=0 & \mbox{on $\partial R_E$}\end{array} \right.
\end{equation}
where $0<f\in L^2(\mathbb R^d_+,\mu)$.
Then the problem \eqref{problemsym} has as solution the one variable function $v(z)$ given by
\begin{equation}\label{v}
v((z,z'))=v(z)=\int_{\mu(\{x_1\ge z\})}^{\mu(R_E)}\frac{1}{h^2(s)}\left(\int_0^sf^*(\xi)\,d\xi\right)\,ds,
\end{equation}
where
\begin{equation}\label{acca}
h(m)=w(\Phi^{-1}(m))\int_{\mathbb R^{d-1}}\mu(\Phi^{-1}(m),x')\,dx',
\end{equation}
being $\Phi(t)=\mu(\{x_1>t\})$.
Moreover, for any solution $u$ of the problem \eqref{problemthm}, we have
\begin{equation}\label{tesi1}
u^*(x)\le v(x),
\end{equation}
and, for any $q\in(0,2]$,
\begin{equation}\label{tesi2}
\int_E |\nabla u|^qw^q \,d\mu \le \int_{R_E} |\nabla v|^qw^q \,d\mu.
\end{equation}
\end{mt}
\begin{proof}
Let us suppose for the moment that the function $v$ given in \eqref{v} is a solution for the problem
\eqref{problemsym}. To prove
\eqref{tesi1} and \eqref{tesi2} we consider the functions $\phi_h$ defined as
\[
\phi_h(x)=\left\{ \begin{array}{ll}
\mathrm{sign\ }(u) & \mbox{if $|u|>t+h$}\\
\frac{u(x)-t\mathrm{sign\ } u(x)}{h} & \mbox{if $|u|\in [t,t+h)$}\\
0 & \mbox{if $|u|<t$},\end{array} \right.
\]
where $0 \leq t< \mathrm{ess\,sup}|u|$ and $h >0$. Notice that, for every $h>0$, $\phi_h$ is an admissible test function, since the solution $u$ belongs to the space $H^1_0(e^V,w^2e^V,E)$. Then \eqref{weakproblem0} turns into
\[
\frac 1 h \int_{\{|u|\in[t,t+h)\}}\langle \nabla u,\nabla u\rangle w^2\,d\mu=\frac 1 h\int_{\{|u|\in[t,t+h)\}} f\,
(u-t\frac{u}{|u|}) d\mu+\int_{\{|u|>t+h\}}f\,\mathrm{sign\ }(u)\,d\mu.
\]
Taking the limit for $h\to 0$, we get
\begin{equation}\label{stima1}
-\frac{d}{dt}\int_{\{|u|>t\}}|\nabla u|^2w^2\,d\mu=\int_{\{|u|>t\}}f\,d\mu.
\end{equation}
Let us analyze the left-hand side of equation \eqref{stima1}. We claim that the following
inequality holds true for almost every $t$:
\begin{equation}\label{s2}
-\frac{d}{dt}\int_{\{|u|>t\}}|\nabla u|^2w^2\,d\mu\ge\frac{\left(-\frac{d}{dt}
\int_{\{|u|>t\}}|\nabla u|w\,d\mu\right)^2}{-\mu'_u(t)},
\end{equation}
where $\mu_u(t)$ is the distribution function of $u$ introduced in the Section \ref{prerequisiti}.\\
\noindent
Indeed, $\mu_u(t)$ is a decreasing function and hence it is differentiable for almost every $t$; thanks to the H\"older inequality we get
\[
\begin{aligned}
-\frac{d}{dt}\int_{\{|u|>t\}}&|\nabla u|w\,d\mu=\lim_{h\rightarrow 0}\frac 1 h \int_{t<|u|<t+h}|\nabla
u|w\,d\mu\\
&\le\lim_{h\rightarrow 0}\left(\int_{\{t<|u|<t+h\}}|\nabla u|^2w^2\,d\mu\right)^{1/2}\left(\int_{\{t<|u|<t+h\}}\frac{1}{h^2}\,d\mu\right)^{1/2}\\
&=\lim_{h\rightarrow 0}\left(\frac 1 h\int_{\{t<|u|<t+h\}}|\nabla u|^2w^2\,d\mu\right)^{1/2}\left(\frac 1 h\int_{\{t<|u|<t+h\}} 1\,d\mu\right)^{1/2}\\
&=\left(-\frac{d}{dt}\int_{\{|u|>t\}}|\nabla u|^2w^2\,d\mu\right)^{1/2}\left(-\mu_u'(t)\right)^{1/2}.
\end{aligned}
\]
By the Co-Area formula and the fact that $w$ is strictly positive and $C^1$-regular, we easily get that the set
$\{u>t\}$ is a set of locally finite (Euclidean) perimeter. Thus, thanks to Proposition \ref{semispazides} and
Theorem \ref{brasco0} we get
\begin{equation}\label{stima2}
-\frac{d}{dt}\int_{\{|u|>t\}}|\nabla
u|w\,d\mu=\int_{\{|u|=t\}}w\,d\mu=P_{w,V}(\{|u|>t\})\ge P_{w,V}({\{u^*>t\}}).
\end{equation}
We introduce the function
\begin{equation}
\Phi(t)=\mu(\{x_1>t\}).
\end{equation}
We recall that the weight function $w$ is constant on the boundary of the super level sets of $u^*$, so that the
perimeter of $\{u^*>t\}$ can be written as
\[
P_{w,V}(\{u^*>t\})=w(\tau)\int_{\mathbb R^{d-1}}\mu(\tau,x')\,dx'.
\]
Moreover $\tau\in\mathbb R$ satisfies $\mu_{u^*}(t)=\Phi(\tau)$ that is $\tau=\Phi^{-1}(\mu_{u^*}(t))$ (notice
that $\Phi$ is a strictly decreasing function and thus invertible) so that we
can write the previous formula as
\begin{equation}\label{stima3}
P_{w,V}(\{u^*>t\})=w(\Phi^{-1}(\mu_{u^*}(t)))\int_{\mathbb R^{d-1}}\mu(\Phi^{-1}(\mu_{u^*}(t)),x')\,
dx':=h(\mu_{u^*}(t)).
\end{equation}
Plugging \eqref{stima2} in \eqref{s2}, and recalling \eqref{stima3} we get that
\betagin{equation}\lambdabel{stima4}
-\frac{d}{dt}\int_{\{|u|>t\}}|\nablabla u|^2w^2\,d\mu\ge\frac{h(\mu_{u^*}(t))^2}{-\mu_{u^*}'(t)}.
\end{equation}
We pass now to estimate the right-hand side of \eqref{stima1}: equation \eqref{hardy2} with $A=\{|u|>t\}$ turns
into
\betagin{equation}\lambdabel{stima5}
\int_{\{|u|>t\}}f\,d\mu\le \int_{\{|u^*|>t\}}f^*\,d\mu=\int_0^{\mu_{u^*}(t)}f^\star(s)\,ds.
\end{equation}
Combining \eqref{stima5} and \eqref{stima4} we get
\begin{equation}\label{stima6}
\frac{\left(\int_0^{\mu_{u^*}(t)}f^\star(s)\,ds\right)\mu_{u^*}'(t)}{h^2(\mu_{u^*}(t))}\le-1.
\end{equation}
Reasoning analogously for the function $v$, we easily see that, since $v$ is constant on every set $\{x_1=t\}$ and
since $v=v^*$, \eqref{stima6} holds for $v$ as an equality. Consider now the real function
\[
F(r)=\frac{\int_0^r f(s)\,ds}{h(r)^2},
\]
and let $G$ be a primitive of $F$. Since $F\ge0$, we have that $G$ is increasing. Moreover by
our previous
analysis we have that
\[
F(\mu_{u^*}(t))\mu_{u^*}'(t)\le-1= F(\mu_v(t))\mu_v'(t).
\]
We recall that here $\mu_{u^*}'(t)$ denotes the derivative almost everywhere of the function $\mu_{u^*}(t)$.
Moreover $t\mapsto G(\mu_{u^*}(t))$ is a monotone non-increasing function which satisfies the chain rule in any
point of differentiability of $\mu_{u^*}$, so that, by \cite[Corollary $3.29$]{amfupa},
we get that
\begin{equation}\label{palle1}
G(\mu_{u^*}(t))\le
G(\mu_{u^*}(0))+\int_0^tF(\mu_{u^*}(\tau))\mu_{u^*}'(\tau)\,d\tau.
\end{equation}
On the other hand, being $\mu_v(t)$
an absolutely continuous function (since $v$ is a $C^1$-regular one-variable function with positive derivative) we
have
\begin{equation}\label{palle2}
G(\mu_{v}(t))=G(\mu_{u^*}(0))+ \int_0^tF(\mu_{v}(\tau))\mu_{v}'(\tau)\,d\tau,
\end{equation}
so that, since $G(\mu_{v}(0))=G(\mu_{u^*}(0))$, we get that $G(\mu_{u^*}(t))\le G(\mu_v(t))$. This implies that
$\mu_{u^*}(t)\le\mu_v(t)$ for any $t$ and
hence that $u^*\le v$, since $u^*$ and $v$ depend only on $x_1$ and are increasing functions of such a variable.
We pass now to the proof of \eqref{tesi2}. Using the H\"older inequality and
reasoning as before we obtain, for $0< q\le2$,
\[
\begin{aligned}
-\frac{d}{dt} \int_{\{|u|>t\}}&|\nabla u|^q w^q\,d\mu =\lim_{h\rightarrow 0}\frac 1 h \int_{\{t<|u|<t+h\}}|\nabla
u|^q w^q\,d\mu\\
& \le \lim_{h\rightarrow 0}\left(\frac 1 h \int_{\{t<|u|<t+h\}}|\nabla u|^2w^2\,d\mu\right)^{q/2}\left(\frac 1 h\int_{\{t<|u|<t+h\}}d\mu\right)^{1-q/2}\\
&=\left(-\frac{d}{dt}\int_{\{|u|>t\}}|\nabla u|^2w^2\,d\mu
\right)^{q/2}(-\mu_u'(t))^{1-q/2}.
\end{aligned}
\]
Recalling \eqref{stima1} and \eqref{stima5} we have
\[
-\frac{d}{dt}\int_{\{|u|>t\}}|\nabla u|^2w^2\,d\mu
\leq \int_0^{\mu_{u^*}(t)}f^*(s)\,ds,
\]
thus
\begin{equation}\label{stima9}
-\frac{d}{dt} \int_{\{|u|>t\}}|\nabla u|^q w^q\,d\mu\le
\left(\int_0^{\mu_{u^*}(t)}f^\star(s)\,ds\right)^{q/2}(-\mu_u'(t))^{1-q/2}.
\end{equation}
Combining \eqref{stima9} and \eqref{stima6} we finally get
\[
-\frac{d}{dt} \int_{\{|u|>t\}}|\nabla u|^q w^q\,d\mu\le
(-\mu_{u^*}'(t))\left(h(\mu_{u^*}(t))^{-1}\int_0^{\mu_{u^*}(t)}f^\star(s)\,ds\right)^q.
\]
By integrating on both sides between $0$ and $+\infty$, we get
\[
\int_E |\nabla u|^q w^q\,d\mu\le\int_0^\infty
(-\mu_{u^*}'(t))\left(h(\mu_{u^*}(t))^{-1}\int_0^{\mu_{u^*}(t)}f^\star(s)\,ds\right)^q dt.
\]
We perform the change of variables $r=\mu_{u^*}(t)$, so that the above equation turns into
\[
\int_E |\nabla u|^q w^q\,d\mu\le \int_0^{\mu(E)} \left(h(r)^{-1}\int_0^{r}f^\star(s)\,ds\right)^q dr.
\]
By a straightforward inspection of those steps we notice that $v$ satisfies
\[
\int_{R_E} |\nabla v|^q w^q\,d\mu=\int_0^\infty
(-\mu_{v}'(t))\left(h(\mu_{v}(t))^{-1}\int_0^{\mu_{v}(t)}f^\star(s)\,ds\right)^q dt;
\]
By performing the change of variables $r=\mu_v(t)$ we find
\[
\int_{R_E} |\nabla v|^q w^q\,d\mu= \int_0^{\mu(R_E)} \left(h(r)^{-1}\int_0^{r}f^\star(s)\,ds\right)^q dr.
\]
Since $\mu(E)=\mu(R_E)$ we get the desired result.
We are left to prove that the function $v$ given by \eqref{v} is a solution of problem
\eqref{problemsym}. We start by noticing that
equation \eqref{stima6} suggests how to derive \eqref{v}: indeed, as we
pointed out, any solution $v$ of \eqref{problemsym} such that $v=v^*$ satisfies
\[
\frac{\int_0^{\mu_{v}(t)}f^\star(s)\,ds}{h^2(\mu_{v}(t))}\mu_{v}'(t)=-1.
\]
By integrating both sides between $0$ and $r$ we obtain
\[
\int_0^r \frac{\int_0^{\mu_{v}(t)}f^\star(s)\,ds}{h^2(\mu_{v}(t))} \mu_{v}'(t)\,dt=-r,
\]
so that, by performing the change of variables $m=\mu_v(t)$, we get
\[
\int_{\mu_v(r)}^{\mu(R_E)}\frac{\int_0^{m}f^\star(s)\,ds}{h^2(m)} dm=r\]
which is equivalent to
\[
v(z,z')=\int_{\mu\{x_1>z\}}^{\mu(R_E)}\frac{\int_0^{m}f^\star(s)\,ds}{h^2(m)} dm,
\]
that is \eqref{v}.
Notice that $v$ is strictly decreasing and belongs to
$C_{\mathrm{loc}}^{1,1}(R_E)$. Indeed, recalling \eqref{acca} one can explicitly compute
\[
\nabla v(z,z')=e_1 \frac{\partial v}{\partial z}(z,z')=-e_1 \frac{\int_0^{\mu\{x_1>z\}}f^\star(s)\,ds}{w^2(z)
\int_{\mathbb R^{d-1}}e^{V(z,x')}\,dx'},
\]
where $e_1=(1,0,\dots,0)\in\mathbb R^d$.
Since $f^\star$ is a decreasing and locally integrable function, then $f^\star\in L_{\mathrm{loc}}^\infty(\mathbb R)$;
thus, being $z\mapsto \mu(\{x_1>z\})$ $C^1$-regular, we get that $\int_0^{\mu\{x_1>z\}}f^\star(s)\,ds$ is a
locally Lipschitz
function.
Moreover the denominator is locally Lipschitz as well, and locally bounded away from zero.
Hence we have that $\nabla v$ is locally Lipschitz.
Thus, recalling that $\partial_1 V$ depends only on the first variable $x_1$ it is possible to explicitly compute
the divergence of $w^2\nabla v e^V$ and check that it satisfies \eqref{problemsym}. This concludes the proof of
the theorem.
\end{proof}
\end{document}
\begin{document}
\title{Resonant Scattering Can Enhance the Degree of Entanglement}
\author{Kazuya Yuasa}
\email[]{[email protected]}
\affiliation{Research Center for Information Security, National Institute of Advanced Industrial Science and Technology (AIST), 1-18-13 Sotokanda, Chiyoda-ku, Tokyo 101-0021, Japan}
\author{Hiromichi Nakazato}
\email[]{[email protected]}
\affiliation{Department of Physics, Waseda University, Tokyo 169-8555, Japan}
\date{September 28, 2006}
\begin{abstract}
Generation of entanglement between two qubits by scattering an entanglement mediator is discussed.
The mediator bounces between the two qubits and exhibits a resonant scattering.
It is clarified how the degree of the entanglement is enhanced by the constructive interference of such bouncing processes.
Maximally entangled states are available via adjusting the incident momentum of the mediator or the distance between the two qubits, but their fine tunings are not necessarily required to gain highly entangled states and a robust generation of entanglement is possible.
\end{abstract}
\pacs{03.67.Mn, 03.65.Xp, 03.65.Nk, 72.10.-d}
\maketitle
\section{Introduction}
Entanglement plays an essential role in the ideas of quantum information, like quantum computation, quantum communication, quantum cryptography, and so on
\cite{ref:QuantInfoTextbooks}.
Efficient generation of entanglement hence constitutes an essential element for the realization of such ideas, and various schemes have been proposed.
Entanglement would be most simply and naturally generated by a direct interaction between two entities carrying quantum information
\cite{ref:Qdots},
i.e.\ between two qubits.
There are, however, several setups in which the two qubits are separated from each other, beyond the range of the direct interaction, but an entanglement is to be shared between them.
In such a case, a ``mediator'' would be convenient to entangle the separated qubits, and several schemes have been investigated theoretically
\cite{ref:IntMed,ref:cavity,ref:successive0,ref:qpfes,ref:qpfeq,ref:RosannaWJJ,ref:EntScatCostaPRL,ref:EntScatPalma}
and experimentally
\cite{ref:Haroche,ref:Kuzmich}.
Among these proposals, we here concentrate ourselves on a scheme based on the ``successive'' interactions of a mediator with two qubits to be entangled
\cite{ref:successive0,ref:qpfes,ref:qpfeq,ref:RosannaWJJ,ref:EntScatCostaPRL,ref:EntScatPalma,ref:Haroche,ref:Kuzmich}.
Mediator X prepared in a specific state is sent to interact successively one by one with qubits A and B, each of which is prepared in an appropriate initial state, and then the state of X is measured.
If X is found in a specific state after the interactions, we end up with an entanglement between A and B\@.
It has been somewhat standard in such schemes to assume that mediator X interacts with qubit A(B) for a certain time duration $\tau_\text{A(B)}$
\cite{ref:successive0,ref:qpfes,ref:qpfeq,ref:RosannaWJJ,ref:EntScatCostaPRL,ref:Haroche},
and the generation of a highly entangled state with a high probability is accomplished by tuning these times, given the coupling constants.
It is, however, a subtle problem whether the notion of the ``interaction time'' makes sense or not.
It might be valid when the size of a wave packet is small enough compared with the interaction region, but it might not be otherwise.
Even in the former case, no rigorous proof has been given explicitly so far, to the best of the present authors' knowledge.
A rigorous approach to this issue would be to formulate it as a \textit{scattering problem} and such attempts have recently appeared \cite{ref:EntScatCostaPRL,ref:EntScatPalma}.
Qubits A and B are shown to be entangled after \textit{scattering} mediator X\@.
In this approach, the notion of the interaction time is not necessary; actually, such a time is either absent in the stationary-state treatments or automatically given by the physical situation under consideration in the wave-packet scatterings \cite{ref:WavePacketDeltaPot}.
At this point, it is worth noting that X can be reflected by each of the qubits with a certain probability.
This effect is not taken into account in the standard formulations with the interaction times.
The wave reflected by B is partially directed to A and some of its portion is reflected back to B\@.
Such a sequence of bounces between A and B gives rise to a resonant scattering of mediator X\@.
The purpose of the present work is to clarify the effects of the 1D resonant scattering on the entanglement generation \cite{note:NoReso}: resonance can enhance the degree of entanglement.
It is revealed that the distance $d$ between the two qubits is an important parameter to gain an entanglement efficiently, and maximally entangled states are available with finite probabilities by adjusting $d$ or the incident momentum $k$ of X, in a wide parameter region of the coupling constants.
Fine tunings of $k$ or $d$, however, are not necessarily required and robust generation of highly entangled states is possible.
\section{Scattering a Monochromatic Wave}
We consider the following Hamiltonian in 1D space:
\begin{align}
H=\frac{p^2}{2m}
&+g_\text{A}(\sigma_+^\text{(X)}\sigma_-^\text{(A)}
+\sigma_-^\text{(X)}\sigma_+^\text{(A)})\delta(x+d/2)\nonumber\\
&+g_\text{B}(\sigma_+^\text{(X)}\sigma_-^\text{(B)}
+\sigma_-^\text{(X)}\sigma_+^\text{(B)})\delta(x-d/2).
\label{eqn:Hamiltonian}
\end{align}
Qubits A and B are placed at $x=-d/2$ and $d/2$, respectively, and the quantum information is encoded in their spin states $\ket{\uparrow}_\text{A(B)}$ and $\ket{\downarrow}_\text{A(B)}$, which are flipped by the ladder operators $\sigma_\mp^\text{(A(B))}$.
A and B are initially prepared in $\ket{\uparrow\uparrow}_\text{AB}$ and mediator X polarized in $\ket{\downarrow}_\text{X}$ is injected from the left.
X, whose position and momentum are represented by the operators $x$ and $p$, respectively, propagates according to the Hamiltonian (\ref{eqn:Hamiltonian}) and is scattered by A and B (Fig.\ \ref{fig:EntGenScat}).
Since the interactions between mediator X and qubits A, B preserve the number of spins in the up state $\ket{\uparrow}$ among A, B, and X, if X after the scattering is detected by either of the two detectors on both sides and found in $\ket{\uparrow}_\text{X}$, either state of A or B is flipped down and an entangled state is generated [see (\ref{eqn:EntStat}) below].
Note that we are looking at an extreme case where the potential barriers are much thinner than the extension of the wave packet of X\@.
\begin{figure}[t]
\includegraphics[width=0.4\textwidth]{fig1.eps}
\caption{Mediator X prepared in $\ket{\downarrow}_\text{X}$ is scattered by qubits A and B placed at $x=-d/2$ and $d/2$, respectively, and initialized in $\ket{\uparrow\uparrow}_\text{AB}$, and $\ket{\uparrow}_\text{X}$ is to be detected by either of the two detectors on both sides, with a certain probability.}
\label{fig:EntGenScat}
\end{figure}
Let us find a scattering state of the Hamiltonian (\ref{eqn:Hamiltonian}),
\begin{equation}
H\ket{E}=E\ket{E},
\label{eqn:EigenEq}
\end{equation}
which would describe the scattering of a well-mono\-chromatized incident particle X\@.
We introduce the wave functions
\begin{equation}
u_{{\downarrow};{\uparrow\uparrow}}(x)
=\Bigl(\bras{x{\downarrow}}{\text{X}}\otimes\bras{{\uparrow}{\uparrow}}{\text{AB}}\Bigr)\,\ket{E}
\equiv\bracket{x{\downarrow};{\uparrow}{\uparrow}}{E},\ \text{etc.}
\end{equation}
The solution is given in the form
\begin{equation}
u(x)=\begin{cases}
Ie^{ik(x+d/2)}+Re^{-ik(x+d/2)}&(x<-d/2),\\
Te^{ik(x-d/2)}&(x>d/2)
\end{cases}
\end{equation}
for each spin state, where $k$ is the momentum (apart from $\hbar$) of X and $E=\hbar^2k^2/2m$.
We are interested in the entanglement generation from the initialized state $\ket{\uparrow\uparrow}_\text{AB}$ by injecting X in the spin state $\ket{\downarrow}_\text{X}$.
In such a case, only the three wave functions $u_{{\downarrow};{\uparrow\uparrow}}(x)$, $u_{{\uparrow};{\uparrow\downarrow}}(x)$, and $u_{{\uparrow};{\downarrow\uparrow}}(x)$ are involved in the problem.
Imposing the continuity condition on the wave functions, the scattering problem (\ref{eqn:EigenEq}) is solved under the boundary conditions
\begin{equation}
I_{{\downarrow};{\uparrow\uparrow}}=N,\quad
I_{{\uparrow};{\uparrow\downarrow}}=0,\quad
I_{{\uparrow};{\downarrow\uparrow}}=0
\end{equation}
to yield the transmission and reflection amplitudes
\begin{subequations}
\label{eqn:Coefficients}
\begin{align}
T_{{\downarrow};{\uparrow\uparrow}}
&=N\frac{t_\text{A}t_\text{B}e^{ikd}}{1-r_\text{A}r_\text{B}e^{2ikd}},\displaybreak[0]\\
R_{{\downarrow};{\uparrow\uparrow}}
&=N\left(
r_\text{A}
+\frac{t_\text{A}^2r_\text{B}e^{2ikd}}{1-r_\text{A}r_\text{B}e^{2ikd}}
\right),\displaybreak[0]\\
T_{{\uparrow};{\uparrow\downarrow}}
&=R_{{\uparrow};{\uparrow\downarrow}}e^{-ikd}
=N\frac{t_\text{A}f_\text{B}e^{ikd}}{1-r_\text{A}r_\text{B}e^{2ikd}},\displaybreak[0]\\
T_{{\uparrow};{\downarrow\uparrow}}
&=R_{{\uparrow};{\downarrow\uparrow}}e^{ikd}
=N\left(
1+\frac{t_\text{A}r_\text{B}e^{2ikd}}{1-r_\text{A}r_\text{B}e^{2ikd}}
\right)f_\text{A}e^{ikd},
\end{align}
\end{subequations}
where
\begin{subequations}
\begin{gather}
t_\text{A(B)}=\frac{1}{1+\Omega_\text{A(B)}^2},\qquad
r_\text{A(B)}=-\frac{\Omega_\text{A(B)}^2}{1+\Omega_\text{A(B)}^2},
\displaybreak[0]\\
f_\text{A(B)}=-\frac{i\Omega_\text{A(B)}}{1+\Omega_\text{A(B)}^2},
\end{gather}
\end{subequations}
and $\Omega_\text{A(B)}=mg_\text{A(B)}/\hbar^2k$.
Note that X does not feel A(B) when they are in the same spin states.
The coefficients $t_\text{A(B)}$ and $r_\text{A(B)}$ respectively describe the transmission through and the reflection from A(B) without spin flip and $f_\text{A(B)}$ the transmission/reflection with spin flips, when X is in a different spin state from A(B).
It is interesting to expand the amplitudes in (\ref{eqn:Coefficients}) as power series in $e^{2ikd}$:
\begin{subequations}
\label{eqn:CoefficientsExp}
\begin{align}
T_{{\uparrow};{\uparrow\downarrow}}
&=R_{{\uparrow};{\uparrow\downarrow}}e^{-ikd}\nonumber\\
&=Nt_\text{A}e^{ikd}(
1+r_\text{B}e^{ikd}r_\text{A}e^{ikd}\nonumber\\
&\qquad\qquad\quad\ \ \,
{}+r_\text{B}e^{ikd}r_\text{A}e^{ikd}r_\text{B}e^{ikd}r_\text{A}e^{ikd}
+\cdots)f_\text{B},\displaybreak[0]\\
T_{{\uparrow};{\downarrow\uparrow}}
&=R_{{\uparrow};{\downarrow\uparrow}}e^{ikd}\nonumber\displaybreak[0]\\
&=N(
1+t_\text{A}e^{ikd}r_\text{B}e^{ikd}\nonumber\\
&\qquad\quad\,
{}+t_\text{A}e^{ikd}r_\text{B}e^{ikd}r_\text{A}e^{ikd}r_\text{B}e^{ikd}
+\cdots)f_\text{A}e^{ikd},
\end{align}
\end{subequations}
etc., where $e^{ikd}$ represents the phase factor gained for the rightward/leftward propagation of X between A and B, and each term of the expansions reveals how X goes back and forth between A and B, before flipping its spin by the interaction with A or B\@.
It is also worth looking at the coefficients $t_\text{A(B)}$, $r_\text{A(B)}$, and $f_\text{A(B)}$ for the single potential A(B) and comparing the present approach with the ordinary formulation with the interaction times \cite{ref:successive0,ref:qpfes,ref:qpfeq,ref:RosannaWJJ,ref:EntScatCostaPRL,ref:Haroche}.
\textit{When we concentrate ourselves on a transmitted particle} (like in the approach with the interaction time), a state of X and A, e.g.\ $\ket{{\downarrow};{\uparrow}}_\text{XA}$, is ``rotated'' after the transmission of X through potential A like
\begin{equation}
\ket{{\downarrow};{\uparrow}}_\text{XA}
\to t_\text{A}\ket{{\downarrow};{\uparrow}}_\text{XA}
+f_\text{A}\ket{{\uparrow};{\downarrow}}_\text{XA},
\end{equation}
apart from the normalization.
The counterpart in the approach with the interaction time reads \cite{ref:qpfeq,ref:RosannaWJJ}
\begin{equation}
\ket{{\downarrow};{\uparrow}}_\text{XA}
\to \cos g_\text{A}\tau_\text{A}
\ket{{\downarrow};{\uparrow}}_\text{XA}
-i\sin g_\text{A}\tau_\text{A}
\ket{{\uparrow};{\downarrow}}_\text{XA},
\end{equation}
and there exists the connection between the two formulations:
\begin{equation}
\cos g_\text{A}\tau_\text{A}
\leftrightarrow\frac{1}{\sqrt{1+\Omega_\text{A}^2}},\quad
\sin g_\text{A}\tau_\text{A}
\leftrightarrow\frac{\Omega_\text{A}}{\sqrt{1+\Omega_\text{A}^2}}.
\label{eqn:correspondence}
\end{equation}
These relations show that $g_\text{A}\tau_\text{A}$ ranges from $0$ to $\pi/2$ and a higher/lower momentum corresponds to a shorter/longer interaction time.
In practice, however, one should also take account of the probability for such event, transmission, to occur.
To achieve the complete flip $\sin g_\text{A}\tau_\text{A}=1$, for instance, the correspondence (\ref{eqn:correspondence}) suggests that $\Omega_\text{A}\to\infty$ is required.
But the probability of the transmission
\begin{equation}
|t_\text{A}|^2+|f_\text{A}|^2
=\frac{1}{1+\Omega_\text{A}^2}
\end{equation}
vanishes in this limit, meaning that the complete flip is not possible.
A larger rotation angle $g_\text{A}\tau_\text{A}$ requires a larger $\Omega_\text{A}$ but it is available with a smaller probability.
This restricts the applicability of the formulation with the interaction times, at least for the delta-shaped potential.
\section{Generation of Entanglement}
Now we are ready to discuss the entanglement generation.
If a transmitted/reflected particle in $\ket{\uparrow}_\text{X}$ is detected by the detector on the right/left, an entangled state
\begin{subequations}
\label{eqn:EntStat}
\begin{align}
\ket{\Psi_\text{t}}_\text{AB}
&\propto
T_{\uparrow;\uparrow\downarrow}
\ket{\uparrow\downarrow}_\text{AB}
+T_{\uparrow;\downarrow\uparrow}
\ket{\downarrow\uparrow}_\text{AB}
\label{eqn:EntStatT}
\intertext{(for the former case) or}
\ket{\Psi_\text{r}}_\text{AB}
&\propto
R_{\uparrow;\uparrow\downarrow}
\ket{\uparrow\downarrow}_\text{AB}
+R_{\uparrow;\downarrow\uparrow}
\ket{\downarrow\uparrow}_\text{AB}
\label{eqn:EntStatR}
\end{align}
\end{subequations}
(for the latter) is established.
In the present model, the concurrences \cite{ref:Concurrence} of these states are the same,
\begin{subequations}
\label{eqn:C}
\begin{gather}
C=\frac{2|T_{\uparrow;\uparrow\downarrow}T_{\uparrow;\downarrow\uparrow}|}{|T_{\uparrow;\uparrow\downarrow}|^2+|T_{\uparrow;\downarrow\uparrow}|^2}
=\frac{2|R_{\uparrow;\uparrow\downarrow}R_{\uparrow;\downarrow\uparrow}|}{|R_{\uparrow;\uparrow\downarrow}|^2+|R_{\uparrow;\downarrow\uparrow}|^2}
=\frac{2a}{1+a^2},\displaybreak[0]\\
a
=\left|\frac{T_{\uparrow;\downarrow\uparrow}}{T_{\uparrow;\uparrow\downarrow}}\right|
=\left|\frac{R_{\uparrow;\downarrow\uparrow}}{R_{\uparrow;\uparrow\downarrow}}\right|
=\frac{\Omega_\text{A}}{\Omega_\text{B}}\sqrt{1+4\Omega_\text{B}^2(1+\Omega_\text{B}^2)\sin^2\!kd},
\end{gather}
\end{subequations}
and so are the probabilities of the generations of the entangled states (\ref{eqn:EntStatT}) and (\ref{eqn:EntStatR}), namely the probability of detecting a transmitted particle in the $\ket{\uparrow}_\text{X}$ state and that of detecting a reflected one,
\begin{align}
P&=|T_{\uparrow;\uparrow\downarrow}|^2
+|T_{\uparrow;\downarrow\uparrow}|^2
=|R_{\uparrow;\uparrow\downarrow}|^2
+|R_{\uparrow;\downarrow\uparrow}|^2\nonumber\displaybreak[0]\\
&=\frac{\Omega_\text{A}^2+\Omega_\text{B}^2+4\Omega_\text{A}^2\Omega_\text{B}^2(1+\Omega_\text{B}^2)\sin^2\!kd}
{(1+\Omega_\text{A}^2+\Omega_\text{B}^2)^2+4\Omega_\text{A}^2\Omega_\text{B}^2(1+\Omega_\text{A}^2)(1+\Omega_\text{B}^2)\sin^2\!kd}.
\label{eqn:P}
\end{align}
Note that the probability of \textit{either} of the two detectors detecting X in the state $\ket{\uparrow}_\text{X}$ is given by $2P$, and therefore the maximal value of $P$ is $1/2$.
Notice here that the distance between the two qubits, $d$, enters the formulas through the exponential factor $e^{2ikd}$, which is responsible for the resonant scattering.
See Fig.\ \ref{fig:ResoCP}, where the resonance structures are observed in the momentum dependences of the concurrence $C$ and the probability $P$.
By adjusting the incident momentum $k$, one can generate a highly entangled state with a finite probability.
At the same time, fine tunings of parameters are not necessarily required.
For example, let us look at a case where $g_\text{A}=g_\text{B}$.
Figure \ref{fig:ResoCP-gA=gB}(a) shows that $C$ and $P$ do not oscillate strongly for large $k$ and a high concurrence is available in a wide range of the incident momentum $k$.
This is because the oscillating factor $e^{2ikd}$ represents the bouncing of X between A and B and it always accompanies the reflection coefficients $r_\text{A}$ and $r_\text{B}$, which are reduced for large momentum $k$ and suppress the oscillations.
\begin{figure}[t]
\includegraphics[height=0.61\textwidth]{fig2-s.eps}
\caption{(a) The concurrence $C$ given in (\ref{eqn:C}) and (b) the probability $P$ given in (\ref{eqn:P}) as functions of $k$ and $g_\text{A}$ when $g_\text{B}=3\,[\hbar^2\pi/md]$.}
\label{fig:ResoCP}
\end{figure}
\begin{figure}[t]
\includegraphics[width=0.4\textwidth]{fig3a.eps}\\
\includegraphics[width=0.4\textwidth]{fig3b.eps}
\caption{(a) The concurrence $C$ given in (\ref{eqn:C}) (solid) and the probability $P$ given in (\ref{eqn:P}) (dashed) as functions of the incident momentum $k$ when $g_\text{A}=g_\text{B}=3\,[\hbar^2\pi/md]$.
(b) The same as (a) but only the contributions up to the $(n+1)$th term in each of the series expansions (\ref{eqn:CoefficientsExp}) are taken into account, where $n$ is the number of bounces between A and B\@.}
\label{fig:ResoCP-gA=gB}
\end{figure}
Let us look more closely at the formulas and see how a highly entangled state is acquired by the resonant tunneling.
Figure \ref{fig:ResoCP-gA=gB}(b) shows that the series expansions in (\ref{eqn:CoefficientsExp}) converge very quickly and the first two contributions, i.e.\ the no- and one-bounce processes, are important.
For $g_\text{A}=g_\text{B}$, the no-bounce process, i.e.\ the first terms in (\ref{eqn:CoefficientsExp}), yields $|T_{\uparrow;\uparrow\downarrow}|<|T_{\uparrow;\downarrow\uparrow}|$ ($|R_{\uparrow;\uparrow\downarrow}|<|R_{\uparrow;\downarrow\uparrow}|$).
If the momentum $k$ is adjusted so as to satisfy $kd=\nu\pi\,(\nu=1,2,\ldots)$, the one-bounce process, i.e.\ the second terms in (\ref{eqn:CoefficientsExp}), interferes with the no-bounce process \textit{constructively} for the $\ket{\uparrow;\uparrow\downarrow}$ component but \textit{destructively} for the $\ket{\uparrow;\downarrow\uparrow}$ component (since $t_\text{A(B)}>0$ while $r_\text{A(B)}<0$).
As a result, the difference between $|T_{\uparrow;\uparrow\downarrow}|$ and $|T_{\uparrow;\downarrow\uparrow}|$ ($|R_{\uparrow;\uparrow\downarrow}|$ and $|R_{\uparrow;\downarrow\uparrow}|$) is reduced by the interference and the concurrence is made close to unity.
In fact, the ratio $a$ introduced in (\ref{eqn:C}) is evaluated up to the one-bounce contribution to be $a=1+\delta a=1+\Omega^6/(1+2\Omega^2+2\Omega^4)$ under the conditions $g_\text{A}=g_\text{B}$ and $kd=\nu\pi\,(\nu=1,2,\ldots)$, and the deviation of the concurrence from unity is about $(\delta a)^2/2$, which is very small for $\Omega\,(=\Omega_\text{A}=\Omega_\text{B})\lesssim1$ (almost perfect compensation).
Other processes with more bounces complete the exact unit concurrence.
Note that, in Fig.\ \ref{fig:ResoCP-gA=gB}, where $g_\text{A}=g_\text{B}=3\,[\hbar^2\pi/md]$, the condition $\Omega\lesssim1$ corresponds to $k\gtrsim3\,[\pi/d]$.
\begin{figure*}[t]
\includegraphics[width=0.43\textwidth]{fig4a.eps}\qquad\qquad
\includegraphics[width=0.43\textwidth]{fig4b.eps}
\caption{(a) Contour plots of the probability $P$ (dashed) given in (\ref{eqn:P}) and the concurrence $C$ (solid) given in (\ref{eqn:C}) as functions of $\Omega_\text{A}$ and $\Omega_\text{B}$ under the resonance condition $\sin^2\!kd=1$. The probability $P$ takes its maximum value $P_\text{max}=1/2$ at $(\Omega_\text{A},\Omega_\text{B})=(1/\sqrt{2},\infty)$, and the concurrence is unity $C=1$ on the line $\Omega_\text{A}=\Omega_\text{B}/(1+2\Omega_\text{B}^2)$.
(b) Contour plots of the optimal concurrence $C$ for given $(\Omega_\text{A},\Omega_\text{B})$ (solid) and the corresponding probability $P$ (dashed) as functions of $\Omega_\text{A}$ and $\Omega_\text{B}$. The unit concurrence $C=1$ is available in the gray region $\Omega_\text{B}/(1+2\Omega_\text{B}^2)\le\Omega_\text{A}\le\Omega_\text{B}$. In the left region $\Omega_\text{A}\le\Omega_\text{B}/(1+2\Omega_\text{B}^2)$, the concurrence $C$ is optimal when $\sin^2\!kd=1$, and in the right region $\Omega_\text{A}\ge\Omega_\text{B}$, it is optimal when $\sin^2\!kd=0$. The optimal probability $P$ for the unit concurrence $C=1$ is realized at the point indicated by a dot, $(\Omega_\text{A},\Omega_\text{B})\simeq(0.33,1.07)$, and is given by $P_\text{opt}\simeq0.37$.}
\label{fig:OptPC}
\end{figure*}
\section{Optimization}
Let us next survey the whole parameter space and discuss how to optimize the concurrence $C$ in (\ref{eqn:C}) and the probability $P$ in (\ref{eqn:P}).
There are essentially three independent (dimensionless) parameters $\Omega_\text{A(B)}=mg_\text{A(B)}/\hbar^2k$ and $kd$.
We first look at the probability (\ref{eqn:P}).
It is possible to show that it is optimal for a given pair $(\Omega_\text{A},\Omega_\text{B})$ when the resonance condition
\begin{equation}
\sin^2\!kd=1
\label{eqn:Resonance}
\end{equation}
is satisfied.
The probability $P$ under this condition is plotted in Fig.\ \ref{fig:OptPC}(a) as a function of $\Omega_\text{A}$ and $\Omega_\text{B}$, together with the concurrence $C$.
The maximum value of the probability $P$ is given by
\begin{equation}
P_\text{max}
=\frac{1}{2}\quad\text{at}\quad
(\Omega_\text{A},\Omega_\text{B})=(1/\sqrt{2},\infty),
\end{equation}
while the unit concurrence $C=1$ under the resonance condition (\ref{eqn:Resonance}) is achieved when
\begin{equation}
\Omega_\text{A}
=\frac{\Omega_\text{B}}{1+2\Omega_\text{B}^2}.
\end{equation}
If one does not stick to a high probability, the concurrence $C$ is optimized as follows.
The concurrence (\ref{eqn:C}) takes its maximum $C=1$ at $a=1$ and decreases monotonically as $a$ leaves this optimal point.
Since
\begin{equation}
\frac{\Omega_\text{A}}{\Omega_\text{B}}\le a
\le\frac{\Omega_\text{A}}{\Omega_\text{B}}(1+2\Omega_\text{B}^2)
\end{equation}
for a given pair $(\Omega_\text{A},\Omega_\text{B})$, the unit concurrence $C=1$ (i.e.\ $a=1$) requires
\begin{equation}
\frac{\Omega_\text{A}}{\Omega_\text{B}}\le 1
\le\frac{\Omega_\text{A}}{\Omega_\text{B}}(1+2\Omega_\text{B}^2),\quad
\sin^2\!kd
=\frac{\Omega_\text{B}^2-\Omega_\text{A}^2}
{4\Omega_\text{A}^2\Omega_\text{B}^2(1+\Omega_\text{B}^2)}.
\end{equation}
See Fig.\ \ref{fig:OptPC}(b), where the region for the unit concurrence $C=1$ is shown, together with the corresponding probability $P$.
In particular, the unit concurrence $C=1$ is always available when $g_\text{A}=g_\text{B}$, by adjusting the incident momentum $k$ or the distance between the two qubits, $d$.
The optimal probability $P$ for the unit concurrence $C=1$ is realized at the point indicated by a dot in Fig.\ \ref{fig:OptPC}(b), which is evaluated to be
\begin{subequations}
\begin{equation}
P_\text{opt}\simeq0.37
\end{equation}
at
\begin{gather}
\Omega_\text{B}
=\sqrt{\frac{1+\sqrt[3]{37-3\sqrt{114}}+\sqrt[3]{37+3\sqrt{114}}}{6}}\simeq1.07,\displaybreak[0]\\
\Omega_\text{A}
=\frac{\Omega_\text{B}}{1+2\Omega_\text{B}^2}\simeq0.33,\qquad
\sin^2\!kd=1.
\end{gather}
\end{subequations}
\begin{figure*}
\includegraphics[height=0.61\textwidth]{fig5ab-s.eps}\qquad
\includegraphics[height=0.61\textwidth]{fig5cd-s.eps}
\caption{The concurrences (above: a,c) and the probabilities (below: b,d) for the Hamiltonian (\ref{eqn:HamiltonianContact}) as functions of $k$ and $g_\text{A}$ with $g_\text{B}=1.5\,[\hbar^2\pi/md]$, when a reflected particle is detected (left: a,b) and when a transmitted particle is detected (right: c,d).}
\label{fig:ResoCP2}
\end{figure*}
\begin{figure*}
\includegraphics[width=0.4\textwidth]{fig6a.eps}\qquad\qquad\qquad
\includegraphics[width=0.4\textwidth]{fig6b.eps}\qquad\mbox{}
\caption{The concurrences (solid) and the probabilities (dashed) for the Hamiltonian (\ref{eqn:HamiltonianContact}) as functions of the incident momentum $k$ with $g_\text{A}=g_\text{B}=1.5\,[\hbar^2\pi/md]$, when a reflected particle is detected (a) and when a transmitted particle is detected (b).}
\label{fig:ResoCP2-gA=gB}
\end{figure*}
\section{Another Model}
\label{sec:Model2}
Similar analyses are possible for the Hamiltonian
\begin{align}
H=\frac{p^2}{2m}
&+g_\text{A}\bm{\sigma}^\text{(X)}\cdot\bm{\sigma}^\text{(A)}\delta(x+d/2)\nonumber\\
&+g_\text{B}\bm{\sigma}^\text{(X)}\cdot\bm{\sigma}^\text{(B)}\delta(x-d/2).
\label{eqn:HamiltonianContact}
\end{align}
This type of interaction is considered in Refs.\ \cite{ref:EntScatCostaPRL,ref:EntScatPalma}.
In this case, the amplitudes are given by
\begin{subequations}
\label{eqn:Coefficients2}
\begin{align}
T_{\downarrow;\uparrow\uparrow}
&=N\frac{\tilde{t}_\text{A}\tilde{t}_\text{B}e^{ikd}}{1-\tilde{r}_\text{A}\tilde{r}_\text{B}e^{2ikd}},\displaybreak[0]\\
R_{\downarrow;\uparrow\uparrow}
&=N\left(
\tilde{r}_\text{A}
+\frac{\tilde{t}_\text{A}^2\tilde{r}_\text{B}e^{2ikd}}{1-\tilde{r}_\text{A}\tilde{r}_\text{B}e^{2ikd}}
\right),\\
T_{\uparrow;\uparrow\downarrow}
&=N\frac{\tilde{t}_\text{A}e^{ikd}}{1-\tilde{r}_\text{A}\tilde{r}_\text{B}e^{2ikd}}f_\text{B}\left(
1+\frac{r_\text{A}'t_\text{B}e^{2ikd}}{1-r_\text{A}'r_\text{B}e^{2ikd}}
\right),\\
R_{\uparrow;\uparrow\downarrow}
&=N\frac{\tilde{t}_\text{A}e^{ikd}}{1-\tilde{r}_\text{A}\tilde{r}_\text{B}e^{2ikd}}f_\text{B}\frac{t_\text{A}'e^{ikd}}{1-r_\text{A}'r_\text{B}e^{2ikd}},\displaybreak[0]\\
T_{\uparrow;\downarrow\uparrow}
&=N\left(
1+\frac{\tilde{t}_\text{A}\tilde{r}_\text{B}e^{2ikd}}{1-\tilde{r}_\text{A}\tilde{r}_\text{B}e^{2ikd}}
\right)f_\text{A}\frac{t_\text{B}'e^{ikd}}{1-r_\text{A}r_\text{B}'e^{2ikd}},\displaybreak[0]\\
R_{\uparrow;\downarrow\uparrow}
&=N\left(
1+\frac{\tilde{t}_\text{A}\tilde{r}_\text{B}e^{2ikd}}{1-\tilde{r}_\text{A}\tilde{r}_\text{B}e^{2ikd}}
\right)\nonumber\\
&\qquad\qquad\qquad
{}\times f_\text{A}\left(
1+\frac{t_\text{A}r_\text{B}'e^{2ikd}}{1-r_\text{A}r_\text{B}'e^{2ikd}}
\right),
\end{align}
\end{subequations}
with
\begin{subequations}
\begin{align}
t_\text{A(B)}
&=\frac{1-i\Omega_\text{A(B)}}{(1+i\Omega_\text{A(B)})(1-3i\Omega_\text{A(B)})},displaybreak[0]\\
r_\text{A(B)}
&=\frac{i\Omega_\text{A(B)}(1+3i\Omega_\text{A(B)})}{(1+i\Omega_\text{A(B)})(1-3i\Omega_\text{A(B)})},displaybreak[0]\\
f_\text{A(B)}
&=-\frac{2i\Omega_\text{A(B)}}{(1+i\Omega_\text{A(B)})(1-3i\Omega_\text{A(B)})},displaybreak[0]\\
t_\text{A(B)}'
&=\frac{1}{1+i\Omega_\text{A(B)}},\quad
r_\text{A(B)}'=-\frac{i\Omega_\text{A(B)}}{1+i\Omega_\text{A(B)}},
\end{align}
\end{subequations}
and
\begin{subequations}
\label{eqn:RenCoefficients2}
\begin{gather}
\tilde{t}_\text{A(B)}
=t_\text{A(B)}
+\Sigma_\text{A(B)},\quad
\tilde{r}_\text{A(B)}
=r_\text{A(B)}+\Sigma_\text{A(B)},\displaybreak[0]\\
\Sigma_\text{A(B)}
=\frac{f_\text{A(B)}^2r_\text{B(A)}'e^{2ikd}}{1-r_\text{A(B)}r_\text{B(A)}'e^{2ikd}}.
\end{gather}
\end{subequations}
In contrast to the previous model, X feels potential A(B) even when X is in the same spin state as A(B)\@.
$t_\text{A(B)}'$ and $r_\text{A(B)}'$ are the coefficients for the transmission through and the reflection from the single potential A(B) without spin flip when X is in the same spin state as A(B), $t_\text{A(B)}$ and $r_\text{A(B)}$ are those when X is in a different spin state from A(B), and $f_\text{A(B)}$ describes the transmission and the reflection with spin flip when X is in a different spin state from A(B).
For this model, the concurrence $C_\text{t}$ and the probability $P_\text{t}$ by detecting a transmitted particle in $\ket{\uparrow}_\text{X}$ on the right are different from $C_\text{r}$ and $P_\text{r}$ by detecting a reflected one on the left.
Again, the denominators in (\ref{eqn:Coefficients2}) and (\ref{eqn:RenCoefficients2}) represent bouncing of X between A and B (which becomes clear by expanding them as power series in $e^{2ikd}$) and give rise to the resonant tunneling.
Similar resonance structures to the previous example are observed in the concurrences and the probabilities, as shown in Figs.\ \ref{fig:ResoCP2} and \ref{fig:ResoCP2-gA=gB}.
The oscillations of the concurrences and the probabilities are reduced for a large momentum $k$ and a robust entanglement generation is available (Fig.\ \ref{fig:ResoCP2-gA=gB}), due to the same mechanism as in the previous example.
\section{Summary}
In this article, we have investigated the entanglement generation by the resonant scattering.
The resonance effects are clarified and the optimization of the entanglement generation is discussed.
The interference of the bouncing and non-bouncing processes can enhance entanglement.
Maximally entangled states are available with finite probabilities in a wide parameter region.
The degree of entanglement is optimized by adjusting the momentum of the mediator, but its fine tuning is not necessarily required.
One of the interesting future subjects would be to clarify the effect of the size of a wave packet on the generation of entanglement.
Scattering of plane waves is discussed in this article, but the models are solvable also for wave packets \cite{ref:WavePacketDeltaPot}.
It is possible to discuss the resonant scattering fully dynamically.
Introduction of the width of the potential would also be interesting to explore the validity of the ordinary formulation with the interaction times.
\begin{thebibliography}{10}
\bibitem{ref:QuantInfoTextbooks}
M.~A. Nielsen and I.~L. Chuang, \textit{Quantum Computation and Quantum
Information} (Cambridge University Press, Cambridge, 2000); \textit{The
Physics of Quantum Information}, edited by D. Bouwmeester, A. Zeilinger, and
A. Ekert (Springer-Verlag, Heidelberg, 2000); C.~H. Bennett and D.~P.
DiVincenzo, Nature (London) \textbf{404}, 247 (2000); A. Galindo and M.~A.
Mart\'{\i}n-Delgado, Rev. Mod. Phys. \textbf{74}, 347 (2002).
\bibitem{ref:Qdots}
D. Loss and D.~P. DiVincenzo, Phys. Rev. A \textbf{57}, 120 (1998); B.~E. Kane,
Nature (London) \textbf{393}, 133 (1998).
\bibitem{ref:IntMed}
C. Cabrillo, J.~I. Cirac, P. Garc\'{\i}a-Fern\'andez, and P. Zoller, Phys. Rev.
A \textbf{59}, 1025 (1999); L.-M. Duan, M.~D. Lukin, J.~I. Cirac, and P.
Zoller, Nature (London) \textbf{414}, 413 (2001); X.-L. Feng, Z.-M. Zhang,
X.-D. Li, S.-Q. Gong, and Z.-Z. Xu, Phys. Rev. Lett. \textbf{90}, 217902
(2003); L.-M. Duan and H.~J. Kimble, \textit{ibid.} \textbf{90}, 253601
(2003); D.~E. Browne, M.~B. Plenio, and S.~F. Huelga, \textit{ibid.}
\textbf{91}, 067901 (2003).
\bibitem{ref:cavity}
M.~B. Plenio, S.~F. Huelga, A. Beige, and P.~L. Knight, Phys. Rev. A
\textbf{59}, 2468 (1999); J. Hong and H.-W. Lee, Phys. Rev. Lett.
\textbf{89}, 237901 (2002); C. Marr, A. Beige, and G. Rempe, Phys. Rev. A
\textbf{68}, 033817 (2003).
\bibitem{ref:successive0}
J.~A. Bergou and M. Hillery, Phys. Rev. A \textbf{55}, 4585 (1997); A. Messina,
Eur. Phys. J. D \textbf{18}, 379 (2002); D.~E. Browne and M.~B. Plenio, Phys.
Rev. A \textbf{67}, 012325 (2003).
\bibitem{ref:qpfes}
G. Compagno, A. Messina, H. Nakazato, A. Napoli, M. Unoki, and K. Yuasa, Phys.
Rev. A \textbf{70}, 052316 (2004).
\bibitem{ref:qpfeq}
K. Yuasa and H. Nakazato, Prog. Theor. Phys. \textbf{114}, 523 (2005).
\bibitem{ref:RosannaWJJ}
R. Migliore, K. Yuasa, H. Nakazato, and A. Messina, Phys. Rev. B \textbf{74},
104503 (2006) [cond-mat/0604313 (2006)].
\bibitem{ref:EntScatCostaPRL}
A.~T. {Costa, Jr.}, S. Bose, and Y. Omar, Phys. Rev. Lett. \textbf{96}, 230501
(2006).
\bibitem{ref:EntScatPalma}
F. Ciccarello, G.~M. Palma, M. Zarcone, Y. Omar, and V.~R. Vieira,
cond-mat/0603456 (2006).
\bibitem{ref:Haroche}
E. Hagley, X. Ma\^{\i}tre, G. Nogues, C. Wunderlich, M. Brune, J.~M. Raimond,
and S. Haroche, Phys. Rev. Lett. \textbf{79}, 1 (1997); J.~M. Raimond, M.
Brune, and S. Haroche, Rev. Mod. Phys. \textbf{73}, 565 (2001).
\bibitem{ref:Kuzmich}
T. Chaneli\`ere, D.~N. Matsukevich, S.~D. Jenkins, S.-Y. Lan, T.~A.~B. Kennedy,
and A. Kuzmich, Nature (London) \textbf{438}, 833 (2005); D.~N. Matsukevich,
T. Chaneli\`ere, S.~D. Jenkins, S.-Y. Lan, T.~A.~B. Kennedy, and A. Kuzmich,
Phys. Rev. Lett. \textbf{96}, 030405 (2006).
\bibitem{ref:WavePacketDeltaPot}
H. Nakazato, Found. Phys. \textbf{27}, 1709 (1997).
\bibitem{note:NoReso}
The resonance effect is not clarified in Ref.\ \cite{ref:EntScatCostaPRL}. In
fact, the distance between the two scatterers, $d$, is not contained in the
formulas in Ref.\ \cite{ref:EntScatCostaPRL}, while it is the important
parameter for the resonance. Reference \cite{ref:EntScatPalma} briefly
discusses the entanglement generation in the same system as that discussed in Sec.\ \ref{sec:Model2} of
this article, but the parameter $kd$ (which is $kx_0$ in Ref.\
\cite{ref:EntScatPalma}) is fixed at $kd=\nu\pi\,(\nu=1,2,\ldots)$, and the
effect of the resonant scattering on the entanglement generation is not fully
clarified.
\bibitem{ref:Concurrence}
W.~K. Wootters, Phys. Rev. Lett. \textbf{80}, 2245 (1998).
\end{thebibliography}
\end{document}
\begin{document}
\begin{titlepage}
\def\thepage {}
\begin{flushleft}
\large Andrew V. Goldberg\footnote{A.V. Goldberg: InterTrust
Technologies Corp., 4750 Patrick Henry Drive, Santa Clara, CA 95054
email: [email protected]. Part of this research was done while this author
was at NEC Research Institute, Inc., Princeton, NJ.}
and Alexander V. Karzanov\footnote{A.V. Karzanov: Corresponding author.
Institute for System Analysis of the Russian Academy of Sciences, 9,
Prospect 60 Let Oktyabrya, 117312 Moscow, Russia, email:
[email protected]. This author was supported in part by a grant from
the Russian Foundation of Basic Research.}
\end{flushleft}
\begin{flushleft}
\bf\Large Maximum skew-symmetric flows and matchings
\end{flushleft}
\begin{flushleft}
December 2003
\end{flushleft}
\noindent
{\bf Abstract.}
The maximum integer skew-symmetric flow problem (MSFP)
generalizes both the maximum flow and maximum matching
problems. It was introduced by Tutte~\cite{tut-67} in terms of
self-conjugate flows in antisymmetrical digraphs. He showed that for
these objects there are natural analogs of classical theoretical
results on usual network flows, such as the flow decomposition,
augmenting path, and max-flow min-cut theorems. We give unified and
shorter proofs for those theoretical results.
We then extend to MSFP the shortest
augmenting path method of Edmonds and Karp~\cite{EK-72} and the
blocking flow method of Dinits~\cite{din-70}, obtaining algorithms
with similar time bounds in general case. Moreover, in the cases of
unit arc capacities and unit ``node capacities'' the blocking
skew-symmetric flow algorithm has time bounds similar to those
established in~\cite{ET-75,kar-73-2} for Dinits' algorithm.
In particular, this implies an algorithm for finding a
maximum matching in a nonbipartite graph in $O(\sqrt{n}m)$ time,
which matches the time bound for the algorithm of Micali and
Vazirani~\cite{MV-80}.
Finally, extending a clique compression technique of Feder and
Motwani~\cite{FM-91} to particular skew-symmetric graphs, we
speed up the implied maximum matching algorithm to run
in $O(\sqrt{n}m\log(n^2/m)/\log{n})$ time, improving the
best known bound for dense nonbipartite graphs.
Also other theoretical and algorithmic results on skew-symmetric
flows and their applications are presented.
{\bf Key words.} skew-symmetric graph -- network flow -- matching
-- b-matching
{\it Mathematics Subject Classification (1991):}
90C27, 90B10, 90C10, 05C85
\end{titlepage}
\baselineskip 15pt
\section{\Large Introduction}
By a {\em skew-symmetric flow} we mean a flow in a
skew-symmetric directed graph which takes equal values on any pair of
``skew-symmetric'' arcs. This is a synonym of Tutte's
{\em self-conjugate flow in an antisymmetrical digraph}~\cite{tut-67}.
This paper is devoted to the maximum integer skew-symmetric flow
problem, or, briefly, the {\em maximum IS-flow problem}. We study
combinatorial properties of this problem and develop fast algorithms
for it.
A well-known fact~\cite{FF-62} is that the bipartite matching problem
can be viewed as a special case of the maximum flow problem.
The combinatorial structure of nonbipartite matchings revealed by
Edmonds~\cite{edm-65} involves blossoms and is
more complicated than the structure of flows.
This phenomenon explains, to some extent, why
general matching algorithms are typically more intricate relative
to flow algorithms. The maximum IS-flow problem is a generalization
of both the maximum flow and maximum matching (or b-matching) problems.
Moreover,
this generalization appears to be well-grounded for two reasons.
First, the basic combinatorial and linear programming theorems for
usual flows have natural counterparts for IS-flows. Second, when
solving problems on IS-flows, one can use intuition, ideas and
technical tools well-understood for usual flows, so that the implied
algorithms for matchings become more comprehensible.
As the maximum flow problem is related to certain path
problems, the maximum IS-flow problem is related to certain problems on
so-called {\em regular paths} in skew-symmetric graphs.
We use some theoretical and algorithmic results on the {\em regular
reachability} and {\em shortest regular path} problems
from~\cite{GK-96}.
Tutte~\cite{tut-67} originated a mini-theory of IS-flows (in our terms)
to bridge theoretical results on matchings and their
generalizations (b-factors, b-matchings, degree-constrained subgraphs,
Gallai's track packings, etc.) and results on usual flows. This
theory parallels Ford and Fulkerson's flow theory~\cite{FF-62} and
includes as basic results the decomposition, augmenting path, and
max-flow min-cut theorems. Subsequently, some of those results were
re-examined in different, but equivalent, terms by other authors, e.g.,
in~\cite{blu-90,GK-95,KS-93}.
Recall that the flow decomposition theorem says that a flow can be
decomposed into a collection of source-to-sink paths and cycles. The
augmenting path theorem says that a flow is maximum if and only if it
admits no augmenting path. The max-flow min-cut theorem says that the
maximum flow value is equal to the minimum cut capacity. Their
skew-symmetric analogs are, respectively, that an IS-flow can be
decomposed into a collection of pairs of symmetric source-to-sink paths
and pairs of symmetric cycles, that an IS-flow is maximum if and only
if it admits no regular augmenting path, and that the maximum IS-flow
value is equal to the minimum {\em odd-barrier} capacity. We give
unified and shorter proofs for these skew-symmetric flow theorems.
There is a relationship between skew-symmetric flows and {\em
bidirected flows} introduced by Edmonds and Johnson~\cite{EJ-70}
in their combinatorial study of a natural class of integer linear
programs generalizing usual flow and matching problems. In
particular, they established a linear programming description for
integer bidirected flows. We finish the theoretical part by showing how
to obtain a linear programming description for maximum IS-flows
directly, using the max-IS-flow min-barrier theorem.
The second, larger, part of this paper is devoted to efficient
methods to solve the maximum IS-flow problem (briefly, {\em MSFP}) in
general and special cases, based on the theoretical ground given in the
first part. First of all we explain how to adapt the idea of
Anstee's elegant methods~\cite{ans-85,ans-87} for b-matchings in which
standard flow algorithms are used to construct an optimal half-integer
solution and then, after rounding, the ``good pre-solution''
is transformed into an optimal b-matching by solving $O(n)$
certain path problems.
We devise an $O(M(n,m)+nm)$-time algorithm for MSFP in a similar
fashion, using a regular reachability algorithm with linear
complexity to improve a good pre-solution.
Hereinafter $n$ and $m$ denote the numbers of nodes and arcs of the
input graph, respectively, and $M(n,m)$ is the time needed to find an
integer maximum flow in a usual network with $n$ nodes and $m$ arcs.
Without loss of generality, we assume $n=O(m)$.
The next approach is the core of this paper. The purpose is to
extend to MSFP the well-known shortest augmenting path algorithm of
Edmonds and Karp~\cite{EK-72} with complexity $O(nm^2)$, and its
improved version, the blocking flow algorithm of
Dinits~\cite{din-70} with complexity $O(n^2m)$, so as to preserve the
complexity bounds. Recall that the blocking flow algorithm consists
of $O(n)$ {\em phases}, each finding a blocking flow in a
layered network representing the union of currently shortest
augmenting paths. We introduce concepts of
shortest blocking and totally blocking IS-flows and show that an
optimal solution to MSFP is also constructed in $O(n)$ phases,
each finding a shortest blocking IS-flow in the residual skew-symmetric
network. In its turn a phase is reduced to finding a totally blocking
IS-flow in an {\em acyclic} (though not necessarily layered)
skew-symmetric network.
The crucial point is to perform the latter task in time comparable with
the phase time in Dinits' algorithm (which is $O(nm)$ in general case).
We reduce it to a certain auxiliary problem in a usual acyclic
digraph. A fast algorithm for this problem provides the desired time
bound for a phase.
The complexity of our blocking IS-flow algorithm
remains comparable with that of Dinits' algorithm in important
special cases where both the number of phases and the phase time
significantly decrease. More precisely, Dinits' algorithm applied
to the maximum matching problem in a bipartite graph runs in
$O(\sqrt{n}m)$ time~\cite{HK-73,kar-73}. Extending that result,
it was shown in~\cite{ET-75,kar-73-2} that for arbitrary
nonnegative integer capacities, Dinits' algorithm has
$O({\rm min}\{n,\sqrt{\Delta}\})$ phases and each phase runs in
$O({\rm min}\{nm,m+\Delta\})$ time, where $\Delta$ is the sum of transit
capacities of inner nodes. Here the transit capacity of a node
(briefly, the {\em node capacity}) is the maximum flow value that can
be pushed through this node. We show that both bounds remain valid for
the blocking IS-flow algorithm.
When the network has unit arc capacities (resp. unit inner node
capacities), the number of phases turns into $O(\sqrt{m})$ (resp.
$O(\sqrt{n})$); in both cases the phase time turns into $O(m)$.
The crucial auxiliary problem (that we are able to solve in linear
time for unit arc capacities) becomes the following {\em maximal
balanced path-set problem}:
\begin{itemize}
\item[{\em MBP}:] {\sl Given an acyclic digraph in which one sink and
an even set of sources partitioned into pairs are distinguished,
find an (inclusion-wise) maximal set of pairwise arc-disjoint paths
from sources to the sink such that for each pair $\{z,z'\}$ of sources
in the partition, the number of paths from $z$ is equal to that from
$z'$.}
\end{itemize}
As a consequence, the implied algorithm solves the maximum matching
problem in a general graph in the same time, $O(\sqrt{n}m)$, as
the algorithm of Micali and Vazirani~\cite{MV-80,vaz-90}
(cf.~\cite{blu-90,GT-91}) and solves the b-factor or maximum
degree-constrained subgraph problem in $O(m^{3/2})$ time, similarly
to Gabow~\cite{gab-83}. The logical structure of our algorithm differs
from that of~\cite{MV-80} and sophisticated data structures
(incremental trees for set union~\cite{GT-85}) are used
only in the regular reachability and shortest regular path algorithms
of linear complexity from~\cite{GK-96} (applied as black-box
subroutines) and once in the algorithm for MBP.
Finally, we show that a clique compression technique of Feder and
Motwani~\cite{FM-91} can be extended to certain
skew-symmetric graphs. As a result, our maximum matching algorithm
in a general graph is speeded up to run in
$O(\sqrt{n}m\log(n^2/m)/\log{n})$ time. This matches the best bound for
bipartite matching~\cite{FM-91}.
Fremuth-Paeger and Jungnickel~\cite{FJ-99} developed an algorithm for
MSFP (stated in terms of ``balanced flows'') which combines Dinits'
approach with ideas and tools from~\cite{MV-80,vaz-90}; it runs in
$O(nm^2)$ time for general capacities and in time slightly slower than
$O(\sqrt{n}m)$ in the nonbipartite matching case.
This paper is organized as follows. Basic definitions and facts are
given in Section~\ref{sec:back}. Sections~\ref{sec:theo}
and~\ref{sec:lin} contain theoretical results on
combinatorial and linear programming aspects of IS-flows,
respectively. Section~\ref{sec:gisa} describes Anstee's type algorithm
for MSFP. The shortest regular augmenting path algorithm and a high
level description of the blocking IS-flow method are developed in
Section~\ref{sec:sbf}. Section~\ref{sec:shortp} gives a short review
on facts and algorithms in~\cite{GK-96} for regular path problems.
Using it, Section~\ref{sec:iter} explains the idea of implementation of
a phase in the blocking IS-flow method. It also bounds the number of
phases for special skew-symmetric networks. Section~\ref{sec:acyc}
completes the description of the blocking IS-flow algorithm
by reducing the problem of finding a totally blocking IS-flow in an
acyclic skew-symmetric network to the above-mentioned auxiliary problem
in a usual acyclic digraph and devising a fast procedure to solve the
later. The concluding Section~\ref{sec:mat} discusses implications for
matchings and their generalizations, and explains how to speed up
the implied maximum matching algorithm by use of the clique
compression.
This paper is self-contained up to several quotations from~\cite{GK-96}.
Main results presented in this paper were announced in extended
abstract~\cite{GK-95}. Subsequently, the authors found a flaw in the
original fast implementation of a phase in the blocking IS-flow method.
It was corrected in a revised version of this paper (circulated in 2001)
where problem MBP and its weighted analog were introduced and
efficiently solved,
whereas the original version (identical to preprint~\cite{GK-99})
embraced only the content of Sections~\ref{sec:theo}--\ref{sec:iter}.
\section{\Large Preliminaries}\label{sec:back}
By a {\em skew-symmetric graph} we mean a digraph $G = (V,E)$ with
a mapping (involution) $\sigma$ of $V\cup E$ onto itself
such that:
(i) for each $x\in V\cup E$, $\sigma(x)\ne x$ and $\sigma(\sigma(x))=x$;
(ii) for each $v\in V$, $\sigma(v)\in V$;
and (iii) for each $a=(v,w)\in E$, $\sigma(a)=(\sigma(w),\sigma(v))$.
Although parallel arcs are allowed in $G$,
an arc leaving a node $x$ and entering a node $y$ is denoted by $(x,y)$
when it is not confusing.
We assume that $\sigma$ is fixed (when there are several such mappings)
and explicitly included in the description of $G$.
The node (arc) $\sigma(x)$ is called {\em symmetric} to a node (arc) $x$
(using, for brevity, the term {\em symmetric\/} rather than
skew-symmetric).
Symmetric objects are also called {\em mates}, and we usually use
notation with
primes for mates: $x'$ denotes the mate $\sigma(x)$ of an element
$x$. Note that $G$ can contain an arc $a$ from a node $v$ to its mate
$v'$; then $a'$ is also an arc from $v$ to $v'$.
Unless mentioned otherwise, when talking about paths (cycles), we mean
directed paths (cycles). The symmetry $\sigma$ is extended in a natural
way to paths, subgraphs, and other objects in $G$; e.g., two paths
(cycles) are symmetric if the elements of one of them are symmetric to
those of the other and go in the reverse order. Note that $G$ cannot
contain self-symmetric paths or cycles. Indeed, if
$P=(x_0,a_1,x_1,\ldots,a_k,x_k)$ is such a path (cycle), choose arcs
$a_i$ and $a_j$ such that $i\le j$, $a_j=\sigma(a_i)$ and $j-i$ is
minimum. Then $j>i+1$ (as $j=i$ would imply $\sigma(a_i)=a_i$ and
$j=i+1$ would imply $\sigma(x_i)=x_{j-1}=x_i$). Now
$\sigma(a_{i+1})=a_{j-1}$ contradicts the minimality of $j-i$.
We call a function $h$ on $E$ {\em symmetric\/} if
$h(a) = h(a')$ for all $a \in E$.
A {\em skew-symmetric network\/} is a quadruple $N=(G,\sigma,u,s)$
consisting of a skew-symmetric graph $G=(V,E)$ with symmetry $\sigma$,
a nonnegative integer-valued symmetric function $u$ (of {\em arc
capacities}) on $E$, and a {\em source} $s\in V$.
The mate $s'$ of $s$ is the {\em sink} of $N$. A {\em flow} in $N$
is a function $f: E \to{\mathbb R}_+$ satisfying the capacity constraints
$$
f(a) \leq u(a) \qquad \mbox{for all}\;\; a \in E
$$
and the conservation constraints
$$
\mbox{div}_f(x):=\sum_{(x,y) \in E} f(x,y) - \sum_{(y,x) \in E} f(y,x) =0
\qquad\mbox{for all}\;\; x \in V - \{s,s'\} .
$$
The value $\mbox{div}_f(s)$ is called the {\em value} of $f$ and denoted by
$|f|$; we usually assume that $|f|\ge 0$. Now {\em IS-flow} abbreviates
{\em integer symmetric flow}, the main object that we study in this
paper. The {\em maximum IS-flow problem (MSFP)} is to find
an IS-flow of maximum value in $N$.
The integrality requirement is important: if we do not require
$f$ to be integral, then for any integer flow $f$ in $N$, the flow $f'$,
defined by $f'(a):=(f(a)+f(a'))/2$ for $a\in E$, is a flow of the same
value as $f$, which is symmetric but not necessarily integral.
Therefore, the {\em fractional} skew-symmetric flow problem is
equivalent to the ordinary flow problem.
Note that, given a digraph $D=(V(D),A(D))$ with two specified nodes
$p$ and $q$ and nonnegative integer capacities of the arcs, we can
construct a skew-symmetric graph $G$ by taking a disjoint copy $D'$
of $D$ with all arcs reversed, adding two extra nodes $s$ and $s'$,
and adding four arcs $(s,p),(s,q'),(q,s'),(p',s')$ of infinite
capacity, where $p',q'$ are the copies of $p,q$ in $D'$,
respectively. Then there is a natural one-to-one correspondence
between integer flows from $p$ to $q$ in $D$
and the IS-flows from $s$
to $s'$ in $G$. This shows that MSFP generalizes the classical
(integer) max-flow problem.
\noindent
{\bf Remark.} Sometimes it is useful to consider a sharper version of
MSFP in which double-sided capacity constraints $\ell(a)\le f(a)\le
u(a)$, $a\in E$, are imposed, where $\ell,u:E\to{\mathbb Z}_+$ and $\ell
\le u$ ({\em problem DMSFP}). Similarly to the max-flow problem
with upper and lower capacities~\cite{FF-62}, DMSFP is reduced
to MSFP
in the skew-symmetric network $N'$ obtained from $N$ by subdividing
each arc $a=(x,y)$ into three arcs $(x,v),(v,w),(w,y)$
with (upper) capacities $u(a),u(a)-\ell(a),u(a)$, respectively, and
adding extra arcs $(s,w)$ and $(v,s')$ with capacity $\ell(a)$ each.
It is not difficult to show (e.g., using Theorem~\ref{tm:m-m})
that DMSFP has a solution if and only if all extra arcs are saturated
by a maximum IS-flow $f'$ for $N'$, and in this case $f'$ induces a
maximum IS-flow for $N$ in a natural way. For details,
see~\cite{FJ-99}.
In our study of IS-flows we rely on results for regular paths
in skew-symmetric graphs. A {\em regular path}, or an {\em r-path},
is a path in $G$ that does not contain a pair of symmetric arcs.
Similarly, an {\em r-cycle} is a cycle that does not contain a pair
of symmetric arcs. The {\em r-reachability problem (RP)\/} is to find
an r-path from $s$ to $s'$ or a proof that there is none.
Given a symmetric function of
{\em arc lengths}, the {\em shortest r-path problem (SRP)\/} is to find
a minimum length r-path from $s$ to $s'$ or a proof that there is none.
A criterion for the existence of a regular $s$ to $s'$ path is less
trivial than that for the usual path reachability; it involves
so-called barriers. We say that
$$
{\cal B}=(A; X_1, \ldots, X_k)
$$
is an {\em $s$-barrier} if the following conditions hold.
\begin{enumerate}
\item[(B1)] $A, X_1, \ldots, X_k$ are pairwise disjoint subsets
of $V$, and $s \in A$.
\item[(B2)] For $A' = \sigma(A)$, $A \cap A' = \emptyset$.
\item[(B3)] For $i = 1, \ldots, k$, $X_i$ is self-symmetric, i.e.,
$\sigma(X_i) = X_i$.
\item[(B4)] For $i = 1, \ldots, k$, there is a unique arc, $e^i$,
from $A$ to $X_i$.
\item[(B5)] For $i,j = 1,\ldots, k$ and $i \not = j$, no arc
connects $X_i$ and $X_ j$.
\item[(B6)] For $M := V - (A\cup A' \cup X_1 \cup \ldots \cup X_k)$
and $i = 1, \ldots, k$, no arc connects $X_i$ and $M$.
\item[(B7)] No arc goes from $A$ to $A' \cup M$.
\end{enumerate}
(Note that arcs from $A'$ to $A$, from $X_i$ to $A$, and from $M$ to $A$
are possible.) Figure~\ref{fig:bar} illustrates the definition.
Tutte proved the following (see also~\cite{blu-90,GK-96}).
\begin{figure}[tb]
\unitlength=1mm
\begin{center}
\begin{picture}(120,80)
\put(50,5){\circle{2}}
\put(52,6){$s$}
\put(50,75){\circle{2}}
\put(52,72){$s'$}
\put(30,40){\circle*{1.5}}
\put(40,40){\circle*{1.5}}
\put(50,40){\circle*{1.5}}
\put(0,20){\line(5,-2){50}}
\put(100,20){\line(-5,-2){50}}
\put(0,60){\line(5,2){50}}
\put(100,60){\line(-5,2){50}}
\put(0,20){\line(1,0){100}}
\put(0,60){\line(1,0){100}}
\put(37,8){$A$}
\put(37,69){$A'$}
\put(15,40){\oval(15,25)}
\put(65,40){\oval(15,25)}
\put(90,30){\line(1,0){25}}
\put(90,50){\line(1,0){25}}
\put(90,30){\line(0,1){20}}
\put(115,30){\line(0,1){20}}
\put(13,39){$X_1$}
\put(63,39){$X_k$}
\put(101,39){$M$}
\put(10,34){\vector(1,0){10}}
\put(20,46){\vector(-1,0){10}}
\put(62,34){\vector(1,0){8}}
\put(70,46){\vector(-1,0){8}}
\put(95,35){\vector(0,1){10}}
\put(110,45){\vector(0,-1){10}}
\put(18,32){\vector(2,-3){10}}
\put(28,63){\vector(-2,-3){10}}
\put(60,35){\vector(-1,-2){10}}
\put(50,65){\vector(1,-2){10}}
\put(100,35){\vector(-3,-4){14}}
\put(86,63){\vector(3,-4){14}}
\put(35,65){\vector(0,-1){50}}
\put(45,65){\vector(0,-1){50}}
\put(11,22){$e^1$}
\put(67,22){$e^k$}
\thicklines
\put(15,17){\vector(0,1){13.5}}
\put(15,50){\vector(0,1){13.5}}
\put(65,17){\vector(0,1){13}}
\put(65,50){\vector(0,1){13}}
\end{picture}
\end{center}
\caption{ A barrier}
\label{fig:bar}
\end{figure}
\begin{theorem}\label{tm:bar}{\rm \cite{tut-67}}
There is an r-path from $s$ to $s'$ if and only if there is no s-barrier.
\end{theorem}
This criterion will be used in Section~\ref{sec:theo}
to obtain an analog of the max-flow min-cut theorem for IS-flows.
RP is efficiently solvable.
\begin{theorem} \label{tm:ratime} {\rm \cite{blu-90,GK-96}}
The r-reachability problem in $G$ can be solved in $O(m)$ time.
\end{theorem}
The methods for the maximum IS-flow problem that we develop apply,
as a subroutine, the r-reachability algorithm of linear
complexity from~\cite{GK-96}, which finds either a regular $s$ to $s'$
path or an $s$-barrier. Another ingredient used in our methods is the
shortest r-path algorithm for the case of nonnegative symmetric
lengths, which runs in $O(m\,\log n)$ time in general, and in $O(m)$
time for all-unit lengths~\cite{GK-96}. The necessary results on RP and
SRP are outlined in Section~\ref{sec:shortp}.
In the rest of this paper, $\sigma$ and $s$
will denote the symmetry map and the source, respectively,
regardless of the network in question, which will allow us to use
the shorter notation $(G,u)$ for a network $(G,\sigma,u,s)$.
Given a simple path $P$, the number of arcs on $P$ is denoted by
$|P|$ and the incidence vector of its arc set in ${\mathbb R}^E$
is denoted by $\chi^P$, i.e., $\chi^P(a)=1$ if $a$ is an arc of
$P$, and 0 otherwise.
\subsection{Relationships to Matchings}
\label{ssec:rel_mat}
Given an undirected graph $G' = (V',E')$, a {\em matching} is a subset
$M\subseteq E'$ such that no two edges of $M$ have a common endnode.
The {\em maximum matching problem} is to find a matching $M$
whose cardinality $|M|$ is as large as possible.
There are well-known generalizations of matchings;
for a survey see~\cite{law-76,LP-86,sch-03}. Let
$u_0,u:E'\to{\mathbb Z}_+\cup\{\infty\}$ and $b_0,b:V'\to{\mathbb Z}_+$ be
functions such that $b_0\le b$ and $u_0\le u$.
A {\em $(u_0,u)$-capacitated $(b_0,b)$-matching} is a function
$h: E'\to{\mathbb Z}_+$ satisfying the capacity constraints
$$
u_0(e)\le h(e)\le u(e) \qquad \mbox{for all}\;\; e \in E',
$$
and the supply constraints
$$
b_0(v)\le \sum_{e=\{v,w\}\in E'} h(e) \le b(v) \qquad
\mbox{for all}\;\; v \in V'.
$$
The {\em value}\/ of $h$ is defined to be $h(E')$. Hereinafter,
for a numerical function $g$ on a set $S$ and a subset $S'\subseteq
S$, $g(S')$ denotes $\sum_{e\in S'} g(e)$.
Popular special cases are:
a {\em u-capacitated b-matching} (when $b_0=0$);
a {\em degree-constrained subgraph} (when $u\equiv 1$);
a {\em perfect b-matching} (when $u\equiv \infty$ and $b_0=b$);
a {\em b-factor} (when $u\equiv 1$ and $b_0=b$). In these cases one
assigns $u_0=0$.
Typically, in unweighted versions, one is asked for maximizing the
value of $h$ (in the former two cases) or for finding a feasible $h$
(in the latter two cases).
The general maximum $(u_0,u)$-capacitated $(b_0,b)$-matching problem
is reduced to the maximum IS-flow problem (MSFP or DMSFP, depending on
whether both $u_0,b_0$ are zero functions or not) without increasing
the problem size by more than a constant factor. The construction of
the corresponding capacitated skew-symmetric graph $G=(V,E)$
is straightforward (and close to that in~\cite{tut-67}):
(i) for each $v\in V'$, $V$ contains two symmetric nodes
$v_1$ and $v_2$;
(ii) also $V$ contains two additional symmetric nodes
$s$ and $s'$ (the source and the sink);
(iii) for each $e=\{v,w\} \in E'$, $E$ contains two symmetric arcs
$(v_1,w_2)$ and $(w_1, v_2)$ with lower capacity $u_0(e)$ and upper
capacity $u(e)$;
(iv) for each $v\in V'$, $E$ contains two symmetric arcs ($s,v_1)$
and $(v_2,s')$ with lower capacity $b_0(v)$ and upper capacity $b(v)$.
There is a natural one-to-one correspondence between the
$(u_0,u)$-capacitated $(b_0,b)$-matchings $h$ in $G'$ and the IS-flows
$f$ from $s$ to $s'$ in $G$, and the value of $f$ is twice the value
of $h$. Figure~\ref{fig:red} illustrates the correspondence for
matchings.
\begin{figure}[tb]
\unitlength=1mm
\begin{center}
\begin{picture}(150,75)
\put(15,15){\circle*{1.5}}
\put(11,14){$a$}
\put(15,30){\circle*{1.5}}
\put(11,29){$b$}
\put(30,45){\circle*{1.5}}
\put(32,44){$c$}
\put(15,60){\circle*{1.5}}
\put(11,59){$d$}
\put(15,75){\circle*{1.5}}
\put(11,74){$e$}
\put(70,45){\circle*{1.5}}
\put(66,44){$s$}
\put(140,45){\circle*{1.5}}
\put(142,44){$s'$}
\put(90,15){\circle*{1.5}}
\put(85,12){$a_1$}
\put(90,30){\circle*{1.5}}
\put(85,27){$b_1$}
\put(90,45){\circle*{1.5}}
\put(86,41){$c_1$}
\put(90,60){\circle*{1.5}}
\put(85,61){$d_1$}
\put(90,75){\circle*{1.5}}
\put(85,75){$e_1$}
\put(120,15){\circle*{1.5}}
\put(122,12){$a_2$}
\put(120,30){\circle*{1.5}}
\put(122,27){$b_2$}
\put(120,45){\circle*{1.5}}
\put(121,41){$c_2$}
\put(120,60){\circle*{1.5}}
\put(122,61){$d_2$}
\put(120,75){\circle*{1.5}}
\put(122,75){$e_2$}
\put(15,30){\line(0,1){30}}
\put(15,60){\line(0,1){15}}
\put(15,30){\line(1,1){15}}
\put(70,45){\line(2,3){20}}
\put(140,45){\line(-2,3){20}}
\put(90,30){\line(2,1){30}}
\put(120,30){\line(-2,1){30}}
\put(90,30){\line(1,1){30}}
\put(120,30){\line(-1,1){30}}
\put(90,60){\line(2,1){30}}
\put(120,60){\line(-2,1){30}}
{
\thicklines
\put(15,15){\line(0,1){15}}
\put(30,45){\line(-1,1){15}}
\put(70,45){\vector(2,-3){19}}
\put(70,45){\vector(4,-3){19}}
\put(70,45){\vector(1,0){19}}
\put(70,45){\vector(4,3){19}}
\put(90,15){\vector(2,1){29}}
\put(90,30){\vector(2,-1){29}}
\put(90,45){\vector(2,1){29}}
\put(90,60){\vector(2,-1){29}}
\put(120,15){\vector(2,3){18.5}}
\put(120,30){\vector(4,3){18.5}}
\put(120,45){\vector(1,0){18.5}}
\put(120,60){\vector(4,-3){18.5}}
}
\put(7,2){Matching}
\put(87,2){Skew-symmetric flow}
\end{picture}
\end{center}
\caption{{\sl Reduction example for maximum cardinality
matching}}
\label{fig:red}
\end{figure}
In case of the b-factor or degree-constrained subgraph problem,
one may assume
that $b$ does not exceed the node degree function of $G$. Therefore,
one can make a further reduction to MSFP in a network with $O(|E'|)$
nodes, $O(|E'|)$ arcs, and unit arc capacities (by getting rid of
lower capacities as in the Remark above and then splitting each arc
$a$ with capacity $q(a)>1$ into $q(a)$ parallel arcs of capacity one).
In Section~\ref{sec:mat} we compare the time bounds of our methods for
MSFP applied to the matching problem and its generalizations above with
known bounds for these problems.
Edmonds and Johnson~\cite{EJ-70} studied the class of integer linear
programs in which the constraint matrix entries are integers
between --2 and +2 and the sum of absolute values of
entries in each column (without including entries from the box
constraints) does not exceed two. Such a problem is often stated in
terms of bidirected graphs (for a survey,
see~\cite[Chapter 36]{sch-03}). Recall that a {\em bidirected graph}
$H=(X,B)$ may contain, besides usual directed edges going from one
node to another, edges directed {\em from} both of its endnodes, and
{\em to} both of them. A particular problem on such an object is the
{\em maximum bidirected flow problem}: given a capacity
function $c:B\to {\mathbb Z}_+$ and a {\em terminal} $p\in X$,
find a function ({\em biflow}) $g:B\to{\mathbb Z}_+$
maximizing the value $\mbox{div}_g(p)$.
(Reasonable versions with more terminals are reduced to this one.)
Here $g\le c$ and $\mbox{div}_g(x)=0$ for all $x\in X-\{p\}$,
where $\mbox{div}_g(x)$ is the total biflow on the edges directed
from $x$ minus the total biflow on the edges directed to $x$
(a loop $e$ at $x$ contributes 0, $2g(e)$ or $-2g(e)$).
The maximum IS-flow problem admits a linear time and space reduction
to the maximum biflow problem (in fact, both are equivalent).
More precisely, given an
instance $N=(G=(V,E),\sigma,u,s)$ of MSFP, take a partition $(X,X')$
of $V$ such that $X'=\sigma(X)$ and $s\in X$. For each pair $\{a,a'\}$
of symmetric arcs in $E$ and nodes $x,y\in X$, assign an edge from $x
$ to $y$ if $a$ or $a'$ goes from $x$ to $y$; an edge from both $x,y$
if $a$ or $a'$ goes from $x$ to $\sigma(y)$; an edge to both $x,y$ if
$a$ or $a'$ goes from $\sigma(x)$ to $y$. This produces a bidirected
graph $H=(X,B)$. We set $p:=s$ and assign the capacity $c(e)$ of
each edge $e\in B$ to be the capacity of the arc from which
$e$ is created. There is a one-to-one correspondence between
the IS-flows in $N$ and the
biflows in $(H,c,p)$, and the values of corresponding flows are equal.
A reverse reduction is also obvious. Using these reductions, one can
try to derive results for IS-flows from corresponding results on
biflows, and vice versa.
In this paper we give direct proofs and algorithms for IS-flows.
\section{\Large Mini-Theory of Skew-Symmetric Flows}\label{sec:theo}
This section extends the classical flow decomposition, augmenting
path, and max-flow min-cut theorems of Ford and Fulkerson \cite{FF-62}
to the skew-symmetric case.
The {\em support} $\{e\in S: f(e)\ne 0\}$ of a function $f:S\to{\mathbb R}$
is denoted by $\mbox{supp}(f)$.
Let $h$ be a nonnegative integer symmetric function on the arcs of
a skew-symmetric graph $G=(V,E)$.
A path (cycle) $P$ in $G$ is called $h$-{\em regular} if
$h(a)>0$ for all arcs $a$ of $P$ and each arc $a\in P$ such that $a'\in
P$ satisfies $h(a)\ge 2$. Clearly when $h$ is all-unit on $E$, the sets
of regular and $h$-regular paths (cycles) are the same. We call an arc
$a$ of $P$ {\em ordinary} if $a'\not\in P$ and define the $h$-{\em
capacity} $\delta_h(P)$ of $P$ to be the minimum of all values $h(a)$ for
ordinary arcs $a$ on $P$ and all values $\lfloor h(a)/2\rfloor$ for
nonordinary arcs $a$ on $P$.
To state the symmetric flow decomposition theorem, consider an IS-flow
$f$ in a skew-symmetric network $N=(G=(V,E),u)$. An IS-flow $g$ in $N$
is called {\em elementary} if it is representable as $g=\delta\chi^P
+\delta\chi^{P'}$, where $P$ is a simple cycle or a simple path from $s$
to $s'$ or a simple path from $s'$ to $s$, $P'=\sigma(P)$, and
$\delta$ is a {\em positive integer}. Since $g$ is feasible, $P$ is
$u$-regular and $\delta\le\delta_u(P)$. We denote $g$ by
$(P,P',\delta)$. By a {\em symmetric decomposition} of $f$ we mean a set
$D$ of elementary flows such that $f=\sum(g : g\in D)$.
The following {\em symmetric decomposition theorem}
(see~\cite{FJ-99,GK-95}) slightly
generalizes a result by Tutte~\cite{tut-67} that there exists a
symmetric set of $|f|$ paths from $s$ to $s'$ such that any arc $a$
is contained in at most $f(a)$ paths.
\begin{theorem}\label{tm:dec} For an IS-flow $f$ in $G$, there exists a
symmetric decomposition consisting of at most $m$ elementary flows.
\end{theorem}
\begin{proof} We build up an
$f$-regular path $\Gamma$ in $G$ until this path contains a simple
cycle $P$ or a simple path $P$ connecting $s$ and $s'$. This will
determine a member of the desired flow decomposition. Then we
accordingly decrease $f$ and repeat the process for the resulting
IS-flow $f'$, and so on until we obtain the zero flow.
We start with $\Gamma$ formed by a single arc $a\in\mbox{supp}(f)$. First we
grow $\Gamma$ forward. Let $b=(v,w)$ be the last arc on the current
(simple) path $\Gamma$.
Suppose that $w \not = s,s'$. By the conservation for $f$ at $w$,
$\mbox{supp}(f)$ must contain an arc $q=(w,z)$. If $q'$ is not on $\Gamma$ or
$f(q)\ge 2$, we add $q$ to $\Gamma$.
Suppose $q'$ is on $\Gamma$ and $f(q)=1$. Let $\Gamma_1$ be the part
of $\Gamma$ between $w'$ and $w$. Then $\Gamma_1$ contains at
least one arc since $w \ne w'$. Suppose there is an arc $\widetilde q\in
\mbox{supp}(f)$ leaving $w$ and different from $q$. Then we can add $\widetilde
q$ to $\Gamma$ instead of $q$, forming a longer $f$-regular path.
(Note that since the path $\Gamma$ is simple, ${\widetilde q}'$ is not
on $\Gamma$). Now
suppose that such a $\widetilde q$ does not exist. Then exactly one unit of
the flow $f$ leaves $w$. Hence, exactly one unit of the flow $f$ enters
$w$, implying that $b=(v,w)$ is the only arc entering $w$ in
$\mbox{supp}(f)$, and that $f(b)=1$. But $\sigma(d)$ also enters $w$, where
$d$ is the first arc on $\Gamma_1$. The fact that $\sigma(d)\ne b$
(since $\Gamma_1$ is $f$-regular) leads to a contradiction.
Let $(w,z)$ be the arc added to $\Gamma$. If $z$ is not on $\Gamma$,
then $\Gamma$ is a simple $f$-regular path, and we continue growing
$\Gamma$. If $z$ is on $\Gamma$, we discover a simple $f$-regular cycle
$P$.
If $\Gamma$ reaches $s'$ or $s$, we start growing $\Gamma$ backward
from the initial arc $a$ in a way similar to growing it forward. We stop
when an $f$-regular cycle $P$ is found or one of $s$, $s'$ is reached.
In the latter case $P = \Gamma$ is either an $f$-regular path from $s$
to $s'$ or from $s'$ to $s$, or an $f$-regular cycle (containing $s$ or
$s'$).
Form the elementary flow $g=(P,P',\delta)$ with $\delta=\delta_f(P)$ and
reduce $f$ to $f':=f-\delta\chi^P-\delta\chi^{P'}$. Since $P$ is
$f$-regular, $\delta>0$. Moreover, there is a pair $e,e'$ of symmetric
arcs of $P$ such that either $f'(e)=f'(e')=0$ or $f'(e)=f'(e')=1$; we
associate such a pair with $g$. In the former case $e,e'$ vanish in the
support of the new IS-flow $f'$, while in the latter case $e,e'$ can be
used in further iterations of the decomposition process at most once.
Therefore, each pair of arc mates of $G$ is associated with at most two
members of the constructed decomposition $D$, yielding $|D|\le m$.
\end{proof}
The above proof gives a polynomial time algorithm for symmetric
decomposition. Moreover, the above decomposition process can be easily
implemented in $O(nm)$ time, which matches the complexity of standard
decomposition algorithms for usual flows.
The decomposition theorem and the fact that the network has no
self-symmetric cycles imply the following useful property noticed by
Tutte as well.
\begin{corollary}\label{cor:even} {\rm \cite{tut-67}}
For any self-symmetric set $S\subseteq V$
and any IS-flow in $G$, the total flow on the arcs entering $S$, as well
as the total flow on the arcs leaving $S$, is even.
\end{corollary}
{\bf Remark.\ }
Another consequence of Theorem~\ref{tm:dec} is that one may assume
that $G$ has no arc entering $s$. Indeed, consider a maximum IS-flow
$f$ in $G$ and a symmetric decomposition $D$ of $f$. Putting together
the elementary flows from $s$ to $s'$ in $D$, we obtain an IS-flow $f'$
in $G$ with $|f'| \geq |f|$, so $f'$ is a maximum flow. Since $f'$ uses
no arc entering $s$ or leaving $s'$, deletion of all such arcs from $G$
produces an equivalent problem in a skew-symmetric graph.
Next we state a skew-symmetric version of the augmenting path theorem.
It is convenient to consider the graph $G^+=(V,E^+)$ formed by adding a
reverse arc $(y,x)$ to each arc $(x,y)$ of $G$. For $a \in E^+$, $a^R$
denotes the corresponding reverse arc. The symmetry $\sigma$ is extended
to $E^+$ in a natural way. Given a (nonnegative integer) symmetric
capacity function $u$ on $E$ and an IS-flow $f$ on $G$,
define the {\em residual capacity}
$u_f(a)$ of an arc $a \in E^+$ to be $u(a)-f(a)$ if $a \in E$, and
$f(a^R)$ otherwise. An arc $a \in E^+$ is called {\em residual} if
$u_f(a) > 0$, and {\em saturated} otherwise. Given an IS-flow $g$ in the
network $(G^+,u_f)$, we define the function $f \oplus g$ on $E$ by
setting $(f \oplus g)(a) := f(a) + g(a) - g(a^R)$. Clearly $f \oplus g$
is a feasible IS-flow in $(G,u)$ whose value is $|f|+|g|$.
By an {\em r-augmenting path} for $f$ we mean a $u_f$-regular path from
$s$ to $s'$ in $G^+$. If $P$ is an r-augmenting path and if $\delta \in
{\mathbb N}$ does not exceed the $u_f$-capacity of $P$, then we can push
$\delta$ units of flow through a (not necessarily directed) path in $G$
corresponding to $P$ and then $\delta$ units through the path
corresponding to $P'$. Formally, $f$ is transformed into $f \oplus g$,
where $g$ is the elementary flow $(P, P', \delta)$ in $(G^+,u_f)$. Such
an augmentation increases the value of $f$ by $2\delta$.
\begin{theorem}\label{tm:aug} {\rm \cite{tut-67}}
An IS-flow $f$ is maximum if and only if there is no r-augmenting path.
\end{theorem}
\begin{proof}
The direction that the existence of an r-augmenting path implies
that $f$ is not maximum is obvious in light of the above discussion.
To see the other direction, suppose that $f$ is not maximum, and let
$f^*$ be a maximum IS-flow in $G$. For $a \in E$ define $g(a) := f^*(a) -
f(a)$ and $g(a^R) := 0$ if $f^*(a) \geq f(a)$, while $g(a^R) := f(a) -
f^*(a)$ and $g(a) := 0$ if $f^*(a) < f(a)$. One can see that $g$ is a
feasible symmetric flow in $(G^+,u_f)$. Take a symmetric decomposition
$D$ of $g$. Since $|g| = |f^*| - |f| > 0$, $D$ has a
member $(P,P',\delta)$, where $P$ is a $u_f$-regular path from $s$ to
$s'$. Then $P$ is an r-augmenting path for $f$.
\end{proof}
In what follows we will use a simple construction which enables us
to reduce the task of finding an r-augmenting path to the r-reachability
problem. For a skew-symmetric network $(H,h)$, split each arc $a =
(x,y)$ of $H$ into two parallel arcs $a_1$ and $a_2$ from $x$ to $y$
(the {\em first\/} and {\em second split-arcs\/} generated by $a$).
These arcs are endowed with the capacities $[h](a_1) := \lceil h(a)/2
\rceil$ and $[h](a_2) := \lfloor h(a)/2 \rfloor$. Then delete all arcs
with zero capacity $[h]$. The resulting capacitated graph is called the {\em
split-graph} for $(H,h)$ and denoted by $S(H,h)$. The symmetry $\sigma$
is extended to the arcs of $S(H,h)$ in a natural way, by defining
$\sigma(a_i) := (\sigma(a))_i$ for $i=1,2$.
For a path $P$ in $S(H,h)$, its image in $H$ is denoted by $\omega(P)$
(i.e., $\omega(P)$ is obtained by replacing each arc $a_i$ of $P$ by the
original arc $a=:\omega(a_i)$). It is easy to see that if $P$
is regular, then $\omega(P)$ is $h$-regular. Conversely, for any
$h$-regular path $Q$ in $H$, there is a (possibly not unique)
r-path $P$ in $S(H,h)$ such that $\omega(P)=Q$. Indeed, replace
each ordinary arc $a$ of $Q$ by the first split-arc $a_1$ (existing
as $h(a)\ge 1$) and replace each pair $a,a'$ of arc mates in $Q$ by
$a_i,a'_j$ for $\{i,j\}=\{1,2\}$ (taking into account that
$h(a)=h(a')\ge 2$). This gives the required r-path $P$. Thus,
Theorem~\ref{tm:aug} admits the following reformulation in terms
of split-graphs.
\begin{corollary}\label{tm:spaug} An IS-flow $f$ in $(G,u)$ is maximum
if and only if there is no regular
path from $s$ to $s'$ in $S(G^+,u_f)$.
\end{corollary}
Finally, the classic max-flow min-cut theorem states that the maximum
flow value is equal to the minimum cut capacity. A skew-symmetric
version of this theorem involves a more complicated object which is
close to an $s$-barrier occurring in the solvability criterion for the
r-reachability problem given in Theorem~\ref{tm:bar}. We say that
${\cal B} = (A; X_1, \ldots, X_k) $ is an {\em odd $s$-barrier} for
$(G,u)$ if the following conditions hold.
\begin{enumerate}
\item[(O1)] $A, X_1, \ldots, X_k$ are pairwise disjoint subsets
of $V$, and $s \in A$.
\item[(O2)] For $A' = \sigma(A)$, $A \cap A' = \emptyset$.
\item[(O3)] For $i = 1, \ldots, k$, $X_i$ is self-symmetric, {\it
i.e.,} $\sigma(X_i) = X_i$.
\item[(O4)] For $i = 1, \ldots, k$, the total capacity $u(A, X_i)$
of the arcs from $A$ to $X_i$ is odd.
\item[(O5)] For $i,j = 1, \ldots, k$ and $i \not = j$, no
positive capacity arc connects $X_i$ and $X_j$.
\item[(O6)] For $M := V - (A\cup A' \cup X_1 \cup \ldots \cup X_k)
$ and $ i = 1, \ldots, k$, no positive capacity arc connects $X_i$
and $M$.
\end{enumerate}
Compare with (B1)--(B7) in Section~\ref{sec:back}.
Define the {\em capacity} $u({\cal B})$ of ${\cal B}$ to be
$u(A, V-A) - k$. Since the source is denoted by $s$ throughout, we
refer to an odd $s$-barrier as {\em odd barrier}.
The following is the {\em maximum IS-flow minimum barrier theorem}.
\begin{theorem}\label{tm:m-m} {\rm \cite{tut-67}}
The maximum IS-flow value is equal to the minimum odd barrier capacity.
\end{theorem}
\begin{proof}
To see that the capacity of an odd barrier ${\cal B}=(A;X_1,\ldots,
X_k)$ is an upper bound on the value of an IS-flow $f$, consider a
symmetric decomposition $D$ of $f$. For each member $g=(P,P',\delta)$ of
$D$, where $P$ is a path from $s$ to $s'$, take the {\em last} arc
$a=(x,y)$ of the {\em first} path $P$ such that $x\in A$. If $y\in
A'$, then the symmetric arc $a'$ (which is in $P'$) also goes from $A$
to $A'$ (by (O2)), and therefore, $g$ uses at least $2\delta$ units of the
capacity of arcs from $A$ to $A'$. Associate $g$ with the pair $a,a'$.
Now let $y\not\in A'$. Since $y\not\in A$, $y$ is either in $Y:=M$ or
in $Y:=X_i$ for some $i$. The choice of $a$ and (O1),(O5),(O6) imply
that $P$ leaves $Y$ by an arc $b$ from $Y$ to $A'$.
Then the symmetric arc $b'$ (which is in $P'$) goes from $A$ to $Y$
(since $Y$ is self-symmetric),
and therefore, $g$ uses at least $2\delta$ units of the capacity
$u(A,Y)$. Associate $g$ with the pair $a,b'$ (possibly $a=b'$).
Note that at least one unit of each capacity $u(A,X_i)$ is not used
under the canonical way we associate the elementary $s$ to $s'$ flows
of $D$ with arcs from $A$ to $V-A$ (since $u(A,X_i)$ is odd, by (O4)).
By these reasonings, $|f|\le u({\cal B})$.
Next we show that the two values in the theorem are equal. Let $f$ be a
maximum IS-flow. By Corollary~\ref{tm:spaug}, the split-graph
$S=S(G^+,u_f)$ contains no $s$ to $s'$ r-path, so it must contain an
$s$-barrier ${\cal B} = (A; X_1, \ldots, X_k)$, by Theorem~\ref{tm:bar}.
Let $e^i$ be the (unique) arc from $A$ to $X_i$ in $S$ (see (B4) in
Section 2). By the construction of $S$, it follows that the residual
capacity $u_f$ of every arc from $A$ to $X_i$ in $G^+$ is zero except
for the arc $\omega(e^i)$, whose residual capacity is one.
Hence,
\begin{itemize}
\item[(i)] if $e^i$ was formed by splitting an arc $a\in E$, then
$a$ goes from $A$ to $X_i$, and $f(a)=u(a)-1$;
\item[(ii)] if $e^i$ was formed by splitting $a^R$ for $a\in E$,
then $a$ goes from $X_i$ to $A$, and $f(a)=1$;
\item[(iii)] all arcs from $A$ to $X_i$ in $G$, except $a$ in case (i),
are saturated by $f$;
\item[(iv)] all arcs from $X_i$ to $A$ in $G$, except $a$ in case (ii),
are free of flow.
\end{itemize}
Furthermore, comparing arcs in $S$ and $G$, we observe that:
\begin{itemize}
\item[(v)] property (B7) implies that the arcs from $A$
to $A'\cup M$ are saturated and the arcs from $A'\cup M$ to $A$ are free
of flow;
\item[(vi)] property (B5) implies (O5) and (B6) implies (O6).
\end{itemize}
Properties (i)--(iv),(O5),(O6) together with Corollary~\ref{cor:even}
provide (O4). So ${\cal B}$ is an odd $s$-barrier in $G$. We have
$|f|=f(A,V-A)-f(V-A,A)=u(A,V-A)-k$ (in view of (i)--(v)). Hence,
$|f|=u({\cal B})$.
\end{proof}
\section{\Large Integer and Linear Programming
Formulations}\label{sec:lin}
Although methods of solving MSFP developed in subsequent sections will
not use explicitly linear programming aspects exhibited in this
section, such aspects help to understand more about the structure of
IS-flows.
MSFP is stated as an integer program in a straightforward way.
We use function rather than vector notation. For functions $g,h$ on a
set $S$, $g\cdot h$ denotes the inner product $\sum_{x\in S}g(x)h(x)$.
Assuming that no arc of $G$ enters the source $s$ (see the Remark in
the previous section), MSFP can be written as follows:
\begin{eqnarray}
\mbox{\bf maximize}\;\; |f| = \sum\nolimits_{(s,v) \in E} f(s,v)
& & \mbox{\bf subject to} \label{eq:1} \\
f(a) \geq 0 & & \forall a \in E \label{eq:2} \\
f(a) \leq u(a) & & \forall a \in E \label{eq:3} \\
-\sum\nolimits_{(u,v) \in E} f(u,v)
+ \sum\nolimits_{(v,w) \in E} f(v,w) = 0
& & \forall v \in V - \{s,s'\} \label{eq:4} \\
f(a) - f(\sigma(a)) = 0 & & \forall a \in E \label{eq:5} \\
f(a) \;\;\;\mbox{integer} & & \forall a \in E \label{eq:6}
\end{eqnarray}
A linear programming formulation for MSFP is obtained by
replacing the integrality condition (\ref{eq:6}) by linear
constraints related to certain objects that we call odd fragments
in $G$. The correctness of the resulting
linear program will be shown by use of the max-min relation
between IS-flow and odd barriers in Theorem~\ref{tm:m-m}.
Alternatively, one can try to derive it from a linear programming
characterization of integer bidirected flows in~\cite{EJ-70} (using the
reduction as in Section~\ref{sec:back}).
An {\em odd fragment} is a pair $\rho = (V_\rho, U_\rho)$, where
$V_\rho$ is a {\em self-symmetric} set of nodes with $s\not\in V_\rho$,
and $U_\rho$ is a subset of arcs entering $V_\rho$ such that the total
capacity $u(U_\rho)$ is odd. The {\em characteristic function}
$\chi_\rho$ of $\rho$ is the function on $E$ defined by
\begin{equation}\label{eq:ch_of}
\chi_\rho (a) := \left\{
\begin{array}{rl}
1 & \mbox{if} \;\; a \in U_\rho \cup \sigma(U_\rho),\\
-1 & \mbox{if} \;\; a \in \delta(V_\rho) - (U_\rho \cup
\sigma(U_\rho)),\\
0 & \mbox{otherwise}.
\end{array}
\right.
\end{equation}
Here $\delta(V_\rho)$ is the set of arcs with one end in $V_\rho$ and
the other in $V - V_\rho$. We denote the set of odd fragments by $\Omega$.
Let $f$ be a (feasible) IS-flow and $\rho\in\Omega$. By~(\ref{eq:ch_of})
and the symmetry of $u$, we have $f\cdot\chi_\rho\le
u(U_\rho)+u(\sigma(U_\rho))=2u(U_\rho)$.
Moreover, $f\cdot\chi_\rho$ is at most $2u(U_\rho)-2$;
this immediately
follows from Corollary~\ref{cor:even} and the fact
that $u(U_\rho)$ is odd. This gives new linear constraints
for MSFP:
\begin{equation}
f\cdot \chi_\rho\le 2u(U_\rho)-2\quad \mbox{for each}\;\; \rho\in\Omega.
\label{eq:8}
\end{equation}
Addition of these constraints enables us to drop off the symmetry
constraints (\ref{eq:5}) and the integrality constraints (\ref{eq:6})
without changing the optimum value of the linear program. This fact is
implied by the following theorem.
\begin{theorem}\label{tm:opt}
Every maximum IS-flow is an optimal solution to the linear program
{\rm (\ref{eq:1})--(\ref{eq:4}), (\ref{eq:8})}.
\end{theorem}
\begin{proof}
Assign a dual variable $\pi(v)\in{\mathbb R}$ (a {\it potential}) to each node
$v\in V$, $\gamma(a)\in{\mathbb R}_+$ (a {\it length}) to each arc $a\in E$,
and $\xi(\rho)\in{\mathbb R}_+$ to each odd fragment $\rho\in\Omega$.
Consider the linear program:
\begin{eqnarray}
\mbox{\bf minimize}\;\; \psi(\pi,\gamma,\xi):=\sum_E u(a)\gamma(a)+
\sum_{\Omega}(2u(U_\rho)-2)\xi(\rho)
& & \mbox{\bf subject to} \label{eq:9} \\
\gamma(a)\ge 0 & & \forall a\in E \label{eq:10} \\
\xi(\rho)\ge 0 & & \forall \rho\in\Omega \label{eq:11} \\
\pi(s)=0 & & \label{eq:12} \\
\pi(s')=1 & & \label{eq:13} \\
\pi(v)-\pi(w)+\gamma(a)+\sum_{\Omega}\xi(\rho)\chi_\rho(a)
\ge 0 & & \forall a=(v,w)\in E .
\label{eq:14}
\end{eqnarray}
In fact, (\ref{eq:9})--(\ref{eq:14}) is dual to linear
program~(\ref{eq:1})--(\ref{eq:4}),(\ref{eq:8}). (To see this,
introduce an extra arc $(s',s)$, add the conservation
constraints for $s$ and $s'$, and replace the objective (\ref{eq:1})
by ${\rm max}\{f(s',s)\}$. The latter generates the dual constraint
$\pi(s')-\pi(s)\ge 1$. We can replace it by the equality or
impose~(\ref{eq:12})--(\ref{eq:13}).) Therefore,
\begin{equation}
{\rm max}\;|f| ={\rm min}\; \psi(\pi,\gamma,\xi), \label{eq:15}
\end{equation}
where the maximum and minimum range over the corresponding feasible
solutions.
We assert that every maximum IS-flow $f$ achieves the maximum in
(\ref{eq:15}). To see this, choose an odd barrier $ {\cal B} = (A; X_1,
\ldots, X_k) $ of minimum capacity $u({\cal B})$. For $i=1,\ldots,k$,
let $U_i$ be the set of arcs from $A$ to $X_i$; then $\rho_i=(X_i,U_i)$
is an odd fragment for $G,u$. Define $\pi(v)$ to be 0 for $v\in A$, 1
for $v\in A'$, and $1/2$ otherwise. Define $\gamma(a)$ to be 1 for $a\in
(A,A')$, $1/2$ for $a\in (A,M)\cup (M,A')$, and 0 otherwise, where
$M=V-(A\cup A'\cup X_1\cup\ldots\cup X_k)$. Define
$\xi(\rho_i)=1/2$ for $i=1,\ldots,k$, and $\xi(\rho)=0$ for the other
odd fragments in $(G,u)$.
One can check that (\ref{eq:14}) holds for all arcs $a$ (e.g.,
both values $\pi(w)-\pi(v)$ and $\gamma(a)+
\sum_{\Omega}\xi(\rho)\chi_\rho(a)$ are equal to 1
for $a=(v,w)\in (A,A')$, and 1/2 for $a=(v,w)\in (A,L)\cup (L,A')$,
where $L:=V-(A\cup A')$). Thus $\pi,\gamma,\xi$ are feasible.
Using the fact that $u(A,M)=u(M,A')$, we observe that
$u\cdot\gamma=u(A,A')+u(A,M)$. Also
$$
\sum_{\Omega}(2u(U_\rho)-2)\xi(\rho)= \sum_{i=1}^k
\frac{1}{2}(2u(U_{i})-2)=\left(\sum_{i=1}^ku(A,X_i)\right)-k.
$$
This implies $\psi(\pi,\gamma,\xi)=u({\cal B})$, and now the result
follows from Theorem~\ref{tm:m-m}.
\end{proof}
\section{\Large Algorithm Using a Good Pre-Solution}
\label{sec:gisa}
Anstee~\cite{ans-85,ans-87} developed efficient methods for b-factor
and b-matching problems (unweighted or weighted) based on the idea
that a good pre-solution can easily be found by solving a
corresponding flow problem.
In this section we adapt his approach to solve
the maximum IS-flow problem in a skew-symmetric network $N=
(G=(V,E),u)$. The algorithm that we devise is relatively simple; it
finds a ``nearly optimal'' IS-flow and then makes $O(n)$ augmentations
to obtain a maximum IS-flow. The algorithm consists of four stages.
The {\em first\/} stage ignores the fact that $N$ is skew-symmetric
and finds an integer maximum flow $g$ in $N$ by use of a
max-flow algorithm. Then we set $h(a):=(g(a)+g(a'))/2$
for all arcs $a\in E$. Since $\mbox{div}_h(s)=\mbox{div}_g(s)/2-\mbox{div}_g(s')/2
=\mbox{div}_g(s)$, $h$ is a maximum flow as well. Also
$h$ is symmetric and {\em half-integer}.
Let $Z$ be the set of arcs on which $h$ is not integer. If
$Z=\emptyset$, then $h$ is already a maximum IS-flow; so assume
this is not the case.
The {\em second\/} stage applies simple transformations of $h$ to
reduce $Z$.
Let $H=(X,Z)$ be the subgraph of $G$ induced by $Z$. Obviously,
for each $x\in V$, $\mbox{div}_h(x)$ is an integer,
so $x$ is incident to an even number of
arcs in $Z$. Therefore, we can decompose $H$ into simple, not
necessarily directed, cycles $C_1,\ldots,C_r$ which are pairwise
arc-disjoint. Moreover, we can find, in linear time, a decomposition
in which each cycle $C_i$ is either self-symmetric ($C_i=\sigma(C_i)$)
or symmetric to another cycle $C_j$ ($C_i=\sigma(C_j)$).
To do this, we start with some node $v_0\in X$ and grow in $H$ a
simple (undirected) path $P=(v_0,e_1,v_1,\ldots,e_q,v_q)$ such that
the mate $v'_i$ of each node $v_i$ is not in $P$. At each step, we
choose in $H$ an arc $e\ne e_q$ incident to the last node $v_q$
($e$ exists since $H$ is eulerian); let $x$ be the other end node of
$e$. If none of $x,x'$ is in $P$, then we add $e$ to $P$. If some
of $x,x'$ is a node of $P$, $v_i$ say, then we shorten $P$ by
removing its end part from $e_{i+1}$ and delete from $H$ the arcs
$e_{i+1},\ldots,e_q,e$ and their mates. One can see that the arcs
deleted induce a self-symmetric cycle (when $x'=v_i$) or two
disjoint symmetric cycles (when $x=v_i$). We also remove the
isolated nodes created by the arc deletions and change the
initial node $v_0$ if needed.
Repeating the process for the new current graph $H$ and path $P$, we
eventually obtain the desired decomposition ${\cal C}$, in $O(|Z|)$
time.
Next we examine the cycles in ${\cal C}$. Each pair $C,C'$ of
symmetric cycles is canceled by sending a half unit of flow
through $C$ and through $C'$, i.e., we increase (resp. decrease)
$h(e)$ by 1/2 on each forward (resp.
backward) arc $e$ of these cycles. The resulting function $h$ is
symmetric, and $\mbox{div}_h(x)$ is preserved at each node $x$, whence $h$
is again a maximum symmetric flow. Now suppose that two
self-symmetric cycles $C$ and $D$ meet at a node $x$. Then they
meet at $x'$ as well. Concatenating the $x$ to $x'$ path in $C$ and
the $x'$ to $x$ path in $D$ and concatenating the rests of $C$ and $D
$, we obtain a pair of symmetric cycles and cancel these cycles as
above.
These cancellations result in ${\cal C}$ consisting of pairwise
{\em node-disjoint} self-symmetric cycles, say $C_1,\ldots,C_k$.
The second stage takes $O(m)$ time.
The {\it third\/} stage transforms $h$ into an IS-flow $f$ whose
value $|f|$ is at most $k$ units below $|h|$. For each
$i$, fix a node $t_i$ in $C_i$ and change $h$ on $C_i$ by sending a
half unit of flow through the $t_i$ to $t'_i$ path in $C_i$ and
through the reverse to the $t'_i$ to $t_i$ path in it. The resulting
function $h$ is integer and symmetric and the divergences preserve
at all nodes except for the nodes $t_i$ and $t'_i$
where we have $\mbox{div}_h(t_i)=-\mbox{div}_h(t'_i)=1$ for each $i$
(assuming, without loss of generality, that all $t_i$'s are
different from $s'$). Therefore, $h$ is, in essence, a
multiterminal IS-flow with sources $s,t_1,\ldots,t_k$ and
sinks $s',t'_1,\ldots,t'_k$. A genuine IS-flow $f$ from $s$
to $s'$ is extracted by reducing $h$ on some $h$-regular paths.
More precisely, we add to $G$ artificial arcs $e_i=(s,t_i)$, $i=1,
\ldots, k$ and their mates, extend $h$ by ones to these
arcs and construct a symmetric decomposition ${\cal D}$ (defined in
Section~ \ref{sec:theo}) for the obtained function $h'$ in the
resulting graph $G'$ (clearly $h'$ is an IS-flow of value
$|h|+k$).
Let ${\cal D}'$ be the set of elementary flows in ${\cal D}$ formed
by the paths or cycles which contain artificial arcs. Then
$\delta=1$ for each $(P,P',\delta)\in{\cal D}'$. Define $f':=h'-
\sum(\chi^P+\chi^{P'}: (P,P',1)\in{\cal D}')$. Then $f'$ is an
IS-flow in $G'$, and $|f'|\ge|h'|-2k\ge|h|-k$.
Moreover, since $f(e_i)=0$ for
$i=1,\ldots,k$, the restriction $f$ of $f'$ to $E$ is an IS-flow in
$G$, and $|f|=|f'|$. Thus, $|f|\ge|h|-k$, and now the facts that
$k\le n/2$ (as the nodes $t_1,\ldots,t_k,t'_1,\ldots,t'_k$ are
different) and that $h$ is a maximum flow in $N$ imply that the
value of $f$ differs from the maximum IS-flow value by $O(n)$.
The third stage takes $O(nm)$ time (the time
needed to construct a symmetric decomposition of $h'$).
The final, {\em fourth}, stage transforms $f$ into a maximum
IS-flow. Each iteration applies the r-reachability algorithm (RA)
mentioned in Section~\ref{sec:back} to the split-graph
$S(G^+,u_f)$ in order to find a $u_f$-regular $s$ to $s'$ path
$P$ in $G^+$ and then augment the current IS-flow $f$ by the
elementary flow
$(P,P',\delta_{u_f}(P))$ as explained in Section~\ref{sec:theo}.
Thus, a maximum IS-flow in $N$ is constructed in $O(n)$ iterations.
Since the RA runs in $O(m)$ time (by Theorem~\ref{tm:ratime}), the
fourth stage takes $O(nm)$ time.
Summing up the above arguments, we conclude with the following.
\begin{theorem} \label{tm:gisa}
The above algorithm finds a maximum IS-flow in $N$ in
$O(M(n,m)+nm)$ time, where $M(n,m)$ is the running time of the
max-flow procedure it applies.
\end{theorem}
\section{\Large Shortest R-Augmenting Paths and Blocking IS-Flows}
\label{sec:sbf}
Theorem \ref{tm:aug} and Corollary \ref{tm:spaug} prompt an
alternative method
for finding a maximum IS-flow in a skew-symmetric network
$N=(G,u)$, which is analogous to the method of Ford and Fulkerson for
usual flows. It starts with the zero flow, and at each iteration, the
current IS-flow $f$ is augmented by an elementary flow in $(G^+,u_f)$
(found by applying the r-reachability algorithm to $S(G^+,u_f)$).
Since each iteration increases the value of $f$ by at least two,
a maximum IS-flow is constructed in pseudo-polynomial time. In
general, this method is not competitive to the method of
Section~\ref{sec:gisa}.
More efficient methods involve the concepts of
shortest r-augmenting paths and shortest blocking IS-flows that
we now introduce. Let $g$ be an IS-flow in a
skew-symmetric network $(H=(V,W),h)$. We call $g(W)$
($=\sum_{e\in W} g(e)$) the {\em volume\/} of $g$. Considering a
symmetric decomposition $D=\{(P_i,P'_i,\delta_i): i=1,\ldots,k\}$ of
$g$, we have
$$
g(W)=\sum(\delta_i|P_i|+\delta_i|P'_i| : i=1,\ldots,k) \ge
|g|{\rm min}\{|P_i|: i=1,\ldots,k\}.
$$
This implies
\begin{equation}
g(W)\ge |g|\,\mbox{r-dist}_{S(H,h)}(s,s'), \label{eq:5-1}
\end{equation}
where $\mbox{r-dist}_{H'}(x,y)$ denotes the minimum length of a regular
$x$ to $y$ path in a skew-symmetric graph $H'$ (the {\em regular
distance\/} from $x$ to $y$). We say that an IS-flow $g$ is
\begin{itemize}
\item[(i)] {\em shortest\/} if (\ref{eq:5-1}) holds with equality,
i.e., some (equivalently, any) symmetric decomposition of $g$
consists of shortest $h$-regular paths from $s$ to $s'$;
\item[(ii)] {\em totally blocking\/} if there is no $(h-g)$-regular
path from $s$ to $s'$ in $H$, i.e., we cannot augment $g$ using only
residual capacities in $H$ itself;
\item[(iii)] {\em shortest blocking\/} if $g$ is shortest (as in (i))
and
\begin{equation}
\mbox{r-dist}_{S(H,h-g)}(s,s') > \mbox{r-dist}_{S(H,h)}(s,s'). \label{eq:5-2}
\end{equation}
\end{itemize}
Note that a shortest blocking IS-flow is not necessarily totally
blocking, and vice versa.
Given a skew-symmetric network $N=(G,u)$, the {\em shortest
r-augmenting path method (SAPM)\/}, analogous to the method of
Edmonds and Karp~\cite{EK-72} for usual flows, starts with the zero
flow, and each iteration augments the current IS-flow $f$ by a
shortest elementary flow $g=(P,P',\delta_{u_f}(P))$.
The {\em shortest blocking IS-flow method (SBFM)\/}, analogous to
Dinits' method~\cite{din-70}, starts with the zero flow, and each
{\em phase} (big iteration) augments the current IS-flow
$f$ by performing the following two steps:
\begin{itemize}
\item[(P1)] Find a shortest blocking IS-flow $g$ in $(G^+,u_f)$.
\item[(P2)] Update $f:=f\oplus g$.
\end{itemize}
Both methods terminate when $f$ no longer admits r-augmenting paths
(i.e., $g$ becomes the zero flow). The following observation is
crucial for our methods.
\begin{lemma}\label{lm:incr}
Let $g$ be a shortest IS-flow in $(G^+,u_f)$, and let $f':=f\oplus g$.
Let $k$ and $k'$ be the minimum lengths of r-augmenting paths for $f$
and $f'$, respectively. Then $k'\ge k$. Moreover, if $g$ is a shortest
blocking IS-flow, then $k'>k$.
\end{lemma}
\begin{proof}
Take a shortest $u_{f'}$-regular path $P$ from $s$ to $s'$ in $G^+$.
Then $|P|=k'$ and $g'=(P,P',1)$ is an elementary flow in $(G^+,u_{f'})$.
Note that $\mbox{supp}(g)$ does not contain opposed arcs $a=(x,y)$ and
$b=(y,x)$. Otherwise decreasing $g$ by one on each of $a,b,a',b'$
(which are, obviously, distinct), we would
obtain the IS-flow $\widetilde g$ in $(G^+,u_f)$ such that $|\widetilde g|=|g|$
and $\widetilde g(E^+)<g(E^+)$, which is impossible because $\widetilde
g(E^+)\ge k|\widetilde g|$ and $g(E^+)=k|g|$. This implies that each arc
$a$ in the set $Z:=\{a\in E^+: g(a^R)=0\}$ satisfies
\begin{equation}
u_{f'}(a)=u_f(a)-g(a). \label{eq:5-3}
\end{equation}
If $\mbox{supp}(g')\subseteq Z$, then $g'$ is a feasible IS-flow in
$(G^+,u_f)$ (by~(\ref{eq:5-3})), whence $k'=g'(E^+)/|g'|\ge k$.
Moreover, if, in addition, $g$ is a shortest blocking IS-flow, then
(\ref{eq:5-2}) and the fact that $g'\le u_f-g$ (by (\ref{eq:5-3}))
imply $k'>k$.
Now suppose there is an arc $e\in E^+$ such that $g'(e)>0$ and
$g(e^R)>0$. For each $a\in E^+$, put $\lambda(a):={\rm max}\{0,g(a)+g'(a)
-g(a^R)-g'(a^R)\}$. One can check that $\lambda(a)\le u_f(a)$ for all
arcs $a$ and that $\mbox{div}_\lambda(v)=0$ for all nodes $v\ne s,s'$.
Therefore, $\lambda$ is an IS-flow in $(G^+,u_f)$ with
$|\lambda|=|g|+|g'|=|g|+2$. Also $\lambda(E^+)<g(E^+)+g'(E^+)$
since for the $e$ above, $\lambda(e)+\lambda(e^R)<g'(e)+g(e^R)$. We
have
$$
2k'=g'(E^+)>\lambda(E^+)-g(E^+)\ge k(|g|+2)-k|g|=2k,
$$
yielding $k'>k$.
\end{proof}
Thus, each iteration of SAPM does not decrease the minimum
length of an r-augmenting path, and each phase of
SBFM increases this length. This gives upper bounds on the
numbers of iterations.
\begin{corollary}\label{cor:sapm}
SAPM terminates in at most $(n-1)m$ iterations.
\end{corollary}
(This follows by observing, in the proof of Lemma~\ref{lm:incr},
that on the iterations with the same length of shortest
r-augmenting paths, the subgraph of $G^+$ induced by the arcs
contained in such paths is monotone nonincreasing, and each
iteration reduces the capacity of some arc of this subgraph, as well
as the capacity of its mate, to zero or one.)
\begin{corollary}\label{cor:n-1}
SBFM terminates in at most $n-1$ phases.
\end{corollary}
As mentioned above, SBFM can be considered as a skew-symmetric
analog of Dinits' blocking flow algorithm. Recall that each phase
of that algorithm constructs a blocking flow in the
subnetwork $H$ formed by the nodes and arcs of shortest augmenting
paths. Such a network is acyclic (moreover, layered), and a
blocking flow in $H$ is easily constructed in $O(nm)$ time.
The problem of finding a shortest blocking IS-flow ((P1) above) is more
complicated. Let $H$ be the subgraph of $G^+$ formed by the nodes and
arcs contained in shortest $u_f$-regular $s$ to $s'$ paths. Such an $H$
need not be acyclic (a counterexample is not difficult).
In Section~\ref{sec:iter} we will show
that problem (P1) can be reduced to a seemingly easier task, namely,
to finding a totally
blocking IS-flow in a certain acyclic network $(\overline H,\overline h)$.
Such a network arises when the shortest r-path algorithm
from~\cite{GK-96} is applied to the split-graph $S(G^+,u_f)$ with
unit arc lengths.
First, however, we need to tell more about the r-reachability and
shortest r-path algorithms from~\cite{GK-96}.
\section{\Large Properties of Regular and Shortest Regular Path
Algorithms}
\label{sec:shortp}
In this section we exhibit certain properties of the algorithms
in~\cite{GK-96}, referring the reader to that paper for details. We
also establish an additional fact (Lemma~\ref{lm:acyc}), which will be
used later.
\subsection{The Regular Reachability Algorithm (RA)}\label{sec:ra}
Let $\Gamma=(V,E)$ be a skew-symmetric graph with source $s$ and sink
$s'=\sigma(s)$ (as before, $\sigma$ is the symmetry map).
A {\em fragment} (or an $s$-{\em fragment}) in $\Gamma$ is a pair
$\phi=(V_\phi,e_\phi = (v,w))$, where $V_\phi $ is a self-symmetric
set of nodes of $\Gamma$ with $s\not\in V_\phi$ and $e_\phi$ is an arc
entering $V_\phi$, i.e., $v\not\in V_\phi\ni w$ (cf. the definition of
odd fragments in Section~\ref{sec:lin}).
We refer to $e_\phi$ and $e'_\phi$ as the {\em base} and
{\em anti-base} arcs of $\phi$, respectively. Let us
say that the fragment is {\em well-reachable} if
(i) for each node $x\in V_\phi$, there is an r-path from
$w$ to $x$ in the subgraph induced by $V_\phi$ (and therefore, an
r-path from $x$ to $w'= \sigma(w)$), and
(ii) there is an r-path from $s$ to $v$ disjoint from $V_\phi$.
The {\em trimming operation} applied to $\phi$ (which is analogous to
shrinking a blossom in matching algorithms) transforms $\Gamma$ by
removing the nodes of $V_\phi -\{w, w'\}$ and modifying the arcs as
follows.
\begin{itemize}
\item[(T1)]
Each arc $a=(x,y) \in E$ such that either
$x,y \in V - V_\phi$ or $a = e_\phi$ or $a=e'_\phi$
remains an arc from $x$ to $y$.
\item[(T2)]
Each arc $(x,y) \in E-\{e'_\phi\}$ that leaves $V_\phi$ is replaced
by an arc from $w$ to $y$, and each arc $(x,y) \in E-\{e_\phi\}$ that
enters $V_\phi$ is replaced by an arc from $x$ to $w'$.
\item[(T3)]
Each arc with both ends in $V_\phi$ is replaced
by an arc from $w$ to $w'$.
\end{itemize}
(A variant of trimming deletes all arcs in (T3).) The image of an
arc $a$ in the new graph is denoted again by $a$ (so its end nodes can
be changed, but not its name). Figure~\ref{fig:trim} illustrates
fragment trimming. The new $\Gamma$ is again skew-symmetric.
\begin{figure}[tb]
\unitlength=1mm
\begin{center}
\begin{picture}(150,70)
\multiput(0,0)(80,0){2}{
\put(15,15){\circle*{1.5}}
\put(13,17){$z$}
\put(35,10){\circle*{1.5}}
\put(37,9){$v$}
\put(35,25){\circle*{1.5}}
\put(37,22){$w$}
\put(5,35){\circle*{1.5}}
\put(4,31.5){$x$}
\put(65,35){\circle*{1.5}}
\put(64,31){$y'$}
\put(5,45){\circle*{1.5}}
\put(4,47){$y$}
\put(65,45){\circle*{1.5}}
\put(64,47){$x'$}
\put(35,55){\circle*{1.5}}
\put(30,56){$w'$}
\put(35,70){\circle*{1.5}}
\put(30.5,68){$v'$}
\put(55,65){\circle*{1.5}}
\put(56,62){$z'$}
{\thicklines
\put(35,10){\vector(0,1){14}}
\put(35,55){\vector(0,1){14}}
}
}
\put(25,35){\circle*{1.5}}
\put(27,36){$c$}
\put(45,30){\circle*{1.5}}
\put(42,31){$b'$}
\put(25,50){\circle*{1.5}}
\put(27,47){$b$}
\put(45,45){\circle*{1.5}}
\put(41,41){$c'$}
\put(33,38){$V_\phi$}
\put(35,40){\oval(30,40)}
\put(36.5,15){$e_\phi$}
\put(36.5,64){$e'_\phi$}
\put(15,15){\vector(2,1){18.5}}
\put(45,30){\vector(4,1){18.5}}
\put(25,35){\vector(-1,0){18.5}}
\put(5,45){\vector(4,1){18.5}}
\put(65,45){\vector(-1,0){18.5}}
\put(35,55){\vector(2,1){18.5}}
\put(35,25){\vector(-1,1){9.0}}
\put(45,45){\vector(-1,1){9.0}}
\put(95,15){\vector(1,2){19.5}}
\put(115,25){\vector(1,2){19.5}}
\put(115,25){\vector(-3,1){28.5}}
\put(115,25){\vector(3,1){28.5}}
\put(85,45){\vector(3,1){28.5}}
\put(145,45){\vector(-3,1){28.5}}
\put(113.5,30){\vector(0,1){20}}
\put(116.5,30){\vector(0,1){20}}
\put(20,0){Before trimming}
\put(100,0){After trimming}
\end{picture}
\end{center}
\caption{Fragment trimming example}
\label{fig:trim}
\end{figure}
The algorithm RA relies on the following property.
\begin{statement}\label{st:sym} {\rm \cite{GK-96}}
If $\phi$ is a well-reachable fragment, then trimming $\phi$ preserves
the existence (or non-existence) of a regular path from $s$ to $s'$.
\end{statement}
RA searches for a regular $s$ to $s'$ path in $\Gamma$, starting with
the trivial path $P=s$. Each iteration
either increases the current r-path $P$, or reveals a well-reachable
fragment and trims it, producing the new current graph $\Gamma$ and
accordingly updating $P$. It terminates when either an $s$ to $s'$
r-path $\overline P$ or an $s$-barrier $\overline{\cal B}$ in the final graph
$\overline\Gamma$ is found (cf. Theorem~\ref{tm:bar}). The
postprocessing stage extends $\overline P$ into a regular $s$ to $s'$ path
$P$ of the initial $\Gamma$ (cf. Statement~\ref{st:sym}) in the
former case (the {\em path restoration procedure}) and extends
$\overline{\cal B}$ into a barrier ${\cal B}$ of the initial $\Gamma$ in the
latter case (the {\em barrier restoration procedure}).
The fragments of current graphs revealed by RA determine fragments of
the initial $\Gamma$ in a natural way; all fragments are well-reachable.
Moreover, the set $\Phi$ of these fragments of the initial $\Gamma$ is
{\em well-nested}. This means that
\begin{itemize}
\item[(F1)] for distinct $\phi,\psi\in\Phi$, either
$V_\phi\subset V_\psi$ or $V_\psi\subset V_\phi$ or
$V_\phi\cap V_\psi=\emptyset$, and
\item[(F2)] for $\phi,\psi\in\Phi$, if $V_{\psi}\subset V_\phi
$ and $e_{\psi}\in\delta(V_\phi)$ then $e_{\psi}=e_\phi$, and
if $V_{\phi}\cap V_{\psi}=\emptyset$ and $e_{\psi}\in\delta(V_{\phi})$
then $e_{\phi}\not\in\delta(V_{\psi})$.
\end{itemize}
(Recall that for $X\subseteq V$, $\delta(X)$ is the set of arcs with
one end in $X$ and the other in $V-X$.) Let us say that a path $R$
in $\Gamma$ is {\em compatible with} $\Phi$ if for each $\phi\in\Phi$,
$p:=|R\cap\delta(V_\phi)|\le 2$, and if $p=2$ then $R$ contains
exactly one of $e_\phi,e'_\phi$. The following additional properties
(relying on (T1)--(T3)) are important:
\begin{myitem}
any regular $s$ to $s'$ path $\overline P$ in the final graph $\overline\Gamma$
is extendable to a regular $s$ to $s'$ path $P$ compatible with $\Phi$
in the initial $\Gamma$, and the path restoration procedure applied to
$\overline P$ constructs such a $P$ in $O(|P|+d)$ time, where $d$ is the total
size of maximal fragments in $\Phi$ traversed by $P$;
\label{eq:P}
\end{myitem}
\begin{myitem}
for each $\phi\in\Phi$ and each arc $a\ne e'_\phi$ leaving $V_\phi$,
there exists an r-path $Q_\phi(a)$ compatible with $\Phi$, with the first
arc $e_\phi$, the last arc $a$ and all intermediate nodes in $V_\phi$;
such a path can be constructed by (a phase of) the path
restoration procedure in $O(|V_\phi|)$ time.
\label{eq:Q}
\end{myitem}
A fast implementation of RA (supported by the disjoint set union data
structure of~\cite{GT-85}) runs in linear time, as indicated in
Theorem~\ref{tm:ratime}.
\subsection{The Shortest Regular Path Algorithm
(SRA)}\label{sec:srpa}
We now consider the shortest regular path problem (SRP) in a
skew-symmetric graph $\Gamma=(V,E)$ with {\em nonnegative
symmetric} lengths $\ell(e)$ of the arcs $e\in E$: find a minimum
length regular path from $s$ to $s'$. One may assume that $s'$ is
r-reachable from $s$. The dual problem involves above-mentioned
fragments. Define the characteristic function $\chi_\phi$ of a fragment
$\phi = (V_\phi, e_\phi)$ by
\begin{equation}\label{eq:ch_f}
\chi_\phi(a):= \left\{
\begin{array}{rl}
1 & \mbox{for $a=e_\phi, e'_\phi$}, \\
-1 & \mbox{for $a \in \delta(V_\phi) - \{e_\phi, e'_\phi\} $}, \\
0 & \mbox{for the remaining arcs of $\Gamma$}.
\end{array}
\right.
\end{equation}
(Compare with (\ref{eq:ch_of}).)
For a function $\pi: V\to{\mathbb R}$ (of node {\em potentials})
and a nonnegative function $\xi$ on a set $\Phi$ of fragments,
define the {\em reduced length} of an arc $e = (x,y)$ to be
$$
\ell^\xi_\pi (e): = \ell(e) + \pi(x) - \pi(y) +
\sum_{\phi \in \Phi} \xi(\phi)\chi_\phi(e) .
$$
An optimality criterion for SRP can be formulated as follows.
\begin{theorem} {\rm \cite{GK-96}}\label{tm:sp}
A regular path $P$ from $s$ to $s'$ is a shortest r-path if and
only if there exist a potential $\pi: V\to{\mathbb R}$,
a set $\Phi$ of fragments, and a {\em positive} function $\xi$ on
$\Phi$ such that
\begin{eqnarray}
\ell^\xi_\pi (e) \geq 0 & \mbox{for each} & e \in E; \label{eq:6-2} \\
\ell^\xi_\pi (e) = 0 & \mbox{for each} & e\in P; \label{eq:6-3}\\
\chi^P \cdot \chi_\phi = 0 & \mbox{for each} & \phi\in\Phi.
\label{eq:6-4}
\end{eqnarray}
\end{theorem}
The {\em shortest r-path algorithm (SRA)}
from \cite{GK-96} implicitly maintains $\pi,\Phi,\xi$ in the input
graph $\Gamma$ and iteratively modifies the graph by trimming certain
fragments.
Let $\Gamma^0$ be the subgraph of the current $\Gamma$ with the same
set of nodes and with the arcs having zero reduced length, called the
current {\em 0-subgraph} (recall that the arcs of the current graph
are identified with the corresponding arcs of the initial one). Each
iteration applies the above r-reachability algorithm RA to search for a
regular $s$ to $s'$ path in $\Gamma^0$. If such a path is found, the
algorithm terminates and outputs this path to a postprocessing stage.
If such a path does not exist, then, using the
barrier ${\cal B}=(A;X_1,\ldots,X_k)$ in $\Gamma^0$ constructed by RA,
the iteration trims the fragments determined by the sets $X_i$ and
updates $\pi,\Phi,\xi$, modifying $\Gamma^0$. The reduced lengths
of the arcs within the newly and previously extracted fragments, as
well as of their base and anti-base arcs, are not changed.
Let $\overline\Gamma$ and $\overline\Gamma^0$ denote the final graph $\Gamma$ and
the 0-subgraph in it, respectively, and $\overline P$ the regular $s$ to $s'$
path in $\overline\Gamma^0$ found by the algorithm.
Let $\Gamma^0$ stand for the 0-subgraph of the initial graph $\Gamma$
(concerning the reduced arc lengths determined by the resulting
$\pi,\Phi,\xi$). We call $\Gamma^0$ and $\overline\Gamma^0$ the {\em full}
and {\em trimmed 0-graphs}, respectively.
The postprocessing stage applies the path restoration
procedure of RA to extend $\overline P$ into a regular $s$ to $s'$ path $P$
in $\Gamma^0$, in time indicated in~(\ref{eq:P}).
It also explicitly constructs $\Gamma^0$ (in linear time).
Note that any $s$ to $s'$ r-path or r-cycle $Q$ in $\Gamma$ compatible
with $\Phi$ satisfies $\chi^Q\cdot\chi_\phi=0$ for each $\phi\in\Phi$.
By~(\ref{eq:P}), {\em any} $s$ to $s'$ r-path $\overline P$ in
$\overline\Gamma^0$ is extendable to an $s$ to $s'$ r-path $P$ in
$\Gamma^0$ compatible with $\Phi$. Therefore, $P$ is shortest, by
Theorem~\ref{tm:sp}.
\begin{theorem} \label{tm:sharp} {\rm \cite{GK-96}}
For nonnegative symmetric arc lengths $\ell$,
SRA runs in $O(m \log n)$ time, and in $O(m \sqrt{\log L})$ time
when $\ell$ is integer-valued and $L$ is the maximum arc length.
Furthermore, the algorithm constructs (implicitly) $\pi,\Phi,\xi$ as
in Theorem~\ref{tm:sp}, where $\pi$ is {\em anti-symmetric} (i.e.,
$\pi(x)=-\pi(x')$ for all $x\in V$), and constructs (explicitly) the
trimmed 0-graph $\overline\Gamma^0$ and the full 0-graph $\Gamma^0$
such that:
\begin{itemize}
\item[{\rm (A1)}] $\Phi$ is well-nested (obeys
{\rm (F1)--(F2)}) and consists of well-reachable fragments in
$\Gamma^0$; in particular, $\ell^\xi_\pi(e_\phi)=0$ for each $\phi\in\Phi$;
\item[{\rm (A2)}] $\Phi$ satisfies~(\ref{eq:P}) and~(\ref{eq:Q})
with $\Gamma^0,\overline\Gamma^0$ instead of $\Gamma,\overline\Gamma$;
in particular, any regular $s$ to $s'$ path of $\overline\Gamma^0$ is
(efficiently) extendable to a shortest regular $s$ to $s'$ path in
$(\Gamma,\ell)$.
\end{itemize}
\end{theorem}
(Note that the anti-symmetry of $\pi$ and the symmetry of $\ell$ and
$\chi_\phi$ for all $\phi\in\Phi$ imply that the reduced length
function $\ell^\xi_\pi$ is symmetric. Therefore, the graphs $\Gamma^0$ and
$\overline\Gamma^0$ are indeed skew-symmetric.)
Let $\Phi^{{\rm max}}$ denote the set of maximal fragments in $\Phi$.
The sets $V_\phi$ for $\phi\in\Phi^{{\rm max}}$
are pairwise disjoint (by (F1)), and the graph $\overline\Gamma^0$ can be
directly obtained from $\Gamma^0$ by simultaneously trimming the
fragments in $\Phi^{{\rm max}}$.
In the next section we will take advantage of the relationship
between r-paths in $\overline\Gamma^0$ and shortest r-paths in
$(\Gamma,\ell)$ indicated in (A2). Another important property of
$\overline\Gamma^0$ is as follows.
\begin{lemma} \label{lm:acyc}
If the length $\ell(C)$ of every cycle $C$ in $\Gamma$ is
positive, then $\overline\Gamma^0$ is acyclic. In particular,
$\overline\Gamma^0$ is acyclic if all arc lengths are positive.
\end{lemma}
\begin{proof}
Suppose $\overline\Gamma^0$ contains a (not necessarily regular) simple
cycle $\overline C$. In view of~(\ref{eq:Q}), $\overline C$ is extendable to a
cycle $C$ of $\Gamma^0$ compatible with $\Phi$. Then
$\chi^C\cdot\chi_\phi=0$ for all $\phi\in\Phi$. This implies that
the original length $\ell(C)$ and the reduced length $\ell^\xi_\pi(C)$
are the same (since the changes in $\ell^\xi_\pi$ due to $\pi$ cancel out
as we go around the cycle). Since all arcs of $C$ have zero reduced
length, $\ell(C)=\ell^\xi_\pi(C)=0$. This contradicts the hypotheses of the
lemma.
\end{proof}
\section{\Large Reduction to an Acyclic Network and Special Cases}
\label{sec:iter}
We continue the description of the shortest blocking IS-flow method
(SBFM) for solving the maximum IS-flow problem in a
network $N=(G=(V,E),u)$ begun in Section~\ref{sec:sbf}. Let $f$ be
a current IS-flow in $N$. We show that the task of finding a shortest
blocking IS-flow $g$ in $(G^+,u_f)$ (step (P1) of a phase
of SBFM) reduces to finding a totally blocking IS-flow
in an acyclic network.
Build the split-graph $\Gamma=S(G^+,u_f)$ and apply the above
shortest regular path algorithm to $\Gamma$ with the
{\em all-unit} length function $\ell$ on the arcs. It constructs
$\pi,\Phi,\xi$ as in Theorems~\ref{tm:sp} and \ref{tm:sharp},
taking $O(m)$ time (since $L=1$). SRA also constructs the trimmed
0-graph $\overline\Gamma^0$, the main object we will deal with.
By Lemma~\ref{lm:acyc}, $\overline\Gamma^0$ is
acyclic. Also the following property takes place.
\begin{lemma}\label{lm:ub}
Let $a\in E^+$ be an arc with $u_f(a)>1$, and let $a_1,a_2$ be
the corresponding split-arcs in $\Gamma$. Then
$\ell^\xi_\pi(a_1)=\ell^\xi_\pi(a_2)$. Moreover, none of $a_1,a_2$ can be the
base or anti-base arc of any fragment in $\Phi$.
\end{lemma}
\begin{proof}
Since $a_1,a_2$ are parallel arcs, for each $\phi\in\Phi$, $a_1$
enters (resp. leaves) $V_\phi$ if and only if $a_2$ enters (resp.
leaves) $V_\phi$. This implies that $\ell^\xi_\pi(a_1)\ne\ell^\xi_\pi(a_2)$ can
happen only if one of $a_1,a_2$ is the base or anti-base arc of
some fragment in $\Phi$. Suppose $a_1\in\{e_\phi,e'_\phi\}$ for
some $\phi\in\Phi$ (the case $a_2\in\{e_\phi,e'_\phi\}$ is similar).
Then $\ell^\xi_\pi(a_1)=0$ (by (A1) in Theorem~\ref{tm:sharp}). Using
property~(F2) from Section~\ref{sec:shortp} (valid as $\Phi$ is
well-nested), one can see that $a_2$ is not the base or anti-base
arc of any fragment in $\Phi$. Therefore,
$\chi_{\psi}(a_2)\le\chi_{\psi}(a_1)$ for all $\psi\in\Phi$,
yielding $\ell^\xi_\pi(a_2)\le\ell^\xi_\pi(a_1)$. Moreover, the latter
inequality is strict because $\chi_\phi(a_2)=-1<1=\chi_\phi(a_1)$
and $\xi(\phi)>0$. Now $\ell^\xi_\pi(a_1)=0$ implies $\ell^\xi_\pi(a_2)<0$,
contradicting (\ref{eq:6-2}).
\end{proof}
Let $E^0\subseteq E^+$ be the set of (images of) zero
reduced length arcs of $\Gamma$. Lemma~\ref{lm:ub} implies that
the base arc $e_\phi$ of each fragment $\phi\in\Phi$ in $\Gamma$
is generated by an arc $e\in E^0$ with $u_f(e)=1$. We can
identify these $e$ and $e_\phi$ and consider $\phi$ as a fragment
of $G^+$ as well. One can see that $\overline
\Gamma^0$ is precisely the split-graph for $(\overline H,\overline h)$, where
$\overline H=(\overline V,\overline E^0)$ is obtained from $H=(V,E^0)$ by trimming
the maximal fragments in $\Phi$, and $\overline h$ is the restriction of
$u_f$ to $\overline E^0$.
Based on the property of each fragment to have unit capacity of
the base arc, we reduce step (P1) to the desired problem, namely:
\begin{itemize}
\item[(B)] {\em Find a totally blocking IS-flow in $(\overline H,\overline
h)$}.
\end{itemize}
To explain the reduction,
suppose we have found a solution $\overline g$ to (B).
For each maximal fragment $\phi$ in $\Phi$ with
$e_\phi\in\mbox{supp}(\overline g)$, we
have $\overline g(e_\phi)=1$; therefore, exactly one unit of flow goes
out of the head of $e_\phi$, through an arc $a\in \overline E^0$ say.
We choose the path $Q=Q_\phi(a)$ as in~(\ref{eq:Q}) to connect
$e_\phi$ and $a$ in (the subgraph on $V_\phi$ of) $H$ and then
push a unit of flow
through $Q$ and a unit of flow through the symmetric path $Q'$.
Doing so for all maximal fragments $\phi$, we extend $\overline g$ to
an IS-flow $g$ in $(H,h)$, where $h$ is the restriction of $u_f$ to
$E^0$. Moreover, $g$ is a shortest blocking IS-flow in $(G^+,u_f)$.
Indeed, the fact that the chosen paths $Q$ have zero reduced length and
are compatible with $\Phi$ implies that a symmetric decomposition of
$g$ consists of shortest $u_f$-regular paths (cf. (A2) in
Theorem~\ref{tm:sharp}); so $g$ is shortest. Also $G^+$ cannot contain
a $(u_f-g)$-regular $s$ to $s'$ path $R$ of length $g(E^+)/|g|$. For
such an $R$ would be a path in $H$ compatible with $\Phi$ (in view of
Theorem~\ref{tm:sp}); then the arcs of $R$ occurring in $\overline H$ should
form an $(\overline h-\overline g)$-regular $s$ to $s'$ path in it, contrary to
the fact that $\overline g$ is totally blocking.
Since each path $Q_\phi(a)$ is constructed in $O(|V_\phi|)$ time
(by~(\ref{eq:Q})), and the sets $V_\phi$ of maximal fragments $\phi$ are
pairwise disjoint, the reduction to (B) takes linear time.
\begin{lemma}\label{lm:redu}
A totally blocking IS-flow in $(\overline H,\overline h)$ can be extended to
a shortest blocking IS-flow in $(G^+,u_f)$, in $O(m)$ time.
\ \vrule width.2cm height.2cm depth0cm
\end{lemma}
\begin{corollary}\label{cor:cvb}
SBFM solves the maximum IS-flow problem in $O(qT(n,m)+qm)$ time,
where $q$ is the number of phases ($q\le n$) and $T(n,m)$
is the time needed to find a totally blocking IS-flow in an acyclic
network with at most $n$ nodes and $m$ arcs.
\end{corollary}
Clearly $T(n,m)$ is $O(m^2)$, as a totally blocking flow can be
constructed by $O(m)$ applications of the regular reachability
algorithm; this is slower compared with the phase time $O(nm)$ in
Dinits' algorithm. However, we shall show in the next section that
problem (B) can be solved in $O(nm)$ time as well. Moreover,
the bound will be better for important special cases.
Next we estimate the number of phases.
For the standard max-flow problem,
the number of phases of Dinits' algorithm becomes significantly less
than $n$ in the cases of unit arc capacities and unit ``node
capacities''. To combine these into one case, given a network
$N=(G=(V,E),u)$ with integer capacities $u$, for a node $x\in V$,
define the {\em transit capacity}\/ $u(x)$ to be the minimum of values
$\sum_{y:(x,y)\in E} u(x,y)$ and $\sum_{y:(y,x)\in E}u(y,x)$.
Define
$$
\Delta:=\Delta(N):=\sum(u(x) : x\in V-\{s,s'\}).
$$
As shown in \cite{ET-75,kar-73-2}, the number $q$ of
phases of the blocking flow method does not exceed $2\sqrt{\Delta}$.
In particular, if $u\equiv 1$ then $q=O(\sqrt{m})$, and if the transit
capacities $u(x)$ of all nodes $x\ne s,s'$ ({\em inner} nodes)
are ones, e.g., in the case arising from the bipartite matching
problem, then $q=O(\sqrt{n})$.
A similar argument works for skew-symmetric networks (see
also~\cite{FJ-99} for a special case).
\begin{lemma}\label{lm:root}
The number of phases of SBFM is at most
${\rm min}\{n,2\sqrt{\Delta}\}$.
\end{lemma}
\begin{proof}
After performing $d:=\sqrt{\Delta}$ phases, the r-distance
from $s$ to $s'$ in the network $N'=(G^+,u_f)$ for the current IS-flow
$f$ becomes greater than $d$, by Lemma~\ref{lm:incr}. Let $f^*$
be a maximum IS-flow in $N$, and let $g$ be defined as in the proof of
Theorem~\ref{tm:aug}. Then $g$ is a feasible IS-flow in $N'$ and
$|g|=|f^*|-|f|$. We assert that $|g|\le d$, which immediately implies
that the number of remaining phases is at most $d/2$, thus
proving the lemma. To see this, take a symmetric decomposition
${\cal D}$ of $g$ consisting of elementary flows $(P,P',\delta)$
with $\delta=1$. Let ${\cal D}'$ be the family of $s$ to $s'$ paths
$P,P'$ in ${\cal D}$; then $|{\cal D}'|\ge |g|$. It is easy to see that
$u_f(x)=u(x)$ for each inner node $x$. Each path
in ${\cal D}'$ contains at least $d$ inner nodes, and therefore, it
uses at least $d$ units of the total transit capacity of inner nodes of
$N'$. So we have $d|{\cal D}'|\le \Delta(N)$. This implies $|g|\le d$.
\end{proof}
\section{\Large Finding a Totally Blocking IS-Flow in an Acyclic
Network}
\label{sec:acyc}
Our aim is to show the following.
\begin{theorem} \label{tm:ac-unit}
A totally blocking IS-flow in an acyclic skew-symmetric graph with
$O(n)$ nodes, $O(m)$ arcs, and unit arc capacities can be found in
$O(n+m)$ time.
\end{theorem}
This together with Corollary~\ref{cor:cvb} and Lemma~\ref{lm:root}
yields the following result for the shortest blocking IS-flow method.
\begin{corollary} \label{cor:unit}
In case of a network $N$ with unit arc capacities, SBFM can be
implemented so that it finds a maximum IS-flow in
$O(m\sqrt{\Delta(N)})$ time (assuming $n=O(m)$). In
particular, if the indegree or outdegree of each node is at most one,
then the running time becomes $O(\sqrt{n}m)$.
\end{corollary}
In the second half of this section we will extend
Theorem~\ref{tm:ac-unit} to general capacities, in which case the phase
time will turn into $O(nm)$, similarly to Dinits' algorithm.
For convenience we keep the original notation for the network in
question. Let $G=(V,E)$ be a skew-symmetric {\em acyclic} graph with
source $s$ and the capacity $u(e)=1$ of each arc $e\in E$.
One may assume that each node belongs to a path from $s$ to
$\sigma(s)$.
First of all we make a reduction to the {\em maximal balanced path-set
problem (MBP)} stated in the Introduction. Since
$G$ is acyclic, one can assign, in linear time, a potential function
$\pi:V\to{\mathbb Z}$ which is {\em antisymmetric} ($\pi(x)=-\pi(\sigma
(x))$ for each $x\in V$) and {\em increasing} on the arcs
($\pi(y)>\pi(x)$ for each $(x,y)\in E$).
(Indeed, a function $q:V\to{\mathbb Z}$ increasing on the arcs is constructed,
in linear time, by use of the standard topological sorting.
Now set $\pi(v):=q(v)-q(\sigma(v))$, $v\in V$.)
Subdivide each arc $(x,y)$ with $\pi(x)<0$ and $\pi(y)>0$ into two arcs
$(x,z)$ and $(z,y)$ and assign zero potential to $z$.
The new graph $G$, with $O(m)$ nodes and $O(m)$ arcs, is again
skew-symmetric, and the problem remains essentially the same.
Let $\Gamma$ be the subgraph of the new $G$ induced by the nodes
with nonnegative potentials. Then $\Gamma\cup\sigma(\Gamma)=G$ and
$\Gamma\cap\sigma(\Gamma)=(Z,\emptyset)$, where $Z$ is the
self-symmetric set of zero potential nodes of $G$. Also $\Gamma$
contains $\sigma(s)$.
Clearly every $s$ to $\sigma(s)$ path $P$ of $G$ meets $Z$ at exactly one
node $z$, which subdivides $P$ into an $s$ to $z$ path $R'$ in
$\sigma(\Gamma)$ and a $z$ to $\sigma(s)$ path $Q$ in $\Gamma$.
Then $P$ is regular if and only if $\sigma(R')$ and
$Q$ are arc-disjoint. Conversely, let $Q,R$ be two $Z$ to
$\sigma(s)$ paths in $\Gamma$ beginning at symmetric nodes in $Z$. Then
the concatenation $\sigma(Q)\cdot R$ (as well as $\sigma(R)\cdot Q$) is
a regular $s$ to $\sigma(s)$ path of $G$ if and only if $Q$ and $R$ are
arc-disjoint.
This shows that our particular totally blocking IS-flow problem is
reduced, in linear time, to MBP with $\Gamma,\sigma(s),Z$ (in fact, the
problems are equivalent).
Theorem~\ref{tm:ac-unit} is implied by the following.
\begin{theorem} \label{tm:bbp}
MBP is solvable in linear time.
\end{theorem}
We devise an algorithm for MBP and prove Theorem~\ref{tm:bbp}.
Let the input of MBP consist of an acyclic graph
$\Gamma=(X,U)$, a sink $t$ and a source set $Z$ with a map
(involution) $\sigma:Z\to Z$ giving a partition of $Z$ into pairs.
We say that two arc-disjoint $Z$ to $t$ paths $Q,R$ beginning at
``symmetric'' sources $z,\sigma(z)$ form a {\em good pair}, and say
that a collection of pairwise arc-disjoint $Z$ to $t$ paths in
$\Gamma$ is a {\em balanced path-set} if its members can be partitioned
into good pairs. So the task is to find a maximal (or ``blocking'')
balanced path-set.
Each iteration of the algorithm will reduce the
arc set of $\Gamma$ and, possibly, the set $Z$, and we sometimes will
use index $i$ for objects in the input of $i$-th iteration. So
$\Gamma_1=(X_1,U_1)$ is the initial graph. Without loss of generality,
one may assume that initially each source has zero indegree and
\begin{itemize}
\item[(C1)] each node of $\Gamma$ lies on a path from $Z$ to $t$,
\end{itemize}
and will maintain these properties during the algorithm.
The iteration input will include a path $D$ from a certain node of
$\Gamma$ to $t$, called the {\em pre-path}.
Initially, $D$ is trivial: $D=t$.
The nodes of $\Gamma$ not in $Z\cup\{t\}$ are called {\em inner}.
The current $\Gamma$ may contain special inner nodes, called
{\em complex} ones. They arise when the algorithm shrinks certain
subgraphs of $\Gamma$; the initial graph has no complex nodes.
The adjacency structure of $\Gamma$ is given by double-linked
lists $I_x$ and $O_x$ of the incoming and outgoing arcs,
respectively, for each node $x$.
The arc set of a path $P$ is denoted by $E(P)$.
An $i$-th iteration begins with extending $D$ to a
$Z$ to $t$ path $P$ in a natural way;
this takes $O(|P|-|D|)$ time.
Let $z$ be the first node of $P$. Then we try to obtain a good pair
by constructing a path from $z'=\sigma(z)$ to $t$, possibly
rearranging $P$. By standard arguments, a good pair for $z,z'$
exists if and only if there exists a path $A$ from $z'$ to $t$,
with possible backward arcs, in which the forward arcs belong to
$U-E(P)$ and the backward arcs belong to $E(P)$, called an {\em
augmenting path} w.r.t. $P$. For certain reasons, we admit $A$
to be self-intersecting in nodes (but not in arcs). Once $A$ is found,
the symmetric difference $E(P)\triangle E(A)$ gives a good pair $Q,R$
(taking into account that $\Gamma$ is acyclic).
To search for an augmenting path, we replace each arc $e=(x,y)\in
E(P)$ by the reverse arc $\overline e=(y,x)$; let $\overline \Gamma=(X,\overline U)$
be the resulting graph, and $\overline P$ the $t$ to $Z$ path
reverse to $P$. Thus, we have to construct a (directed) path from $z'$
to $t$ in $\overline\Gamma$ or establish that it does not exist.
To achieve the desired time bound, we apply a variant of
depth first search which we call here {\em transit depth first search
(TDFS)} (such a search procedure was applied in~\cite{kar-70}).
The difference from the standard depth first search (DFS) is as
follows. When scanning a new outgoing arc $(x,y)$ in the list $O_x$
of a current node $x$, if $y$ has already been visited, then DFS
stays at $x$. In contrast, TDFS moves from $x$ to $y$, making $y$ the
new current node. Both procedures maintain the stack
of arcs traversed only in forward direction and ordered by the
time of their traversal. If all outgoing arcs of the current node
$x$ are already scanned, then the last arc $(w,x)$ of the stack is
traversed in backward direction and $w$ becomes the new current
node. We refer to the path determined by the stack, from the initial
node to the current one, as the {\em active path}. Note that in case of
TDFS the active path may be self-intersecting (while it is simple in
DFS).
We impose the condition that the outgoing arc lists of
$\overline\Gamma$ are arranged so that
\begin{itemize}
\item[(C2)] for each node $x\ne z$ of $\overline P$, the arc $\overline e$
of $\overline P$ leaving $x$ is the {\em last} element of $\overline O_x$.
\end{itemize}
This guarantees that TDFS would scan $\overline e$ after all other outgoing
arcs of $x$ (i.e., the arcs of $\overline P$ are ignored as long as
possible).
At an iteration, we apply TDFS to $\overline\Gamma$ starting from $z'$ as
above. The search terminates
when either it reaches $t$ or it returns to $z'$ having all arcs of
$O_{z'}$ traversed. In the first case ({\em breakthrough}) the final
active path $\overline A$ in $\overline\Gamma$ determines the desired augmenting
path $A$ in $\Gamma$, and we create a good pair $Q,R$ as described
above. In the second case, the non-existence of a good pair for the
given $z,z'$ is declared. Consider both cases.
{\em Breakthrough case.} Delete from $\Gamma$ the arcs of $Q,R$ and
then delete all the nodes and arcs that are no longer contained in $Z$
to $t$ paths (thus maintaining (C1)). This is carried out by an obvious
{\em cleaning procedure} in $O(q)$ time, where $q$ is the number of
arcs deleted. If $Q$ or $R$ contains a
complex node, the iteration finishes by transforming $Q,R$ into a good
pair of paths of the initial graph; this is carried out by a
{\em path expansion procedure} which will be described later.
The obtained $\Gamma,Z$ form the input of the next iteration, and
the new pre-path $D$ is assigned to be the trivial path $t$.
If $\Gamma$ vanishes, the algorithm terminates.
The following observation is crucial for estimating the time bound.
\begin{lemma} \label{lm:break}
Let $q$ be the number of arcs deleted at an iteration with a
breakthrough. Then, excluding the path expansion procedure if applied,
the iteration runs in $O(q)$ time.
\end{lemma}
\begin{proof}
Let $\overline W$ be the set of arcs of $\overline\Gamma$ traversed by
TDFS on the iteration, and $W$ the corresponding set in $\Gamma$,
i.e., $W=\{e\in U: e\in \overline W$ or $\overline e\in\overline W\}$.
The iteration runs in $O(q+|W|)$ time, taking into account that each
arc of $P$ not in $Q\cup R$ is contained in $W$. Therefore, it
suffices to show that no arc from $W$ remains in the new graph
$\Gamma$. Suppose this is not so. Then there is a $Z$ to $t$ path $L$
of the old $\Gamma$ that meets $W$ but not $E(Q)\cup E(R)$ (as the
arcs of $Q\cup R$ are deleted).
Let $e=(x,y)$ be the {\em last} arc of $L$ in $W$. Let $b=(y,w)$ be
the next arc of $L$ (it exists since $y=t$ would imply that $e$
is in $A$ but not in $P$, whence $e$ belongs to $Q\cup R$).
Then $b\not\in W\cup E(Q)\cup E(R)$, by the choice of $e$. Two cases
are possible.
(i) $e$ is in $P$. Then $\overline e=(y,x)\in\overline W$. According to
condition (C2), at the time TDFS traversed $\overline e$ from $y$ to $x$
all arcs of $\overline\Gamma$ leaving $y$ had already been traversed.
So $b$ is not in $\overline\Gamma$, implying $b\in E(P)-W$.
Then $b$ is in $Q\cup R$; a contradiction.
(ii) $e$ is not in $P$. Then $e\in \overline W$ and $e$ does not lie on
the final active path $\overline A$ (otherwise $e$ is in $Q\cup R$).
Therefore, TDFS traversed $e$ in both directions. To the time of
traversal of $e$ in backward direction, from $y$ to $x$, all arcs
of $\overline\Gamma$ leaving $y$ have been traversed (at this point the
difference between TDFS and DFS is important). So $b$ is not in
$\overline\Gamma$, whence $b\in E(P)$. Now $\overline b\not\in \overline W$ implies
that $b$ is in $Q\cup R$; a contradiction.
\end{proof}
{\em Non-breakthrough case.} Let $Y$ be the set of nodes visited by
TDFS. Then no arc of $\overline\Gamma$ leaves $Y$. Therefore, in view of
(C1),
\begin{myitem}
the set of arcs of $\Gamma$ leaving $Y$ consists
of a unique arc $a=(v,w)$, this arc lies on $P$, and the nodes of the
part of $P$ from $z$ to $v$ are contained in $Y$.
\label{eq:N1}
\end{myitem}
Since no arc of $\Gamma$ enters $Z$, we also have
\begin{equation} \label{eq:N2}
Y\cap Z=\{z,z'\}.
\end{equation}
We reduce $\Gamma$ by shrinking its subgraph $\Gamma_Y=(Y,U_Y)$
induced by $Y$ into one node; the formed {\em complex} node $v_Y$ is
identified with $v$.
We call $v$ the {\em root} of $\Gamma_Y$ and store $\Gamma_Y$.
The list of arcs entering $v_Y$ in the new graph is
produced by simply merging the lists $I_x$ for $x\in Y$ from which
the arcs occurring in $\Gamma_Y$ are explicitly removed, using the
lists $O_y$ for $y\in Y$.
(We do not need to correct the outgoing arc lists $O_x$ for
$x\not\in Y$ explicitly, as we explain later.)
Thus, to update $\Gamma$ takes time linear in $|U_Y|$. By~\refeq{N1},
$a$ is the only arc leaving $v_Y$ in $\Gamma$.
From (C1) and~\refeq{N1} it follows that
\begin{myitem}
the new graph $\Gamma$ is again acyclic, and for each
$x\in Y$, there is a path $P_Y(x)$ from $x$ to the root $v$ in
$\Gamma_Y$.
\label{eq:N3}
\end{myitem}
In view of~\refeq{N2}, the set $Z$ is updated as $Z:=Z-\{z,z'\}$.
If there is at least one arc entering $v_Y$, then the new graph
$\Gamma$ and set $Z$ satisfy (C1) and form the input of the next
iteration. The new pre-path $D$ is assigned to be the part of $P$ from
the formed complex node $v_Y$ to $t$. If no arc enters $v_Y$, we
finish the current iteration by removing the nodes and the arcs not
contained in $Z$ to $t$ paths. This further reduces $\Gamma$ and may
reduce $Z$ and shorten $D$.
One can see that the set $U_Y$ is exactly $W$. This and the
construction of pre-paths imply the following.
\begin{lemma} \label{lm:nobreak}
Let $q$ be the number of arcs deleted by an $i$-th iteration
without a breakthrough.
Then the iteration runs in
$O(q+{\rm max}\{0,|D_{i+1}|-|D_i|\})$ time. \ \vrule width.2cm height.2cm depth0cm
\end{lemma}
As mentioned above, we do not need to explicitly correct the
outgoing arc lists $O_x$ for $x\not\in Y$
(this would be expensive). Let ${\cal V}$ be the current set of all
complex nodes created from the beginning of the algorithm.
We take advantage of the following facts. First, the elements of
${\cal V}$ that are nodes of the current graph (the {\em maximal} complex
nodes) lie on the current pre-path $D$. Second, at an iteration with a
breakthrough, all complex nodes are removed. Third, at an iteration
without a breakthrough, the
subgraph $\Gamma_Y$ forming the new complex node $v_Y$ contains a
subpath of $P$ from its beginning node (by~\refeq{N1}), and the
cleaning procedure (if applied at the iteration) deletes a part of
the updated $P$ from its beginning node as well. Therefore, one can
store ${\cal V}$ as a tree in a natural way and use the
{\em disjoint set union} data structure from~\cite{GT-85} to
maintain ${\cal V}$. This enables us to efficiently access the head $v_Y$
of any arc $e=(x,v_Y)$ when $e$ is traversed by TDFS (with $O(1)$
amortized time per one arc).
To complete the algorithm description, it remains to explain the
{\em path expansion procedure} to be applied when an iteration with a
breakthrough finds paths $Q,R$ containing complex nodes. It
proceeds in a natural way by recursively expanding complex nodes
occurring in the current $Q,R$ into the corresponding paths $P_Y(x)$ as
in~\refeq{N3} and building $P_Y(x)$ into $Q$ or $R$ (this takes
$O(|P_Y(x)|)$ time). The arc sets of
subgraphs $\Gamma_Y$ extracted during the algorithm are pairwise
disjoint, so the total time for all applications of the procedure
is $O(m)$.
Thus, we can conclude from Lemmas~\ref{lm:break}
and~\ref{lm:nobreak} that the algorithm runs in $O(m)$ time,
yielding Theorem~\ref{tm:bbp}.
In the rest of this section we extend the above approach and
algorithm ({\em Algorithm 1}) to a general case of acyclic $(G,u)$.
The auxiliary graph $\Gamma=(X,U)$ and the set $Z$ are constructed
as above, and the capacity $u(e)$ of each arc $e\in U$ is defined
in a natural way. We call an integer $Z$ to
$t$ flow $g$ in $(\Gamma,u)$ {\em balanced} if the flow values out of
``symmetric'' sources are equal, i.e.,
$$
\mbox{div}_g(z)=\mbox{div}_g(\sigma(z))\quad \mbox{for each $z\in Z$,}
$$
and {\em blocking balanced} if there exists no balanced
flow $g'$ satisfying $g\ne g'\ge g$ (taking into account that $\Gamma$
is acyclic). Then the problem of finding a totally blocking IS-flow in
$(G,u)$ is reduced to {\em problem BBF}: find a balanced blocking flow
for $\Gamma,u,Z,t$.
{\em Algorithm 2} will find a balanced blocking flow
$g$ in the form $g=\alpha_1\chi^{Q_1}+\alpha_1\chi^{R_1}+\ldots+
\alpha_r\chi^{Q_r}+\alpha_r\chi^{R_r}$, where each $\alpha_i$ is a
positive integer, $Q_i$ is a path from some $z\in Z$ to $t$, and $R_i$
is a path from $\sigma(z)$ to $t$.
It iteratively constructs pairs $Q_i,R_i$ for current $\Gamma,u,Z$,
assigns the weight $\alpha_i$ to them as large as possible, and
accordingly reduces the current capacities as $u:=u-
\alpha_i\chi^{Q_i}-\alpha_i\chi^{R_i}$. All arc capacities in $\Gamma$
are positive: once the capacity of an arc becomes
zero, this arc is immediately deleted from $\Gamma$.
Each pair $Q_i,R_i$ is constructed as in Algorithm 1 when
it is applied to the corresponding {\em split-graph} $S=S(\Gamma,u)$.
More precisely (cf.~Section~\ref{sec:theo}),
$S$ is formed by replacing each arc $e=(x,y)$
of $\Gamma$ by two parallel arcs ({\em split-mates}) $e_1,e_2$ from
$x$ to $y$ with the capacities $\lceil u(e)/2\rceil$ and
$\lfloor u(e)/2\rfloor$, respectively. When $u(e)=1$, $e_2$ vanishes in
$S$, and $e_1$ is called {\em critical}. The algorithm maintains $S$
explicitly. The desired pair $Q_i,R_i$ in $(\Gamma,u)$ is
determined by a good pair in $S$ in a natural way.
The main part of an iteration of Algorithm 2 is a slight modification
of an iteration of Algorithm 1. The difference is the following. While
Algorithm 1 deletes {\em all} arcs of the paths $Q,R$ found at an
iteration, Algorithm 2 deletes only a {\em nonempty subset} $B$ of arcs
in $Q\cup R$ (concerning the graph $S$) including all critical arcs in
these paths. One may think that Algorithm 2 essentially works with
a graph $S$ (ignoring $(\Gamma,u)$) in which
some disjoint pairs of parallel arcs (analogs of split-mates) are
distinguished and the other arcs are regarded as critical, and at each
iteration, the corresponding subset $B\subseteq E(Q)\cup E(R)$ to be
deleted is given by an oracle. We emphasize that the unique arc leaving
a complex node is always critical. Therefore, each complex node in
$Q\cup R$ will be automatically removed. Computing $\alpha_i$'s and
other operations of the algorithm beyond the work with the graph $S$
do not affect the asymptotic time bound.
We now estimate the complexity of an iteration of Algorithm 2. In case
without a breakthrough, properties \refeq{N1},\refeq{N2},\refeq{N3} and
Lemma~\ref{lm:nobreak} (with $S$ instead of $\Gamma$)
remain valid. Note that the arc $a$ in~\refeq{N1} is critical
(since it is a unique arc leaving $Y$); therefore, the arc
leaving the created complex node is critical. Our analysis of the
breakthrough case involves the subset $\overline W_2\subset \overline W$ of
arcs traversed by TDFS in both directions (where $\overline W$ is the
set of all traversed arcs in the corresponding auxiliary graph
$\overline S$). Let $W_2$ be the corresponding set in $S$.
\begin{lemma} \label{lm:break-gen}
Suppose an iteration of Algorithm 2 results in a breakthrough. Let
$e=(x,y)$ be an arc of $S$ such that $e\in W_2$ or $e\in E(P)\cap W$.
Then any $x$ to $t$ path $L$ in $S$ starting with the arc $e$
contains a critical arc in $Q\cup R$ (and therefore, $e$ vanishes in
the new graph $S$).
\end{lemma}
\begin{proof}
Suppose this is not so and consider a counterexample $(e,L)$ with
$|L|$ minimum.
Let $\overline A_0$ be the active path in $\overline S$ just before the
traversal of $e$ or $\overline e$ from $y$ to $x$, and $A_0$ the
corresponding (undirected) path in $S$. At that time, for the set
$\overline O_y$ of arcs of $\overline S$ leaving $y$,
\begin{myitem}
all arcs in $\overline O_y$ except $\overline e$ (in case $e\in E(P)$) are
already traversed
\label{eq:ast}
\end{myitem}
(in view of condition (C2)).
Let $b=(y,w)$ be the second arc of $L$ (existing as
$y=t$ is impossible). By~(\ref{eq:ast}), if $b$ is not in $P$, then
$b\in W$. Also $b\not\in W_2$ and $b\not\in E(P)\cap W$ (otherwise
the part of $L$ from $y$ to $t$ would give a smaller counterexample).
This implies that $b$ belongs to $Q\cup R$, and
therefore, $b$ is not critical. Let $b'$ be the split-mate of $b$.
Considering the path starting with $b'$ and then following
$L$ from $w$ to $t$ (which is smaller than $L$), we similarly
conclude that $b'\not\in W_2$ and $b'\not\in E(P)\cap W$. To come to a
contradiction, we proceed as follows.
The fact that $S$ is acyclic implies that the symmetric difference
(on the arcs) of $P$ and $A_0$ is decomposed into a
path from $Z$ to $t$ and a path from $Z$ to $y$; therefore,
$E(P)\triangle E(A_0)$ contains {\em at most one} arc $a$ leaving $y$.
This and~(\ref{eq:ast}) imply that all arcs in $O_y\cap \overline O_y$
except, possibly, $a$ have been traversed twice; so they are in $W_2$.
Hence, one of $b,b'$ must be in $P$; let for definiteness $b\in E(P)$
(then $b'\not\in E(P)$).
Now $b\not\in W$ implies $b\in E(P)-E(A_0)$, and $b'\in W-W_2$ implies
$b'\in E(A_0)-E(P)$. Thus, both arcs $b,b'$ leaving $y$ are in
$E(P)\triangle E(A_0)$; a contradiction.
\end{proof}
The running time of an iteration with a breakthrough is
$O(|P|+|W|+q)$, where $q$ is the number of arcs deleted from $S$.
Lemma~\ref{lm:break-gen} allows us to refine this bound as
$O(|Q|+|R|+q)$. Combining this with Lemma~\ref{lm:nobreak}, we can
conclude that, up to a constant factor, the total time of Algorithm 2
is bounded from above by $m$ plus the sum $\Sigma$ of lengths of paths
$Q_1,R_1,\ldots,Q_r,R_r$ in the representation of the flow $g$
constructed by the algorithm. Since $|Q_i|,|R_i|\le n$ and $r\le 2m$
(as each iteration decreases the arc set of $S$), $\Sigma$ is $O(nm)$.
Also $\Sigma$ does not exceed the sum of the transit capacities $u(x)$
of inner nodes $x$ of $\Gamma$ (assuming, without loss of generality,
that no arc goes from $s$ to $s'$). Thus, Theorem~\ref{tm:ac-unit} is
generalized as follows.
\begin{theorem} \label{tm:ac-gen}
For an acyclic capacitated skew-symmetric network $N$ with $O(n)$ nodes
and $O(m)$ arcs, a totally blocking IS-flow can be found in
$O({\rm min}\{m+\Delta(N),nm\})$ time.
\end{theorem}
Together with Corollary~\ref{cor:cvb} and Lemma~\ref{lm:root},
this yields the desired generalization.
\begin{corollary} \label{cor:gen_cap}
SBFM can be implemented so that it finds a maximum IS-flow in an
arbitrary skew-symmetric network $N$ in
$O({\rm min}\{n^2m,\sqrt{\Delta(N)}(m+\Delta(N))\})$ time.
\end{corollary}
\section{\Large Applications to Matchings}\label{sec:mat}
Apply the reduction of the maximum u-capacitated b-matching problem
(CBMP) in a graph $G'=(V',E')$ to the maximum IS-flow problem in
a network $N=(G=(V,E),\sigma,u,s)$; see Section~\ref{sec:back}.
The best time bound for a general case of CBMP is attained by applying
the algorithm of Section~\ref{sec:gisa}. Theorem~\ref{tm:gisa} implies
the following.
\begin{corollary} \label{cor:bmat}
CBMP can be solved in $O(M(n,m)+nm)$ time, where $n:=|V'|$ and
$m:=|E'|$.
\end{corollary}
When the input functions $u,b$ in CBMP are small enough, the
transit capacities of nodes in $N$ become small as well. Then the
application of the shortest blocking IS-flow method may result in a
competitive or even faster algorithm for CBMP.
Let the capacities of all edges of $G'$ be ones. We have
$\Delta(N)=O(m)$ in general, and $\Delta(N)=O(n)$ if $b$ is all-unit.
Then Corollary~\ref{cor:unit} yields the same time bounds as
in~\cite{gab-83,MV-80} for the corresponding cases.
\begin{corollary} \label{cor:bmat_un}
SBFM (with the fast implementation of a phase as in
Section~\ref{sec:acyc}) solves the maximum degree-constrained subgraph
(or b-factor) problem in $O(m^{3/2})$ time and solves the maximum
matching problem in a general graph in $O(\sqrt{n}m)$ time.
\end{corollary}
Feder and Motwani~\cite{FM-91} elaborated a clique compression
technique and used it to improve the $O(\sqrt{n}m)$ bound for the
maximum bipartite matching problem to $O(\sqrt{n}m\log(n^2/m)/\log{n})$.
We explain how to apply a similar approach to a special case of MSFP,
lowering the bound for dense nonbipartite graphs. We need a brief
review of the method in~\cite{FM-91}.
Let $H=(X,Y,E)$ be a bipartite digraph, where $E\subseteq X\times Y$,
$|X|=|Y|=n$ and $|E|=m$. A {\em (bipartite) clique} is a complete
bipartite subgraph $(A,B,A\times B)$ of $H$, denoted by $C(A,B)$.
Define the {\em size} $s(C)$ of $C=C(A,B)$ to be $|A|+|B|$.
A {\em clique partition} of $H$ is a collection
${\cal C}$ of cliques whose arc sets form a partition of $E$; the {\em
size} $s({\cal C})$ of ${\cal C}$ is the sum of sizes of its members.
Let a constant $0<\delta<1/2$ be fixed. Then a clique $C(A,B)$ of $H$
is called a $\delta$-{\em clique} if $|A|=\lceil n^{1-\delta}\rceil$
and $|B|=\lfloor \delta\log{n}/\log(2n^2/m) \rfloor$. It is shown
in~\cite{FM-91} that a $\delta$-clique exists.
The {\em clique partition algorithm} in~\cite{FM-91}
finds a $\delta$-clique $C_1$ in the initial graph $H_1=(X,Y,E=:E_1)$
and deletes the arcs of $C_1$, obtaining the next graph $H_2=
(X,Y,E_2)$. Then it finds a $\delta$-clique $C_2$
(concerning the number of arcs of $H_2$) and deletes the arcs of $C_2$
from $H_2$, and so on while the number of arcs of the current graph
is at least $2n^{2-\delta}$ and the $Y$-part of a $\delta$-clique is
nonempty. The remaining arcs are partitioned into
cliques consisting of a single arc each. So the cliques $C_i$ extracted
during the algorithm form a clique partition. The running time of the
algorithm is estimated as the sum of bounds $\tau(C_i)$ on the time to
extract the cliques $C_i$ plus a time bound $\tau'$ to maintain a
certain data structure (so-called neighborhood trees). One shows that
\begin{myitem}
the algorithm runs in $O(\sqrt{n}m\beta)$ time and finds a clique
partition ${\cal C}$ of $H$ such that $s({\cal C})=O(m\beta)$, where
$\beta:=\log(n^2/m)/\log{n}$.
\label{eq:part}
\end{myitem}
Suppose we wish to find a maximum matching in a bipartite graph or,
equivalently, to find a maximum integer flow from $s$ to $t$ in a
digraph $G$ with unit arc capacities, node set $X\cup Y\cup\{s,t\}$
and arc set $E\cup(s\times X)\cup(Y\times t)$, where $E\subseteq
X\times Y$. One may assume $|X|=|Y|=n$. Using the above algorithm,
form a clique partition ${\cal C}$ as in~\refeq{part} for $(X,Y,E)$.
Transform each clique $C(A,B)$ in ${\cal C}$ into a star by replacing
its arcs by a node $z$, arcs $(x,z)$ for all $x\in A$ and arcs
$(z,y)$ for all $y\in B$.
There is a natural one-to-one correspondence between the
$s$ to $t$ paths in $G$ and those in the resulting graph $G^\ast$,
and the problem for $G^\ast$ is equivalent to that for $G$.
Compared with $G$, the graph $G^\ast$ has $|{\cal C}|$ additional nodes
but the number $m^\ast$ of its arcs becomes
$2n+s({\cal C})$, or $O(m\beta)$. Given a flow in $G^\ast$, any (simple)
augmenting path of length $q$ meets exactly $(q-1)/2$ nodes in
$X\cup Y$, and these nodes have unit transit capacities. This implies
that Dinits' algorithm has $O(\sqrt{n})$ phases (arguing as
in~\cite{ET-75,kar-73-2}). Since each phase takes $O(m^\ast)$ time, the
whole algorithm runs in $O(\sqrt{n}m\beta)$ time, as desired.
Now suppose $H=(X,Y,E,\sigma)$ is a skew-symmetric bipartite graph
without parallel arcs, where the sets $X$ and $Y$ are symmetric to
each other. We modify the above method as follows. Note that any two
symmetric cliques in $H$ are disjoint (otherwise some
$x\in X$ is adjacent to $\sigma(x)$, implying the existence of two arcs
from $x$ to $\sigma(x)$). We call a clique partition ${\cal C}$
{\em symmetric} if $C\in{\cal C}$ implies $\sigma(C)\in{\cal C}$. An
iteration of the {\em symmetric clique partition algorithm} works as in
the previous algorithm, searching for a $\delta$-clique $C'$ in the
current $H$, but then deletes the arcs of {\em both} $C'$ and
$\sigma(C')$. Let the algorithm construct a partition ${\cal C}'$
consisting of cliques $C'_1,\sigma(C'_1),\ldots,C'_r,\sigma(C'_r)$
obtained in this order.
To estimate the size of ${\cal C}'$ and the running time, imagine we
would apply the previous algorithm to our $H$ (ignoring the
fact that $H$ is skew-symmetric). Let the resulting partition ${\cal C}$
be formed by cliques $C_1,\ldots,C_q$ (in this order).
Note that for a bipartite graph with $n$ nodes and $m$ arcs, both the
number $e(C)$ of arcs of a $\delta$-clique $C$ and its size $s(C)$
are computed uniquely, and these are monotone functions in $m$,
as well as the above-mentioned time bound $\tau(C)$ (indicated
in~\cite{FM-91}). Moreover, one can check that $m'\le m$ and
$e(C')\ne 0$ imply $m'-2e(C')\le m-e(C)$,
where $C'$ is a $\delta$-clique in a graph with
$n$ nodes and $m'$ arcs. Using these, we can conclude that
$r\le q$ and that for $i=1,\ldots r$, $s(C'_i)\le s(C_i)$ and
$\tau(C'_i)\le \tau(C_i)$. Then $s({\cal C}')\le 2s({\cal C})$,
implying $s({\cal C})=O(m\beta)$, by~\refeq{part}.
Also the time of the modified algorithm is $O(\sqrt{n}m\beta)$
(by~\refeq{part} and by the fact that the above
bound $\tau'$ remains the same) plus the time needed to treat the
symmetric cliques $\sigma(C'_i)$, which is $O(m)$.
Finally, the graph $H^\ast$ obtained from $H$ by transforming the
cliques $C'_1,\sigma(C'_1),\ldots, C'_r,\sigma(C'_r)$ into stars
has a naturally induced skew-symmetry. By the above argument, $H^\ast$
has $O(m\beta)$ arcs, and computing $H^\ast$ takes
$O(\sqrt{n}m\beta)$ time. Apply such a transformation to the input
graph of MSFP arising from an instance of the maximum matching problem.
Arguing as in the bipartite matching case above and as in the proof of
Lemma~\ref{lm:root}, we conclude with the following.
\begin{theorem} \label{tm:compr}
A maximum matching in a general graph with $n$ nodes and $m$
edges can be found in $O(\sqrt{n}m\log(n^2/m)/\log{n})$ time.
\end{theorem}
{\bf Acknowledgements.}
We thank the anonymous referees for suggesting improvements in the
original version of this paper and pointing out to us important
references.
\small
\begin{thebibliography}{99}
\bibitem{ans-85}
R.P.~Anstee. An algorithmic proof of Tutte's $f$-factor theorem.
{\sl J. of Algorithms} {\bf 6} (1985) 112--131.
\bibitem{ans-87}
R.P.~Anstee. A polynomial algorithm for $b$-matchings: An alternative
approach. {\sl Information Proc. Letters} {\bf 24} (1987) 153--157.
\bibitem{blu-90}
N.~Blum.
A new approach to maximum matching in general graphs.
In {\sl Automata, Languages and Programming} [Lecture Notes in Comput.
Sci. 443] (Springer, Berlin, 1990), pp. 586--597.
\bibitem{din-70}
E.~A. Dinic.
Algorithm for solution of a problem of maximum flow in networks with
power estimation.
{\sl Soviet Math. Dokl.} {\bf 11} (1970) 1277--1280.
\bibitem{edm-65}
J.~Edmonds.
Paths, trees and flowers.
{\sl Canadian J. Math.} {\bf 17} (1965) 449--467.
\bibitem{EJ-70}
J.~Edmonds and E.~L. Johnson.
Matching, a well-solved class of integer linear programs.
In R.~Guy, H.~Haneni, and J.~Sch{\"o}nhein, eds,
{\sl Combinatorial Structures and Their Applications},
Gordon and Breach, NY, 1970, pp. 89--92.
\bibitem{EK-72}
J.~Edmonds and R.~M. Karp.
Theoretical improvements in algorithmic efficiency for network flow
problems.
{\sl J. Assoc. Comput. Mach.} {\bf 19} (1972) 248--264.
\bibitem{ET-75}
S.~Even and R.~E. Tarjan.
Network flow and testing graph connectivity.
{\sl SIAM J. Comput.} {\bf 4} (1975) 507--518.
\bibitem{FM-91}
T.~Feder and R.~Motwani.
Clique partitions, graph compression and speeding-up algorithms.
In {\sl Proc. 23rd Annual ACM Symp. on Theory of Computing},
1991, pp.~123--133.
\bibitem{FF-62}
L.~R. Ford and D.~R. Fulkerson.
{\sl Flows in Networks}.
Princeton Univ. Press, Princeton, NJ, 1962.
\bibitem{FJ-99}
C.~Fremuth-Paeger and D.~Jungnickel.
Balanced network flows. Parts I--III.
{\sl Networks} {\bf 33} (1999) 1--56.
\bibitem{gab-83}
H.~N. Gabow.
An efficient reduction technique for degree-constrained subgraph and
bidirected network flow problems.
{\sl Proc. of STOC} {\bf 15} (1983) 448--456.
\bibitem{GT-85}
H.~N. Gabow and R.~E. Tarjan.
A linear-time algorithm for a special case of disjoint set union.
{\sl J. Comp. and Syst. Sci.} {\bf 30} (1985) 209--221.
\bibitem{GT-91}
H.~N. Gabow and R.~E. Tarjan.
Faster scaling algorithms for general graph-matching problems.
{\sl J. ACM} {\bf 38} (1991) 815--853.
\bibitem{GK-95}
A.~V. Goldberg and A.~V. Karzanov.
Maximum skew-symmetric flows.
In P.~Spirakis, ed., {\sl Algorithms -- ESA '95} (Proc. 3rd European
Symp. on Algorithms), {\sl Lecture Notes in Computer Sci.}
{\bf 979}, 1995, pp. 155--170.
\bibitem{GK-96}
A.~V. Goldberg and A.~V. Karzanov.
Path problems in skew-symmetric graphs.
{\sl Combinatorica} {\bf 16} (1996) 129--174.
\bibitem{GK-99}
A.~V. Goldberg and A.~V. Karzanov.
Maximum skew-symmetric flows and their applications to b-matchings.
{\sl Preprint} 99-043, SFB 343, Bielefeld Universit\"at, Bielefeld,
1999, 25 pp.
\bibitem{HK-73}
J.~E. Hopcroft and R.~M. Karp.
An $n^{5/2}$ algorithm for maximum matching in bipartite graphs.
{\sl SIAM J. Comput.} {\bf 2} (1973) 225--231.
\bibitem{kar-70}
A.~V. Karzanov.
\`Ekonomny\u{i} algoritm nakhozhdeniya bikomponent grafa
[Russian; An efficient algorithm for finding the bicomponents of a
graph].
In {\sl Trudy Tret'e\u{i} Zimne\u{i} Shkoly po Matematicheskomu
Programmirovaniyu i Smezhnym Voprosam} [Proc. of 3rd Winter School on
Mathematical Programming and Related Topics], issue {\bf 2}. Moscow
Engineering and Construction Inst. Press, Moscow, 1970, pp.~343--347.
\bibitem{kar-73}
A.V.~Karzanov.
Tochnaya otsenka algoritma nakhozhdeniya maksimal'nogo potoka,
primenennogo k zadache ``o predstavitelyakh''
[Russian; An exact estimate of an algorithm for
finding a maximum flow, applied to the problem ``of representatives''].
In {\sl Voprosy Kibernetiki} [Problems of Cybernetics],
volume~{\bf 3}. Sovetskoe Radio, Moscow, 1973, pp.~66--70.
\bibitem{kar-73-2}
A.V.~Karzanov.
O nakhozhdenii maksimal'nogo potoka v setyakh spetsial'nogo vida i
nekotorykh prilozheniyakh
[Russian; On finding maximum flows in networks
with special structure and some applications].
In {\sl Matematicheskie Voprosy Upravleniya Proizvodstvom}
[Mathematical Problems for Production Control],
volume~{\bf 5}.
Moscow State University Press, Moscow, 1973, pp. 81--94.
\bibitem{KS-93}
W.~Kocay and D.~Stone. Balanced network flows.
{\sl Bulletin of the ICA} {\bf 7} (1993) 17--32.
\bibitem{law-76}
E.~L. Lawler.
{\sl Combinatorial Optimization: Networks and Matroids}.
Holt, Reinhart, and Winston, New York, NY., 1976.
\bibitem{LP-86}
L.~Lov{\'a}sz and M.~D. Plummer.
{\sl {Matching Theory}}.
Akad{\'e}miai Kiad{\'o}, Budapest, 1986.
\bibitem{MV-80}
S.~Micali and V.V.~Vazirani.
An $O(\sqrt{V} E)$ algorithm for finding maximum matching in general
graphs.
{\sl Proc. of the 21st Annual IEEE Symposium in Foundation of Computer
Science}, 1980, pp.~71--109.
\bibitem{sch-03}
A.~Schrijver. {\sl Combinatorial Optimization. Polyhedra and
Efficiency}, Volume~A. ({\sl Algorithms and Combinatorics} {\bf 24}),
Springer, Berlin and etc., 2003.
\bibitem{tar-72}
R.~Tarjan. Depth-first search and linear graph algorithms.
{\sl SIAM J. Comput.} {\bf 1} (1972) 146--160.
\bibitem{tut-67}
W.T.~Tutte.
Antisymmetrical digraphs.
{\sl Canadian J. Math.} {\bf 19} (1967) 1101--1117.
\bibitem{vaz-90}
V.V.~Vazirani.
A theory of alternating paths and blossoms for proving correctness of
the $O(\sqrt{V} E)$ general graph maximum matching algorithm.
In: R.~Kannan and W.R.~Cunningham, eds., {\sl Integer Programming and
Combinatorial Optimization} (Proc. 1st IPCO Conference), University of
Waterloo Press, Waterloo, Ontario, 1990, pp.~509--535.
\end{thebibliography}
\end{document} |
\begin{document}
\title{Bass and Betti numbers of a module and its deficiency modules}
{\let\thefootnote\relax\footnote{{{\it Date:} \today}}}
{\let\thefootnote\relax\footnote{{{\it 2020 Mathematics Subject Classification.} Primary 13C14, 13D45; Secondary 13H10, 14B15.}}}
{\let\thefootnote\relax\footnote{{{\it Key words and phrases.} Generalized Cohen-Macaulay module, deficiency modules, Auslander-Reiten conjecture.}}}
{\let\thefootnote\relax\footnote{{The second-named author was supported by a CAPES Doctoral Scholarship.}}}
\begin{abstract}
This paper aims to provide several relations between Bass and Betti numbers of a given module and its deficiency modules. Such relations and the tools used throughout allow us to generalize some results of Foxby, characterize Cohen-Macaulay modules in equidimensionality terms, study the Cohen-Macaulay and complete intersection properties of a ring and furnish a case for the Auslander-Reiten conjecture.
\end{abstract}
\section{Introduction}
In the celebrated paper \cite{F}, Foxby proved that over a Gorenstein local ring $R$ of dimension $d$, a Cohen-Macaulay $R$-module $M$ of dimension $t$ is such that $$\beta_j(M)=\mu^{j+t}(\Ext^{d-t}_R(M,R))$$ and $$\mu^j(M)=\beta_{j-t}(\Ext^{d-t}_R(M,R))$$ for all $j\geq0$. In particular, $\pd_RM<\infty$ if and only if $\id_R\Ext^{d-t}_R(M,R)<\infty$, and $\id_RM<\infty$ if and only if $\pd_R\Ext^{d-t}_R(M,R)<\infty$. Recently, Freitas and Jorge-Pérez \cite{FJP} generalized the first equivalence for local rings which are factor of Gorenstein local rings. In this paper, we shall look at these results in a wider situation as follows.
Schenzel \cite{S} generalized the notion of canonical module in the following sense. Given a Noetherian local ring $R$ which is a factor ring of a $s$-dimensional Gorenstein local ring $S$ and a finite $R$-module $M$, the \emph{$j$-th deficiency module of $M$} is defined as $$K^j(M)=\Ext^{s-j}_S(M,S)$$ for all $j=0,...,\dim_RM$. Local duality assures that these modules are well-defined. Particularly, $K(M):=K^{\dim_RM}(M)$ is called the \emph{canonical module of $M$}. In a certain sense, the deficiency modules of $M$ measure the extent of the failure of $M$ to be Cohen-Macaulay.
In this paper, we shall look for relations between Bass and Betti numbers of a given module and its deficiency modules. As Foxby provided the relations above for Cohen-Macaulay modules over a Gorenstein local ring, we furnish the same relations for generalized Cohen-Macaulay canonically Cohen-Macaulay modules with zeroth and first deficiency modules of positive depth over a local ring which is a factor of a Gorenstein local ring, see Theorem \ref{foxbygeneralization}. Furthermore, the theorems \ref{foxbygeneralization2} and \ref{foxbygeneralization3} show the same relations for arbitrary finite $R$-modules when certain homological conditions over its deficiency modules are imposed.
Besides such generalizations, we exhibit bounds for the Bass numbers (Betti numbers) of a module in terms of the Betti numbers (Bass numbers) of its deficiency modules, see the theorems \ref{mu<beta} and \ref{beta<mu}. They provide several applications that are worked out through this paper. Three examples of such applications are Corollary \ref{bassgeneralization}, providing the Cohen-Macaulay property of a local ring in terms of homological conditions over deficiency modules, Corollary \ref{CIchar} furnishing a characterization of the complete intersection property in terms of the first and second Bass numbers of the residue field, and Corollary \ref{AR} that states that the Auslander-Reiten conjecture holds for modules such that its deficiency modules have finite injective dimensions, generalizing then a similar application given quite recently in \cite{FJP}.
Our methods are especially concerned with studying the behaviour of some spectral sequences. The first of them is called Foxby spectral sequence \ref{foxbyss}, as it was firstly used by Foxby in \cite{F}. The first applications of such spectral sequences regard general information on the canonical module of a generalized Cohen-Macaulay module or an equidimensional module, see Theorem \ref{GCMtheorem} and Proposition \ref{equidimensional}. These results provide sufficient conditions for when the module is also canonically Cohen-Macaulay and its canonical module is generalized Cohen-Macaulay, see the corollaries \ref{GCMCCM} and \ref{GCMcanonicalmodule}, also a characterization of Cohen-Macaulay modules in Corollary \ref{CMequivalence} and a version for generalized Cohen-Macaulay modules of a Schenzel's result, see Corollary \ref{weakercmcanonicalmodule}.
\section{Generalized Cohen-Macaulay modules}
\noindent\textbf{Setup.} Throughout this paper, $R$ will always denote a commutative Noetherian local ring with non-zero unity, maximal ideal $\mathfrak{m}$ and residue class field $k$. Also, $R$ is supposed to be a factor of a Gorenstein local ring $S$ of dimension $s$, i.e., there exists a surjective ring homomorphism $S\rightarrow R$. We say that an $R$-module $M$ is \emph{finite} if it is a finitely generated $R$-module and denote by $M^\vee$ its Matlis dual.
For an $R$-module $M$, $\pd_RM$ and $\id_RM$ denote, respectively, the projective dimension and injective dimension of $M$. Further, $\beta_i(M)=\dim_k\Tor^R_i(k,M)$ is the $i$-th Betti number of $M$, $\mu^i(M)=\dim_k\Ext^i_R(k,M)$ is the $i$-th Bass number of $M$ and $\type(M)=\dim_k\Ext^{\depth_RM}_R(k,M)$ is its type.
The following spectral sequences first appeared in \cite{F}.
\begin{lemma}[Foxby spectral sequences]\label{foxbyss}
Given a finite $R$-module $X$, an $R$-module $Y$ and a $S$-module $Z$, if either $\pd_RX<\infty$ or $\id_SZ<\infty$, then there exist a graded $R$-module $H$ and first quadrant spectral sequences
$$E_2^{p,q}=\Ext^p_S(\Ext^q_R(X,Y),Z)\Rightarrow_p H^{q-p}$$ and
$$'E_2^{p,q}=\Tor_R^p(X,\Ext^q_S(Y,Z))\Rightarrow_p H^{p-q}.$$
\end{lemma}
\begin{proof}
Let $F_\bullet$ be a free $R$-resolution of $X$ and let $E^\bullet$ be an injective $S$-resolution of $Z$. The desired spectral sequences yield from the isomorphism of first quadrant double complexes
$$\Hom_S(\Hom_R(F_\bullet,Y),E^\bullet)\simeq F_\bullet\otimes_R\Hom_S(Y,E^\bullet).$$
\end{proof}
The first application of the Foxby spectral sequences \ref{foxbyss} is a generalization of a well-known result about Cohen-Macaulay modules and its canonical modules, see \cite[Theorem 1.14]{S}. First, we need an auxiliary lemma.
We say that a finite $R$-module $M$ satisfies \emph{Serre's condition $S_k$}, for $k$ being a non-negative integer, provided $$\depth_{R_\mathfrak{p}}M_\mathfrak{p}\geq\min\{k,\dim_{R_\mathfrak{p}}M_{\mathfrak{p}}\}$$ for all $\mathfrak{p}\in\Supp M$.
\begin{lemma}\cite[Lemma 1.9]{S}\label{schenzellemma}
Let $M$ be a finite $R$-module of dimension $t$. The modules $K^j(M)$ satisfy the following properties.
\begin{itemize}
\item [(i)] $\dim_R K^j(M)\leq j$ for every integer $j$ and $\dim_RK(M)=t$;
\item [(ii)] Suppose that $M$ is equidimensional. Then, $M$ satisfies Serre's condition $S_k$ if and only if $\dim_RK^j(M)\leq j-k$, for all $0\leq j<t$.
\end{itemize}
\end{lemma}
A finite $R$-module $M$ is said to be \emph{generalized Cohen-Macaulay} if $H^j_\mathfrak{m}(M)$ is of finite length for all $j<\dim_RM$. It should be noticed that, by Matlis duality, this is equivalent to saying that $K^j(M)$ is of finite length for all $j<\dim_RM$.
\begin{theorem}\label{GCMtheorem}
Let $M$ be a generalized Cohen-Macaulay $R$-module of dimension $t$. The following statements hold.
\begin{itemize}
\item [(i)] There exists an isomorphism $$K^0(K(M))\simeq\Tor_{-t}^S(M,S);$$
\item [(ii)] There exists a five-term type exact sequence
$$\xymatrix@=1em{
\Tor^S_{-t+2}(M,S)\ar[r] & K^2(K(M))\ar[r] & K^0(K^{t-1}(M))\ar[dl] \\ & \Tor^S_{-t+1}(M,S)\ar[r] & K^1(K(M))\ar[r] & 0
}$$
\item [(iii)] There exists an exact sequence
$$\xymatrix@=1em{
0\ar[r] & K^0(K^0(M))\ar[r] & M\ar[r] & K(K(M))\ar[r] & K^0(K^1(M))\ar[r] & 0;
}$$
\item [(iv)] If $t\geq3$, then there exist isomorphisms $$K^{t-j}(K(M))\simeq K^0(K^{j+1}(M))$$ for all $1\leq j\leq t-2$.
\end{itemize}
\end{theorem}
\begin{proof}
Consider the Foxby spectral sequences \ref{foxbyss} by taking $X=M$ as an $S$-module and $Y=Z=S$:
$$E_2^{p,q}=\Ext^p_S(\Ext^q_S(M,S),S)\Rightarrow_p H^{q-p}$$ and
$$'E_2^{p,q}=\Tor^S_p(M,\Ext^q_S(S,S))\Rightarrow_p H^{p-q}.$$
Since $'E_2^{p,q}=0$ for all $q\neq0$, we have
$$H^j\simeq{}'E_2^{j,0}=\Tor_j^S(M,S)$$ for all $j\geq0$, and $$E_2^{p,q}=\Ext^p_S(\Ext^q_S(M,S),S)\Rightarrow_p\Tor^S_{q-p}(M,S).$$
Since $H^j_\mathfrak{m}(M)$ is of finite length, so is $K^j(M)$ for all $j<t$, and by local duality $$\Ext^p_S(\Ext^q_S(M,S),S)=\Ext^p_S(K^{s-q}(M),S)=0$$ for all $q>s-t$ and for all $p\neq s$. Also, Lemma \ref{schenzellemma} $(i)$ assures that $\dim_RK(M)=t$. Thus, $E_2$ has the following shape
$$
\xymatrix@=1em{
0 & 0 & 0 & \cdots & 0 & 0 \\
0 & 0 & 0 & \cdots & \Ext^s_S(K^0(M),S) & 0 \\
\vdots & \vdots & \vdots & \iddots & \vdots & \vdots\\
0 & 0 & 0 & \cdots & \Ext^s_S(K^{t-1}(M),S) & 0 \\
0 & K(K(M)) & \Ext^{s-(t-1)}_S(K(M),S) & \cdots & \Ext^s_S(K(M),S) & 0 \\
0\ar@{--}[rrrrruuuuu] & 0 & 0 & 0 & 0 & 0.
}
$$
By convergence, there are isomorphisms
$$K^0(K(M))=\Ext^s_S(K(M),S)\simeq E_\infty^{s,s-t}\simeq\Tor_{-t}^S(M,S), \ K^1(K(M))=\Ext^{s-1}_S(K(M),S)\simeq E_\infty^{s-1,s-t}$$ and
$$K^0(K^0(M))=\Ext^s_S(K^0(M),S)\simeq E_\infty^{s,s}.$$
Thus we get item $(i)$, and by applying the Matlis dual one has isomorphisms
$$H^1_\mathfrak{m}(K(M))\simeq(E_\infty^{s-1,s-t})^\vee \ \mbox{and} \ H^0_\mathfrak{m}(K^0(M))\simeq(E^{s,s}_\infty)^\vee.$$
The convergence again gives us short exact sequences
\begin{equation}\xymatrix@=1em{
0\ar[r] & E_\infty^{s,s-j}\ar[r] & \Tor_{-j}^S(M,S)\ar[r] & E_\infty^{s-(t-j),s-t}\ar[r] & 0
}\label{eq:GCMconv}\end{equation} for all $j\geq0$.
Further, as we move through the pages of $E$, the differentials connecting the vertical and horizontal lines in the diagram above come into play. In other words, there is an exact sequence
\begin{equation}\xymatrix@=1em{
0\ar[r] & E_\infty^{s-(t-j),s-t}\ar[r] & \Ext_S^{s-(t-j)}(K(M),S)\ar[r] & \Ext^s_S(K^{j+1}(M),S)\ar[r] & E_\infty^{s,s-(j+1)}\ar[r] & 0
}\label{eq:GCMdifferentials}\end{equation}
for all $0\leq j\leq t-2$.
Item $(ii)$ is exactly the five-term exact sequence of $E$. For item $(iii)$, by taking $j=0$ in both of the above exact sequences, we have the following exact sequences
$$\xymatrix@=1em{
0\ar[r] & \Ext^s_S(K^0(M),S)\ar[r] & M\ar[r] & E^{s-t,s-t}_\infty\ar[r] & 0
}$$ and
$$\xymatrix@=1em{
0\ar[r] & E^{s-t,s-t}_\infty\ar[r] & K(K(M))\ar[r] & \Ext^s_S(K^1(M),S)\ar[r] & E_\infty^{s,s-1}\ar[r] & 0.
}$$ The result follows by splicing these sequences and noticing that $E_\infty^{s,s-1}\subseteq\Tor^S_{-1}(M,S)=0$.
The exact sequence \ref{eq:GCMconv} assures that $E_\infty^{s-(t-j),s-t}=E_\infty^{s,s-j}=0$ for all $j>0$, so that, by the exact sequence \ref{eq:GCMdifferentials}, $$K^{t-j}(K(M))=\Ext^{s-(t-j)}_S(K(M),S)\simeq\Ext^s_S(K^{j+1}(M),S)=K^0(K^{j+1}(M))$$ for all $1\leq j\leq t-2$.
\end{proof}
The concept of \emph{canonically Cohen-Macaulay module} was introduced by Schenzel \cite{S2}. We say that a finite $R$-module $M$ is canonically Cohen-Macaulay if its canonical module $K(M)$ is Cohen-Macaulay.
\begin{corollary}\label{GCMCCM}
Let $M$ be a generalized Cohen-Macaulay $R$-module of dimension $t$. The following statements hold.
\begin{itemize}
\item [(i)] If $t>j$ with $j\in\{0,1\}$, then $\depth_RK(M)>j$;
\item [(ii)] If $t=1$, then $M$ is canonically Cohen-Macaulay and there exists the short exact sequence
$$\xymatrix@=1em{
0\ar[r] & K^0(K^0(M))\ar[r] & M\ar[r] & K(K(M))\ar[r] & 0;
}$$
\item [(iii)] If $t=2$, then $M$ is canonically Cohen-Macaulay;
\item [(iv)] If $t\geq3$, then $K(M)$ is generalized Cohen-Macaulay.
\end{itemize}
\end{corollary}
\begin{proof}
Item $(i)$ follows immediately from Theorem \ref{GCMtheorem} $(i)$ and $(ii)$. For item $(ii)$, item $(i)$ assures that $K(M)$ is Cohen-Macaulay and Theorem \ref{GCMtheorem} $(iii)$ is the desired exact sequence. As to item $(iii)$, item $(i)$ again assures that $K(M)$ is Cohen-Macaulay. Item $(iv)$ follows directly from item $(i)$ and Theorem \ref{GCMtheorem} $(iv)$.
\end{proof}
\begin{corollary}\label{GCMcanonicalmodule}
If $M$ is generalized Cohen-Macaulay, then so is $K(M)$.
\end{corollary}
Corollary \ref{GCMcanonicalmodule} inspires us to ask the following.
\begin{question}
Given a finite $R$-module $M$, when is $K(M)$ generalized Cohen-Macaulay?
\end{question}
As Corollary \ref{GCMCCM} assures that generalized Cohen-Macaulay modules of dimension at most two are canonically Cohen-Macaulay, Theorem \ref{GCMtheorem} $(iv)$ recovers a characterization \cite{BN} for the case where the dimension is at least three.
\begin{corollary}\cite[Corollary 2.7]{BN}
Let $M$ be a generalized Cohen-Macaulay $R$-module of dimension $t\geq3$. Then, the following statements are equivalent
\begin{itemize}
\item [(i)] $M$ is canonically Cohen-Macaulay;
\item [(ii)] $H^j_\mathfrak{m}(M)=0$ for all $j=2,...,t-1$;
\item [(iii)] The $\mathfrak{m}$-transform functor $\D_\mathfrak{m}(M)$ is a Cohen-Macaulay $R$-module.
\end{itemize}
\end{corollary}
\begin{proposition}\label{equidimensional}
Let $M$ be a finite $R$-module of depth $g$ and dimension $t$. The following statements hold.
\begin{itemize}
\item [(i)] Assume $M$ is a generalized Cohen-Macaulay $R$-module. If $\depth_RK^j(M)>0$ for $j=0,1$, then $M\simeq K(K(M))$. In particular, this isomorphism holds true whenever $g\geq2$.
\item [(ii)] Suppose $M$ is equidimensional. If $M$ satisfies Serre's condition $S_{k+1}$ for some positive integer $k$, then $$K^j(K(M))\simeq \Tor^S_{-t+j}(M,S)$$ for all $t-k+1\leq j\leq t$.
\end{itemize}
\end{proposition}
\begin{proof}
Item $(i)$ follows immediately from Theorem \ref{GCMtheorem} $(iii)$ and from the fact that $K^0(M)=K^1(M)=0$ in case of $g\geq2$.
For item $(ii)$, consider the Foxby spectral sequences given in Theorem \ref{GCMtheorem} $$E_2^{p,q}=\Ext^p_S(\Ext^q_S(M,S),S)\Rightarrow_p\Tor^S_{q-p}(M,S).$$
By Lemma \ref{schenzellemma} $(ii)$ and local duality, we have $$E_2^{s-i,s-j}=\Ext^{s-i}_S(K^j(M),S)=0$$ for all $0\leq j<t$ and $i>j-k-1$. In other words, all modules $E_2^{p,q}$ such that $q\neq s-t$ above the dotted line in the diagram below must be zero
$$
\xymatrix@=1em{
0 & 0 & \cdots & 0 & 0\\
\vdots & \vdots & \iddots & \Ext^s_S(K^{k+1}(M),S) & 0 \\
0 & \Ext^{s-(t-k-2)}_S(K^{t-1}(M),S) & \cdots & \vdots & 0 \\
\Ext^{s-(t-k-1)}_S(K(M),S)\ar@{--}[rrrruuu] & \Ext^{s-(t-k-2)}_S(K(M),S) & \cdots & \Ext^s_S(K(M),S) & 0 \\
0 & \cdots & 0 & 0 & 0.
}
$$
The result follows from the convergence.
\end{proof}
Our results also retrieve the well-known fact that every Cohen-Macaulay module is canonically Cohen-Macaulay, see \cite[Theorem 1.14]{S}.
\begin{corollary}\label{cmcanonicalmodule}
If $M$ is Cohen-Macaulay of dimension $t$, then so is $K(M)$ and $K(K(M))\simeq M$.
\end{corollary}
\begin{proof}
There are two immediate ways of proving the desired result. Indeed, the result follows directly from Theorem \ref{GCMtheorem} as well as from Proposition \ref{equidimensional} $(ii)$.
\end{proof}
Proposition \ref{equidimensional} provides a characterization for the Cohen-Macaulay property.
\begin{corollary}\label{CMequivalence}
If $M$ is a finite $R$-module, then $M$ is Cohen-Macaulay if and only if $M$ is equidimensional canonically Cohen-Macaulay satisfying Serre's condition $S_{k+1}$ for some positive integer $k$.
\end{corollary}
\begin{proof}
It is well-known that a Cohen-Macaulay module is equidimensional and satisfies Serre's condition $S_k$ for any $k$. Corollary \ref{cmcanonicalmodule} assures that such a module is also canonically Cohen-Macaulay. Conversely, by taking $j=t$ in Proposition \ref{equidimensional} $(ii)$, we have the isomorphism $K(K(M))\simeq M$. Since $K(M)$ is Cohen-Macaulay, Corollary \ref{cmcanonicalmodule} again assures that $M\simeq K(K(M))$ is Cohen-Macaulay.
\end{proof}
The next corollary is a version of Corollary \ref{cmcanonicalmodule} for generalized Cohen-Macaulay modules.
\begin{corollary}\label{weakercmcanonicalmodule}
If $M$ is a generalized Cohen-Macaulay module such that $\depth_RK^j(M)>0$ for $j=0,1$, then so is $K(M)$ and $M\simeq K(K(M))$.
\end{corollary}
\begin{proof}
It follows directly from Corollary \ref{GCMcanonicalmodule} and Proposition \ref{equidimensional} $(i)$.
\end{proof}
\section{Bounding Bass numbers}
The Foxby spectral sequences \ref{foxbyss} are fundamental tools in our work. They provide the main result of this section.
\begin{theorem}\label{mu<beta}
If $M$ is a finite $R$-module of depth $g$ and dimension $t$, then the following inequality holds for all $j\geq0$
$$\mu^j(M)\leq\sum_{i=g}^t\beta_{j+i}(K^i(M)).$$
Moreover, $\type(M)=\beta_0(K^g(M))$ and $$\mu^{g+2}(M)-\mu^{g+1}(M)\leq\beta_2(K^g(M))-\beta_1(K^g(M))-\beta_0(K^{g+1}(M)).$$
\end{theorem}
\begin{proof}
Consider the Foxby spectral sequences \ref{foxbyss} by taking $S=R, X=k, Y=M, Z=S$.
$$E_2^{p,q}=\Ext^p_S(\Ext^q_R(k,M),S)\Rightarrow_p H^{q-p}$$ and
$$'E_2^{p,q}=\Tor_p^R(k,\Ext^q_S(M,S))\Rightarrow_p H^{p-q}.$$
Since $\Ext^q_R(k,M)$ is of finite length, we must have $E_2^{p,q}=0$ for all $p\neq s$, so that
$$H^j\simeq E_2^{s,j+s}=\Ext^s_S(\Ext^{j+s}_R(k,M),S)$$ for every integer $j$. Since $K^{s-q}(M)=\Ext^q_S(M,S)$ for all $q\geq0$, we conclude that
\begin{equation}'E_2^{p,q}=\Tor_p^R(k,K^{s-q}(M))\Rightarrow_p\Ext^s_S(\Ext^{p-q+s}_R(k,M),S).\label{eq:1}\end{equation}
Now, since $\Ext^s_S(k,S)^\vee\simeq k$, where $\_^\vee$ denotes the Matlis dual over $R$, we have
$$\Ext^s_S(\Ext^j_R(k,M),S)\simeq\Ext^s_S(k,S)^{\mu^j(M)}\simeq k^{\mu^j(M)}$$ as $k$-vector spaces. Therefore, by the convergence of $'E$,
$$\mu^j(M)\leq\sum_{j=p-q+s}\beta_p(K^{s-q}(M))=\sum_{i=g}^t\beta_{j+i}(K^i(M))$$ for all $j\geq0$.
Now, since $K^i(M)=\Ext^{s-i}_S(M,S)=0$ for all $i<g$, then $'E_2$ has the following corner $$\xymatrix@=1em{
& \vdots & \vdots & \vdots
\\
\cdots & \Tor_2^R(k,K^{g+1}(M)) & \Tor_2^R(k,K^g(M))\ar[ddl] & 0 & \cdots
\\
\cdots & \Tor_1^R(k,K^{g+1}(M)) & \Tor_1^R(k,K^g(M)) & 0 & \cdots
\\
\cdots & k\otimes_RK^{g+1}(M) & k\otimes_RK^g(M) & 0 & \cdots
}$$
Therefore, $$k\otimes_RK^g(M)={}'E_2^{0,s-g}\simeq H^{g-s}\simeq\Ext_S^s(\Ext^g_R(k,M),S)$$ so that $\type(M)=\beta_0(K^g(M))$ and there exists a five-term-type exact sequence
$$\xymatrix@=1em{
\Ext^s_S(\Ext^{g+2}_R(k,M),S)\ar[r] & \Tor_2^R(k,K^g(M))\ar[r] & k\otimes_RK^{g+1}(M)\ar[dl]
\\
& \Ext^s_S(\Ext^{g+1}_R(k,M),S)\ar[r] & \Tor_1^R(k,K^g(M))\ar[r] & 0
}$$ whence the desired formula.
\end{proof}
\begin{corollary}\label{finiteid}
Let $M$ be a finite $R$-module of depth $g$ and dimension $t$. If $\pd_RK^i(M)<\infty$ for all $i=g,...,t$, then $\id_RM<\infty$.
\end{corollary}
\begin{proof}
The hypothesis means that $\beta_l(K^i(M))=0$ for all $l\gg0$ and by Theorem \ref{mu<beta} one has
$$\mu^j(M)\leq\sum_{i=g}^t\beta_{j+i}(K^i(M))=0$$ for $j\gg0$, i.e., $\id_RM<\infty$.
\end{proof}
Bass' conjecture \cite{B} was first proved by Peskine-Szpiro in \cite{PS} and later in a more general setting by Roberts \cite{R}. It states that a local ring admitting a non-zero module of finite injective dimension must be Cohen-Macaulay. The next corollary provides sufficient conditions in terms of projective dimension for a local ring to be Cohen-Macaulay.
\begin{corollary}\label{bassgeneralization}
Let $M$ be a finite $R$-module of depth $g$ and dimension $t$. If $\pd_RK^i(M)<\infty$ for all $i=g,...,t$, then $R$ is Cohen-Macaulay.
\end{corollary}
\begin{proof}
Corollary \ref{finiteid} assures that $\id_RM<\infty$ and thus the result follows from Bass' conjecture.
\end{proof}
\begin{theorem}\label{foxbygeneralization}
If $M$ is a generalized Cohen-Macaulay canonically Cohen-Macaulay $R$-module of dimension $t$ such that $\depth_RK^j(M)>0$ for $j=0,1$, then
$$\beta_j(M)=\mu^{j+t}(K(M))$$ and $$\mu^j(M)=\beta_{j-t}(K(M))$$ for all $j\geq0$. In particular, $\pd_RM<\infty$ if and only if $\id_RK(M)<\infty$ and $\id_RM<\infty$ if and only if $\pd_RK(M)<\infty$.
\end{theorem}
\begin{proof}
By Lemma \ref{schenzellemma} $(i)$, $K(M)$ is Cohen-Macaulay of dimension $t$ and by Proposition \ref{equidimensional} $(i)$, $K(K(M))\simeq M$, that is, $K^i(K(M))=0$ for all $i\neq t$ and $K^t(K(M))\simeq M$. The spectral sequence \ref{eq:1}
$$'E_2^{p,q}=\Tor^R_p(k,K^{s-q}(K(M)))\Rightarrow_p\Ext^s_S(\Ext^{p-q+s}_R(k,K(M)),S)$$ degenerates, so that
$$\Tor_j^R(k,M)\simeq\Tor_j^R(k,K(K(M)))={}'E_2^{j,s-t}\simeq\Ext^s_S(\Ext^{j+t}_R(k,K(M)),S)$$ for all $j\geq0$. Therefore,
$$\beta_j(M)=\dim_k\Tor_j^R(k,M)=\dim_k\Ext^s_S(\Ext^{j+t}_R(k,K(M)),S)=\mu^{j+t}(K(M))$$ for all $j\geq0$. The other equality follows from the fact $K(K(M))\simeq M$.
\end{proof}
Theorem \ref{foxbygeneralization} generalizes \cite[Corollary 3.6]{F} and improves \cite[Corollary 3.3]{FJP}. We record this in the next corollary.
\begin{corollary}\label{foxbyresult}
If $M$ is Cohen-Macaulay $R$-module of dimension $t$, then $$\beta_j(M)=\mu^{j+t}(K(M))$$ and $$\mu^j(M)=\beta_{j-t}(K(M))$$ for all $j\geq0$. In particular, $\pd_RM<\infty$ if and only if $\id_RK(M)<\infty$ and $\id_RM<\infty$ if and only if $\pd_RK(M)<\infty$.
\end{corollary}
\begin{proof}
If $t\geq2$, then the result follows from Theorem \ref{foxbygeneralization}. Otherwise, Corollary \ref{cmcanonicalmodule} and the spectral sequence argument given in the proof of Theorem \ref{foxbygeneralization} asserts the result.
\end{proof}
The next theorem is an attempt to extend part of Theorem \ref{foxbygeneralization} to arbitrary modules. In the next section, we work on the other part.
\begin{theorem}\label{foxbygeneralization2}
Let $M$ be a finite $R$-module of depth $g$ and dimension $t$. If $\pd_RK^i(M)<\infty$ for all $g\leq i<t$, then $$\mu^j(M)=\beta_{j-t}(K(M))$$ for all $j>\depth R+t$. In particular, $\id_RM<\infty$ if and only if $\pd_RK(M)<\infty$.
\end{theorem}
\begin{proof}
The spectral sequence \ref{eq:1} is such that $'E_2^{p,q}=0$ for all $p>\depth R$ and $s-t<q\leq s-g$, so that $$\Tor_j^R(k,K(M))={}'E_2^{j,s-t}\simeq\Ext^s_S(\Ext^{j+t}_R(k,M),S),$$ whence the result.
\end{proof}
We derive other consequences of Theorem \ref{mu<beta}. In particular, we characterize exactly when the type of a finite module is one in terms of its deficiency modules.
\begin{corollary}\label{typecaracterization}
Let $M$ be a finite $R$-module of depth $g$ and dimension $t$. The following statements hold.
\begin{itemize}
\item [(i)] If $M$ is Cohen-Macaulay of dimension $t$, then $$\mu^{t+2}(K(M))-\mu^{t+1}(K(M))\geq\beta_2(M)-\beta_1(M).$$ In particular, if $\pd_RM<\infty$ then $\beta_1(M)\geq\beta_2(M)$.
\item [(ii)] If $\id_RM<\infty$, then $$\beta_0(K^{g+1}(M))\geq\beta_2(K^g(M))-\beta_1(K^g(M)).$$ In particular, if $M$ is also Cohen-Macaulay, then $\beta_1(K(M))\geq\beta_2(K(M))$.
\item [(iii)] $\type(M)=1$ if and only if $K^g(M)$ is cyclic.
\end{itemize}
\end{corollary}
\begin{proof}
Item $(iii)$ follows directly from Theorem \ref{mu<beta}. Item $(i)$ follows from Corollary \ref{cmcanonicalmodule}, Theorem \ref{mu<beta} and Corollary \ref{foxbyresult}, and item $(ii)$ follows from \cite[Theorem 3.7]{BH}, corollaries \ref{cmcanonicalmodule} and \ref{foxbyresult} and item $(i)$.
\end{proof}
The spectral sequence \ref{eq:1} provides more information when the module involved has only two (possibly) non-zero deficiency modules.
\begin{proposition}\label{t=g+rfiniteid}
Let $M$ be a finite $R$-module of depth $g$ and dimension $t$. Suppose $K^i(M)=0$ for all $i\neq g,t$. If $\id_RM<\infty$ then $\beta_j(K^g(M))=\beta_{j+g-t-1}(K(M))$ for all $j>\depth R-g+1$.
\end{proposition}
\begin{proof}
Write $t=g+r$. The spectral sequence \ref{eq:1} has only two vertical lines as the following diagram shows
$$\xymatrix@=1em{
& & \vdots & \vdots & & \vdots & \vdots
\\
\cdots & 0 & \Tor_{r+1}^R(k,K(M)) & 0 & \cdots & 0 & \Tor_{r+1}^R(k,K^g(M))\ar[ddddllll] & 0 & \cdots
\\
& & \vdots & & \iddots & & \vdots
\\
\cdots & 0 & \Tor_2^R(k,K(M)) & 0 & \cdots & 0 & \Tor_2^R(k,K^g(M)) & 0 & \cdots
\\
\cdots & 0 & \Tor_1^R(k,K(M)) & 0 & \cdots & 0 & \Tor_1^R(k,K^g(M)) & 0 & \cdots
\\
\cdots & 0 & k\otimes_RK(M) & 0 & \cdots & 0 & k\otimes_RK^g(M) & 0 & \cdots
}$$
From convergence, we obtain an exact sequence
$$\xymatrix@=1em{
\Ext^s_S(\Ext^{j+g}_R(k,M),S)\ar[r] & \Tor_j^R(k,K^g(M))\ar[r] & \Tor_{j-r-1}^R(k,K(M))\ar[r] & \Ext^s_S(\Ext^{j+g-1}_R(k,M),S)}$$ for all $j\geq0$. Thus, since $\id_RM=\depth R$ (see \cite[Theorem 3.7.1]{BH}), we conclude that $$\Tor_j^R(k,K^g(M))\simeq\Tor_{j-r-1}^R(k,K(M))$$ for all $j>\depth R-g+1$, whence the result.
\end{proof}
Based on Corollary \ref{finiteid} and Proposition \ref{t=g+rfiniteid}, we finish this section by asking the following.
\begin{question}\label{question1}
Let $M$ be a finite $R$-module of depth $g$ and dimension $t$. Is it true that $$\id_RM<\infty\Leftrightarrow\pd_RK^i(M)<\infty,\forall i=g,...,t?$$
\end{question}
\section{Bounding Betti numbers}
In the last section, we bounded the Bass numbers of a module in terms of the Betti numbers of the deficiency modules. In this section, we get a dual version of Theorem \ref{mu<beta} in the following sense.
\begin{theorem}\label{beta<mu}
For a finite $R$-module $M$ of depth $g$ and dimension $t$, the following inequality holds true for all $j\geq0$
$$\beta_j(M)\leq\sum_{i=g}^t\mu^{j+i}(K^i(M)).$$ Moreover, $\mu^0(K(M))=\beta_{-t}(M)$ and $$\beta_{-t+2}(M)-\beta_{-t+1}(M)\geq\mu^2(K(M))-\mu^1(K(M))-\mu^0(K^{t-1}(M)).$$
\end{theorem}
\begin{proof}
By taking a free $R$-resolution $F_\bullet$ of $k$ and an injective $S$-resolution $E^\bullet$ of $S$, the tensor-hom adjunction induces a first quadrant double complex isomorphism $$\Hom_S(F_\bullet,\Hom_S(M,E^\bullet))\simeq\Hom_S(F_\bullet\otimes_Rk,E^\bullet)$$ which yields two spectral sequences as follows
$$E_2^{p,q}=\Ext^p_R(k,\Ext^q_S(M,S))\Rightarrow_p H^{p+q}$$
and
$$'E_2^{p,q}=\Ext^p_S(\Tor_q^R(k,M),S)\Rightarrow_p H^{p+q}.$$
Since $\Tor^R_q(k,M)$ is of finite length for all $q\geq0$, due to local duality, we must have $'E_2^{p,q}=0$ for all $p\neq s$, so that
$$H^j\simeq{}'E_2^{s,j-s}=\Ext^s_S(\Tor_{j-s}^R(k,M),S)$$
for all $j\geq0$. Since $K^{s-q}(M)=\Ext^q_S(M,S)$ for all $q\geq0$, one has the spectral sequence
\begin{equation}E_2^{p,q}=\Ext^p_R(k,K^{s-q}(M))\Rightarrow_p\Ext^s_S(\Tor^R_{p+q-s}(k,M),S).\label{eq:2}\end{equation}
By convergence, we conclude that
$$\beta_j(M)=\dim_k\Ext^s_S(\Tor^R_{(j+s)-s}(k,M),S)\leq\sum_{p+q=j+s}\dim_k\Ext^p_R(k,K^{s-q}(M))=\sum_{i=g}^t\mu^{i+j}(K^i(M)).$$
Now, since $K^i(M)=0$ for all $i<g$ or $i>t$, then $E_2^{p,q}=0$ for all $q<s-t$ or $q>s-g$. In particular, $E_2$ has a corner as follows
$$\xymatrix@=1em{
\vdots & \vdots & \vdots
\\
\Hom_R(k,K^{t-1}(M))\ar[drr] & \Ext^1_R(k,K^{t-1}(M)) & \Ext^2_R(k,K^{t-1}(M)) & \cdots
\\
\Hom_R(k,K(M)) & \Ext^1_R(k,K(M)) & \Ext^2_R(k,K(M)) & \cdots
\\
0 & 0 & 0 & \cdots
\\
\vdots & \vdots & \vdots}$$
Therefore, there exists the isomorphism $$\Hom_R(k,K(M))=E_2^{0,s-t}\simeq\Ext^s_S(\Tor^R_{-t}(k,M),S)$$ and a five-term type exact sequence
$$\xymatrix@=1em{
0\ar[r] & \Ext^1_R(k,K(M))\ar[r] & \Ext^s_S(\Tor^R_{-t+1}(k,M),S)\ar[r] & \Hom_R(k,K^{t-1}(M))\ar[dl]
\\
& & \Ext^2_R(k,K(M))\ar[r] & \Ext^s_S(\Tor^R_{-t+2}(k,M),S)
}$$
whence the result.
\end{proof}
\begin{remark}
It should be noticed that the estimate $\beta_j(M)\leq\sum_{i=g}^t\mu^{j+i}(K^i(M))$ is already known, see \cite[Theorem 3.2]{S}.
\end{remark}
\begin{corollary}\label{t=0}
Let $M$ be a finite $R$-module of dimension $t$. The following statements hold.
\begin{itemize}
\item [(i)] If $t=0$, then $\beta_0(M)=\mu^0(K(M))$ and $$\beta_2(M)-\beta_1(M)\geq\mu^2(K(M))-\mu^1(K(M)).$$ Otherwise, $\depth_RK(M)>0$;
\item [(ii)] If $t=1$, then $\beta_1(M)-\beta_0(M)\geq\mu^2(K(M))-\mu^1(K(M))-\mu^0(K^0(M))$;
\item [(iii)] If $t=2$, then $\beta_0(M)\geq\mu^2(K(M))-\mu^1(K(M))-\mu^0(K^1(M))$;
\item [(iv)] If $t>2$, then $\mu^0(K^{t-1}(M))\geq\mu^2(K(M))-\mu^1(K(M))$.
\end{itemize}
\end{corollary}
\begin{proof}
It follows directly from Theorem \ref{beta<mu}.
\end{proof}
\begin{corollary}\label{artinianlemma}
If $M$ is a finite Artinian $R$-module, then $$\beta_2(M)-\beta_1(M)=\mu^2(K(M))-\mu^1(K(M)).$$
\end{corollary}
\begin{proof}
By the corollaries \ref{typecaracterization} $(i)$ and \ref{t=0} $(i)$, $$\mu^2(K(M))-\mu^1(K(M))\geq\beta_2(M)-\beta_1(M)\geq\mu^2(K(M))-\mu^1(K(M)).$$
\end{proof}
\begin{lemma}\cite[Proposition 2.8.4]{H}\label{CIlemma}
Suppose $R$ is $d$-dimensional with embedding dimension $e$. Then $\beta_1(R/\mathfrak{m})=e$ and the following statements are equivalent.
\begin{itemize}
\item [(i)] $\beta_2(R/\mathfrak{m})=\binom{e}{2}+e-d$;
\item [(ii)] $R$ is a complete intersection.
\end{itemize}
\end{lemma}
\begin{corollary}\label{CIchar}
If $R$ is $d$-dimensional of embedding dimension $e$, then $$\mu^2(k)-\mu^1(k)=\binom{e}{2}-d$$ if and only if $R$ is a complete intersection.
\end{corollary}
\begin{proof}
It follows directly from Corollary \ref{artinianlemma} and Lemma \ref{CIlemma}.
\end{proof}
\begin{corollary}\label{finitepd}
Let $M$ be a finite $R$-module of depth $g$ and dimension $t$. If $\id_RK^i(M)<\infty$ for all $i=g,...,t$, then $\pd_RM<\infty$.
\end{corollary}
\begin{proof}
By hypothesis, we have $\mu^l(K^i(M))=0$ for all $l\gg0$ and by Theorem \ref{beta<mu} one has
$$\beta_j(M)\leq\sum_{i=g}^t\mu^{j+i}(K^i(M))=0$$ for all $j\gg0$, whence $\mu^j(M)=0$ for all $j\gg0$, that is, $\pd_RM<\infty$.
\end{proof}
The \emph{Auslander-Reiten conjecture} \cite{AR} states the following. Given a finite $R$-module $M$, if $$\Ext^j_R(M,M\oplus R)=0$$ for all $j>0$, then $M$ is free.
This long-standing conjecture has been widely studied and several positive answers are already known, see for instance \cite{A,AY,AB,DEL,FJP,HL,LM,NS}. Corollary \ref{finitepd} provides another positive answer for the Auslander-Reiten conjecture for a class of modules. But first, we need a lemma.
\begin{lemma}\cite[Lemma 1 (iii)]{M}\label{pdfinite}
Let $R$ be a local ring and let $M$ and $N$ be finite $R$-modules. If $\pd_RM<\infty$ and $N\neq0$, then
$$\pd_RM=\sup\{j:\Ext^j_R(M,N)\neq0\}.$$
\end{lemma}
\begin{theorem}\label{arconjtheorem}
Let $M$ be a finite $R$-module of depth $g$ and dimension $t$. $M$ is free provided the following statements hold.
\begin{itemize}
\item [(i)] $\id_RK^i(M)<\infty$ for all $i=g,...,t$;
\item [(ii)] There exists an $R$-module $N$ such that $\Ext^j_R(M,N)=0$ for all $j=1,...,d$.
\end{itemize}
\end{theorem}
\begin{proof}
It follows directly from Corollary \ref{finitepd} and Lemma \ref{pdfinite}.
\end{proof}
The next corollary proves the Auslander-Reiten conjecture for a certain class of modules. It generalizes the case of the conjecture established in \cite{FJP}.
\begin{corollary}\label{AR}
The Auslander-Reiten conjecture holds for finite modules having deficiency modules of finite injective dimension over local rings which are factors of Gorenstein local rings.
\end{corollary}
\begin{proof}
It follows immediately from Theorem \ref{arconjtheorem}.
\end{proof}
In the next theorem, as in Theorem \ref{foxbygeneralization2}, we furnish another attempt to remove the generalized Cohen-Macaulayness hypothesis from Theorem \ref{foxbygeneralization}.
\begin{theorem}\label{foxbygeneralization3}
Let $M$ be a finite $R$-module of depth $g$ and dimension $t$. If $\id_RK^i(M)<\infty$ for all $g\leq i<t$, then $$\beta_j(M)=\mu^{j+t}(K(M))$$ for all $j>s+\depth R-t-g$. In particular, $\pd_RM<\infty$ if and only if $\id_RK(M)<\infty$.
\end{theorem}
\begin{proof}
Consider the spectral sequence \ref{eq:2}
$$E_2^{p,q}=\Ext^p_R(k,K^{s-q}(M))\Rightarrow_p\Ext^s_S(\Tor^R_{p+q-s}(k,M),S).$$ The hypothesis and \cite[Theorem 3.7.1]{BH} assure that $E_2^{p,q}=0$ for all $p>\depth R$ and for all $s-t<q\leq s-g$. Therefore, the convergence of $E$ implies that $$\Ext^j_R(k,K(M))\simeq\Ext^s_S(\Tor^R_{j-t}(k,M),S)$$ for all $j>s-\depth R-g$, whence the result.
\end{proof}
The next proposition is an attempt to understand the converse of Corollary \ref{finitepd}.
\begin{proposition}\label{t=g+rfinitepd}
Let $M$ be a finite $R$-module of depth $g$ and dimension $t$, and assume $K^i(M)=0$ for all $i\neq g,t$. If $\pd_RM<\infty$, then $\mu^j(K^g(M))=\mu^{j-g+t+1}(K(M))$ for all $j>\pd_RM+1$.
\end{proposition}
\begin{proof}
The spectral sequence \ref{eq:2} has only two lines as follows
$$\xymatrix@=1em{
0 & 0 & \cdots & 0 & \cdots
\\
\vdots & \vdots & & \vdots
\\
\Hom_R(k,K^g(M))\ar[ddrrr] & \Ext^1_R(k,K^g(M)) & \cdots & \Ext^{p+r+1}_R(k,K^g(M)) & \cdots
\\
\vdots & \vdots & \ddots & \vdots
\\
\Hom_R(k,K(M)) & \Ext^1_R(k,K(M)) & \cdots & \Ext^{p+r+1}_R(k,K(M)) & \cdots
\\
0 & 0 & \cdots & 0 & \cdots
\\
\vdots & \vdots & & \vdots}$$
Such a shape and convergence yields an exact sequence
$$\xymatrix@=1em{
\Ext^s_S(\Tor^R_{j-g}(k,M),S)\ar[r] & \Ext^j_R(k,K^g(M))\ar[r] & \Ext^{j+r+1}_R(k,K(M))\ar[r] & \Ext^s_S(\Tor^R_{j-g+1}(k,M),S)}$$ for all $j\geq0$. Thus, if $j>\pd_RM+1$, then $$\Ext^j_R(k,K^g(M))\simeq\Ext^{j+r+1}_R(k,K(M))$$ and, in particular, $\mu^j(K^g(M))=\mu^{j+r+1}(K(M))$.
\end{proof}
Corollary \ref{finitepd} and Proposition \ref{t=g+rfinitepd} lead us to ask the following.
\begin{question}\label{question2}
Let $M$ be a finite $R$-module of depth $g$ and dimension $t$. Is it true that $$\pd_RM<\infty \Leftrightarrow \id_RK^i(M)<\infty, \forall i=g,...,t?$$
\end{question}
\end{document} |
\begin{document}
\title{Conditions Implying Energy Equality for Weak Solutions of the Navier--Stokes Equations}
\author{Trevor M. Leslie and Roman Shvydkoy}
\email{[email protected]; [email protected]}
\address{Department of Mathematics, Statistics, and Computer Science \\851 S Morgan St, M/C 249 \\ University of Illinois at Chicago, Chicago, IL 60607}
\thanks{This work was partially supported by NSF grants DMS-1210896 and DMS-1515705. The authors thank V. {\v{S}}ver{\'a}k for valuable discussions and constant interest in the work. We also thank the anonymous referees for numerous helpful comments and suggestions.}
\subjclass[2010]{76S05,35Q35}
\maketitle
\begin{abstract}
When a Leray--Hopf weak solution to the NSE has a singularity set $S$ of dimension $d$ less than~$3$---for example, a suitable weak solution---we find a family of new $L^q L^p$ conditions that guarantee validity of the energy equality. Our conditions surpass the classical Lions--Lady{\v{z}}enskaja $L^4 L^4$ result in the case $d<1$. Additionally, we establish energy equality in certain cases of Type-I blowup. The results are also extended to the NSE with fractional power of the Laplacian below $1$.
\end{abstract}
\section{Introduction}
Consider the incompressible Navier--Stokes equations
\begin{equation}
\label{e:momentum}
\p_t u + u\cdot \n u - \nu\Delta u = - \n p
\end{equation}
\begin{equation}
\label{e:divfree}
\n \cdot u = 0
\end{equation}
where $u$ is the velocity field, $p$ is the scalar pressure, and $\nu$ is the viscosity. We restrict attention to the case of the open domain $\ensuremath{\mathbb{R}}^3$ for definiteness. The results below carry over verbatim to $\ensuremath{\mathbb{T}}^3$ and locally to the interior of a bounded domain as well.
By a classical result of Leray \cite{Leray}, it is known that for divergence-free initial data $u_0\in L^2$, there exists a weak solution to \eqref{e:momentum}--\eqref{e:divfree} up to a specified time $T$ such that $u\in L^2 H^1\cap L^\infty L^2$ and
\begin{equation}
\label{e:global_ei}
\int_{\ensuremath{\mathbb{R}}^3\times \{t\}} |u|^2\,dx
\le \int_{\ensuremath{\mathbb{R}}^3\times \{t_0\}} |u|^2\,dx -2\nu \int_{t_0}^t \int_{\ensuremath{\mathbb{R}}^3} |\n u |^2\,dx\,dt
\end{equation}
for all $t\in (0,T]$ and a.e. $t_0\in [0,t]$ including $t_0 = 0$.
Moreover, strong solutions to \eqref{e:momentum}--\eqref{e:divfree} satisfy the corresponding energy equality:
\begin{equation}
\label{e:global_ee}
\int_{\ensuremath{\mathbb{R}}^3\times \{t\}} |u|^2\,dx - \int_{\ensuremath{\mathbb{R}}^3\times \{0\}} |u|^2\,dx
= -2\nu \int_0^t \int_{\ensuremath{\mathbb{R}}^3} |\n u |^2\,dx\,dt.
\end{equation}
Since the introduction of Leray--Hopf solutions, it has been notoriously difficult to establish energy equality for all such solutions. The question, beyond purely mathematical interest, is motivated on physical grounds as well: Knowing \eqref{e:global_ee} rather than \eqref{e:global_ei} rules out the presence of anomalous energy dissipation due to the nonlinearity, a phenomenon normally associated with weak solutions of the inviscid Euler system in the framework of the so-called Onsager conjecture \cite{Onsager} (more on this below). This allows, as stipulated, for example, in the text of Frisch \cite{Frisch}, to precisely equate the classical Kolmogorov residual energy anomaly $\e_\nu \ra \e_0$ of a turbulent flow to the Onsager dissipation in the limit of vanishing viscosity.
Let us give a brief overview of what has been done so far in the direction of resolving the question of energy equality. Lions proved \cite{Lions} that \eqref{e:global_ee} holds for $u\in L^4 L^4$; techniques developed in the classical book of Lady{\v{z}}enskaja, Solonnikov, and Ural{$'$}ceva \cite{Ladyzhenskaya} reproduce this result. Later, Serrin \cite{Serrin} proved energy equality in space dimension $n$ under the condition $ \frac np + \frac2q \le 1$. Shinbrot \cite{Shinbrot} improved upon this result, proving equality when $\frac 2p + \frac2q \le 1$, $ p\ge 4$, independent of the dimension. Kukavica \cite{Kukavica} has proven equality under the assumption $p\in L^2 L^2$; this assumption is weaker than---but dimensionally equivalent to---Lions's result. A number of new conditions have appeared more recently after the introduction of critical conditions for the parallel question of energy conservation for the Euler system (cf. \cite{CET}, \cite{DR}, \cite{CCFS}). In \cite{CCFS}, energy equality is shown to hold for both the Euler and the Navier--Stokes systems for all solutions in the Besov-type regularity class
\def \reg {\mathcal{R}}
\begin{equation}\label{e:Besov}
\reg_0 = \left\{ u \in L^3_t L^3_x : \lim_{|y|\ra 0 } \frac{1}{|y|} \int_{\ensuremath{\mathbb{R}}^n\times[0,T]}|u(x+y,t) - u(x,t)|^3\, dx \, dt = 0 \right\}.
\end{equation}
Note that this class measures regularity $1/3$ in space, ``$L^3$-averaged" over space-time. In particular, the condition defining $\reg_0$ holds if $u\in L^3 B^{1/3}_{3,p}$ for some $p\in [1,\infty)$; the class includes spaces like $L^3W^{1/3, 3}$ and $L^3 H^{5/6}$. On a bounded domain, the energy equality is established in \cite{CFS} for the dimensionally equivalent class $L^3 D(A^{5/12})$, where $A$ is the Stokes operator; see also \cite{FT} for extension to exterior domains. Let us note that by interpolation with the enstrophy class $L^2 H^1$, any solution in $L^4 L^4$ lands in $L^3 B^{1/3}_{3,3} \ss \reg_0$. Thus, Lions's condition can be recovered from Onsager's.
It was not until after most of the results above had been proven that arguments establishing \eqref{e:global_ee} began to make use of the fact that the set of singular points of a weak solution may be confined to a lower-dimensional subset of time-space. This is of course the case for suitable weak solutions, according to the Caffarelli--Kohn--Nirenberg (CKN) theorem \cite{CKN}. In \cite{SS-pressure}, the authors examine the situation where $u$ is bounded in an energy class which is scaling invariant in space, and the energy equality is established by covering the singularity set in accordance with the CKN theorem. Presently, we can address these cases in a systematic way with the use of the class $\reg_0$. Indeed, any condition on the solution $u$ which is spatially both shift-invariant and scale-invariant implies that $u$ belongs to $L^\infty B^{-1}_{\infty,\infty}$, the largest such class by Cannone's theorem \cite{Cannone}. By interpolation with $L^2 H^1 = L^2 B^{1}_{2,2}$, we find again that $u \in L^3 B^{1/3}_{3,3} \ss \reg_0$, and \eqref{e:global_ee} follows (see Section~\ref{ss:typeI}). A cutoff procedure was also previously used in \cite{ShvydkoyGeometric} to establish energy equality; there it was assumed that the singularity was confined to a curve $s \in C^{1/2}([0,T]; \ensuremath{\mathbb{R}}^3)$ and additionally that $u\in L^3 L^{9/2}$, $\n u \in L^3 L^{9/5}((0,T)\times \ensuremath{\mathbb{R}}^3\backslash\mathrm{Graph}(s))_{\mathrm{loc}}$, the assumption dimensionally equivalent to the class $\reg_0$.
In this paper, we establish new sufficient conditions for energy equality which specifically exploit low dimensionality of the singularity set. We consider both classical and fractional dissipation cases. The results are sorted into two categories: the more special case where \eqref{e:global_ee} is established on a time interval of regularity until the first time of blowup, and the more general case of singularities spread over space-time. In the former case the results are stronger. Although it is more restrictive in terms of setup, it is also the case that is most relevant for the blowup problem. The conditions we find depend on the dimension $d<3$ of the singularity set, which is defined precisely below. The bifurcation value of $d$ occurs at $d=1$, or $d=5-4\g$ in the fractional case, where $\g$ is the power of the Laplacian. Recall that if $u$ is a suitable solution of the classical NSE, then by CKN we have $d\le 1$, so the low dimensionality comes as given in this case.
We state our main result in terms of suitable solutions to the classical Navier--Stokes equation, as it appears to be the most addressed case in the literature. However, this result is a special case of a much more general set of criteria depending on values of $d$ and $\g \leq 1$, which we will state in detail in the sections below. To illustrate our results, we make extensive use of diagrams, drawn in $(x = 1/p,\,y=1/q)$ coordinates. The striped regions in our figures correspond to new values of $p$ and $q$ for which the condition $u\in L^q L^p$ implies energy equality. A dotted boundary indicates that values on the boundary are not included, while a solid line indicates included values.
\begin{theorem} \label{t:main}
Suppose $u\in C_w([0,T];L^2) \cap L^2([0,T]; H^1)$ is a suitable weak solution on $[0,T]$, regular on $[0,T)$. Assume that $u \in L^q L^p$, where one of the following conditions holds (see Figure~\ref{fig:d1}):
\begin{align}
\frac{2}{p} + \frac{2}{q} & \le 1, \;\; 3 \leq q \le p \label{t:opt1} \\
\frac{2}{p} + \frac{2}{q} &< 1, \;\; 3 \leq p < q \label{t:opt2}\\
\frac{7}{p}-\frac{6}{p^2} + \frac{2}{q} & < 2, \;\; p<3.
\end{align}
Then $u$ satisfies \eqref{e:global_ee} on the interval $[0,T]$.
\end{theorem}
Theorem \ref{t:main} will be proven in Section~\ref{s:1-slice} as part of a more general result for dimensions $d<3$ on an interval of regularity. The results are summarized in Figures~\ref{fig:dzero}, \ref{fig:done}, \ref{fig:d1}, \ref{fig:d13}. We can also treat the situation where $u$ has one of the following Type-I blowups at $T$:
\begin{equation}\label{e:typeI}
\sup_x |u(x,t)| \leq \frac{C}{\sqrt{T-t}} \text{ or } \sup_{0<t<T} |u(x,t)| \leq \frac{C}{|x|}.
\end{equation}
We call these two scenarios ``Type-I in time'' blowup and ``Type-I in space'' blowup, respectively.
\begin{theorem}\label{t:typeI}
Suppose $u$ is a Leray--Hopf solution on $[0,T]$ which is regular on $[0,T)$. If $u$ experiences Type-I in space blowup as in \eqref{e:typeI}, then $u$ satisfies \eqref{e:global_ee} on $[0,T]$. If $u$ experiences Type-I in time blowup as in \eqref{e:typeI} and additionally $d<1$, where $d$ denotes the Hausdorff dimension of the singularity set at time $T$, then $u$ satisfies \eqref{e:global_ee} on $[0,T]$.
\end{theorem}
General singularity sets which are spread out in space-time will be addressed in Section~\ref{s:gen} for the classical NSE; the results are depicted in Figure~\ref{fig:0<d<1_gen} for $0<d<1$. At $d=1$, the new region collapses to the known classical diagram; see Figure~\ref{fig:d=1_gen}. We give extensions for the fractional dissipation case in Section~\ref{s:frac} and present similar figures for each significantly distinct range of values $d,\g$ pertaining to the time-slice singularity case. On the way, we prove a commutator estimate in Lemma~\ref{p:commut} which may be of independent interest.
\section{Setup}
As our first order of business, we make precise the notion of the regular and singular sets under consideration in our analysis. We follow the setup of \cite{Shvydsing}. First, we define two regularity classes of vector fields, reminiscent of \eqref{e:Besov}. For a subinterval $I\subset [0,T]$, we denote
\begin{equation}
\reg(\ensuremath{\mathbb{R}}^n \times I)
= \left\{ u \in L^3_t L^3_x : \lim_{|y|\ra 0 } \frac{1}{|y|} \int_{\ensuremath{\mathbb{R}}^n\times I}|u(x+y,t) - u(x,t)|^3 \, dx \, dt = 0 \right\}.
\end{equation}
We also define a local version of this class, denoting by $\reg(U\times I)$ the class of vector fields $u$ such that $u\phi\in \reg(\ensuremath{\mathbb{R}}^n\times I)$ for all $\phi\in C_0^\infty(U)$, where $U$ is any open set in $\ensuremath{\mathbb{R}}^n$.
\begin{definition}
Let $u$ be a Leray--Hopf weak solution to the classical Navier--Stokes equations on $\ensuremath{\mathbb{R}}^3\times [0,T]$. We say that a point $(x_0,t_0)$ is an \emph{Onsager regular point} if $u\in \reg(U\times I)$ for some open set $U\subset \ensuremath{\mathbb{R}}^3$ and relatively open interval $I\subset [0,T]$ such that $(x_0, t_0)\in U\times I$. We say that $(x_0, t_0)$ is an \emph{Onsager singular point} if it is not a regular point; further, we denote the (closed) set of all Onsager singular points by $\Sigma_{ons}$ and refer to it as the \emph{Onsager singular set}. The complement of $\Sigma_{ons}$ in $\ensuremath{\mathbb{R}}^3\times[0,T]$ is called the \emph{Onsager regular set} of $u$.
\end{definition}
\begin{REMARK}
The CKN theorem implicates a different type of singularity set which we denote $\Sigma_{CKN}$. This set can be defined as the complement in $\ensuremath{\mathbb{R}}^3\times [0,T]$ of
\begin{equation}
\reg_{CKN} = \{(x_0,t_0)\in \ensuremath{\mathbb{R}}^3\times [0,T] : \exists \text{ nbd. } D\subset \ensuremath{\mathbb{R}}^3\times [0,T] \text{ of } (x_0,t_0) \text{ s.t. } u\in L^\infty(D) \},
\end{equation}
i.e., $\Sigma_{CKN} = (\ensuremath{\mathbb{R}}^3\times [0,T])\backslash\reg_{CKN}$. Clearly $\Sigma_{ons}\subset \Sigma_{CKN}$ so that in particular the bounds on the size of $\Sigma_{CKN}$ from the CKN theorem apply a fortiori to $\Sigma_{ons}$.
\end{REMARK}
Our next item is to introduce a local energy equality which is fundamental to our work. Suppose $(u,p)$ is a Leray--Hopf weak solution to the Navier--Stokes system on $\dom \times [0,T]$, and consider the following local energy equality for $0\le s<t\le T$:
\begin{multline}\label{localee}
\int_{\dom} |u(t)|^2 \phi - \int_{\dom} |u(s)|^2 \phi - \int_{\dom \times (s,t)} |u|^2 \p_t \phi \\= \int_{\dom \times (s,t)} |u|^2 u \cdot \n \phi + 2 \int_{\dom \times (s,t)} p\ u \cdot \n \phi - 2\nu \int_{\dom \times (s,t)} |\n u|^2 \phi - 2 \nu \int_{\dom \times (s,t)} u \otimes \n \phi : \n u.
\end{multline}
The main idea of the present work is to construct a sequence of test functions which satisfy this equality and to show that when we pass to the limit, the local energy equality reduces to \eqref{e:global_ee}. It is shown in \cite{Shvydsing} that \eqref{localee} is valid for all $\phi\in C_0^\infty((\ensuremath{\mathbb{R}}^3\times [0,T])\backslash \Sigma_{ons})$ in the case of the Euler equations ($\nu = 0$). Straightforward modifications of the proof in \cite{Shvydsing} show that \eqref{localee} is also valid when $\nu>0$. In fact, an approximation argument shows that \eqref{localee} remains valid for functions $\phi$ (supported outside $\Sigma_{ons}$, as before) which belong only to $W^{1,\infty}$ rather than $C^\infty$.
Recall that Leray--Hopf solutions satisfy $u(t)\to u(0)$ strongly in $L^2(\ensuremath{\mathbb{R}}^3)$ as $t\to 0^+$. Therefore, in order to establish \eqref{e:global_ee}, it suffices to prove energy balance on the time interval $[s,T]$ for each $s\in (0,T)$; the (Onsager) singularity set at the initial time is irrelevant for our analysis. Therefore, we introduce the following singularity set, which we call the \emph{postinitial singularity set} $S$ (or simply the \emph{singularity set} when it will cause no confusion), defined by
\[
S = \Sigma_{ons}\backslash(\ensuremath{\mathbb{R}}^3\times \{0\}).
\]
Working with $S$ rather than all of $\Sigma_{ons}$ allows us to obtain better conditions guaranteeing energy balance for solutions which have arbitrary divergence-free initial condition $u_0\in L^2$ (but which have small postinitial singularity sets). A priori, this replacement requires us to assume $s>0$ rather than $s\ge 0$ in \eqref{localee}. However, as pointed out above, we may extend to $s=0$ by continuity, so that we may consider $S$ instead of $\Sigma_{ons}$ at no real cost. We will make the standing assumption that the Lebesgue measure $|S|$ of $S$ in $\ensuremath{\mathbb{R}}^3\times [0,T]$ is equal to zero.
Let us label each of the terms in \eqref{localee} (in the same order as before) and rewrite the equation as
\begin{equation}
A - B -C = D +2 P - 2\nu E - 2 \nu F.
\end{equation}
Having established the above considerations and notation, we can now describe the main idea more clearly and succinctly. Given a Leray--Hopf solution $u$ and its (postinitial) singularity set $S$, we seek a sequence $\{\phi_\d\}_{\d>0}$ of test functions such that
\begin{itemize}
\item $\supp \phi_\d \subset (\ensuremath{\mathbb{R}}^3\times[0,T])\backslash S$ and $\phi_\d\in W^{1,\infty}(\ensuremath{\mathbb{R}}^3\times [0,T])$ (so \eqref{localee} is valid for all $0<s<t \le T$);
\item $0\le \phi_\d\le 1$ and $\phi_\d\to 1$ pointwise a.e. as $\d\to 0$ (which is possible since $|S|=0$), guaranteeing the convergence of the terms $A$, $B$, and $E$ to their natural limits
\[
\int_{\dom} |u(t)|^2, \int_{\dom} |u(s)|^2, \int_{\dom \times (s,t)} |\n u|^2,
\]
respectively. These convergences follow from the fact that $u\in L^\infty L^2\cap L^2 H^1$, together with the dominated convergence theorem.
\end{itemize}
When $A$, $B$, and $E$ tend to their natural limits, we see that in order to establish energy balance on $[s,T]$, it suffices to prove that the other terms $C$, $D+2P$, and $F$ vanish as $\d\to 0$. In order to ensure this, we make integrability assumptions on the solution $u$, i.e., $u\in L^q([0,T],L^p(\ensuremath{\mathbb{R}}^3))$ for some pair $(p,q)$ of integrability exponents. The set of admissible values for $p$ and $q$, which will make the terms $C$, $D+2P$ and $F$ vanish, depends on the integrability properties of the functions $\phi_\d$, which in turn depend on the size and structure of $S$. Therefore, we continue our discussion in the sections below, where we restrict attention to certain kinds of singularity sets $S$. Note that in the discussion below, we generally suppress the notation $\d$ from the subscript of our sequence of test functions.
\section{Energy equality at the first time of blowup}\label{s:1-slice}
The case addressed in this section pertains to the situation when the singularity $S$ occurs only at the critical time $T$. For notational convenience, we will replace the interval $[0,T]$ with $[-1,0]$, $0$ being critical, and thus assume that $S\subset \ensuremath{\mathbb{R}}^3\times \{0\}$.
\subsection{Construction of the test function}
We assume that $S$ has Hausdorff dimension $d<3$. (Recall that if $(u,p)$ is a suitable solution, then by CKN, we have $d\leq 1$.) For convenience, we will identify $S$ with its spatial slice at time $t=0$. We denote by $\cH_d(S)$ the $d$-dimensional Hausdorff measure of $S$ and assume that $\cH_d(S)<\infty$. In what follows below, we take advantage of the fact that $S$ belongs only to the time-slice at $t=0$ and that we can therefore cover $S$ with cylinders scaled arbitrarily in time. Specifically, let us denote by $B_r(x)$ the open ball $\{y\in \ensuremath{\mathbb{R}}^3: |y-x| < r\}$. Choose $\d\in (0,1)$, then choose finitely many $x_i \in \ensuremath{\mathbb{R}}^3$, $r_i\in (0,\d)$ for all $i$, such that $S\subset \bigcup_i B_{r_i}(x_i)$ and $\sum_{i=1}^\infty r^d_i \le \cH_d(S)+1$. Denote $I_i = (-2r_i^{\a},\,2r_i^{\a})$ (where $\a$ is determined below); let $Q_i$ denote the cylinder $Q_i = B_{r_i}(x_i)\times (-r_i^{\a},\,r_i^{\a})$, and put $Q = \bigcup_i Q_i$, $I = \bigcup_i I_i$. Let $\psi(s)$ be the usual (symmetric, radially decreasing) cutoff function on the line with $\psi(s) = 1$ on $|s|<1.1$ and $\psi(s)$ vanishing on $|s|>1.9$. Let $\phi_i(x,t) = \psi(|x-x_i|/r_i) \psi(t/r_i^{\a})$. Define $\phi = 1 - \sup_i \phi_i$. Clearly, $\phi$ vanishes on an open neighborhood of $Q$, while any partial derivative $\p \phi$ is supported within the union of the double-dilated cylinders, which we denote by $Q^*$. Note that the Lebesgue measure of the sequence of $Q^*$'s vanishes as $\d \ra 0$; the same is true of the measure of the sequence of $I$'s. Also note that $\phi$ is differentiable a.e.\ and
\[
|\p \phi(x,t) | \leq \sup_i |\p \phi_i(x,t)| \, \text{ a.e.; see \cite[Theorem 4.13]{Evans}}.
\]
Therefore, for any $a>0$, we have the following bounds, which hold for a.e. $t$:
\begin{subequations} \label{e:phi}
\begin{align}
\int_{\dom} |\p_t \phi(x,t)|^a\,dx & \leq \sum_i \int_{\dom} |\p_t \phi_i (x,t)|^a\,dx \leq \sum_i r_i^{-\a a + 3}\chi_{I_i}(t) \label{e:phit} \\
\int_{\dom} |\n_x \phi(x,t)|^a\,dx &\leq \sum_i \int_{\dom} |\n_x \phi_i(x,t)|^a\,dx \leq \sum_i r_i^{-a+3}\chi_{I_i}(t). \label{e:phix}
\end{align}
\end{subequations}
\subsection{Type-I singularities}\label{ss:typeI}
Generally we say that a solution $u$ of the classical Navier--Stokes equations experiences a Type-I blowup at time $0$ if it stays bounded in some scale-invariant functional space:
\[
\| u\|_{X([-1,0]; Y)} \leq C.
\]
Examples include those stated in \eqref{e:typeI}. It also occurs naturally in the case of a self-similar blowup with critical decay of the profile at infinity,
\[
u(x,t) = |t|^{-1/2} U(x / |t|^{1/2}),\quad |U(y)| \leq C / |y|, \text{ as } |y| \to \infty.
\]
In this case, $u$ clearly belongs to the Lions space $L^4 L^4$ and therefore satisfies the energy equality. A more subtle situation occurs in the case of Type-I in space only or Type-I in time only blowup, which is addressed in our \thm{t:typeI}. By Type-I in space, we mean a weak solution on a time interval $[-1,0]$ with the bound given by the second inequality in \eqref{e:typeI} (technically, in this case, multiple blowups are possible on the interval).
Now, any solution $u$ on the time interval $[-1,0]$ which experiences Type-I in space blowup belongs to the class $L^\infty(-1,0;L^{3,\infty}(\ensuremath{\mathbb{R}}^3))$. It can be seen in (at least) two different ways that solutions in this class necessarily satisfy the energy balance relation. First, we see that $L^2 L^6 \cap L^\infty L^{3,\infty}\subset L^4 L^4$, simply by interpolation, so that the Lions criterion can be used. Alternatively, we can apply Cannone's theorem \cite{Cannone} to the space $L^{3,\infty}$, which is invariant with respect to both shifts $f\mapsto f(\cdot - x_0)$ and rescalings of the form $f \mapsto \l f(\l \cdot)$, allowing us to conclude that $L^{3,\infty}$ embeds in the largest space with these properties, namely, $B^{-1}_{\infty, \infty}$. By interpolation with $L^2 H^1 = L^2 B^{1}_{2,2}$, we naturally find $u \in L^3 B^{1/3}_{3,3} \ss \reg_0$, which implies energy equality as mentioned in the introduction. This settles the first part of \thm{t:typeI}.
By Type-I in time, we mean a regular solution $u$ on time interval $[-1,0)$ that experiences blowup at time $t=0$ and satisfies the first inequality in \eqref{e:typeI}. If $u$ is a Type-I in time solution, then $u\in L^r L^\infty$ for any $r<2$. If additionally we have that $0\le d<1$, then we can choose $r<2$ large enough so that the pair $(p,q) = (\infty, r)$ satisfies \eqref{opt1} below. We will see that this is a sufficient condition to guarantee \eqref{e:global_ee}. This resolves the second claim in \thm{t:typeI}.
\subsection{Vanishing of the terms $C,D,P,F$ in the one-time singularity case.} We now turn to the proof of \thm{t:main}, which encompasses the next two subsections. Actually, we will address the time-slice singularity case whenever $S$ has Hausdorff dimension $d<3$, giving a range of $L^q L^p$ conditions for which energy equality holds. Of course, the case $d=1$ is the one which is relevant for purposes of Theorem \ref{t:main}.
The outline of our argument is as follows: We will start with basic estimates on the terms $C$, $D$, $P$, and $F$, the terms in the local energy equality that depend on derivatives of $\phi$ (and hence are singular). In this subsection, we give conditions on $p$, $q$, $d$, and $\a$ that guarantee that each of the terms $C$, $D+2P$, and $F$ vanish as $\d\to 0$; as argued above, energy balance is achieved when all of these vanish concurrently. We treat $d$ as fixed; therefore, for each value of $\a>0$, we get a different collection of pairs $(p,q)$ for which $u\in L^q L^p$ implies energy balance. In the following subsection, we take the union over $\a$ of all such regions to obtain all possible pairs $(p,q)$ for which our method is valid. However, in order to record our results as explicitly as possible, we frame the process of taking this union as an optimization problem; see below. Once this optimization problem has been solved, there is nothing more to prove, and we conclude our discussion of the one-time singularity at that point.
Let us bound term $C$ first. By H\"older's inequality, we have that for all $p,q \geq 2$,
\begin{equation}\label{e:C}
\begin{split}
|C|& \leq \|u\|_{L^q(I;L^p)}^2 \left( \int_{-1}^0 \left( \int_{\dom} |\p_t \phi(x,t)|^{\frac{p}{p-2}}\,dx \right)^{\frac{p-2}{p}\frac{q}{q-2}} dt \right)^{\frac{q-2}{q}} \\
& \leq \|u\|_{L^q(I;L^p)}^2 \left( \int_{-1}^0 \left( \sum_i r_i^{- \frac{\a p}{p-2} + 3}\chi_{I_i}(t) \right)^{\frac{p-2}{p}\frac{q}{q-2}} dt \right)^{\frac{q-2}{q}}.
\end{split}
\end{equation}
Note that if $q<\infty$, then we have $\|u\|_{L^q(I,L^p)} \ra 0$ since $|I|\to 0$ as $\d\to 0$. So in the case $q<\infty$, in order to conclude that $C\to 0$, it suffices to prove that the term in parentheses is bounded as $\d\to 0$; the latter need not vanish. Vanishing of this term (as well as $D+2P$ and $F$; see below) for certain pairs $(p,\infty)$ will follow by interpolation.
The viscous term $F$ is bounded by
\begin{equation}\label{}
|F| \leq \int_{Q^*} |u|^2 |\n \phi|^2\,dx\,dt + \int_{Q^*} |\n u|^2\,dx\,dt.
\end{equation}
Clearly, the second integral on the right vanishes as $\d \ra 0$. For the first integral, we have a bound similar to $C$:
\begin{equation}\label{e:F}
\int_{Q^*} |u|^2 |\n \phi|^2\,dx\,dt \leq \|u\|_{L^q(I;L^p)}^2 \left( \int_{-1}^0 \left( \sum_i r_i^{-\frac{2p}{p-2} + 3} \chi_{I_i}(t) \right)^{\frac{p-2}{p} \frac{q}{q-2}}\,dt \right)^{\frac{q-2}{q}}.
\end{equation}
Before we proceed with estimates for $D$ and $P$, let us produce conditions on $p$ and $q$ that guarantee vanishing of the right-hand sides of \eqref{e:C} and \eqref{e:F}. The following lemma will assist us.
\begin{LEMMA}\label{l:conv} Let $d$, $\d$, $r_i$, $I_i$ be as above, and let $\s,s$ be positive numbers. Suppose the sum $H = \sum_i r_i^d$ is finite. Then the inequality
\begin{equation}
\label{lemma_ineq}
\int \left( \sum_i r_i^{-\s}\chi_{I_i}(t) \right)^s\,dt \lesssim H^s
\end{equation}
holds whenever $s\ge 1$ and $s(\s + d)\le \a$ or $s<1$ and $s(\s + d)< \a$; the implied constant is independent of $\d$. When $d = 0$, the above holds (trivially) under the nonstrict assumption $s\s \leq \a$.
\end{LEMMA}
\begin{proof}
Case 1. $s\ge 1$. By H\"older's inequality, we have
\begin{equation}\label{}
\begin{split}
\left( \sum_i r_i^{-\s}\chi_{I_i}(t) \right)^s
= \left( \sum_i r^d_i r_i^{-\s-d}\chi_{I_i}(t) \right)^s
& \leq \left( \sum_i r_i^{d} \right)^{s-1} \sum_i r_i^{d-(\s+d)s}\chi_{I_i}(t) \\
& = H^{s-1} \sum_i r_i^{d-(\s+d)s}\chi_{I_i}(t) .
\end{split}
\end{equation}
Integrating in time, we obtain
\[
\int \left( \sum_i r_i^{-\s}\chi_{I_i}(t) \right)^s\,dt \lesssim H^{s-1} \sum_i r_i^{d-(\s+d)s+\a}.
\]
The sum is at most $H$ whenever the condition stated in the lemma is satisfied.
Case 2. $s<1$. For each $j\in \ensuremath{\mathbb{Z}}$, define $R_j:=\{r_i: r_i \in [2^{-j},\,2^{-j+1})\}$, and let $N_j$ denote the cardinality of $R_j$. Clearly, $N_j\lesssim 2^{jd} H$ and $N_j=0$ for $j\le 0$. Also denote $J_j = [-2^{(-j+1)\a},\,2^{(-j+1)\a}]$. So if $r_i \in R_j$, then $r_i^{-\s}\chi_{I_i}(t)\le 2^{j\s}\chi_{J_j}(t)$. Therefore,
\[
\begin{split}
\int \left( \sum_i r_i^{-\s}\chi_{I_i}(t) \right)^s\,dt & \le \int \left( \sum_j N_j 2^{j\s}\chi_{J_j}(t)\right)^s \,dt
\lesssim H^s \int \left( \sum_j 2^{j(\s+d)}\chi_{J_j}(t)\right)^s \,dt \\
& \leq H^s \int \sum_{j=1}^\infty 2^{j(\s+d)s}\chi_{J_j}(t)\,dt \lesssim H^s \sum_{j=1}^\infty 2^{j((\s+d)s - \a)}.
\end{split}
\]
The final sum converges to a constant independent of $\d$ by the assumption of the lemma.
\end{proof}
With \lem{l:conv} in hand, we continue our discussion of the terms $C$ and $F$ for various values of $p,q \geq 2$ and arbitrary $\a>0$. In order to obtain the desired conditions which guarantee vanishing of these terms, it suffices to translate between the quantities $s, \s$ in the lemma and the integrability exponents $p$ and $q$ at hand.
First, note that $p<q \iff \frac{p-2}{p}\frac{q}{q-2}<1$ and $p\geq q \iff \frac{p-2}{p}\frac{q}{q-2}\geq 1$. Applying \lem{l:conv} with $\s = \frac{\a p}{p-2} - 3$ and $s = \frac{p-2}{p}\frac{q}{q-2}$, we see that $C$ vanishes whenever
\begin{equation}
\label{C}
\frac{3-d}{p} + \frac{\a}{q}\le \frac{3-d}{2},\; p\ge q \geq 2;\quad
\frac{3-d}{p} + \frac{\a}{q}< \frac{3-d}{2},\; 2\leq p< q.
\end{equation}
Reasoning similarly, we have $F\to 0$ whenever
\begin{equation}
\label{F}
\frac{3-d}{p} + \frac{\a}{q}\le \frac{3-d + \a-2}{2},\; p\ge q \geq 2;\quad
\frac{3-d}{p} + \frac{\a}{q} < \frac{3-d + \a-2}{2},\;2 \leq p< q.
\end{equation}
In both cases, the conditions are nonstrict if $d=0$.
We now turn our attention to the terms $D$ and $P$. The estimates we use to bound these terms depend on whether $p\ge 3$ or $p<3$; we consider each case in turn. First, suppose $p,q\in [3,\infty)$. Using H\"older's inequality together with the bound $\|up\|_{L^{q/3}(I;L^{p/3})}\lesssim \|u\|_{L^q(I;L^p)}^3$, we have the following bound:
\begin{equation}
\label{}
|D+2P| \lesssim \|u\|_{L^qL^p}^3 \left( \int_{-1}^0 \left( \sum_i
r_i^{-\frac{p}{p-3} +3} \chi_{I_i}(t) \right)^{\frac{p-3}{p} \frac{q}{q-3}} dt \right)^{\frac{q-3}{q}}.
\end{equation}
Arguing as before with the use of Lemma~\ref{l:conv}, we see that $|D+2P|\to 0$ whenever
\begin{equation}
\label{DP}
\frac{3-d}{p} + \frac{\a}{q}\le \frac{2 + \a - d}{3},\; 3\le q\le p<\infty;\quad
\frac{3-d}{p} + \frac{\a}{q}< \frac{2 + \a - d}{3},\; 3\leq p< q<\infty,
\end{equation}
with the nonstrict inequality in both cases if $d=0$.
In the case $p<3$, we can no longer use H\"older's inequality alone to bound the term $D+2P$. Instead, we will use interpolation with the enstrophy norm. When $p<3$, we have
\begin{equation}
\label{e:DPpl3}
|D+2P|\lesssim \|u\|_{L^2 H^1}^{3\b} \|u\|_{L^q L^p}^{3(1-\b)}\|\n \phi\|_{L^\s L^\infty},
\end{equation}
where
\begin{equation}
\label{e:defbeta}
\frac13 = \frac{\b}{6} + \frac{1-\b}{p}\implies
\b = \frac{6-2p}{6-p};
\quad \frac1\s = 1 - \frac{3\b}{2} - \frac{3(1-\b)}{q} = \frac{2pq - 3p - 3q}{(6-p)q}.
\end{equation}
Now
\[
\|\n \phi\|_{L^\s L^\infty}^\s = \int \sup_i r_i^{-\s} \chi_{I_i}(t)\,dt \le \int \sup_j 2^{j\s} \chi_{J_j}(t)\,dt \lesssim \sum_j (2^{\a-\s})^{-j},
\]
and the sum on the right is bounded whenever $\s < \a$. Substituting in for $\s$ and simplifying, we obtain
\begin{equation}
\label{DPenstr}
\frac{2 + \a}{p} + \frac{\a}{q} < \frac{1 + 2\a}{3}, \quad p<3 \leq q.
\end{equation}
\subsection{Optimization and the main result} Let us now discuss the optimal values of $\a$, beginning with the case $p\ge 3$. Here we have the three constraints \eqref{C}, \eqref{F}, and \eqref{DP}, representing a triple of parallel lines. The $C$-line pivots around the energy space $L^\infty L^2$ and rotates counterclockwise (toward a more stringent condition) as $\a$ increases. The $DP$-line pivots around $L^3 L^\frac{9-3d}{2-d}$ and also rotates counterclockwise (but toward a more relaxed condition) as $\a$ increases. The $F$-line pivots around $L^2 L^\frac{6-2d}{1-d}$ counterclockwise, relaxing as $\a$ increases. (Note that some exponents can become negative for larger $d$'s; however, the region beyond $p,q=\infty$ can be disregarded at this moment.) Therefore, the conditions become optimal when the two lower lines coincide. Simple linear algebra shows that for $d<1$, the $C$- and $DP$-lines are lower; for $d>1$, the $C$- and $F$-lines are lower; and at $d=1$, all three lines coincide at their optimal tilt. So in the case $d\leq 1$, we set the $C$- and $DP$-lines equal to one another and find that $\a = \frac{5-d}{2}$. (Clearly, $\a \geq 2$ in this case, and so the condition on $F$ is more relaxed than the one on $C$.) When $d >1$, we set the $C$- and $F$-lines equal to one another and recognize $\a = 2$ as being optimal. We thus obtain the following conditions, which guarantee energy equality:
\begin{align}
\frac{2(3-d)}{p} + \frac{5-d}{q} & \le 3-d, \;\; 3 \le p,\;q\le p, \; d \leq 1 \label{opt1} \\
\frac{2(3-d)}{p} + \frac{5-d}{q} &< 3-d, \;\; 3 \leq p < q,\; d \leq 1 \label{opt2}\\
\frac{3-d}{p} + \frac{2}{q} &< \frac{3-d}{2}, \;\; 3 \leq p < q,\; 1<d < 3. \label{opt3}
\end{align}
Before proceeding, we make a few remarks concerning these conditions. First, we note that even though \eqref{DP} is valid only inside the square where $p\in [3,\infty)$ and $q\in [3,\infty)$, we can interpolate in order to include certain pairs $(p,q)$ outside of this region in \eqref{opt1}--\eqref{opt3}. Second, in the case $d>1$, the optimal line drops below the Lions space $L^4 L^4$ in such a way that only the case $p<q$ yields new results; this line intersects the segment $[L^4 L^4, L^\infty L^3]$ at the space $L^\frac{6+2d}{3-d} L^\frac{6+2d}{1+d}$. Third, the inequality \eqref{opt2} once again becomes nonstrict in the case $d=0$. And finally, for the values $0\leq d \leq 1$, the point on the bisectrix $p=q$ separating the open and closed regions is $L^{\frac{11-3d}{3-d}} L^{\frac{11-3d}{3-d}}$. When $d=1$, it becomes the classical Lions space $L^4 L^4$.
Let us now address the case $p<3$. In this case, we use \eqref{DPenstr} to replace \eqref{DP} in the previous argument, while \eqref{C} and \eqref{F} are understood under the lighter restriction $p,q \geq 2$. Also, note that the region under consideration now lies only in the cone $q>p$. The new $DP$-line pivots around the enstrophy point $L^2 L^6$ counterclockwise as $\a$ increases. For $d\leq 1$, a non-trivial new region appears as $\a$ increases beyond $\a = \frac{5-d}{2}$. The $F$-line is less restrictive than the $C$-line, so we can disregard it. At $\a = \frac{5-d}{2}$, the $C$- and $DP$-lines intersect at $L^{\frac{15 - 3d}{3-d}}L^3$. The point of intersection reaches its final state at the energy space $L^\infty L^2$ when $\a = 4$. In the process, it traverses the curve given by
\begin{equation}
\label{xy_curve}
(18-6d)x^2 + (6-6d)xy - (21-7d)x - (7 - 3d)y + 6-2d = 0
\end{equation}
in coordinates $x = p^{-1}$ and $y = q^{-1}$. Notice that the curve in fact contains both $L^2 L^6$ and $L^\infty L^2$ for all values of $d$, as we expect it to. (Indeed, these points are the two axes of rotation for our lines.) However, since we are restricted to the case when $p<3$, the part of the curve that we can use is limited to that connecting $L^{\frac{15 - 3d}{3-d}}L^3$ and $L^\infty L^2$. The curve is a part of a hyperbola, as can be seen from the negative Hessian. (The exception is when $d=1$, in which case the curve is a parabola.)
For $d >1$, the two lines $C$ and $F$ coincide when $\a = 2$; at this value of $\a$, the new $DP$-line cuts through the $C$-line at space $L^\frac{6+6d}{3-d} L^\frac{6+6d}{1+3d}$, which is already inside the strip $p<3$. It does not make sense to decrease $\a$ since doing so would move the $F$- and $DP$-lines clockwise inside the already discovered region. Increasing $\a$ above $2$ makes the $F$-line more relaxed, and the intersection point of $C$- and $DP$-lines falls on the same curve \eqref{xy_curve}. This time, however, the curve begins farther to the right at the space $L^\frac{6+6d}{3-d} L^\frac{6+6d}{1+3d}$ and ends at $L^\infty L^2$, corresponding to the fixed range $2\leq \a \leq 4$.
Finally, recall that in all the arguments above, we have assumed $q<\infty$ in order to ensure that the vanishing of the terms comes from the norm $L^q(I; L^p(\dom))$ and not from the Hausdorff measure of $S$. We also assumed $p<\infty$ in order to ensure boundedness of the Riesz transforms on $L^p$. (This was necessary in order to bound the pressure term directly.) However, the cases $p = \infty$ and $q = \infty$ follow automatically by interpolation with the Leray--Hopf line, which lands the solution strictly inside the quadrant $q,p<\infty$.
Figures~\ref{fig:dzero}--\ref{fig:d13} illustrate the new regions uncovered in each case.
\begin{figure}
\caption{$d=0$.}
\label{fig:dzero}
\caption{$0<d<1$.}
\label{fig:done}
\end{figure}
\begin{figure}
\caption{$d=1$.}
\label{fig:d1}
\caption{$1<d < 3$.}
\label{fig:d13}
\end{figure}
\section{General singularities}\label{s:gen}
Even if the energy equality is known on each time interval of regularity including at the critical time, it is unknown whether energy equality holds globally on the time interval of existence of the weak solution. This is due to the lack of a proper gluing procedure that could restore energy equality from pieces. In this section, we therefore address the question of when the singularity set $S$ is spread in space-time. In this case, we have no freedom in choosing the time scale of the covering cylinders; rather, the scale should already be built into the definition of the Hausdorff dimension. We choose to work with the classical parabolic dimension, i.e., $\a=2$ in our terms.
The main technical difference of this general case compared to the case of a one-time singularity is that when $s < 1$, the conclusion of Lemma \ref{l:conv} may not be valid. Instead, we can only prove that the left side of \eqref{lemma_ineq} is bounded above by $H$ (multiplied by some constant which is independent of $\d$) under the stronger assumption $\s s + d\le \a =2$. This is achieved simply by bringing the exponent $s$ inside the sum. However, the condition $\s s + d \le \a = 2$ is the sharpest possible under which the conclusion of the lemma holds, as one can see by considering an example of the opposite extreme, where all the intervals $I_i$ are disjoint. However, the proof of the lemma in the case $s\ge 1$ does not depend on the intervals $I_i$ being nested; the proof and conclusion remain valid in this case.
Assume then that $S$ has finite $d$-dimensional parabolic Hausdorff measure for some $d\in[0, 1]$ but no other special properties. (Our method does not yield anything new for $d>1$, so we do not treat these values of $d$.) Let $B_r(x)$, $\d$ be as above; then choose finitely many $(x_i, t_i)\in \ensuremath{\mathbb{R}}^3 \times (0,T]$ and $r_i\in (0,\d)$ such that $S\subset Q:=\bigcup_i Q_i$, where $Q_i = B_{r_i}(x_i)\times (t_i - r_i^2,\;t_i + r_i^2)$. Write $I_i = (t_i - 2r_i^2,\;t_i + 2r_i^2)$. Let $Q^*$ denote the union of the double-dilated cylinders and $I = \bigcup_i I_i$. Let $\psi$ be as above, and put $\phi_i = \psi(|x-x_i|/r_i) \psi(|t-t_i|/r_i^2)$ and $\phi = 1 - \sup_i \phi_i$.
Let us note that in the special case $d=0$, $S$ is once again a finite point set. The energy balance relation holds on each of the finitely many time-slices associated to each of the points in $S$ under the criteria of the previous section. Therefore, it holds under these criteria for a general $0$-dimensional singularity set. Below we assume that $d\in (0,1]$.
We also note that, as before, we have $|I|\to 0$ as $\d\to 0$, even though the intervals $I_i$ are no longer nested. This is because
\begin{equation}
\label{e:Ito0}
|I|\le \sum_i |I_i|\lesssim \sum_i r_i^{d + (2-d)}< \d^{2-d} \sum_i r_i^d
\end{equation}
and because $d<2$ in all cases considered in this section.
Assume $p\le q$. Using bounds analogous to \eqref{e:C}, \eqref{e:F}, we see that $C,F\to 0$ whenever $\left(\frac{2p}{p-2} - 3\right)\frac{p-2}{p}\frac{q}{q-2} + d \le 2$, or, simplifying,
\begin{equation}
\label{e:CFgen}
\frac3p + \frac{2-d}{q} \le \frac{3-d}{2} \quad (p\le q).
\end{equation}
Similarly, if $\infty>q\ge p\ge 3$, then $D,P\to 0$ whenever
\begin{equation}
\label{e:DPgen_3pq}
\frac3p + \frac{2-d}{q} \le \frac{4-d}{3}
\quad (3\le p\le q<\infty).
\end{equation}
Of course, when $d\in [0,1]$, we have $\frac{4-d}{3}\le \frac{3-d}{2}$, so the restriction \eqref{e:DPgen_3pq} is limiting in this case.
On the other hand, if $p<3$, then we use \eqref{e:DPpl3} and \eqref{e:defbeta}. Estimating
\[
\|\n \phi\|_{L^\s L^\infty}^\s
\le \sum_i \int r_i^{-\s} \chi_{I_i}(t)\,dt
\le \sum_i r_i^{2-\s},
\]
we see that $D,P\to 0$ whenever $2-\s\ge d$, i.e.,
\begin{equation}
\label{e:DPgen_p3q}
\frac{4-d}{p} + \frac{2-d}{q} \le \frac{5-2d}{3}, \quad p<3.
\end{equation}
Notice that we could have also reached this inequality by interpolation. This argument covers all terms under consideration in the case $p\le q$; it remains to deal with the case when $p>q$. Most of the analysis from the single time-slice situation carries over in this case since Lemma \ref{l:conv} does not require nested $I_i$ in the case $s\ge 1$. However, the lack of freedom to choose $\a$ restricts the applicable range of pairs $(p,q)$. After translating the condition $s(\s + d)\le 2$ into conditions on $C,D,P,F$, we see that $D,P$ are most stringent when $p\ge q\ge 3$ and correspond to the condition
\[
\frac{3-d}{p} + \frac{2}{q} \le \frac{4-d}{3}, \quad 3\le q\le p.
\]
Using interpolation to treat the cases $p=\infty$, $q<3$, and $q = \infty$ as well, we can state our criteria for energy balance as follows:
\begin{subequations}
\label{e:gen_rest}
\begin{align}
\frac{2(3-d)}{p} + \frac{5-d}{q} \le 3-d, \quad q\le 3\le p \\
\frac{3-d}{p} + \frac{2}{q} \le \frac{4-d}{3}, \quad 3\le q\le p \\
\frac{3}{p} + \frac{2-d}{q} \le \frac{4-d}{3}, \quad 3\le p\le q \\
\frac{4-d}{p} + \frac{2-d}{q} \le \frac{5-2d}{3}, \quad p\le 3\le q.
\end{align}
\end{subequations}
As $d\to 1^-$, these criteria collectively collapse to the region implicated by the Lions $L^4 L^4$ condition. However, when $d\in (0,1)$ we obtain a new region bounded by the points $L^{\frac{5-d}{3-d}}L^\infty$, $L^3 L^{\frac{9-3d}{2-d}}$, $L^{\frac{15 - 3d}{4-d}} L^{\frac{15 - 3d}{4-d}}$, $L^{\frac{6-3d}{1-d}}L^3$, $L^\infty L^{\frac{5-2d}{12-3d}}$. See Figures \ref{fig:0<d<1_gen} and \ref{fig:d=1_gen}.
\begin{figure}
\caption{$0<d<1$.}
\label{fig:0<d<1_gen}
\caption{$d=1$.}
\label{fig:d=1_gen}
\end{figure}
\section{Fractional NSE}\label{s:frac}
In this section, we present extensions of the results for the classical NSE to the case of fractional dissipation $\g<1$:
\begin{equation}
\label{e:momentum_g}
\p_t u + u\cdot \n u + \nu \L_{2\g} u = - \n p
\end{equation}
\begin{equation}
\label{e:divfree_g}
\n \cdot u = 0
\end{equation}
where $\widehat{\L_s u} = |\xi|^s \widehat{u}$. We define the (Onsager) regular and singular sets as in the classical case. We also define the postinitial singularity set $S$ as before. In the fractional dissipation case, weak solutions belong to $L^2 H^\g \cap L^\infty L^2$, and the energy equality can be written
\begin{equation}
\label{localeefrac}
\begin{split}
\int_{\ensuremath{\mathbb{R}}^3} |u(t)|^2 \phi & - \int_{\ensuremath{\mathbb{R}}^3} |u(s)|^2 \phi - \int_{\ensuremath{\mathbb{R}}^3 \times (s,t)} |u|^2 \p_t \phi \\
&= \int_{\ensuremath{\mathbb{R}}^3 \times (s,t)} |u|^2 u \cdot \n \phi + 2 \int_{\ensuremath{\mathbb{R}}^3 \times (s,t)} p\ u \cdot \n \phi - 2\nu \int_{\ensuremath{\mathbb{R}}^3 \times (s,t)} |\L_{\g} u|^2 \phi \\
&-2\nu \int_{\ensuremath{\mathbb{R}}^3 \times (s,t)} \L_{\g} u \cdot u\L_{\g}\phi - 2\nu \int_{\ensuremath{\mathbb{R}}^3 \times (s,t)} \L_{\g} u \cdot [\L_{\g}(u\phi) - (\L_{\g}u)\phi - u\L_{\g}\phi].
\end{split}
\end{equation}
As in the classical case, this equality is valid for $\phi\in W^{1,\infty}(\ensuremath{\mathbb{R}}^3\times [0,T])$ which are supported outside $S$. We label our terms in the same manner as in the classical case:
\[
A - B - C = D + 2P - 2\nu E - 2\nu F - 2\nu G.
\]
As before, convergence of $A,B,E$ is obvious; proving energy equality amounts to showing that the other terms vanish.
For sufficiently regular $f$ and $\g\in (0,2)$, we have
\[
\L_\g f(x) = -c_\g \int \frac{\d_{-z} \d_z f(x)}{|z|^{3+\g}}\,dz = \widetilde{c}_\g \ p.v. \int \frac{\d_z f(x)}{|z|^{3 + \g}}\,dz,
\]
where $\d_z$ denotes the difference operator $\d_z f(x) = f(x + z) - f(x)$.
\begin{lemma}
Suppose $\phi\in W^{1,a}$ for some $a\in [1,\infty]$, and let $\g\in (0,1)$. Then $\L_\g \phi\in L^a$, and we have the bound
\begin{equation}
\label{Lgphi_bd}
\|\L_\g \phi \|_{L^a} \lesssim \|\phi\|_{L^a}^{1-\g} \|\n \phi \|_{L^a}^\g.
\end{equation}
\end{lemma}
\begin{proof}
For any $r>0$, we estimate
\begin{align*}
\|\L_\g \phi \|_{L^a}
& = \left\| \int_{|z|\le r} \frac{ \d_z \phi }{|z|^{3 + \g}}\,dz + \int_{|z|> r} \frac{ \d_z \phi }{|z|^{3 + \g}}\,dz \right\|_{L^a} \\
& \le \int_{|z|\le r} \frac{\| \n \phi \|_{L^a}}{|z|^{2 + \g}}\,dz + \int_{|z|>r} \frac{2 \| \phi \|_{L^a}}{|z|^{3 + \g}}\,dz \\
& \le r^{-\g} [ r \|\n \phi\|_{L^a} + 2\|\phi\|_{L^a}].
\end{align*}
We put $r = \|\phi\|_{L^a} \|\n \phi\|_{L^a}^{-1}$ to optimize. The bound \eqref{Lgphi_bd} follows immediately.
\end{proof}
\begin{lemma}\label{p:commut}
Let $u\in H^\g \cap L^p$, $p>2$, $\g\in (0,1)$, and $\phi\in W^{1,\frac{2p}{p-2}}$. Then
\begin{equation}
\| \L_\g (u\phi) - (\L_\g u)\phi - u\L_\g \phi\|_{L^2} \lesssim \|u\|_{L^p}\|\phi\|_{L^{\frac{2p}{p-2}}}^{1-\g} \|\n \phi \|_{L^{\frac{2p}{p-2}}}^\g.
\end{equation}
The inequality continues to hold when $p=2$ and $2p/(p-2)$ is replaced by $\infty$.
\end{lemma}
\begin{proof}
We use the identity
\[
[ \L_{\g}(u\phi) - (\L_{\g}u)\phi - u\L_{\g}\phi](x)
= - \int \frac{\d_z u(x)\, \d_z \phi(x)}{|z|^{3 + \g}}\,dz
\]
and estimate the right side of this equality. Let $r>0$ be arbitrary for now. Then
\begin{align*}
\left\| \int \frac{\d_z u\, \d_z \phi}{|z|^{3 + \g}}\,dz \right\|_{L^2}
& \le \int_{|z|\le r} \frac{ \|\d_z u\, \d_z \phi \|_{L^2}}{|z|^{3 + \g}}\,dz + \int_{|z|>r} \frac{ \|\d_z u\, \d_z \phi \|_{L^2}}{|z|^{3 + \g}}\,dz\\
& \le 2 \|u\|_{L^p} \left[ \int_{|z|\le r} \frac{ \|\n \phi \|_{L^{\frac{2p}{p-2}}}}{|z|^{2 + \g}}\,dz + \int_{|z|>r} \frac{ 2 \| \phi \|_{L^{\frac{2p}{p-2}}}}{|z|^{3 + \g}}\,dz \right] \\
& \le 2r^{-\g} \|u\|_{L^p} [r \|\n\phi \|_{L^{\frac{2p}{p-2}}} + 2 \|\phi\|_{L^{\frac{2p}{p-2}}}].
\end{align*}
Put $r = \|\phi\|_{L^{\frac{2p}{p-2}}} \|\n \phi\|_{L^{\frac{2p}{p-2}}}^{-1}$ to complete the proof.
\end{proof}
We combine the two propositions above and apply them to our original test function $\phi$:
\begin{align*}
& \hspace{-10 mm} \int \|u\L_{\g}\phi\|_{L^2}^2 + \|\L_{\g}(u\phi) - (\L_{\g}u)\phi - u\L_{\g}\phi\|_{L^2}^2 \,dt \\
& \lesssim \int (\|u\|_{L^p} \|\phi\|_{L^{\frac{2p}{p-2}}}^{1-\g} \|\n \phi\|_{L^{\frac{2p}{p-2}}}^{\g})^2\,dt \\
& \le \|u\|_{L^q L^p}^2 \|\phi\|_{L^\infty L^{\frac{2p}{p-2}}}^{2(1-\g)} \|\n \phi\|_{ L^{\frac{2q\g}{q-2}} L^{\frac{2p}{p-2}}}^{2\g}.
\end{align*}
Now with the bound $|\n \phi(x,t)|\le \sup_i |\n \phi_i(x,t)|$, we obtain
\begin{equation}
\label{FG_frac}
\|\n \phi\|_{ L^{\frac{2q\g}{q-2}} L^{\frac{2p}{p-2}}}^{\frac{2q\g}{q-2}}
\le \int \left( \sum_i r_i^{-\frac{2p}{p-2} + 3} \chi_{I_i}(t) \right)^{\frac{p-2}{p}\frac{q}{q-2}\g}\,dt.
\end{equation}
So we can use Lemma \ref{l:conv} to give conditions on when $|F|+|G|\to 0$, depending on whether we are dealing with the one-slice or general type singularity.
\subsection{One-time singularity case, $\frac12<\g<1$}
We recall some of the conditions for the vanishing of $C$ and $D+P$ and (using the lemma) add to them conditions for the vanishing of $F+G$. Note that the restriction \eqref{DPfracslice} below on $D+P$ is only valid inside the square $p,q\ge 3$, just as before. We deal with this case first and investigate the case $p<3$ separately:
\begin{equation}
\label{Cfracslice}
\frac{3-d}{p} + \frac{\a}{q}\le \frac{3-d}{2},\; p\ge q \geq 2;\quad
\frac{3-d}{p} + \frac{\a}{q}< \frac{3-d}{2},\; 2\leq p< q
\end{equation}
\begin{equation}
\label{DPfracslice}
\frac{3-d}{p} + \frac{\a}{q}\le \frac{2 + \a - d}{3},\; p\ge q \geq 3;\quad
\frac{3-d}{p} + \frac{\a}{q}< \frac{2 + \a - d}{3},\; 3\leq p< q
\end{equation}
\begin{subequations} \label{FG}
\begin{align}
\label{FGfracslicetop}
\frac{(3-d)\g}{p} + \frac{\a}{q} & \le \frac{(3-d)\g + \a - 2\g}{2}, \quad \frac1q - \frac{\g}{p} \ge \frac{1-\g}{2}, \;\; p,q\ge 2 \\
\label{FGfracslicebottom}
\frac{(3-d)\g}{p} + \frac{\a}{q} & < \frac{(3-d)\g + \a - 2\g}{2}, \quad \frac1q - \frac{\g}{p} < \frac{1-\g}{2}, \;\; p,q\ge 2.
\end{align}
\end{subequations}
The line $\frac1q - \frac{\g}{p} = \frac{1-\g}{2}$ joins $L^{\frac{2}{1-\g}} L^\infty$ with $L^2 L^2$. It plays the role for $F+G$ that the bisectrice plays for $C$ and $D+P$. Also note that for each restriction, all inequalities are nonstrict in the special case $d=0$, just as before.
When $d\le 5-4\g$, we find using the same argument as in the classical case that $\a=\frac{5-d}{2}$ gives the optimal region. At this value of $\a$, $\eqref{Cfracslice}$ and $\eqref{DPfracslice}$ coincide, and \eqref{FGfracslicetop} and \eqref{FGfracslicebottom} are less restrictive than \eqref{Cfracslice} and \eqref{DPfracslice}. Furthermore, since the line corresponding to \eqref{Cfracslice} rotates about $L^\infty L^2$, we may use interpolation to remove the restriction $q\ge 3$.
In the case $p<3$, we repeat the argument used for the classical NSE and make changes where necessary. Assume first that $\g\ge \frac34$. Then we have
\begin{equation}
\label{frac_D}
|D|+|P|\le \|u\|_{L^2 H^1}^{3\b} \|u\|_{L^q L^p}^{3(1-\b)}\|\n \phi\|_{L^\s L^\infty},
\end{equation}
where
\begin{equation}
\frac13 = \frac{(3-2\g)\b}{6} + \frac{1-\b}{p}\implies
\b = \frac{6-2p}{6-(3-2\g)p};\;
1-\b = \frac{(2\g-1)p}{6-(3-2\g)p}
\end{equation}
\begin{equation}
\frac1\s = 1 - \frac{3\b}{2} - \frac{3(1-\b)}{q} = \frac{2\g pq - 3p(2\g-1) - 3q}{(6-(3-2\g)p)q}.
\end{equation}
Now
\[
\|\n \phi\|_{L^\s L^\infty}^\s = \int \sup_i r_i^{-\s} \chi_{I_i}(t)\,dt \le \int \sup_j 2^{j\s} \chi_{J_j}(t)\,dt \lesssim \sum_j (2^{\a-\s})^{-j},
\]
and the sum on the right is bounded whenever $\s < \a$. Substituting in for $\s$ and simplifying, we obtain
\begin{equation}
\label{frac_Drest_pl3}
\frac{2 + \a}{p} + \frac{(2\g-1)\a}{q} < \frac{3-2\g + 2\a\g}{3}.
\end{equation}
Note that as $\a$ increases, the line corresponding to equality rotates counterclockwise about $L^2 L^{\frac{6}{3-2\g}}$. Combining \eqref{Cfracslice} and \eqref{frac_Drest_pl3} with inequality replaced by equality in both cases, we find the curve
\begin{equation}\label{frac_xy_curve}
\begin{split}
6(3-d)x^2 + 6(6\g - 5 - (& 2\g - 1)d)xy - (4\g +3)(3-d)x \\& - (22\g - 15 - (2\g-1)3d)y + 2\g(3-d) = 0,
\end{split}
\end{equation}
where $x = p^{-1}$ and $y = q^{-1}$. Notice that the curve contains both $L^2 L^{\frac{6}{3-2\g}}$ and $L^\infty L^2$, as we expect it to. (Indeed, these points are the two axes of rotation for our lines.) However, since we are restricted to the case when $p<3$, the part of the curve that we can use is limited to that connecting $L^{\frac{15 - 3d}{3-d}}L^3$ and $L^\infty L^2$.
If $\g<\frac34$, then \eqref{frac_D} is not valid for all values of $p,q$. In particular, we need $3\b \le 2$ for the obvious application of H\"older to be valid, which translates to $p\ge \frac{3}{2\g}$. When $\g\in (\frac12, \frac34)$, we also see that the two places where the curve \eqref{frac_xy_curve} crosses the $x$-axis are at $x=\frac12$ and $x= \frac{2\g}{3}$. When $\g\in (\frac12, \frac34)$, we have $\frac{2\g}{3}\in (\frac13, \frac12)$. So the curve still gives us a meaningful restriction up to the point where it crosses the $x$-axis for the first time. Once $\g<\frac12$, however, we have $\frac{2\g}{3}<\frac13$, so the use of enstrophy does not allow us to make any statement about the range $p<3$.
All in all, our criteria for energy equality in the case $\frac12 < \g < 1$, $0\le d\le 5-4\g$ can be stated as
\begin{equation}
\frac{2(3-d)}{p} + \frac{5-d}{q}\le 3-d,\; p\ge q;\quad
\frac{2(3-d)}{p} + \frac{5-d}{q}< 3-d,\; 3\leq p< q
\end{equation}
\begin{equation}
\begin{split}
6(3-d)x^2 + 6(& 6\g - 5 - ( 2\g - 1)d)xy - (4\g +3)(3-d)x \\& - (22\g - 15 - (2\g-1)3d)y + 2\g(3-d) > 0,
\quad \frac13 < x< \min\{ \frac12, \frac{2\g}{3}\}.
\end{split}
\end{equation}
Once again, strict inequalities are replaced by nonstrict ones if $d=0$.
\begin{figure}
\caption{$\frac34\le\g<1$, $0<d\leq 5-4\g$.}
\label{fig:frac_78}
\caption{$\frac12<\g<\frac34$, $0<d\leq 5-4\g$.}
\label{fig:frac_58}
\end{figure}
Figures \ref{fig:frac_78} and \ref{fig:frac_58} diagram our results for a fixed value of $d\in (0, 5-4\g)$ (we use $d=\frac23$) and varying $\g\in (\frac12, 1)$. Note that $L^{\frac{6\g-2}{2\g-1}} L^{\frac{6\g-2}{2\g-1}}$ serves as the analogue of the Lions space in the present context, because interpolation between this space and $L^2 H^\g$ lands in the Onsager space $L^3 B^{1/3}_{3,c_0}$.
As we take $d\to 5-4\g$ from below, the new region above the bisectrice collapses to the segment $[L^\infty L^{\frac{2\g}{2\g-1}}, L^{\frac{6\g-2}{2\g-1}}L^{\frac{6\g-2}{2\g-1}}]$. In this respect, the value $d=5-4\g$ serves a similar role to the value $d=1$ in the classical case. Things are slightly more complicated when $5-4\g<d<3$. In this case, setting $\a$ equal to its usually optimal value of $\a = \frac{5-d}{2}$ places too heavy a burden on $F+G$; for a fixed $p$, we must increase $\a$ to optimize until the restrictions on $C$ and $F+G$ coincide. An elementary computation gives the optimal value of $\a$ to be
\begin{equation}
\label{eq:optalphafracdlarge}
\a_{CF}(x) = (3-d)(1-\g)(1-2x) + 2\g \quad \quad (x = p^{-1}).
\end{equation}
We see then that as $x$ increases, the optimal value of $\a$ decreases. When $p\ge 3$, the restriction on $D+P$ is always less stringent for this value of $\a$ than the corresponding restriction for $C$. However, as $x$ increases beyond $\frac13$, $\a_{CF}(x)$ eventually becomes sufficiently small so that \eqref{frac_Drest_pl3} becomes limiting once again. At this point, the optimal restriction is once again determined by the intersection of the $C$ and $D+P$ lines, following the curve \eqref{frac_xy_curve}. Indeed, along the curve \eqref{frac_xy_curve}, $\a$ is given by
\begin{equation}
\a_{CDP}(x) = \frac{3[(3-d)(2\g - 1) - 2](1-2x) + 4\g}{2(2\g - 3x)}.
\end{equation}
Now
\[
\a_{CDP}(1/3) = \frac{5-d}{2} < 2\g < \a_{CF}(1/3),
\]
whereas
\[
\a_{CDP}(1/2) = \frac{4\g}{4\g-3}>2\g = \a_{CF}(1/2) \quad (3/4<\g<1),
\]
\[
\lim_{x\to \frac{2\g}{3}^-}\a_{CDP}(x) = \infty > \a_{CF}(2\g/3) \quad (1/2<\g\le 3/4).
\]
So there must be some $x_0\in (\frac13, \min\{\frac12, \frac{2\g}{3}\})$, where $\a_{CDP}(x_0) = \a_{CF}(x_0)$. The actual value of $x_0$ does not seem to take a particularly enlightening form in general, but it can be easily calculated given $\g\in (\frac12, 1)$ and $d\in (5-4\g, 3)$. See Figures \ref{fig:frac_78_dlarge} and \ref{fig:frac_58_dlarge}.
\begin{figure}
\caption{$\frac34\le \g<1$, $\;5-4\g<d<3$.}
\label{fig:frac_78_dlarge}
\caption{$\frac12<\g<\frac34$, $\;5-4\g<d<3$.}
\label{fig:frac_58_dlarge}
\end{figure}
Altogether, the criteria for energy equality in the case $\g\in (\frac12, 1)$ and $d\in (5-4\g, 3)$ can be stated as
\begin{equation}
4(1-\g)(3-d)xy - 2(3-d + (d-1)\g)y + (1-2x)(3-d) > 0, \quad x<x_0
\end{equation}
\begin{equation}
\begin{split}
6(3-d)x^2 + 6(& 6\g - 5 - ( 2\g - 1)d)xy - (4\g +3)(3-d)x \\& - (22\g - 15 - (2\g-1)3d)y + 2\g(3-d) > 0,
\quad x_0 < x< \min\{ \frac12, \frac{2\g}{3}\}.
\end{split}
\end{equation}
\subsection{One-time singularity case, $0<\g\le\frac12$} Much of the analysis of the previous subsection carries over to the case when $\g\in (0,\frac12]$. However, there are a few important differences. For one thing, the Lions region is the single point $L^\infty L^\infty$ when $\g=\frac12$ and trivial otherwise. Second, the case $d>5-4\g$ is geometrically impossible since $5-4\g>3$ here. Finally, we cannot say anything about the region $p<3$. As was mentioned earlier, the enstrophy argument used to deal with this region for larger values of $\g$ does not apply when $\g\in (0,\frac12)$. In fact, we cannot even get any new information by interpolation with the Leray--Hopf line since the point $L^2 L^{\frac{6}{3-2\g}}$ lies on the line $x=\frac13$ when $\g = \frac12$ and to the right of this line when $\g<\frac12$. So the region for which we have proved energy equality is independent of $\g$ for $\g<\frac12$; the region depends only on $d$. See Figure \ref{fig:frac_14}. \color{black}
\begin{figure}
\caption{$0<\g\leq \frac12$, $d<3$.}
\label{fig:frac_14}
\end{figure}
\subsection{General singularities}
We fix $\a = 2\g$ in consideration of the natural scaling. The restrictions corresponding to $C$, $D+P$, and $F+G$ become
\begin{equation}
\label{Cfrac}
\frac{3-d}{p} + \frac{2\g}{q}\le \frac{3-d}{2},\; p\ge q \geq 2;\quad
\frac{3}{p} + \frac{2\g - d}{q}\le \frac{3-d}{2},\; 2\leq p< q
\end{equation}
\begin{equation}
\label{DPfrac}
\frac{3-d}{p} + \frac{2\g}{q}\le \frac{2 + 2\g - d}{3},\; p\ge q \geq 3;\quad
\frac{3}{p} + \frac{2\g - d}{q}\le \frac{2 + 2\g - d}{3},\; 3\leq p< q
\end{equation}
\begin{subequations}
\begin{align}
\label{FGfractop}
\frac{3-d}{p} + \frac{2}{q} & \le \frac{3-d}{2}, \quad \frac1q - \frac{\g}{p} \ge \frac{1-\g}{2}, \;\; p,q\ge 2 \\
\label{FGfracbottom}
\frac{3\g}{p} + \frac{2\g-d}{q} & \le \frac{3\g-d}{2}, \quad \frac1q - \frac{\g}{p} < \frac{1-\g}{2}, \;\; p,q\ge 2.
\end{align}
\end{subequations}
We will not present figures pertaining to this particular situation, as the reader can easily verify conditions above for any particular values of $\g,d,p,q$. However, we make several comments.
First, we note that the measure of $I$ may not vanish for certain combinations of $\g, d$. Mimicking the argument of \eqref{e:Ito0} only gives $|I|\to 0$ when $d\le 2\g$. If $d>2\g$, then we continue with the additional assumption that $\cH_d(S)$ is actually zero (rather than merely finite, as we usually assume).
Assume first that $\g\in (\frac12,1)$. Then \eqref{DPfrac} is more stringent than \eqref{Cfrac} when $d<5-4\g$; the two inequalities coincide when $d=5-4\g$. At this value of $d$, the region satisfying \eqref{Cfrac}, \eqref{DPfrac} is exactly the region already covered by the analogue of the Lions result. So only the case $d<5-4\g$ can give new information. However, in contrast to the classical case, the restrictions \eqref{FGfractop}, \eqref{FGfracbottom} are not always superfluous. If $\g< \frac12$, then the Lions region is trivial, and consequently the value $d = 5 - 4\g$ has no special significance for our argument in the case of a general $2\g$-parabolic $d$-dimensional singularity with $\g\in (0,\frac12)$.
When $d=0$, the singularity set can be covered by finitely many time-slices, and the region covered is the same as in the one-slice case. When $d\in (0, 2\g - 1)$, the $DP$-lines are limiting, but there is still a nontrivial region covered in the range $p<3$ by interpolation. This region disappears when $d = 2\g - 1$, but the $DP$-lines remain the limiting restriction until $d$ surpasses the value $\frac12(5+\g - \sqrt{ 9\g^2 - 18\g + 25})$, at which point the lower $FG$-line (corresponding to \eqref{FGfracbottom}) cuts into both the upper and the lower $DP$-lines. This situation prevails until $d$ reaches the value $\frac{5\g-4\g^2}{3-2\g}$, at which point the lower $FG$-line becomes more stringent than the lower $DP$-line everywhere below the bisectrice. However, at this point, the upper $FG$-line is still less stringent than the upper $DP$-line; this changes once $d$ surpasses $1$. Note that the point $L^{\frac{5-d}{3-d}}L^\infty$ is no longer included in the region covered for $d>1$. Rather, the upper $FG$-line lies strictly below the interpolation line obtained in the region $q<3$ from the uppermost point on the $DP$ segment. When $d$ lies in the range $d\in [1, 2\g + 1 - 2\sqrt{3\g^2 - 3\g + 1})$, the upper $DP$-line remains more stringent than the $FG$-lines on a small segment. However, once $d\ge 2\g + 1 - 2\sqrt{3\g^2 - 3\g + 1}$, the $FG$ restrictions are limiting in all cases.
There are a few larger values of significance for $d$, but they involve the interaction between the $FG$-lines and the Lions region rather than the $FG$-lines and the other restrictions imposed by our method. We describe briefly the bifurcations of the diagrams. When $d$ reaches the value $2-\g$, the Lions point $L^{\frac{6\g - 2}{2\g - 1}}L^{\frac{6\g - 2}{2\g - 1}}$ lies on the lower $FG$ segment. When $d= \g(5-4\g)$, the new region below the bisectrice disappears entirely (since $\frac{3\g - d}{6\g} = \frac{2\g - 1}{3}$ for this value of $d$). The new region disappears entirely into the Lions region once $d=\frac{2-\g}{\g}$. Indeed, at this value of $d$, we have $\frac{3-d}{4} = \frac{2\g-1}{2\g}$; furthermore, both the upper $FG$-line and the line containing the upper part of the boundary for the Lions region pass through $L^\infty L^2$. Therefore, the upper $FG$-line collapses to (a portion of) the boundary of the Lions region when $d = \frac{2-\g}{\g}$.
\begin{remark} Finally, we make a remark about the case $\g>1$. The main technical reason why this case eludes our analysis is a failure to produce a proper cutoff function $\f$ for which $\L_\g \f$ would remain under control, as $\n_x \f$ would already develop jump discontinuities. However if $d=0$, i.e., a finite-point set $S$, one can construct each $\f$ from $\f_i$'s having disjoint support, allowing the analysis to be carried out. In this case, the region of conditions is the same as what is shown in Figure~\ref{fig:dzero}, except that the equation for the hyperbola connecting $L^5 L^3$ to the energy space is now given by \eqref{frac_xy_curve} with $d=0$:
\begin{equation}
18 x^2 + (6\g - 5)6xy - (12\g +9) x - (22\g - 15)y + 6\g = 0, \quad \frac13 \le x \le \frac12.
\end{equation}
\end{remark}
\def$'${$'$}
\end{document} |
\begin{document}
\title[FRACTIONAL IVPS]{Fractional integral equations\\ tell us how to impose initial values\\ in fractional differential equations}
\author[D. Cao Labora]{Daniel Cao Labora}
\address{Dept. of Statistics, Mathematical Analysis and Optimization, University of Santiago de Compostela, Facultade de Matem\'aticas, Campus Vida,
R\'ua Lope G\'omez de Marzoa s/n -- 15782 Santiago de Compostela, SPAIN }
\curraddr{}
\email{[email protected]}
\thanks{}
\subjclass[2010]{26A33, 34A08}
\keywords{Fractional differential equations, initial values, existence, uniqueness}
\date{October 6, 2019}
\begin{abstract}
The goal of this work is to discuss how should we impose initial values in fractional problems to ensure that they have exactly one smooth unique solution, where smooth simply means that the solution lies in a certain suitable space of fractional differentiability. For the sake of simplicity and to show the fundamental ideas behind our arguments, we will do this only for the Riemann-Liouville case of linear equations with constant coefficients.
In a few words, we study the natural consequences in fractional differential equations of the already existing results involving existence and uniqueness for their integral analogues, in terms of the Riemann-Liouville fractional integral. Under this scope, we derive naturally several interesting results. One of the most astonishing ones is that a fractional differential equation of order $\beta>0$ with Riemann-Liouville derivatives can demand, in principle, less initial values than $\lceil \beta \rceil$ to have a uniquely determined solution. In fact, if not all the involved derivatives have the same decimal part, the amount of conditions is given by $\lceil \beta - \beta_* \rceil$ where $\beta_*$ is the highest order in the differential equation such that $\beta-\beta_*$ is not an integer.
\end{abstract}
\maketitle
\section{Introduction}
One of the most typical trademarks involving Fractional Calculus is the wide range of opinions about the notions of what is a natural fractional version of some integer order concept and what is not. On the one hand, this plurality leads to very interesting debates and fosters a very relevant critical thinking about whether things are going ``in the right direction'' or not. On the other hand, it is difficult to handle such an amount of different notions and ideas in the extant literature, since there are usually lots of generalized fractional versions of a single integer order concept, some of them not very accurate. These debates are still very alive nowadays, and we are in a concrete moment where even the most fundamental aspects of fractional calculus are being reviewed, \cite{Lu}.
In this frame, the task of this paper is to point out some relevant facts concerning the imposition of initial values for Riemann-Liouville fractional differential equations, which is the most classical extension for the usual derivative, in the particular case of linear equations with constant coefficients. However, it seems natural that the ideas described here could be extended to much more general cases.
In our opinion, we have to begin from the little things we are sure about. In this sense, if we restrict the study of Fractional Calculus to functions defined on finite length intervals $[a,b]$, there is a broad consensus that the Riemann-Liouville fractional integral with base point $a$ is the unique reasonable extension for the integral operator $\int_a^t$. The previous asseveration is not a simple opinion, since the Riemann-Liouville fractional integral can be characterized axiomatically in very reasonable terms.
\begin{theorem}[Cartwright-McMullen, \cite{CaMc}]\label{TCaMc}
Given a fixed $a \in \mathbb{R}$, there is only one family of operators $\left(I_{a^+}^{\alpha}\right)_{\alpha > 0}$ on $L^1[a,b]$ satisfying the following conditions:
\begin{enumerate}
\item The operator of order $1$ is the usual integral with base point $a$. (Interpolation property)
\item The Index Law holds. That is, $I_{a^+}^{\alpha} \circ I_{a^+}^{\beta}=I_{a^+}^{\alpha+\beta}$ for all $\alpha,\beta > 0$. (Index Law)
\item The family is continuous with respect to the parameter. That is, the following map $\textnormal{Ind}_a:\mathbb{R}^+ \longrightarrow \textnormal{End}_B \left(L^1[a,b] \right)$ given by $\textnormal{Ind}_a(\alpha) = I_{a^+}^{\alpha}$ is continuous, where $\textnormal{End}_B \left(L^1[a,b] \right)$ denotes the Banach space of bounded linear endomorphisms on $L^1[a,b]$. (Continuity)
\end{enumerate}
This family is precisely given by the Riemann-Liouville fractional integrals, whose expression will be recalled during this paper.
\end{theorem}
Hence, it makes sense to study in detail fractional integral problems for the Riemann-Liouville fractional integral to derive consequences for the corresponding fractional equations afterwards. Finally, to draw the attention of curious readers, we mention again that one of the most interesting results that we have found out is that a fractional differential equation of order $\alpha>0$ with Riemann-Liouville derivatives can demand, in principle, less initial values than $\lceil \alpha \rceil$ to have a uniquely determined solution. A complete range of highlighted results with their implications can be consulted in Section \ref{s:conc}, while the previous sections are devoted to the corresponding deductions.
\subsection{Goal of the work}
The goal of this work is to study how should we impose initial values in fractional problems with Riemann-Liouville derivative to ensure that they have a smooth and unique solution, where smooth simply means that the solution lies in a certain suitable space of fractional differentiability. To achieve this, we will depart from the results involving the Riemann-Liouville fractional integral, since it arises as the natural generalization of the usual integral operator, recall Theorem \ref{TCaMc}.
First, we will recall some results that imply that fractional integral problems always have a unique solution. We also recall the fundamental notions concerning Fractional Calculus, and we pay special attention to the functional spaces where the performed calculations, and especially fractional derivatives, are well defined. Note that this point of ``where are functions defined'' is crucial to talk about existence or uniqueness of solution and is often neglected in the literature. Indeed, to avoid this problem, much research has been conducted for Caputo derivatives instead of Riemann-Liouville, see for instance \cite{DiFo, Maina} or general comments in \cite{MMKA}. The ideas of this paragraph are developed in the second section, and most of them are available in the extant literature, except (to the best of our knowledge) Lemma \ref{estructura}.
Second, we see how each fractional differential equation of order $\alpha$ is linked with a family of fractional integral problems, whose source term lives in a $\lceil \alpha \rceil$ dimensional affine subspace of $L^1[0,b]$. This means that each solution to the fractional differential equation is a solution to one (and only one!) fractional integral problem of the $\lceil \alpha \rceil$ dimensional family. Conversely, any solution to a fractional integral problem of the family is a solution to the fractional differential equation, provided that the solution is smooth enough. In general, the set of source terms of the family of fractional integral problems that provide a smooth solution will consist in an affine subspace of $L^1[0,b]$ of a dimension lower than $\lceil \alpha \rceil$. This is done in the third section.
Third, we characterize when a source term of the $\lceil \alpha \rceil$ dimensional family induces a smooth solution, and thus a solution for the associated fractional differential equation. This characterization induces a natural correspondence between each source term inducing a smooth solution for the integral problem and the vector of initial values fulfilled by the solution. This correspondence is performed in a way that ensures that the fractional differential problem has existence and uniqueness of solution. This final part is discussed in the fourth section.
Finally, we establish a section of conclusions to highlight the most relevant obtained results, and to point out to some relevant work that should be performed in the future to continue with this approach.
\section{Basic notions}
In this section we will introduce the basic notions of Fractional Calculus that we are going to use, together with their more relevant properties and some results of convolution theory that may be not so well known. We assume that the reader is familiar with the basic theory of Banach spaces, Special Functions and Integration Theory, especially the fundamental facts involving the space of integrable functions over a finite length interval, denoted by $L^1[a,b]$, and the main properties of the $\Gamma$ function.
\subsection{The Riemann-Liouville fractional integral}
We will briefly introduce the Riemann-Liouville fractional integral, together with its most relevant properties. We will make this introduction from the perspective of convolutions, since it will be relevant to notice that the Riemann-Liouville fractional integral is no more than a convolution operator, to apply later some adequate results of convolution theory.
\begin{definition} \label{Convolution} Given $f \in L^1[a,b]$, we define its associated convolution operator $C_a(f):L^1[a,b] \longrightarrow L^1[a,b]$ as $$(f *_a g)(t):=(C_a(f)\,g)(t):= \int_{a}^t f(t-s+a) \cdot g(s) \, ds$$ for $g \in L^1[a,b]$ and $t \in [a,b]$. Under the previous notation, we say that $f$ is the kernel of the convolution operator $C_a(f)$.
\end{definition}
\begin{definition} \label{DRiLiIn}
We define the left Riemann-Liouville fractional integral of order $\alpha>0$ of a function $g \in L^1[a,b]$ with base point $a$ as $$I_{a^+}^{\alpha}g(t)=\int_a^t \frac{(t-s)^{\alpha-1}}{\Gamma(\alpha)} \cdot g(s) \, ds,$$ for almost every $t \in [a,b]$. In the case that $\alpha=0$, we just define $$I_{a^+}^{0}\, g(t)=\textnormal{Id} \, g(t)=g(t).$$
\end{definition}
From now on we will assume that $a=0$, since the results for a generic value of $a$ can be achieved from the ones that we will mention or develop for $a=0$ after a suitable translation. Moreover, when using the expression ``Riemann-Liouville fractional integral'' we will understand that it is the left Riemann-Liouville fractional integral with base point $a=0$.
\begin{remark}
We observe that, for $\alpha>0$, the Riemann-Liouville fractional integral operator $I_{0^+}^{\alpha}$ can be written as a convolution operator $C_0(f)$, with kernel \[f(t)=\frac{t^{\alpha-1}}{\Gamma(\alpha)}.\]
\end{remark}
It is well known that the Riemann-Liouville fractional integral fulfils the following properties, see \cite{Samko}.
\begin{proposition}\label{propo} For every $\alpha,\beta \geq 0$:
\begin{itemize}
\item $I_{0^+}^{\alpha}$ is well defined, meaning that $I_{0^+}^{\alpha}L^1[0,b] \subset L^1[0,b]$.
\item $I_{0^+}^{\alpha}$ is a continuous operator (equivalently, a bounded operator) from the Banach space $L^1[0,b]$ to itself.
\item $I_{0^+}^{\alpha}$ is an injective operator.
\item $I_{0^+}^{\alpha}$ preserves continuity, meaning that $I_{0^+}^{\alpha}\mathcal{C}[0,b] \subset \mathcal{C}[0,b]$.
\item We have the Index Law $I_{0^+}^{\beta} \circ I_{0^+}^{\alpha} = I_{0^+}^{\alpha + \beta}$ for $\alpha,\beta \geq 0$. In particular, $I_{0^+}^{\alpha+\beta}L^1[0,b] \subset I_{0^+}^{\beta} L^1[0,b]$.
\item Given $f \in L^1[0,b]$ and $\alpha \geq 1$, we have that $I_{0^+}^{\alpha}f$ is absolutely continuous and, moreover, $I_{0^+}^{\alpha}f(0)=0$.
\end{itemize}
\end{proposition}
Moreover, we will also use several times the following well known and straightforward remark, which can be obtained after a direct computation and found in the basic bibliography involving Fractional Calculus \cite{KiSrTr, MiRo, Pod, Samko}.
\begin{remark}\label{Example}
We have that, for $\beta>-1$ and $\alpha \geq 0$, \[I_{0^+}^{\alpha}t^{\beta}=\frac{\Gamma(\beta+1)}{\Gamma(\alpha+\beta+1)}t^{\alpha+\beta}.\] Moreover, $I_{0^+}^{\alpha}t^{\beta} \in I_{0^+}^{\gamma}L^1[0,b]$ if and only if $\alpha+\beta>\gamma-1$.
\end{remark}
\subsection{The Riemann-Liouville fractional derivative}
In this subsection, we will indicate the most relevant points when constructing the Riemann-Liouville fractional derivative. We will begin with a short introduction to absolutely continuous functions of order $n$, since the spaces where Riemann-Liouville differentiability is well defined can be understood as their natural generalization for the fractional case, see \cite{Samko}.
\subsubsection{Absolutely continuous functions and the Fundamental Theorem of Calculus}
We shall briefly indicate how the set of absolutely continuous functions is made up of the functions that, essentially, are antiderivatives of some function in $L^1[0,b]$. We will see later, how this notion is highly relevant to construct the spaces where fractional derivatives are well-defined.
\begin{definition} A real function $f$ of real variable is absolutely continuous on $[a,b]$ if for any $\varepsilon > 0$ there is $\delta >0$ such that for every family of subintervals $\{[a_1,b_1],...,[a_n,b_n]\}$ with disjoint interiors we have $$\sum_{k=1}^n (b_k-a_k) < \delta \Longrightarrow \sum_{k=1}^n \vert f(b_k)-f(a_k)\vert < \varepsilon.$$ We denote the set of these functions by $AC[a,b]$.
\end{definition}
\begin{remark} It follows trivially from the definition that any absolutely continuous function is uniformly continuous and, hence, continuous.
\end{remark}
This definition appears to carry little information. Furthermore, it seems a bit complicated to check the absolute continuity of a given function if its expression is not very manageable. However, the following theorem characterizes the absolutely continuous functions in a simple way.
\begin{theorem}[Fundamental Theorem of Calculus] \label{absolute} Consider a real function $f$ defined on an interval $[0,b] \subset \mathbb{R}$. Then, $f \in AC[0,b]$ if and only if there exists $\varphi \in L^1[0,b]$ such that
\begin{equation} \label{abscon}
f(t)=f(0)+\int_{0}^t \varphi(s) \, ds.
\end{equation}
\end{theorem}
\begin{remark} This result establishes that, essentially, absolutely continuous functions defined on $[0,b]$ are the primitives of the functions of $L^1[0,b]$, that is, antiderivatives of measurable functions whose absolute value has finite integral.
\end{remark}
This last result allows us to define the derivative of an absolutely continuous function on $[0,b]$ as a certain function in $L^1[0,b]$.
\begin{definition} \label{weak} If $f \in AC([0,b])$, we define its derivative $D^1 f$ as the unique function $\varphi \in L^{1}[0,b]$ that makes (\ref{abscon}) hold.
\end{definition}
\begin{remark} It is relevant to have in mind that the previous definition makes sense because, once fixed $f(0)$, the antiderivative operator $I_{0^+}^1$ is injective when defined on $L^1[0,b]$, recall Proposition \ref{propo}. In particular, \[AC[0,b]= \left\langle\{ 1 \}\right \rangle \oplus I_{0^+}^1 L^1[0,b],\] where ``$1$'' denotes the constant function with value $1$.
\end{remark}
\begin{definition} For any $n \in \mathbb{Z}^+$, we say that $f \in AC^{n}[0,b]$ provided that $f \in \mathcal{C}^{n-1}[0,b]$ and $D^{n-1}f \in AC[0,b]$.
\end{definition}
Thus, $AC^n[0,b]$ consists of functions that can be differentiated $n$ times, but the last derivative might be computable only in the weak sense of Definition \ref{weak}. Analogously to the previous remark, we have the following result, see page 3 of \cite{Samko}.
\begin{remark} \label{direct} We have that \[AC^{n}[0,b]= \left\langle\{ 1, t, \dots, t^{n-1}\}\right \rangle \oplus I_{0^+}^n L^1[0,b],\] after applying $n$ times the Fundamental Theorem of Calculus \ref{absolute}. Moreover, the sum is direct since the property $f \in I_{0^+}^n L^1[0,b]$ implies that $f(0)=f'(0)=\cdots=f^{(n-1)}(0)=0$ and the only polynomial of degree at most $n-1$ satisfying such conditions is the zero one.
\end{remark}
The key observation is that the vector space of functions that can be differentiated $n$ times, in the sense of Fundamental Theorem of Calculus \ref{absolute}, has two disjoint parts that only share the zero function. The left one $\left\langle\{ 1, t, \dots, t^{n-1}\}\right \rangle$, which are polynomials of degree strictly lower than $n$, consists in the functions that are annihilated by the operator $D^n$ and, thus, \[\ker D^n=\left\langle\{ 1, t, \dots, t^{n-1}\}\right \rangle.\] The right part $I_{0^+}^n L^1[0,b]$ consists of functions that are obtained after integrating $n$ times an element of $L^1[0,b]$, and hence it contains functions of trivial initial values until the derivative of order $n-1$.
We have the following well known relation between the spaces $AC^{n}[0,b]$, with respect to the inclusion.
\begin{proposition} If $n > m> 0$, we have that \[AC^{n}[0,b] \subset AC^{m}[0,b].\]
\end{proposition}
\begin{proof} We note that \[AC^{n}[0,b]= \left\langle\{ 1, t, \dots, t^{m-1}\}\right\rangle \oplus \left\langle\{t^m, \dots, t^{n-1}\}\right\rangle \oplus I_{0^+}^n L^1[0,b]\] and we make the following straightforward claims
\begin{itemize}
\item $\left\langle\{ 1, t, \dots, t^{m-1}\}\right\rangle \subset AC^{m}[0,b]$,
\item $I_{0^+}^n L^1[0,b] \subset I_{0^+}^m L^1[0,b] \subset AC^{m}[0,b]$.
\end{itemize}
Therefore, we only need to check $\left\langle\{t^m, \dots, t^{n-1}\}\right\rangle \subset AC^{m}[0,b]$. For this, it suffices to see that, indeed, \[\left\langle\{t^m, \dots, t^{n-1}\}\right\rangle \subset I_{0^+}^{m}L^1[0,b],\] but this is trivial since \[\left\langle\{t^m, \dots, t^{n-1}\}\right\rangle \subset I_{0^+}^{m}\left(\left\langle\{1, t, \dots, t^{n-m-1}\}\right\rangle\right).\]
\end{proof}
Although the previous result seems pretty immediate and irrelevant, it hides the key for a successful treatment of the fractional case. In the next part of the paper, we will reproduce the natural construction of the fractional analogue of the spaces $AC^n[0,b]$. For this construction, already presented in \cite{Samko}, it is not true that the space of order $\alpha$ is contained in the space of order $\beta$ if $\alpha > \beta$. Indeed, this particular behaviour will imply the existence of functions that can be differentiated $\alpha$ times, but not $\beta$ times, which is surprising since we are assuming that $\alpha > \beta$.
\subsubsection{The fractional abstraction}
We define the Riemann-Liouville fractional derivative as the left inverse operator for the fractional integral. After that, an easy analytical expression for its computation, available in the classical literature, for instance \cite{Samko}, follows
\begin{definition} \label{inversas} Consider $\alpha \geq 0$. We define the Riemann-Liouville fractional derivative of order $\alpha$ (and base point $0$) as the left inverse of the corresponding Riemann-Liouville fractional integral, meaning
\begin{align*} D_{0^+}^{\alpha} I_{0^+}^{\alpha} f = f,
\end{align*}
for every $f \in L^1[0,b]$.
\end{definition}
We should note that the Riemann-Liouville fractional derivative is well defined, due to the injectivity of the fractional integral, recall Proposition \ref{propo}. Moreover, it will be a surjective operator from $I_{0^+}^{\alpha}L^1[0,b]$ to $L^1[0,b]$. However, it is clear that we are missing something if we expect that $D_{0^+}^{\alpha}$ matches perfectly the usual derivative when $\alpha$ is an integer. In particular, observe that for an integer value of $\alpha$, Definition \ref{inversas} only describes the behaviour of $D^{\alpha}$ over the space $I_{0^+}^{\alpha}L^1[0,b]$, but we are missing the behaviour over the complementary part in $AC^{\alpha}[0,b]$, which is $\ker D^{\alpha}$.
It happens that it is possible to describe Definition \ref{inversas} more explicitly, since the left inverse for $I_{0^+}^{\alpha}$ is clearly $D_{0^+}^{\alpha}=D_{0^+}^{\lceil \alpha \rceil}I_{0^+}^{\lceil \alpha \rceil - \alpha}$, due to the Fundamental Theorem of Calculus \ref{absolute} and Proposition \ref{propo}. Thus, one could define $D_{0^+}^{\alpha}$ in a more general space than $I_{0^+}^{\alpha}L^1[0,b]$, since the only necessary condition to define $D_{0^+}^{\lceil \alpha \rceil}I_{0^+}^{\lceil \alpha \rceil - \alpha}f$ is to ensure that $I_{0^+}^{\lceil \alpha \rceil - \alpha}f \in AC^{\lceil \alpha \rceil}[0,b]$. Hence, the following definition makes sense.
\begin{definition} \label{RLDerEsp} For each $\alpha >0$ we construct the following space $$\mathcal{X}_\alpha=\left(I_{0^+}^{\lceil \alpha \rceil -\alpha}\right)^{-1}\left(AC^{\lceil \alpha \rceil}[0,b]\right),$$ which will be called the space of functions with summable fractional derivative of order $\alpha$. If $\alpha=0$, we define $\mathcal{X}_\alpha=L^1[0,b]$.
\end{definition}
\begin{remark} Therefore, functions of $\mathcal{X}_{\alpha}$ are defined as the ones producing a function in $AC^{\lceil \alpha \rceil}[0,b]$ after being integrated $\lceil \alpha \rceil -\alpha$ times. This new function can be differentiated $\lceil \alpha \rceil$ times in the weak sense of Fundamental Theorem of Calculus \ref{absolute}.
\end{remark}
Now we see that this definition is, indeed, the same one that was already presented in \cite{Samko} with an explicit expression.
\begin{lemma} \label{equisa} For any $\alpha > 0$ we have that $$\mathcal{X}_\alpha=\left\langle\left\{ t^{\alpha-\lceil \alpha \rceil}, \dots , t^{\alpha-2}, t^{\alpha-1}\right\}\right\rangle \oplus I_{0^+}^{\alpha} L^1[0,b].$$
\end{lemma}
\begin{proof}
First, we check $\left\langle\left\{ t^{\alpha-\lceil \alpha \rceil}, \dots , t^{\alpha-2}, t^{\alpha-1}\right\}\right\rangle \cap I_{0^+}^{\alpha} L^1[0,b]=\{0\}$. If there is a function $f$ in both addends, then $I_{0^+}^{\lceil \alpha \rceil -\alpha} f$ will be simultaneously a polynomial of degree at most $\lceil \alpha \rceil-1$, and a function in $I_{0^+}^{\lceil \alpha \rceil} L^1[0,b]$. Therefore, $I_{0^+}^{\lceil \alpha \rceil -\alpha} f$ has to be the zero function after repeating the argument in Remark \ref{direct} and, since fractional integrals are injective (Proposition \ref{propo}), $f \equiv 0$.
It is clear that, after applying $I_{0^+}^{\lceil \alpha \rceil -\alpha}$ to the right hand side, we will produce a function in $AC^{\lceil \alpha \rceil}[0,b]$. Moreover, it is trivial that any function in $AC^{\lceil \alpha \rceil}[0,b]$ can be obtained in this way in virtue of Remark \ref{Example}. Since the operator $I_{0^+}^{\lceil \alpha \rceil -\alpha}$ is injective, the result follows.
\end{proof}
From the previous lemma, we get this immediate corollary.
\begin{corollary} \label{preimagen} Given $f \in L^1[0,b]$, we have that $f \in I_{0^+}^{\alpha}L^1[0,b]$ if, and only if, $f\in \mathcal{X}_{\alpha}$ and also $D^{s} I_{0^+}^{\lceil \alpha \rceil - \alpha}f(0)= 0$ for each $s \in \{0,\dots, \lceil \alpha \rceil-1\}$.
\end{corollary}
Hence, we can upgrade Definition \ref{inversas} in the following way, coinciding with Definition 2.4 in \cite{Samko}.
\begin{definition} \label{DerivadaFrac} Consider $\alpha \geq 0$ and $f \in \mathcal{X}_{\alpha}$. We define the Riemann-Liouville fractional derivative of order $\alpha$ (and base point $0$) as
\begin{align*}D_{0^+}^{\alpha} f := D_{0^+}^{\lceil \alpha \rceil} \circ I_{0^+}^{\lceil \alpha \rceil - \alpha} f,
\end{align*}
where the last derivative may be understood in the weak sense exposed previously.
\end{definition}
\subsubsection{Properties of the space $\mathcal{X}_{\alpha}$}
We want to fully understand how $D_{0^+}^{\alpha}$ works over $\mathcal{X}_{\alpha}$ and the most natural way is to split the problem into two parts, as suggested by Lemma \ref{equisa}. We already know that $D_{0^+}^{\alpha}$ is the left inverse for $I_{0^+}^{\alpha}$, so we should study how it behaves when applied to $\left\langle\left\{ t^{\alpha-\lceil \alpha \rceil}, \dots , t^{\alpha-2}, t^{\alpha-1}\right\}\right\rangle$. It is a well known and straightforward computation that \[D_{0^+}^{\alpha}\left(\left\langle\left\{ t^{\alpha-\lceil \alpha \rceil}, \dots , t^{\alpha-2}, t^{\alpha-1}\right\}\right\rangle\right)=\{0\},\] and, hence, the kernel of $D_{0^+}^{\alpha}$ has dimension $\lceil \alpha \rceil$ and is given by \[\ker D_{0^+}^{\alpha}=\left\langle\left\{ t^{\alpha-\lceil \alpha \rceil}, \dots , t^{\alpha-2}, t^{\alpha-1}\right\}\right\rangle.\]
Moreover, we should note that if $f(t)=a_0 \, t^{\alpha-\lceil \alpha \rceil}+\cdots+a_{\lceil \alpha \rceil-1}\,t^{\alpha -1}$, with $a_j\in \mathbb{R}$ for each $j \in \left\{0,1,\dots,\lceil \alpha \rceil-1\right\}$, it is immediate to do the following calculations from Remark \ref{Example}, where $j \in \{1,\cdots,\lceil \alpha \rceil -1\}$,
\begin{align} \label{Iniciales}
\begin{split}
\left( I_{0^+}^{\lceil \alpha \rceil - \alpha}f \right)(0)=a_0\,{\Gamma(\alpha-\lceil \alpha \rceil+1)},\\
\left( D_{0^+}^{\alpha - \lceil \alpha \rceil + j}f \right)(0)=a_j\,\Gamma(\alpha - \lceil \alpha \rceil + j+1).
\end{split}
\end{align}
The previous formula generalizes the obtention of the Taylor coefficients for a fractional case and it can be used to codify functions in $\mathcal{X}_{\alpha}$ modulo $I_{0^+}^{\alpha}L^1[0,b]$, since
\begin{align} \label{Iniciales2}
\begin{split}
\left( I_{0^+}^{\lceil \alpha \rceil - \alpha}g \right)(0)=0,\\
\left( D_{0^+}^{\alpha - \lceil \alpha \rceil + j}g \right)(0)=0,
\end{split}
\end{align}
for $g \in I_{0^+}^{\alpha}L^1[0,b]$, due to Proposition \ref{propo}.
\subsubsection{Intersection of fractional summable spaces}
In general, fractional differentiation presents some extra problems that do not exist when dealing with fractional integrals. One of the most famous ones is that there is no Index Law for fractional differentiation. The main reason underlying all these complications is the following one.
\begin{remark} \label{nocont} The condition $\alpha > \beta$ does not ensure $\mathcal{X}_{\alpha} \subset \mathcal{X}_{\beta}$, although the condition $\alpha - \beta \in \mathbb{Z}^+$ trivially does. This makes Riemann-Liouville derivatives somehow tricky, since the differentiability for a higher order does not imply, necessarily, the differentiability for a lower order with different decimal part. In particular, this fact has critical implications when considering fractional differential equations, as we shall see in the paper, since the function has to be differentiable for each order involved in the equation. These problems give an idea of why it can be a natural choice to work with fractional integrals instead, and to try to transfer the results obtained for fractional integrals to the setting of fractional derivatives, instead of proving them for fractional derivatives directly.
\end{remark}
Consequently, it is interesting to compute the exact structure of a finite intersection of such spaces of different orders. To the best of our knowledge, this result is not available in the extant literature.
\begin{lemma}\label{estructura}
Consider $\beta_n > \dots > \beta_1 \geq 0$, we have that $$ \bigcap_{j=1}^n \mathcal{X}_{\beta_j} =\left\langle\left\{ t^{\beta_n-\lceil \beta_n-\beta_* \rceil}, \dots, t^{\beta_n -1} \right\}\right\rangle \oplus I_{0^+}^{\beta_n} L^1[0,b],$$ where $\beta_*$ is the maximum $\beta_j$ such that $\beta_n - \beta_j \not \in \mathbb{Z}^+$. If such a $\beta_j$ does not exist, the result still holds after defining $\beta_*=0$.
In particular, $I_{0^+}^{\beta_n} L^1[0,b] \subset \bigcap_{j=1}^n \mathcal{X}_{\beta_j}$ and it has codimension $\lceil \beta_n-\beta_* \rceil$.
\end{lemma}
\begin{proof}
It is obvious that $\bigcap_{j=1}^n \mathcal{X}_{\beta_j} \subset \mathcal{X}_{\beta_n}$. Hence,
\begin{equation}\label{ecurara}
\bigcap_{j=1}^n \mathcal{X}_{\beta_j} \subset \mathcal{X}_{\beta_n} = \left\langle \left\{ t^{\beta_n-\lceil \beta_n \rceil}, \dots, t^{\beta_n -1} \right\}\right\rangle \oplus I_{0^+}^{\beta_n} L^1[0,b].
\end{equation}
It is clear that $I_{0^+}^{\beta_n} L^1[0,b]$ lies in $\bigcap_{j=1}^n \mathcal{X}_{\beta_j}$, so the remaining question is to see when a linear combination of the $t^{\beta_n-k}$, where $k \in \{1,\dots,\lceil \beta_n \rceil \}$, lies in $\bigcap_{j=1}^n \mathcal{X}_{\beta_j}$.
The key remark is to realise that for any finite set $\mathcal{F} \subset (-1,+\infty)$ $$\sum_{\gamma \in \mathcal{F}} c_{\gamma}\, t^{\gamma} \in \mathcal{X}_{\beta_j}, \textnormal{ where } c_{\gamma} \neq 0 \textnormal{ for each } \gamma \in \mathcal{F},$$ if and only if $\gamma-\beta_j>-1$ or $\gamma-\beta_j \in \mathbb{Z}^{-}$ for every $\gamma \in \mathcal{F}$. Consequently, it is enough to study when $t^{\beta_n-k}$ lies in $\mathcal{X}_{\beta_j}$, and there are two options:
\begin{itemize}
\item If $\beta_n-\beta_j \in \mathbb{Z}^+$, we know that $t^{\beta_n-k} \in \mathcal{X}_{\beta_j}$. This happens because either $\beta_n -k \in \left\{\beta_j-\lceil \beta_j \rceil, \dots, \beta_j -1 \right\}$ or $\beta_n-k>\beta_j-1$.
\item In other case, we need $\beta_n-k>\beta_j-1$, that can be rewritten as $k<\beta_n-\beta_j+1$. If we want this to happen for every $j$ such that $\beta_n-\beta_j \not \in \mathbb{Z}$, the condition is equivalent to $k<\beta_n-\beta_*+1$, where $\beta_*$ is the greatest $\beta_j$ such that $\beta_n-\beta_j \not \in \mathbb{Z}$. Indeed, it can be rewritten as $1 \leq k \leq \lceil \beta_n-\beta_* \rceil$.
\end{itemize}
Therefore, the coefficients which are not necessarily null are the ones associated to $t^{\beta_n-k}$, where $k \in \{1,\dots,\lceil \beta_n-\beta_* \rceil\}$.
\end{proof}
\begin{remark} \label{interse} Due to Lemma \ref{estructura}, any affine subspace of $ \bigcap_{j=1}^n \mathcal{X}_{\beta_j}$ with dimension strictly higher than $\lceil \beta_n - \beta_* \rceil$ contains two distinct functions whose difference lies in $I_{0^+}^{\beta_n}L^1[0,b]$. Thus, in any vector subspace of $ \bigcap_{j=1}^n \mathcal{X}_{\beta_j}$ with dimension strictly higher than $\lceil \beta_n - \beta_* \rceil$, there are infinitely many functions that lie in $I_{0^+}^{\beta_n}L^1[0,b]$.
\end{remark}
\subsection{Fractional integral equations}
Consider the fractional integral equation \[\left(c_n\, I_{0^+}^{\gamma_n}+\cdots+c_1\,I_{0^+}^{\gamma_1}+I_{0^+}^{\gamma_0}\right)x(t)=\widetilde{f}(t),\] where $\widetilde{f} \in L^1[0,b]$, $\gamma_n > \cdots > \gamma_0 \geq 0$ and assume that it has a solution $x \in L^1[0,b]$. Since $I_{0^+}^{\gamma_n} L^1[0,b] \subset \cdots \subset I_{0^+}^{\gamma_0} L^1[0,b]$, the left hand side lies in $I_{0^+}^{\gamma_0} L^1[0,b]$ and the condition $\widetilde{f} \in I_{0^+}^{\gamma_0} L^1[0,b]$ is mandatory to ensure the existence of solution. In that case, we can apply the operator $D_{0^+}^{\gamma_0}$ to the previous equation and we obtain
\begin{align}\label{fundamental}
\left(c_n\,I_{0^+}^{\alpha_n}+\cdots+c_1\,I_{0^+}^{\alpha_1}+\textnormal{Id}\right)x(t)=f(t),
\end{align} where $D_{0^+}^{\gamma_0}\widetilde{f}=f$ and $\alpha_j=\gamma_j-\gamma_0$ for $j \in \{1,\dots,n\}$. If we use the notation \[\Upsilon=c_n\, I_{0^+}^{\alpha_n}+\cdots+c_1\,I_{0^+}^{\alpha_1},\] Equation (\ref{fundamental}) can be rewritten as
\begin{align}\label{fundamental2}
\left(\Upsilon+\textnormal{Id}\right)x(t)=f(t).
\end{align}
Therefore, it is relevant to study the properties of the operator $\Upsilon+\textnormal{Id}$ from $L^1[0,b]$ to itself to understand Equation (\ref{fundamental2}).
\subsubsection{$\Upsilon+\textnormal{Id}$ is bounded}
This claim is a very well-known result, since each addend in $\Upsilon$ is a bounded operator, and Id too, see \cite{KiSrTr}. It is also possible to prove this, just recalling that $\Upsilon$ is a convolution operator with kernel in $L^1[0,b]$ and, thus, a bounded operator.
\subsubsection{$\Upsilon+\textnormal{Id}$ is injective}
To prove that $\Upsilon+\textnormal{Id}$ is injective we will need a result concerning the vanishing of a convolution. In a few words, we need to know what are the possibilities for the factors of a convolution, provided that the obtained result is the zero function. Roughly speaking, the classical result in this direction, known as Titchmarsh Theorem, states that the integrand of the convolution from $0$ to $t$ is always zero, independently of $t$.
\begin{theorem}[Titchmarsh, \cite{Titchmarsh}] \label{Titchmarsh}
Suppose that $f,g \in L^1[0,b]$ are such that $f*_0 g \equiv 0$. Then, there exist $\lambda,\mu \in \mathbb{R}^+$ such that the following three conditions hold:
\begin{itemize}
\item $f \equiv 0$ in the interval $[0,\lambda]$,
\item $g \equiv 0$ in the interval $[0,\mu]$,
\item $\lambda+\mu \geq b$.
\end{itemize}
\end{theorem}
We will not provide the proof of this result, since it is a bit technical and it is not interesting from the point of view of the work that we are going to develop. The proof can be consulted in \cite{Titchmarsh}.
\begin{remark} \label{RTi} In particular, Titchmarsh Theorem states that the operator $C_0(f):L^1[0,b] \longrightarrow L^1[0,b]$ is injective, provided that $f \in L^1[0,b]$ and that $f$ is not null at any interval $[0,\lambda]$ for $\lambda >0$.
\end{remark}
\begin{corollary} The operator $\Upsilon+\textnormal{Id}$ described in (\ref{fundamental2}) is injective.
\end{corollary}
\begin{proof} Note that we can not apply Theorem \ref{Titchmarsh} directly to $\Upsilon+\textnormal{Id}$, since it is not a convolution operator due to the ``Id'' term. However, $I_{0^+}^1 \circ \left(\Upsilon+\textnormal{Id}\right)$ is a convolution operator and we conclude, following Remark \ref{RTi}, that $I_{0^+}^1 \circ \left(\Upsilon+\textnormal{Id}\right)$ is injective. If the previous composition is injective the right factor $\left(\Upsilon+\textnormal{Id}\right)$ has to be injective.
\end{proof}
\subsubsection{$\Upsilon+\textnormal{Id}$ is surjective}
In this case, we will use the following result, concerning Volterra integral equations of the second kind. This result essentially states that some family of integral equations do always have a continuous solution, provided that the source term is continuous.
\begin{theorem}[Rust] \label{Rust} Given $k \in L^1[0,b]$, the Volterra integral equation $$\left(C_0(k)\,v\right)(t)+v(t):=\int_0^t k(t-s) \cdot v(s) \, ds + v(t) = w(t)$$ has exactly one continuous solution $v \in \mathcal{C}[0,b]$, provided that $w \in \mathcal{C}[0,b]$ and that the following two conditions hold:
\begin{itemize}
\item If $h \in \mathcal{C}[0,b]$, then $C_0(k)\,h \in \mathcal{C}[0,b]$.
\item If $n \in \mathbb{Z}^+$ is big enough, then $\left(C_0(k)\right)^n = C_0(\widetilde{k})$ for some $\widetilde{k} \in \mathcal{C}[0,b]$.
\end{itemize}
\end{theorem}
We will not provide the proof, analogously to what we previously did with Titchmarsh Theorem. The result can be found in a more general context in \cite{Rust}, since here we have stated it for the particular case of convolution kernels.
\begin{remark} \label{Rru} We know that the image of $\Upsilon+\textnormal{Id}$ will lie in $\mathcal{C}[0,b],$ since fractional integrals map continuous functions into continuous functions (Proposition \ref{propo}) and $\Upsilon^n$ will be defined by a continuous kernel when $n\geq\alpha_1^{-1}$, which is the inverse of the least integral order in $\Upsilon$.
\end{remark}
We need to conclude that, indeed, the image of $\Upsilon+\textnormal{Id}$ is $L^1[0,b]$.
\begin{corollary} The operator $\Upsilon+\textnormal{Id}$ described in (\ref{fundamental2}) is surjective.
\end{corollary}
\begin{proof} Consider $f \in L^1[0,b]$ and the equation \[\left(\Upsilon+\textnormal{Id}\right)x(t)=f(t).\] Observe that $x$ solves the previous equation if and only if it solves \[\left(\Upsilon+\textnormal{Id}\right)(x(t)-f(t))=-\Upsilon\,f(t),\] but now the source term is in $I_{0^+}^{\alpha_1}L^1[0,b]$. If we repeat this idea inductively, we see that $x$ solves the original equation if and only if \[\left(\Upsilon+\textnormal{Id}\right)(x(t)-(\textnormal{Id}-\Upsilon+\cdots+(-1)^n \Upsilon^n)f(t))=(-1)^{n+1}\Upsilon^{n+1}\,f(t).\] The right hand side will be continuous for $n \geq \alpha_1^{-1}$ and, by Remark \ref{Rru}, it will have a solution.
\end{proof}
\subsubsection{$\Upsilon+\textnormal{Id}$ is a bounded automorphism in $L^1[0,b]$}
We have already seen that $\Upsilon+\textnormal{Id}$ is bounded and bijective, and hence the inverse is also bounded due to the Bounded Inverse Theorem for Banach spaces. Therefore, we have the following result.
\begin{theorem} The operator $\Upsilon+\textnormal{Id}$, described in (\ref{fundamental2}), is an invertible bounded linear map from the Banach space $L^1[0,b]$ to itself, whose inverse is also bounded.
\end{theorem}
In particular, we get the following result.
\begin{theorem} \label{uncidad} Given $f \in L^1[0,b]$, the equation
\begin{align}
\left(\Upsilon+\textnormal{Id}\right)x(t)=f(t) \tag{\ref{fundamental2}}
\end{align}
has exactly one solution $x \in L^1[0,b]$.
\end{theorem}
Although it is not the scope of this paper, we highlight that such an equation can be solved using classical techniques for integral equations or specific tools for the particular case of fractional integral equations, like the one exposed in \cite{CaRo}.
\section{Implications of fractional integral equations in fractional differential equations}
It would be nice to inherit some of the previous results for fractional differential equations. In fact, we are interested in studying the solutions of this general linear problem with constant coefficients
\begin{equation} \label{previodiff}
L \, u(t):=\left(c_1 \, D_{0^+}^{\beta_1}+\dots+ c_{n-1}\,D_{0^+}^{\beta_{n-1}}+D_{0^+}^{\beta_n}\right) u(t) = w(t),
\end{equation}
where $\beta_n>\dots>\beta_1\geq 0$ and $w \in L^1[0,b]$. Of course, the first question is where should we look for the solution. It is very relevant to clarify completely this point, since there are classical references, for instance see \cite{MiRo} (Theorem 1, Section 5.5), that state the following theorem or equivalent versions.
\begin{theorem} \label{incompl} Consider a linear homogeneous fractional differential equation (for Riemann-Liouville derivatives) with constant coefficients and rational orders. If the highest order of differentiation is $\alpha$, then the equation has $\lceil \alpha \rceil$ linearly independent solutions.
\end{theorem}
It is important to note that many references are not clear enough about the notion of solution to a fractional differential equation. With the previous sentence, we mean that it is desirable to introduce a suitable space of differentiable functions first, to later discuss about the solvability of the fractional differential equation. We devote the rest of the paper to show that the previous theorem is true only in some weak sense. Indeed, after defining formally the notion of ``strong solution'', we will see that, in general, there are fewer than $\lceil \alpha \rceil$ linearly independent solutions. Indeed, only for those ``strong'' solutions it will be coherent to talk about initial values.
If we go back to \eqref{previodiff}, we can make the following vital remark.
\begin{remark} We recall that, in the usual case of integer orders, we look for the solutions in $\mathcal{X}_{\beta_n}$. Although it is quite common to forget it, the underlying reason to do this is that $\bigcap_{j=1}^n \mathcal{X}_{\beta_j}=\mathcal{X}_{\beta_n}$ when every $\beta_j$ is a non-negative integer. This means that any function with summable derivative of order $\beta_n$ has summable derivative of any fewer order too. However, in general, this does not necessarily happen when the involved orders are non-integers. Thus, we may have $\bigcap_{j=1}^n \mathcal{X}_{\beta_j} \neq \mathcal{X}_{\beta_n}$ and, of course, a solution to Equation (\ref{previodiff}) has to lie in $\bigcap_{j=1}^n \mathcal{X}_{\beta_j}$.
\end{remark}
Consequently, it is convenient to know the structure of the set $\bigcap_{j=1}^n \mathcal{X}_{\beta_j}$, which has already been described in Lemma \ref{estructura}, to study existence and uniqueness of solution. Of course, to expect uniqueness of solution, some initial conditions have to be added to Equation (\ref{previodiff}), but this will be detailed in the next section. The fundamental remark is that Equation (\ref{previodiff}) can be rewritten as
\begin{equation} \label{previodiff2}
L \, u(t):=D_{0^+}^{\beta_n} \, \left(c_1 I_{0^+}^{\beta_n-\beta_1}+\dots+c_{n-1} I_{0^+}^{\beta_n-\beta_{n-1}}+\textnormal{Id}\right) u(t) = w(t).
\end{equation}
In consequence, it is quite natural to make the following reflection. If $u(t)$ solves (\ref{previodiff2}), it is because
\begin{equation} \label{weaksol}
\left(c_1 I_{0^+}^{\beta_n-\beta_1}+\dots+c_{n-1} I_{0^+}^{\beta_n-\beta_{n-1}}+ \textnormal{Id}\right) u(t) \in I_{0^+}^{\beta_n} w(t) + \ker D_{0^+}^{\beta_n}.
\end{equation}
We will refer to the set of solutions to Equation (\ref{weaksol}), as the set of weak solutions. The previous terminology obeys the following reason: although a solution to (\ref{previodiff2}) solves (\ref{weaksol}), the converse does not hold in general. The point is that a solution of (\ref{weaksol}) may not lie in $\bigcap_{j=1}^n \mathcal{X}_{\beta_j}$. Of course, if the weak solution lies in $\bigcap_{j=1}^n \mathcal{X}_{\beta_j}$, then it solves (\ref{previodiff2}). The set of solutions to (\ref{previodiff2}) will be called set of strong solutions.
At this moment, we know two vital things:
\begin{itemize}
\item We have already described $\ker D_{0^+}^{\beta_n}=\left \langle \left\{ t^{\beta_n-1} , \dots, t^{\beta_n-\lceil \beta_n \rceil} \right\}\right \rangle$, that is a vector space of dimension $\lceil \beta_n \rceil$. Therefore, the set of weak solutions has dimension $\lceil \beta_n \rceil$ too, since it is the image of the affine space $I_{0^+}^{\beta_n} w(t) + \ker D_{0^+}^{\beta_n}$ via the automorphism $T^{-1} \in \textnormal{Aut}_B(L^1[0,b])$.
\item The dimension of the set of strong solutions is bounded from above by $\lceil \beta_n - \beta_*\rceil$. If the dimension were higher we could find two different solutions to (\ref{previodiff2}) whose difference would lie in $I_{0^+}^{\beta_n}L^1[0,b]$, due to Remark \ref{interse}. After writing their difference as $I_{0^+}^{\beta_n} g$ with $g \neq 0$, it would trivially fulfil
\begin{equation*}
\left(c_1 I_{0^+}^{\beta_n-\beta_1}+\dots+c_{n-1} I_{0^+}^{\beta_n-\beta_{n-1}}+\textnormal{Id}\right)g(t) = 0,
\end{equation*}
which is not possible since the linear operator in the left hand side is injective.
\end{itemize}
From these remarks, there are some remaining points that need to be studied in detail. First, we prove that the bound $\lceil \beta_n - \beta_*\rceil$ is sharp by inspecting which of the elements in $\ker D_{0^+}^{\beta_n}$ guarantee that the weak solution associated with those elements is, indeed, a strong one.
\begin{remark} \label{remarklenders} We have $\bigcap_{j=1}^n \mathcal{X}_{\beta_j} \subset I_{0^+}^{\beta_n-\lceil \beta_n - \beta_* \rceil+1-\varepsilon} L^1[0,b]$ for every $\varepsilon>0$, but not for $\varepsilon=0$. Moreover, note that if $f \in \ker D_{0^+}^{\beta_n}$ is chosen as the right addend in the right hand side in (\ref{weaksol}), we have that, for $\gamma \leq \beta_n$, $f \in I_{0^+}^{\gamma}L^1[0,b]$ if and only if $u \in I_{0^+}^{\gamma}L^1[0,b]$. Therefore, to have a strong solution, it is mandatory to select $f \in \left \langle \left\{t^{\beta_n-1} , \dots, t^{\beta_n-\lceil \beta_n \rceil} \right\} \right \rangle$.
\end{remark}
\begin{lemma}
If $u \in L^1[0,b]$ solves
\begin{equation}\label{cansado}
\left(c_1 I_{0^+}^{\beta_n-\beta_1}+\dots+c_{n-1} I_{0^+}^{\beta_n-\beta_{n-1}}+ \textnormal{Id}\right) u(t) = I_{0^+}^{\beta_n} w(t) + f(t)
\end{equation}
for $f \in \left \langle \left\{t^{\beta_n-1} , \dots, t^{\beta_n-\lceil \beta_n-\beta_* \rceil} \right\} \right \rangle \subset \ker D_{0^+}^{\beta_n}$, then $u \in \bigcap_{j=1}^n \mathcal{X}_{\beta_j}$.
\end{lemma}
\begin{proof}
If we use the notation $\Upsilon:=c_1 I_{0^+}^{\beta_n-\beta_1}+\dots+c_{n-1} I_{0^+}^{\beta_n-\beta_{n-1}}$, we deduce from Equation (\ref{cansado}) that $$\left(\Upsilon+ \textnormal{Id}\right) \left(u(t)-f(t)\right) = I_{0^+}^{\beta_n} w(t) - \Upsilon\, f(t).$$ Observe now that the addend $-\Upsilon\, f(t)$ can be decomposed in two parts, since two different situations can happen:
\begin{itemize}
\item If $\beta_n- \beta_j \not \in \mathbb{Z}$, we see that $I_{0^+}^{\beta_n- \beta_j} t^{\beta_n-k}$ will be always in the space $I_{0^+}^{\beta_n+(\beta_n-\beta_*+1)-\lceil \beta_n-\beta_* \rceil-\varepsilon}L^1[0,b]$ for every $\varepsilon>0$. This simply occurs because the worst choice is $\beta_j=\beta_*$ and $k=\lceil \beta_n -\beta_* \rceil$. Indeed, for $\varepsilon$ small enough, the previous space is contained in $I_{0^+}^{\beta_n}L^1[0,b]$, since $\beta_n-\beta_*+1-\lceil \beta_n-\beta_* \rceil$ is strictly positive.
\item If $\beta_n- \beta_j \in \mathbb{Z}^+$, there are two options:
\subitem If $\beta_n- \beta_j>\beta_n- \beta_*$, we have that $I_{0^+}^{\beta_n- \beta_j} t^{\beta_n-k}$ lies again in $I_{0^+}^{\beta_n}L^1[0,b]$, since the maximum value admitted for $k$ is $\lceil \beta_n -\beta_* \rceil$.
\subitem If $\beta_n- \beta_j<\beta_n- \beta_*$, we have that $I_{0^+}^{\beta_n- \beta_j} t^{\beta_n-k} \in \left \langle \left\{t^{\beta_n-k'}\right\} \right \rangle$ for some $k'<k$.
\end{itemize}
Thus, we can write $I_{0^+}^{\beta_n} w(t) - \Upsilon\, f(t)= I_{0^+}^{\beta_n} w_1(t) + f_1(t)$, and arrive at the equation $$\left(c_1 I_{0^+}^{\beta_n-\beta_1}+\dots+c_{n-1} I_{0^+}^{\beta_n-\beta_{n-1}}+ \textnormal{Id}\right) \left(u(t)-f(t)\right) = I_{0^+}^{\beta_n} w_1(t) + f_1(t).$$ Note that $f$ lived in a $\lceil \beta_n-\beta_* \rceil$ dimensional vector space, but $f_1$ lives in a (at most) $\lceil \beta_n-\beta_* \rceil-1$ dimensional vector space.
If we repeat the process, we obtain $$\left(c_1 I_{0^+}^{\beta_n-\beta_1}+\dots+c_{n-1} I_{0^+}^{\beta_n-\beta_{n-1}}+ \textnormal{Id}\right) \left(u(t)-f(t)-f_1(t)\right) = I_{0^+}^{\beta_n} w_2(t) + f_2(t),$$ with $f_2$ lying in a (at most) $\lceil \beta_n-\beta_* \rceil-2$ dimensional vector space. After enough iterations, the vector space has to be zero dimensional and we would have the situation $$\left(\Upsilon+ \textnormal{Id}\right) \left(u(t)-f(t)-\cdots-f_{r-1}(t)\right) = I_{0^+}^{\beta_n} w_r(t) \in I_{0^+}^{\beta_n} L^1[0,b].$$ Therefore, $u(t)-f(t)-\cdots-f_{r-1}(t) \in I_{0^+}^{\beta_n} L^1[0,b]$. Finally, if we use that $$f(t)+f_1(t)+\cdots+f_{r-1}(t) \in \left \langle \left\{t^{\beta_n-1} , \dots, t^{\beta_n-\lceil \beta_n-\beta_* \rceil} \right\} \right \rangle,$$ it follows $u \in \left \langle \left\{t^{\beta_n-1} , \dots, t^{\beta_n-\lceil \beta_n-\beta_* \rceil} \right\} \right \rangle \oplus I_{0^+}^{\beta_n}L^1[0,b]= \bigcap_{j=1}^n \mathcal{X}_{\beta_j}$.
\end{proof}
\section{Smooth solutions for fractional differential equations}
Until this point we have checked that, a priori, there are more weak solutions (a $\lceil \beta_n \rceil$ dimensional space) than strong solutions (a $\lceil \beta_n-\beta_* \rceil$ dimensional space). We have also seen how weak solutions are codified depending on the source term, more concretely depending on the element chosen in $\ker D_{0^+}^{\beta_n}$. Moreover, we know that if the choice is made in a certain subspace of $\ker D_{0^+}^{\beta_n}$, then the obtained solution is a strong one. However, one could think about codifying strong solutions directly in the fractional differential equation via initial conditions, instead of using fractional integral problems and selecting a source term linked to a strong solution. Therefore, the last task should consist in relating the choices for $\ker D_{0^+}^{\beta_n}$ that give a strong solution with the corresponding initial conditions for the strong problem.
First, to simplify the notation, we reconsider Equation (\ref{previodiff2}) with the additional hypothesis that each positive integer less than or equal to $\beta_n-\beta_1$ can be written as $\beta_n-\beta_j$ for some $j$.
\begin{equation}
D_{0^+}^{\beta_n} \, \left(c_1 I_{0^+}^{\beta_n-\beta_1}+\dots+c_{n-1} I_{0^+}^{\beta_n-\beta_{n-1}}+\textnormal{Id}\right) u(t) = w(t) \tag{\ref{previodiff2}}
\end{equation}
This does not imply a loss of generality, since we can assume that some $c_j=0$, if needed. The only purpose of this assumption is to ease the notation in this proof in the way that is described in the following paragraph.
If $\beta_*=\beta_{n-m}$, then $\beta_n-\beta_{n-m}$ is the least possible non-integer difference $\beta_n-\beta_{j}$. Thus, we can use the previous notational assumption to check that $\beta_n-\beta_{n-j}=j$ for $j<m$ and $\beta_n-\beta_{n-m} \in (m-1,m)$. Thus, $\lceil \beta_n-\beta_{*} \rceil=\lceil \beta_n-\beta_{n-m} \rceil =m$ and $c_{n-m+1},\dots,c_{n-1}$ are $m-1$ constants multiplying integrals of integer order in (\ref{previodiff2}).
Now, we provide the main result of this section.
\begin{lemma} Under the previous notation, Equation (\ref{previodiff2}) with determined initial values $D_{0^+}^{\beta_n-m} u(0), \dots, D_{0^+}^{\beta_n-1} u(0)$ has a unique solution in $\bigcap_{j=1}^n \mathcal{X}_{\beta_j}$. This solution coincides with the unique solution of (\ref{cansado}), where the source term is the unique function $f \in \left \langle \left\{t^{\beta_n-1} , \dots, t^{\beta_n-m} \right\} \right \rangle$ fulfilling
\begin{align*}
D_{0^+}^{\beta_n-m}u(0)&=D_{0^+}^{\beta_n-m}f(0),\\
D_{0^+}^{\beta_n-m+1}u(0)+c_{n-1}\,D_{0^+}^{\beta_n-m}u(0)&=D_{0^+}^{\beta_n-m+1}f(0),\\
&\cdots \\
D_{0^+}^{\beta_n-1}u(0)+c_{n-1}\,D_{0^+}^{\beta_n-2}u(0)+\cdots+c_{n-m+1}&\,D_{0^+}^{\beta_n-m}u(0)=D_{0^+}^{\beta_n-1}f(0).
\end{align*}
\end{lemma}
\begin{proof}
Consider again the equation
\begin{equation}
\left(c_1 I_{0^+}^{\beta_n-\beta_1}+\dots+c_{n-1} I_{0^+}^{\beta_n-\beta_{n-1}}+ \textnormal{Id}\right) u(t) = I_{0^+}^{\beta_n} w(t) + f(t), \tag{\ref{cansado}}
\end{equation}
Recall that we look for strong solutions to (\ref{cansado}), which lie in the functional space $\left \langle \left\{t^{\beta_n-1} , \dots, t^{\beta_n-m} \right\} \right \rangle \oplus I_{0^+}^{\beta_n}L^1[0,b]$, so we write $$u(t)=d_1\,t^{\beta_n-m}+\cdots+d_{m}\,t^{\beta_n-1}+I_{0^+}^{\beta_n}\widetilde{u}(t).$$ Moreover, take into account that a strong choice for $f \in \ker D_{0^+}^{\beta_n}$ allows us to describe $$f(t) = b_1\,t^{\beta_n-m}+\cdots+b_{m}\,t^{\beta_n-1}.$$ Now, we will derive the initial conditions after applying $D_{0^+}^{\beta_n-k}$, for every $k \in \{1,\dots,m\}$, and substituting $t=0$ in (\ref{cansado}).
At the right hand side this is easy, since $D_{0^+}^{\beta_n-k}I_{0^+}^{\beta_n} w(t) \in I_{0^+}^1 L^1[0,b]$ and, thus, the substitution at $t=0$ gives zero. The function $D_{0^+}^{\beta_n-k}f(t)$ can be computed trivially, due to the expression of $f$, obtaining \[D_{0^+}^{\beta_n-k}f(0)=\Gamma(\beta_n - k +1) \,b_k.\]
At the left hand side, on the one hand, we have again a similar situation to the previous one, since $D_{0^+}^{\beta_n-k}I_{0^+}^{\beta_n-\beta_j}I_{0^+}^{\beta_n} \widetilde{u}(t) \in I_{0^+}^{k+(\beta_n-\beta_j)}L^1[0,b] \subset I_{0^+}^1 L^1[0,b]$ for any subindex $j\in \{1,\dots,n\}$ and, thus, the substitution at $t=0$ gives zero. On the other hand, $D_{0^+}^{\beta_n-k}I_{0^+}^{\beta_n-\beta_j}t^{\beta_n-l}$ has three possibilities:
\begin{itemize}
\item If $\beta_n-\beta_j>l-k$, then $D_{0^+}^{\beta_n-k}I_{0^+}^{\beta_n-\beta_j}t^{\beta_n-l}$ is a scalar multiple of a power of $t$ with positive exponent. Thus, when we make the substitution at $t=0$ we get $0$.
\item If $\beta_n-\beta_j=l-k$, then $D_{0^+}^{\beta_n-k}I_{0^+}^{\beta_n-\beta_j}t^{\beta_n-l}=\Gamma(\beta_n-l+1)$ is constant, and it is obviously defined for $t=0$.
\item If $\beta_n-\beta_j < l-k \leq m-1$, then $\beta_n-\beta_j$ is an integer and the computation $D_{0^+}^{\beta_n-k}I_{0^+}^{\beta_n-\beta_j}t^{\beta_n-l}$ gives the zero function.
\end{itemize}
The interest of the previous trichotomy is that we never obtain some $t^{-\gamma}$ with $\gamma>0$. Otherwise, we would run into serious trouble, since we could not evaluate the expression at $t=0$. Fortunately, we can always apply $D_{0^+}^{\beta_n-k}$ to Equation (\ref{cansado}), for every value $k \in \{1,\dots,m\}$, and substitute at $t=0$. We arrive at the following linear system of equations
\begin{align*}
D_{0^+}^{\beta_n-m}u(0)&=D_{0^+}^{\beta_n-m}f(0),\\
D_{0^+}^{\beta_n-m+1}u(0)+c_{n-1}\,D_{0^+}^{\beta_n-m}u(0)&=D_{0^+}^{\beta_n-m+1}f(0),\\
&\cdots \\
D_{0^+}^{\beta_n-1}u(0)+c_{n-1}\,D_{0^+}^{\beta_n-2}u(0)+\cdots+c_{n-m+1}&\,D_{0^+}^{\beta_n-m}u(0)=D_{0^+}^{\beta_n-1}f(0).
\end{align*}
Note that all the involved derivatives in the initial conditions have the same decimal part, since only coefficients $c_{n-1},\dots,c_{n-m+1}$ appear in the system. We also highlight that the system always has a unique solution, since it is triangular and it has no zero element in the diagonal. Therefore, a choice for $f$ linked to a strong solution determines a vector of initial values $(D_{0^+}^{\beta_n-m}u(0),\dots,D_{0^+}^{\beta_n-1}u(0))$ and vice versa in a bijective way.
\end{proof}
We shall give two examples summarizing how to apply all the previous results.
\begin{example}
Consider the following fractional differential equation (strong problem)
\begin{equation*}
\left(D_{0^+}^{\frac{7}{3}}+3\,D_{0^+}^{\frac{4}{3}}+4\,D_{0^+}^{\frac{1}{3}}\right)u(t)=t^3
\end{equation*}
and define $\beta_1=\frac{1}{3},\beta_2=\frac{4}{3},\beta_3=\frac{7}{3}$. In this case, note that $\beta_*=0$, since all the differences $\beta_3-\beta_j$ are integers. The strong solutions for the example will lie in $\bigcap_{j=1}^3 \mathcal{X}_{\beta_j}$. The dimension of the affine space of strong solutions will be $\lceil \beta_3\rceil=3$ and the initial conditions that ensure existence and uniqueness of solution will be $D_{0^+}^{\frac{4}{3}}u(0)=a_3$, $D_{0^+}^{\frac{1}{3}}u(0)=a_2$ and $I_{0^+}^{\frac{2}{3}}u(0)=a_1$.
Moreover, after left-factoring $D_{0^+}^{\frac{7}{3}}$, we find that the associated family of weak problems is
\begin{equation*}
\left(4\,I_{0^+}^{2}+3\,I_{0^+}^1+\textnormal{Id}\right)u(t)=I_{0^+}^{\frac{7}{3}} t^3 + f(t)
\end{equation*}
where $f(t) \in \left \langle \left\{t^{\frac{4}{3}}, t^{\frac{1}{3}}, t^{-\frac{2}{3}} \right\} \right \rangle$, which lives in a three dimensional space. The obtained solution, which is a priori only weak, is always strong since we have that $\lceil \beta_3 - \beta_*\rceil =\lceil \beta_3 \rceil$.
Finally, the relation between a choice for $f(t)=b_3\,t^{\frac{4}{3}}+b_2\,t^{\frac{1}{3}}+b_1\,t^{-\frac{2}{3}}$ providing a strong solution and the initial conditions $a_1$, $a_2$ and $a_3$ is
\begin{align*}
a_1=I_{0^+}^{\frac{2}{3}}f(0)=b_1 \cdot \Gamma\left(1-\frac{2}{3}\right), \\
a_2+3\,a_1=D_{0^+}^{\frac{1}{3}}f(0)=b_2 \cdot \Gamma\left(1+\frac{1}{3}\right),\\
a_3+3\,a_2+4\,a_1=D_{0^+}^{\frac{4}{3}}f(0)=b_3 \cdot \Gamma\left(1+\frac{4}{3}\right).
\end{align*}
\end{example}
\begin{example}
Consider the following fractional differential equation (strong problem)
\begin{equation*}
\left(D_{0^+}^{\frac{13}{4}}+3\,D_{0^+}^{\frac{9}{4}}+D_{0^+}^{2}+D_{0^+}^{\frac{5}{4}}+D_{0^+}^{1}\right)u(t)=t
\end{equation*}
and define $\beta_1=1,\beta_2=\frac{5}{4},\beta_3=2,\beta_4=\frac{9}{4},\beta_5=\frac{13}{4}$. In this case, note that $\beta_*=\beta_3$, since it fulfils the property that $\beta_5-\beta_*$ is the least possible non-integer difference $\beta_5-\beta_j$. The strong solutions for the example will lie in $\bigcap_{j=1}^5 \mathcal{X}_{\beta_j}$. The dimension of the affine space of strong solutions will be $\lceil \beta_5-\beta_* \rceil=2$ and the initial conditions that ensure existence and uniqueness of solution will be $D_{0^+}^{\frac{9}{4}}u(0)=a_2$ and $D_{0^+}^{\frac{5}{4}}u(0)=a_1$.
Moreover, after left-factoring $D_{0^+}^{\frac{13}{4}}$, we find that the associated family of weak problems is
\begin{equation*}
\left(I_{0^+}^{\frac{9}{4}}+I_{0^+}^{2}+I_{0^+}^{\frac{5}{4}}+3\,I_{0^+}^{1}+\textnormal{Id}\right)u(t)=I_{0^+}^{\frac{13}{4}} t + f(t)
\end{equation*}
where $f(t) \in \left \langle \left\{ t^{\frac{9}{4}}, t^{\frac{5}{4}}, t^{\frac{1}{4}}, t^{-\frac{3}{4}} \right\} \right \rangle$, which lives in a four dimensional space. The obtained solution, which is a priori only weak, will be strong if $f(t) \in \left \langle \left\{ t^{\frac{9}{4}}, t^{\frac{5}{4}} \right\} \right \rangle$.
Finally, the relation between a choice for $f(t)=b_2\,t^{\frac{9}{4}}+b_1\,t^{\frac{5}{4}}$ providing a strong solution and the initial conditions $a_1$ and $a_2$ is
\begin{align*}
a_1=D_{0^+}^{\frac{5}{4}}u(0)=D_{0^+}^{\frac{5}{4}}f(0)=b_1 \cdot \Gamma\left(1+\frac{5}{4}\right) \\
a_2+3\,a_1=D_{0^+}^{\frac{9}{4}}u(0)+3\,D_{0^+}^{\frac{5}{4}}u(0)=D_{0^+}^{\frac{9}{4}}f(0)=b_2 \cdot \Gamma\left(1+\frac{9}{4}\right)
\end{align*}
\end{example}
\section{Conclusions}\label{s:conc}
We summarize the conclusions obtained in this paper.
\begin{itemize}
\item We have recalled the main results involving existence and uniqueness of solution for linear fractional integral equations with constant coefficients.
\item We have seen that from each linear fractional differential equation with constant coefficients of order $\beta_n$ it is possible to derive a $\lceil \beta_n \rceil$ dimensional family of associated fractional integral equations, in a natural way. Moreover, each solution to the fractional differential equation fulfils exactly one of these fractional integral equations.
\item We have shown that there exists a $\lceil \beta_n-\beta_*\rceil$ dimensional subfamily (of the $\lceil \beta_n \rceil$ dimensional family of associated fractional integral equations) such that each solution to a problem of the subfamily gives a solution to the original linear fractional differential equation of order $\beta_n$. This value $\beta_*$ is obtained as the greatest involved order in the fractional differential equation such that $\beta_n-\beta_*$ is not an integer. If such a value does not exist, the same result holds after defining $\beta_*=0$.
\item We have seen how initial values at $t=0$ for the derivatives of orders $\beta_n-\lceil \beta_n-\beta_* \rceil,\dots,\beta_n-1$ guarantee existence and uniqueness of solution to a linear fractional differential equation with constant coefficients of order $\beta_n$. We have described the correspondence between such initial values for the fractional differential equation and the selection of a source term in the $\lceil \beta_n-\beta_*\rceil$ dimensional subfamily, in such a way that both problems have the same unique solution. If $\beta_n-\lceil \beta_n-\beta_* \rceil \in (-1,0)$ this first initial value is imposed, indeed, for the fractional integral of order $\lceil \beta_n-\beta_* \rceil-\beta_n$.
\item We expect that this idea can be extended to different types of fractional differential problems. It would be nice to broaden the scope of this work to a more general case than the one of constant coefficients. Moreover, the same idea could be applied to obtain similar results for other derivatives like Caputo and compare them with the already existing theory in the literature and solution methods \cite{AsCaTu}. Furthermore, the same philosophy could be applied to other types of problems as, for instance, periodic ones that have relevant applications \cite{St}.
\end{itemize}
\end{document} |
\begin{document}
\title[Commensurators of abelian subgroups]{Commensurators of abelian subgroups in CAT(0) groups}
\author[J. Huang]{Jingyin Huang}
\address{Department of Mathematics, The Ohio State University, 100 Math Tower, 231 W 18th Ave, Columbus, OH 43210, U.S.}
\email{[email protected]}
\author[T. Prytu{\l}a]{Tomasz Prytu{\l}a}
\address{Max Planck Institute for Mathematics, Vivatsgasse 7, 53111 Bonn, Germany}
\email{[email protected]}
\subjclass[2000]{Primary 20F65; Secondary 20F67}
\keywords{Commensurator, CAT(0) group, abelian subgroup, Hadamard manifold, CAT(0) cube complex, Bredon dimension}
\begin{abstract}
We study the structure of the commensurator of a virtually abelian subgroup $H$ in $G$, where $G$ acts properly on a $\mathrm{CAT}(0)$ space $X$. When $X$ is a Hadamard manifold and $H$ is semisimple, we show that the commensurator of $H$ coincides with the normalizer of a finite index subgroup of $H$. When $X$ is a $\mathrm{CAT}(0)$ cube complex or a thick Euclidean building and the action of $G$ is cellular, we show that the commensurator of $H$ is an ascending union of normalizers of finite index subgroups of $H$. We explore several special cases where the results can be strengthened and we discuss a few examples showing the necessity of various assumptions. Finally, we present some applications to the constructions of classifying spaces with virtually abelian stabilizers.
\end{abstract}
\maketitle
\section{Introduction}
\label{sec:intro}
\subsection*{Background and motivation}
We say that two subgroups $H_1, H_2$ of a group $G$ are \emph{commensurable} if $H_1 \cap H_2$ has finite index in both $H_1$ and $H_2$. The \emph{commensurator} of $H$ in $G$, denoted by $\comm{G}{H}$, is a subgroup consisting of all elements $g \in G$ such that $gHg^{-1}$ and $H$ are commensurable. In this article we would like to understand $\comm{G}{H}$ when $H$ is virtually abelian and $G$ acts properly on a $\mathrm{CAT}(0)$ space $X$.
One motivation for studying such commensurators comes from the connection between some of their properties and the topology of classifying spaces of $G$ with respect to families of virtually abelian subgroups \cite{MR2900176}.
Another motivation comes from $\mathrm{CAT}(0)$ geometry. For $\mathrm{CAT}(0)$ groups, the normalizers of their abelian subgroups are well-understood and they play a fundamental role in the theory of $\mathrm{CAT}(0)$ groups \cite{bh}. However, the commensurators of abelian subgroups are much more mysterious and they contain subtle information of the action which is not seen by the normalizers.
The commensurator $\comm{G}{H} \le G$ contains normalizers of finite index subgroups of $H$. It is therefore natural to ask how far the commensurator is from being a normalizer. In general $\comm{G}{H}$ may not be finitely generated for a $\mathrm{CAT}(0)$ group~$G$; such an example can be found in Wise's work on irreducible lattices acting on product of trees \cite{wise}, we refer to Proposition~\ref{prop:anti torus} for an explanation. On the other hand, the normalizer of $H$ is always finitely generated \cite{bh}. Thus we ask about finitely generated subgroups of the commensurator instead. This leads to the following, which is a generalization of L{\"u}ck's $\text{Condition } \mathrm{(C)}$ for cyclic subgroups \cite{luckcat0}.
\begin{defn}[$\text{Condition } \mathrm{(C)}$]We say that a virtually abelian subgroup $H$ of $G$ satisfies $\text{Condition } \mathrm{(C)}$ if every finitely generated subgroup $K \le \comm{G}{H}$ normalizes some finite index subgroup of $H$. A group $G$ satisfies $\text{Condition } \mathrm{(C)}$ if each of its finitely generated virtually abelian subgroups satisfies $\text{Condition } \mathrm{(C)}$.
\end{defn}
If $\text{Condition } \mathrm{(C)}$ holds for $H$, then $\comm{G}{H}$ is an ascending union of normalizers of finite index subgroups of $H$. Note that if $\comm{G}{H}$ does not have pathologies of type I (not being finitely generated) and type II (not satisfying $\text{Condition } \mathrm{(C)}$), then $\comm{G}{H}$ is equal to the normalizer of a finite index subgroup of $H$.
Recently, Leary and Minasyan gave an example of a $\mathrm{CAT}(0)$ group which does not satisfy $\text{Condition } \mathrm{(C)}$ \cite{LM}, so both types of pathologies can occur for $\mathrm{CAT}(0)$ groups. However, there are many natural classes of $\mathrm{CAT}(0)$ groups where such pathologies can be eliminated due to certain geometric or combinatorial structure of these groups, which we will discuss below. We will also indicate several results for general $\mathrm{CAT}(0)$ groups, including an application to Bredon cohomological dimension for virtually abelian stabilizers.
\subsection*{Structure of the commensurators}
A group $G$ acts \emph{geometrically} on a metric space $X$ if it acts isometrically, properly and cocompactly.
A subgroup $H \leq G$ is \emph{semisimple} if each element of $H$ is a semisimple isometry of $X$.
Recall that a \emph{Hadamard manifold} is a complete, simply connected smooth manifold with non-positive sectional curvature. For groups acting geometrically on Hadamard manifolds, none of the above pathologies occurs. Actually, a slightly more general result holds.
\begin{thm}[Theorem~\ref{thm:hadamard}]\label{introthm:hadamard}
Suppose $G$ acts properly on a Hadamard manifold $X$ by isometries. Let $H\le G$ be a semisimple finitely generated virtually abelian subgroup. Then $\comm{G}{H}$ is equal to the normalizer of a finite index subgroup of $H$. In particular, if the action $G\curvearrowright X$ is geometric then $\comm{G}{H}$ is finitely generated.
\end{thm}
This theorem fails if we relax the assumption of `Hadamard manifold' to `complete $\mathrm{CAT}(0)$ manifold without boundary', see Corollary~\ref{cor:LM cor}. We remark that Theorem~\ref{introthm:hadamard} gives obstructions to (virtually) embed a group into a fundamental group of a non-positively curved smooth closed manifold.
For singular $\mathrm{CAT}(0)$ spaces the structure of commensurators is generally complicated, even if the space admits a piecewise Euclidean structure. However, certain types of piecewise Euclidean structures give rise to rigidity of commensurators.
\begin{thm}[Theorem~\ref{thm:cubicalpropc} and Proposition~\ref{prop:symmetricspace}]
\label{introthm:symmetricspace}
Suppose $G$ acts properly on a $\mathrm{CAT}(0)$ space $X$. Let $H\le G$ be a finitely generated virtually abelian subgroup. Suppose one of the following is satisfied.
\begin{enumerate}
\item $X$ is a finite dimensional $\mathrm{CAT}(0)$ cube complex and $G$ acts on $X$ by cubical automorphisms.
\item $X=X_1\times X_2\times\cdots \times X_n$ such that each $X_i$ is either a nonflat irreducible symmetric space of noncompact type or an irreducible thick Euclidean Tits building with cocompact affine Weyl group, and $H$ is semisimple.
\end{enumerate}
Then any finitely generated subgroup of $\comm{G}{H}$ normalizes a finite index subgroup of $H$.
\end{thm}
Note that the conclusion is slightly weaker than in Theorem~\ref{introthm:hadamard}. In both cases of Theorem~\ref{introthm:symmetricspace}, there are examples of $\comm{G}{H}$ being not finitely generated, even if the action of $G$ on $X$ is cocompact. Moreover, one cannot remove the `finitely generated' assumption from the last sentence of Theorem~\ref{introthm:symmetricspace}, that is, the commensurator $\comm{G}{H}$ may not normalize any finite index subgroup $H'\leq H$, as shown in Proposition~\ref{prop:anti torus}. However, such assumption can be removed when the action of $G$ on a cube complex is virtually special, see Corollary~\ref{cor:centralizer}.
We now look at more general $\mathrm{CAT}(0)$ spaces. A finitely generated virtually abelian subgroup is \emph{highest}, if it does not have a finite index free abelian subgroup that lies in a free abelian subgroup of higher rank.
\begin{prop}[Proposition~\ref{prop:highestvab}]\label{introprop:highestvab}
Let $G$ be a group acting geometrically on a $\mathrm{CAT}(0)$ space and suppose $H$ is a highest virtually abelian subgroup of $G$. Then $\comm{G}{H}$ contains $H$ as a finite index subgroup. In particular $\comm{G}{H}$ is finitely generated and it normalizes a finite index subgroup of $H$.
\end{prop}
The assumption of being highest cannot be removed \cite{LM}. Also note that a highest abelian subgroup might not be `highest' in a geometric sense. More precisely, there is an example by Rattaggi and Robertson \cite{RattaggiRobertson} of a highest abelian subgroup $H$ in a $\mathrm{CAT}(0)$ group $G$ such that $H$ acts cocompactly on a flat $F\subset X$ with $F$ being contained in a higher dimensional flat, see Proposition~\ref{prop:highest}.
We also observe that for $\mathrm{CAT}(0)$ groups, the examples by Leary and Minasyan \cite{LM} are the only obstructions to $\text{Condition } \mathrm{(C)}$, see Proposition~\ref{prop:conc obstruction}.
\subsection*{Applications to the classifying spaces for families of subgroups} Given a group $G$ and a family of subgroups $\mathcal{F}$, \emph{the classifying space of $G$ for the family} $\mathcal{F}$, denoted by $E_{\mathcal{F}}G$, is the universal $G$--CW--complex with stabilizers in $\mathcal{F}$. Classifying spaces for families appear in Baum-Connes and Farrell-Jones isomorphism conjectures in $K$--theory and they can be used to compute Bredon cohomology of $G$ \cite{lucksurv}. Therefore it is desirable to construct simple models for $E_{\mathcal{F}}G$ and in particular to bound its dimension. The minimal dimension of $E_{\mathcal{F}}G$ is called \emph{geometric dimension} of $G$ for the family $\mathcal{F}$, and is denoted by $\mathrm{gd}_{\mathcal{F}}G$. There is an algebraic counterpart of geometric dimension called \emph{Bredon cohomological dimension} and it is denoted by $\mathrm{cd}_{\mathcal{F}}G$. These two dimensions are related by Eilenberg-Ganea-type inequality
\begin{equation*}
\mathrm{cd}_{\mathcal{F}}G \le \mathrm{gd}_{\mathcal{F}}G \le \mathrm{max}\{3, \mathrm{cd}_{\mathcal{F}}G \}.
\end{equation*}
The standard by now method to construct a classifying space $E_{\mathcal{F}}G$ or to bound $\mathrm{cd}_{\mathcal{F}}G$ is a construction due to L{\"u}ck and Weiermann~\cite{MR2900176}. One may say that the key point of that construction is the study of $\comm{G}{H}$ for subgroups $H \in \mathcal{F}$ and construction of classifying spaces for this group for certain families (simpler than~$\mathcal{F}$).
In vast majority of constructions, the following approach has been used: one first proves $\text{Condition } \mathrm{(C)}$ for $G$ and then approximates $\comm{G}{H}$ by normalizers of subgroups $H_i$ commensurable with $H$. Then one constructs respective classifying spaces for normalizers, as they are usually much simpler groups, and finally one reconstructs classifying space for $\comm{G}{H}$ from classifying spaces for $N_G(H_i)$ for subgroups $H_i$.
Now let $\mathcal{F}_r$ be a family of subgroups of $G$ which consists of all finitely generated virtually abelian subgroups of rank at most $r$.
Following the above procedure, the second author obtained a bound on the $\mathrm{cd}_{\mathcal{F}_r}G$ for $G$ acting properly by semisimple isometries on a proper finite dimensional $\mathrm{CAT}(0)$ space, assuming that $G$ satisfies $\text{Condition } \mathrm{(C)}$ \cite[Theorem~1.1]{cat0vab}.
However, by studying the action of $\comm{G}{H}$ more carefully, now we can remove $\text{Condition } \mathrm{(C)}$ from the assumptions of \cite[Theorem~1.1]{cat0vab}.
\begin{thm}[Theorem~\ref{thm:improvedcat0vab}]\label{introthm:improvedcat0vab} Let $G$ be a group acting properly by semisimple isometries on a complete proper $\mathrm{CAT}(0)$ space of topological dimension $n$. Then for any $0 \leqslant r \leqslant n$ we have $\mathrm{cd}_{\mathcal{F}_r}G \leq n+r+1.$
\end{thm}
Let us point out that Theorem~\ref{introthm:improvedcat0vab} gives a partial answer to a question by Lafont \cite{lafont2008construction}, concerning constructions of classifying spaces for the family of virtually abelian subgroups.
\subsection*{Comments on the proof}
Suppose $G$ acts properly on a $\mathrm{CAT}(0)$ space $X$ and suppose $H\le G$ is a semisimple finitely generated virtually abelian subgroup. Let $F\subset X$ be an $H$--invariant flat such that $H\curvearrowright F$ is cocompact. Then $\comm{G}{H}$ preserves the parallel set $P_F=F\times F^{\perp}$ and its product structure (Lemma~\ref{lem:split}). This gives rise to two factor actions $\comm{G}{H}\curvearrowright F$ and $\comm{G}{H}\curvearrowright F^{\perp}$. Theorem~\ref{introthm:symmetricspace} and Proposition~\ref{introprop:highestvab} come from analyzing the regularity of $\comm{G}{H}\curvearrowright F$; and Theorem~\ref{introthm:hadamard} and Theorem~\ref{introthm:improvedcat0vab} come from analyzing the regularity of $\comm{G}{H}\curvearrowright F^{\perp}$.
\subsection*{Organization of the paper}
In Section~\ref{sec:C} we give background on $\text{Condition } \mathrm{(C)}$. In Section~\ref{sec:honolomy} we collect several preparatory observations for later sections. Sections~\ref{sec:smoothmfds}, \ref{sec:cube}, \ref{sec:buildings}, \ref{sec:bredon} and~\ref{sec:lmgroups} are essentially independent from one another. In Sections~\ref{sec:smoothmfds},~\ref{sec:cube} and~\ref{sec:buildings} we handle the cases of Hadamard manifolds, cube complexes and Euclidean buildings respectively. Section~\ref{sec:bredon} is about applications to Bredon cohomological dimension. In Section~\ref{sec:lmgroups} we discuss the relation between $\text{Condition } \mathrm{(C)}$ and the examples by Leary and Minasyan. More examples of possible pathological behavior of commensurators are given in Section~\ref{sec:examples}.
\subsection*{Acknowledgments}
J.\ H.\ thanks the Max Planck Institute for Mathematics where part of the work was completed. J.\ H.\ thanks J.\ Lafont, T.\ Nguyen and T.\ T.\ Nguyen-Phan for helpful discussions.
T.\ P.\ thanks the Fields Institute for Research in Mathematical Sciences where part of the work was completed.
T.\ P.\ was supported by EPSRC First Grant EP/N033787/1. T.\ P.\ thanks G.\ Margulis and J.\ Schwermer for helpful discussions.
Both authors thank I.\ Leary and A.\ Minasyan for valuable discussions and comments improving the paper.
\section{Background on $\text{Condition } \mathrm{(C)}$}
\label{sec:C}
We refer to the \hyperref[sec:intro]{Introduction} for definitions of a commensurator and $\text{Condition } \mathrm{(C)}$. Throughout, we will be using the following simple observation.
\begin{lem}
\label{lem:comm invariant}
Let $G$ be a group and let $H_1,H_2$ be two finitely generated virtually abelian subgroups which are commensurable. Then $H_1$ satisfies $\text{Condition } \mathrm{(C)}$ if and only if $H_2$ satisfies $\text{Condition } \mathrm{(C)}$.
\end{lem}
If $\text{Condition } \mathrm{(C)}$ holds for all finitely generated virtually abelian subgroups of rank equal to (respectively, at most) $r$ then we denote it by $\mathrm{(C)}_{r}$ (respectively, $\mathrm{(C)}_{\leq r})$. Condition $\mathrm{(C)}_{1}$ essentially boils down to showing that for any infinite order element $h \in G$, whenever \[gh^kg^{-1} =h^l\] for some $g \in G$ and $k,l\neq 0$, then $k= \pm l$. This can be easily shown if one can assign to every such $h$ a `norm' which satisfies $\|h^n\|=|n| \cdot \|h\|$ and is invariant under conjugation. In several classes of non-positively curved groups such a norm is given by (different variants of) \emph{translation length}, and so condition $\mathrm{(C)}_{1}$ is satisfied by $\mathrm{CAT}(0)$ groups, $\delta$--hyperbolic groups, systolic groups and biautomatic groups. A simple example of a group which does not satisfy $\mathrm{(C)}_{1}$ is the Baumslag--Solitar group $BS(1,n)$, for $n>1$.
When passing to higher rank abelian subgroups, the translation length alone is insufficient. In this case elements of $\comm{G}{H}$ may also `rotate' various subgroups of $H$, and in fact this condition is not always satisfied by non-positively curved groups. To the best of our knowledge the only general method of showing $\text{Condition } \mathrm{(C)}$ is \cite[Corollary~9]{ckrw}, which states that $H$ satisfies $\text{Condition } \mathrm{(C)}$ if $H$ is weakly separable in $G$. Let us point out that this result does not require $H$ to be virtually abelian. On the other hand, combining this result with the fact that virtually abelian subgroups of linear groups are separable implies $\text{Condition } \mathrm{(C)}$ for any linear group (see \cite{cat0vab} for a short account of the proof). This applies to, among others, Coxeter groups, graph products of finite groups, or fundamental groups of special cube complexes. One easily finds examples of groups $G$ where $H$ satisfies $\text{Condition } \mathrm{(C)}$ but $H$ is not weakly separable in $G$.
\section{General $\mathrm{CAT}(0)$ case}
\label{sec:honolomy}
\subsection{Finitely generated subgroups of commensurators}
\label{subsec:honolomy}
Let $X$ be a $\mathrm{CAT}(0)$ space and let $G$ be a group acting properly on $X$.
A subgroup $H$ of $G$ is \emph{semisimple} if each element of $H$ acts as a semisimple isometry.
Let $H\le G$ be a semisimple free abelian group of rank $n$ and let $\minset{H}$ denote the minimal set of $H$.
It is a standard fact that $\minset{H}$ splits as $\mathbb E^n\times Y$ where $Y$ is a $\mathrm{CAT}(0)$ space, moreover, $H$ acts freely and cocompactly by translations on the $\mathbb E^n$--factor and acts as the identity on the $Y$--factor. Let $F$ be a flat of form $\mathbb E^n\times\{y\}$ for $y\in Y$. Then $H$ stabilizes $F$. Let $P_F=F\times F^\perp$ be the parallel set of~$F$.
We will need the following theorem.
\begin{thm}\label{thm:combinenormalizerminset}
Let $N_G(H)$ denote the normalizer of $H$ in $G$ and let $Z_G(H)$ denote the centralizer of $H$ in $G$. Then:
\begin{enumerate}[label=(\roman*)]
\item \label{thm:centralizer} The index $[N_G(H):Z_G(H)]$ is finite. In particular, if a subgroup $\Gamma \le G$ normalizes $H$ then $\Gamma$ has a finite index subgroup which centralizes $H$.
\item \label{thm:ruane}
The normalizer $N_G(H)$ preserves $\minset{H} \cong \mathbb E^n\times Y$ and its product structure.
If the action of $G$ on $X$ is in addition cocompact then $Z_G(H)$ acts geometrically on $\minset{H}$ (and thus by \ref{thm:centralizer} the same holds for $N_G(H)$).
\end{enumerate}
\end{thm}
\begin{proof}
\ref{thm:centralizer} and the first assertion of \ref{thm:ruane} is proven in \cite[Theorem II.7.1]{bh}. In the original statement, the $G$--action is required to be faithful, but this condition is not necessary. The second assertion of \ref{thm:ruane} can be proven in a similar way to \cite[Theorem 3.2]{ruane2001dynamics}.
\end{proof}
We now begin studying the action of $\comm{G}{H}$ on the parallel set $P_F$.
\begin{lem}
\label{lem:split}
The set $P_F$ is invariant under $\comm{G}{H}$. Moreover, for each element $\alpha\in \comm{G}{H}$, the action of $\alpha$ on $P_F$ splits as a product of an isometry of $F$ and an isometry of $F^\perp$.
\end{lem}
\begin{proof}
Let $\alpha \in \comm{G}{H}$. To prove both assertions of the lemma, it is enough to show that for any point $y \in F^\perp$, flats $F\times\{y\} $ and $\alpha(F\times\{y\}) $ are parallel. Indeed, in this case flat $\alpha(F\times\{y\})$ is clearly contained in $P_F$, and thus it is of form $F \times \{y'\}$ for some $y' \in F^\perp$. Then \cite[Proposition 5.3(3)]{bh} implies that $\alpha$ splits as a product.
Consider an $H$--invariant flat $F \times \{y_0\} \subset P_F$.
The flats $F \times \{y\}$ and $F \times \{y_0\}$ are parallel, and thus so are flats $\alpha(F \times \{y\})$ and $\alpha(F \times \{y_0\})$. Since parallelism is an equivalence relation, to show that $F\times\{y\} $ and $\alpha(F\times\{y\}) $ are parallel it suffices to show that $F\times\{y_0\} $ and $\alpha(F\times\{y_0\}) $ are parallel.
Since $F\times\{y_0\} $ is $H$--invariant we get that $\alpha(F\times\{y_0\}) $ is $\alpha H \inv{\alpha}$--invariant. The intersection $H'=H \cap \alpha H \inv{\alpha}$ is semisimple and is of finite index in both $H$ and $\alpha H \inv{\alpha}$. Note that both $F\times\{y_0\}$ and
$\alpha(F\times\{y_0\}) $ are $H'$--invariant. Therefore they are parallel.\end{proof}
\begin{defn}
\label{def:honolomy}
By Lemma~\ref{lem:split}, we have a well-defined homomorphism \[\phi \colon \comm{G}{H} \to \mathrm{Isom}(F)\] by considering the action on the $F$--factor of $P_F$. Note that each element of $\mathrm{Isom}(F)$ acts on the Tits boundary $\partial_T F$ of $F$, which induces a homomorphism $\mathrm{Isom}(F)\to O(n,\mathbb R)$ where $n=\dim F$. Let $\Phi$ be the composition $\comm{G}{H} \to \mathrm{Isom}(F)\to O(n,\mathbb R)$.
\end{defn}
The following observation is also of independent interest.
\begin{lem}
\label{lem:comm}
We view $H$ as a subgroup of $\mathrm{Isom}(F)$. Then the image of $\phi$ (see Definition~\ref{def:honolomy}) is contained in the commensurator of $H$ in $\mathrm{Isom}(F)$.
\end{lem}
\begin{proof}
Let $g\in \comm{G}{H}$. Then $\phi(g)$ is a composition $F\overset{g}{\to} gF\overset{p}{\to} F$ where $p$ is the parallelism map. Since $g\in\comm{G}{H}$, there exists a finite index subgroup $L\le gHg^{-1}$ such that $L\le H$. Let $L'\le H$ be the finite index subgroup such that $gL'g^{-1}=L$ and let $\alpha:L'\to L$ be the isomorphism induced by conjugation by $g$. Then the map $F\overset{g}{\to} gF$ is equivariant with respect to $L'\curvearrowright F$, $L\curvearrowright gF$ and $\alpha:L'\to L$. The map $p:gF\to F$ is equivariant with respect to the action of $L$ on both $gF$ and $F$. Thus $\phi(g)$ is equivariant with respect to $L'\curvearrowright F$, $L\curvearrowright F$ and $\alpha:L'\to L$. Thus $\phi(g)L'(\phi(g))^{-1}=L$ when viewed as subgroups of $\mathrm{Isom}(F)$. Since $L'$ and $L$ are finite index subgroups of $H$, the lemma follows.
\end{proof}
\begin{prop}
\label{prop:normalize}
Suppose $G$ acts properly on a $\mathrm{CAT}(0)$ space $X$. Let $H\le G$ be a semisimple free abelian subgroup of finite rank. Let $K\le \comm{G}{H}$ be a finitely generated subgroup. Then $K$ normalizes a finite index subgroup of $H$ if and only if $\Phi(K)$ is finite.
\end{prop}
\begin{proof}
First suppose that $K$ normalizes a finite index subgroup $H'$ of $H$. Then by Theorem~\ref{thm:combinenormalizerminset}.\ref{thm:centralizer} there exists a finite index subgroup $K'\le K$ which centralizes $H'$. We will show that $\Phi(K')= \{e\}$. This clearly implies that $\Phi(K)$ is finite.
Let $g\in K'$ be arbitrary. Since $g$ centralizes $H'$, proceeding as in the proof of Lemma~\ref{lem:comm}, we get that $\phi(g)$ is an $H'$--equivariant isometry of $F$. Since $\phi(g)$ commutes with linearly independent translations whose axes span $F$ (consider generators of $H'$), it is not hard to see that it has to be a translation itself. Thus its restriction to the boundary $\partial_T F$ is trivial. Since $g$ was arbitrary, we get that $\Phi(K') = \{e\}$.
Now suppose the image $\Phi(K)$ is finite. Thus there is a finite index subgroup $K' \le K$ for which $\Phi(K')=\{e\}$. This means that for any $g \in K'$ the isometry $\phi(g) \in \mathrm{Isom}(F)$ is a translation. Thus for any $g\in K'$ and $h\in H$, we have
\begin{equation}\label{eq:translationscommute}\phi(h)=\phi(g)\phi(h)(\phi(g))^{-1}=\phi(ghg^{-1}),
\end{equation} where the first equality follows from the fact that by definition $\phi(h)$ is also a translation, hence it commutes with $\phi(g)$.
Since $K' \le \comm{G}{H}$, for any $g \in K'$ the intersection $H_g = H \cap gH\inv{g}$ is a finite index subgroup of $H$. Take an element $h\in H_g$. Thus we have $h\in H$ and also $h=gh'\inv{g}$ for some $h' \in H$. By \eqref{eq:translationscommute} we get $\phi(h')=\phi(gh'\inv{g})=\phi(h)$. Since $h,h'\in H$ and $\phi|_H$ is an embedding we obtain $h'=h$. Therefore $g$ centralizes $H_g$.
Let $g_1, \ldots, g_n$ be a set of generators of $K'$.
Every $g_i$ centralizes $H_{g_i}$ and thus $K$ centralizes the intersection $H' =\bigcap_{g_i}H_{g_i}$, which is a finite index subgroup of $H$. Now the following elementary lemma completes the proof.
\end{proof}
\begin{lem}
\label{lem:ftindex}
Let $H\le K' \le K \le \comm{G}{H}$ and suppose that $H$ is central in $K'$ and that index $[K \colon K']$ is finite. Then there exists a finite index subgroup $H' \le H$ which is normal in $K$.
\end{lem}
\begin{proof}
Let $e=s_1, s_2, \ldots, s_n \in K$ be representatives of left cosets of $K'$ in $K$.
Define \[H'= \bigcap_{i}s_iH\inv{s_i}.\]
Since $K \le \comm{G}{H}$, clearly $H'$ has finite index in $H$. It is straightforward to check that $H'$ is normal in $K$.
\end{proof}
\begin{remark}\label{rem:bounddiam}
If $F^{\perp}$ has bounded diameter, then $\comm{G}{H}$ acts on $F\times F^\perp$ cocompactly, as $H\le \comm{G}{H}$ and $H$ acts cocompactly on $F\times \{c_0\}$ and thus on $F\times F^\perp$. It follows that $\comm{G}{H}$ contains $H$ as a finite index subgroup. Then $\comm{G}{H}$ is clearly finitely generated, and it normalizes a finite index subgroup of $H$.
\end{remark}
\subsection{The highest virtual abelian subgroups}
A virtually abelian subgroup $H \le G$ is called \emph{highest} if it is not virtually contained in a virtually abelian subgroup $H' \leq G$ of higher rank. In this section we show that the commensurator of a highest virtually abelian subgroup always behaves nicely. We need the following well-known lemma. We give a proof for the sake of completeness.
\begin{lem}\label{lem:highest}Let $G$ be a group acting geometrically on a $\mathrm{CAT}(0)$ space $X$ and suppose $H$ is a highest abelian subgroup of $G$. Then the normalizer $N_G(H)$ contains $H$ as a finite index subgroup.
\end{lem}
\begin{proof}Since $G$ acts geometrically on $X$, by Theorem~\ref{thm:combinenormalizerminset}.\ref{thm:ruane} the normalizer $N_G(H)$ acts geometrically on the minimal set $\minset{H} \cong \mathbb E^n\times Y$. Since the action preserves the splitting and $H$ acts on $Y$ trivially, it follows that $N_G(H)/H$ acts geometrically on the $\mathrm{CAT}(0)$ space $Y$ (see \cite[Section~II.7]{bh}). Now suppose $[N_G(H) \colon H]$ is infinite. In this case $N_G(H)/H$ is an infinite $\mathrm{CAT}(0)$ group and therefore it contains an element of infinite order $g$ \cite[Theorem~11]{Swenson}. Let $\tilde{g} \in N_G(H)$ be any preimage of $g$. Since by Theorem~\ref{thm:combinenormalizerminset}.\ref{thm:centralizer} the index $[N_G(H) \colon Z_G(H) ]$ is finite we get that some power $\tilde{g}^n$ commutes with $H$, thus contradicting the fact that $H$ is highest.
\end{proof}
\iffalse
\begin{proof}Since $G$ acts geometrically on $X$, the quotient $N_G(H)/H$ acts geometrically on a convex subspace $C \subset X$, where $C$ is a factor of the minimal set of $H$ on which $H$ acts trivially (see \cite[Section~II.7]{bh}). Now suppose $[N_G(H) \colon H]$ is infinite. Since $N_G(H)/H$ is an infinite $\mathrm{CAT}(0)$ group, it follows that it contains an element of infinite order $g$ \cite[Theorem~11]{Swenson}. Since $[N_G(H) \colon C_G(H) ]$ is finite \cite[Theorem~II.7.1(5)]{bh}, we get that some power $g^n$ commutes with $H$ thus contradicting the fact that $H$ is highest.
\end{proof}
\fi
\begin{prop}\label{prop:highestvab}
Let $G$ be a group acting geometrically on a $\mathrm{CAT}(0)$ space and suppose $H$ is a highest virtually abelian subgroup of $G$. Then $\comm{G}{H}$ contains $H$ as a finite index subgroup. In particular $\comm{G}{H}$ is finitely generated, and it normalizes a finite index subgroup of $H$.
\end{prop}
\begin{proof}Without loss of generality we can assume that $H$ is abelian. Let $\Phi \colon \comm{G}{H} \to O(n, \mathbb{R})$ be the map given in Definition~\ref{def:honolomy}. By Proposition~\ref{prop:normalize} any finitely generated subgroup $L \le \mathrm{ker}(\Phi)$ normalizes some finite index subgroup of $H$. Therefore $\mathrm{ker}(\Phi)$ can be written as an ascending union $\bigcup_i L_i$ such that $L_i$ normalizes some finite index subgroup $H_i \le H$. We can assume that every $L_i$ contains $H$ (replace $L_i$ with $\langle L_i, H\rangle$).
Since any $H_i$ is highest, it follows from Lemma~\ref{lem:highest} that $N_G(H_i)$ is a finite extension of $H_i$, and thus $L_i$ is a virtually abelian group of rank $\rk{H}$ since $H_i \le H \le L_i \le N_G(H_i)$. We obtain that $\mathrm{ker}(\Phi)$ is an ascending union of finitely generated virtually abelian groups of rank $\rk{H}$. By the Ascending Chain Theorem \cite[Theorem~II.7.5]{bh} this ascending union stabilizes after finitely many terms. Thus $\mathrm{ker}(\Phi)$ is a finitely generated virtually abelian group of rank $\rk{H}$.
Since the index $[\mathrm{ker}(\Phi) \colon H]$ is finite, we can find a finite index characteristic subgroup $H'$ of $ \mathrm{ker}(\Phi)$ with $H'\le H$. Now since $H'$ is characteristic in $\mathrm{ker}(\Phi)$ and $\mathrm{ker}(\Phi)$ is normal in $\comm{G}{H}$, it follows that $H'$ is normal in $\comm{G}{H}$. By Lemma~\ref{lem:highest} the index $[\comm{G}{H} \colon H']$ is finite and thus $[\comm{G}{H} \colon H]$ is finite as well.
\end{proof}
We remark that Proposition~\ref{prop:highestvab} is not a consequence of Remark~\ref{rem:bounddiam}, see Proposition~\ref{prop:highest}.
\begin{cor}\label{cor:2dimconc} Let $G$ be a group that either
\begin{enumerate}
\item acts properly by semisimple isometries on a $2$--dimensional $\mathrm{CAT}(0)$ space, or
\item acts geometrically on a $\mathrm{CAT}(0)$ space and contains no subgroup isomorphic to $\mathbb{Z}^n$ for $n>2$.
\end{enumerate}
Then $\text{Condition } \mathrm{(C)}$ holds for $G$.
\end{cor}
\begin{proof}Since $G$ does not contain free abelian subgroups of rank higher than $2$, $\text{Condition } \mathrm{(C)}$ is equivalent to Condition $\mathrm{(C)}_{\le 2}$. Since $G$ acts properly by semisimple isometries on a $\mathrm{CAT}(0)$ space, Condition $\mathrm{(C)}_{\le 1}$ holds for $G$ (see Section~\ref{sec:C}). Let $H$ be a rank $2$ free abelian subgroup of $G$. In the first case $\text{Condition } \mathrm{(C)}$ for $H$ is satisfied by Remark~\ref{rem:bounddiam}. In the second case one observes that $H$ is the highest abelian subgroup and thus $\text{Condition } \mathrm{(C)}$ for $H$ follows from Proposition~\ref{prop:highestvab}.
\end{proof}
\subsection{Core of \texorpdfstring{$F^\perp$}{F-perp}}
\label{subsec:core}
Let $G,H$ and $P_F=F \times F^\perp$ be as in Section~\ref{subsec:honolomy}. In this section we look at the action of $\comm{G}{H}$ on $P_F$ more closely. By Lemma~\ref{lem:split}, there is a factor action $\rho:\comm{G}{H}\curvearrowright F^\perp$. For each $x\in F^\perp$, let $\text{Stab}_\rho(x)$ be the stabilizer of $x$ with respect to the action $\rho$.
\begin{defn}
We define the \emph{core} of $F^\perp$, denoted $C$, to be the subset of $F^\perp$ made of points whose stabilizer (with respect to $\rho$) is commensurable to $H$.
\end{defn}
\begin{lem}
\label{lem:core}
The core $C \subset F^{\perp}$ is non-empty, convex and $\comm{G}{H}$--invariant.
\end{lem}
\begin{proof}
Clearly $C$ is non-empty. Note that $\text{Stab}_\rho(gx)=g\text{Stab}_\rho(x)g^{-1}$ for any $g\in \comm{G}{H}$, thus $C$ is $\comm{G}{H}$--invariant. To see that $C$ is convex, choose $c_1,c_2\in C$ and let $c_0$ be a point in the geodesic segment $\overline{c_1c_2}$. For $0\le i\le 2$, let $H_i=\text{Stab}_\rho(c_i)$, and let $H'_0=H_1\cap H_2$. Clearly $H'_0\le H_0$ and $H'_0$ is commensurable to $H$. Since $H'_0$ acts cocompactly on $F\times\{c_0\}$ and $H_0$ acts properly on $F\times\{c_0\}$, $H'_0$ is of finite index in $H_0$. Thus $H_0$ is commensurable to $H$ and so $c_0\in C$.
\end{proof}
Note that in general $C$ is not complete or closed in $F^{\perp}$.
\begin{lem}\label{lem:discreteorb}Assume that $C$ is a proper metric space. Then the action $\comm{G}{H}\curvearrowright C$ has discrete orbits.
\end{lem}
\begin{proof}
Suppose the contrary, that there exists $y_0 \in C$ such that for any $\epsilon>0$ there are infinitely many distinct elements $(g_i)_{i\in \mathbb{N}}$ such that $d(g_iy_0, y_0) < \epsilon$.
Embed $C \hookrightarrow F \times C \subset P_F$ by $y \mapsto (x_0, y)$ for some chosen $x_0 \in F$ and consider the action of $\comm{G}{H}$ on $F \times C$. We will suppress from writing $\phi$ and $\rho$ and simply write $g(x,y)=(gx,gy)$ for this action.
We need the following claim.
\textbf{Claim.} There exists a constant $R>0$ such that for any $g_i \in \comm{G}{H}$, given any two points $(a, g_iy_0) ,(b, g_iy_0) \in F \times \{g_iy_0\}$ there exists $h_i \in \text{Stab}( F \times \{g_iy_0\})$ such that \[d(h_i(a, g_iy_0),(b, g_iy_0))\leq R.\]
To see the claim, first observe that since $y_0 \in C$, the stabilizer $\text{Stab}( F \times \{y_0\})$ acts cocompactly on $F \times \{y_0\}$ and therefore such constant $R$ exists for $ F \times \{y_0\}$. Since for any $g_i$ we have $\text{Stab}( F \times \{g_iy_0\})=g_i\text{Stab}( F \times \{y_0\})g_i^{-1}$ and the $\text{Stab}( F \times \{g_iy_0\})$--action on $F \times \{g_iy_0\}$ is conjugate to the $\text{Stab}( F \times \{y_0\})$ on $F \times \{y_0\}$ it follows that constant $R$ works for $F \times \{g_iy_0\}$ as well.
We proceed with the proof of the lemma. By the assumption we have infinitely many distinct points $(g_ix_0, g_iy_0) \in F \times B(y_0, \epsilon)$.
By the claim there exist elements $h_i \in \text{Stab}( F \times \{g_iy_0\})$ such that
\[d(h_i(g_ix_0, g_iy_0) , (x_0, g_iy_0)) \le R.\] Therefore infinitely many points $(h_i(g_ix_0, g_iy_0))_{i \in \mathbb{N}}$ are contained in the compact subset $B(x_0,R) \times B(y_0, \epsilon) \subset F\times C$. These points are distinct because their second coordinates are distinct. Consequently, all the elements $(h_ig_i)_{i \in \mathbb{N}}$ are distinct, which contradicts the properness of the action $\comm{G}{H}$ on $ F \times C$.\end{proof}
\section{The smooth manifolds case}\label{sec:smoothmfds}
In this section we show that commensurators of abelian subgroups are well-behaved for groups acting on Hadamard manifolds.
Let $M$ be a Riemannian manifold without boundary and let $C\subset M$ be a \emph{totally convex} subset, i.e.,\ for any pair of points $x,y\in C$ and any Riemannian geodesic $\omega$ connecting $x$ and $y$, we have $\omega\subset C$.
Let $k$ be the largest integer such that the collection $\{N_\alpha\}$ of smoothly embedded $k$--manifolds of $M$ which are contained in $C$ is non-empty. Let $N=\cup_\alpha N_\alpha$. The following result is well-known (see e.g.\ \cite[pp. 139 - 141]{cheeger2008comparison}).
\begin{lem}
\label{lem:convex subset} The subset $N$ is a totally geodesic, connected, smoothly embedded submanifold of $M$ such that $N\subset C \subset \bar N$, where $\bar N$ is the closure of $N$ in $M$.
\end{lem}
If $C$ is a point, then $N=C$ is also a point.
\begin{thm}\label{thm:hadamard}
Suppose $G$ acts properly on a Hadamard manifold $X$ by isometries. Let $H\le G$ be a semisimple finitely generated virtually abelian subgroup. Then $\comm{G}{H}$ is equal to the normalizer of a finite index subgroup of $H$. In particular, if the action $G\curvearrowright X$ is geometric then $\comm{G}{H}$ is finitely generated.
\end{thm}
Recall that isometries of Riemannian manifolds as metric spaces are actually diffeomorphisms and preserve the Riemannian tensor.
\begin{proof}Without loss of generality we can assume that $H$ is free abelian. Let $F$ be a flat in the minimal set of $H$ where $H$ acts cocompactly. Let $P_F = F\times F^\perp$ be the parallel set of $F$. As in Section~\ref{subsec:core}, let $\rho:\comm{G}{H}\curvearrowright F^{\perp}$ be the factor action and let $C \subset F^\perp$ be the core.
Choose a basepoint $y\in F$. Then $F^\perp$ (respectively $C$) can be realized as a convex subset $\{y\}\times F^\perp$ (respectively $\{y\}\times C$) of $X$. Let $p:P_F\to \{y\}\times F^\perp$ be the projection map. Let $N=\cup_\alpha N_\alpha \subset C$ be as in Lemma~\ref{lem:convex subset}. Since $N$ is totally geodesic, $N$ is convex in $C$. For any $g\in \comm{G}{H}$, the composition $N_\alpha\to (p\circ g) (N_\alpha)$ is a diffeomorphism. Thus $\rho:\comm{G}{H}\curvearrowright F^{\perp}$ leaves $N$ invariant and acts on $N$ by Riemannian isometries.
Choose $x\in N$. Since $N \subset C$, the stabilizer $\text{Stab}_\rho(x)$ is commensurable to $H$. Let $T_x N$ be the tangent space of $N$ at $x$. Let $H'$ be the kernel of the natural homomorphism from $\text{Stab}_\rho(x)$ to orthogonal group of $T_x N$.
\begin{claim}
The subgroup $H'$ is of finite index in $\text{Stab}_\rho(x)$.\end{claim}
To see the claim, take an orthogonal frame $\{e_1,e_2,\ldots, e_n\}$ in $T_x N$ (it is possible that $n=0$), and find $\{x_1,x_2,\ldots,x_n\}$ in $N$ such that the geodesic segment from $x$ to $x_i$ has tangent vector $e_i$ at $x$ (such $x_i$ exists since $N$ is a manifold without boundary). Since $x_i \in N \subset C$, the subgroup $\text{Stab}_\rho(x_i)$ is commensurable to $\text{Stab}_\rho(x)$. Thus a finite index subgroup $H_i\le\text{Stab}_\rho(x)$ stabilizes $x_i$, hence also stabilizes $e_i$. It follows that $\cap_{i=1}^nH_i$ stabilizes all of $\{e_1,e_2,\ldots,e_n\}$ and thus the claim follows.
Let $h:\comm{G}{H}\to\mathrm{Isom}(N)$ be the homomorphism induced by $\rho$. Since $N$ is a smooth manifold, $H'$ acts trivially on $N$. Thus $H'\le \ker(h)$. On the other hand, $\ker(h) \le \text{Stab}_\rho(x)$. Since $H'$ is of finite index in $\text{Stab}_\rho(x)$ and $\text{Stab}_\rho(x)$ is commensurable to $H$, we obtain that $\ker(h)$ is commensurable to $H$. Let $H''$ be a finite index characteristic subgroup of $\ker(h)$ such that $H''\le \ker(h)\cap H$. Then $H''$ is of finite index in $H$ and $H''$ is normalized by $\comm{G}{H}$. Since clearly we have $N_G(H'') \le \comm{G}{H}$, we conclude that $\comm{G}{H} = N_G(H'')$.
If the action $G\curvearrowright X$ is geometric then by Theorem~\ref{thm:combinenormalizerminset}.\ref{thm:ruane} the normalizer $N_G(H'')$ acts geometrically on $\minset{H''}$ and thus it is finitely generated.
\end{proof}
\section{The cube complex case}
\label{sec:cube}
\subsection{General actions}
We refer to the excellent notes by Sageev \cite{sageevnotes} for background on $\mathrm{CAT}(0)$ cube complexes and hyperplanes. Let $X$ be a finite dimensional $\mathrm{CAT}(0)$ cube complex and let $F\subset X$ be a flat.
A hyperplane $h$ \emph{crosses} $F$ if $F$ is not contained in a halfspace bounded by $h$. Note that if $h$ crosses $F$, then $h\cap F$ is a codimension 1 flat in $F$. Given $F$, let $\mathcal H(F)$ be the collection of hyperplanes that cross $F$.
\begin{lem}
\label{lem:intersection1}
Let $F_1$ and $F_2$ be two parallel flats. Then $\mathcal H(F_1)=\mathcal H(F_2)$. Moreover, for any $h\in \mathcal H(F_1)$, $h\cap F_1$ and $h\cap F_2$ are parallel.
\end{lem}
\begin{proof}
First we claim that if $h$ crosses $F_1$ then for any $N>0$, there exist $x_1,x_2 \in~F_1$ such that they are on different sides of $h$, and we have $d(x_1,h)>N$ and $d(x_2,h)>N$. Note that the first assertion of the lemma follows readily from this claim. To see the claim, first take $y_0\in F_1\cap h$ and $y_1,y_2\in F_1$ on different sides of $h$. Let $r_1:[0,\infty)\to F_1$ be a ray emanating from $y_0$ and passing through $y_1$. Then the convexity of the function $t\to d(r_1(t),h)$ implies that $\lim_{t\to\infty} d(r_1(t),h)=\infty$ and $r_1\cap h=\{y_0\}$. Thus we can define $x_1$ to be $r_1(t)$ for a sufficiently large $t$. Similarly we can find $x_2$.
Now we prove the `moreover' statement. Let $E$ be the convex hull of $F_1$ and $F_2$. By \cite[Chapter II.2.12]{bh}, $E$ is isometric to $F_1\times[0,a]$ where $a=d(F_1,F_2)$. Since $h$ has a product neighborhood isometric to $h\times [0,1]$, $E\cap h$ is a convex codimension 1 surface of $E$. Thus $E\cap h$ is isometric to $(h\cap F_1)\times[0,a']$ for some $a'\ge a$. Hence $h\cap F_1$ and $h\cap F_2$ are parallel.
\end{proof}
Let $G$ be a group acting on $X$ properly by cubical automorphisms. Let $H\le G$ be a free abelian subgroup and suppose $H$ acts on a flat $F\subset X$ cocompactly.
Since for $k\in \comm{G}{H}$, flats $F$ and $k F$ are parallel, we get that $k\mathcal H(F)=\mathcal H(kF)=\mathcal H(F)$ by Lemma~\ref{lem:intersection1}. This shows that $\mathcal H(F)$ is $\comm{G}{H}$--invariant.
For $h_1,h_2\in \mathcal H(F)$, we define $h_1\sim h_2$ if $h_1\cap F$ and $h_2\cap F$ are parallel. It is clear that $\sim$ is an equivalence relation. Since each element in $\mathcal H(F)$ intersects $F$ in a codimension 1 flat, any pair of non-equivalent hyperplanes in $\mathcal H(F)$ have non-empty intersection. Since $X$ is finite dimensional, the collection $\mathcal H(F)$ has finitely many equivalence classes, which we denote by $\{\mathcal H_i(F)\}_{i=1}^n$.
Choose a basepoint $o\in F$. For each $i$, let $\vec{v}_i$ be a non-zero vector based at $o$ such that it is orthogonal to $h\cap F$ for some $h\in \mathcal H_i(F)$.
\begin{lem}
\label{lem:span}
The flat $F$ is spanned by $\{\vec{v}_i\}_{i=1}^n$.
\end{lem}
\begin{proof}
Suppose the contrary is true. Then there is a line $\ell\subset F$ which is orthogonal to each $\vec{v}_i$. Let $h_0$ be a hyperplane crossing $\ell$. Then $h_0\in \mathcal H(F)$. It follows from the choice of $\ell$ that $h\cap F$ contains a line parallel to $\ell$ for each $h\in\mathcal H(F)$. Thus $h_0\neq h$ and $h_0\cap h\neq\emptyset$ for each $h\in\mathcal H(F)$, which yields a contradiction.
\end{proof}
\begin{lem}
\label{lem:finite index1}
There is a finite index subgroup $L$ of $\comm{G}{H}$ such that $L(\mathcal H_i(F))=\mathcal H_i(F)$ for each $i$.
\end{lem}
\begin{proof}
An \emph{orthogonal partition} of $\mathcal H(F)$ is a partition $\mathcal H(F)=\sqcup_{i=1}^m W_i$ such that for any $i\neq j$, each element in $W_i$ crosses every element in $W_j$. Note that every two orthogonal partitions of $\mathcal H(F)$ have a common refinement which is an orthogonal partition. Thus $\mathcal H(F)$ has a canonical finest orthogonal partition $\mathcal H(F)=\sqcup_{i=1}^{l} W'_i$ (since $X$ is finite dimensional, $l<\infty$), and $\comm{G}{H}$ permutes the factors of this partition. Thus $\comm{G}{H}$ has a finite index subgroup $L$ such that $L(W'_i)=W'_i$ for $1\le i\le l$. Since $\mathcal H(F)=\sqcup_{i=1}^n \mathcal H_i(F)$ is also an orthogonal partition, the lemma follows.
\end{proof}
\begin{cor}
\label{cor:parallel}
Let $L \le \comm{G}{H}$ be as in Lemma~\ref{lem:finite index1}. Then for each $k\in L$ and $h\in \mathcal H(F)$, $k(h\cap F)$ and $h\cap F$ are parallel.
\end{cor}
\begin{proof}
By Lemma~\ref{lem:finite index1}, $kh$ and $h$ are in the same equivalence class. Thus $F\cap h$ and $F\cap kh$ are parallel. By Lemma~\ref{lem:intersection1}, flats $F\cap kh$ and $kF\cap kh$ are parallel (as $F$ and $kF$ are parallel). Thus the corollary follows.
\end{proof}
\begin{thm}\label{thm:cubicalpropc}
Suppose $G$ acts properly on a finite dimensional $\mathrm{CAT}(0)$ cube complex $X$ by cubical automorphisms. Let $H\le G$ be a finitely generated virtually abelian subgroup and let $K\le \comm{G}{H}$ be a finitely generated subgroup. Then $K$ normalizes a finite index subgroup of $H$.
\end{thm}
\begin{proof}
We can assume $H$ is free abelian by Lemma~\ref{lem:comm invariant}. Let $\phi$ and $\Phi$ be the maps in Definition~\ref{def:honolomy}. By Proposition~\ref{prop:normalize} it suffices to show that $\Phi(K)$ is finite. Define $K' = K \cap L$ where $L$ is as in Lemma~\ref{lem:finite index1}. Note that $[K : K']$ is finite since $[\comm{G}{H} : L]$ is finite. Then Corollary~\ref{cor:parallel} implies that for any $k \in K'$ and for any $h\in \mathcal H(F)$, flats $h\cap F$ and $\phi(k)(h\cap F)$ are parallel. Let $\{\vec{v}_i\}_{i=1}^n$ be as in Lemma~\ref{lem:span}, and let $\{s_i\}_{i=1}^n$ denote the corresponding points on Tits boundary $\partial_T F$. Then each element in $\Phi(K')$ maps $s_i$ to $s_i$ or $-s_i$. Since $\{\vec{v}_i\}_{i=1}^n$ spans $F$, we conclude that $\Phi(K')$ is finite and hence that $\Phi(K)$ is finite.
\end{proof}
\begin{remark}
We cannot relax the assumption in Theorem~\ref{thm:cubicalpropc} that $G$ acts by cubical automorphisms to $G$ acts by isometries (though for many cube complexes these two conditions are equivalent). This is because LM groups in Definition~\ref{def:LM} clearly act on a $\mathrm{CAT}(0)$ cube complex by isometries, but the action does not respect the cubical structure.
\end{remark}
\begin{remark}
It follows from \cite{niblo1997groups} that if a group $G$ acts on Davis complex for a Coxeter group properly by cellular isometries, then $G$ satisfies the assumptions of Theorem~\ref{thm:cubicalpropc} and hence $G$ satisfies $\text{Condition } \mathrm{(C)}$. More generally, we speculate that by the same proof, $\text{Condition } \mathrm{(C)}$ should hold for groups acting properly by cellular isometries on $\mathrm{CAT}(0)$ piecewise Euclidean polyhedral complexes whose cells are isometric to Coxeter cells.
\end{remark}
\subsection{Virtually special actions}
In this section, we comment on an important class of actions which are \emph{virtually special}. The main point is that the pathological behavior in Proposition~\ref{prop:anti torus} cannot happen when the action is virtually special.
\begin{defn}
The action of $G$ on a $\mathrm{CAT}(0)$ cube complex $X$ is \emph{virtually special} if there exists a torsion free finite index subgroup $G'\le G$ such that $X/G'$ is a (not necessarily compact) special cube complex in the sense of \cite{haglund2008special}.
\end{defn}
Recall that a group $G$ has the \emph{unique root property} if for any positive integer $n$ and arbitrary elements $x,y\in G$ the equality $x^n=y^n$ implies $x=y$ in $G$. We will need the following elementary property of groups with the unique root property.
\begin{lem}
\label{lem:commute}
Suppose $G$ has the unique root property and let $g,h\in G$. If $g^m$ and $h^n$ commute for some non-zero integers $m$ and $n$, then $g$ and $h$ commute.
\end{lem}
If $X/G'$ is a special cube complex, then $G'$ is a subgroup of a (possibly infinitely generated) right-angled Artin group \cite[Theorem 4.2]{haglund2008special}. Since any finitely generated right-angled Artin group is biorderable \cite{duchamp1992simple}, the group $G'$ is a union of biorderable groups. As biorderable groups have the unique root property \cite[Lemma 6.3]{MR2914863}, we get that $G'$ has the unique root property.
\begin{cor}
\label{cor:centralizer}
Suppose $G$ acts properly on a finite dimensional $\mathrm{CAT}(0)$ cube complex $X$ by cubical automorphisms such that the action is virtually special, or more generally $G$ has a finite index subgroup $G'$ which has the unique root property. Let $H\le G$ be a finitely generated virtually abelian subgroup. Then $\comm{G}{H}$ is equal to the normalizer of a finite index subgroup of $H$. In particular, if the action $G\curvearrowright X$ is geometric then $\comm{G}{H}$ is finitely generated.
\end{cor}
\begin{proof}
By replacing $H$ with $H \cap G'$ if necessary, we can assume that $H \le G'$. Note that in this case we have $\comm{G'}{H}=\comm{G}{H}\cap G'$. It follows from Theorem~\ref{thm:cubicalpropc} and Theorem~\ref{thm:combinenormalizerminset}.\ref{thm:centralizer} that each finitely generated subgroup $K\le \comm{G'}{H}$ has a finite index subgroup $K'$ such that $K'$ centralizes a finite index subgroup $H'$ of $H$. Thus for any $k\in K$ and $h\in H$, there exist non-zero integers $n,m$ such that $k^n$ and $h^m$ commute. By applying Lemma~\ref{lem:commute} we get that $k$ and $h$ commute. Thus $K$ centralizes $H$. Since $\comm{G'}{H}$ is a union of finitely generated subgroups, $\comm{G'}{H}$ centralizes $H$. Since $\comm{G'}{H}$ has finite index in $\comm{G}{H}$, by Lemma~\ref{lem:ftindex} we get that $\comm{G}{H}$ normalizes a finite index subgroup $H''$ of $H$, and thus we conclude that $\comm{G}{H}=N_G(H'')$. If the action $G\curvearrowright X$ is geometric then $\comm{G}{H}$ is finitely generated by Theorem~\ref{thm:combinenormalizerminset}.\ref{thm:ruane}.
\end{proof}
\begin{remark}For virtually special actions, $\text{Condition } \mathrm{(C)}$ follows from \cite[Corollary~9]{ckrw} (since abelian subgroups of right-angled Artin groups, or, more generally, of $GL(n,\mathbb Z)$ are always separable, cf.\ Section~\ref{sec:C}). Thus in the proof of Corollary~\ref{cor:centralizer} one can replace Theorem~\ref{thm:cubicalpropc} with \cite[Corollary~9]{ckrw}.
\end{remark}
\section{Products of symmetric spaces and Euclidean buildings}
\label{sec:buildings}
In this section we discuss how an intersection pattern of flats in a $\mathrm{CAT}(0)$ space interacts with $\text{Condition } \mathrm{(C)}$. The main example is a product of irreducible symmetric spaces of noncompact type and/or irreducible thick Euclidean Tits buildings.
\begin{prop}
\label{prop:symmetricspace}
Suppose $G$ acts properly by isometries on $X=X_1\times X_2\times\cdots \times X_n$ such that each $X_i$ is either a nonflat irreducible symmetric space of noncompact type or an irreducible thick Euclidean Tits building with cocompact affine Weyl group. Let $H\le G$ be a finitely generated semisimple virtually abelian subgroup and let $K\le \comm{G}{H}$ be a finitely generated subgroup. Then $K$ normalizes a finite index subgroup of $H$.
\end{prop}
\begin{proof}
Recall that the Tits boundary $\partial_T X_i$ is an irreducible spherical building, which has the structure of a simplicial complex. A top--dimensional isometrically embedded sphere in $\partial_T X_i$ is called an \emph{apartment} and $\partial_T X_i$ is a union of apartments. Since we are assuming thickness, each top--dimensional simplex is an intersection of apartments. Let $q_i:X_i\to X_i$ be an isometry. Then $\partial q_i:\partial_T X_i\to \partial_T X_i$ clearly preserves the collection of apartments, and hence it respects the simplicial structure. The Tits boundary $\partial_T X$ is a spherical join of irreducible spherical buildings, and thus it has the structure of a polyhedral complex. \emph{Apartments} in $\partial_TX$ are spherical joins of apartments in each of its factors. Any isometry $q \colon X\to X$ respects the product decomposition (up to permutation of factors), and therefore the induced boundary map $\partial q \colon \partial_T X \to \partial_T X$ respects the polyhedral structure of $\partial_T X$.
Let $H\le G$ be a finitely generated semisimple virtually abelian subgroup and let $K\le \comm{G}{H}$ be a finitely generated subgroup. By Lemma~\ref{lem:comm invariant} we can assume that $H$ is free abelian. Then $H$ acts on a flat $F\subset X$ cocompactly by translations. Let $S\subset \partial_T X$ be the smallest isometrically embedded sphere containing $\partial_T F$ which is also a subcomplex. Note that at least one such sphere exists, since $\partial_T F$ is contained in an apartment \cite[Proposition 3.9.1]{kleiner1997rigidity}. Let $k\in K$. We claim $\partial k(S)=S$, where $\partial k$ is the boundary map. By Lemma~\ref{lem:split}, $\partial k(\partial_T F)=\partial_T F$. Since $\partial k$ respects the polyhedral structure and $S$ is the smallest spherical subcomplex containing $\partial_T F$, the claim follows.
The action of $K$ on $\partial_T X$ provides a homomorphism $\beta:K\to \operatorname{Isom}(\partial_T F)$ (note that $\beta$ is equal to $\Phi$ from Definition~\ref{def:honolomy}). By the previous claim, each element in $\beta(K)$ is the restriction of an isometry of $S$ which respects the polyhedral complex structure on $S$. Thus $\beta(K)$ is finite, which implies the proposition by Proposition~\ref{prop:normalize}.
\end{proof}
What is really happening in Proposition~\ref{prop:symmetricspace} is that flats in $X$ branch in sufficiently many directions. To this end, we formulate Proposition~\ref{prop:branching} below without referring to the structure of symmetric spaces and Euclidean buildings, where Proposition~\ref{prop:symmetricspace} is a special case of Proposition~\ref{prop:branching}.
\begin{defn}
\label{def:branching}
Let $F$ be a flat in a $\mathrm{CAT}(0)$ space $X$. Let $\partial_T F$ be the Tits boundary of $F$. A subsphere $S$ of $\partial_T F$ is \emph{singular} if there is a subflat $F_0\subset F$ with $\partial_T F_0=S$ such that the parallel set $P_{F_0}$ of $F_0$ is not contained in a bounded neighborhood of $P_F$.
\end{defn}
The following generalization of Proposition~\ref{prop:symmetricspace} is straightforward.
\begin{prop}
\label{prop:branching}
Suppose $G$ acts on a $\mathrm{CAT}(0)$ space $X$ properly by isometries. Let $H\le G$ be a finitely generated free abelian group acting cocompactly on a flat $F\subset X$ by translations. Suppose the collection of all singular subspheres in $\partial_T F$ is rigid in the sense that there are only finitely many isometries of $\partial_T F$ permuting the singular subspheres. Then any finitely generated subgroup $K$ in $\comm{G}{H}$ normalizes a finite index subgroup of $H$.
\end{prop}
\section{Bredon cohomological dimension for virtually abelian stabilizers}\label{sec:bredon}
Let $G$ be a group and let $\mathcal{F}$ be a \emph{family} of subgroups of $G$, i.e.,\ a collection of subgroups which is closed under taking subgroups and conjugation. Let $\mathrm{cd}_{\mathcal{F}}G$ denote the \emph{Bredon cohomological dimension of $G$ for the family $\mathcal{F}$}. For definition and properties of Bredon cohomological dimension we refer the reader to \cite{lucksurv}. Let us mention that a closely related invariant is the \emph{Bredon geometric dimension} $\mathrm{gd}_{\mathcal{F}}G$ which is the lowest dimension of the universal $G$--CW--complex with stabilizers in~$\mathcal{F}$. These two invariants are related by $\mathrm{cd}_{\mathcal{F}}G \le \mathrm{gd}_{\mathcal{F}}G \le \mathrm{max}\{3, \mathrm{cd}_{\mathcal{F}}G\}$.
For any integer $r \geq 0$, let $\mathcal{F}_r$ denote the family of all subgroups of $G$ which are finitely generated virtually abelian of rank at most $r$. Thus $\mathcal{F}_0$ consists of all finite subgroups of $G$ and $\mathcal{F}_1$ consists of all virtually cyclic subgroups of $G$.
In \cite{cat0vab} there is presented a method for bounding $\mathrm{cd}_{\mathcal{F}_r}G$ for $\mathrm{CAT}(0)$ groups, which depends on $\text{Condition } \mathrm{(C)}$.
\begin{thm}{\cite[Theorem~1.1]{cat0vab}}\label{thm:cat0vab} Let $G$ be a group acting properly by semisimple isometries on a complete proper $\mathrm{CAT}(0)$ space of topological dimension $n$. Suppose additionally that $G$ satisfies $\text{Condition } \mathrm{(C)}$. Then for any $0 \leqslant r \leqslant n$ we have $\mathrm{cd}_{\mathcal{F}_r}G \leq n+r+1.$
\end{thm}
However, by analyzing the action of $\comm{G}{H}$ on the core $C \subset F^{\perp}$, we are able to remove $\text{Condition } \mathrm{(C)}$ from the assumptions of the above theorem. We need the following definition.
\begin{defn}
Given a subgroup $H \in \mathcal{F}_r$, let $\mathrm{All}[H]$ denote the family of subgroups of $\comm{G}{H}$ which consists of all subgroups $A$ such that $A \cap H$ is of finite index in $A$. \end{defn}
The only place where $\text{Condition } \mathrm{(C)}$ is used in the proof of \cite[Theorem~1.1]{cat0vab} is the proof of {\cite[Lemma~3.4]{cat0vab}}, where it is shown that
\[\mathrm{cd}_{\mathrm{All}[H]} \comm{G}{H} \leq n-r+1.\]
This is obtained in two steps. First, using $\text{Condition } \mathrm{(C)}$ one writes $\comm{G}{H}$ as the limit $\mathrm{lim}_i N_G(H_i)$ of normalizers of subgroups $H_i$ which are commensurable with $H$. Then one bounds $\mathrm{cd}_{\mathrm{All}[H] \cap N_G(H_i)} N_G(H_i)$ for every $i$ using the proper action of $N_G(H_i)/H_i$ on a $\mathrm{CAT}(0)$ space $\minset{H_i} \cap F^{\perp}$.
\begin{prop}\label{prop:bredondimall}Let $G$ be a group acting properly by semisimple isometries on a proper $\mathrm{CAT}(0)$ space $X$ of topological dimension $n$. Let $H \in \mathcal{F}_r$ be a subgroup of $G$. Then \[\mathrm{cd}_{\mathrm{All}[H]} \comm{G}{H} \leq n-r.\]
\end{prop}
The proposition is an easy consequence of the following theorem of Degrijse-Petrosyan.
\begin{thm}{\cite[Corollary~1]{DePe}} Let $G$ be a group acting by isometries on a separable $\mathrm{CAT}(0)$ space of topological dimension $n$ and suppose that the $G$--orbit of every point $x \in X$ is discrete. Let $\mathcal{F}$ be the smallest family of subgroups of $G$ containing the point stabilizers $G_x$ for every $x \in X$. Then we have
\[\mathrm{cd}_{\mathcal{F}}G \leq n.\]
\end{thm}
\begin{proof}[Proof of Proposition~\ref{prop:bredondimall}] Consider the action of $\comm{G}{H}$ on the core $C \subset F^{\perp}$ given by Lemma~\ref{lem:core}. Clearly $C$ is a $\mathrm{CAT}(0)$ space, since it is a convex subset of $X$. We have that $C$ is separable, since it is a subset of a proper, and hence separable, metric space $X$. Let $\mathrm{dim}$ denote the topological dimension. Notice that \[\mathrm{dim} (F \times F^{\perp}) \leq \mathrm{dim}(X) \leq n.\] Since $\mathrm{dim}(F) =r$ and $C \subset F^{\perp}$ we obtain that $\mathrm{dim}(C) \leq n-r$. By Lemma~\ref{lem:discreteorb} the action has discrete orbits.
It remains to check that $\mathrm{All}[H]$ is the smallest family of subgroups of $\comm{G}{H}$ which contains point stabilizers. By definition of $C$ every point stabilizer is commensurable with $H$ and thus belongs to $\mathrm{All}[H]$. On the other hand, any subgroup $A \in \mathrm{All}[H]$ has a fixed point. To see this, notice that the intersection $A\cap H$ has a fixed point, and since $[A \colon A\cap H]$ is finite, the subgroup $A$ has a finite orbit and thus a fixed point as well.
\end{proof}
Combining proof of \cite[Theorem~1.1]{cat0vab} with Proposition~\ref{prop:bredondimall} we obtain the following.
\begin{thm}\label{thm:improvedcat0vab} Let $G$ be a group acting properly by semisimple isometries on a complete proper $\mathrm{CAT}(0)$ space of topological dimension $n$. Then for any $0 \leqslant r \leqslant n$ we have $\mathrm{cd}_{\mathcal{F}_r}G \leq n+r+1.$
\end{thm}
\begin{remark}In Proposition \ref{prop:bredondimall} we obtain a better dimension bound when compared with \cite[Lemma~3.4]{cat0vab}. However, this does not improve the bound for $\mathrm{cd}_{\mathcal{F}_r}G$ in Theorem~\ref{thm:improvedcat0vab}.
Nonetheless, it does simplify the construction, as for any commensurability class $[H]$ one can use a single $\mathrm{CAT}(0)$ space $C \subset F^{\perp}$ rather than a countable collection of spaces $\minset{H_i} \cap F^{\perp}$.
\end{remark}
\section{Relation with Leary-Minasyan groups}\label{sec:lmgroups}
The following construction is due to Leary and Minasyan \cite{LM}.
\begin{defn}
\label{def:LM}
Let $T$ be a flat torus of dimension $n$. We identify $H=\pi_1(T)$ with a subgroup of $\operatorname{Isom}(\mathbb E^n)$. Let $\alpha$ be an element in the commensurator of $H$ in $\operatorname{Isom}(\mathbb E^n)$ such that the induced action of $\alpha$ on the Tits boundary $\partial_T\mathbb E^n$ has infinite order. Let $H_1$ and $H_2$ be finite index subgroups of $H$ such that $\alpha H_1\alpha^{-1}=H_2$. Let $T_1$ and $T_2$ be the coverings of $T$ corresponding to $H_1$ and $H_2$ respectively. Then $\alpha$ descends to an isometry $\alpha':T_1\to T_2$.
Now we define \[X=(T_1\times[0,1])\sqcup T/\sim\] where the relation $\sim$ is defined as follows. We identify points in $T_1\times\{0\}$ with points in $T$ via the covering map $T_1\to T$, and identify points in $T_1\times\{1\}$ with points in $T$ via $T_1\to T_2\to T$ where the first map is $\alpha'$ and the second map is the covering map. We endow $T_1\times[0,1]$ with the product metric. Since the identification maps are local isometries, $X$ has a well-defined quotient metric, and one readily verifies that this metric is locally $\mathrm{CAT}(0)$.
Then $\pi_1(X)$ is defined to be a \emph{Leary-Minasyan group} (or LM group). Note that $\pi_1(X)$ is an HNN--extension of the form \[\langle H,t\mid tH_1t^{-1}=H_2\rangle\] where the isomorphism between $H_1$ and $H_2$ is induced by $\alpha$. Moreover, $\pi_1(X)$ is a $\mathrm{CAT}(0)$ group acting geometrically on $\widetilde X\cong \mathbb E^n\times T$ where $T$ is a locally finite tree.
\end{defn}
It follows from Proposition~\ref{prop:normalize} that $\text{Condition } \mathrm{(C)}$ does not hold for LM groups. This leads to the following result of \cite{LM}.
\begin{thm}
There exists a $\mathrm{CAT}(0)$ group which does not satisfy $\text{Condition } \mathrm{(C)}$.
\end{thm}
\begin{cor}
\label{cor:LM cor}
There exists a group $G$ acting geometrically on a $\mathrm{CAT}(0)$ piecewise Euclidean complex such that $G$ does not satisfy $\text{Condition } \mathrm{(C)}$. There exists a closed non-positively curved manifold such that its fundamental group does not satisfy $\text{Condition } \mathrm{(C)}$.
\end{cor}
\begin{proof}
For the first statement, we claim that the space $X$ constructed in Definition~\ref{def:LM} admits a piecewise Euclidean structure. To obtain such structure, one chooses an appropriate net inside $X$ and takes the corresponding Voronoi tesselation.
For the second statement, we triangulate $X$ further such that it is a piecewise Euclidean simplicial complex. Then $\pi_1(X)$ can be embedded as a subgroup of the fundamental group $G'$ of some non-positively curved closed manifold via relative hyperbolization \cite{hu1995retractions}. Clearly $G'$ does not satisfy $\text{Condition } \mathrm{(C)}$.
\end{proof}
It turns out that LM groups are the only obstructions for a $\mathrm{CAT}(0)$ group to satisfy $\text{Condition } \mathrm{(C)}$ in the following sense.
\begin{prop}
\label{prop:conc obstruction}
Let $G$ be a group acting properly on a $\mathrm{CAT}(0)$ space $X$ by semisimple isometries. Then $\text{Condition } \mathrm{(C)}$ fails for $G$ if and only if there is a group homomorphism $\eta:G_0\to G$ such that $G_0=\langle H,t\mid tH_1t^{-1}=H_2\rangle$ is an LM group and $\eta|_H$ is injective.
\end{prop}
\begin{proof}
We first prove the `if' direction. Given the existence of such $\eta$, we claim the subgroup $K\le G$ generated by $\eta(t)$ cannot normalize any finite index subgroup (in particular $K$ is not the trivial subgroup). If the claim does not hold, then there is a finite index subgroup $H'\le H$ such that $\eta(tH't^{-1})=\eta(H')$. Since $tH't^{-1}$ and $H'$ are contained in $H$ and $\eta|_H$ is injective, we get $tH't^{-1}=H'$. This contradicts the definition of LM groups and Proposition~\ref{prop:normalize}.
Now we prove the `only if' direction. Suppose there is an abelian subgroup $H\le G$ such that a finitely generated subgroup $K\le \comm{G}{H}$ does not normalize any finite index subgroups of $H$. Then Proposition~\ref{prop:normalize} implies that $\Phi(K)$ is infinite. Since $\Phi(K)$ is a finitely generated subgroup of some orthogonal group, by Selberg's lemma \cite{selberg1962discontinuous} $\Phi(K)$ has a finite index torsion free subgroup, which is also infinite. Thus there is $t\in K$ such that $\Phi(t)$ is of infinite order. Choose finite index subgroups $H_1,H_2\le H$ such that $tH_1t^{-1}=H_2$. Let $G_0$ be the HNN--extension of $H$ along the isomorphism between $H_1$ and $H_2$ induced by $t$. Clearly there is a homomorphism $G_0\to G$ which is injective on $H$. It remains to show $G_0$ is an LM group. Let $F\subset X$ be a flat where $H$ acts cocompactly. Let $\phi$ be as in Definition~\ref{def:honolomy}. It follows from the proof of Lemma~\ref{lem:comm} that $\phi(t)$ conjugates $H_1$ to $H_2$ when viewing them as subgroups of $\operatorname{Isom}(F)$, which finishes the proof.
\end{proof}
\section{Examples, comments and questions}
\label{sec:examples}
In this section we discuss several examples in the literature which serve as a comparison to results in other sections. The examples show possible pathological behavior of commensurators. The following is a consequence of an example in Wise's thesis \cite{wise}.
\begin{prop}
\label{prop:anti torus}
There exists a torsion free group $G$ acting geometrically on a product of two trees such that there is a $\mathbb Z$--subgroup $H\le G$ whose commensurator $\comm{G}{H}$ is not finitely generated. Moreover, $\comm{G}{H}$ does not normalize any finite index subgroup of $H$.
\end{prop}
\begin{proof}
Let $X$ be the compact non-positively curved square complex defined in \cite[pp.38, Section II.2.1]{wise}. Let $a,b,c,x$ and $y$ be the loops in $X$ indicated in \cite[pp.38, Section II.2.1]{wise}. Let $V$ be the subspace of $X$ which is a union of $a,b$ and $c$. Take two copies of $X$ and identify them along $V$ to obtain $X'$ (\cite[Section II.5]{wise}). The universal cover of $X'$ is isomorphic to a product of two trees. We denote the edges (which are actually loops) of $X'$ by $a,b,c,x,y,x_1$ and $y_1$. Let $H=\langle c\rangle\le \pi_1(X')$ and $K=\comm{\pi_1(X')}{H}$. It is clear that $\Phi(K)$ is at most of order two ($\Phi$ is defined in Definition~\ref{def:honolomy}). On the other hand, it follows from \cite[pp.40, Figure 10]{wise} and the discussion around there that for any $n>0$, $y^n(y_1)^{-n}\in \comm{\pi_1(X')}{H}$ and the biggest subgroup of $H$ normalized by $y^n(y_1)^{-n}$ is of index $2^n$ in $H$. Thus $\comm{\pi_1(X')}{H}$ does not normalize any finite index subgroup of $H$, and thus it cannot be finitely generated by Proposition~\ref{prop:normalize}.
\end{proof}
\begin{remark}
Proposition~\ref{prop:anti torus} shows that the `finitely generated' assumption in Proposition~\ref{prop:normalize}, Theorem~\ref{thm:cubicalpropc} and Proposition~\ref{prop:symmetricspace} cannot be removed.
\end{remark}
Now we discuss another type of irreducible lattices. Let $(p,l)$ be a pair of distinct odd primes and let $\Gamma=\Gamma_{p,l}$ be the lattice in $PGL_2(\mathbb Q_p)\times PGL_2(\mathbb Q_l)$ defined in \cite[Section 3]{mozes1995actions} and \cite[Chapter 3]{rattaggi2004computations}. It is known that $\Gamma_{p,l}$ is torsion free and it acts geometrically on $T_{p+1}\times T_{l+1}$, where $T_k$ denotes the homogeneous tree of degree $k$. The following is a consequence of \cite{RattaggiRobertson}.
\begin{prop}
\label{prop:highest}
There exists a torsion free group $\Gamma$ acting geometrically on a product of two trees such that there is a $\mathbb Z$--subgroup $H\le \Gamma$ which is highest.
\end{prop}
\begin{proof}
Let $\Gamma=\Gamma_{p,l}$ be as above. The key property which we need, proven in \cite[Corollary 2.2]{RattaggiRobertson}, is that $\Gamma$ is \emph{commutative transitive}. Recall that a group is commutative transitive if the relation of commutativity is transitive on its non-trivial elements. This property implies that if $H$ is a maximal abelian subgroup in the sense that $H$ is not properly contained in another abelian subgroup, then $H$ is highest. However, there exists an example of $\Gamma_{p,l}$ which has a maximal abelian subgroup isomorphic to $\mathbb Z$ \cite[Corollary 3.7 and Example 3.8]{RattaggiRobertson}. Thus the proposition follows.
\end{proof}
\begin{cor}
\label{cor:commensurator computation}
Let $\Gamma=\Gamma_{p,l}$ and let $H\le \Gamma$ be a non-trivial abelian subgroup. Then the commensurator of $H$ in $\Gamma$ is isomorphic to either $\mathbb Z$ or $\mathbb{Z}^2$.
\end{cor}
\begin{proof}
By Theorem~\ref{thm:cubicalpropc}, $H$ satisfies $\text{Condition } \mathrm{(C)}$. Let $K\le \comm{\Gamma}{H}$ be a finitely generated subgroup. Then $K$ normalizes a finite index subgroup $H'\le H$. By Theorem~\ref{thm:combinenormalizerminset}.\ref{thm:centralizer}, the subgroup $\langle K,H'\rangle$ has a finite index subgroup that centralizes $H'$. Thus each $k\in K$ has a non-trivial power which centralizes $H'$. Then the commutative transitivity implies that $K$ is abelian. It follows that $\comm{\Gamma}{H}$ is a countable union of abelian subgroups, and thus it is abelian by commutative transitivity. Then the corollary follows.
\end{proof}
\begin{defn}
\label{defn:regular and singular}
Let $G$ be a group acting geometrically and cellularly on a product of infinite trees $T\times T'$. The Tits boundary of $T\times T'$ is a complete bipartite graph. A $\mathbb Z$--subgroup $H\le G$ is \emph{regular} if the two boundary points of an axis of $H$ are not vertices of the graph $\partial_T (T\times T')$, otherwise $H$ is \emph{singular}.
\end{defn}
Let $G$ be as in Definition~\ref{defn:regular and singular}. Then $G$ contains both a regular $\mathbb Z$--subgroup and a singular $\mathbb Z$--subgroup \cite[Lemma 8.8]{ballmann1995orbihedra}. Moreover, the commensurator of a regular $\mathbb Z$--subgroup is virtually $\mathbb{Z}^2$ \cite[Lemma 7.13]{ballmann1995orbihedra}.
In the special case where $G$ is $\Gamma_{p,l}$ defined above, the commensurator of a singular $\mathbb Z$--subgroup is either $\mathbb Z$ or $\mathbb{Z}^2$. In particular, for a singular $\mathbb Z$--subgroup $H$, the commensurator $\comm{\Gamma}{H}$ never acts cocompactly on the parallel set of the axis of $H$.
This is very different from the case of virtually compact special actions, where the algebraic properties of $\comm{\Gamma}{H}$ are always compatible with the geometry of the space on which $\Gamma$ acts. More precisely:
\begin{prop}
\label{prop:compactible}
Suppose $W$ is a compact virtually special cube complex. Then for any abelian subgroup $H\le \pi_1(W)$, the commensurator of $H$ acts cocompactly on the parallel set $P_F$, where $F$ is a flat in the universal cover $\widetilde W$ stabilized by $H$ such that $H\curvearrowright F$ is cocompact.
\end{prop}
\begin{proof}[Sketch of a proof]
Without loss of generality we can assume that $W$ is special. Since there is a local isometric embedding from $W$ to a compact Salvetti complex of some right-angled Artin group, it reduces to proving the lemma in the case where $W$ is a Salvetti complex. In this case, it follows from Servatius' centralizer theorem \cite[Section III]{servatius1989automorphisms} that the centralizer of an abelian subgroup $H$ of a right-angled Artin group acts cocompactly on the parallel set of $F$ in $\widetilde W$, where $F$ is a flat in $\widetilde W$ stabilized by $H$ such that $H\curvearrowright F$ is cocompact. Now the lemma follows.
\end{proof}
Recall that for a group $G$ acting geometrically and cellularly on a product of two trees, the action is reducible if and only if the quotient cube complex is virtually special \cite{wise}. This together with Corollary~\ref{cor:commensurator computation} and Proposition~\ref{prop:compactible} naturally leads to the following question, which is a variant of a question by Wise on whether irreducible actions always give rise to an anti-torus.
\begin{ques}
Suppose $G$ acts geometrically and cellularly on a product of two trees $X$. Suppose for each singular $\mathbb Z$--subgroup $H\le G$, the commensurator of $H$ acts cocompactly on the parallel set of an axis of $H$. Is $G$ reducible, i.e.,\ is $G$ commensurable to a product of two free groups?
\end{ques}
We also ask whether one can find examples similar to Proposition~\ref{prop:highest} in the world of symmetric spaces.
\begin{ques}\label{ques:notorious}
Let $G$ be a cocompact lattice in $SL(3,\mathbb R)$. Can $G$ contain a highest $\mathbb Z$--subgroup?
\end{ques}
One readily verifies that if such $\mathbb Z$--subgroup exists, then it is generated by a matrix $M$ in $SL(3,\mathbb R)$ such that
\begin{enumerate}
\item $M$ has one real eigenvalue and two complex eigenvalues;
\item the real eigenvalue is not $1$ or $-1$;
\item the rotation induced by the pair of complex eigenvalues has irrational angle.
\end{enumerate}
However, we do not know whether such matrix can live inside a cocompact lattice, though we speculate that the answer is positive.
\begin{remark} Note that Theorem~\ref{thm:hadamard} and Corollary~\ref{cor:centralizer} have the same conclusion for $\comm{G}{H}$. However, the geometry of the action of $\comm{G}{H}$ on $P_F$ could be quite different. For virtually compact special actions, the action of $\comm{G}{H}$ on $P_F$ is geometric (Proposition~\ref{prop:compactible}). We speculate that this is not the case for geometric actions on Hadamard manifolds. In particular, a positive answer to Question~\ref{ques:notorious} would give an example of a non-cocompact action of $\comm{G}{H}$ on $P_F$ (consider the action on the associated symmetric space).
\end{remark}
\end{document} |
\begin{document}
\begin{abstract}
We show that the maximal number of (real) lines in a (real) nonsingular spatial
quartic surface is 64 (respectively, 56). We also give a complete projective
classification of all quartics containing more than 52 lines: all such
quartics are
projectively rigid.
Any value not exceeding 52 can appear as the number of lines of an
appropriate quartic.
\end{abstract}
\title{Lines on quartic surfaces}
\section{Introduction}
\subsection{Principal results}\label{s.results}
Throughout the paper,
all algebraic varieties are defined over~$\C$.
Given an algebraic surface $X\subset\Cp3$, we denote by $\operatorname{Fn}(X)$ the set of
projective lines contained in~$X$. If $X$ is real (see definition below),
$\operatorname{Fn}_\R(X)$ stands for the set of real lines contained in~$X$.
\theorem[see~\autoref{proof.64}]\label{th.unique}
Let $X\subset\Cp3$ be a nonsingular quartic, and assume that
$\ls|\operatorname{Fn}(X)|>52$.
Then $X$ is projectively equivalent to either
\roster*
\item
Schur's
quartic~$\quartic{64}$, see \autoref{s.Schur}, or
\item
one of the three
quartics~\quartic{60},
\quartic{60.2},
\bquartic{60.2}
described in \autoref{ss.th.unique}, or
\item
the quartic \quartic{56.real}, see \autoref{s.56}, or quartics \quartic{56},
\bquartic{56},
\quartic{q56} described in \autoref{ss.th.unique}, or
\item
one of the two quartics \quartic{54}, \quartic{q54}
described in \autoref{s.60}.
\endroster
In particular, one has
$\ls|\operatorname{Fn}(X)|=64$, $60$, $56$, or $54$, respectively.
\endtheorem
\corollary[see Segre~\cite{Segre} and Rams, Sch\"{u}tt~\cite{rams:2012}]\label{th.64}
Any nonsingular quartic in $\Cp3$ contains at most $64$ lines.
\done
\endcorollary
Note that the field of definition~$\C$ is essential for all statements. For
example, over $\Bbb F_9$, the quartic given by the equation
$z_0 z_3^3+z_1 z_2^3 + z_1^3 z_2+ z_0^3 z_3=0$
contains $112$ lines. According to Rams, Sch\"{u}tt~\cite{rams:2012},
the bound $\ls|\operatorname{Fn}(X)|\le64$ holds over any field of characteristic other
than~$2$ or~$3$.
As
was observed by T.~Shioda, $\quartic{56}$ and \bquartic{56} are
alternative projective models of
the Fermat quartic: this fact follows from the description of their
transcendental lattice, see \autoref{lem.unique}.
I.~Shimada has recently
found an explicit defining equation of these surfaces.
Other similar examples are discussed in \autoref{rem.abstract.K3}.
Recall that a \emph{real variety} is a complex algebraic variety~$X$ equipped
with a \emph{real structure}, \ie, an
anti-holomorphic involution $\conj\:X\to X$.
The \emph{real part} of~$X$ is
the fixed point set $X_\R:=\operatorname{Fix}\conj$. A subvariety (\eg, a line)
$Y\subset X$ is called \emph{real} if it is $\conj$-invariant.
When speaking about a \emph{real quartic} $X\subset\Cp3$, we assume that the
real structure on~$X$
is the restriction of the standard coordinatewise complex conjugation
$z\mapsto\bar z$ on $\Cp3$.
\corollary[see~\autoref{proof_th.56}]\label{th.56}
Let $X\subset\Cp3$ be a nonsingular \rom(over~$\C$\rom) real quartic,
and assume that $\ls|\operatorname{Fn}_\R(X)|>52$.
Then $X$ is projectively equivalent \rom(over $\R$\rom) to
the quartic $\quartic{56.real}$ given by \eqref{eq.56}.
In particular, one has $\ls|\operatorname{Fn}_\R(X)|=56$,
and this is the maximal number of real lines that can be contained in
a nonsingular real quartic.
\endcorollary
\addendum[see~\autoref{proof.counts}]\label{ad.counts}
For any number
\[*
n\in\{0,1,\ldots,51,52,54,56,60,64\},
\]
there exists a nonsingular
quartic $X\subset\Cp3$ such that $\ls|\operatorname{Fn}(X)|=n$.
For any
number
\[*
m\in\{0,1,\ldots,47,48,52,56\},
\]
there exists a nonsingular real
quartic $X\subset\Cp3$ such that $\ls|\operatorname{Fn}_\R(X)|=m$.
\endaddendum
Thus, for the moment
we are not certain about the values $\ls|\operatorname{Fn}_\R(X)|=49,50,51$.
We know three families of real quartics with $52$ real lines;
for a list of currently known large configurations of lines, see
\autoref{tab.list} in \autoref{obs.pencils}.
The quartic \quartic{56.real} can be defined over~$\Q$;
however, some of the lines are still defined only over $\Q(\sqrt2)$
(see \autoref{rem.Y.rational}).
At present, we do not know how many lines defined over~$\Q$ a quartic defined
over~$\Q$ may have; since $\Q\subset\R$ and
\quartic{56.real} has been ruled out,
\autoref{th.56} implies that this maximal number is at
most~$52$, the first candidates being the configurations
\config{52.5}, \config{52.real}, \config{52.0}.
Though, see \autoref{rem.min.field}.
Another open question is the maximal number of lines
contained in a triangle free
configuration, see \autoref{th.Segre} and \autoref{rem.triang.free}.
\subsection{Contents of the paper}
In \autoref{S.history},
we start with a brief introduction to the history of the subject.
In \autoref{S.reduction}, we recall basic notions and facts related to
integral lattices and $K3$-surfaces and use the theory of $K3$-surfaces to
reduce the original geometric problem to a purely arithmetical question
about \emph{configurations}; the
main results of this section are stated in \autoref{arithmetical_reduction}.
The simplest properties of
configurations, not related directly to
quartic surfaces, are treated in \autoref{S.geometry}, whereas
\autoref{S.arithm} deals with the more subtle arithmetic properties of the main
technical tool of the paper, the so-called \emph{pencils}.
The technical part is \autoref{S.counting}: we outline the algorithm used for
counting lines in a pair of obverse pencils and state the counts obtained in
the output. \autoref{tab.list} lists most known large configurations
of lines.
In \autoref{S.triang.free}, we digress to the so-called \emph{triangle free}
configurations, for which one can obtain a stronger bound
on the number of lines, see \autoref{th.Segre}.
The principal results of the paper stated in \autoref{s.results} are proved
in \autoref{S.pencils}. Finally, in \autoref{S.examples}, we discuss the
properties of quartics with many lines (in particular, \autoref{s.56}
contains an explicit equation of \quartic{56.real}) and make a few concluding
remarks.
\begin{equation*} z_0^d+z_1^d+z_2^d+z_3^d=0, \end{equation*}
where $[z_0:z_1:z_2:z_3]$ are homogeneous coordinates of $\PP^3$, contains
exactly $3d^2$ lines, for all $d\ge 3$. This then prompts the more relevant
question about how many lines a surface of degree $d\ge 4$ can have. In
particular, for a fixed $d\ge 4$, is there an upper bound for the number of
lines that a surface of degree $d$ can contain?
At this point it is appropriate to call attention to the difference between
the existence of rational curves on a surface and the existence of lines. A
line in $\PP^3$ is defined as the intersection of two hyperplanes but a
rational curve is an isomorphic image of $\PP^1$, which need not be a line.
Whereas we are expecting a finite number of lines on a surface the situation
is drastically different for the existence of rational curves. While a
generic quartic does not contain a line, it is shown by Mori and Mukai
\cite{mori:1982} that every projective $K3$-surface, in particular every smooth
quartic in $\PP^3$, contains at least one rational curve. Moreover Bogomolov,
Hassett and Tschinkel showed in \cite{bogomolov:2011} that a generic
$K3$-surface, including a generic quartic surface in $\PP^3$, contains infinitely
many rational curves. Going away from generic case to specific examples,
Bogomolov and Tschinkel showed in \cite{bogomolov:2000} that if a $K3$-surface
admitting an elliptic fibration has Picard number at most 19, then it
contains infinitely many rational curves.
Xi Chen showed in \cite{chen:1999} that for a generic quartic in $\PP^3$,
every linear system $\mathcal{O}(n)$, for any $n>0$, contains a nodal
rational curve. In fact Yau and Zaslow in \cite{yau:1996}, inspired by string
theory, counted those rational curves for the $n=1$ case.
Existence of smooth curves on quartic surfaces in $\PP^3$ is also relatively
well understood. Mori showed in \cite{mori:1984} that a quartic surface in
$\PP^3$ contains a smooth curve of degree $n>0$ and genus $g\ge 0$ if and
only if
either
\roster*
\item
$g=(n^2/8)+1$, or
\item
$g<(n^2/8)$ and $(n,g)\not=(5,3)$.
\endroster
The problem of counting lines on smooth surfaces in $\PP^3$ is on the other
hand a totally different game.
The first work which we can trace about this problem is Schur's article
\cite{schur:1882} where he exhibits a certain quartic surface which contains
64 lines. This surface is now known as Schur's quartic and is given by the
equation
\begin{equation*}
z_0(z_0^3-z_1^3)=z_2(z_2^3-z_3^3).
\end{equation*}
In \autoref{s.Schur} we give an account of the 64 lines on this quartic.
Apparently no progress was made on this result for about half a century until
1943 when Segre published some articles on the arithmetic and geometry of
surfaces in $\PP^3$. In one of these articles, in \cite{Segre}, he
claimed that the number of lines which can lie on a quartic surface cannot
exceed 64. Since Schur's quartic already contains 64 lines, this result of
Segre would close the question for quartics were it not for a flaw in his
arguments which was only recently detected and corrected by Rams and Sch\"{u}tt
in \cite{rams:2012}. Rams and Sch\"{u}tt showed that the theorem is correct but
the proof needs some modifications using techniques which were not available
to Segre at that time.
Segre article \cite{Segre} contains an upper bound for the number of
lines which can lie on a surface of degree $d\ge 4$. His upper bound, which
is not affected by his erroneous argument about quartics, is $(d-2)(11d-6)$.
This bound is not expected to be sharp. For quartics it
predicts 76,
larger than the actual 64.
There is one curious fact about Segre's work of 1943. Most of the techniques
he uses were already in Salmon's book \cite{salmon:1862} which was originally
published in 1862. It would be reasonable to expect that a work similar to
Segre's be published much earlier than 1943. We learn from a footnote in
\cite{Segre} that the problem was mentioned by Meyer in an encyclopedia
article \cite{meyer:1908} as early as 1908 but even that was not enough to
spur interest in the subject at the time.
After Segre's work there was again a period of long silence on the problem of
lines on surfaces. In 1983 Barth mentioned this problem in \cite{barth:1983}
which turned out to be an influential manuscript on the subject. There he
also noted that since a smooth quartic in $\PP^3$ is a $K3$-surface and since
by Torelli theorems a $K3$-surface is nothing but its Picard
lattice, all
results of Segre on quartics could possibly be reproduced in the lattice
language. This teaser was one of the challenges which prompted us to work on this
problem
thirty
years later.
In 1995, Caporaso, Harris and Mazur, in \cite{caporaso:1995}, while
investigating the number of rational points on a curve over an algebraic
number field, attacked the problem of finding a lower bound for the maximal
number $N_d$ of lines lying on a surface of the form
$\varphi(z_0,z_1)=\varphi(z_2,z_3)$, where $\varphi$ is a homogeneous form of degree
$d$. Their arguments being purely geometric, their findings made sense in the
complex domain. They found that in general for all $d\ge 4$,
\begin{equation*} N_d\ge 3d^2, \text{ but } N_4\ge 64, \quad N_6\ge 180, \quad N_8\ge 256, \quad N_{12}\ge 864, \quad N_{20}\ge 1600. \end{equation*}
Here the equality $N_4=64$ follows from Segre's work \cite{Segre}.
In 2006 Boissi\'{e}re and Sarti attacked this problem in \cite{boissiere:2007}
using group actions. They studied the maximal number of lines on symmetric
surfaces in $\PP^3$, where a surface is called \emph{symmetric}
if its equation is of the
form
\begin{equation*}
\varphi(z_0,z_1)=\psi(z_2,z_3),
\end{equation*}
where $\varphi$ and $\psi$ are homogeneous forms of degree $d$, as
studied by
Caporaso, Harris and Mazur. This approach may seem restrictive at first;
nonetheless,
it is reasonable since Schur's surface which contains the maximal
possible number of lines a quartic surface can contain is itself of this
form. Boissi\'{e}re and Sarti first showed that for symmetric surfaces, the
inequalities about $N_d$ which Caporaso, Harris and Mazur obtained are
actually equalities. This increased the hope that the symmetric surfaces are
candidates to carry the most number of lines among other surfaces of the same
degree. However, Boissi\'{e}re and Sarti showed in the same work that this
expectation fails. They showed that the non-symmetric surface given by
\begin{align*} z_0^8&+z_1^8+z_2^8+z_3^8+168 z_0^2z_1^2z_2^2z_3^2 \\
&+14(z_0^4z_1^4+z_0^4z_2^4+z_0^4z_3^4+z_1^4z_2^4+z_1^4z_3^4+z_2^4z_3^4)=0
\end{align*}
contains 352 lines,
which is far greater than the upper bound of 256 for the symmetric
surfaces of the same degree. Notice that the number 352 is within the limits
allowed by Segre's upper bound, which gives 492 in this case.
Finally, almost thirty years after Barth's teaser, two teams started to work
on this problem, unaware of each other, from two different points of
approach. While we concentrated on understanding the ``lines on surfaces''
problem for $K3$-surfaces in $\PP^3$ and aimed at transliterating Segre's
results into the lattice language, Rams and Sch\"{u}tt decided to re-attack the
problem by using elliptic fibration techniques
in \cite{rams:2012}.
They
discovered a flaw in Segre's arguments which rendered his
proof void;
nonetheless, his theorem proved to be correctly stated.
Moreover,
Rams and Sch\"{u}tt's proof works on any
algebraically closed field of any characteristic
$p\not=2,3$.
Schur's quartic
becomes singular
when $p=2$ (still containing $64$ lines); when $p=3$,
it is shown in \cite{rams:2012}
that the surface contains 112 lines.
It is interesting to note that the concept of an elliptic fibration is
inevitable in studying the lines on a quartic. If $X$ is a smooth quartic in
$\PP^3$ and $L$ is a line lying on $X$,
one can parametrize the space of planes
$\Lambda_t$ in $\PP^3$ passing through $L$ by $t\in\PP^1$. Then any point
$p\in X$ determines a unique plane $\Lambda_t$, and the map sending $p$ to
$t$ is an elliptic fibration. If $p\in L$, we take $\Lambda_t$ as the plane
tangent to $X$ at~$p$. Segre starts with this observation but, using intuitive
geometric arguments,
he
erroneously claims that the maximal number of lines in the fibers of the
pencil is~$18$. The true bound is~$20$, see~\cite{rams:2012} or
\eqref{tablichka}, which calls for more work to establish the ultimate
bound~$64$ for the total number of lines in~$X$.
\section{The reduction}\label{S.reduction}
Throughout the paper, we consider various abelian groups $A$ equipped
with bilinear and/or quadratic forms. Whenever the form is fixed, we use
the abbreviation $x \cdot y$ (respectively, $x^2$)
for the value of the bilinear form on $x \otimes y$ (respectively, the quadratic form on $x$).
Given a subset $B \subset A$, its \emph{orthogonal complement} is $B^\perp =
\bigl\{x \in A \bigm|
\text{$x\cdot y = 0$ for all $y \in B$}\bigr\}.
$
\subsection{Integral lattices}\label{s.lattices}
An \emph{\rom(integral\rom) lattice} is a finitely generated free abelian group~$S$
supplied with a symmetric bilinear form $b\:S\otimes S\to\ZZ$.
A lattice~$S$ is
{\it even\/} if $x^2=0\bmod2$ for all $x\in S$.
As the transition matrix between two integral bases
has determinant $\pm1$, the
determinant $\det S\in\ZZ$
({\it i.e.}, the determinant
of the Gram matrix of~$b$ in
any
basis of~$S$)
is well defined.
A lattice~$S$ is called
{\it nondegenerate\/} if $\det S\ne0$; it is called {\it
unimodular\/} if $\det S=\pm1$.
Alternatively, $S$ is nondegenerate if and only if its \emph{kernel} $\ker S := S^\perp$
is trivial. An \emph{isometry} $\psi\: S \to S'$
between two lattices
is a group homomorphism respecting the bilinear forms;
obviously, one always has $\Ker \psi \subset \ker S$.
The group of auto-isometries of a
nondegenerate
lattice $S$ is denoted by $\OG(S)$.
Given a collection of subsets/elements $A_1,\ldots$ in~$S$, we
use the notation $\OG(S,A_1,\ldots)$ for the subgroup of $\OG(S)$
preserving each~$A_i$ as a set.
Given a lattice~$S$,
the bilinear form extends to $S\otimes\Q$ by linearity.
The inertia indices $\sigma_\pm S$, $\sigma_0 S$ and the signature $\sigma S$ of $S$
are defined as those of $S \otimes \Q$.
The orthogonal projection establishes a linear isomorphism between any two maximal positive definite subspaces of $S \otimes \Q$,
thus providing a way for comparing their orientations.
A coherent choice of orientations of all
maximal positive definite subspaces is called a \emph{positive sign structure}.
Assuming $S$ nondegenerate, we denote by $\OG^+(S) \subset \OG(S)$ the subgroup
formed by the auto-isometries preserving a positive sign structure.
A {\it $d$-polarized lattice} is a lattice $S$ with a distinguished
vector $h \in S$,
referred to as the {\it polarization},
such that $h^2 = d$.
We use the abbreviation $\OG_h(A_1, \ldots)$ for $\OG(h, A_1, \ldots)$;
a similar convention applies for $\OG^+$.
If
$S$ is nondegenerate, the dual group $S^\vee=\Hom(S,\ZZ)$ can
be identified with the subgroup
$$
\bigl\{x\in S\otimes\Q\bigm|
\text{$x\cdot y\in\ZZ$ for all $y\in S$}\bigr\}.
$$
In particular, $S\subset S^\vee$ and the quotient $S^\vee\!/S$
is a finite group; it is called the {\it discriminant group\/}
of~$S$ and is denoted by $\discr S$ or~$\Cal S$. The discriminant
group~$\Cal S$ inherits from $S\otimes\Q$ a symmetric bilinear form
$\Cal S\otimes\Cal S\to\Q/\ZZ$,
called the {\it discriminant form},
and, if $S$ is even, a quadratic
extension $\Cal S\to\Q/2\ZZ$ of this form.
When
speaking about the discriminant groups, their
(anti-)isomorphisms, \etc., we always assume that the discriminant
form (and its quadratic extension if the lattice is even) is taken
into account.
The number of elements in $\Cal S$ is equal to
$\mathopen|\det S\mathclose|$; in
particular, $\Cal S=0$ if and only if $S$ is unimodular.
Given a prime number $p$, we denote by $\Cal S_p$ or $\discr_p S$
the $p$-primary part of $\Cal S = \discr S$.
The form $\Cal S$ is called \emph{even} if there is no order $2$ element $\alpha \in \Cal S_2$
with $\alpha^2 = \pm \frac{1}{2} \bmod 2\Z$.
We use the notation $\ell(\Cal S)$ for the minimal
number of generators of~$\Cal S$, and we put $\ell_p(\Cal S) = \ell(\Cal S_p)$.
The quadratic form on $\Cal S$ can be described by means
of an analog $(\varepsilon_{ij})$
of the Gram matrix: assuming that $d_1 \mathbin | d_2 \mathbin | \ldots \mathbin | d_\ell$
are the invariant factors of $\Cal S$, we pick a basis $\alpha_1, \alpha_2, \ldots, \alpha_\ell \in \Cal S$
so that the order of $\alpha_i$ is $d_i$, and let $\varepsilon_{ij} = \alpha_i \cdot \alpha_j \bmod \Z$ for $i \ne j$
and $\varepsilon_{ii} = \alpha^2_i \bmod 2\Z$.
A similar construction applies to $\Cal S_p$.
Furthermore, according to R. Miranda and D. Morrison \cite{Miranda.Morrison:book},
unless $p = 2$ and $\Cal S_2$ is odd, the determinant of the resulting matrix is
a unit in $\Z_p$ well defined
modulo $(\Z^*_p)^2$; this determinant is denoted by
$\det_p \Cal S\in\Z_p^*/(\Z^*_p)^2$.
Two nondegenerate lattices are said to have the same {\it genus} if
their localizations
at all primes and at infinity
are pairwise
isomorphic.
The genus of
an even lattice is determined by its signature and the isomorphism
class of the quadratic extension of the discriminant
form,
see~\cite{Nikulin:forms}.
In what follows, we denote by $[s]$ the rank one lattice $\Z w$, $w^2 = s$.
The notation $\bU$ stands for the {\it hyperbolic plane},
{\it i.e.},
the lattice generated by a pair of vectors~$u$,~$v$
(referred to as a {\it standard basis\/} for~$\bU$)
with
$u^2=v^2=0$ and $u\cdot v=1$. Furthermore, given a lattice~$S$, we
denote by~$nS$, $n\in\NN$, the orthogonal direct sum of $n$~copies
of~$S$, and by~$S(q)$, $q\in\QQ$, the lattice obtained from~$S$ by
multiplying the form by~$q$ (assuming that the result is still an
integral lattice). The notation $n\Cal S$ is also used for the
orthogonal sum of $n$~copies of a discriminant group~$\Cal S$.
A {\it root\/} in an even lattice~$S$ is a vector $r\in S$ of
square~$-2$. A {\it root system\/} is an even negative definite
lattice generated by its roots. Recall that each root system
splits (uniquely up to order of the summands) into orthogonal sum
of indecomposable root systems, the latter being those of types
$\bA_n$, $n\ge1$, $\bD_n$, $n\ge4$, $\bE_6$, $\bE_7$, or~$\bE_8$,
see~\cite{Bourbaki:Lie:French}.
From now on, we fix an even unimodular lattice $\bold L$ of rank~$22$ and signature~$-16$.
All such lattices are isomorphic to $2\bE_8\oplus3\bU$.
It can easily be shown that, up to
the action of $\OG^+(\bold L)$,
this lattice has a unique $4$-polarization $h$;
thus, $\bold L$ is always considered equipped with a distinguished $4$-polarization $h$
and a positive sign structure.
We also fix the notation for certain discriminant forms.
Given coprime integers $m$, $n$ such that one of them is even,
$\<\frac{m}{n}\>$ is the quadratic form $1 \mapsto \frac{m}{n} \bmod 2\Z$
on $\CG{n}$.
Given a positive integer $k$, consider the group $\CG{2^k} \times \CG{2^k}$
generated by $\alpha = (1, 0)$ and $\beta=(0, 1)$;
denote by $\Cal U_{2^k}$ (respectively, $\Cal V_{2^k}$) the quadratic form
on the above group such that $\alpha \cdot \beta = \frac{1}{2^k} \bmod \Z$
and $\alpha^2 = \beta^2 = 0 \bmod 2\Z$ (respectively, $\alpha^2 = \beta^2 = \frac{1}{2^{k - 1}} \bmod 2\Z$).
An {\it extension\/} of a nondegenerate lattice~$S$ is another lattice~$M$
containing~$S$. An \emph{isomorphism} between two extensions $M', M'' \supset S$
is a bijective isometry $M' \to M''$ identical on $S$.
More generally, given a subgroup $G \subset \OG(S)$,
a $G$-isomorphism is a bijective isometry $M' \to M''$
whose restriction to $S$ is an element of $G$.
The two extreme cases are those of \emph{finite index} extensions
(where $S$ has finite index in $M$) and
\emph{primitive} ones
(where $M/S$
is torsion free).
The general case
$M\supset S$ splits into the finite index
extension $\tilde S\supset S$ and primitive extension
$M\supset\tilde S$, where
$$
\tilde S=\bigl\{x \in M\bigm|nx\in S\ \text{for some $n\in\ZZ$}\bigr\}
$$
is the {\it primitive hull\/} of~$S$ in~$M$.
If $S$ is nondegenerate and $M \supset S$ is a finite index extension, we have a chain of inclusions
$$
S \subset M \subset M^\vee \subset S^\vee,
$$
and, hence, a subgroup $\Cal K = M/S \subset \Cal S$;
this subgroup is called the \emph{pivot} of $M \supset S$.
The pivot $\Cal K$ is \emph{$b$-isotropic}, that is, the restriction to $\Cal K$
of the discriminant form $\Cal S \otimes \Cal S \to \Q/\Z$ is trivial.
Furthermore, the lattice $M$ is even if and only if $S$ is even
and $\Cal K$ is \emph{isotropic}, that is, the restriction to $\Cal K$
of the quadratic extension $\Cal S \to \Q/2\Z$ of the discriminant form is trivial.
\begin{theorem}[V. Nikulin \cite{Nikulin:forms}]\label{thm:Nik1} Given a nondegenerate
lattice~$S$,
the map sending $M \supset S$ to the pivot $\Cal K = M/S \subset \Cal S$
establishes a
one-to-one correspondence
between the set of isomorphism classes of finite index extensions of $S$
and the set of $b$-isotropic subgroups of $\Cal S$.
Under this correspondence, one has
$\discr M=\Cal K^\perp\!/\Cal K$ and
$M=\bigl\{x\in S^\vee\bigm|x\bmod S\in\Cal K\bigr\}$.
\end{theorem}
In the other extreme case, we confine ourselves to primitive extensions $M \supset S$
to an even unimodular lattice $M$. Assuming $S$
nondegenerate,
these are equivalent to
appropriate finite index extensions of $S \oplus S^\perp$, the pivot of the latter
giving rise to an anti-isomorphism $\Cal S \to \discr S^\perp$ and thus determining the genus of $S^\perp$.
It follows that, given a subgroup $G \subset \OG(S)$ and the signature of $M$,
a $G$-isomorphism class of even unimodular primitive extensions $M \supset S$
is determined by a choice of
\roster*
\item
an even lattice $T$ such that
$\discr T \cong -\Cal S$ and $\sigma_\pm T = \sigma_\pm M - \sigma_\pm S$, and
\item a bi-coset in $G\backslash\!\Aut\discr T/\!\OG(T)$.
\endroster
For details see \cite{Nikulin:forms}.
The following theorem is a combination of the above observation and Nikulin's existence theorem \cite{Nikulin:forms}
applied to the genus of $S^\perp$.
\theorem[V. Nikulin \cite{Nikulin:forms}]\label{th.Nikulin}
A nondegenerate even lattice $S$ admits a primitive extension to the lattice $\bold L$
if and only if the following conditions are satisfied\rom:
\roster
\item $\sigma_+ S \leq 3$, \ $\sigma_- S \leq 19$, and
$\rank S + \ell(\Cal S) \leq 22$\rom;
\item $(-1)^{\sigma_+ S - 1}\ls|\Cal S| = \det_p \Cal S \bmod (\Z^*_p)^2$ for all odd prime numbers $p$
such that $\rank S + \ell_p(\Cal S) = 22$\rom;
\item either $\rank S + \ell_2(\Cal S) < 22$, or $\Cal S_2$ is odd, or
$\ls|\Cal S| = \pm \det_2 \Cal S \bmod (\Z^*_2)^2$.
\endroster
\endtheorem
\subsection{$K3$-surfaces}\label{s.K3}
Let $X\subset\Cp3$ be a nonsingular quartic.
It is a minimal $K3$-surface. Introduce the following
objects:
\roster*
\item $L_X=H_2(X)=H^2(X)$,
regarded as a lattice \via\ the intersection form
(we always identify homology and cohomology \via\ Poincar\'{e}
duality);
\item
$h_X\in L_X$, the class of a generic plane section
of~$X$;
\item
$\Cal F(X) \subset H_2(X; \Z)$,
the primitive sublattice
spanned over $\Q$ by $h_X$ and the classes of lines $l \subset X$
(the {\it Fano configuration}
of $X$);
\item
$\omega_X\subset L_X\otimes\R$, the oriented $2$-subspace
spanned by the real and imaginary parts of the class of a
holomorphic $2$-form on~$X$ (the \emph{period} of~$X$).
\endroster
Note that $\omega_X$ is positive definite and orthogonal to $h_X$;
furthermore, the Picard group $\operatorname{Pic} X$ equals $\omega_X^\perp \cap L_X$.
The following statement is an immediate consequence of the above description of $\operatorname{Pic} X$
and the Riemann--Roch theorem.
\lemma\label{line_classes}
A vector $a \in L_X$ is realized by a line $l\subset X$
if and only if $a \cdot \omega_X = 0$, $a^2 = -2$, and $a \cdot h_X = 1$.
Distinct lines represent distinct classes in $L_X$.
\done
\endlemma
In view of the uniqueness part of this statement, we identify lines in $X$
and their classes in $L_X$.
As is well known, the lattice $L_X$ is isomorphic to $\bold L$;
a \emph{marking} of $X$ is a choice of
a particular isomorphism $\psi\: L_X \to \bold L$
such that $\psi(h_X) = h \in \bold L$
and the maximal positive definite subspace $\psi(\R h_X \oplus \omega_X)$
is positively oriented.
Consider a period $\omega$, {\it i.e.}, an oriented positive definite
$2$-subspace $\omega \subset \bold L \otimes \R$
orthogonal to $h$.
The following statement provides a criterion for the realizability of the
triple $(\bold L, h, \omega)$
by a quartic, \ie, the existence of a marked nonsingular quartic $(X, \psi)$
such that $\psi$ takes $\omega_X$ to $\omega$.
It is a combination of the surjectivity of the period map for $K3$-surfaces
(see Vik\. Kulikov
\cite{Kulikov:periods})
and Saint-Donat's description~\cite{Saint-Donat} of projective models of $K3$-surfaces.
\proposition\label{Saint-Donat}
A triple $(\bold L ,h , \omega)$ is realizable by a quartic $X \subset \Cp3$ if and only if
$\bold L$ contains no vector $e$ such that $e \cdot \omega = 0$ and either
\roster
\item\label{ex-div1} $e^2 = -2$ and $e \cdot h= 0$, or
\item\label{ell-pencil1} $e^2 = 0$ and $e \cdot h = 2$.
\pni
\endroster
\endproposition
Denote by $\Omega$ the space of oriented positive definite $2$-subspaces $\omega \subset \bold L \otimes \R$
orthogonal to $h$ and such that $\R h \oplus \omega$ is positively oriented.
By \autoref{Saint-Donat}, the image of the \emph{period map}
$(X, \psi) \mapsto \psi(\omega_X)$
is the subset $\Omega^\circ \subset \Omega$ obtained by removing the locally finite collection
of codimension two subspaces
\[*
\Omega_e = \{\omega \in \Omega \; | \; \omega \cdot e = 0 \},
\]
where $e \in \bold L$ runs over all vectors as in \autoref{Saint-Donat}\iref{ex-div1} or \iref{ell-pencil1}.
Restricting to $\Omega^\circ$ Beauville's universal family \cite{Beauville:moduli} of marked polarized $K3$-surfaces,
we obtain the following statement on marked quartics.
\proposition\label{moduli}
The subset $\Omega^\circ \subset \Omega$ is a fine moduli space of marked nonsingular quartics in $\Cp3$. \pni
\endproposition
Now, let $X \subset \Cp3$ be a real nonsingular quartic.
The complex conjugation induces an involutive isometry $c_X\: L_X \to L_X$
taking $h_X$ to $-h_X$, preserving $\omega_X$ as a subspace and reversing its orientation.
In particular, it follows that the positive inertia index of the skew-invariant eigenlattice of $c_X$
equals $2$.
Consider an involutive isometry $c\: \bold L \to \bold L$ and denote by $L_{\pm c}$
its ($\pm 1$)-eigen\-lattices.
The involution $c$ is called \emph{geometric} if $h \in L_{-c}$ and $\sigma _+ L_{-c} = 2$.
As explained above, a marking of a nonsingular real quartic $X \subset \Cp3$ takes $c_X$ to a geometric involution
on $\bold L$.
This involution
is called
the \emph{homological type} of $X$; it is determined by $X$ up to
the action of $\OG^+_h(\bold L)$.
Conversely, according to Nikulin \cite{Nikulin:forms}, any geometric involution $c\: \bold L \to \bold L$
is the homological type of a marked nonsingular real quartic,
and the periods of such quartics constitute the whole space
\[
\Omega^\circ \cap \{\R\omega_+ \oplus \R\omega_- \; | \; \omega_\pm \in L_{\pm c} \otimes \R\}. \label{real_periods}
\]
\subsection{Configurations}\label{s.configurations}
Motivated by \autoref{line_classes}, we define
a {\it line} in a $4$-polarized lattice $S$ as a vector $a \in S$
such that $a^2 = -2$ and $a \cdot h = 1$.
The set of all lines in~$S$ is denoted by $\operatorname{Fn}(S)$.
\definition\label{pre-conf}
A {\it pre-configuration} is a $4$-polarized lattice $S$
generated over $\Q$ by its polarization $h$ and all lines $a \in S$.
A pre-configuration $S$ is called {\it hyperbolic} if $\sigma_+(S) = 1$.
A \emph{configuration} is a
nondegenerate
hyperbolic pre-configuration~$S$
that
contains no vector $e$ such that either
\roster
\item\label{ex-div} $e^2 = -2$ and $e \cdot h= 0$, or
\item\label{ell-pencil} $e^2 = 0$ and $e \cdot h = 2$
\endroster
(\cf. Proposition \ref{Saint-Donat}).
For a pre-configuration $(S, h)$ and a subset $A \subset \operatorname{Fn}(S)$,
the notation $\operatorname{span}_h(A)$ stands for
the pre-configuration $S'\subset S$ generated (over~$\Z$) by $A$ and $h$.
\enddefinition
\remark\label{subconf}
Let $S$ be a nondegenerate hyperbolic pre-configuration. Then
\roster*
\item
$S$ contains finitely many lines, and
\item
any pre-configuration $S' \subset S$ is also
nondegenerate and hyperbolic.
\endroster
In particular, if $S$ is a configuration, then so is~$S'$.
\endremark
Let
$L \subset \bold L$ be a nondegenerate primitive polarized sublattice.
An {\it $L$-configuration}
is a
configuration $S\subset L$ \emph{primitive} in~$L$.
Two $L$-configurations $S',S''\subset L$
are said to be \emph{isomorphic}, or \emph{strictly isomorphic},
if
there exists an element of the group $\OG^+_h(\bold L,L)$ sending~$S'$ to~$S''$.
An {\it $L$-realization}
of a pre-configuration $S$ is a polarized isometry $\psi\: S \to L$
such that the image $\Im\psi$ is non-degenerate, \ie,
$\Ker\psi = \ker S$.
If the primitive hull $(\Im(\psi) \otimes \Q) \cap L$ is an
$L$-configuration,
the realization~$\psi$ is called \emph{geometric}.
A configuration admitting
a primitive geometric $L$-realization is called
\emph{$L$-geometric}
(or just \emph{geometric} if $L=\bold L$).
Note that there is a subtle difference between
$\bold L$-configurations and geometric ones: typically,
the former are considered up to
the action of $\OG_h^+(\bold L)$,
whereas the latter, up to abstract automorphisms of polarized lattices
(\cf. \autoref{lem.unique}).
To
simplify the classification of configurations,
we introduce also the notion of weak
isomorphism. Namely, two $\bold L$-configurations are said to be \emph{weakly
isomorphic} if they are taken to each other by an element of the group
$\OG_h(\bold L)$; in other words, we disregard the positive sign structure
on~$\bold L$.
Respectively, an $\bold L$-configuration $S\subset\bold L$
is called \emph{symmetric}
if it is preserved by an element $a \in \OG_h(\bold L) \sminus \OG^+_h(\bold L)$;
if such an element $a$ can be chosen involutive (respectively, involutive and identical on $S$),
the configuration $S$ is called \emph{reflexive} (respectively, \emph{totally reflexive}).
Putting $c = -a$, one concludes that $S$ is totally reflexive if and only if $S \subset L_{-c}$
for some geometric involution $c$. It is also clear that each weak
isomorphism class consists of one or two strict isomorphism classes,
depending on whether the configurations are symmetric or not, respectively.
\lemma\label{totally_reflexive}
An $\bold L$-configuration $S$ is totally reflexive if and only if the orthogonal complement $S^\perp$
contains either $[2]$ or $\bU(2)$.
\endlemma
\proof
We use the classification of geometric involutions found in \cite{Nikulin:forms}.
On the one hand, any sublattice isomorphic to $[2]$ or $\bU(2)$ in $h^\perp \subset \bold L$
is of the form $L_{+c}$ for some geometric involution $c$.
On the other hand, for any geometric involution $c$ the sublattice $L_{-c}$ is totally reflexive.
\endproof
\subsection{The arithmetical reduction}\label{arithmetical_reduction}
Let $X \subset \PP^3$ be a nonsingular quartic surface.
Choosing a marking
$\psi\: L_X \to \bold L$,
we obtain an $\bold L$-configuration $\psi(\Cal F(X))$
(see Proposition \ref{Saint-Donat}).
Since any two markings differ by an element of $\OG^+_h(\bold L)$,
the surface $X$ gives rise to a well-defined isomorphism class
$[\Cal F(X)]$ of $\bold L$-configurations.
Two nonsingular quartics $X_0$ and $X_1$ in $\Cp3$
are said to be \emph{equilinear deformation equivalent}
if there exists a path $X_t$, $t \in [0,1]$, in the space of nonsingular quartics
such that the number of lines in $X_t$ remains constant.
\theorem\label{th.complex}
The map $X \mapsto [\Cal F(X)]$
establishes
a bijection between the set of equilinear deformation
classes of nonsingular quartics in $\PP^3$
and that of strict isomorphism classes of
$\bold L$-configurations.
\endtheorem
\proof
For the surjectivity, we choose a period $\omega \in \Omega^\circ$
so that $\omega^\perp \cap \bold L$
represents the chosen class of $\bold L$-configurations
and apply \autoref{Saint-Donat} and \autoref{line_classes}.
For the injectivity, we prove a stronger statement, \viz. the connectedness
of the space $\Omega'(S)$
of marked nonsingular quartics whose lines are taken by the marking
to the lines of a fixed $\bold L$-configuration $S \subset \bold L$.
To this end, consider the spaces
\[*
\Omega(S)=\{\omega\in\Omega\,|\,S\subset\omega^\perp\},\qquad
\Omega^\circ(S)=\Omega(S)\cap\Omega^\circ.
\]
By \autoref{moduli}, the latter is a fine moduli space of marked nonsingular
quartics $(X,\psi)$ such that $\psi(\operatorname{Pic} X)\supset S$; hence, by
\autoref{line_classes}, the space $\Omega'(S)$ is obtained from $\Omega^\circ(S)$ by
removing the union of the subspaces $\Omega_e$, where
\roster[3]
\item\label{ex-line1}
$e\in\bold L\sminus S$ is such that $e^2=-2$ and $e\cdot h=1$.
\endroster
In other words, $\Omega'(S)$ is obtained from a connected (in a sense, convex)
manifold $\Omega(S)$ by removing the codimension~$2$ subspaces $\Omega_e$
with $e$ as in \autoref{Saint-Donat}\iref{ex-div1}, \iref{ell-pencil1} or as
in~\iref{ex-line1} above. This family of subspaces is obviously locally
finite, and this fact implies the connectedness of the complement.
\endproof
\proposition\label{symmetric-reflexive}
Let $S$ be an $\bold L$-configuration, and
denote by $\Cal X$ the equilinear deformation class
corresponding to $S$ under the bijection of \autoref{th.complex}.
Then\rom:
\roster*
\item $\Cal X$ is invariant under the complex conjugation if and only if $S$ is symmetric\rom;
\item $\Cal X$ contains a real quartic if and only if $S$ is reflexive.
\endroster
\endproposition
\proof
Since $\omega_{\bar X}$ is $\omega_X$ with the orientation reversed,
the statement follows from the description of the moduli space $\Omega'(S)$
given in the proof of \autoref{th.complex}.
\endproof
A nonsingular quartic $X \subset \PP^3$ is called \emph{$\Cal F$-maximal}
if $\rank\Cal F(X) = 20$.
\addendum\label{app.complex}
The map $X \mapsto [\Cal F(X)]$ establishes
a bijection between the set of projective equivalence classes of $\Cal F$-maximal quartics in $\PP^3$
and that of isomorphism classes of
$\bold L$-configurations of rank $20$.
\endaddendum
\proof
Such quartics have maximal Picard rank, and for $S \subset \bold L$ of rank $20$,
the moduli space $\Omega'(S)/\!\PGL(4, \C)$ (\cf. the proof of \autoref{th.complex})
is discrete.
\endproof
Now, consider a nonsingular real quartic $X \subset \PP^3$ of
a certain homological type $c\: \bold L \to \bold L$.
The real structure on $X$ reverses the orientation of any real algebraic curve $C \subset X$,
thus reversing the class $[C] \in L_X$.
Hence, as above, considering real lines only,
we can define the \emph{real Fano configuration} $\Cal F_\R(X)$ and
the isomorphism class
$[\Cal F_\R(X)]$ of $L_{-c}$-configurations.
The following statements are straightforward, \cf. \eqref{real_periods}.
\theorem\label{th.real}
The real Fano configuration
of a nonsingular real quartic $X \subset \PP^3$
of homological type
$c\: \bold L \to \bold L$
is $L_{-c}$-geometric.
Conversely, any isomorphism class of
$L_{-c}$-configurations
is of the form $[\Cal F_\R(X)]$ for some nonsingular real quartic $X \subset \PP^3$
of homological type~$c$.
\done
\endtheorem
\corollary\label{cor.real}
An $\bold L$-configuration~$S$ is in the class $[\Cal F_\R(X)]$
for some nonsingular real quartic $X \subset \PP^3$
if and only if
$S$ is totally reflexive.
\done
\endcorollary
A nonsingular real quartic $X$ is called \emph{$\Cal F_\R$-maximal}
if $\rank\Cal F_\R(X) = 20$. Even though we do not study equivariant
equilinear deformations
of
real quartics, in the case of the maximal Picard rank, where the moduli
spaces are discrete,
we still have projective equivalence; the precise statement is as follows.
\addendum\label{app.real}
The map $X \mapsto [\Cal F_\R(X)]$ establishes
a bijection between the set of
real
projective equivalence classes of $\Cal F_\R$-maximal real quartics in $\PP^3$
of a given homological type $c\: \bold L \to \bold L$
and that of isomorphism classes of
$L_{-c}$-configurations of rank~$20$.
\done
\endaddendum
\section{Geometry of configurations}\label{S.geometry}
In this section, we study the simplest properties of configurations, \viz.
those with a simple geometric interpretation. Most statements hold
without the assumption that the configuration should be geometric.
\subsection{Planes}\label{s.planes}
Fix a configuration~$S$ and denote by $h \in S$
its polarization.
\lemma\label{matrix}
For any two distinct lines $a_1,a_2\in S$ one has
$a_1 \cdot a_2 = 0$ or $1$.
\endlemma
\proof
Let $a_1 \cdot a_2 = x$, and consider the subconfiguration
$S':=\operatorname{span}_h(a_1,a_2)$ (see \autoref{subconf}).
From $\det S' > 0$, one has $-1 \le x \le 2$.
If $x = -1$, then $a_1 - a_2$ is as in \autoref{pre-conf}\iref{ex-div};
if $x = 2$, then $a_1 + a_2$ is as in \autoref{pre-conf}\iref{ell-pencil}.
\endproof
Two distinct lines $a_1,a_2\in S$
are said to \emph{intersect} (respectively, to be \emph{disjoint}, or
\emph{skew}) if $a_1 \cdot a_2 = 1$
(respectively, $a_1 \cdot a_2 = 0$).
We regard the set of lines $\operatorname{Fn}(S)$ as a graph, with a pair of lines
(regarded as vertices)
connected by an edge if and only if the lines intersect.
A \emph{subgraph} of $\operatorname{Fn}(S)$ is always assumed induced.
A \emph{plane} in a configuration~$S$ is a collection
$\{a_1, a_2, a_3, a_4\} \subset S$ of four pairwise intersecting lines.
\lemma\label{plane}
For any plane $\{a_1, a_2, a_3, a_4\}\subset S$ one has
$a_1 + a_2 + a_3 + a_4 = h$.
\endlemma
\proof
The difference $h - (a_1 + a_2 + a_3 + a_4)$ is in the kernel
of $\operatorname{span}_h(a_1,a_2,a_3,a_4)$;
hence, this difference is zero, see \autoref{subconf}.
\endproof
\corollary[of Lemmas \ref{matrix} and \ref{plane}]\label{cor-plane}
Let $\alpha = \{a_1, a_2, a_3, a_4\} \subset S$ be
a plane and $b \in S$ a line not contained in $\alpha$.
Then $b$ intersects exactly one line of $\alpha$.
\done
\endcorollary
The \emph{valency} $\operatorname{val} l$ of a line $l\in S$
is the number of lines in $S$ that intersect $l$.
\corollary[of \autoref{cor-plane}]\label{cor-Segre}
For any plane $\alpha = \{a_1, a_2, a_3, a_4\} \subset S$,
one has
\[*
\ls| \operatorname{Fn}(S) | = \operatorname{val} a_1 + \operatorname{val} a_2 + \operatorname{val} a_3 + \operatorname{val} a_4 - 8.
\]
\endcorollary
\lemma\label{two-planes}
Let $a_1$, $a_2 \in S$ be two intersecting lines, and
assume that there is a line $b_1 \in S$ that intersects both $a_1$ and $a_2$.
Then, there exists exactly one other line $b_2 \in S$ intersecting $a_1$ and $a_2$.
Furthermore, the lines $a_1,a_2,b_1,b_2$ form a plane.
As a consequence, if two planes $\alpha_1$, $\alpha_2 \subset S$
share two lines,
then $\alpha_1=\alpha_2$.
\endlemma
\proof
For the existence, let $b_2 = h - (a_1 + a_2 + b_1)$ (\cf. \autoref{plane}).
For the uniqueness, consider a line $c$ as in the statement.
If $b_1 \cdot c = 0$, then the difference $h - (a_1 + a_2 + b_1 + c)$
is as in \autoref{pre-conf}\iref{ex-div}.
Otherwise, one has $b_1 \cdot c = 1$ by \autoref{matrix},
and $\{a_1,a_2,b_1,c\}$ is a plane.
Hence, $c = b_2$ by \autoref{plane}.
\endproof
If two distinct lines lie in a (unique) plane $\alpha\subset S$, they are said to
\emph{span}~$\alpha$.
\subsection{Skew lines}\label{s.skew}
We keep the notation $(S,h)$ from the previous section.
The next lemma states some properties of skew lines.
\lemma\label{lem.skew}
Consider a number of lines $a_1,\ldots,a_m,b_1,\ldots,b_n\in S$ such that
all $a_i$ are pairwise disjoint, all $b_j$ are pairwise distinct, and
$a_i\cdot b_j=1$ for all $i=1,\ldots,m$, $j=1,\ldots,n$.
Then the following holds\rom:
\roster
\item\label{skew.plane}
if $m\ge2$, then all lines $b_j$ are pairwise disjoint\rom;
\item\label{skew.2}
if $m=2$, then $n\le10$\rom;
if $n=9$, then there exists a unique other line $b_{10}$ such that
$a_i\cdot b_{10}=1$ for $i=1,2$\rom;
\cf. also \autoref{cor.10.fibers} below\rom;
\item\label{skew.4}
if $m=4$, then $n\le4$\rom;
if $n=3$, then there exists a unique other line $b_{4}$ such that
$a_i\cdot b_{4}=1$ for $i=1,2,3$\rom; for this line, also
$a_4\cdot b_4=1$\rom;
\item\label{skew.4'}
if $m=n=4$, then any other line $c\in S$ intersects exactly two of the
given lines $a_1,\ldots,a_4$, $b_1,\ldots,b_4$\rom;
\item\label{skew.5}
if $m\ge3$, then $n\le4$\rom; if $m\ge5$, then $n\le2$.
\endroster
\endlemma
\proof
Item~\iref{skew.plane} is a partial restatement of \autoref{two-planes}.
The next two statements are proved similarly, with
\[*
b_{10}=4h-3(a_1+a_2)-(b_1+\ldots+b_9)
\]
in item~\iref{skew.2} and
\[*
b_4=2h-(a_1+\ldots+a_4+b_1+b_2+b_3)
\]
in item~\iref{skew.4}.
In the latter case, if $a_4\cdot b_4$ were~$0$, the vector
$a_1+\ldots+b_4-2h$ would be as in \autoref{pre-conf}\iref{ex-div}.
The expression for $b_4$ proves also item~\iref{skew.4'}, and
item~\iref{skew.5} is a simple consequence of item~\iref{skew.4}.
\endproof
Recall that our ultimate goal is the study of the configuration~$S$ of lines
in a nonsingular quartic surface~$X$.
From this perspective, as the name suggests, a plane is the subconfiguration
cut on~$X$ by a plane in~$\Cp3$, provided that the intersection splits
completely into components of degree one. A collection
$a_1,\ldots,a_4,b_1,\ldots,b_4$ as in \autoref{lem.skew}\iref{skew.4}
and~\iref{skew.4'} can similarly be interpreted as the intersection of~$X$
with a quadric (the lines~$a_i$ and~$b_j$ lying in the two distinct families of
generatrices),
and a subconfiguration as in
\autoref{lem.skew}\iref{skew.2} is (probably, a special case of) the
intersection of~$X$ with another quartic.
The following lemma, not used in the paper, is in the same spirit: it
describes the intersection of~$X$ with a cubic.
For the statement, define a \emph{double sextuple} as a collection of lines
$a_1,\ldots,a_6,b_1,\ldots,b_6$ in a configuration~$S$ intersecting as
follows:
\[
a_i\cdot b_j=1-\delta_{ij}
\label{eq.double.6}
\]
(where $\delta_{ij}$ is the Kronecker symbol).
\lemma\label{lem.cubic}
Let $A':=\{a_1,\ldots,a_6,b_1,\ldots,b_5\}\subset S$ be a collection of lines
which
satisfy~\eqref{eq.double.6}.
Then there is a unique line $b_6\in S$ completing $A'$ to a double
sextuple $A$.
Furthermore,
all elements of~$A$ are pairwise distinct,
the lines~$a_i$ are pairwise disjoint,
the lines $b_j$ are pairwise disjoint, and
any other line $c\in S$
intersects exactly three elements of $A$.
\endlemma
\proof
The twelfth line is
\[*
b_6=3h-(a_1+\ldots+a_6+b_1+\ldots+b_5),
\]
and the other statements are immediate, \cf. the proof of \autoref{lem.skew}.
\endproof
\subsection{Pencils}\label{s.pencils}
Let $X \subset \Cp3$ be a nonsingular quartic such that
$\rank\Cal F(X)\ge2$. Fix a line $l \subset X$. The pencil of planes through $l$ gives rise to
an elliptic pencil $X \to \Cp1$. Each fiber containing a line is
reducible:
it splits either into three lines
or a line and a conic;
in the former case, the three lines and~$l$ form a plane in~$\Cal F(X)$.
Clearly, the lines in $X$ contained in the fibers of the pencil defined by $l$
are precisely those intersecting $l$.
Motivated by this observation,
we define a \emph{pencil} $\Cal P$ in a
configuration $(S, h)$ as a set of lines
satisfying the following properties:
\roster*
\item
all lines in $\Cal P$ intersect a given line $l$, called the
\emph{axis} of $\Cal P$;
\item
if $a_1$, $a_2 \in \Cal P$ and $a_1 \cdot a_2 = 1$, then
$h-l-a_1-a_2 \in \Cal P$ (\cf. \autoref{plane}).
\endroster
\autoref{two-planes} implies that
\[*
\text{$a \sim b$ if $a = b$ or $a \cdot b = 1$}
\]
is an equivalence relation on $\Cal P$. The equivalence classes are called
the \emph{fibers} of $\Cal P$. The number $m$ of lines in a fiber may take
values $3$ or $1$; a fiber consisting of $m$ lines is called an
\emph{$m$-fiber}, and the number of such fibers is denoted by
$\operatorname\#_m(\Cal P)$.
By \autoref{cor-plane}, $\Cal P$ has a unique axis whenever
$\operatorname\#_3(\Cal P)\ge1$ and $\operatorname\#_3(\Cal P)+\operatorname\#_1(\Cal P)\ge2$.
Each line $l\in S$ gives rise to a well-defined pencil
\[*
\Cal P(l):=\{a\in\operatorname{Fn} S\,|\,a\cdot l=1\};
\]
such a pencil is called \emph{maximal}.
Any line $a \in S$ disjoint
from $l$ is called a \emph{section} of $\Cal P(l)$ or any subpencil thereof.
The set of sections of~$\Cal P$ depends on the ambient
(pre-)configuration~$S$; it
is denoted by $S(\Cal P)$. By definition,
\[*
S(\Cal P)=\{a\in\operatorname{Fn}(S)\,|\,a\cdot l=0\}.
\]
Clearly, for any line~$l\in S$,
one has
\[*
\operatorname{val} l=\ls|\Cal P(l)|=3\operatorname\#_3(\Cal P(l))+\operatorname\#_1(\Cal P(l)).
\]
The number $\operatorname{mult} l:=\operatorname\#_3(\Cal P(l))$
is called the \emph{multiplicity}
of~$l$. Alternatively,
$\operatorname{mult} l$ is the number of distinct planes containing~$l$.
Two pencils $\Cal P_1$, $\Cal P_2$ are called \emph{obverse} if their axes
are disjoint; otherwise, the pencils are called \emph{adjacent}.
The following lemma is an immediate consequence of Lemmas~\ref{two-planes}
and \ref{lem.skew}\iref{skew.2}.
\lemma\label{lem.2-pencils}
Let $\Cal P_1\ne\Cal P_2$ be two pencils. Then
\roster
\item
$\ls|\Cal P_1\cap\Cal P_2|\le10$ if $\Cal P_1$, $\Cal P_2$ are obverse,
and
\item
$\ls|\Cal P_1\cap\Cal P_2|\le2$ if $\Cal P_1$, $\Cal P_2$ are adjacent.
\done
\endroster
\endlemma
\subsection{Combinatorial invariants}\label{s.invariants}
A pencil~$\Cal P$ is often said to be of \emph{type~$(p,q)$},
where $p:=\operatorname\#_3(\Cal P)$ and $q:=\operatorname\#_1(\Cal P)$. If an $\bold L$-realization~$\psi$
is fixed, the pencil is called \emph{primitive} or \emph{imprimitive} if so is
the sublattice $\operatorname{span}_h\psi(\Cal P)\subset\bold L$.
In this case, the type is further
refined to $(p,q)^\bullet$ and $(p,q)^\circ$, respectively.
A geometric configuration containing a maximal pencil~$\Cal P$
of type $(p,q)^*$ is
called a \emph{$(p,q)^*$-configuration}, and the pair $(S,\Cal P)$ is called
a \emph{$(p,q)^*$-pair}.
The multiset
\[*
\operatorname{\frak{ps}}(S):=\bigl\{\text{type of $\Cal P(l)$}\bigm|l\in\operatorname{Fn}(S)\bigr\}
\]
is called the \emph{pencil structure} of a configuration~$S$.
We usually represent $\operatorname{\frak{ps}}(S)$
in the partition notation (see, \eg, \autoref{obs.pencils} below):
a ``factor'' $(p,q)^a$ means that $S$ has $a$ pencils of type $(p,q)$.
The \emph{linking type} $\operatorname{lk}(\Cal P_1,\Cal P_2)$ of a pair of obverse
pencils is the pair $(\mu_1,\mu_3)$, where
$\mu_1:=\ls|\Cal P_1\cap\Cal P_2|$ and $\mu_3$ is the number of lines in
$\Cal P_1\cap\Cal P_2$ that belong to a $3$-fiber both in~$\Cal P_1$
and~$\Cal P_2$.
If $\Cal P_i=\Cal P(l_i)$, $i=1,2$, we also use the notation
$\operatorname{lk}(l_1,l_2)$.
The multiset
\[*
\bold Ls(S):=\bigl\{\operatorname{lk}(l_1,l_2)\bigm|
l_1,l_2\in\operatorname{Fn}(S),\ l_1\cdot l_2=0\bigr\}
\]
is called the \emph{linking structure} of~$S$.
Clearly, both $\operatorname{\frak{ps}}(S)$ and $\bold Ls(S)$ are invariant under isomorphisms.
\section{The arithmetics of pencils}\label{S.arithm}
In this section, we study the more subtle properties of geometric
configurations related to their primitive embeddings to~$\bold L$.
\subsection{Notation and setup}\label{s.setup}
Throughout this section, we consider a pencil~$\Cal P$ of a certain
type~$(p,q)$.
Thus, we have the sets $\operatorname{fb}_3\Cal P=\{1,\ldots,p\}$ and
$\operatorname{fb}_1\Cal P=\{1,\ldots,q\}$ of the $3$- and $1$-fibers
of~$\Cal P$, respectively, and the full set
$\operatorname{fb}\Cal P:=\operatorname{fb}_3\Cal P\sqcup\operatorname{fb}_1\Cal P$ of fibers
is their disjoint
union.
We regard~$\Cal P$ as a pencil in the ``minimal'' configuration $P:=P_{p,q}$,
which is
generated over~$\Z$ by $\Cal P$ itself, the axis~$l$, and the
polarization~$h$. We also keep in mind a geometric realization
$\psi\:P\to\bold L$, identifying $\Cal P$ and~$P$ with their images in~$\bold L$ and
denoting by~$\smash{\tilde P}$ the primitive hull
$(P\otimes\Q)\cap\bold L$.
When speaking about sections of~$\Cal P$, we assume $\Cal P$ embedded to a
configuration $S$, which is usually not specified.
(One can consider the minimal configuration generated by~$P$ and the
sections in question.) However, {\em we always assume that the realization
of~$P$ extends to a geometric realization $S\to\bold L$.}
The group of symmetries of $\Cal P$ is obviously
\[*
\Bbb G_{p,q}:=(\SG3^p\rtimes\SG{p})\times\SG{q}.
\]
In addition to~$h$ and~$l$,
consider the following classes in $P_{p,q}$:
\roster*
\item
$m_{i,j}$, $i\in\operatorname{fb}_3\Cal P$, $j\in\CG3$, the lines in the $3$-fibers;
\item
$n_k$, $k\in\operatorname{fb}_1\Cal P$, the lines in the $1$-fibers.
\endroster
Then $P_{p,q}$ is the hyperbolic lattice
freely generated by $h$, $l$, $m_{i,j}$, $i\in\operatorname{fb}_3\Cal P$,
$j=\pm1$, and $n_k$, $k\in\operatorname{fb}_1\Cal P$.
For the lines $m_{i,\pm1}$, we will also use the shortcut $m_{i,\pm}$.
\observation\label{obs.3-torsion}
One has $\det P_{p,q}=-3^{p+2}(-2)^q$.
The $3$-primary part $\discr_3P_{p,q}$ contains the classes
represented by the following mutually orthogonal vectors:
\roster*
\item
$\lambda:=\frac13(l-h)$: one has $\lambda^2=0$ and $\lambda\cdot h=\lambda\cdot l=-1$;
\item
$\mu_i=\mu_{i,0}:=\frac13(m_{i,+}-m_{i,-})$, $i\in\operatorname{fb}_3\Cal P$:
one has $\mu_i^2=-\frac23$ and $\mu_i\cdot h=0$.
\endroster
If $r:=p+q-1\ne0\bmod3$, then $\discr_3P_{p,q}$ is generated by $\mu_i$,
$i\in\operatorname{fb}_3\Cal P$, and
the order~$9$ class of the vector
\roster*
\item
$\upsilon:=\frac13\bigl(l-r\lambda-\sum_{k=1}^qn_k\bigr)$;
note that $3\upsilon=-r\lambda\ne0\bmod P$.
\endroster
Hence, in this case the subgroup of elements of order~$3$ is
generated by~$\lambda$ and~$\mu_i$.
If $p+q=1\bmod3$, then $\discr_3P_{p,q}$ is generated by $\lambda$, $\mu_i$,
and the order~$3$ class of
\roster*
\item
$\omega:=\frac13\bigl(l+\sum_{i=1}^p(m_{i,+}+m_{i,-})-\sum_{k=1}^qn_k\bigr)$.
\endroster
The $2$-primary part $\discr_2P_{p,q}$
is generated by the classes of $3\nu_k$, where
\roster*
\item
$\nu_k:=n_k^*=-\frac12(\lambda+n_k)$, $k\in\operatorname{fb}_1\Cal P$:
one has $\nu_k^2=-\frac12$ and $\nu_k\cdot h=0$.
\endroster
The class $\mu_i\in\discr P_{p,q}$ is also represented by the vector
$\bar\mu_i^+:=\frac13(m_{i,+}+2m_{i,-})$, so that
one has $\bar\mu_i^2=-\frac23$ and
$\bar\mu_i\cdot h=1$.
The class $-\mu_i\in\discr P_{p,q}$ is also represented by
$\bar\mu_i^-:=\frac13(2m_{i,+}+m_{i,-})$.
For any line $a\in\Cal P$, the class $\lambda$ is represented by the vector
$\lambda+a\in h^\perp$, so that one has $(\lambda+a)^2=-2$.
\endobservation
The following two statements are immediate.
\lemma\label{lem.sum.3}
For any triple of distinct indices $i,j,k\in\operatorname{fb}_3\Cal P$ and any $u\in\CG3$, the
classes $\pm\lambda$ and $u\lambda\pm\mu_i\pm\mu_j\pm\mu_k$ are represented by vectors of
square~$(-2)$ in $h^\perp\subset P_{p,q}$.
Hence, these classes cannot belong to the pivot~$\smash{\tilde P}/P$.
\done
\endlemma
\lemma\label{lem.sum.4}
The sum of any four distinct elements of the form $3\nu_k$, $k\in\operatorname{fb}_1\Cal P$,
is represented by a vector of
square~$(-2)$ in $h^\perp\subset P_{p,q}$.
Hence, the class of such a sum cannot belong to the pivot~$\smash{\tilde P}/P$.
\done
\endlemma
\subsection{Euler's bound}\label{s.large}
We start with eliminating very large pencils.
\proposition\label{prop.Euler}
The type $(p,q)$ of a pencil
contained in a geometric
configuration
satisfies the inequalities
\[*
3p+2q\le24\qquad\text{and}\qquad 3p+q\le20.
\]
\endproposition
\corollary[\cf. Rams, Sch\"{u}tt~\cite{rams:2012}]\label{cor.le20}
The valency of any line~$l$ in a geometric configuration~$S$
does not exceed $20$.
\done
\endcorollary
In the real case, there is an additional restriction to the types of pencils.
\proposition\label{prop.5-2.real}
A pencil~$\Cal P$ contained in a totally reflexive geometric
configuration cannot be of type~$(6,0)^\bullet$
or~$(5,q)$, $q\ge2$.
\endproposition
\proof[Proof of Propositions~\ref{prop.Euler} and~\ref{prop.5-2.real}]
Assume that $(p,q)=(7,0)$.
By \autoref{obs.3-torsion}, the isotropic elements in $\discr_3P_{7,0}$ are:
\roster
\item\label{i.(7,0).3}
the classes mentioned in \autoref{lem.sum.3};
\item\label{i.(7,0).6}
classes of the form $u\lambda+\sum_{i\in I}\pm\mu_i$, where $u\in\CG3$ and
$I\subset\operatorname{fb}_3\Cal P$, $\ls|I|=6$;
all these classes form a single orbit of $\Bbb G_{7,0}$;
\item\label{i.(7,0).any}
classes of the form (up to sign) $\omega+u\lambda-\sum_{i\in I}\pm\mu_i$, where
$I\subset\operatorname{fb}_3\Cal P$ is any subset and $u=(5-\ls|I|)\bmod3$.
\endroster
Each class as in \autoref{i.(7,0).any} is represented by a vector of
square~$(-2)$ orthogonal to~$h$, \viz.
$\omega+(5-\ls|I|)\lambda-\sum_{i\in I}\bar\mu_i^\pm$.
Hence, neither~\iref{i.(7,0).3} nor~\iref{i.(7,0).any} can belong to
the pivot $\smash{\tilde P}/P$.
On the other hand, by \autoref{th.Nikulin}, one has $\ell_3(\smash{\tilde P}/P)\ge2$
and $\smash{\tilde P}/P$ must contain two distinct nontrivial orthogonal vectors
$\beta_1$, $\beta_2$ as
in~\iref{i.(7,0).6}.
On the other hand,
if both vectors are as in~\iref{i.(7,0).6}, then at least one of their linear
combinations is as in~\iref{i.(7,0).3}, \cf.~\cite{degt:Shapiro}.
Similar arguments apply to the other border cases: by \autoref{th.Nikulin},
one has
\roster*
\item
$\ell_3(\smash{\tilde P}/P)\ge1$ if $(p,q)=(5,4)$ (use \autoref{lem.sum.3}),
\item
$\ell_2(\smash{\tilde P}/P)\ge1$ if $(p,q)=(3,8)$,
\item
$\ell_2(\smash{\tilde P}/P)\ge2$ if $(p,q)=(1,11)$ (use \autoref{lem.sum.4}), and
\item
$\ell_2(\smash{\tilde P}/P)\ge3$ if $(p,q)=(0,13)$ (use \autoref{lem.sum.4}).
\endroster
In the case $(p,q)=(3,8)$, the only isotropic element allowed by
\autoref{lem.sum.4} is the characteristic element $\nu:=\sum_{k=1}^8\nu_k$.
The discriminant form $\nu^\perp/\nu$ is even, and the new lattice does not
embed to~$\bold L$ by \autoref{th.Nikulin}.
For \autoref{prop.5-2.real}, one
uses \autoref{obs.3-torsion} and \autoref{th.Nikulin}; the latter should
be applied to either $P\oplus[2]$ or an appropriate finite
index extension of $P\oplus[2]$ or $P\oplus\bU(2)$, see
\autoref{totally_reflexive}.
\endproof
The conclusion of \autoref{prop.Euler} can be recast as follows:
for any line~$l$ in a geometric configuration~$S$, one
has $\operatorname{val} l\le20$ and $\operatorname{mult} l\le6$; furthermore,
\[
\def\phantom{0}{\phantom{0}}
\aligned
\text{if $\operatorname{mult} l$}&\le\phantom{0}0,\,\phantom{0}1,\,\phantom{0}2,\,\phantom{0}3,\,\phantom{0}4,\,\phantom{0}5,\,\phantom{0}6={\max},\\
\text{then $\operatorname{val} l$}&\le12,\,13,\,15,\,16,\,18,\,18,\,20={\max},\text{ respectively}.
\label{tablichka}
\endaligned
\]
It follows from~\eqref{tablichka} that $\max\{\operatorname{val} l\,|\,l\in\operatorname{Fn}(S)\}\le18$
if and only if $S$ does not contain a pencil of type $(6,q)$, $q\ge1$.
\remark\label{rem.Euler}
Interpreting pencil geometrically as in \autoref{s.pencils},
one can easily see that
the first inequality $3p+2q\le24$ in \autoref{prop.Euler} is nothing but the
well-known bound on the number and types of singular fibers in
an elliptic pencil.
\endremark
\subsection{Coordinates}
Consider a section~$s$ of a pencil~$\Cal P$.
By \autoref{cor-plane}, for each index
$i\in\operatorname{fb}_3\Cal P$, the section~$s$ intersects exactly one of the three lines
$m_{i,j}$, $j\in\CG3$; the corresponding index $\epsilon_i:=j\in\CG3$
is called the \emph{$i$-th $3$-coordinate} of~$s$.
Introduce also the \emph{$k$-th $1$-coordinate} as the residue
$\rho_k:=(s\cdot n_k)\bmod2\in\CG2$, $k\in\operatorname{fb}_1\Cal P$.
We will treat the coordinate space
$\Cal C_{p,q}:=(\CG3)^p\times(\CG2)^q$ as an abelian group,
even though
only few linear combinations of coordinate vectors have
invariant meaning.
To avoid confusion with the operations in lattices, we will use $\op$ and
$\om$ for the addition and subtraction in~$\Cal C_{p,q}$, respectively.
\convention\label{conv.coord}
Given sections $s,s_1,s_2,\dots$ of~$\Cal P$ and $u=1,3$,
we will use the following notation:
\roster*
\item
$\epsilon_i:=\epsilon_i(s)$ and $\rho_k:=\rho_k(s)$ are, respectively, the $3$- and
$1$-coordinates of~$s$;
\item
$[s]$ or $\bar s:=[\epsilon_1,\ldots,\epsilon_p;\rho_1,\ldots,\rho_q]$ is the sequence of all
coordinates of~$s$;
\item
$\num u(s)$ is the number of non-vanishing $u$-coordinates of~$s$;
\item
$\dif u(s_1,s_2)$ is the number of positions where the $u$-coordinates
of~$s_1$, $s_2$ differ;
\item
$\scom3(s_1\cs s_2\cs\ldots):=\{i\in\operatorname{fb}_3\Cal P\,|\,\epsilon_i(s_1)=\epsilon_i(s_2)=\ldots\}$;
\item
$\scom1(s_1\cs s_2\cs\ldots):=\{k\in\operatorname{fb}_1\Cal P\,|\,\rho_k(s_1)=\rho_k(s_2)=\ldots=1\}$;
\item
$\scom(\ldots):=\scom3(\ldots)\sqcup\scom1(\ldots)$ (regarded as a set of
fibers of~$\Cal P$);
\item
$\com*(\ldots)$ is the cardinality of the set $\scom*(\ldots)$ for $*=1,3$,
or empty;
\item
$\Bbb I:=\Bbb I_{p,q}=[0,\ldots,0;1,\ldots,1]\in\Cal C_{p,q}$.
\endroster
The same notation applies if all or some of $s$, $s_1$, $s_2$ are elements of
the coordinate space $\Cal C_{p,q}$.
The $3$-coordinates $\epsilon_i(s)$, numbers $\num3(s)$,
and element $\Bbb I\in\Cal C$ depend on the indexing
of the lines in the $3$-fibers;
however, the sets $\scom3(\ldots)$, numbers $\dif3(s_1,s_2)$, and expressions
of the form
\[*
\Bbb I\op\bar s,\qquad
\bar s_1\op\bar s_2\op\bar s_3=\Bbb I,\qquad\text{or}\qquad\bar s_3=\Bbb I\om\bar s_1\om\bar s_2
\]
have invariant meaning.
Note also the difference between the definitions of $\scom3(\ldots)$ and
$\scom1(\ldots)$: in the former case, we count \emph{all} equal coordinates,
whereas
in the latter, only the \emph{non-vanishing} ones.
\endconvention
The following statements are immediate consequences of Lemmas~\ref{two-planes}
and~\ref{lem.skew}.
\lemma\label{lem.s.s=1}
Let $s_1$, $s_2$ be two sections of~$\Cal P$ and $s_1\cdot s_2=1$.
Then
$\com(s_1\cs s_2)\le1$. If $\com(s_1\cs s_2)=1$, then there
is a
section~$s$ satisfying
$\bar s\+\bar s_1\+\bar s_2=\Bbb I$\rom;
the
sections
$s$, $s_1$, $s_2$ and the only line $a\in\Cal P$
intersecting all three of them constitute a plane.
\done
\endlemma
\lemma\label{lem.s.s=0}
Let $s_1$, $s_2$, $s_3$ be distinct sections of~$\Cal P$.
Then\rom:
\roster
\item\label{com.5}
one has $\com(s_1\cs s_2)\le4$\rom;
\item\label{com.4}
if $\com(s_1\cs s_2)=4$, there is a unique
section~$s$ such that
$\bar s\+\bar s_1\+\bar s_2=\Bbb I$\rom;
\item\label{com.3}
if $\com(s_1\cs s_2\cs s_3)=3$, the pencil $\Cal P$ is not maximal.
\done
\endroster
\endlemma
\remark\label{rem.linear.combination}
In Lemmas~\ref{lem.s.s=1} and~\ref{lem.s.s=0}, as well as in the other
similar places below, the existence statement means that $s$ is a certain
(explicit, but not specified)
integral linear combination of the other sections involved and
generators of~$\smash{\tilde P}$.
\endremark
\corollary\label{cor.coord}
If $p\ge5$, then, for any configuration $S\supset P$, the coordinate
map $c\colon S(\Cal P)\to\Cal C_{p,q}$, $s\mapsto[s]$, is injective.
\done
\endcorollary
The injectivity of~$c$ for types $(4,*)$ and $(3,7)$ is
discussed in \autoref{ss.4-q} below.
The next corollary deals with an obverse pencil in a configuration
$S\supset\Cal P$.
\corollary\label{cor.obverse}
Given a section $s_0\in S(\Cal P)$, consider
$s,s_1,s_2\in\Cal P(s_0)\cap S_k(\Cal P)$
and assume that $s_1\cdot s_2=1$. Then\rom:
\roster
\item\label{obverse.<=1}
one has $\com(s\cs s_0)\le1$\rom;
\item\label{obverse.common}
$\scom(s_0\cs s_1)=\scom(s_0\cs s_2)=\scom(s_1\cs s_2)=\scom(s_0\cs s_1\cs s_2)$\rom;
\item\label{obverse.maximal}
if $\Cal P$ is maximal, then $\com(s_1\cs s_2)=1$\rom;
\item\label{obverse.3-fiber}
if $\Cal P$ is maximal, then $s$ is in a $1$-fiber of~$\Cal P(s_0)$ if and only if
$\com(s\cs s_0)=0$.
\endroster
\endcorollary
\proof
Statement~\iref{obverse.<=1} is a paraphrase of \autoref{lem.s.s=1}.
For~\iref{obverse.common} and~\iref{obverse.maximal}, just observe that
$s_0,s_1,s_2$ span a plane, and the fourth line~$a$ of this plane must
intersect~$l$, see \autoref{cor-plane}; hence, either $a\in\Cal P$ or
$\Cal P$ is not maximal.
Finally, Statement~\iref{obverse.3-fiber} is a paraphrase
of~\iref{obverse.maximal}.
\endproof
Denote $D:=2p+\frac12q-2$ and, given a collection of sections $s_1,\ldots,s_k$,
let
\[*
r_{ij}:=(s_i\cdot s_j)+\tfrac19D+\tfrac12\com1(s_i\cs s_j)
-\tfrac16(\num1(s_i)+\num1(s_j))-\tfrac13\dif3(s_i,s_j),\qquad
1\le i,j\le k,
\]
and define the \emph{determinant}
\[*
\det(s_1,\ldots,s_k):=\det[-r_{ij}]_{1\le i,j\le k}.
\]
The following lemma is a simple sufficient condition for the existence
of a collection of sections in terms of their coordinates
and pairwise intersections:
the orthogonal complement $P^\perp$ in any configuration $S\supset\Cal P$
must be negative definite.
\lemma\label{lem.det}
For any collection $s_1,\ldots,s_k$ of sections one has
$\det(s_1,\ldots,s_k)\ge0$.
If $\det(s_1,\ldots,s_k)=0$, then the sections are linearly dependent.
\done
\endlemma
\subsection{Combinatorial rigidity}\label{s.rigidity}
The group $\Bbb G_{p,q}$ acts on the coordinate space $\Cal C_{p,q}$.
Furthermore, given two configurations $S,S'\supset\Cal P$, any isometry
$(S,\Cal P)\to(S',\Cal P)$
induces an injection $\bar S\into\bar S'$, which is the restriction of an element
of $\Bbb G_{p,q}$. (Here, $\bar S$ and $\bar S'$ are the images of $S(\Cal P)$ and
$S'(\Cal P)$, respectively, under the coordinate map.)
A configuration $S\supset\Cal P$
or, more precisely, pair $(S,\Cal P)$
is called \emph{\rom(combinatorially\rom) rigid} if,
for any configuration $S'\supset\Cal P$, any
bijection $g(\bar S)=\bar S'$ restricted from an element $g\in\Bbb G_{p,q}$
is induced by an isometry
$(S,\Cal P)\to(S',\Cal P)$.
We say that $S$ or $(S,\Cal P)$ is
\emph{generated by a subset $\bar A\subset\bar S$} if
$S=\bigl(\smash{\tilde P}+\sum_{\bar s\in\bar A}\Z s\bigr)/\!\ker$;
if $\bar A=\bar S$, then $S$ is said to be \emph{generated by
sections}.
For such a configuration,
an obvious
sufficient condition for the combinatorial rigidity is that
the intersection $s_1\cdot s_2$ of a pair of sections
$s_1,s_2$ such that $\bar s_1,\bar s_2\in\bar A$
is determined by their images $\bar s_1,\bar s_2$, \ie, for any other
configuration $S'\supset\Cal P$ and pair of sections
$s_1',s_2'\in S'(\Cal P)$ such that $\bar S'=\bar S$ and $\bar s_1'=\bar s_1$,
$\bar s_2'=\bar s_2$, one has $s_1'\cdot s_2'=s_1\cdot s_2$.
By \autoref{lem.s.s=1}, an ambiguity may arise only if $\com(s_1\cs s_2)\le1$.
The following statement is a partial converse of \autoref{lem.s.s=1};
we do not need to assume that the configuration
$S\supset\Cal P$ is geometric.
\lemma\label{lem.com2=1}
Let $p=6$, $(p,q)=(5,3)$, $p=4$ and $q\ge4$, or $(p,q)=(3,7)$.
Consider a
pair of sections $s_1,s_2\in S(\Cal P)$
such that $\com(s_1\cs s_2)=1$.
Then, $\Cal P$ has a pair of sections $s_1',s_2'$ such that
$s_1'\cdot s_2'=1$ and $[s_i']=\bar s_i$, $i=1,2$, if and only if there is a
section~$s$ such that $\bar s\+\bar s_1\+\bar s_2=\Bbb I$.
\endlemma
\proof
The necessity is given by \autoref{lem.s.s=1}. For the converse, it suffices
to show that three sections
$s,s_1,s_2$ as in the statement cannot be pairwise disjoint.
Most such triples are eliminated by
\autoref{lem.det}, and the few remaining ones violate condition~\iref{ex-div}
in \autoref{pre-conf}.
\endproof
\subsection{Primitivity and rigidity for type $(6,*)$}\label{ss.6-q}
Primitive and imprimitive pencils of type $(6,*)$ exhibit very different
behaviour. Here, we start with a few common observations; imprimitive pencils
are treated separately in the next section.
\proposition\label{prop.6-0}
Assume
that $p = 6$. Then the following holds\rom:
\roster
\item\label{6.0_one}
if $\Cal P$ is not maximal or $q \ge 1$, then $\Cal P$ is imprimitive\rom;
\item\label{6.0.pivot}
if $\Cal P$ is imprimitive, then $\smash{\tilde P}/P=\<\beta\>$,
$\beta:=\sum_{i=1}^6\mu_i$, up to automorphism.
\endroster
\endproposition
\proof
The imprimitivity follows from \autoref{th.Nikulin}, and the only possible
nontrivial pivot is given by \autoref{obs.3-torsion} and \autoref{lem.sum.3}.
\endproof
\lemma\label{lem.6.0-sections}
Let $(p,q)=(6,0)$. Consider a geometric configuration $S\supset\Cal P$,
let $\bar S$ be the image of $S(\Cal P)$ under the coordinate map,
and,
for a pair
$s_1,s_2\in S(\Cal P)$, denote
$\bar s:=\Bbb I\om\bar s_1\om\bar s_2\in\Cal C_{6,0}$.
Then the following holds\rom:
\roster
\item\label{i.(6,p).0}
if $\com(s_1\cs s_2)=0$ and
$s_1\cdot s_2=0$, then $\Cal P$ is imprimitive
and $\frac13(s_1-s_2)\in\smash{\tilde P}$\rom;
\item\label{i.(6,p).0-intr}
if $\com(s_1\cs s_2)=0$ or~$3$ and $\bar s\in\bar S$,
then $\Cal P$ is imprimitive\rom;
\item\label{i.(6,p).1-intr}
if $\com(s_1\cs s_2)=1$, then $\bar s\in\bar S$ if and only if
$s_1\cdot s_2=1$\rom;
\item\label{i.(6,p).4}
if $\com(s_1\cs s_2)=4$, then $\bar s\in\bar S$.
\endroster
\endlemma
\proof
Statement~\iref{i.(6,p).0}:
the two vectors are linearly dependent by
\autoref{lem.det}; then $s_1-s_2=\beta$ up to automorphism.
Statement~\iref{i.(6,p).0-intr}, $\com(s_1\cs s_2)=0$:
if $\Cal P$ is primitive, then $s\cdot s_1=s\cdot s_2=s_1\cdot s_2=1$ by
Statement~\iref{i.(6,p).0}; hence, the three sections span a plane, and the
forth line of this plane is in $\Cal P(l)\sminus\Cal P$, which contradicts
\autoref{prop.6-0}\iref{6.0_one}.
Statement~\iref{i.(6,p).0-intr}, $\com(s_1\cs s_2)=3$: the imprimitivity of
$\operatorname{span}_h(\Cal P,s_1,s_2,s)$ is given by \autoref{th.Nikulin},
and the enumeration of isotropic elements
not realized by vectors $e$ as in \autoref{pre-conf}\iref{ex-div}
shows that the pivot is generated by~$\beta$ (up to isomorphism).
Statements~\iref{i.(6,p).1-intr} and~\iref{i.(6,p).4} follow from
Lemmas~\ref{lem.com2=1}
and~\ref{lem.skew}\iref{skew.4}, respectively.
\endproof
\corollary\label{cor.6-0.combinatorial}
Any $(6,0)^\bullet$-configuration
generated by sections is rigid.
\done
\endcorollary
\subsection{Triplets of sections}\label{s.triplets}
In this section, we study in more detail an imprimitive pencil of
type $(6,0)^\circ$. Thus, we fix a pencil~$\Cal P$ and
number the lines $m_*$ in the fibers so that the pivot $\smash{\tilde P}/P$ is generated
by the element~$\Bbb Gb$ introduced in \autoref{prop.6-0}.
Then, for any section~$s$,
\[
\Bbb Ge_1(s)+\ldots+\Bbb Ge_6(s)=0\bmod3.
\label{eq.6-0.coord}
\]
The group $\OG_h(\smash{\tilde P},l)$ is obviously the subgroup
\[
\tilde\Bbb G:=((\CG3)^5\rtimes\CG2)\rtimes\SG{6}\subset\Bbb G_{6,0};
\label{eq.6-0.group}
\]
indeed, the choice of~$\Bbb Gb$ gives rise to a distinguished cyclic order in
each fiber, which is well defined up to simultaneous reversal.
This group has a distinguished subgroup of order~$3$: it is generated by the
permutations
$\Bbb Gs^{\pm1}\:m_{i,j}\mapsto m_{i,j\pm1}$, $i\in\operatorname{fb}_3\Cal P$, $j\in\CG3$.
A choice of one of these two generators makes $\Cal C_{6,0}$ an
$\Bbb F_3$-affine space.
Consider a configuration $S\supset\smash{\tilde P}$ and let $\bar S\subset\Cal C_{6,0}$
be the image of
$S(\Cal P)$ under the coordinate map.
\lemma\label{lem.triplet}
The set $\bar S$ is $\Bbb Gs$-invariant, \ie, $\bar s_\pm:=\Bbb Gs^{\pm1}\bar s\in\bar S$
whenever $\bar s\in\bar S$. The three sections $\bar s,\bar s_\pm$ are
pairwise disjoint.
\endlemma
\proof
Up to automorphism, one can assume that $\bar s=[1,\ldots,1]$.
Then the two other sections are $s+\Bbb Gb$ and
$s-2h+2l+\sum_{i=1}^6(m_{i,1}+m_{i,-1})-\Bbb Gb$.
\endproof
A subset $\{s,s_\pm\}\subset S(\Cal P)$ or
$\{\bar s,\bar s_\pm\}\subset\bar S$ as in \autoref{lem.triplet} is called a
\emph{triplet}. Two sections $s_1,s_2\in S(\Cal P)$ are said to be
\emph{equivalent}, $s_1\sim s_2$, if they belong to one triplet.
Note that $\com(s_1\cs s_2)=0$ whenever $s_1\sim s_2$.
\lemma\label{lem.6-0.intr}
For a pair of sections $s_1,s_2\in S(\Cal P)$, one has
$s_1\cdot s_2=1$ if and only
if $\com(s_1\cs s_2)\le1$ and $s_1\not\sim s_2$.
\endlemma
\proof
If $\com(s_1\cs s_2)=0$ and $s_1\cdot s_2=0$,
\autoref{lem.6.0-sections}\iref{i.(6,p).0} and the fact that
$\ell_3(\smash{\tilde P}/P)=1$ imply that $s_1\sim s_2$.
If $\com(s_1\cs s_2)=1$, then, using~\eqref{eq.6-0.coord} and
Lemmas~\ref{lem.6.0-sections}\iref{i.(6,p).4} and~\ref{lem.triplet}, one can
easily show that $\Bbb I\om\bar s_1\om\bar s_2\in\bar S$; then, $s_1\cdot s_2=1$ by
\autoref{lem.6.0-sections}\iref{i.(6,p).1-intr}.
\endproof
\corollary\label{cor.6-q.combinatorial}
Any $(6,*)^\circ$-configuration
generated by sections is rigid.
\done
\endcorollary
Note that, for $(6,*)^\circ$-configurations, the rigidity
holds in a very strong sense: the intersection of two
sections is completely determined by their coordinates.
The set of triplets can be coordinatized by the affine space
\[*
\Cal A:=\{\bar s\in\Cal C_{6,0}\,|\,\text{$\bar s$ satisfies~\eqref{eq.6-0.coord}}\}/\Bbb Gs.
\]
In fact, $\Cal A$ is naturally a principal homogeneous space over
the subquotient $\Bbb Gl^\perp\!/\Bbb Gl$ of the discriminant $\discr\smash{\tilde P}$.
Denote by~$\frak q$ the descent of the discriminant form of~$\smash{\tilde P}$
reduced modulo~$\Z$; then, clearly,
\smash{$\frak q(\bar s_1-\bar s_2)=\frac13(\dif(s_1,s_2)\bmod3)$}.
Comparing the orders, one can see that the group $\tilde\Bbb G/\Bbb Gs$ is isomorphic
to the full group $\OG(\frak q)\rtimes\Cal A$ of $\frak q$-isometries of~$\Cal A$.
In other words, any \smash{$\tilde\Bbb G$}-invariant property of a set of
sections $\bar S\subset\Cal C_{6,0}$
satisfying~\eqref{eq.6-0.coord} and \autoref{lem.triplet} can be stated as a
``metric'' (with respect to~$\frak q$) property
of the projection $\bar\Cal S$ of this set to~$\Cal A$.
Below, we state two properties that hold for any configuration~$S$,
not necessarily geometric.
Recall that the lines in $\Bbb Gl^\perp\!/\Bbb Gl$ can be subdivided into
\roster*
\item
$15$ \emph{positive} lines~$\ell^+$ and $15$ \emph{negative} lines~$\ell^-$,
with $q|_{\ell^\pm}\cong\<\pm\frac13\>$, and
\item
$10$ \emph{isotropic} lines $\ell^0$, with $q|_{\ell^0}\equiv0$.
\endroster
The planes in $\Bbb Gl^\perp\!/\Bbb Gl$ can be subdivided into
\roster*
\item
$20$ \emph{positive} planes~$\pi^+$ and $20$ \emph{negative} planes~$\pi^-$,
with $\pi^\pm\cong \ell^\pm\oplus \ell^0$,
\item
$45$ \emph{hyperbolic} planes, isomorphic to $\ell^+\oplus \ell^-$, and
\item
$45$ \emph{definite} planes, isomorphic to
$\ell^+\oplus \ell^+\cong \ell^-\oplus \ell^-$.
\endroster
(There are no isotropic planes.)
The same terminology applies to the lines/planes in~$\Cal A$, according to the
underlying vector space.
The group $\OG(\frak q)$ acts transitively on
the set of lines/planes of the same type.
\lemma\label{lem.convex}
For any configuration $S\supset\smash{\tilde P}$, the set $\bar\Cal S\subset\Cal A$ is ``convex''\rom:
whenever a negative line $\ell^-\subset\Cal A$ has two common points
with~$\bar\Cal S$, it is contained in $\bar\Cal S$.
\endlemma
\lemma\label{lem.plane}
Let $S\supset\smash{\tilde P}$ be a configuration and $\pi^-\subset\Cal A$ a negative plane.
Then
the intersection $\bar\Cal S\cap\pi^-$ is contained in a line\rom;
equivalently, $\pi^-\not\subset\bar\Cal S$.
\endlemma
\proof[Proof of Lemmas~\ref{lem.convex} and~\ref{lem.plane}]
\autoref{lem.convex} is a restatement of
\autoref{lem.6.0-sections}\iref{i.(6,p).4}.
By \autoref{lem.convex}, the two restrictions in \autoref{lem.plane} are
equivalent: $\bar\Cal S\supset \pi^-$ if and only if $S$ contains three
non-collinear points of~$\pi^-$. If this is the case, the points can be chosen to
form an equilateral triangle with side~$-\frac13$; by
\autoref{lem.triplet}, we can find three sections $s_1,s_2,s_3$ so that
$\com(s_i\cs s_j)=1$ for all $i\ne j$ but $\com(s_1\cs s_2\cs s_3)=0$.
Then $s_i\cdot s_j=1$, see
\autoref{lem.6-0.intr}, and the three sections span a plane. This plane must
contain three more lines, \viz. the elements of $\Cal P$ intersecting
the three pairs
$s_i$, $s_j$, $1\le i<j\le3$. This contradicts \autoref{two-planes}.
\endproof
Remarkably, Lemmas~\ref{lem.convex} and~\ref{lem.plane} almost characterize
the sets of sections in configurations (not necessarily geometric) containing
a pencil of type $(6,0)^\circ$: this fact is established experimentally during
the proof of \autoref{th.6-q}. There is but one extra restriction, stated
below without proof.
\lemma\label{lem.two.lines}
Let $S\supset\smash{\tilde P}$ be a configuration
and $\ell',\ell''$ two parallel isotropic lines in a positive plane in $\Cal A$.
If $\ell'\subset\bar\Cal S$ and
$\bar\Cal S$ contains two points of~$\ell''$,
then $\ell''\subset\bar\Cal S$.
\done
\endlemma
\subsection{Primitivity and rigidity for types $(4,*)$ and $(3,*)$}\label{ss.4-q}
As above,
we fix a configuration $S\supset\Cal P$ and denote by
$\bar S\subset\Cal C_{p,q}$
the image of the set of sections $S(\Cal P)$ under the coordinate map.
It follows from
\autoref{obs.3-torsion} and \autoref{lem.sum.3} that
any pencil of type $(4,q)$, $q\le5$, or $(3,q)$, $q\le6$, is primitive.
Below, we consider in detail the two extremal cases.
\proposition\label{prop.4-6}
If $(p,q) = (4, 6)$, then $\Cal P$ is imprimitive
and has a unique, up to isomorphism, geometric finite index extension.
Furthermore,
\roster
\item\label{4.6-l*}
$\Cal P$ has a unique section $l^* \in S$ intersecting all ten fibers\rom;
\item\label{4.6-max}
as a consequence, $\Cal P$ is maximal in any
configuration\rom;
\item\label{4.6-plane}
if a section~$s$ intersects~$l^*$, then the lines $s$ and~$l^*$ span a
plane\rom;
\item\label{4.6-minus}
the set $\bar S$
is invariant under the
involution $\bar s\mapsto\bar s\raise3pt\hbox{$\scriptstyle\sqrt{}$}:=\Bbb I\om l^*\om s$.
\endroster
If $(p,q)=(3,7)$, then $\Cal P$ is imprimitive if and only if there is a
section~$l^*$ as in~\iref{4.6-l*} above\rom;
if this is the case, Statements~\iref{4.6-plane} and~\iref{4.6-minus} also
hold.
\endproposition
\proof
Let $(p,q)=(4,6)$.
The pivot $\smash{\tilde P}/P$ must have $3$-torsion by
\autoref{th.Nikulin}, whereas its $2$-torsion is trivial by
\autoref{lem.sum.4}. In addition to
the classes mentioned in \autoref{lem.sum.3},
the isotropic elements in $\discr_3P_{4,6}$ are those
constituting the $\Bbb G_{4,6}$ orbits of the classes of $\pm\Bbb Go$,
see \autoref{obs.3-torsion}.
Hence, up to automorphism, $\smash{\tilde P}/P$ is generated by~$\Bbb Go$,
and it is immediate that $\Bbb Go$ is a section~$l^*$ as in
\iref{4.6-l*}. A section with these properties is unique due to
\autoref{lem.s.s=0}\iref{com.5}.
If $(p,q)=(3,7)$, the only nontrivial elements that may be contained in the
pivot are the orbits of the classes of $\pm(\Bbb Go-\Bbb Gl)$, and $\Bbb Go-\Bbb Gl$ is a
section $l^*$ as in \iref{4.6-l*}.
With the above choice of $l^*$, we have $[l^*]=[0,\ldots,0;1,\ldots,1]$ and
\[
\com(s\cs l^*)=4-3(s\cdot l^*)
\label{eq.l*}
\]
for any other section~$s$. (In particular, this relation restricts the
coordinate vectors realized by sections.) Clearly, $s\cdot l^*=1$
if and only if $\com(s\cs l^*)=1$, in which case $s$ and~$l^*$
intersect a third common line $a\in\Cal P$ and thus span a plane;
in fact, this plane is $\{l^*,a,s,s\raise3pt\hbox{$\scriptstyle\sqrt{}$}\}$.
Statement~\iref{4.6-minus} follows from
\autoref{lem.s.s=1} or
\autoref{lem.s.s=0}\iref{com.4}
if $s\cdot l^*=1$ or $0$, respectively.
\endproof
\proposition\label{prop.3-7}
Let $(p,q)=(3,7)$.
If $\Cal P$ is not maximal, then there is a section~$s$ of~$\Cal P$ such
that $\num2(s)\le6$. Conversely, if there is a section~$s$ such that
$\num2(s)=6$, then $\Cal P$ is not maximal.
\endproposition
\proof
The only pencil~$\Cal P'$ that may properly contain~$\Cal P$ is one of
type~$(4,6)$, and the section~$s$ as in the statement is the restriction
of~$l^*$ given by \autoref{prop.4-6}.
If $\Cal P$ has a section~$s$ such that $\num2(s)=6$, then $s$ and~$l$
intersect nine disjoint lines; by \autoref{lem.skew}\iref{skew.2},
they must intersect a
tenth line.
\endproof
\proposition\label{prop.4-5}
Let $(p,q)=(4,5)$. Then
$\Cal P$ is primitive, and
$\Cal P$ is maximal in a geometric configuration~$S$
if and only if
$\num1(s)\le4$ for each section $s\in S(\Cal P)$.
\endproposition
\proof
The primitivity is essentially given by \autoref{obs.3-torsion} and
Lemmas~\ref{lem.sum.3} and~\ref{lem.sum.4}. By
\autoref{lem.skew}\iref{skew.4}, if there is a section~$s$
with $\num1(s)=5$,
the pencil has a tenth fiber.
Conversely, the only pencil that can
properly contain~$\Cal P$ is one of type $(4,6)$,
and its section~$l^*$ given by \autoref{prop.4-6}\iref{4.6-l*}
restricts to~$\Cal P$.
\endproof
\proposition\label{prop.coord.p<=4}
Let $p=4$, $q\ge4$ or $(p,q)=(3,7)$, and assume that $\Cal P$ is maximal.
Then, for any ambient geometric configuration $S\supset\Cal P$,
the coordinate map $c\colon S(\Cal P)\to\Cal C_{p,q}$, $s\mapsto[s]$,
identifies at most one pair of sections.
Furthermore, if such a pair $s_1,s_2$ identified by~$c$ does exist, then
there also is a
\rom(unique\rom) section $l^*\in S(\Cal P)$
such that $\bar s_1+\bar s_2+\bar l^*=\Bbb I$,
and, for this section~$l^*$, one has $\num2(l^*)=q$.
\endproposition
\proof
Let $s_1\ne s_2$ be a pair of sections such that $\bar s_1=\bar s_2$.
By \autoref{lem.skew}\iref{skew.4}, we have $\num2(\bar s_i)+p\le4$ and,
if $\num2(\bar s_i)+p=4$, there also is a section $l^*$ as in the statement.
The number of sections $l^*$ with $\num2(l^*)=q\ge4$ is
\roster*
\item
one if $(p,q)=(4,6)$ or at most one if $(p,q)=(3,7)$, see \autoref{prop.4-6},
\item
zero if $(p,q)=(4,5)$, see \autoref{prop.4-5}, and
\item
zero, one, or three if $(p,q)=(4,4)$, see \autoref{lem.skew}\iref{skew.4}.
\endroster
Furthermore, a given section~$l^*$ cannot share all $3$-coordinates with any
section other than $s_1,s_2$, see \autoref{lem.skew}\iref{skew.4} again.
If $(p,q)=(4,4)$ and $\Cal P$ has three sections $l_1^*,l_2^*,l_3^*$ with
$\num2(l_i^*)=4$, one can easily show that only one pull-back
$c\1(\bar l_i^*+\Bbb I)$ may be nonempty, as otherwise $S$ does not admit a
geometric $\bold L$-realization.
In the remaining case $(p,q)=(3,7)$ and $\num2(\bar s_i)=0$, one can use
\autoref{th.Nikulin} to show that the image of any geometric realization
of~$S$ must contain a section~$s$ of~$\Cal P$ such that $\num2(s)=6$;
hence, $\Cal P$ is not maximal, see \autoref{prop.3-7}.
\endproof
For the rest of this section, we assume that $(p,q)=(4,6)$.
Denote $S^*(\Cal P):=\{s\in S(\Cal P)\,|\,s\cdot l^*=1\}$.
According to~\eqref{eq.l*}, the image of this set in $\Cal C_{4,6}$ can be
characterized as
\[
\bar S^*=\bigl\{\bar s\in\bar S\bigm|\com(\bar s\cs l^*)=1\bigr\}.
\label{eq.barS*}
\]
Let also
\[*
\bar S^\circ:=\bigl\{s\in\bar S\bigm|
\text{$\com(s\cs s')=0$ and $\num1(s)+\num1(s')=1$ for some $s'\in\bar S^*$}\bigr\}.
\]
The following statement complements \autoref{lem.com2=1};
we do not need to assume that
the configuration $S\supset\smash{\tilde P}$ is geometric.
\lemma\label{lem.4-6.com2=0}
Let $(p,q)=(4,6)$. Consider a
pair of sections $s_1,s_2\in S(\Cal P)$
such that $\com(s_1\cs s_2)=0$ and let
$\bar s_1':=\Bbb I\om\bar s_1\raise3pt\hbox{$\scriptstyle\sqrt{}$}\om\bar s_2$ and
$\bar s_2':=\Bbb I\om\bar s_1\om\bar s_2\raise3pt\hbox{$\scriptstyle\sqrt{}$}=(\bar s_1')\raise3pt\hbox{$\scriptstyle\sqrt{}$}$. Then\rom:
\roster
\item\label{4-6.bounds}
one has $1\le\num1(s_1)+\num1(s_2)\le5$\rom;
\item\label{4-6.=5}
if $\num1(s_1)+\num1(s_2)=5$, then also $\bar s_1',\bar s_2'\in\bar S$.
\endroster
If the pair $s_1,s_2$ is ``homogeneous'', then\rom:
\roster[\lastitem]
\item\label{4-6.S*}
if $\bar s_1,\bar s_2\in\bar S^*$, one has $s_1\cdot s_2=0$, and
\item\label{4-6.not.S*}
if $\bar s_1,\bar s_2\notin\bar S^*$, one has $s_1\cdot s_2=1$.
\endroster
If the pair is ``mixed'', $\bar s_1\in\bar S^*$ and $\bar s_2\notin\bar S^*$, then\rom:
\roster[\lastitem]
\item\label{4-6.S*-3}
if $\num1(s_1)+\num1(s_2)\ge3$, one has $s_1\cdot s_2=1$, and
\item\label{4-6.S*-2}
if $\num1(s_1)+\num1(s_2)=2$,
one has $s_1\cdot s_2=0$ if and only if $\bar s_1',\bar s_2'\in\bar S$.
\endroster
\endlemma
If $S$ is required to be geometric, then one can also state that
$\num1(s_1)+\num1(s_2)\le4$ whenever $\bar s_1\in\bar S^*$.
We do not use this restriction explicitly.
\proof[Proof of \autoref{lem.4-6.com2=0}]
Statement~\iref{4-6.S*} is obvious, as $s_1,s_2$ are in distinct fibers
of the pencil $\Cal P(l^*)$.
In all other cases, by \autoref{lem.skew}\iref{skew.4'}, the
section~$s_1$ must intersect exactly one (if $\bar s_1\in\bar S^*$) or two (if
$\bar s_1\notin\bar S^*$) of the lines $s_2$, $s_2\raise3pt\hbox{$\scriptstyle\sqrt{}$}$; with \eqref{eq.l*} taken
into account,
the intersection $s_1\cdot s_2\raise3pt\hbox{$\scriptstyle\sqrt{}$}$ is given by
Lemmas~\ref{lem.s.s=1} and~\ref{lem.com2=1}.
\endproof
\corollary[of Lemmas~\ref{lem.com2=1} and~\ref{lem.4-6.com2=0}]\label{lem.4-6.combinatorial}
Any $(4,6)$-configuration~$S$
generated by $\bar S\sminus\bar S^\circ$ is
rigid.
\done
\endcorollary
\remark\label{rem.4-6.combinatorial}
For many configurations,
the hypotheses of \autoref{lem.4-6.combinatorial} can also be verified
combinatorially, using
Lemmas \ref{lem.s.s=0}\iref{com.4} and~\ref{lem.com2=1}:
assuming that $S\supset\smash{\tilde P}$ is generated by sections, it is generated by
$\bar S\sminus\bar S^\circ$ if, for any $\bar s\in\bar S^\circ$, there is a pair
$\bar s_1,\bar s_2\subset\bar S\sminus\bar S^\circ$ such that
$\bar s\op\bar s_1\op\bar s_2=\Bbb I$ and $\com(\bar s_1\cs\bar s_2)=1$ or~$4$.
\endremark
\subsection{Rigidity for type $(5,3)$}
As an immediate consequence of
\autoref{obs.3-torsion} and \autoref{lem.sum.3},
any pencil of type $(5,*)$ is primitive.
In the next two statements, $S$ does not need to be geometric.
\lemma\label{lem.5-q.maximal}
Let $p=5$, $q\ge1$, and assume that $\Cal P$ has a section. Then $\Cal P$
is contained in a pencil $\Cal P'$ of type $(6,*)^\circ$ if and only if
$\Cal P$ has a pair of sections $s_1,s_2$ such that
$s_1\cdot s_2=0$, $\com3(s_1\cs s_2)=0$, and $\dif1(s_1,s_2)>0$.
\endlemma
\proof
If $\Cal P\subset\Cal P'$, then $s_1,s_2$ are two
appropriate equivalent sections
of~$\Cal P'$, see \autoref{lem.triplet}. For the sufficiency, assume that
$(p,q)=(5,1)$ and
\[*
\bar s_1=[0,0,0,0,0;1],\quad \bar s_2=[1,1,1,1,1;0].
\]
Then an extra member of~$\Cal P'$ is
$h-l+\sum_{i=1}^5(m_{i,+}-m_{i,0})-2n_1-3s_1+3s_2$.
\endproof
\corollary\label{cor.5-3.com=0}
Let $p=5$, $q\ge1$, and assume that $\Cal P$ is maximal.
Then, for any pair $s_1,s_2\in S(\Cal P)$
such that $\com(s_1\cs s_2)=0$ and $\num1(s_1)+\num1(s_2)>0$, one has
$s_1\cdot s_2=1$.
\done
\endcorollary
Let $(p,q)=(5,3)$ and assume that $\Cal P$ is maximal
(see \autoref{lem.5-q.maximal} for a criterion).
Then, according to \autoref{lem.com2=1} and
\autoref{cor.5-3.com=0}, the intersection $s_1\cdot s_2$ may not be
determined by the coordinates $\bar s_1,\bar s_2\in\bar S$ only if
\roster*
\item
one has $\com(\bar s_1\cs\bar s_2)=\num1(\bar s_1)=\num1(\bar s_2)=0$ and
\item
for any $\bar s\in\bar S$, if $\com(\bar s\cs\bar s_1)=\com(\bar s\cs\bar s_2)=0$, then
$\num1(\bar s)=0$.
\endroster
(For the latter condition, if $\num1(\bar s)>0$, then $s\cdot s_1=s\cdot s_2=1$
by \autoref{cor.5-3.com=0} and,
hence, $s_1\cdot s_2=0$,
see \autoref{cor.obverse}\iref{obverse.maximal}.)
Denote by $\bar S^\circ\subset\bar S$ the union of all such pairs
$(\bar s_1,\bar s_2)$.
\corollary\label{cor.5-3.combinatorial}
Any $(5,3)$-configuration~$S$
generated by $\bar S\sminus\bar S^\circ$ is rigid.
\done
\endcorollary
For another sufficient rigidity condition, consider a section
$s_0\in S(\Cal P)$
and let $\frak S(s_0):=\Cal P(s_0)\cap S_k(\Cal P)$.
If $\num1(s_0)>0$, this set is determined by the coordinates:
by \autoref{lem.com2=1} and \autoref{cor.5-3.com=0}, one has $s\in\frak S(s_0)$
if and only if $\com(\bar s\cs\bar s_0)=0$ or $\com(\bar s\cs\bar s_0)=1$ and
$\Bbb I\om\bar s\om\bar s_0\in\bar S$.
Furthermore, the intersections $s_1\cdot s_2$, $s_1,s_2\in\frak S$,
are also known: they are given by
\autoref{cor.obverse}.
\corollary\label{cor.5-3.pencil}
Any $(5,3)$-pair $(S,\Cal P)$ generated by the union
$\{\bar s_0\}\cup\bar\frak S(s_0)$ for some section
$s_0\in S(\Cal P)$ such that $\num1(s_0)>0$
is rigid.
\done
\endcorollary
\subsection{Other types}\label{s.other.pencils}
For completeness, we discuss the primitivity of the other types of
pencils.
We treat the $3$- and $2$-torsion of the pivot separately.
\proposition\label{prop.3.pivot}
Let $\Cal P$ be a pencil of type $(p,q)$ with $p\le2$.
If the
pivot $\smash{\tilde P}/P$ has $3$-torsion, then
\roster*
\item
$p+q=10$, \ie,
$(p,q)=(2,8)$, $(1,9)$, or $(0,10)$, and
\item
$\Cal P$ has a section $l^*$ as in \autoref{prop.4-6}\iref{4.6-l*}.
\endroster
Conversely, if $\Cal P$ has a section $l^*$
as in \autoref{prop.4-6}\iref{4.6-l*}, then $p+q=10$,
one has $\smash{\tilde P}/P=\CG3$, and
Statements~\iref{4.6-plane} and~\iref{4.6-minus} of \autoref{prop.4-6} also
hold.
A section $l^*$ as
above
\rom(or, equivalently, a
geometric index~$3$ extension $\smash{\tilde P}\supset P$\rom) is unique up to
automorphism.
\endproposition
\proof
The proof repeats literally that of \autoref{prop.4-6}; the section $l^*$ is
the class $\frac13[\Bbb Go+(p-4)\Bbb Gl]$ (\cf. also \autoref{lem.skew}\iref{skew.2}).
A direct computation shows that,
whenever the pivot $\smash{\tilde P}\ni l^*$, one has $\smash{\tilde P}/P=\CG3$, \ie, no further finite
index extension satisfies the conditions of \autoref{pre-conf}.
\endproof
By \autoref{obs.3-torsion}, any $2$-torsion element $\Bbb Ga\in\smash{\tilde P}/P$ is of the
form $\sum3\Bbb Gn_k$, where the index $k$ runs over a certain subset
$\supp\Bbb Ga\subset\operatorname{fb}_1\Cal P$, called the \emph{support} of $\Bbb Ga$.
It is clear that $\supp(\Bbb Ga+\Bbb Gb)$ is the symmetric difference
$(\supp\Bbb Ga)\bigtriangleup(\supp\Bbb Gb)$.
\proposition\label{prop.2.pivot}
Let $\Cal P$ be a pencil of type $(p,q)$, and let $\Bbb Ga\in\smash{\tilde P}/P$ be a nonzero
$2$-torsion element. Then
\roster
\item\label{2.pivot.=8}
one has $\ls|\supp\Bbb Ga|=8$ and, in particular, $q\ge8$\rom;
\item\label{2.pivot.section}
$\ls|\supp\Bbb Ga\cap\scom1(s)|=0$, $2$, or~$4$ for any section~$s$ of~$\Cal P$.
\endroster
Besides, the $2$-torsion of the pivot is as follows\rom:
\roster*
\item
$(\CG2)\oplus(\CG2)$ if $(p,q)=(0,12)$,
\item
$\CG2$ if $(p,q)=(0,11)$, $(1,10)$, or $(2,9)$,
\item
$0$ or $\CG2$ in all other cases with $q\ge8$.
\endroster
A geometric index~$2$ \rom(index~$4$ in the case $q=12$\rom) extension
$\smash{\tilde P}\supset P$ is unique up to automorphism.
\endproposition
\proof
Clearly, $\ls|\supp\Bbb Ga|=0\bmod4$;
hence, $\ls|\supp\Bbb Ga|=8$ or $12$ by \autoref{lem.sum.4}.
The last statement is proved by a direct computation using
\autoref{th.Nikulin}. In particular, it follows that, in the case $q=12$,
there are three distinct nonzero elements and, hence, none of them can have
support of length~$12$. This proves Statement~\iref{2.pivot.=8}.
For Statement~\iref{2.pivot.section}, it suffices to consider the minimal
pencil of type $(0,8)$, so that $\supp\Bbb Ga=\operatorname{fb}_1\Cal P$.
Then, clearly, $\num1(s)$ is even, as otherwise $s\notin P$, and the values
$\num1(s)=6$ and $8$ are ruled out by \autoref{pre-conf}\iref{ex-div} and
\iref{ell-pencil}, respectively.
The uniqueness is immediate. In the case of index~$2$, an extension is
determined by a choice of the octet $\supp\Bbb Ga\subset\operatorname{fb}_1\Cal P$.
If $q=12$, three octets $\supp\Bbb Ga_i\subset\operatorname{fb}_1\Cal P$, $i=1,2,3$, should
be chosen so that $\ls|\supp\Bbb Ga_i\cap\supp\Bbb Ga_j|=4$ whenever $i\ne j$.
This choice is equivalent to partitioning $\operatorname{fb}_1\Cal P$ into three
quadruples.
\endproof
\corollary[\cf. \autoref{lem.skew}\iref{skew.2}]\label{cor.10.fibers}
If a pencil~$\Cal P$ has a section~$s$ intersecting ten fibers of~$\Cal P$,
then $\Cal P$ has no other fibers.
\endcorollary
\proof
Assuming that $\Cal P$ is of type $(0,11)$,
\autoref{prop.2.pivot}\iref{2.pivot.section} applied to~$s$ and
the nontrivial element $\Bbb Ga\in\smash{\tilde P}/P$ leads to a contradiction.
The existence of~$\Bbb Ga$ is also
guaranteed by \autoref{prop.2.pivot}.
\endproof
As another consequence of the results of this section, the type $(p,q)$ and
the primitivity bit almost determine a geometric realization $P\to\bold L$ up to
isomorphism. The pivot $\smash{\tilde P}/P$ may (must if $q>10$) have $2$-torsion if and
only if $q\ge8$ (see \autoref{prop.2.pivot}), and it may
(must if $(p,q)=(4,6)$ or $p=6$ and $q>0$) have
$3$-torsion if and only if $p=6$ (see \autoref{prop.6-0}) or $p+q=10$ (see
Propositions \ref{prop.4-6} and~\ref{prop.3.pivot}). The
case $p+q=10$ and $q\ge8$
is exceptional: here, the pivot may be trivial, $\CG2$, or $\CG3$,
\ie, there are three geometric realizations $P\to\bold L$.
In this latter case, it makes sense to subdivide the type $(p,q)^\circ$ into
$(p,q)^2$ and $(p,q)^3$.
\conjecture
The pivot $\smash{\tilde P}/P$ has $3$-torsion if and only if the axis of the pencil is a
line of the second kind in the sense of Segre~\cite{Segre}.
\endconjecture
\section{Counting sections of pencils}\label{S.counting}
The goal of this section is a computer aided estimate on the size of a
geometric configuration containing a pair of large obverse pencils.
Even though most extra restrictions in the ``counting'' lemmas seem purely
technical, for the moment we do need them to keep the computation under
control.
\subsection{The algorithm}\label{s.counting}
Fix a pencil $\Cal P:=\Cal P(l)$ of type~$(p,q)$ and a section~$s_0$ of~$\Cal P$.
Let $\bar s_0:=[s_0]\in\Cal C_{p,q}$ and denote by $\Bbb G(\bar s_0)$ the
stabilizer of~$\bar s_0$.
(Up to automorphism, there are $q+1$ possibilities for $\bar s_0$;
we usually choose for $\bar s_0$ the vector with several last $1$-coordinates equal
to~$1$ and all other coordinates equal to~$0$.)
More sections $s_1,s_2,\ldots$ are added one by one, building the
obverse pencil
$\Cal P(s_0)$. Thus, we assume that
\[
s_0\cdot s_i=1\quad\text{and}\quad s_i\cdot s_j=0\quad
\text{for $i>j\ge1$},
\label{eq.intr}
\]
\ie, all new sections are in separate fibers of~$\Cal P(s_0)$.
Our goal is adding sufficiently many sections, so that, in the resulting
configuration, $\Cal P$ is still a maximal pencil and
the multiplicity and valency of~$s_0$
satisfy certain prescribed bounds
\[*
p_{\min}\le\operatorname{mult} s_0\le p_{\max},\quad
v_{\min}\le\operatorname{val} s_0\le v_{\max}.
\]
It is essential that most of the time we deal with coordinates rather than
sections themselves: we choose certain elements $\bar s_i\in\Cal C_{p,q}$
and consider the pre-configuration
\[*
S_k:=P(\bar s_0,\ldots,\bar s_k)=(\smash{\tilde P}+\Z s_0+\ldots+\Z s_k)/\!\ker,
\]
where $[s_i]=\bar s_i$ for all $i\ge0$ and the intersection matrix of~$P$ is
extended using~\eqref{eq.intr} and the definitions of sections and
coordinates.
By \autoref{cor.obverse}, for each $i\ge1$ we must have
$\bar s_i\in\Cal C_0(\bar s_0)\cup\Cal C_1(\bar s_0)$, where
\[*
\Cal C_r(\bar s_0):=\bigl\{\bar s\in\Cal C_{p,q}\bigm|\com(\bar s\cs\bar s_0)=r\bigr\};
\]
furthermore,
$s_i$ is contained in a $1$-fiber of $\Cal P(s_0)$
if and only if $\bar s_i\in\Cal C_0(\bar s_0)$.
Once a lattice~$S_k$ has been constructed, we denote by
\[*
G_{k}:=\OG_h(S_{k},l,s_0)
\]
the group of its isometries preserving~$h$, $l$ and~$s_0$.
The computation of this group is discussed in \autoref{obs.group} below.
(At the expense of a certain overcounting, we compute separately the
stabilizers in $\SG{3}^p$ and $\SG{p}\times\SG{q}$.)
The algorithm runs in several steps.
\subsubsection{Step 1\rom: collecting the candidates}\label{obs.pre}
Assume $S_{k-1}$ known
and denote by
$\bar S_{k-1}$ the multiset $\{[s]\,|\,s\in S_{k-1}(\Cal P)\}$.
The group $G_{k-1}$
acts on
$\Cal C_0(\bar s_0)\cup\Cal C_1(\bar s_0)\sminus\bar S_{k-1}$ and, when passing
to~$S_k$,
it suffices to take for~$\bar s_k$ one representative from each orbit of this
action. We can also assume that all explicit $3$-fibers are added first
and avoid adding too many $3$-fibers:
\roster
\item\label{pre.plane}
$\bar s_k\in\Cal C_1(\bar s_0)$ if $\operatorname{mult} s_0<p_{\min}$ and
$\bar s_k\in\Cal C_0(\bar s_0)$ if $\operatorname{mult} s_0\gep_{\max}$.
\endroster
There is an obvious injective map from the set of $3$-fibers of
$\Cal P(s_0)$ to $\operatorname{fb}\Cal P$ (each $3$-fiber contains a unique line
$a\in\Cal P$); this map should remain injective:
\roster[\lastitem]
\item\label{pre.injective}
if $s\in S_{k-1}(\Cal P)$ is contained in a $3$-fiber of $\Cal P(s_0)$,
then $\com(\bar s_0\cs\bar s_k\cs s)=0$.
\endroster
Other restrictions taken into account when choosing $\bar s_k$ are as
follows:
\roster[\lastitem]
\item\label{pre.det}
\autoref{lem.det} (in fact, we check that $[-r_{ij}]$ is negative
semi-definite);
\item\label{pre.rank}
$\rank[-r_{ij}]+2p+q\le18$ (as $S_k$ should admit an embedding to~$\bold L$);
\item\label{pre.4}
$\bold S_4(\bar s_k)$:
$\com(\bar s_k\cs\bar s)\le4$ for any $\bar s\in\bar S_{k-1}$,
see \autoref{lem.s.s=0}\iref{com.5};
\item\label{pre.3}
$\bold S_3(\bar s_k)$:
$\com(\bar s_k\cs\bar s'\cs\bar s'')\le3$ for all $\bar s'\ne\bar s''\in\bar S_{k-1}$,
see \autoref{lem.s.s=0}\iref{com.4},~\iref{com.3};
\item\label{pre.h}
$\bold S_h(\bar s_k)$: if $\com(\bar s_k\cs\bar s)=4$ for some $\bar s\in\bar S$, then
$\bold S_4(\bar s')$ and $\bold S_3(\bar s')$ hold,
where $\bar s':=\Bbb I\om\bar s\om\bar s_k$, see \autoref{lem.s.s=0}\iref{com.4};
\item\label{pre.p}
if $\bar s_k\in\Cal C_1(\bar s_0)$, then
$\bold S_4(\bar s')$, $\bold S_3(\bar s')$, and $\bold S_h(\bar s')$ hold
for $\bar s':=\Bbb I\om\bar s_0\om\bar s_k$.
\endroster
In cases~\iref{pre.h} and~\iref{pre.p}, we also exclude from further
consideration the $G_{k-1}$-orbit of the respective section~$\bar s'$, as
its presence in $\bar S_k$ would imply the presence of~$\bar s_k$.
\subsubsection{Step 2\rom: validating a section $\bar s_k$}\label{obs.post}
Now, for each candidate~$\bar s_k$
collected at the previous step,
we compute
the pre-configuration $S_k=(S_{k-1}+\Z s_k)/\!\ker$, consider the orthogonal
complement $h^\perp$ in $S_k$, and use \Bbb GAP~\cite{GAP4}
function {\tt ShortestVectors} to compute the sets
$\frak V_2(S_k)$ and $\frak V_4(S_k)$, where
\[*
\frak V_r(S_k):=\{v\in h^\perp\subset S_k\,|\,v^2=-r\}.
\]
(Note that
the lattice $S_k$ is hyperbolic,
hence $h^\perp$ is elliptic,
by \autoref{obs.pre}\iref{pre.det}.)
A candidate $\bar s_k$ is rejected as invalid (not leading to a
geometric configuration) if one of the following holds:
\roster
\item\label{post.ex-div}
$\frak V_2\ne\varnothing$, see \autoref{pre-conf}\iref{ex-div};
\item\label{post.ell-pencil}
there is $v\in\frak V_4$ such that $v+h\in2S_k$,
see \autoref{pre-conf}\iref{ell-pencil}.
\endroster
Otherwise, the new set of sections $S_k(\Cal P)$ is computed \via
\[*
S_k(\Cal P)=\{v+l\,|\,v\in\frak V_4,\ v\cdot l=2\}.
\]
At this point, the full intersection matrix is known, and we can compute and
record
the set
\[*
\frak S_k:=\frak S(\bar s_0,\ldots,\bar s_k)=\Cal P(s_0)\cap S_k(\Cal P),
\]
including types of
the fibers. This set is used for the further validation. Namely, we
reject~$\bar s_k$ if
\roster[\lastitem]
\item\label{post.planes}
$\operatorname{mult} s_0>p_{\max}$ (too many $3$-fibers), or
\item\label{post.lines}
$\operatorname{val} s_0>v_{\max}$ (too many lines in $\Cal P(s_0)$), or
\item\label{post.fiber}
there is a pair $s'\ne s''\in\frak S_k$ such that $s'\cdot s''=1$ and
$\com(s_0\cs s'\cs s'')=0$,
see \autoref{cor.obverse}, or
\item\label{post.maximal}
any other type specific restriction is not satisfied
(whenever used, this extra restriction is specified
explicitly in the respective proof).
\endroster
To conserve space, for each candidate~$\bar s_k$ that passed the validation, we
record
\roster*
\item
the elements $\bar s_0,\ldots,\bar s_k\in\Cal C_{p,q}$,
\item
the multiset $\bar S_k$ (sections in terms of coordinates), and
\item
the image $\bar\frak S_k\subset\bar S_k$ of $\frak S_k$ under the coordinate
map,
\endroster
disregarding all other information.
\subsubsection{Step~3\rom: eliminating repetitions}\label{obs.group}
Before further processing, we eliminate the repetitions in the obtained list
of lattices~$S_k$ by retaining a single representative of each orbit of the
$\Bbb G(\bar s_0)$-action.
To compute the orbits or, equivalently, the stabilisers $G_k$,
we use one of the following two approaches.
\roster
\item\label{orbits.partial}
The stabilizers are computed \via\ $G_k=\stab\bar\frak S_k\subset\Bbb G(\bar s_0)$,
and the lattices are compared by means of the orbits of $\bar\frak S_k$.
This approach works if each $S_k$ is exactly as in the construction above,
\ie, generated over $\smash{\tilde P}_{p,q}$ by the set
$\{s_0\}\cup\frak S_k$, on which {\em the intersection matrix is known}.
\item\label{orbits.full}
The stabilizers are computed \via\ $G_k=\stab\bar S_k\subset\Bbb G(\bar s_0)$,
and the lattices are compared by means of the orbits of $\bar S_k$.
This approach applies if each $S_k$ is known to be combinatorially rigid.
\endroster
By default, we use approach~\iref{orbits.partial}.
\subsubsection{Step 4\rom: checking the $\bold L$-realizability}\label{obs.K3}
For each configuration~$S_k$ obtained at Step~3, we check if it admits
a geometric $\bold L$-realization.
To this end, we start with the lattice~$S_k$ itself and apply
\autoref{th.Nikulin}
to see if $S_k$ admits a primitive $\bold L$-realization.
If not, we replace~$S_k$ with a finite index extension
$\smash{\tilde S}_k\supset S_k$ defined by an
isotropic vector $v\in\discr S_k$ of prime order.
(This and subsequent steps are repeated
for each isotropic vector found.) The new lattice~$\smash{\tilde S}_k$ is rejected if
it fails to satisfy one of the conditions in \autoref{obs.post};
otherwise, we apply
\autoref{th.Nikulin}
again. The algorithm stops when
a primitive embedding is found (and then $S_k$ is accepted)
or all isotropic vectors are exhausted; in the
latter case, the original lattice~$S_k$ is rejected as not
admitting a geometric $\bold L$-realization.
Admittedly ineffective, this algorithm works reasonably well for the vast
majority of configurations.
\subsubsection{Increasing the rank}\label{obs.rank}
We repeat Steps~1--4 above until either nothing else can be added or the
desired bounds $\operatorname{mult} s_0\ge p_{\min}$, $\operatorname{val} s_0\ge v_{\min}$ have been
achieved. Most lattices~$S_k$ obtained have rank~$20$ and, hence, each geometric
configuration containing~$S_k$ is a finite index extension of~$S_k$.
In
the
cases where
$\rank S_k\le19$, we keep~$S_k$ on the
list, but
we allow
also
the addition of an extra
section~$s_{k+1}$ disjoint from~$s_0$. (Certainly, in this case we have to
switch to approach~\iref{orbits.full} in \autoref{obs.group}, \ie, we need
to know that the configurations obtained are combinatorially rigid.
If the latter property cannot be asserted, configurations with
extra
sections are excluded from Step~3.)
This time, we have $s_0\cdot s_{k+1}=0$,
but the intersections $\iota_i:=s_i\cdot s_{k+1}$, $i=1,\ldots,k$,
should be given as part of the input;
for each
pair $(\bar s_{k+1},[\iota_i])$,
we check conditions \iref{pre.det}--\iref{pre.p} in
\autoref{obs.pre}, requiring in addition that
$\rank S_{k+1}>\rank S_k$,
\ie, the same lattice cannot be obtained as a finite index
extension of~$S_k$.
Then, Steps~2--4 are repeated and, at Step~2,
we require that
\roster
\item\label{rank.=}
the valency of~$s_0$ in $S_{k+1}$ must be equal to that in~$S_k$,
\endroster
as otherwise the same lattice can be obtained by adding a section
intersecting~$s_0$.
\subsubsection{Final step\rom: computing $\bold L$-realizations}\label{obs.L}
There remains to enumerate, for each lattice~$S_k$,
its geometric $\bold L$-realizations.
This is done similarly to
\autoref{obs.K3}, except that we do not stop at the first valid realization;
on the other hand, we
require that
\roster
\item\label{L.=}
the valency of~$s_0$ in $\smash{\tilde S}_{k}$ must be equal to that in~$S_k$,
\cf. \autoref{obs.rank}\iref{rank.=}.
\endroster
At this step,
for all consecutive extensions $S_k=\smash{\tilde S}_k^0\subset\smash{\tilde S}_k^1\subset\ldots$ of
prime index, we can also check that $\ls|\operatorname{Fn}(\smash{\tilde S}_k^i)|>\ls|\operatorname{Fn}(\smash{\tilde S}_k^{i-1})|$;
this fact implies that all configurations
found are generated by sections.
For each finite index extension $\smash{\tilde S}_k\supset S_k$ found in this way,
{\em assuming that $\Cal P$ is maximal in $\smash{\tilde S}_k$}, we have
\[
\ls|\operatorname{Fn}(\smash{\tilde S}_k)|=\ls|\smash{\tilde S}_k(\Cal P)|+3p+q+1.
\label{eq.lines}
\]
In extreme cases (when too many lines have been found),
we recompute the maximal pencil \via
\[*
\Cal P(l)=\{v+l\,|\,v\in\frak V_6(\smash{\tilde S}_k),\ v\cdot l=3\}
\]
and compute the pencil structure of~$\smash{\tilde S}_k$.
(The computation of $\frak V_6$ is rather expensive and we try to avoid it as
much as possible.)
\subsection{A list of configurations}\label{obs.pencils}
For further references, we collect in
\autoref{tab.list}
a list of large
configurations found in the experiments.
(We list all known configurations with more than $48$ lines;
for the moment,
we do not assert that the list is complete.)
The notation
refers to certain particular configurations
found in the
computation.
We will also speak
about configurations of \emph{type $\config{X}_*$, $\config{Y}_*$}, \etc.,
meaning that the pencil
structures of the two configurations are equal.
The configurations
marked with a $^*$ in the table (most notably, the \aconfig{Y}-series)
admit totally reflexive $\bold L$-realizations; the others do
not.
One has
$\rank\aconfig{Z}_*=19$;
the other configurations listed in the table are of
rank~$20$. There is no particular difference between \aconfig{X} and
\aconfig{Q}.
\table
\caption{Known large geometric configurations}\label{tab.list}
\def\raise3pt\hbox{$\scriptstyle\sqrt{}$}{\relax\llap{$^*$}}
\hbox to\hsize{\hss\vbox{\halign{\strut*uad#*uad\hss&\hss#\hss*uad&#\hss\ \cr
\noalign{\hrule
}
\strut\hss$S$\hss&$\ls|\operatorname{Fn}|$&Pencil structure (see \autoref{s.invariants}), reference, remarks
\cr
\noalign{
\hrule
}
\aconfig{64}&64&
$\PS{[ [ 6, 0 ], 16 ], [ [ 4, 6 ], 48 ]}$,
see \autoref{th.6-q} and \autoref{th.4-6}
\cr
\aconfig{60}&60&
$\PS{[ [ 6, 2 ], 10 ], [ [ 4, 4 ], 30 ], [ [ 3, 7 ], 20 ]}$,
see \autoref{th.6-q}
\cr
\aconfig{60.2}&60&
$\PS{[ [ 4, 5 ], 60 ]}$,
see \autoref{lem.4-5}
\cr
\aconfig{56}&56&
$\PS{[ [ 4, 6 ], 8 ], [ [ 4, 4 ], 32 ], [ [ 2, 8 ], 16 ]}$,
see \autoref{th.4-6}
\cr
\raise3pt\hbox{$\scriptstyle\sqrt{}$}\aconfig{56.real}&56&
$\PS{[ [ 4, 4 ], 32 ], [ [ 3, 7 ], 24 ]}$,
see \autoref{lem.3-7}
\cr
\aconfig{q56}&56&
$\PS{[ [ 4, 4 ], 24 ], [ [ 3, 7 ], 32 ]}$,
see \autoref{lem.3-7}
\cr
\aconfig{54}&54&
$\PS{[ [ 6, 2 ], 4 ], [ [ 4, 6 ], 6 ], [ [ 4, 4 ], 6 ], [ [ 4, 2 ], 24 ],
[ [ 2, 8 ], 12 ], [ [ 0, 10 ], 2 ]}$,
see \autoref{th.6-q}
\cr
\aconfig{q54}&54&
$\PS{[ [ 4, 4 ], 24 ], [ [ 4, 3 ], 24 ], [ [ 0, 12 ], 6 ]}$,
see \autoref{lem.4-4}
\cr
\aconfig{52.1}&52&
$\PS{[ [ 6, 0 ], 1 ], [ [ 4, 4 ], 12 ], [ [ 4, 3 ], 12 ], [ [ 4, 2 ], 3 ],
[ [ 3, 5 ], 18 ], [ [ 0, 12 ], 6 ]}$,
see \autoref{th.6-q}
\cr
\aconfig{52.2}&52&
$\PS{[ [ 6, 0 ], 1 ], [ [ 4, 4 ], 9 ], [ [ 4, 3 ], 18 ], [ [ 3, 5 ], 18 ],
[ [ 0, 12 ], 6 ]}$,
see \autoref{th.6-q}
\cr
\aconfig{52.4}&52&
$\PS{[ [ 4, 6 ], 10 ], [ [ 3, 5 ], 40 ], [ [ 0, 10 ], 2 ]}$,
see \autoref{th.4-6}
\cr
\aconfig{52.5-3}&52&
$\PS{[ [ 5, 3 ], 8 ], [ [ 3, 5 ], 32 ], [ [ 2, 8 ], 12 ]}$,
see \autoref{th.5-3}
\cr
\raise3pt\hbox{$\scriptstyle\sqrt{}$}\aconfig{52.5}&52&
$\PS{[ [ 4, 6 ], 2 ], [ [ 4, 4 ], 16 ], [ [ 3, 5 ], 20 ], [ [ 2, 8 ], 14 ]}$,
see \autoref{th.4-6}
\cr
\raise3pt\hbox{$\scriptstyle\sqrt{}$}\aconfig{52.real}&52&
$\PS{[ [ 4, 5 ], 8 ], [ [ 4, 3 ], 12 ], [ [ 3, 6 ], 16 ], [ [ 2, 7 ], 16 ]}$,
see \autoref{lem.4-5}
\cr
\raise3pt\hbox{$\scriptstyle\sqrt{}$}\aconfig{52.0}&52&
$\PS{[ [ 6, 0 ], 4 ], [ [ 4, 4 ], 12 ], [ [ 4, 2 ], 24 ], [ [ 2, 8 ], 12 ]}$,
see \autoref{th.6-q}; $\rank\config{52.0}=19$
\cr
\aconfig{q52.1}&52&
$\PS{[ [ 4, 4 ], 16 ], [ [ 4, 3 ], 16 ], [ [ 4, 2 ], 16 ], [ [ 0, 12 ], 4 ]}$,
see \autoref{lem.4-4}
\cr
\aconfig{q52.2}&52&
$\PS{[ [ 4, 4 ], 8 ], [ [ 4, 3 ], 32 ], [ [ 4, 2 ], 8 ], [ [ 0, 12 ], 4 ]}$,
see \autoref{lem.4-4}
\cr
\aconfig{51}&51&
$\PS{[ [ 6, 2 ], 1 ], [ [ 5, 3 ], 6 ], [ [ 4, 3 ], 3 ], [ [ 3, 6 ], 6 ],
[ [ 3, 4 ], 8 ], [ [ 2, 7 ], 27 ]}$
, see \autoref{th.6-q}
\cr
\aconfig{50.1}&50&
$\PS{[ [ 6, 1 ], 1 ], [ [ 4, 4 ], 9 ], [ [ 4, 3 ], 9 ], [ [ 4, 2 ], 9 ],
[ [ 3, 4 ], 18 ], [ [ 0, 12 ], 3 ], [ [ 0, 10 ], 1 ]}$
, see \autoref{th.6-q}
\cr
\aconfig{50.2}&50&
$\PS{[ [ 6, 1 ], 1 ], [ [ 4, 4 ], 6 ], [ [ 4, 3 ], 15 ], [ [ 4, 2 ], 6 ],
[ [ 3, 4 ], 18 ], [ [ 0, 12 ], 3 ], [ [ 0, 10 ], 1 ]}$
, see \autoref{th.6-q}
\cr
\aconfig{50}&50&
$\PS{[ [ 5, 3 ], 4 ], [ [ 4, 4 ], 8 ], [ [ 3, 5 ], 16 ],
[ [ 2, 8 ], 4 ], [ [ 2, 6 ], 18 ]}$,
see \autoref{th.5-3}
\cr
\aconfig{50.0}&50&
$\PS{[ [ 4, 4 ], 10 ], [ [ 3, 5 ], 40 ]}$,
see \autoref{lem.4-4}; $\rank\config{50.0}=19$
\cr
\aconfig{49}&49&
$\PS{[ [ 6, 0 ], 1 ], [ [ 4, 3 ], 18 ], [ [ 4, 2 ], 9 ], [ [ 3, 4 ], 18 ],
[ [ 0, 12 ], 3 ]}$
, see \autoref{th.6-q}
; $\rank\config{49}=19$
\cr
\raise3pt\hbox{$\scriptstyle\sqrt{}$}\aconfig{48}&48&
$\PS{[ [ 5, 1 ], 2 ], [ [ 3, 7 ], 6 ], [ [ 3, 5 ], 24 ], [ [ 2, 6 ], 12 ],
[ [ 1, 9 ], 4 ]}$,
see \autoref{lem.3-7}
\cr
\raise3pt\hbox{$\scriptstyle\sqrt{}$}\aconfig{48.2}&48&
$\PS{[ [ 4, 4 ], 4 ], [ [ 4, 2 ], 16 ], [ [ 3, 6 ], 8 ], [ [ 2, 7 ], 12 ],
[ [ 2, 6 ], 8 ]}$,
see \autoref{lem.4-4}
\cr
\noalign{
\hrule}\crcr}}\hss}
\endtable
\theorem\label{th.uniqueness}
A
geometric configuration of
each type
listed in \autoref{tab.list}
is unique up to isomorphism.
\endtheorem
\proof
Each configuration~$S$
satisfies the hypotheses of the
respective
classification statement cited in the table (with pencils of type
$(6,0)^\bullet$ ruled out by \autoref{th.6-0.primitive}),
and the uniqueness follows from the classification.
Indeed,
the essential part of the hypotheses is the existence of a certain pair of
obverse pencils.
Let $v:=\max\{\operatorname{val} l\,|\,l\in\operatorname{Fn}(S)\}$, and denote by~$n$ the number of lines
of valency~$v$.
If $v>18$, then, in view of \eqref{tablichka},
the configuration is covered by \autoref{th.6-q}.
If $n\ge5$ or $n\ge4$ and $\ls|\operatorname{Fn}(S)|<4v-8$, then,
referring in the latter case to \autoref{cor-Segre}, we obtain a pair of
skew lines of valency~$v$, which suffices for all statements.
In the remaining four cases (\config{52.1}, \config{52.2}, \config{52.5},
and \config{49}), a similar argument gives us a pair of lines of valency
$v=18$ and $\ge15$.
\endproof
Among others,
\autoref{tab.list} lists
all geometric configurations~$S$ containing a pair of obverse
pencils $\Cal P_1$, $\Cal P_2$ such that
\[*
\ls|\operatorname{Fn}(S)|>48*uad\text{and}*uad
\ls|\Cal P_1|+\ls|\Cal P_2|\ge33.
\]
\subsection{Pencils of type $(6,*)$}\label{s.6.0}
For the moment, $(6,*)^\circ$-configurations form the only class that is
sufficiently well
understood. The properties of such configurations
are summarized in the next
theorem.
\theorem\label{th.6-q}
There are $300$ isomorphism classes of $(6,q)^\circ$-pairs\rom:
\roster*
\item
for $q=0$\rom: $62$ classes, of which $43$ are totally reflexive\rom;
\item
for $q=1$\rom: $107$ classes, none totally reflexive\rom;
\item
for $q=2$\rom: $131$ classes, none totally reflexive.
\endroster
Let $(S,\Cal P)$ and $(S',\Cal P')$ be two $(6,*)^\circ$-pairs. Then\rom:
\roster
\item\label{6-q.rigid}
$S$ is generated by sections and combinatorially rigid\rom;
\item\label{6-q.ls}
with one exception,
one has
$(S',\Cal P')\cong(S,\Cal P)$
if and only if
$\bold Ls(S')=\bold Ls(S)$\rom;
\item\label{6-q.known}
either one has $\ls|\operatorname{Fn}(S)|<52$ or
$S\cong\config{64}$, \config{60}, \config{54}, \config{52.1}, \config{52.2},
or~\config{52.0}.
\endroster
Furthermore, for any
$n\in\{19,\ldots,52,54,60,64\}$, there is a
$(6,*)^\circ$-configuration $S$ such that $\ls|\operatorname{Fn}(S)|=n$.
\endtheorem
As an addendum to \autoref{th.6-q}\iref{6-q.ls}, note that, with the
exception of eleven pairs, any two distinct $(6,*)^\circ$-configurations are
distinguished by the pencil structure.
\proof[Proof of \autoref{th.6-q}]
We start with a pencil~$\Cal P$ of type $(6,0)^\circ$ and apply the algorithm
of \autoref{s.counting}, introducing a number of modifications:
\roster*
\item
we do not fix a section~$\bar s_0$ and use
the group $\tilde\Bbb G$ instead of $\Bbb G(\bar s_0)$,
see~\eqref{eq.6-0.group};
the intersection matrices are computed by means of \autoref{lem.6-0.intr};
\item
at Step~1, all restrictions are lifted: instead, we construct the
``convex hull'' (in the sense of \autoref{lem.convex}) of the set
$\bar S_{k-1}\cup\bar s_k$ and check whether the resulting set~$\bar\Cal S_k$
satisfies \autoref{lem.plane};
certainly,
$\bar s_k$ must satisfy~\eqref{eq.6-0.coord};
\item
at Step~2, all restrictions except~\iref{post.ex-div}
and~\iref{post.ell-pencil} are lifted;
\item
at Step~3, approach~\iref{orbits.full} can be used due to
\autoref{cor.6-q.combinatorial};
\item
since all sets of sections are to be tried,
we replace condition~\iref{L.=} in \autoref{obs.L} with
$\ls|\operatorname{Fn}(\smash{\tilde S}_k)|=\ls|\operatorname{Fn}(S_k)|$.
It turns out that such extensions do not exist;
hence, any geometric configuration is generated by sections.
\endroster
As a result, we obtain $84$ configurations (of which $25$ are extremal with
respect to inclusion) generated by sections of~$\Cal P$; in these
configurations, $\Cal P$ is not always maximal.
Then, we try to add up to two extra $1$-fibers. The procedure is similar to
\autoref{obs.rank}: we specify the intersection of the fiber added with
sections generating~$S_k$ and repeat Steps~1--4 of the algorithm; a new
configuration $S_k'$ is accepted only if $\ls|\operatorname{Fn}(S_k')|=\ls|\operatorname{Fn}(S_k)|$.
Repetitions are eliminated using approach~\iref{orbits.full} of
\autoref{obs.group} and appropriate subgroup \smash{$\tilde\Bbb G\subset\Bbb G_{6,q}$}.
All other statements of the theorem follow directly from the classification.
\endproof
\theorem\label{th.6-0.primitive}
There are $69$ isomorphism classes of $(6,0)^\bullet$-pairs $(S,\Cal P)$
admitting a section
$s_0\in S(\Cal P)$ such that $15\le\operatorname{val} s_0\le18$.
Let $(S,\Cal P)$ and $(S',\Cal P')$ be two such pairs. Then\rom:
\roster
\item\label{6-0.rigid}
$S$ is generated by sections and combinatorially rigid\rom;
\item\label{6-0.ls}
$(S',\Cal P')\cong(S,\Cal P)$
if and only if
$\bold Ls(S')=\bold Ls(S)$\rom;
\item\label{6-0.known}
one has $\ls|\operatorname{Fn}(S)|<44$.
\endroster
\endtheorem
\proof
The sections are enumerated using the algorithm of
\autoref{s.counting}, letting
\[
p_{\min}=2,*uad p_{\max}=6,*uad v_{\min}=15,*uad v_{\max}=18.
\label{eq.param.18}
\]
Here, the lower bound $p_{\min}=2$ follows from~\eqref{tablichka}, and the
seemingly redundant upper bound $p_{\max}=6$ helps us eliminate a number of
configurations before any further processing.
We introduce also a few modifications to the algorithm.
First,
by \autoref{cor.6-0.combinatorial}, we can use approach~\iref{orbits.full} in
\autoref{obs.group}: this is necessary since some of the configurations~$S_k$
with $\operatorname{val} s_0\ge16$ have rank~$19$, see \autoref{obs.rank}.
Besides, we can
\roster*
\item
use \autoref{lem.6.0-sections}\iref{i.(6,p).0-intr} for
condition~\iref{post.maximal} in \autoref{obs.post}, and
\item
in \autoref{obs.rank}, consider only the candidates~$\bar s_{k+1}$ satisfying
$1\le\com(\bar s_{k+1}\cs\bar s_0)\le4$,
see \autoref{lem.6.0-sections}\iref{i.(6,p).0} and
\autoref{lem.s.s=0}\iref{com.5}.
\endroster
We obtain $81$ configurations, each with a distinguished section~$s_0$.
Switching to the full automorphism group~$\Bbb G_{6,0}$ and resorting reduces the
list down to $69$ classes.
The maximal number of lines in the configurations obtained is~$44$.
\endproof
\subsection{Pencils of type $(4,*)$}\label{s.4-6}
A complete classification of $(4,6)$-configurations also seems feasible;
however, for the moment we confine ourselves to a partial statement
similar
to \autoref{th.6-0.primitive}.
\theorem\label{th.4-6}
There are $195$
isomorphism classes of $(4,6)$-pairs $(S,\Cal P)$ admitting
a section~$s_0\in S(\Cal P)$
such that $15\le\operatorname{val} s_0\le18$.
If $(S,\Cal P)$ is such a pair, then\rom:
\roster
\item\label{4-6.rigid}
$S$ is generated by sections and combinatorially rigid\rom;
\item\label{4-6.known}
either one has $\ls|\operatorname{Fn}(S)|\le48$ or $S\cong\config{64}$, \config{56},
\config{54}, \config{52.4}, or \config{52.5}.
\endroster
\endtheorem
\proof
First, assume that $\operatorname{mult} l^*\le2$, hence $s_0\ne l^*$. We need to
consider seven cases: $\num1(s_0)\in\{0,\ldots,4\}$ and
$s_0\cdot l^*=0$ or~$1$ for the
first two values $\num1(s_0)=0,1$.
In each case, we employ the algorithm of \autoref{s.counting},
using parameters~\eqref{eq.param.18},
restricting the candidates in \autoref{obs.pre} to satisfy~\eqref{eq.l*}, and
imposing the restriction
$\ls|\bar S^*|\le4$, see~\eqref{eq.barS*}, as condition~\iref{post.maximal} in
\autoref{obs.post}.
All pairs obtained are rigid by \autoref{lem.4-6.combinatorial}, and
resorting the list with the full automorphism group $\Bbb G_{4,6}$ reduces it to
$20$ classes.
Let $s_0=l^*$.
To avoid complications with
large pivots,
we start with a manual classification of configurations $S\supset\Cal P$
generated by up to four sections $s_i$ such that $s_i\cdot l^*=1$ and
$\num1(s_i)=0$.
It is easily shown that, in addition to~$P$ itself, there are six
isomorphism classes of such
configurations~$S$, each admitting a unique, up to automorphism, geometric
finite index extension $\smash{\tilde S}\supset S$.
Briefly, they are as follows:
\roster*
\item
$1$ class with $\operatorname{mult} l^*=1$, $\rank S=17$, and $\ell_3(\smash{\tilde S}/S)=0$,
\item
$2$ classes with $\operatorname{mult} l^*=2$, $\rank S=18$, and $\ell_3(\smash{\tilde S}/S)=1$,
\item
$1$ class with $\operatorname{mult} l^*=3$, $\rank S=19$, and $\ell_3(\smash{\tilde S}/S)=2$,
\item
$1$ class with $\operatorname{mult} l^*=4$, $\rank S=19$, and $\ell_3(\smash{\tilde S}/S)=2$,
\item
$1$ class with $\operatorname{mult} l^*=4$, $\rank S=20$, and $\ell_3(\smash{\tilde S}/S)=3$.
\endroster
Starting, instead of~$\smash{\tilde P}$,
with one of these geometric configurations~$\smash{\tilde S}$, we build a
separate list, replacing $\Bbb G(\bar s_0)$ with $\OG_h(\smash{\tilde S},l)$ and inhibiting
sections~$\bar s$ with $\num1(\bar s)=0$ at Step~1.
Running the algorithm, we obtain
a large number of configurations (due to the lack of sorting in
\autoref{obs.rank} and \autoref{obs.L}).
All but one are rigid by
\autoref{lem.4-6.combinatorial},
and the remaining one
has an ``ambiguous'' pair of sections $s_1,s_2$, but the assumptions
$s_1\cdot s_2=0$ or~$1$ result in configurations~$S_0$, $S_1$
with non-isomorphic sets of
sections (in fact, $S_0$ is generated by $\bar S_0\sminus\bar S_0^\circ$,
whereas $S_1$ is not; this phenomenon is similar to \autoref{lem.com2=1}).
Thus, \emph{a posteriori},
all configurations are rigid;
switching to approach~\iref{orbits.full}
in \autoref{obs.group} and
resorting the list
reduces it to $175$ classes.
\endproof
\lemma\label{lem.4-5}
If a $(4,5)$-pair $(S,\Cal P)$ admits a section
$s_0$ such that $16\le\operatorname{val} s_0\le17$, then
either one has $\ls|\operatorname{Fn}(S)|\le48$
or $S\cong\config{60.2}$ or~\config{52.real}.
Furthermore, a geometric configuration
of type \config{60.2} is unique up to isomorphism.
\endlemma
\proof
We apply the algorithm of \autoref{s.counting}, letting
\[
p_{\min}=3,*uad p_{\max}=5,*uad v_{\min}=16,*uad v_{\max}=17
\label{eq.param.17}
\]
and using for~\iref{post.maximal} in \autoref{obs.post}
the extra requirement
that
$\num1(\bar s)\le4$ for any $\bar s\in\bar S_k$,
see
\autoref{prop.4-5}. We also suppress the sorting
in \autoref{obs.L}, which results in a rather large number of classes in the
case where $\num1(\bar s_0)=4$.
Disregarding the pairs $(S,\Cal P)$ with $\ls|S(\Cal P)|\le30$, we arrive at
a number of configurations of type \config{52.real}
and
several dozens of those of type \config{60.2};
crucial is the fact that
{\em only two
configurations of type \config{60.2}
appear in the case where $\num1(\bar s_0)=0$.}
For the uniqueness,
we compute the linking structure of each configuration~$S$ of type
\config{60.2}.
The result is the same for all configurations:
\[*
\bold Ls(S)=\PS{[ [ 4, 4 ], 150 ], [ [ 5, 3 ], 360 ], [ [ 6, 2 ], 360 ],
[ [ 7, 2 ], 240 ], [ [ 8, 0 ], 30 ], [ [ 8, 3 ], 120 ] }.
\]
Since $(4,4)\in\bold Ls(S)$, it follows that $S$
has a pair of skew lines~$l$, $s_0$ such that
$\num1(s_0)=0$ with respect to $\Cal P(l)$; in particular, there are at most
two isomorphism classes.
A further computation in (any) one of the
configurations shows that there are at least
two classes of
pairs $\Cal P_1,\Cal P_2$ such that $\ls|\Cal P_1\cap\Cal P_2|=4$.
Namely, in each $3$-fiber of~$\Cal P_2$, consider the two lines $s',s''$
that are sections of~$\Cal P_1$ and compute
$\num1(s')$, $\num1(s'')$ with respect to $\Cal P_1$.
The resulting multiset of four unordered pairs is obviously an invariant of
$\Cal P_1,\Cal P_2$; it turns out to be symmetric, and it can take values
\[
\PS{[ [ 1, 4 ], 2 ], [ [ 2, 3 ], 2 ]}\ \text{($120$ pairs)}*uad\text{or}*uad
\PS{[ [ 1, 4 ], 4 ]}\ \text{($30$ pairs)}.
\label{eq.invariant}
\]
Thus, we conclude that the two classes obtained in the
case $\num1(s_0)=0$ correspond, in fact, to two distinct pairs of obverse
pencils in the same configuration.
All configurations of type \config{52.real}
(obtained in the computation) are isomorphic,
as only one configuration is obtained when $\num1(s_0)=2$ and
each configuration has a pair of obverse pencils
$\Cal P_1$, $\Cal P_2$ of type $(4,5)$ and such that
$\ls|\Cal P_1\cap\Cal P_2|=6$.
\endproof
\corollary[of the proofs]\label{cor.4-5.counts}
For any
$n\in\{18,\ldots,48, 52, 54, 56, 60, 64\}$,
there
exists a $(4,*)$-configuration~$S$
such that $\ls|\operatorname{Fn}(S)|=n$.
If
$n\in\{18,\ldots,47,52\}$,
this configuration~$S$ can be chosen totally reflexive.
\endcorollary
\proof
By Propositions~\ref{prop.4-6} and~\ref{prop.4-5}, we can reliably detect the
maximality of a pencil~$\Cal P$ of type $(4,6)$ or $(4,5)$
in a configuration~$S$ by the set of sections
$S(\Cal P)$, without recomputing the full set $\operatorname{Fn}(S)$.
Hence, \eqref{eq.lines} applies to any geometric
finite index extension $\smash{\tilde S}_k\supset S_k$
accepted in \autoref{obs.K3}; recording the values obtained, we obtain the
first statement of the corollary. The second one is
obtained by using, in addition, \autoref{totally_reflexive}, \cf. the proof
of \autoref{prop.5-2.real}.
\endproof
\subsection{Pencils of type $(5,*)$}\label{s.5-3}
As in the case $(4,*)$, we have a partial classification for the maximal
type $(5,3)$ and certain bounds for the submaximal type $(5,2)$.
\theorem\label{th.5-3}
There are $421$
isomorphism classes of $(5,3)$-pairs $(S,\Cal P)$ admitting
a section~$s_0\in S(\Cal P)$
such that $15\le\operatorname{val} s_0\le18$.
If $(S,\Cal P)$ is such a pair,
then either one has $\ls|\operatorname{Fn}(S)|\le48$ or
$S\cong\config{52.5-3}$, \config{51}, or~\config{50}.
\endtheorem
\proof
The
computation runs exactly as outlined in \autoref{s.counting},
with the parameters as in~\eqref{eq.param.18}
and \autoref{lem.5-q.maximal} used for
condition~\iref{post.maximal} in \autoref{obs.post}. (Note that, since the
only pencil that can properly contain~$\Cal P$ is that of type $(6,2)$,
\autoref{lem.5-q.maximal} gives us a criterion of maximality of~$\Cal P$.)
With two exceptions, all configurations obtained are rigid by
Corollary~\ref{cor.5-3.combinatorial} or~\ref{cor.5-3.pencil},
and we can resort the combined list
(the union over all four values $\num1(\bar s_0)=0,\ldots,3$)
using
approach~\iref{orbits.full} in \autoref{obs.group} and the full
group $\Bbb G_{5,3}$.
Each of the two configurations whose rigidity could not be established
differs from all others by its linking structure.
\endproof
\lemma\label{lem.5-2}
If a $(5,2)$-pair $(S,\Cal P)$ admits a section
$s_0$ such that $16\le\operatorname{val} s_0\le17$, then
one has $\ls|\operatorname{Fn}(S)|\le48$.
\endlemma
\proof
The computation runs as outlined in \autoref{s.counting},
using parameters as in~\eqref{eq.param.17} and \autoref{lem.5-q.maximal} for
condition~\iref{post.maximal} in \autoref{obs.post}.
There are a few configurations~$S_k$ of rank~$19$,
to which we add extra sections
(see \autoref{obs.rank}) but {\em do not sort the results}, \ie, skip
Step~3.
Apart from several configurations of type~\config{52.5-3} or~\config{50},
one has
$\ls|\smash{\tilde S}_k(\Cal P)|\le30$ and the statement follows from~\eqref{eq.lines}.
\endproof
\subsection{Pencils of size $16$}\label{s.3-7}
In this section
we deal with
geometric
configurations containing
a pair of obverse
maximal pencils $\Cal P:=\Cal P(l)$ and $\Cal P':=\Cal P(s_0)$
such that $\ls|\Cal P|=\ls|\Cal P'|=16$.
Since we are interested in the configurations themselves rather than triples
$(S,\Cal P,\Cal P')$, we make several additional assumptions.
First of all, we assume that $\operatorname{mult} l\le\operatorname{mult} s_0$; hence, when applying the
algorithm outlined in \autoref{s.counting},
we can use the parameters
\[*
p_{\min}=p:=\operatorname{mult} l,*uad p_{\max}=5,*uad v_{\min}=v_{\max}=16.
\]
The next few restrictions are considered as part of the type specific
condition~\iref{post.maximal} in \autoref{obs.post}; the necessary
computation uses the set $\frak V_4(S_k)$.
\roster
\item\label{3-7.val}
We require that $\max\{\operatorname{val} l\,|\,l\in\operatorname{Fn}(S)\}\le17$.
\endroster
This restriction is part of all statements: on the one hand, it helps us
eliminate a number of configurations covered by other theorems and, on the
other hand, it is sufficient for the proof of
\autoref{prop.56} in its current form.
Besides, we list all pairs $l_1,l_2\in\operatorname{Fn}(S)$ of skew lines such that
$\operatorname{val} l_1=\operatorname{val} l_2=16$ and compute the refined types of the pencils
$\Cal P_i:=\Cal P(l_i)$, $i=1,2$, and the linking types $\operatorname{lk}(l_1,l_2)$.
For each
pair $l_1,l_2$,
assuming that $\operatorname{mult} l_1\le\operatorname{mult} l_2$,
we require that
\roster[\lastitem]
\item\label{3-7.>=p}
$\operatorname{mult} l_1\ge p$, and
\item\label{3-7.=p}
if $\operatorname{mult} l_1=p$, then
$\ls|\Cal P_1\cap\Cal P_2|\ge\num2(s_0)+p$.
\endroster
(If these two conditions are not satisfied, we can obtain the same
configuration~$S$ replacing $l,s_0$ with the ``smaller'' pair $l_1,l_2$.)
In \autoref{obs.group}, approach~\iref{orbits.partial} is used for
sorting. In \autoref{obs.rank}, we may need to add up to two extra sections;
since the combinatorial rigidity is not known,
the configurations containing extra sections are excluded from the
sorting algorithm.
Finally, at the final step
we only keep the configurations~$S$ such that $\ls|\operatorname{Fn}(S)|>48$ or
$\ls|\operatorname{Fn}(S)|=48$ and $S$ is totally reflexive.
\lemma\label{lem.3-7}
Let $(S,\Cal P)$ be a $(3,7)$-pair and $s_0\in S(\Cal P)$ a section such
that
\[*
\max\{\operatorname{val} l\,|\,l\in\operatorname{Fn}(S)\}\le17*uad\text{and}*uad\operatorname{val} s_0=16.
\]
Then either one has $\ls|\operatorname{Fn}(S)|\le48$ or
$S\cong\config{56.real}$ or $\config{q56}$.
If $S$ is totally reflexive, then either
$\ls|\operatorname{Fn}(S)|<48$ or
$S\cong\config{56.real}$ or \config{48}.
\endlemma
\proof
The computation runs as outlined above.
In addition to \iref{3-7.val}--\iref{3-7.=p},
we inhibit all configurations in which $\Cal P$ has a
section~$s$ such that $\num2(s)=6$, see \autoref{prop.3-7}.
We obtain several
configurations of type \config{56.real}, \config{q56}, or \config{48};
furthermore,
\roster*
\item
if $\num2(s_0)=0$, there is a single configuration~$S$;
this configuration~$S$ is of type \config{q56}, and
the pencils $\Cal P$ and $\Cal P'$ are of type $(3,7)^\bullet$;
\item
if $\num2(s_0)=1$,
there is a unique configuration~$S$ of type~\config{56.real} in which
$\Cal P$ is of type $(3,7)^\circ$ and $\Cal P'$ is
of type $(4,4)$;
\item
if $\num2(s_0)=2$,
there is a unique configuration~$S$ of type~\config{48} in which
$\Cal P$ is of type $(3,7)^\circ$ and $\Cal P'$ is of type~$(3,7)$.
\endroster
On the other hand, a direct computation shows that each configuration~$S$
obtained has a pair $\Cal P$, $\Cal P'$ of obverse pencils whose types and
intersection $\ls|\Cal P\cap\Cal P'|$ are as above. (Recall that
$\ls|\Cal P(l)\cap\Cal P(s_0)|=\num2(s_0)+3$.)
Replacing $l$ and~$s_0$ with the axes of these pencils, we conclude that,
up to isomorphism
{\em and under the assumptions of the lemma}, each
type \config{56.real}, \config{q56}, \config{48}
is represented
by a unique configuration.
\endproof
\lemma\label{lem.4-4}
Let $(S,\Cal P)$ be a $(4,4)$-pair and $s_0\in S(\Cal P)$ a section such
that
\[*
\max\{\operatorname{val} l\,|\,l\in\operatorname{Fn}(S)\}\le17*uad\text{and}*uad\operatorname{val} s_0=16.
\]
Then either one has $\ls|\operatorname{Fn}(S)|\le48$ or
$S\cong\config{56.real}$, \config{q56}, \config{q54},
\config{q52.1}, \config{q52.2}, or \config{50.0}.
If $S$ is totally reflexive, then either
$\ls|\operatorname{Fn}(S)|<48$ or
$S\cong\config{56.real}$ or \config{48.2}.
\endlemma
\proof
The configurations of type (hence, isomorphic to) \config{56.real} or
\config{q56} are given by \autoref{lem.3-7}.
The other types are obtained by a computation outlined above,
which returns several dozens of configurations with $\num2(s_0)\le2$.
Switching to approach~\iref{orbits.full} in \autoref{obs.group} and the full
automorphism group $\Bbb G_{4,4}$ {\em and checking explicitly that each
isomorphism $\bar S'\to\bar S''$ lifts to an isometry $S'\to S''$}, one can
show that, for any two configurations $S'$, $S''$ in the lists obtained,
$S'\cong S''$
if and only if $\operatorname{\frak{ps}}(S')=\operatorname{\frak{ps}}(S'')$.
The pencil structures realized are those listed in the statement.
\endproof
\lemma\label{lem.5-1}
Let $(S,\Cal P)$ be a $(5,1)$-pair and $s_0\in S(\Cal P)$ a section such
that
\[*
\max\{\operatorname{val} l\,|\,l\in\operatorname{Fn}(S)\}\le17*uad\text{and}*uad\operatorname{val} s_0=16.
\]
Then one has $\ls|\operatorname{Fn}(S)|\le48$.
\endlemma
\proof
The computation runs as outlined at the beginning of this section, with
\autoref{lem.5-q.maximal} used to rule out some non-maximal pencils. This
computation results in an empty list of configurations.
\endproof
\subsection{Triangle free configurations}\label{s.trig.free}
A configuration~$S$ is called \emph{triangle} (respectively,
\emph{quadrangle}) \emph{free} if the graph $\operatorname{Fn} S$ has no cycles of length~$3$
(respectively, $3$ or~$4$). By \autoref{two-planes}, a configuration is
triangle free if and only if it contains no planes. Clearly, all pencils in
such a configuration are of type $(0,*)$.
\lemma\label{lem.0-q}
Let $\Cal P,\Cal P'$ be a pair of obverse pencils in a
geometric triangle free
configuration~$S$, and assume that $\ls|\Cal P\cap\Cal P'|\ge2$. Then one
has
either $\ls|\Cal P\cup\Cal P'|\le18$ or $\ls|\operatorname{Fn}(S)|\le33$.
\endlemma
\proof
Assuming that $\ls|\Cal P|\ge\ls|\Cal P'|$, denote by~$s_0$ the axis
of~$\Cal P'$; it is a section of~$\Cal P$ and $r:=\num1(s_0)\ge2$.
Clearly, $\Cal P$ is of type $(0,q)$, and we can assume that $q\ge11$
and $r\le2q-19$, as
otherwise the inequality $\ls|\Cal P\cup\Cal P'|\le18$ holds immediately.
The structure of the extension $\smash{\tilde P}\supset P$ is given by
\autoref{prop.2.pivot} (the pivot has no $3$-torsion by
\autoref{prop.3.pivot}) and, depending on the values of $q$, $r$, there are
up to two (up to automorphism) possibilities for the section $s_0$.
We apply the algorithm outlined in \autoref{s.counting},
using the parameters
\[*
p_{\min}=p_{\max}=0,\quad v_{\min}=19-q,\quad v_{\max}=q-r
\]
and introducing a few modifications. Namely, at Step~1 we
allow repetition when collecting sections $\bar s_i$, as the coordinate map
(\cf. \autoref{cor.coord}) is not injective for~$\Cal P$; on the other hand,
only the sections satisfying \autoref{prop.2.pivot}\iref{2.pivot.section} are to
be considered.
At Step~2, as condition~\iref{post.maximal} in \autoref{obs.post},
we check that the configuration is still triangle free.
Adding, if necessary, up to two extra sections disjoint from~$s_0$ (see
\autoref{obs.rank}; such records are not sorted), we arrive at a number of
configurations, each containing at most $33$ lines.
\endproof
\lemma\label{lem.quad.3}
Let $S$ be a
geometric quadrangle free configuration. Consider three lines
$l_0\in\operatorname{Fn}(S)$ and $l_1,l_2\in\Cal P(l_0)$
such that $\operatorname{val} l_0\ge\operatorname{val} l_1\ge\operatorname{val} l_2$.
Then either
\roster*
\item
$\operatorname{val} l_0+\operatorname{val} l_1\le14$ and $\operatorname{val} l_2=1$, or
\item
$\operatorname{val} l_0\le7$ and $\operatorname{val} l_2\le\operatorname{val} l_1\le5$, or
\item
$\operatorname{val} l_1\le\operatorname{val} l_0\le6$ and $\operatorname{val} l_2\le 5$.
\endroster
\endlemma
\proof
It is convenient to consider the pencil $\Cal P:=\Cal P(l_1)$, of which
$l_0$ is a fiber and $l_2$ is a section. Since $S$ is quadrangle free, each
section of~$\Cal P$ intersects at most one fiber, and two sections
intersecting~$l_2$ cannot intersect the same fiber. In addition to~$l_2$,
the pencil~$\Cal P$ has
$(\operatorname{val} l_0-2)$ sections intersecting~$l_0$ (all disjoint from~$l_2$) and
$(\operatorname{val} l_2-1)$ sections intersecting~$l_2$
(all disjoint from~$l_0$); all these sections are pairwise disjoint.
An extra parameter is the number of
the sections intersecting~$l_2$
that also intersect a fiber of~$\Cal P$. A direct computation
(applying
\autoref{th.Nikulin} to the finite index extensions allowed by
\autoref{pre-conf}) rules out the values
\[*
(6, 6, 6),\ (7, 6, 1),\ (8, 5, 1),\ (10, 4, 2),\ (11, 3, 2),\ (11, 4, 1),\ (12, 2, 1)
\]
for the triple $(\operatorname{val} l_0,\operatorname{val} l_1,\operatorname{val} l_2)$.
\endproof
\subsection{Existence and uniqueness}\label{s.uniqueness}
We conclude this section with two
statements related to the uniqueness of large configurations and
the existence of
configurations with a prescribed number of lines.
\table
\caption{$\bold L$-configurations with more than $52$ lines
(see \autoref{lem.unique})}\label{tab.large}
\def\raise3pt\hbox{$\scriptstyle\sqrt{}$}{\raise3pt\hbox{$\scriptstyle\sqrt{}$}}
\hbox to\hsize{\hss\vbox{\halign{\strut\quad#\quad\hss&\hss#\hss\quad&
\hss#\hss\quad&\hss#\hss\quad&\hss#\hss\quad&&\hss$#$\hss\quad\cr
\noalign{\hrule}
\vphantom{\Big(}\hss$S$\hss&$\ls|\operatorname{Fn}|$&t.r.&ref&sym&\ls|\OG_h(S)|&\discr S&T:=S^\perp
\cr
\noalign{
\hrule
}
\config{64}&64&&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&4608&
\Cal V_4\oplus\<\frac43\>&[8,4,8]
\cr
\config{60}&60&&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&480&
\Cal U_2\oplus\<\frac43\>\oplus\<\frac25\>&[4,2,16]
\cr
\config{60.2}&60&&&&240&
\<\frac65\>\oplus\<\frac{10}{11}\>&[4,1,14]
\cr
\config{56}&56&&&&128&
\<\frac{15}8\>\oplus\<\frac{15}8\>&[8,0,8]
\cr
\config{56.real}&56&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&64&
\<\frac32\>\oplus\<\frac{63}{32}\>&[2,0,32]
\cr
\config{q56}&56&&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&384&
\Cal U_2\oplus\<\frac43\>\oplus\<\frac25\>&[4,2,16]
\cr
\config{54}&54&&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&384&
\<\frac14\>\oplus\<\frac38\>\oplus\<\frac43\>&[4,0,24]
\cr
\config{q54}&54&&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&48&
\Cal V_2\oplus\<\frac2{19}\>&[4,2,20]
\cr
\noalign{
\hrule}\crcr}}\hss}
\endtable
\lemma\label{lem.unique}
Each pencil structure listed in \autoref{tab.large} is realized by a unique,
up to weak isomorphism, $\bold L$-configuration~$S$. This $\bold L$-configuration~$S$ is
totally reflexive if and only if $S=\config{56.real}$\rom;
it is reflexive
unless $S=\config{60.2}$ or \config{56},
whereas \config{60.2} and \config{56} are not
symmetric.
\endlemma
\proof
By \autoref{th.uniqueness},
each pencil structure as in the
statement is realized by a unique geometric configuration~$S$; hence, there
only remains to verify that each of the three configurations
admits a unique primitive
$\bold L$-realization.
All configurations are known explicitly, and one can compute their
automorphism groups,
discriminants, and perspective transcendental lattices
$T:=S^\perp$;
they are as shown in \autoref{tab.large}.
(The lattice~$T$ is generated by two vectors $u$, $v$ so that
$u^2=a$, $u\cdot v=b$, and $v^2=c$, where $[a,b,c]$ is the triple given
in the table.
Each lattice is unique in its genus, which follows from the
classical theory of binary forms~\cite{Gauss:Disquisitiones}.)
With two exceptions, the homomorphism $\rho\:\OG_h(S)\to\Aut\discr S$ is
surjective. The exceptions are:
\roster*
\item
$S=\config{q54}$, where $\Im\rho=\Aut\discr_2S$,
and
\item
$S=\config{56}$, which will be treated separately.
\endroster
Furthermore, each involution in $\Im\rho$ lifts to an involution in
$\OG_h(S)$. (This is not a common property of
configurations, a counterexample being \config{50.0}, see \autoref{ss.Z}.)
In each case (other than $S=\config{56}$), it is immediate that
the image of $\OG^+(T)$ intersects each coset modulo $\Im\rho$ and,
hence, a primitive $\bold L$-realization is unique
up to weak isomorphism (see the description of primitive extensions
in \autoref{s.lattices}).
Besides, whenever $T$ has an orientation reversing isometry (\ie, in all
cases except $S=\config{60.2}$, see \autoref{tab.large}), this
isometry, which is necessarily involutive, can be chosen to induce an element in
the image $\Im\rho$ and, thus, lift to an involution in $\OG_h(S)$.
Hence, the $\bold L$-configuration is
symmetric and reflexive.
In the exceptional case $S=\config{56}$, the image of $\OG_h(S)$
(respectively, $\OG(T)$) is the index~$2$
subgroup of $\Aut\discr S$ generated by the reflections $t_\alpha$,
where $\alpha\in\discr S$ and $\alpha^2=\frac38$ or
$\frac34\bmod2\Z$ (respectively, $\alpha^2=\frac{15}8$ or $\frac74\bmod2\Z$).
The intersection of the two subgroups has index~$4$ and coincides with the
image of $\OG^+(T)$. It follows that there is a single weak isomorphism
class, which is not symmetric.
The only
totally reflexive configuration is \config{56.real},
as $\config{56.real}^\perp$ is the only transcendental lattice
containing a
vector of square~$2$, see \autoref{tab.large}.
\endproof
\remark\label{rem.aut}
The
computation of the automorphism groups makes use of the pencil structure:
we list all pencils of a given type (usually, the first one listed in
\autoref{tab.list}) and then enumerate the isometries taking one fixed
pencil to another one, similarly to the sorting algorithm in
\autoref{obs.group}.
\endremark
\remark\label{rem.nonunique}
Not every configuration~$S$ listed in \autoref{th.uniqueness} admits a unique
$\bold L$-realization.
Simplest examples are $\config{Y}_*$, see
\autoref{ss.real} and \autoref{tab.Y} below.
More examples are found in \autoref{tab.Y} in \autoref{S.examples}.
\endremark
\lemma\label{lem.counts}
For any
number
$n\in\{0,\ldots,52,54,56,60,64\}$, there is a geometric configuration~$S$
such that $\ls|\operatorname{Fn}(S)|=n$.
If
$n\in\{0,\ldots,48,52,56\}$, this configuration can be chosen totally reflexive.
\endlemma
\proof
Any count $n\le17$ is easily realized by the span of a single pencil.
Hence, the first statement follows from \autoref{th.6-q},
and the second one
mostly follows from \autoref{cor.4-5.counts}.
The missing values $n=48$, $56$
for totally reflexive
configurations
are
given by \autoref{lem.3-7}.
\endproof
\section{Triangle free configurations}\label{S.triang.free}
Recall
that a configuration~$S$ is said to be triangle free if it contains no
planes. The principal goal of this section is a proof of a bound to the
number of lines in such a configuration, see \autoref{th.Segre}
in \autoref{s.triang.free} below.
Throughout the section, we fix a configuration~$S$ and
a geometric $\bold L$-realization $\psi\:S\to\bold L$.
\subsection{Adjacency graphs}
Given a graph~$\Gamma$, we denote by $\Z\Gamma$ the lattice freely generated
by the vertices of $\Gamma$, so that $v^2=-2$ for each vertex~$v$
and $u\cdot v=1$ (respectively,~$0$) if the vertices $u\ne v$ are
(respectively, are not) adjacent in $\Gamma$. If $\Gamma\subset\operatorname{Fn}(S)$, we also
consider the images $S\Gamma:=\Z\Gamma/\ker\subset S$ and
$\bold L\Gamma:=\psi(S\Gamma)\subset\bold L$
of this lattice in~$S$ and~$\bold L$,
denoting by $\psi_\Gamma\:\Z\Gamma\to\bold L$ the composed map.
A graph $\Gamma$ is called \emph{elliptic} (respectively,
\emph{parabolic}) if $\Z\Gamma$ is negative
definite (respectively, negative semi-definite).
The \emph{Milnor number} $\mu(\Gamma)$ of an elliptic or parabolic
graph~$\Gamma$ is the rank of the lattice $\Z\Gamma/\ker$.
A connected elliptic (parabolic) graph is called a \emph{Dynkin diagram}
(respectively, \emph{affine Dynkin diagram}).
A Dynkin diagram~$D$ extends to a unique affine Dynkin diagram,
which we denote
by $\tilde D\supset D$;
we refer to \cite{Bourbaki:Lie:French} for a detailed treatment of Dynkin
diagrams and their affine counterparts.
Recall that any graph~$\Gamma$ such that $\Z\Gamma$ is not negative definite
contains an affine Dynkin diagram as an induced subgraph.
For
any affine
Dynkin diagram~$\tilde D$, the kernel $\ker\Z\tilde D$
is spanned by a single distinguished generator
$k_{\tilde D}=\sum\kappa(e)e$, $e\in\tilde D$, with each coefficient
$\kappa(e)$ \emph{strictly positive}.
The coefficient sum
\smash{$\kappa(\tilde D):=\sum\kappa(e)$} of this linear combination is as follows:
\[
\kappa(\tA_p)=p+1,\quad
\kappa(\tD_q)=2q-2,\quad
\kappa(\tE_6)=12,\quad
\kappa(\tE_7)=18,\quad
\kappa(\tE_8)=30.
\label{eq.kappa}
\]
We extend this $\kappa$-notation to elliptic Dynkin diagrams letting
$\kappa(D):=\kappa(\tilde D)$.
\lemma\label{lem.Dynkin.mono}
Let $\Gamma\subset\operatorname{Fn}(S)$ be a parabolic subgraph such that
$\rank\ker\Z\Gamma=1$.
Then, the isometry $\psi_\Gamma\:\Z\Gamma\to\bold L$ is a monomorphism.
\endlemma
\proof
By the assumption, $\Gamma$ is a disjoint union of several Dynkin diagrams and a
single affine Dynkin diagram~$\tilde D$.
Since $\psi_\Gamma$ is an isometry, one has $\Ker\psi_\Gamma\subset\ker\Z\Gamma$,
and, as
explained above, the latter subgroup is spanned by a single vector $k_{\tilde D}$ so
that \smash{$\psi_\Gamma(k_{\tilde D})\cdot h=\kappa(\tilde D)>0$}.
Hence, $\psi_\Gamma(k_{\tilde D})\ne0$ and $\Ker\psi_\Gamma=0$.
\endproof
\subsection{Pseudo-pencils}
Given a nonzero isotropic vector $v\in S$,
the \emph{pseudo-pencil} defined by $v$ is the set
\[*
\Cal K(v):=\bigl\{a\in\operatorname{Fn}(S)\bigm|a\cdot v=0\bigr\}.
\]
Since $S$ is hyperbolic,
$v\cdot h\ne0$ and we can assume
$v\cdot h>0$. We can also assume~$v$ primitive. Then, the
integer $\deg\Cal K:=v\cdot h$ is called the \emph{degree}
of~$\Cal K$.
The connected components of~$\Cal K$ are called its \emph{fibers}.
A \emph{section} (more generally, \emph{$n$-section}, $n>0$)
of~$\Cal K$ is a line
$s\in\operatorname{Fn}(S)$ such that $s\cdot v=1$ (respectively, $s\cdot v=n$).
The set of sections of~$\Cal K$, depending on the ambient
configuration~$S$, is denoted by $S(\Cal K)$.
Each pencil is a pseudo-pencil of degree~$3$:
one has $\Cal P(l)=\Cal K(h-l)$.
Conversely, if $v\cdot h=3$, then $l:=h-v\in\operatorname{Fn}(S)$ and
$\Cal K(v)=\Cal P(l)$.
As another example, fix an affine Dynkin diagram $\tilde D\subset\operatorname{Fn}(S)$ and
let $v\in S$ be the image of $k_{\tilde D}$; by \autoref{lem.Dynkin.mono},
$\psi(v)\ne0$ and
$\Cal K(\tilde D):=\Cal K(v)$ is a pseudo-pencil.
Clearly, \smash{$\tilde D\subset\Cal K(\tilde D)$}.
Since $k_{\tilde D}$ is a \emph{positive} linear combination of the vertices
of~$\tilde D$ and the intersection of two lines is nonnegative (see
\autoref{matrix}), it follows that
\[
\Cal K(\tilde D)=\{a\in\operatorname{Fn}(S)\,|\,\text{$a\cdot v=0$ for each vertex $v\in\tilde D$}\}.
\label{eq.pseudo.count}
\]
\proposition\label{prop.pseudo}
For each pseudo-pencil~$\Cal K$ the following statements hold\rom:
\roster
\item\label{i.pseudo.1}
either $\deg\Cal K=1$ and $\ls|\operatorname{Fn}(S)|=1$, or $\deg\Cal K\ge3$\rom;
\item\label{i.pseudo.2}
as a graph, $\Cal K$ is elliptic or parabolic
and
$\mu(\Cal K)\le18$\rom;
\item\label{i.pseudo.3}
if $D\subset\Cal K$ is a Dynkin diagram and
$(\deg\Cal K)\mathbin|\kappa(D)$, then
$\tilde D$
is a fiber of $\Cal K$.
\endroster
Furthermore, if $s\in S(\Cal K)$, then, for any parabolic fiber $\tilde D$
of~$\Cal K$,
\roster[4]
\item\label{i.pseudo.4}
$\sum\kappa(e)(s\cdot e)=\deg\Cal K$, the summation running over
$e\in\tilde D$\rom;
\item\label{i.pseudo.5}
in particular, if $S(\Cal K)\ne\varnothing$, then
$\kappa(\tilde D)=\deg\Cal K$ and $k_{\tilde D}=v$.
\endroster
\endproposition
\proof
Let $\Cal K=\Cal K(v)$ with $v\cdot h=\deg\Cal K$.
The possibility $v\cdot h=2$ is excluded by
\autoref{ell-pencil} in \autoref{pre-conf}.
If $v\cdot h=1$, then $a:=h-3v$ is a line.
Consider another line $b\in\operatorname{Fn}(S)$.
If $b\cdot v\ne0$ or~$1$, then $\sigma_+(\Z h+\Z v+\Z b)=2$.
If $b\cdot v=0$, then $e:=b-v$
is as in \autoref{ex-div} in \autoref{pre-conf}.
In the remaining case $b\cdot v=1$ one has $\rank\ker(\Z h+\Z v+\Z b)=2$ and,
hence, $b=a$, \ie, $a$ is the only line.
The assumption that $v\ne0$ implies that $v^\perp$ has
a non-trivial kernel and, hence, is parabolic;
since also $\rank\psi(\Cal K)\le19=\sigma_-\bold L$,
this proves \autoref{i.pseudo.2}.
For \autoref{i.pseudo.3}, observe that $\kappa(e_0)=1$
for the only vertex $e_0\in\tilde D\sminus D$, see,
\eg,~\cite{Bourbaki:Lie:French}.
Hence, $e_0$ is an integral linear combination of~$v$ and the
vertices of~$D$, \ie, $e_0\in S$. Clearly, $e_0$ is a
line and, thus, $\tilde D\subset\Cal K$.
Finally,
any affine
Dynkin diagram is a whole connected component of any parabolic graph in which it is
contained.
The last two statements follow
from the definitions and the fact
that, for each parabolic fiber~$\tilde D$ of~$\Cal K$, the vector $k_{\tilde D}$ is
a multiple of~$v$ (as $k_{\tilde D}\cdot v=0$);
on the other hand, $\sum_{e\in\tilde D}\kappa(e)(s\cdot e)=s\cdot k_{\tilde D}$.
\endproof
\corollary\label{cor.pseudo}
For a pseudo-pencil $\Cal K$, one has $\ls|\Cal K|\le18(1+1/\mu)$,
where $\mu$ is the
minimal Milnor number
of the parabolic fibers of~$\Cal K$.
In particular,
$\ls|\Cal K|\le24$.
\endcorollary
\proof
The first bound follows from the obvious identity
\[*
\ls|\Cal K|=\mu(\Cal K)+\ls|\{\text{parabolic fibers of $\Cal K$}\}|.
\]
If $\Cal K$ has a fiber of type~$\tA_2$, it is an ordinary pencil and
$\ls|\Cal K|\le20$ by \autoref{cor.le20}. Otherwise, $\mu\ge3$ and we have
$\ls|\Cal K|\le24$.
\endproof
Geometrically, if $S=\Cal F(X)$ for a nonsingular quartic $X\subset\Cp3$,
a pseudo-pencil~$\Cal K$ can often be interpreted as an elliptic pencil
$\pi\:X\to\Cp1$ whose
fibers are curves of degree $\deg\Cal K$ in~$\Cp3$.
For example, this is so in the important special case where
$\Cal K$ has a parabolic fiber~$\tilde D$.
Indeed, in this case,
the class $v=\sum\kappa(e)e$, $e\in\tilde D$, regarded as a divisor,
is obviously
numerically effective
and, hence, does
define a linear system of
arithmetic genus~$1$ without fixed points or components.
From this
geometric point of view, $\Cal K$ is the union of lines contained in the
fibers of~$\pi$. More precisely, if \emph{all} components of a reducible fiber~$F$
of~$\pi$ are lines, these lines form a parabolic fiber of~$\Cal K$;
otherwise, the lines contained in~$F$ constitute
one or
several elliptic fibers of~$\Cal K$.
Furthermore, in this interpretation, the bound $\ls|\Cal K|\le24$
given by \autoref{cor.pseudo} follows from the inequality
\[*
\ls|\{\text{components in the singular fibers of~$F$}\}|\le\chi(X)=24.
\]
Using this geometric interpretation, one can
partially extend
Statements~\iref{i.pseudo.4} and~\iref{i.pseudo.5} of \autoref{prop.pseudo}
to the elliptic fibers of~$\Cal K$. Namely, for each
section $s\in S(\Cal K)$ and each elliptic fiber~$D$ of~$\Cal K$, one has
\roster[4]
\item
$\sum\kappa(e)(s\cdot e)\le\deg\Cal K$, the summation running over
$e\in D$\rom;
\item
in particular, if $S(\Cal K)\ne\varnothing$, then
$\kappa(D)<\deg\Cal K$.
\endroster
As we do not use these statements, we will not try to prove them
arithmetically.
(Unlike \autoref{prop.pseudo}, these statements may depend on the requirement
that $S$ should be geometric and involve a case-by-case analysis, \cf. the
discussion below.)
The \emph{type} of a pseudo-pencil~$\Cal K$ is the isomorphism type of the
lattice $\Z\Cal K$; by \autoref{prop.pseudo}, it is an orthogonal
direct sum of elliptic and parabolic root lattices. (For example, in this new
language, an ordinary pencil of type $(p,q)$ has type
\smash{$p\tA_2\oplus q\bA_1$}.)
Using \autoref{prop.pseudo} and arguing as in \autoref{S.arithm}, \ie, applying
Nikulin's \autoref{th.Nikulin} to all finite index extensions of
the lattice $P:=(\Z\Cal K+\Z h)/\ker$ that are not ruled out
by \autoref{pre-conf},
it should not be
difficult to obtain a complete classification of pseudo-pencils appearing in
geometric configurations; in particular, one can probably improve the bound
$\ls|\Cal K|\le24$ given by \autoref{cor.pseudo}. However, we confine
ourselves to just the two special cases used in the proof of
\autoref{th.Segre}.
\lemma\label{lem.tA3}
Assume that $S$ is triangle free, and
let $\Cal K\subset S$ be a pseudo-pencil with a fiber of type~$\tA_3$. Then
either $\ls|\Cal K|\le20$ or $\Cal K$ is of type $5\tA_3\oplus\bA_1$\rom;
in the latter case, one has $\ls|\operatorname{Fn}(S)|\le45$.
\endlemma
\proof
By \autoref{prop.pseudo}, one has $\deg\Cal K=\kappa(\tA_3)=4$ and all
fibers of~$\Cal K$ are of types \smash{$\tA_3$}, $\bA_2$, or $\bA_1$.
Arguing as explained above, we conclude that the only pseudo-pencil~$\Cal K$
such that $\ls|\Cal K|>20$ and
the lattice $P:=(\Z\Cal K+\Z h)/\ker$ admits a geometric
$\bold L$-realization is that of type \smash{$5\tA_3\oplus\bA_1$}.
Assuming this type, consider the quadrangle
$\tilde D:=\{l_1,\ldots,l_4\}$ constituting one of
the type~$\tA_3$ fibers. Letting $\Cal P_i:=\Cal P(l_i)$,
by~\eqref{eq.pseudo.count} we have
\[
\ls|\operatorname{Fn}(S)|=\ls|\Cal P_1\cup\Cal P_3|
+\ls|\Cal P_2\cup\Cal P_4|+\ls|\Cal K|-4.
\label{eq.tA3}
\]
(Since $S$ is triangle free, a line $a\in\operatorname{Fn}(S)$ cannot intersect two
adjacent vertices of the quadrangle.)
Due to \autoref{lem.skew}\iref{skew.2} and \autoref{cor.10.fibers}, for each
of the two pairs $(i,j)=(1,3)$ or $(2,4)$, either
$\ls|\Cal P_i\cup\Cal P_j|=\ls|\Cal P_i\cap\Cal P_j|=10$
or $\ls|\Cal P_i\cap\Cal P_j|\le8$; thus, letting
$n_i:=\ls|\Cal P_i\sminus\Cal P_j|$, we get
$\ls|\Cal P_i\cup\Cal P_j|\le\max\{20,16+n_i+n_j\}$
and, if $n_i\le3$ for all $i=1,\ldots,4$, from~\eqref{eq.tA3} we obtain
$\ls|\operatorname{Fn}(S)|\le45$, as stated.
What remains is the case where one of the integers~$n_i$, say, $n_1$, is at
least~$4$, \ie, there are at least four lines intersecting~$l_1$ and disjoint
from the three other lines.
In this case, we run an algorithm similar to that described in
\autoref{s.counting}, adding to~$S$ up to three sections
intersecting~$l_1$ in order
to increase the rank from $\rank P=18$ to the maximum~$20$.
By \autoref{prop.pseudo}\iref{i.pseudo.4}, each section
intersects exactly one line of each other parabolic
fiber; given the rich automorphism group, this observation leaves
relatively few possibilities for pairs and triples of sections.
Then, as in \autoref{obs.L}, we enumerate the geometric realizations of each
configuration of maximal rank and compute the number of lines, arriving at
the inequality $\ls|\operatorname{Fn}(S)|\le33$.
\endproof
\lemma\label{lem.tD4}
If $\Cal K\subset S$ is a pseudo-pencil with a fiber of type~$\tD_4$,
then $\ls|\Cal K|\le19$.
\endlemma
\proof
By \autoref{prop.pseudo}, one has $\deg\Cal K=\kappa(\tD_4)=6$ and all
fibers of~$\Cal K$ are of types \smash{$\tD_4$}, \smash{$\tA_5$},
or $\bA_p$, $1\le p\le4$. Trying all combinations one by one and arguing as
explained prior to \autoref{lem.tA3},
we arrive at the inequality stated. (In fact, the only type with
$\ls|\Cal K|=19$ is \smash{$2\tD_4\oplus\tA_5\oplus\bA_2\oplus\bA_1$}.)
\endproof
\subsection{The bound}\label{s.triang.free}
The following theorem is the principal result of this section.
\theorem\label{th.Segre}
If a geometric configuration $S$ is triangle free, then
$\ls|\operatorname{Fn}(S)| \leq 52$.
\endtheorem
\proof
We consider separately several cases, each time picking an appropriate affine
Dynkin diagram $\tilde D\subset\operatorname{Fn}(S)$ and using~\eqref{eq.pseudo.count} to
estimate the number of lines,
which is
$\ls|\Cal K(\tilde D)|+\ls|\{\text{lines intersecting a vertex of~$\tilde D$}\}|$.
First, assume that the maximal valency of a line in~$S$ is at most~$3$.
If $\operatorname{Fn}(S)$ is elliptic, then $\ls|\operatorname{Fn}(S)|\le19$. Otherwise, $\operatorname{Fn}(S)$
contains an affine Dynkin diagram; pick one \smash{$\tilde D\subset\operatorname{Fn}(S)$}
of the minimal
Milnor number~$\mu$. Using the classification of affine Dynkin diagrams,
we conclude that the number of lines that are not in~$\tilde D$ and adjacent to a
vertex of $\tilde D$ is at most $2n_1+n_2\le\mu+3$, where $n_i$ is the number of
vertices of~\smash{$\tilde D$} of valency~$i$. Since $2\le\mu\le18$,
by~\eqref{eq.pseudo.count} and \autoref{cor.pseudo},
\[*
\ls|\operatorname{Fn}(S)|\le\mu+3+\ls|\Cal K(D)|\le\mu+\frac{18}\mu+21\le40.
\]
Now, assume that $S$ has a line of valency at least~$4$ and is quadrangle
free. Let $l_0$ be a line of maximal valency, and pick four lines
$l_1,\ldots,l_4$ adjacent to~$l_0$ so that $\operatorname{val} l_1\ge\ldots\ge\operatorname{val} l_4$.
Then, $\tilde D:=\{l_0,\ldots,l_4\}$ is a subgraph of type~$\tD_4$ and,
by~\eqref{eq.pseudo.count} and \autoref{lem.tD4},
\[*
\ls|\operatorname{Fn}(S)|\le\operatorname{val} l_0+\operatorname{val} l_1+\operatorname{val} l_2+\operatorname{val} l_3+\operatorname{val} l_4+11.
\]
The sum of the valencies in the latter expression is estimated using
\autoref{lem.quad.3} (and the assumption $\operatorname{val} l_3,\operatorname{val} l_4\le\operatorname{val} l_2$),
and we obtain $\ls|\operatorname{Fn}(S)|\le38$.
Finally, assume that $\operatorname{Fn}(S)$ has a quadrangle, \ie, a $4$-cycle
$l_1,l_2,l_3,l_4$, which can be regarded as a subgraph~\smash{$\tilde D$} of
type~\smash{$\tA_3$}. Assume that $\ls|\operatorname{Fn}(S)|\ge46$ and
apply \eqref{eq.tA3}:
each of the first two terms is bounded by~$18$ by \autoref{lem.0-q},
and $\ls|\Cal K|\le20$ by \autoref{lem.tA3}; hence,
$\ls|\operatorname{Fn}(S)|\le52$.
\endproof
\remark\label{rem.triang.free}
The idea that triangle free configurations of lines in quartics should be
treated separately is also due to B.~Segre, and his geometric proof~\cite{Segre}
of the bound $\ls|\operatorname{Fn}(S)|\le64$ for such configurations can easily be
modified to get $\ls|\operatorname{Fn}(S)|\le60$. Our bound $\ls|\operatorname{Fn}(S)|\le52$ given by
\autoref{th.Segre} can be improved to $\ls|\operatorname{Fn}(S)|\le50$: in
\autoref{lem.tA3}, the few types with $\ls|\Cal K|=19$ or~$20$ can be ruled
out similar to \smash{$5\tA_3\oplus\bA_1$}. Probably, this better bound is
still not sharp:
currently, the best known example of a triangle free configuration has
$37$ lines.
\endremark
\section{Proofs}\label{S.pencils}
In this section, we prove the principal results of the paper,
\viz. \autoref{th.unique}, \autoref{th.56}, and
\autoref{ad.counts}.
\subsection{Large configurations}\label{s.large.conf}
All proofs
are based on the following
statement,
which bounds the number of lines in a
geometric
configuration containing a
plane.
With further applications in mind, we state it in a slightly stronger form.
\proposition\label{prop.56}
If a geometric configuration~$S$ contains a plane, then either
\roster*
\item
$S$ is isomorphic to \config{64},
\config{60}, \config{60.2}, \config{56}, \config{56.real}, \config{q56},
\config{54}, \config{q54},
\config{52.1}, \config{52.2}, \config{52.4}, \config{52.5-3}, \config{52.5},
or~\config{52.0},
or
\item
one has $\ls|\operatorname{Fn}(S)|\le52$ and $\max\{\operatorname{val} l\,|\,l\in\operatorname{Fn}(S)\}\le17$, or
\item
one has $\ls|\operatorname{Fn}(S)|<52$.
\endroster
\endproposition
\proof
Assume that $\ls|\operatorname{Fn}(S)|\ge52$.
If $S$
has a pencil of type $(6,*)^\circ$,
\autoref{th.6-q} implies that
$S\cong\config{64}$, \config{60}, \config{54}, \config{52.1}, \config{52.2},
or \config{52.0}.
Hence, from now on we can also assume that $S$ does not
have such a pencil. In particular,
\[*
v:=\max\{\operatorname{val} l\,|\,l\in\operatorname{Fn}(S)\}\le18;
\]
if $v\le15$, then $\ls|\operatorname{Fn}(S)|=52$
by \autoref{cor-Segre}.
Pick a maximal pencil~$\Cal P$ such that $\ls|\Cal P|=v$.
By~\eqref{tablichka}, this pencil~$\Cal P$ has
a $3$-fiber $\{m_1,m_2,m_3\}$, which we order so that
$\operatorname{val} m_1\le\operatorname{val} m_2\le\operatorname{val} m_3$.
We have
\[*
\operatorname{val} m_1 + \operatorname{val} m_2 + \operatorname{val} m_3 = \ls| \operatorname{Fn}(S) | + 8 - v \ge 42;
\]
hence $\operatorname{val} m_3\ge14$. Then $\operatorname{mult} m_3\ge2$ by~\eqref{tablichka} again,
and one can find another plane
$\{s_0,s_1,s_2,m_3\}$ containing~$m_3$.
The lines $s_0,s_1,s_2$ are sections of~$\Cal P$, and they satisfy
the inequality
\[*
\operatorname{val} s_0 + \operatorname{val} s_1 + \operatorname{val} s_2 = \ls|\operatorname{Fn}(S)|+8-\operatorname{val} m_3.
\]
Assuming that $\operatorname{val} s_0\ge\operatorname{val} s_1\ge\operatorname{val} s_2$, we obtain
\[
3\operatorname{val} s_0\ge\ls|\operatorname{Fn}(S)|+8-\operatorname{val} m_3.
\label{eq.valency}
\]
Let $v=18$. We need to show that $\operatorname{val} s_0\ge15$; then,
Theorems~\ref{th.6-0.primitive}, \ref{th.4-6}, and~\ref{th.5-3},
would imply that $S\cong\config{56}$,
\config{52.4}, \config{52.5-3}, or~\config{52.5}.
If $\operatorname{val} m_3\le17$, the desired inequality $\operatorname{val} s_0\ge15$ follows
from~\eqref{eq.valency}. If $\operatorname{val} m_3=18$ \emph{and} $\operatorname{val} s_0\le14$, we
repeat the same argument, taking $m_3$ and~$s_0$ for~$l$ and~$m_3$,
respectively, and obtaining a section $s_0'$ of the new pencil $\Cal P(m_3)$ of
valency $\operatorname{val} s_0'\ge16$.
If $v=16$ and $\ls|\operatorname{Fn}(S)|>52$, the same argument as above produces a
pencil~$\Cal P'$
(not necessarily the original one)
and section $s_0'$ of~$\Cal P'$ such that
$\ls|\Cal P'|=\operatorname{val} s_0'=16$; hence, Lemmas~\ref{lem.3-7}, \ref{lem.4-4},
and~\ref{lem.5-1} imply that $S\cong\config{56.real}$, \config{q56}, or
\config{q54}.
Finally, let $v=17$. If $\ls|\operatorname{Fn}(S)|\ge54$, we use the same argument to get a
pencil~$\Cal P'$ and section~$s_0'$ of~$\Cal P'$ such that $\ls|\Cal P'|=17$
and $\operatorname{val} s_0'\ge16$; hence, by Lemmas~\ref{lem.4-5} and~\ref{lem.5-2}, we have
$S\cong\config{60.2}$. If $\ls|\operatorname{Fn}(S)|=53$, the argument may fail as one may
have $\operatorname{val} s_0\le15$ and $\operatorname{val} m_3=16$.
But in the latter case, starting with $\Cal P':=\Cal P(m_3)$, we obtain a
section $s_0'$ of $\Cal P'$ such that $\operatorname{val} s_0'\ge16$;
this is a contradiction to Lemmas~\ref{lem.3-7}, \ref{lem.4-4},
and~\ref{lem.5-1} (if $\operatorname{val} s_0'=16$) or~\ref{lem.4-5} and~\ref{lem.5-2} (if
$\operatorname{val} s_0'=17$;
in this latter case,
when applying the lemmas, we regard~$m_3$ as a section of $\Cal P(s_0')$).
\endproof
\subsection{Real configurations}\label{s.config.real}
In the next statement, we consider a configuration~$S$
equipped with a ``real structure'', \ie,
an involutive automorphism $S\to S$, $a\mapsto\bar a$.
For such a configuration, the \emph{real part} is the subconfiguration
$S_\R:=\{a\in S\,|\,\bar a=a\}$.
We let $\operatorname{Fn}_\R(S):=\operatorname{Fn}(S_\R)$ and call the lines contained in $\operatorname{Fn}_\R(S)$
\emph{real}.
\proposition\label{prop.real.plane}
Let $S$ be a geometric configuration equipped with an involutive automorphism
$a\mapsto\bar a$, and assume that $\ls|\operatorname{Fn}_\R(S)|>48$. Then any plane
$\alpha\subset\operatorname{Fn}(S)$ is \emph{totally real}, \ie, $\alpha\subset\operatorname{Fn}_\R(S)$.
\endproposition
\proof
Consider a plane $\alpha = \{a_1, a_2, a_3, a_4\}$.
Let $r$ be the number of real lines in $\alpha$,
and let $r_i$ be the number of real lines in $\Cal P(a_i)$, $i = 1,\ldots,4$.
The following formula is a
straightforward modification of the conclusion of \autoref{cor-Segre}:
\[*
\ls| \operatorname{Fn}_\R(S) | = r_1 + r_2 + r_3 + r_4 - 2r. \label{Segre-real-equality}
\]
If $a_i$ is real, then $r_i \le \ls|\Cal P(a_i)|\le 20$ by~\eqref{tablichka}.
Otherwise,
$r_i \le \ls|\Cal P(a_i) \cap \Cal P(\bar a_i)|$, which
does not exceed $2$ or~$10$ if $a_i\cdot\bar a_i=1$ or~$0$,
respectively, see \autoref{lem.2-pencils}.
Consider the conjugate plane~$\bar\alpha$. If $\alpha \cap \bar\alpha = \varnothing$,
then
$r = 0$ and $\ls | \operatorname{Fn}_\R(S)| \le 40$.
If $\ls| \alpha \cap \bar\alpha | = 1$ (\ie, $r = 1$),
then $\ls| \operatorname{Fn}_\R(S)| \le 48$.
If $\ls| \alpha \cap \bar\alpha | > 1$,
then $\alpha = \bar\alpha$ by \autoref{two-planes}
and $r_i \le 2$ for each non-real line $a_i$;
hence, $\ls| \operatorname{Fn}_\R(S)| \le 16r + 8$ and, since $r \ne 3$,
we conclude that $r = 4$, \ie, $\alpha\subset\operatorname{Fn}_\R(S)$.
\endproof
The following corollary is a real counterpart of \autoref{th.Segre}.
\corollary\label{Segre_real}
Let $X \subset \Cp3$ be a nonsingular real quartic.
If $\ls| \operatorname{Fn}_\R(X) | > 52$,
then $\Cal F_\R(X)$ contains a plane\rom;
moreover, any plane in $\Cal F(X)$ is contained in $\Cal F_\R(X)$.
\endcorollary
\proof
Clearly, $\Cal F_\R(X)$ is the real part of the Fano configuration $\Cal F(X)$
with respect to the involution $a\mapsto-\conj_*a$ induced by the real
structure. The configuration $\Cal F(X)$ is geometric
(see \autoref{th.complex})
and it contains a plane (see \autoref{th.Segre});
there remains to apply \autoref{prop.real.plane}.
\endproof
\subsection{Proof of \autoref{th.unique}}\label{proof.64}
According to \autoref{th.complex},
the Fano configuration $\Cal F(X)$ is
geometric and, since we assume $\ls|\operatorname{Fn}(X)|>52$,
\autoref{th.Segre} implies that this configuration
contains a plane. Then, by
\autoref{prop.56}, $\Cal F(X)$ is isomorphic to
one of the configurations listed in \autoref{tab.large},
and the statement of the theorem
follows from \autoref{lem.unique} and \autoref{app.complex}.
(The quartic corresponding to~\config{64} is
identified as Schur's quartic since both contain $64$
lines.)
\qed
\subsection{Proof of \autoref{th.56}}\label{proof_th.56}
The real Fano configuration $\Cal F_\R(X)$ is
geometric (see \autoref{th.complex})
and, assuming that $\ls|\operatorname{Fn}_\R(X)|>52$,
this configuration contains a plane due to \autoref{Segre_real}.
Then,
the statement of the corollary follows from \autoref{prop.56} and
\autoref{cor.real}.
\qed
\subsection{Proof of \autoref{ad.counts}}\label{proof.counts}
The statement is an immediate consequence
of \autoref{lem.counts} and \autoref{th.complex} (for
lines in complex quartics) or \autoref{cor.real} (for real lines in real
quartics).
\qed
\section{The known examples}\label{S.examples}
\subsection{Schur's quartic}\label{s.Schur}
The following example
is more than $130$ years old: it goes back to F.~Schur~\cite{Schur:quartics}
(see also \cite{barth:1983,boissiere:2007}).
According to our \autoref{th.unique}, this is the \emph{only} nonsingular
quartic containing $64$ lines, and its configuration of lines is \config{64}.
Consider the quartic~$\quartic{64}$ given by the equation
\[
\Gf(z_0,z_1)=\Gf(z_2,z_3),\qquad
\Gf(u,v):=u(u^3-v^3).
\label{eq.Schur}
\]
Let $k_0:=0$, $k_1:=1$, and $k_{2,3}:=(-1\pm i\sqrt3)/2$ be the four roots of
$\Bbb Gf(u/v,1)$. Then, $*uartic{64}$ contains the sixteen lines
\[
z_1=k_rz_0,*uad z_3=k_sz_2,*uad r,s=0,\ldots,3.
\label{eq.16}
\]
Besides,
$*uartic{64}$ contains the line
\[*
l_0:=\{z_0=z_2,\ z_1=z_3\}.
\]
Finally, observe that $\Gf$ is the ``most symmetric'' polynomial of degree
four: its zero locus $\{k_0,k_1,k_2,k_3\}\subset\Cp1$ has $j$-invariant~$0$,
\ie, $\Gf$ is
invariant under a subgroup $G\cong\AG4\subset\PGL(2,\C)$.
This subgroup lifts to a subgroup $\smash{\tilde G}\subset\GL(2,\C)$ preserving $\Gf$
literally, not just up to a factor; it is generated by
\[*
\frac1{\sqrt3}\bmatrix1&-1\\-2&-1\endbmatrix,
\bmatrix1&0\\phantom{0}&\epsilon\endbmatrix\in\Bbb GL(2,\C),*uad
\epsilon^3=1,\epsilon\ne1,
\]
and the kernel of the projection $\smash{\tilde G}\onto G$ is the
central subgroup $H\cong\CG4$ generated by $i\id$.
Letting~$\smash{\tilde G}$ act separately on $(z_0,z_1)$ and $(z_2,z_3)$, we obtain a
subgroup $\Aut_0*uartic{64}:=\smash{\tilde G}\odot\smash{\tilde G}\subset\Aut *uartic{64}$, where the central
product is the quotient of $\smash{\tilde G}\times\smash{\tilde G}$ by the diagonal
$H\subset H\times H$.
The stabilizer of~$l_0$ is the diagonal $\smash{\tilde G}/H\subset\Aut_0*uartic{64}$; hence,
its orbit consists of $48$ distinct lines, and $X$ contains $16+48=64$ lines.
A computation of the intersection matrix reveals that the sixteen
lines~\eqref{eq.16}
are distinguished: each is contained in six planes~$\Bbb Ga$ such that
$*uartic{64}\cap\Bbb Ga$ splits into four lines, whereas any other line
is contained in
four such planes. Hence, any \hbox{(anti-)}\penalty0automorphism of~$*uartic{64}$
preserves
the pair of lines $m_{ij}:=\{z_i=z_j=0\}$,
$(i,j)=(0,1)$ or $(2,3)$.
It follows that $\Aut *uartic{64}$ is an extension of
the group $\Aut_0*uartic{64}$ preserving each of $m_{01}$, $m_{23}$
by the involution $z_0\leftrightarrow z_2$, $z_1\leftrightarrow z_3$
interchanging $m_{01}\leftrightarrow m_{23}$.
This group has order~$1152$.
As a consequence, we have the following statement.
\proposition\label{prop.X64}
Up to automorphism, the quartic $*uartic{64}$ has four real structures,
\viz. those sending $[z_0:z_1:z_2:z_3]$ to
\[*
[\bar z_0:\bar z_1:\bar z_2:\bar z_3],*uad
[\bar z_0:\bar z_1:i\bar z_2:i\bar z_3],*uad
[\bar z_2:\bar z_3:\bar z_0:\bar z_1],*uad
[\bar z_2:\bar z_3:-\bar z_0:-\bar z_1].
\]
The numbers of real lines are $8$, $4$, $28$, and~$4$, respectively.
\endproposition
\proof
Denote by $\bar\,$ the standard complex conjugation, and extend its action to
matrices. Then, any real structure
on~$*uartic{64}$ is
$\Bbb Gs_g\:z\mapsto g\bar z$, where $g\in\Aut *uartic{64}$ is such that $g\bar g=\id$.
Two real structures $\Bbb Gs_g$, $\Bbb Gs_{g'}$ are isomorphic if and only if
one has $g'=h\1g\bar h$ for some $h\in\Aut *uartic{64}$.
The set of lines real with respect to a real structure
$\Bbb Gs_g$ is found as follows.
A line $l\subset *uartic{64}$ as in~\eqref{eq.16} is
uniquely
determined by its
``endpoints'' $l\cap m_{01}$, $l\cap m_{23}$, and the set of all eight
endpoints is preserved by any (anti-)automorphism of~$*uartic{64}$.
Hence, such a line is real if and only if $\Bbb Gs_g$ preserves its pair of
endpoints;
there are four such lines for any~$g$.
The other lines constitute the orbit $\Aut_0*uartic{64}/G$ of~$l_0$,
where $G=\smash{\tilde G}/H$ is the diagonal. Since $\bar l_0=l_0$, a line $h l_0$ is
$\Bbb Gs_g$-real
if and only if $h\1g\bar h\in G$.
Now, both statements are easily proved using
\GAP~\cite{GAP4}.
\endproof
\subsection{A real quartic with $56$ real lines}\label{s.56}
To our knowledge, this example is new.
Below, we make use of bihomogeneous polynomials, \ie, algebraic curves in the
product $\Cp1\times\Cp1$. For the sake of simplicity, we use the affine
coordinates $u:=z_0/z_1$, $v:=z_2/z_3$ in the two copies of~$\Cp1$.
Fix
$\e:=\pm\sqrt2$ and consider the polynomials
\[*
\poly_1(u,v):=-3v+v^3+2\e u,\frak quad
\poly_2(u,v):=2\e u^3-v+3u^2v
\]
of bidegree $(1,3)$ and $(3,1)$, respectively.
The quartic $Y:=*uartic{56.real}$ in question is given by the polynomial
\[
z_1z_3^3\poly_1\Bigl(\frac{z_0}{z_1}, \frac{z_2}{z_3}\Bigr)
-z_1^3z_3\poly_2\Bigl(\frac{z_0}{z_1}, \frac{z_2}{z_3}\Bigr),
\label{eq.56}
\]
or, explicitly,
\[*
3\e z_0^2z_1z_2+3\e z_1z_2z_3^2-\e z_1^3z_2-\e z_1z_2^3+4z_0^3z_3-4z_0z_3^3.
\]
Below, we show that $Y$ contains $56$ real lines; by \autoref{th.unique}, this
configuration of lines is \config{56.real}, and $Y$ is the only real quartic
with this property.
The quartic~$Y$ contains the two lines
\[
m_1:=\{z_0=z_1=0\},*uad
m_2:=\{z_2=z_3=0\}.
\label{eq.m}
\]
The curves $\{\poly_k=0\}\subset\Cp1\times\Cp1$, $k=1,2$, intersect at $10$
real points, see \autoref{tab.points}.
\table
\caption{The solutions to $\poly_1(u,v)=\poly_2(u,v)=0$}\label{tab.points}
\centerline{\vbox{\openup2pt
\halign{\strut\ $#$\hss&&\frak quad$#$\hss\ \cr
\noalign{\hrule
}
P_1(-1+\e, -1+\e)&A_1(1/\e, -2)&B_1(\infty,\infty)\cr
P_2(1+\e, -1-\e)&A_2(1/2, \e)&B_2(0,0)\cr
P_3(1-\e, 1-\e)&C_1(-1/\e, 2)\cr
P_4(-1-\e, 1+\e)&C_2(-1/2, -\e)\cr
\noalign{
\hrule}\crcr}}}
\endtable
Each such point~$L(u,v)$ gives rise to the line
\[
l:=\{z_0=uz_1,\ z_2=vz_3\}
\label{eq.l}
\]
through $[u:1:0:0]\in m_2$ and $[0:0:v:1]\in m_1$; it is
contained in~$Y$.
The intersection of~$Y$ with each of the six planes
shown in \autoref{tab.planes} splits into four
lines; twelve of the resulting $24$ lines (some of which coincide)
are among~\eqref{eq.m} and~\eqref{eq.l}, see \autoref{tab.planes},
and the twelve others are new and distinct.
\table
\caption{The six special planes}\label{tab.planes}
\hbox to\hsize{\hss\vbox{\openup2pt
\halign{\strut\ $\deMaple#$:\hss&&\frak quad\deMaple#\hss\cr
\noalign{\hrule
}
\omit\strut\ Plane\hss&New lines&Old lines\ \cr
\noalign{\hrule
}
z[1]=0&$(z[0]-z[3])*(z[0]+z[3])$&$m_1$, $b_1$\cr
z[2]=0&$(z[0]-z[3])*(z[0]+z[3])$&$m_2$, $b_2$\cr
z[1]=\e*z[0]&$(z[0]+z[2]-z[3])*(z[0]-z[2]+z[3])$&$m_1$, $a_1$\cr
z[1]=-\e*z[0]&$(z[0]+z[2]+z[3])*(z[0]-z[2]-z[3])$&$m_1$, $c_1$\cr
z[2]=\e*z[3]&$(z[0]+z[1]+z[3])*(z[0]+z[1]-z[3])$&$m_2$, $a_2$\cr
z[2]=-\e*z[3]&$(z[0]-z[1]-z[3])*(z[0]-z[1]+z[3])$&$m_2$, $c_2$\cr
\noalign{
\hrule}\crcr}}\hss}
\endtable
Finally, the ten skew lines~\eqref{eq.l} constitute sixteen quadruples
$(l_1,l_2,l_3,l_4)$, each lying in a quadric, see
\autoref{tab.quadrics}. The equation of this quadric~$Q$ is
\[*
z_1z_3\,\chi\Bigl(\frac{z_0}{z_1}, \frac{z_2}{z_3}\Bigr)=0,
\]
where $\chi(u,v)$ is the polynomial given in \autoref{tab.quadrics}
(see also \autoref{rem.quadrics} below).
The intersection $Y\cap Q$ is a bidegree~$(4,4)$ curve in~$Q$. Since it
contains four skew generatrices of~$Q$, it must split into $l_1,\ldots,l_4$
and four generatrices of the other family.
Two of them are $m_1$, $m_2$, and the two others are new.
It is straightforward
that the $16\times2=32$ lines thus obtained are all
real (see also \autoref{rem.real} below),
pairwise distinct (as the sixteen quadrics
are distinct),
and distinct from~\eqref{eq.m}, \eqref{eq.l}, and
the lines in \autoref{tab.planes}
(as they are disjoint from $m_1\cup m_2$).
\table
\caption{The sixteen special quadrics}\label{tab.quadrics}
\hbox to\hsize{\hss\vbox{\openup2pt
\halign{\strut\ \lowercase{$#$}:\hss&&\frak quad$#$\ \hss\cr
\noalign{\hrule
}
(A_1,B_1,C_1,B_2)&u+2\e v\cr
(A_2,B_2,C_2,B_1)&u-2\e v\cr
(P_1,P_3,B_1,B_2)&u-v\cr
(P_2,P_4,B_1,B_2)&u+v\cr
(P_3,P_4,B_1,A_1)&1+\e u+v\cr
(P_1,P_2,B_1,C_1)&1-\e u-v\cr
(P_2,P_3,B_2,A_2)&\e u-v+uv\cr
(P_1,P_4,B_2,C_2)&\e u-v-uv\cr
(P_1,P_4,A_1,C_1)&\e-2\e u-v+uv\cr
(P_2,P_3,A_1,C_1)&\e+2\e u+v+uv\cr
(P_1,P_2,A_2,C_2)&1-2\e u+v-\e uv\cr
(P_3,P_4,A_2,C_2)&1+2\e u-v-\e uv\cr
(P_1,P_3,A_1,A_2)&-3\e+4+(2\e-2)u-(2\e-2)v+\e uv\cr
(P_1,P_3,C_1,C_2)&-3\e+4-(2\e-2)u+(2\e-2)v+\e uv\cr
(P_2,P_4,A_1,C_2)&3\e+4+(2\e+2)u+(2\e+2)v+\e uv\cr
(P_2,P_4,A_2,C_1)&3\e+4-(2\e+2)u-(2\e+2)v+\e uv\cr
\noalign{
\hrule}\crcr}}\hss}
\endtable
Summarizing, we obtain $2+10+12+32=56$ real lines in~$Y$.
\remark\label{rem.quadrics}
Let $u_1,\ldots,u_4\in m_1$ and $v_1,\ldots,v_4\in m_2$ be two quadruples,
where, as above, we let $u:=z_0/z_1$ and $v:=z_2/z_3$.
Then the lines $l_i:=(u_iv_i)$, $i=1,\ldots,4$, \cf.~\eqref{eq.l},
lie in a quadric if and only if the quadruples $(u_i)$ and $(v_i)$ are
isomorphic, \ie, their cross-ratios are equal.
When this is the case,
the quadruples are related by a fractional linear transformation,
$v_i=f(u_i)$ for $i=1,\ldots,4$, and
the equation of the quadric is obtained from
$z_2/z_3=f(z_1/z_0)$
by clearing the denominators.
\endremark
\subsection{Further properties of $*uartic{56.real}$}\label{s.56.more}
Let $Y:=*uartic{56.real}$ be the quartic constructed in the previous section.
The following statements are straightforward.
\roster
\item
The lines~$m_1$ and~$m_2$ are disjoint.
\item
The
lines~\eqref{eq.l} are pairwise disjoint; each of them
intersects~$m_1$ and~$m_2$.
\endroster
Let~$\Bbb Ga$ be a plane as in
\autoref{tab.planes}. Then
$Y\cap\Bbb Ga$ splits into $m_i$, $l$, and a pair $r_1$, $r_2$,
where $i=1$ or~$2$ and $l$ is one of the lines~\eqref{eq.l},
see \autoref{tab.planes}.
\roster[3]
\item
The lines~$r_1$ and~$r_2$ intersect~$m_i$, $l$, and each other;
they are disjoint from $m_{3-i}$ and any line~$l'\ne l$ as in~\eqref{eq.l}.
\endroster
This observation confirms the fact that all twelve lines thus obtained are
pairwise distinct and distinct from~\eqref{eq.m} and~\eqref{eq.l}.
Note that, according to \autoref{tab.planes}, the plane~$\Bbb Ga$ is completely
determined by the line $l\subset\Bbb Ga$ as in~\eqref{eq.l};
hence, we can use the notation $\Bbb Ga(l)$ and $r_{1,2}(l)$.
Finally, pick a quadruple $(l_1,l_2,l_3,l_4)$ as in \autoref{tab.quadrics},
let~$Q$ be the corresponding quadric,
and let $n_1$, $n_2$ be the two extra lines (other
than $m_1$, $m_2$) in $Y\cap Q$.
The remaining observations follow from the properties of the generatrices
of~$Q$; in particular,
the intersection $Y\cap Q$ may contain at most four generatrices of
each family and, if a line intersects three generatrices of the same family,
it lies in~$Q$.
\roster[4]
\item
The lines~$n_1$ and~$n_2$ are disjoint from~$m_1\cup m_2$; they intersect
each of $l_1,l_2,l_3,l_4$ and are disjoint from all other lines as
in~\eqref{eq.l}.
\item\label{intr.5}
If a line $l$ as in~\eqref{eq.l} is distinct from all~$l_i$, $i=1,\ldots,4$, the
lines~$n_1$, $n_2$ and $r_{1,2}(l)$
can be indexed so that $\operatorname\#(n_i\cap r_j)=\Bbb Gd_{ij}$ is the Kronecker symbol.
\endroster
In more detail, the intersection matrix can be computed using explicit
equations of all lines. We leave this exercise to the reader.
\remark\label{rem.real}
Statement~\iref{intr.5} proves also that $n_1$ and~$n_2$ are real:
if they were complex conjugate, they would have to intersect the same
real line~$r_1$ or~$r_2$.
\endremark
We conclude with a description of the automorphism group $\Aut *uartic{56.real}$.
\proposition\label{prop.Aut56}
The group $\Aut *uartic{56.real}\subset\PGL(4,\C)$
is generated by
\roster*
\item
the reflections $z_i\mapsto\rho_iz_i$ with
$\rho_i=\pm1$ and $\rho_0\rho_3=\rho_1\rho_2$,
\item
the transposition $z_1\leftrightarrow z_2$,
\item
the order~$4$ map $z_0\mapsto z_3$, $z_3\mapsto-z_0$, and
\item
the involution $z_0\mapsto(z_0+z_3)/\e$, $z_3\mapsto(z_0-z_3)/\e$.
\endroster
This group has order~$32$\rom;
it acts faithfully on the set of lines contained in~$*uartic{56.real}$.
\endproposition
\proof
Computing the intersection matrix, one can see that there are exactly four
pairs $(l_1,l_2)$ of skew lines
such that $l_1$ and~$l_2$ intersect ten other common lines.
In turn, these pairs split
into four quadrangles: one is $(m_1,m_2)$, $(b_1,b_2)$, and the other is
formed by the four remaining lines in the planes $\{z_1=0\}$ and $\{z_2=0\}$,
see \autoref{tab.planes}.
The last involution in the statement interchanges the two quadrangles. The
other transformations preserve the quadrangle $(m_1,m_2)$, $(b_1,b_2)$ and,
hence, the coordinate tetrahedron; they can easily be listed.
The last two statements are straightforward.
\endproof
\remark\label{rem.Y.aut}
All automorphisms of~$*uartic{56.real}$ are real with respect to the standard
complex conjugation $c\:[z_i]\mapsto[\bar z_i]$. Hence, the last statement of
\autoref{prop.Aut56} implies that $c$ is the only real structure on~$*uartic{56.real}$
with respect to which all $56$ lines are real.
(In fact, up to automorphism $*uartic{56.real}$ has six real structures: they are
enumerated by the conjugacy classes of the involutions in $\Aut *uartic{56.real}$.)
\endremark
\remark\label{rem.Y.rational}
By
rescaling $u\mapsto\e u$, one can make
the quartic~*uartic{56.real} defined
over~$\Q$; however, some of the lines are still defined over the quadratic
number field $\Q(\e)$ only. To see this, one can observe that the
cross-ratios of some of the quadruples of points in $m_1$ cut by the lines
as in~\eqref{eq.l} are irrational, see \autoref{tab.points}.
\endremark
\subsection{A few other quartics}\label{s.60}
In
this concluding section, we describe
briefly a few other quartics with large configurations of lines,
for which we do not know explicit equations.
The existence (and uniqueness, when it holds) is given by the existence of
the corresponding $\bold L$-configurations, see \autoref{tab.list},
and the results of \autoref{arithmetical_reduction}.
Other properties, \eg, groups of projective automorphisms, classes of real
structures, \etc., can easily be computed using the corresponding properties
of configurations
and Nikulin's theory of lattice extensions; however, we
omit these details.
\subsubsection{The quartics mentioned in \autoref{th.unique}}\label{ss.th.unique}
By \autoref{lem.unique} and \autoref{th.complex}, for each
of the four configurations
$S=\config{60}$, \config{q56}, \config{54}, \config{q54},
there
exists
a unique, up to projective equivalence, quartic~$X$ such that
$\Cal F(X)\cong S$;
this
quartic can be chosen real, see
\autoref{symmetric-reflexive}.
We denote these quartics by
*uartic{60}, *uartic{q56}, *uartic{54}, *uartic{q54}, respectively.
Besides, for $S=\config{60.2}$ or \config{56}, there is a unique pair of
nonequivalent
complex conjugate quartics $X$, $\bar X$ such that
$\Cal F(X)\cong\Cal F(\bar X)\cong S$; these pairs are denoted by
*uartic{60.2}, \bquartic{60.2} and
*uartic{56}, \bquartic{56}, respectively.
Together with *uartic{64} (see~\autoref{s.Schur}) and *uartic{56.real}
(see \autoref{s.56} and \autoref{s.56.more}), these
surfaces make a complete list of
quartics containing more than $52$ lines.
\table
\caption{Configurations with many $\bold L$-realizations}\label{tab.Y}
\def\raise3pt\hbox{$\scriptstyle\sqrt{}$}{\raise3pt\hbox{$\scriptstyle\sqrt{}$}}
\hbox to\hsize{\hss\vbox{\halign{\strut*uad#*uad\hss&\hss#\hss*uad&
\hss#\hss*uad&\hss#\hss*uad&\hss#\hss*uad&&\hss$#$\hss*uad\cr
\noalign{\hrule}
\vphantom{\Big(}\hss$S$\hss&$\ls|\operatorname{Fn}|$&t.r.&ref&sym&\ls|\OG_h(S)|&
\discr S&T:=S^\perp&(r,c)
\cr
\noalign{
\hrule
}
\config{60.2}&60&&&&240&
\<\frac65\>\oplus\<\frac{10}{11}\>&[4,1,14]&(0,1)
\cr
\config{56}&56&&&&128&
\<\frac{15}8\>\oplus\<\frac{15}8\>&[8,0,8]&(0,1)
\cr
\config{52.5}&52&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&8&\<\frac12\>\oplus\<\frac32\>\oplus\<\frac4{19}\>
&[2,0,38]&(1,1)\cr
&&&&&&&[8,2,10]
\cr
\config{52.real}&52&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&8&\<\frac6{79}\>
&[2,1,40]&(1,2)\cr
&&&&&&&[4,1,20]\cr
&&&&&&&[8,1,10]
\cr
\config{q52.2}&52&&&&64&\<\frac14\>\oplus\<\frac54\>\oplus\<\frac25\>
&[8,4,12]&(0,1)
\cr
\config{51}&51&&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&12&
\<\frac43\>\oplus\<\frac2{29}\>
&[6,3,16]&(1,1)\cr
&&&&&&&[4,1,22]
\cr
\config{50.2}&50&&$\times2$&$\times2$&12&
\<\frac74\>\oplus\<\frac58\>\oplus\<\frac43\>
&[4,0,24]&(2,0)
\cr
\config{50}&50&&&&16&
\<\frac74\>\oplus\<\frac58\>\oplus\<\frac43\>
&[4,0,24]&(0,1)
\cr
\config{48}&48&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&8&\<\frac12\>\oplus\<\frac5{16}\>\oplus\<\frac23\>
&[2,0,48]&(1,0)
\cr
\config{48.2}&48&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&8&\<\frac25\>\oplus\<\frac4{19}\>
&[2,1,48]&(2,1)\cr
&&&&&&&[8,1,12]\cr
&&&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&\raise3pt\hbox{$\scriptstyle\sqrt{}$}&&&[10,5,12]
\cr
\noalign{
\hrule}\crcr}}\hss}
\endtable
\subsubsection{Large configurations of real lines}\label{ss.real}
Arguing as in the proof of \autoref{lem.unique}, it is not difficult to
classify the $\bold L$-realizations of the four other \config{Y}-type
configurations listed in \autoref{tab.list}; we summarize the results in
\autoref{tab.Y}. This table is organized similarly to \autoref{tab.large},
with the
last column showing the numbers~$r$, $c$ of, respectively, real and pairs of
complex conjugate quartics with the given configuration of lines.
Note, though, that, with the only exception of \autoref{prop.unique.real}
below, we never assert the uniqueness of the real form: considering the large
automorphism groups, it is likely not unique, \cf. \autoref{prop.X64} and
\autoref{rem.Y.aut}.
If $S=\config{48.2}$, the natural homomorphism
$\rho\:\OG_h(S)\to\Aut\discr S$ maps $\OG_h(S)$ onto the index~$2$ subgroup
$\Aut\discr_5S$; in the other three cases, $\rho$ is surjective.
It follows that,
in all four cases, the weak isomorphism classes of $\bold L$-realizations are
classified by the transcendental lattices $T:=S^\perp$.
In three cases, there are several isomorphism classes;
however, only one of them is totally reflexive. In view of
\autoref{app.real},
this fact merits a separate statement.
\proposition\label{prop.unique.real}
For each \config{Y}-type configuration~$S$ listed in \autoref{tab.list},
there is a unique, up to real projective equivalence, real quartic
$Y$ such that $\Cal F_\R(Y)\cong S$.
The real part of this real quartic is a connected surface of genus~$10$.
\done
\endproposition
The configuration $S=\config{48.2}$ admits another reflexive
$\bold L$-realization, which is not totally reflexive;
thus, the corresponding quartic~$X$ can be chosen real, but some of the
lines contained in~$X$ are necessarily complex conjugate.
(Note that, unlike
the case of
Schur's quartic~*uartic{64}, see \autoref{prop.X64},
or the maximizing real quartic *uartic{56.real}, see
\autoref{rem.Y.aut},
this quartic~$X$ and the quartic~$Y$ given by \autoref{prop.unique.real}
are not just distinct real forms of the same surface: $X$ and $Y$ are not
projectively equivalent even over~$\C$.)
\remark\label{rem.min.field}
\autoref{tab.Y} suggests also that the quartics $*uartic{Y}_*$ realizing
each of the configurations $\config{Y}_*:=\config{52.5}$, \config{52.real},
\config{48}, \config{48.2} should be Galois conjugate over a certain
algebraic number field~$\Bbb K$ of degree~$3$, $5$, $1$, $4$,
respectively, so that this field~$\Bbb K$ is the minimal field of definition
of~$*uartic{Y}_*$. In particular, *uartic{52.5} and *uartic{52.real} are
probably not defined over~$\Q$, \cf. open problems at the end of
\autoref{s.results}.
\endremark
\subsubsection{Configurations with many realizations}\label{ss.many}
For completeness, we describe also the few configurations from
\autoref{tab.list} that admit more than one geometric realization.
For all configurations of maximal rank, the computation runs exactly as in
the proof of \autoref{lem.unique} and can easily be automated.
Omitting the straightforward details, we summarize the results in
\autoref{tab.Y}.
(The meaning of the columns is explained in \autoref{ss.real}.)
In the line containing~\config{50.2}, the symbol ``$\times2$'' means
that there are two distinct
geometric $\bold L$-realizations, which are both reflexive.
Any other configuration in \autoref{tab.list} admits a unique geometric
$\bold L$-realization, and this realization is reflexive.
In particular, for a configuration~$S$ as in
\autoref{tab.list}, a
geometric $\bold L$-realization is reflexive if and only if
it is symmetric. Currently, we do not know whether this is a common property
of configurations: in some similar $K3$-related problems, it may not hold
(\cf. the existence of a connected real equisingular family of simple
plane sextics containing no real curves discovered in~\cite{degt:geography}.)
\remark\label{rem.abstract.K3}
The isomorphism type of a singular $K3$-surface (\ie, one of Picard
rank~$20$) is determined by its transcendental lattice. Analyzing
Tables~\ref{tab.large} and~\ref{tab.Y}, one can observe that the quartics
*uartic{60} and *uartic{q56} are isomorphic as abstract $K3$-surfaces;
a similar statement holds for
the seven quartics realizing the configurations
\config{54}, \config{50.2}, and \config{50}.
On the other hand, each of the configurations \config{52.5},
\config{52.real}, \config{51}, \config{48.2} is realized by several distinct
$K3$-surfaces.
\endremark
\subsubsection{Families with parameters}\label{ss.Z}
Finally, worth mentioning are the
configurations $S=\config{52.0}$, \config{50.0}, \config{49}
in \autoref{tab.list}.
Recall
that the dimension of the equilinear moduli space
$\Omega'(S)/\!\PGL(4, \C)$,
\cf. the proof of \autoref{th.complex},
equals $20-\rank S$; hence, we obtain $1$-parameter families of
distinct quartics sharing the same combinatorial type of configurations of
lines.
The connectedness of each family follows from \autoref{th.complex} and
a
computation based on
the results of \cite{Miranda.Morrison:book}, covering
indefinite transcendental lattices.
We have
\roster*
\item
if $S=\config{52.0}$, then
$\Cal S=\<\frac12\>\oplus\<\frac12\>\oplus\<\frac58\>\oplus\<\frac43\>$
and $\OG^+(T)\onto\Aut\discr T$;
\item
if $S=\config{50.0}$, then
$\Cal S=\<\frac74\>\oplus\<\frac25\>\oplus\<\frac25\>$
and $\OG_h(S)\onto\Aut\Cal S$;
\item
if $S=\config{49}$, then
$\Cal S=\Cal V_2\oplus\<\frac54\>\oplus\<\frac67\>$
and
$\Im[\OG_h(S)\to\Aut\Cal S]=\Aut\Cal S_2$.
\endroster
The uniqueness of $T:=S^\perp$ in its genus and the assertion on $\OG^+(T)$
for $S=\config{52.0}$ follow from~\cite{Miranda.Morrison:book}.
Thus, in each case, there is a unique geometric $\bold L$-realization.
If $S=\config{52.0}$, this realization is totally reflexive,
\ie,
there is a
$1$-parameter family (not necessarily
connected) of real quartics~$Z$ such
that $\Cal F_\R(Z)\cong\config{52.0}$.
For the other two configurations, for each involution
$a\in\Aut\discr S$, exactly one of $\pm a$
admits an involutive lift to $\OG_h(S)$. Hence,
these configurations are reflexive
(not totally)
and the corresponding equilinear families also
contain real quartics.
The existence of the family corresponding to \config{52.0}, with
exactly $52$ lines
in each quartic, as well as the non-uniqueness
of $\bold L$-realizations discussed in \autoref{ss.real} and \autoref{ss.many},
can be regarded as yet another justification for the
assumption $\ls|\operatorname{Fn}(X)|>52$
in \autoref{th.unique}: quartics with fewer lines are probably
more difficult to control.
{
\let\.\DOTaccent
\def$'${$'$}
}
\end{document} |
\begin{document}
\title{An absolutely stable discontinuous Galerkin method for the indefinite
time-harmonic Maxwell equations with large wave number}
\markboth{X. FENG AND H. WU}{DG METHODS FOR THE MAXWELL EQUATIONS}
\author{
Xiaobing Feng\thanks{Department of Mathematics, The University of
Tennessee, Knoxville, TN 37996, U.S.A. ({\tt [email protected]}).
The work of this author was partially supported by the NSF grants DMS-0710831
and DMS-1016173.}
\and
Haijun Wu\thanks{Department of Mathematics, Nanjing University, Jiangsu,
210093, P.R. China. ({\tt [email protected]}). The work of this author was
partially supported by the National Magnetic Confinement Fusion Science Program under grant 2011GB105003 and by the NSF of China grants 10971096, 11071116, 91130004.}}
\maketitle
\begin{abstract}
This paper develops and analyzes an interior penalty discontinuous
Galerkin (IPDG) method using piecewise linear polynomials for the indefinite
time harmonic Maxwell equations with the impedance boundary condition
in the three dimensional space. The main novelties of the proposed IPDG method
include the following: first, the method penalizes not only the jumps of the
tangential component of the electric field across the element faces
but also the jumps of the tangential component of its vorticity field; second,
the penalty parameters are taken as complex numbers of negative imaginary parts.
For the differential problem, we prove that the sesquilinear form
associated with the Maxwell problem satisfies a generalized
weak stability (i.e., inf-sup condition) for star-shaped domains.
Such a generalized weak stability readily infers wave-number explicit
a priori estimates for the solution of the Maxwell problem, which
plays an important role in the error analysis for the IPDG method.
For the proposed IPDG method, we show that the discrete sesquilinear form
satisfies a coercivity for all positive mesh size $h$ and wave number $k$ and
for general domains including non-star-shaped ones.
In turn, the coercivity easily yields the well-posedness
and stability estimates (i.e., a priori estimates) for the discrete
problem without imposing any mesh constraint. Based on these discrete stability
estimates, by adapting a nonstandard error estimate technique
of \cite{fw08a}, we derive both the
energy-norm and the $L^2$-norm error estimates for the IPDG method
in all mesh parameter regimes including pre-asymptotic regime
(i.e., $k^2 h\gtrsim 1$). Numerical experiments are also presented
to gauge the theoretical results and to numerically examine the
pollution effect (with respect to $k$) in the error bounds.
\end{abstract}
\begin{keywords}
Time harmonic Maxwell equations, impedance boundary condition,
interior penalty discontinuous Galerkin methods, absolute stability, error estimates
\end{keywords}
\begin{AMS}
65N12,
65N15,
65N30,
78A40
\end{AMS}
\section{Introduction}\label{sec-1}
This paper develops and analyzes interior penalty discontinuous Galerkin (IPDG)
methods for the following time harmonic Maxwell problem:
\begin{alignat}{2} \label{e1.1}
\curl\curl\mathbf{E}- k^2\mathbf{E}&=\mathbf{f} &&\qquad\mbox{in }\Omega, \\
\curl\mathbf{E}\times\nu-{\rm\mathbf i} \lambda \mathbf{E}_T&=\mathbf{g}
&&\qquad\mbox{on }\Gamma:=\partial\Omega,\label{e1.2}
\end{alignat}
where $\Omega\subset\mathbf{R}^3$ is a bounded domain with
Lipschitz continuous boundary $\partial\Omega$ and of diameter $R$.
$\nu$ denotes the unit outward normal to $\partial\Omega$, ${\rm\mathbf i}:=\sqrt{-1}$, the
imaginary unit, and $\mathbf{E}_T=(\nu\times\mathbf{E})\times\nu$,
the {\em tangential component} of the electric field $\mathbf{E}$. $k$, called
{\em wave number}, is a positive constant and $\lambda> 0$
is known as the impedance constant. \eqref{e1.2} is
the standard impedance boundary condition. Assume that $\mathbf{g}\cdot\nu=0$,
hence, $\mathbf{g}_T=\mathbf{g}$.
Problem \eqref{e1.1}--\eqref{e1.2} is a prototypical problem in
electromagnetic scattering (cf. \cite{Colton_Kress99} and the references therein)
and has been used extensively as a model (and benchmark) problem to develop various
numerical discretization methods including finite element methods
\cite{Monk03,ZSWX09} and discontinuous Galerkin methods
\cite{HMP11,HPSS05,HPS04,CLS04,NPC11}, and to develop fast
solvers (cf. \cite{TW05} and the references therein).
The above Maxwell problem with large wave number $k$ is numerically
difficult to solve mainly because of the following two reasons.
First, the large wave number $k$ implies the small wave length
$\ell:=2\pi/k$, that is, the wave is a short wave and very oscillatory.
It is well known that, in every coordinate direction, one must put some
minimal number of grid points in each wave length in order to resolve the
wave. Using such a fine mesh evidently results in a huge algebraic problem
to solve regardless what discretization
method is used. Practically, ``the rule of thumb'' is to use $6$--$10$ grid
points per wave length, which means that the mesh
size $h$ must satisfy the constraint $hk\lesssim 1$. To the
best of our knowledge, no numerical method in the literature has been proved to be
uniquely solvable and to have an error bound under the mesh constraint $hk\lesssim 1$
for the above Maxwell problem. Moreover, numerical experiments have shown
that under the mesh condition $hk\lesssim 1$ the errors of all existing numerical
methods grow as the wave number $k$ increases. This means that
the error is not completely controlled by the product $hk$ and it
provides strong evidence of the existence of so-called ``pollution''
in the error bounds. It is known now \cite{bs00} that
the existence of pollution is related to the loss of stability
of numerical methods with large wave numbers for the scalar wave equation,
which is also expected to be the case for the vector wave equations.
Second, for large wave number $k$, the Maxwell operator is strongly
indefinite. Such a strong indefiniteness certainly passes onto
any discretization of the Maxwell problem. In other words,
the stiffness matrix of the discrete problem is not only very
large but also strongly indefinite. Solving such a large,
strongly indefinite, and ill-conditioned algebraic problem
is proved to be very challenging and all the well-known iterative
methods were proved numerically to be either ineffective or divergent
for indefinite wave problems in the case of large wave number
(cf. \cite{TW05} and the references therein).
This paper is an attempt to address the first difficulty
mentioned above for the Maxwell equations. In particular,
our goal is to design and analyze discretization methods which
have superior stability properties and give optimal rates of convergence
for the Maxwell problem. Motivated by our previous
experiences with the Helmholtz equation \cite{fw08a,fw08b},
we again try to accomplish the goal by developing some interior
penalty discontinuous Galerkin method for problem
\eqref{e1.1}--\eqref{e1.2}. The focus of the paper
is to establish the rigorous stability and error analysis
for the proposed IPDG method, in particular, in
the preasymptotic regime (i.e., when $k^2h\gtrsim 1$).
For the ease of presentation and to better present ideas, we confine
ourselves to only consider the linear element in this paper
and will discuss its high order extensions in a forthcoming paper.
The remainder of this paper is organized as follows. Section~\ref{sec-2}
is devoted to the study of the coercivity of the Maxwell
operator and the wave-number explicit estimates for
the solution of \eqref{e1.1}--\eqref{e1.2}. We show that
the sesquilinear form associated with the Maxwell problem
satisfies a generalized weak coercivity (i.e., inf-sup
condition). This coercivity in turn readily infers
the wave-number explicit solution estimates which
were proved in \cite{Feng10,HMP10}. We note that the proofs
of both results given in this paper are of independent interest
and refer the reader to \cite{Feng10b} for further
discussions in this direction. Section~\ref{sec-3} presents
the construction of our IPDG method and some simple properties
of the proposed discrete sesquilinear form. Section~\ref{sec-4}
studies the coercivity of the discrete sesquilinear form and
derives stability estimates for the IPDG solutions. It is proved
that the discrete sesquilinear form satisfies a
coercivity for all mesh size $h>0$ and all wave number $k>0$
and for general domains including non-star-shaped ones,
which is stronger than the generalized
weak coercivity satisfied by its continuous counterpart.
All these are possible because of the special design of the
discrete sesquilinear form and the special property
$\curl\curl\mathbf{v}_h=0$ (element-wise) for all piecewise
linear functions $\mathbf{v}_h$.
This coercivity in turn readily infers the well-posedness
and stability estimates for the discrete problem without imposing
any mesh constraint. Section~\ref{sec-5} is devoted to the error
analysis for the proposed IPDG method. By using the discrete
stability estimates and adapting a nonstandard
error estimate technique of \cite{fw08a},
we derive both the energy-norm and the $L^2$-norm error estimates
for the IPDG method in all mesh parameter regimes including
pre-asymptotic regime (i.e., $k^2 h\gtrsim 1$). Finally,
we present some numerical experiment results in Section~\ref{sec-6}
to gauge the theoretical results and to numerically
examine the pollution effect (with respect to $k$) in the error bounds.
\section{Generalized inf-sup condition and stability estimates for PDE solutions}
\lambdabel{sec-2}
The standard space, norm and inner product notation
are adopted in this paper. Their definitions can be found in
\cite{bs94,ciarlet78}.
In particular, $(\cdot,\cdot)_Q$ and $\lambdangle \cdot,\cdot\rangle_\Sigma$
for $Q\subset \Omegagae$ and $\Sigma\subset \partialartial \Omegagae$ denote the $L^2$-inner product
on {\em complex-valued} $L^2(Q)$ and $L^2(\Sigma)$ spaces, respectively.
For a given function space $W$, let $\mathbf{W}=(W)^3$. In particular,
$\mathbf{L}^2(\Omegagae)=(L^2(\Omegagae))^3$ and $\mathbf{H}^k(\Omegagae)=(H^k(\Omegagae))^3$.
We also define
\betagin{align*}
\mathbf{H}(\curl, \Omegagae)&:=\bigl\{ \mathbf{v}{\rm\mathbf i}n \mathbf{L}^2(\Omegagae);\, \curl \mathbf{v}{\rm\mathbf i}n \mathbf{L}^2(\Omegagae) \bigr\},\\
\mathbf{H}(\ddiv, \Omegagae)&:=\bigl\{ \mathbf{v}{\rm\mathbf i}n \mathbf{L}^2(\Omegagae);\, \ddiv \mathbf{v}{\rm\mathbf i}n L^2(\Omegagae) \bigr\},\\
\mathbf{H}(\ddiv_0, \Omegagae)&:=\bigl\{ \mathbf{v}{\rm\mathbf i}n \mathbf{L}^2(\Omegagae);\, \ddiv \mathbf{v}=0 \bigr\},\\
\mathbf{c}V &:= \bigl\{ \mathbf{v}{\rm\mathbf i}n \mathbf{H}(\curl, \Omegagae);\, \mathbf{v}_T {\rm\mathbf i}n \mathbf{L}^2(\Gammamamma) \bigr\},\\
\hat{\mathbf{c}V} &:= \bigl\{ \mathbf{v}{\rm\mathbf i}n \mathbf{H}(\curl, \Omegagae);\, \curl \mathbf{v}{\rm\mathbf i}n \mathbf{H}(\curl,\Omegagae),\, \mathbf{v} {\rm\mathbf i}n \mathbf{H}(\curl, \Gammamamma) \bigr\}.
\end{align*}
Throughout this paper, the bold face letters are used to denote
three-dimensional vectors or vector-valued functions, and
$C$ is used to denote a generic positive constant
which is independent of $h$ and $k$. We also use the shorthand
notation $A\lesssim B$ and $B\gtrsim A$ for the
inequality $A\leq C B$ and $B\geq CA$. $A\sigmameq B$ is a shorthand
notation for the statement $A\lesssim B$ and $B\lesssim A$.
We now recall the definition of star-shaped domains.
\betagin{definition}\lambdabel{def1}
$Q\subset \mathbf{R}^3$ is said to be a {\em star-shaped} domain with respect
to $\mathbf{x}_Q{\rm\mathbf i}n Q$ if there exists a nonnegative constant $c_Q$ such that
\betagin{equation}\lambdabel{estar}
(\mathbf{x}-\mathbf{x}_Q)\cdot \mathbf{n}u_Q\ge c_Q \qquad \forall \mathbf{x}{\rm\mathbf i}n\partialartial Q.
\end{equation}
$Q\subset \mathbf{R}^3$ is said to be {\em strictly star-shaped} if $c_Q$ is positive,
where $\mathbf{n}u_Q$ denotes the unit outward normal to $\partial Q$.
\end{definition}
Throughout this paper, we assume that $\Omegagae$ is a strictly star-shaped
domain.
Introduce the following sesquilinear form on $\mathbf{c}V\times \mathbf{c}V$
\betagin{equation}\lambdabel{e1.3}
a(\mathbf{u},\mathbf{v}):= (\curl\mathbf{u},\curl\mathbf{v})_\Omegagae-k^2(\mathbf{u},\mathbf{v})_\Omegagae
-{\rm\mathbf i}\lambdambda\lambdangle \mathbf{u}_T,\mathbf{v}_T\rangle_\Gammamamma.
\end{equation}
Then the weak formulation for the Maxwell system \eqref{e1.1}--\eqref{e1.2}
is defined as seeking $\mathbf{E}{\rm\mathbf i}n \mathbf{c}V$ such that
\betagin{eqnarray}\lambdabel{e1.4}
a(\mathbf{E},\mathbf{v}) =(\mathbf{f},\mathbf{v})_\Omegagae+\lambdangle\mathbf{g},\mathbf{v}_T\rangle_\Gammamamma
\qquad\forall\mathbf{v}{\rm\mathbf i}n \mathbf{c}V.
\end{eqnarray}
Using the Fredholm Alternative Principle it can be
shown that problem \eqref{e1.4} has a unique solution
(cf. \cite{Colton_Kress99, Monk03}).
Note that choosing $\mathbf{v}=\nablalabla \partialsi$ with $\partialsi{\rm\mathbf i}n H^1_0(\Omegagae)$ shows that $
(k^2\mathbf{E}+\mathbf{f}, \nablalabla \partialsi)_\Omegagae=0,
$
or
\betagin{equation}\lambdabel{e1.6}
\ddiv (k^2\mathbf{E}+\mathbf{f})=0 \qquad\text{in } \Omegagae.
\end{equation}
Next, we prove that the sesquilinear form $a(\cdot,\cdot)$ satisfies
a generalized weak coercivity which is expressed in terms of a generalized
{\em inf-sup} condition.
\betagin{theorem}\lambdabel{inf-sup}
Let $\Omegagae\subset \mathbf{R}^3$ be a bounded star-shaped domain
with the positive constant $c_\Omegagae$ and the diameter $R=\mbox{\rm diam}(\Omegagae)$.
Then for any $\mathbf{u}{\rm\mathbf i}n \hat{\mathbf{c}V}\cap \mathbf{H}(\ddiv_0,\Omegagae)$ there holds the following
generalized inf-sup condition for the sesquilinear form $a(\cdot,\cdot)$:
\betagin{align}\lambdabel{eq2.0a}
\sup_{\mathbf{v}{\rm\mathbf i}n \hat{\mathbf{c}V}} \frac{ |{\rm\mathbf i}m a(\mathbf{u},\mathbf{v})|}{ \|\mathbf{v}\|_{E}}
\,\,+\sup_{\mathbf{v}{\rm\mathbf i}n \hat{\mathbf{c}V}} \frac{ |\re a(\mathbf{u},\mathbf{v})|}{ \normL{\mathbf{v}}{\Omegagae}}
\geq \frac{1}{\gammamamma} \|\mathbf{u}\|_{E},
\end{align}
where
\betagin{align}\lambdabel{eq2.0b}
&\gammamamma :=\max\bigl\{4kR, M \bigr\},\qquad
M:= \frac{4R^2(k^2+\lambdambda^2)}{\lambdambda c_\Omegagae}, \displaybreak[0] \\
&\normL{\mathbf{u}}{\Omegagae} :=\Bigl( k^2\|\mathbf{u}\|_{\mathbf{L}^2(\Omegagae)}^2
+ k^2 c_\Omegagae\|\mathbf{u}\|_{\mathbf{L}^2(\Gammamamma)}^2 \Bigr)^{\frac12}, \displaybreak[0]\\
&\|\mathbf{u}\|_{E} := \Bigl( k^2 \|\mathbf{u}\|_{\mathbf{L}^2(\Omegagae)}^2+ k^2 c_\Omegagae \|\mathbf{u}\|_{\mathbf{L}^2(\Gammamamma)}^2+\|\curl\mathbf{u}\|_{\mathbf{L}^2(\Omegagae)}^2
+c_\Omegagae \|\curl\mathbf{u}\|_{\mathbf{L}^2(\Gammamamma)}^2 \Bigr)^{\frac12}.\lambdabel{eq2.0c}
\end{align}
\end{theorem}
\betagin{proof}
Let $\mathbf{w}:=\mathbf{x}-\mathbf{x}_\Omegagae$. Setting $\mathbf{v}=\mathbf{u}$ in \eqref{e1.3} and taking
the real and imaginary parts we get
\betagin{align}\lambdabel{eq2.1}
\re a(\mathbf{u},\mathbf{u}) &=\|\curl \mathbf{u}\|_{\mathbf{L}^2(\Omegagae)}^2 - k^2\|\mathbf{u}\|_{\mathbf{L}^2(\Omegagae)}^2,\\
{\rm\mathbf i}m a(\mathbf{u},\mathbf{u}) &=-\lambdambda \|\mathbf{u}_T\|_{\mathbf{L}^2(\Gammamamma)}^2. \lambdabel{eq2.2}
\end{align}
Alternatively, setting $\mathbf{v}=\curl \mathbf{u}\times \mathbf{w}$ in \eqref{e1.3} (notice that $\mathbf{v}{\rm\mathbf i}n\mathbf{c}V$ is a valid test function
for $\mathbf{u}{\rm\mathbf i}n \hat{\mathbf{c}V}$), taking the real part, and using the following integral identity (cf. \cite{Feng10})
\betagin{align} \lambdabel{eq2.3}
\re (\mathbf{u},\mathbf{v})_\Omegagae + \frac12 \|\mathbf{u}\|_{\mathbf{L}^2(\Omegagae)}^2
&+\frac12 \lambdangle \mathbf{w}\cdot \mathbf{n}u, |\mathbf{u}|^2 \rangle_\Gammamamma \\
&=\re \lambdangle \mathbf{w}\times \mathbf{u}, \mathbf{u}\times \mathbf{n}u \rangle_\Gammamamma
+\re (\ddiv \mathbf{u}, \mathbf{u}\cdot \mathbf{w})_\Omegagae \nonumber
\end{align}
and the assumption that $\ddiv \mathbf{u}=0$, we get
\betagin{align}\lambdabel{eq2.4}
2\re a(\mathbf{u},\mathbf{v}) &=2\re\bigr( \curl\mathbf{u},\curl\mathbf{v})_\Omegagae
- 2k^2\re (\mathbf{u},\mathbf{v})_\Omegagae +2\lambdambda {\rm\mathbf i}m \lambdangle \mathbf{u}_T,\mathbf{v}_T \rangle_\Gammamamma \\
&=2\re\bigr( \curl\mathbf{u},\curl\mathbf{v})_\Omegagae + k^2 \|\mathbf{u}\|_{\mathbf{L}^2(\Omegagae)}^2
+k^2 \lambdangle \mathbf{w}\cdot \mathbf{n}u, |\mathbf{u}|^2 \rangle_\Gammamamma \nonumber\\
&\qquad
-2k^2 \re \lambdangle \mathbf{w}\times \mathbf{u}, \mathbf{u}\times \mathbf{n}u \rangle_\Gammamamma
+2\lambdambda {\rm\mathbf i}m \lambdangle \mathbf{u}_T,\mathbf{v}_T \rangle_\Gammamamma. \nonumber
\end{align}
From \eqref{eq2.1} and \eqref{eq2.4} and using the following integral
identity (cf. \cite{Feng10})
\betagin{align}\lambdabel{eq2.5}
2\re\bigr( \curl\mathbf{u},\curl\mathbf{v})_\Omegagae
=\|\curl \mathbf{u}\|_{\mathbf{L}^2(\Omegagae)}^2 + \lambdangle \mathbf{w}\cdot\mathbf{n}u, |\curl \mathbf{u}|^2 \rangle_\Gammamamma,
\end{align}
we have
\betagin{align}\lambdabel{eq2.6}
2k^2 \|\mathbf{u}\|_{\mathbf{L}^2(\Omegagae)}^2 &= k^2 \|\mathbf{u}\|_{\mathbf{L}^2(\Omegagae)}^2 + k^2 \|\mathbf{u}\|_{\mathbf{L}^2(\Omegagae)}^2\\
&=-2\re\bigr( \curl\mathbf{u},\curl\mathbf{v})_\Omegagae
-k^2 \lambdangle \mathbf{w}\cdot \mathbf{n}u, |\mathbf{u}|^2 \rangle_\Gammamamma \nonumber\\
&\qquad
+2k^2 \re \lambdangle \mathbf{w}\times \mathbf{u}, \mathbf{u}\times \mathbf{n}u \rangle_\Gammamamma
-2\lambdambda {\rm\mathbf i}m \lambdangle \mathbf{u}_T,\mathbf{v}_T \rangle_\Gammamamma \nonumber \\
&\qquad
+ 2\re a(\mathbf{u},\mathbf{v}) +\|\curl \mathbf{u}\|_{\mathbf{L}^2(\Omegagae)}^2 - \re a(\mathbf{u},\mathbf{u}) \nonumber \displaybreak[0]\\
&=-\lambdangle \mathbf{w}\cdot\mathbf{n}u, |\curl \mathbf{u}|^2 \rangle_\Gammamamma
-k^2 \lambdangle \mathbf{w}\cdot \mathbf{n}u, |\mathbf{u}|^2 \rangle_\Gammamamma \nonumber\\
&\qquad
+2k^2 \re \lambdangle \mathbf{w}\times \mathbf{u}, \mathbf{u}\times \mathbf{n}u \rangle_\Gammamamma
-2\lambdambda {\rm\mathbf i}m \lambdangle \mathbf{u}_T,\mathbf{v}_T \rangle_\Gammamamma \nonumber \\
&\qquad
+ 2\re a(\mathbf{u},\mathbf{v})-\re a(\mathbf{u},\mathbf{u}) \nonumber \displaybreak[0]\\
&=-\lambdangle \mathbf{w}\cdot\mathbf{n}u, |\curl \mathbf{u}|^2 \rangle_\Gammamamma
-k^2 \lambdangle \mathbf{w}\cdot \mathbf{n}u, |\mathbf{u}|^2 \rangle_\Gammamamma \nonumber \\
&\qquad
-2k^2 \lambdangle \mathbf{w}\cdot \mathbf{n}u, |\mathbf{u}\times \mathbf{n}u|^2 \rangle_\Gammamamma
+2k^2 \re \lambdangle \mathbf{w}_T\times \mathbf{u}, \mathbf{u}\times \mathbf{n}u \rangle_\Gammamamma \nonumber \\
&\qquad
-2\lambdambda{\rm\mathbf i}m\lambdangle \mathbf{u}_T,\mathbf{v}_T \rangle_\Gammamamma
+ 2\re a(\mathbf{u},\mathbf{v})-\re a(\mathbf{u},\mathbf{u}). \nonumber
\end{align}
Here we have used the decomposition $\mathbf{w}=(\mathbf{w}\cdot\mathbf{n}u) \mathbf{n}u +\mathbf{w}_T$
to obtain the last equality.
On noting that $\lambdangle \mathbf{w}_T\times \mathbf{u}, \mathbf{u}\times \mathbf{n}u \rangle_\Gammamamma
=\lambdangle \mathbf{u}\cdot \mathbf{n}u, \mathbf{w}_T\cdot \mathbf{u}_T \rangle_\Gammamamma$,
$\|\mathbf{w}\|_{L^{\rm\mathbf i}nfty(\Omegagae)} \leq R$, and that $|\mathbf{v}|\le|\curl \mathbf{u}||\mathbf{w}|$, using the star-shaped domain assumption
and Schwarz inequality we obtain
\betagin{align}\lambdabel{eq2.7}
2k^2 \|\mathbf{u}\|_{\mathbf{L}^2(\Omegagae)}^2
&\le -c_\Omegagae \|\curl \mathbf{u}\|_{\mathbf{L}^2(\Gammamamma)}^2
-k^2 c_\Omegagae \|\mathbf{u}\|_{\mathbf{L}^2(\Gammamamma)}^2 \\
&\quad
-2 k^2 c_\Omegagae \|\mathbf{u}_T\|_{\mathbf{L}^2(\Gammamamma)}^2
+2k^2 R \norml{\mathbf{u}}{\Gammamamma}\norml{\mathbf{u}_T}{\Gammamamma} \nonumber \\
&\quad
+2\lambdambda R\norml{\mathbf{u}_T}{\Gammamamma}\norml{\curl \mathbf{u}}{\Gammamamma}
+ 2\re a(\mathbf{u},\mathbf{v})-\re a(\mathbf{u},\mathbf{u}). \nonumber \displaybreak[0]\\
&\leq -\frac{c_\Omegagae}2 \|\curl \mathbf{u}\|_{\mathbf{L}^2(\Gammamamma)}^2
-\frac{k^2 c_\Omegagae}{2} \|\mathbf{u}\|_{\mathbf{L}^2(\Gammamamma)}^2
-2 k^2 c_\Omegagae \|\mathbf{u}_T\|_{\mathbf{L}^2(\Gammamamma)}^2 \nonumber\\
&\quad
+\frac{2R^2(k^2+\lambdambda^2)}{c_\Omegagae} \|\mathbf{u}_T\|_{\mathbf{L}^2(\Gammamamma)}^2
+ 2\re a(\mathbf{u},\mathbf{v})- \re a(\mathbf{u},\mathbf{u}). \nonumber
\end{align}
Finally, it follows from \eqref{eq2.1}, \eqref{eq2.2} and \eqref{eq2.7} that
\betagin{align}\lambdabel{eq2.9}
&2k^2 \|\mathbf{u}\|_{\mathbf{L}^2(\Omegagae)}^2 +2\|\curl \mathbf{u}\|_{\mathbf{L}^2(\Omegagae)}^2
+c_\Omegagae \|\curl \mathbf{u}\|_{\mathbf{L}^2(\Gammamamma)}^2+k^2 c_\Omegagae \|\mathbf{u}\|_{\mathbf{L}^2(\Gammamamma)}^2 \\
&
\leq M |{\rm\mathbf i}m a(\mathbf{u},\mathbf{u})| + |\re a(\mathbf{u},4\mathbf{v})|, \nonumber
\end{align}
where $\mathbf{v}=\curl \mathbf{u}\times \mathbf{w}$ and $M$ is defined in \eqref{eq2.0b}.
It is easy to check that there holds for $\mathbf{v}=\curl \mathbf{u}\times \mathbf{w}$
\[
\normL{\mathbf{v}}{\Omegagae} \leq kR\|\mathbf{u}\|_{E}.
\]
Hence, it follows from \eqref{eq2.9} that
\betagin{align}\lambdabel{eq2.10}
\frac{ |{\rm\mathbf i}m a(\mathbf{u},\mathbf{u})|}{ \|\mathbf{u}\|_{E}}
+\frac{ |\re a(\mathbf{u},4\mathbf{v})|}{ \normL{4\mathbf{v}}{\Omegagae}}
&\geq \frac{ |{\rm\mathbf i}m a(\mathbf{u},\mathbf{u})|}{ \|\mathbf{u}\|_{E}}
+ \frac{|\re a(\mathbf{u},4\mathbf{v})|}{ 4kR \|\mathbf{u}\|_{E} } \\
& \geq \frac{1}{\gammamamma} \cdot \frac{ M|{\rm\mathbf i}m a(\mathbf{u},\mathbf{u})| + |\re a(\mathbf{u},4\mathbf{v})| }{\|\mathbf{u}\|_{E} }
\geq \frac{1}{\gammamamma} \|\mathbf{u}\|_{E}, \nonumber
\end{align}
where $\gammamamma= \max\bigl\{4kR, M \bigr\}$ as defined in \eqref{eq2.0b}.
The proof is complete.
\end{proof}
An immediate consequence of the above generalized {\em inf-sup}
condition is the following stability estimate for solutions of
problem \eqref{e1.1}--\eqref{e1.2}.
\betagin{theorem}\lambdabel{stability}
In addition to the assumptions of Theorem \ref{inf-sup}, assume
that $\mathbf{f}{\rm\mathbf i}n \mathbf{H}(\ddiv,\Omegagae)$ and $\mathbf{g}{\rm\mathbf i}n \mathbf{L}^2(\Gammamamma)$.
Let $\mathbf{E}{\rm\mathbf i}n \hat{\mathbf{c}V}\cap \mathbf{H}(\ddiv,\Omegagae)$ be a solution of the variational
problem \eqref{e1.4}. Then there holds the following stability estimate:
\betagin{align}\lambdabel{e2.1}
\|\curl\mathbf{E}\|_{\mathbf{L}^2(\Omegagae)} &+k\|\mathbf{E}\|_{\mathbf{L}^2(\Omegagae)}
+\sqrt{c_\Omegagae}\|\curl\mathbf{E}\|_{\mathbf{L}^2(\Gammamamma)}+k\sqrt{c_\Omegagae}\|\mathbf{E}\|_{\mathbf{L}^2(\Gammamamma)} \\
&\lesssim k^{-1} \gammamamma M(\mathbf{f},\mathbf{g}) + k^{-2}\|\ddiv \mathbf{f}\|_{\mathbf{L}^2(\Omegagae)} \nonumber
\end{align}
for all $k,\lambdambda> 0$, where
\betagin{align}\lambdabel{e2.2}
M(\mathbf{f},\mathbf{g}) &:= \|\mathbf{f}\|_{\mathbf{L}^2(\Omegagae)} + c_\Omegagae^{-\frac12}\|\mathbf{g}\|_{\mathbf{L}^2(\Gammamamma)}.
\end{align}
\end{theorem}
\betagin{proof}
Let $\varphi {\rm\mathbf i}n H^1_0(\Omegagae)$ solve
\betagin{equation}\lambdabel{e2.3}
\Deltatalta \varphi = -k^{-2} \ddiv \mathbf{f} \qquad \mbox{in } \Omegagae.
\end{equation}
Set $\mathbf{F}=\nablalab \varphi$ and $\mathbf{u}=\mathbf{E}-\mathbf{F}$, where $\mathbf{E}$ is a solution
to \eqref{e1.4}. Trivially, we have $\curl \mathbf{F}=0$ and $\ddiv \mathbf{F}= - k^{-2} \ddiv \mathbf{f}$
in $\Omegagae$, and $\mathbf{F}_T= \nablalab_T \varphi =0$ on $\Gammamamma$.
By \eqref{e1.6} we also have $\ddiv\mathbf{u}=\ddiv(\mathbf{E}-\mathbf{F})=0$.
Hence, $\mathbf{u}{\rm\mathbf i}n\mathbf{H}(\ddiv_0,\Omegagae)$. Moreover, since $\mathbf{E}$
satisfies \eqref{e1.4}, it is easy to verify that $\mathbf{u}$ satisfies
\betagin{align}\lambdabel{e2.4}
a(\mathbf{u},\mathbf{v})=(\mathbf{f}+k^2\mathbf{F},\mathbf{v})_\Omegagae + \lambdangle \mathbf{g},\mathbf{v}_T\rangle_\Gammamamma
\qquad \forall \mathbf{v}{\rm\mathbf i}n \mathbf{c}V.
\end{align}
Testing \eqref{e2.3} by $\varphi$ and integrating by parts on both sides
of the resulting equation yield
\[
\|\nablalab \varphi\|_{\mathbf{L}^2(\Omegagae)}^2 = -k^{-2} (\mathbf{f}, \nablalab\varphi)_\Omegagae
\leq k^{-2} \|\mathbf{f}\|_{\mathbf{L}^2(\Omegagae)} \|\nablalab \varphi\|_{\mathbf{L}^2(\Omegagae)}.
\]
Hence,
\betagin{equation}\lambdabel{e2.5}
\|\mathbf{F}\|_{\mathbf{L}^2(\Omegagae)}=\|\nablalab \varphi\|_{\mathbf{L}^2(\Omegagae)}\leq k^{-2} \|\mathbf{f}\|_{\mathbf{L}^2(\Omegagae)}.
\end{equation}
Alternatively, testing \eqref{e2.3} by $\nablalab\varphi\cdot\mathbf{w} =\mathbf{F}\cdot \mathbf{w}$
with $\mathbf{w}=\mathbf{x}-\mathbf{x}_\Omegagae$, using the following Rellich identity for the Laplacian
(cf. \cite{Rellich40, Cummings_Feng06}):
\[
2\re (\Deltatal \varphi\, \nablalab\overline{\varphi}\cdot\mathbf{w})
= |\nablalab \varphi|^2 + 2\re \bigl(\ddiv(\nablalab \varphi\, \nablalab \overline{\varphi}\cdot \mathbf{w})\bigr)
-\ddiv( \mathbf{w} |\nablalab \varphi|^2 ),
\]
and integrating by parts we get (note that $\mathbf{F}_T=0$)
\betagin{align*}
-2 k^{-2} (\ddiv \mathbf{f}, \mathbf{F}\cdot \mathbf{w})_\Omegagae
&=\|\mathbf{F}\|_{\mathbf{L}^2(\Omegagae)}^2
+2\re \lambdangle \mathbf{F}\cdot \mathbf{n}u, \mathbf{F}\cdot\mathbf{w}\rangle_\Gammamamma
- \lambdangle \mathbf{w}\cdot \mathbf{n}u, |\mathbf{F}|^2 \rangle_\Gammamamma \\
&=\|\mathbf{F}\|_{\mathbf{L}^2(\Omegagae)}^2 + \lambdangle \mathbf{w}\cdot \mathbf{n}u, |\mathbf{F}|^2 \rangle_\Gammamamma.
\end{align*}
Hence, by \eqref{e2.5} and the star-shaped domain assumption we obtain
\betagin{align}\lambdabel{e2.6}
\|\mathbf{F}\|_{\mathbf{L}^2(\Omegagae)}^2+c_\Omegagae \|\mathbf{F}\|_{\mathbf{L}^2(\Gammamamma)}^2
&\leq 2 k^{-2}\|\mathbf{w}\|_{L^{\rm\mathbf i}nfty(\Omegagae)} \|\ddiv \mathbf{f}\|_{\mathbf{L}^2(\Omegagae)}
\|\mathbf{F}\|_{\mathbf{L}^2(\Omegagae)} \\
&\leq 2 k^{-4} R\, \|\ddiv \mathbf{f}\|_{\mathbf{L}^2(\Omegagae)}\norml{\mathbf{f}}{\Omegagae}.\nonumber
\end{align}
Finally, by \eqref{e2.5} and Schwarz inequality we get
\betagin{align}\lambdabel{e2.7}
\bigl| (\mathbf{f}+k^2\mathbf{F},\mathbf{v})_\Omegagae + \lambdangle \mathbf{g},\mathbf{v}_T\rangle_\Gammamamma \bigr|
&\leq 2k^{-1} M(\mathbf{f},\mathbf{g})\Bigl( k^2 \|\mathbf{v}\|_{\mathbf{L}^2(\Omegagae)}^2
+ k^2 c_\Omegagae \|\mathbf{v}_T\|_{\mathbf{L}^2(\Gammamamma)}^2 \Bigr)^{\frac12} \\
&\leq 2k^{-1} M(\mathbf{f},\mathbf{g})\,\normL{\mathbf{v}}{\Omegagae}. \nonumber
\end{align}
It follows from the generalized inf-sup condition \eqref{eq2.0a},
\eqref{e2.4} and \eqref{e2.7} that
\betagin{align}\lambdabel{e2.8}
\gammamamma^{-1} \|\mathbf{u}\|_{E} \leq 4k^{-1} M(\mathbf{f},\mathbf{g}),
\end{align}
which together with \eqref{e2.6} and the relation $\mathbf{u}=\mathbf{E}-\mathbf{F}$ as well
as the definition of the energy norm $\|\mathbf{u}\|_{E}$ infer that
(again, note that $\mathbf{F}_T=0$)
\betagin{align*}
\|\mathbf{E}\|_{E} &\leq \|\mathbf{u}\|_{E} + \|\mathbf{F}\|_{E} \\
&\leq 4k^{-1} \gammamamma M(\mathbf{f},\mathbf{g}) + k\big(\|\mathbf{F}\|_{\mathbf{L}^2(\Omegagae)}^2+c_\Omegagae \|\mathbf{F}\|_{\mathbf{L}^2(\Gammamamma)}^2\big)^{\frac12} \\
&\leq 4k^{-1} \gammamamma M(\mathbf{f},\mathbf{g})
+ 2R \|\mathbf{f}\|_{\mathbf{L}^2(\Omegagae)} + (2k)^{-2}\,\|\ddiv \mathbf{f}\|_{\mathbf{L}^2(\Omegagae)}.
\end{align*}
Hence, \eqref{e2.1} holds. The proof is complete.
\end{proof}
We conclude this section with a few remarks.
\betagin{remark}
Since problem \eqref{e1.1}--\eqref{e1.2} is linear,
the stability estimate \eqref{e2.1} immediately implies
the uniqueness of the problem in the function class
in which the estimate is derived. This provides an
alternative method (to the traditional integral equation
method and the unique continuation method) for establishing
uniqueness (and existence) for the Maxwell problem \eqref{e1.1}--\eqref{e1.2}.
\end{remark}
\betagin{remark}
(a) The generalized inf-sup condition \eqref{eq2.0a} is
a stronger result than a stability estimate for the solution of
the Maxwell problem. The reason to restrict $\mathbf{u}{\rm\mathbf i}n \mathbf{H}(\ddiv_0,\Omegagae)$
in \eqref{eq2.0a} is that $\curl$ operator has a non-trivial kernel.
(b) Stability estimates similar to \eqref{e2.1} were established
independently early in \cite{Feng10} and \cite{HMP10}.
\eqref{e2.1} also explicitly shows the dependence on the {\em size}
and the {\em shape constant} of the domain.
Such an estimate plays an important role for designing multilevel Schwarz
preconditioners for discretizations of \eqref{e1.4} and for doing practical
simulations because in practice the size of the computational domain $\Omegagae$ is
often taken to be proportional to the wave length.
In addition, not only the sharp wave number-explicit and domain size-explicit
stability estimate \eqref{e2.1} is obtained as a corollary of the
generalized inf-sup condition \eqref{eq2.0a}, but also the derivation
reveals some deep insights about the dependence of the solution on
the datum functions and the domain.
(c) The generalized inf-sup condition \eqref{eq2.0a} provides a guideline
for constructing ``good" numerical schemes for the Maxwell equations.
We shall call a discretization method ``a coercivity preserving
method" if it satisfies a discrete inf-sup condition which mimics
the continuous inf-sup condition. Constructing such a coercivity preserving
IPDG method is one of the primary goals of this paper.
(d) Generalized inf-sup conditions similar to \eqref{eq2.0a} also hold for
the scalar Helmholtz equation and the elastic Helmholtz equations
(cf. \cite{Feng10b}).
\end{remark}
Based on the above stability estimates in lower norms, one can also derive
stability estimates in higher norms when the solution $\mathbf{E}$ is
sufficiently regular. We state an $H^\deltalta$-estimate for $\curl\mathbf{E}$ below without
giving a proof (cf. \cite[Remark 4.9]{HMP10}).
\betagin{theorem}\lambdabel{high_stability}
Suppose that $\ddiv\mathbf{f} =0$ and the solution $\mathbf{E}$ of problem \eqref{e1.1}--\eqref{e1.2}
satisfies $\mathbf{E}{\rm\mathbf i}n \mathbf{H}^\deltalta(\curl,\Omegagae)$ for $\frac12< \deltalta\leq 1$.
Then there holds estimate
\betagin{align}\lambdabel{e2.100a}
\|\mathbf{E}\|_{\mathbf{H}^\deltalta(\curl,\Omegagae)} \lesssim (1+\lambdambda+k) M(\mathbf{f},\mathbf{g})
+ \|\mathbf{g}\|_{H^{\frac12}(\Gammama)},
\end{align}
where
\betagin{align}\lambdabel{e2.100b}
\mathbf{H}^\deltalta(\curl,\Omegagae) &:=\bigl\{\mathbf{u}{\rm\mathbf i}n \mathbf{H}^\deltalta(\Omegagae);\,
\curl\mathbf{u}{\rm\mathbf i}n \mathbf{H}^\deltalta(\Omegagae) \bigr\}, \\
\|\mathbf{u}\|_{\mathbf{H}^\deltalta(\curl,\Omegagae)} &:=\Bigl( \|\mathbf{u}\|_{\mathbf{H}^\deltalta(\Omegagae)}^2
+\|\curl \mathbf{u}\|_{\mathbf{H}^\deltalta(\Omegagae)}^2 \Bigr)^{\frac12}.\lambdabel{e2.100c}
\end{align}
\end{theorem}
\section{Formulation of discontinuous Galerkin methods}\lambdabel{sec-3}
To formulate our IPDG methods, we first need to introduce some notation.
Let $\{\mathcal{T}_h\}$ be a family of partitions (into tetrahedrons
and/or parallelepipeds)
of the domain $\Omegagae$ parameterized by $h>0$. For any ``element"
$K{\rm\mathbf i}n \mathcal{T}_h$, we define $h_K:=\mbox{diam}(K)$. Similarly, for each
face $\mathcal{F}$ of $K{\rm\mathbf i}n \mathcal{T}_h$, define $h_\mathcal{F}:=\mbox{diam}(\mathcal{F})$.
We assume that the elements of $\mathcal{T}_h$ satisfy the minimal angle
condition. Let
\betagin{eqnarray*}
\mathcal{E}_h^I&:=& \mbox{ set of all interior faces of $\mathcal{T}_h$} ,\\
\mathcal{E}_h^B&:=& \mbox{ set of all boundary faces of $\mathcal{T}_h$ on $\Gammama=\partial\Omegagae$}.
\end{eqnarray*}
We define the jump $\jump{\mathbf{v}}$ and average $\avrg{\mathbf{v}}$ of $\mathbf{v}$ on an interior face
$\mathcal{F}=\partial K\cap \partial K^\partialrime$ as
\[
\jump{\mathbf{v}}|_{\mathcal{F}}:=\left\{\betagin{array}{l}
\mathbf{v}|_{K}-\mathbf{v}|_{K^\partialrime}, \mbox{ if the global label of $K$ is bigger},\\
\mathbf{v}|_{K^\partialrime}-\mathbf{v}|_{K}, \mbox{ if the global label of $K^\partialrime$ is bigger},
\end{array} \right.\;\; \avrg{\mathbf{v}}|_{\mathcal{F}}:=\frac12\bigl( \mathbf{v}|_{K}+ \mathbf{v}|_{K^\partialrime} \bigr).
\]
If $\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^B$, set $\jump{\mathbf{v}}|_{\mathcal{F}}=\mathbf{v}|_{\mathcal{F}}$ and $\avrg{\mathbf{v}}|_{\mathcal{F}}=\mathbf{v}|_{\mathcal{F}}$. For every
$\mathcal{F}=\partial K\cap \partial K^\partialrime{\rm\mathbf i}n\mathcal{E}_h^I$, let $\mathbf{n}u_\mathcal{F}$ be the unit outward normal
to the face $\mathcal{F}$ of the element $K$ if the global label of $K$ is bigger
and of the element $K^\partialrime$ otherwise. For every
$\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^B$, let $\mathbf{n}u_\mathcal{F}=\mathbf{n}u$ the unit outward normal to $\partialartial\Omegaga$.
To formulate our IPDG methods, we recall the following (local) integration
by parts formula:
\betagin{align}\lambdabel{e3.1}
(\curl\mathbf{E},\mathbf{F})_K =(\mathbf{E},\curl\mathbf{F})_K
- \lambdangle \mathbf{E}\times \mathbf{n}u_K, \mathbf{F}_T\rangle_{\partial K}.
\end{align}
where $\mathbf{F}_T=(\mathbf{n}u_K\times \mathbf{F})\times \mathbf{n}u_K$.
Next, multiplying equation \eqref{e1.1} by a
test function $\overline{\bfF}$, integrating over $K{\rm\mathbf i}n \mathcal{T}_h$,
using the integration by parts formula \eqref{e3.1}, and
summing the resulted equation over all $K{\rm\mathbf i}n \mathcal{T}_h$ we get
\betagin{align}\lambdabel{e3.2}
\sum_{K{\rm\mathbf i}n\mathcal{T}_h} \bigl( (\curl \mathbf{E},\curl\mathbf{F})_K
- \lambdangle\curl \mathbf{E}\times \mathbf{n}u_K, \mathbf{F}_T\rangle_{\partial K} \bigr)
-k^2 (\mathbf{E},\mathbf{F})_\Omegagae= (\mathbf{f},\mathbf{F})_\Omegagae.
\end{align}
To deal with the boundary terms in the big sum, we appeal
to the following algebraic identity. For each interior
face $\mathcal{F}=K\cap K'{\rm\mathbf i}n \mathcal{E}_h^I$ there holds
\betagin{align}\lambdabel{e3.3}
\lambdangle\curl \mathbf{E}\times \mathbf{n}u_K, \mathbf{F}_T\rangle_{\mathcal{F}}
&+\lambdangle\curl \mathbf{E}\times \mathbf{n}u_{K'}, \mathbf{F}_T\rangle_{\mathcal{F}} \\
&=\big\lambdangle \jump{\curl \mathbf{E}\times\mathbf{n}u_{\mathcal{F}}},\avrg{\mathbf{F}_T}\bigr\rangle_{\mathcal{F}}
+\big\lambdangle \avrg{\curl \mathbf{E}\times\mathbf{n}u_{\mathcal{F}}},\jump{\mathbf{F}_T}\bigr\rangle_{\mathcal{F}}.
\nonumber
\end{align}
Substituting identity \eqref{e3.3} into \eqref{e3.2}
after dropping the first term on the right-hand side of \eqref{e3.3}
(because $\jump{\curl \mathbf{E}\times \mathbf{n}u_{\mathcal{F}}}|_{\mathcal{F}}=0$ if $\mathbf{E}$ is
sufficiently regular) yields
\betagin{align*}
\sum_{K{\rm\mathbf i}n\mathcal{T}_h} (\curl \mathbf{E},\curl\mathbf{F})_K
&-\sum_{\mathcal{F}{\rm\mathbf i}n \mathcal{E}_h^I} \big\lambdangle \avrg{\curl \mathbf{E}\times \mathbf{n}u_\mathcal{F}},
\jump{\mathbf{F}_T}\bigr\rangle_{\mathcal{F}}\\
&-\big\lambdangle \curl \mathbf{E}\times \mathbf{n}u,
\mathbf{F}_T\bigr\rangle_{\Gammama} -k^2 (\mathbf{E},\mathbf{F})_\Omegagae= (\mathbf{f},\mathbf{F})_\Omegagae.
\end{align*}
Utilizing the boundary condition \eqref{e1.2} in the third term
on the left-hand side and adding a ``symmetrization" term then lead
to the following equation:
\betagin{align}\lambdabel{e3.4}
&\sum_{K{\rm\mathbf i}n\mathcal{T}_h} (\curl \mathbf{E},\curl\mathbf{F})_K
-\sum_{\mathcal{F}{\rm\mathbf i}n \mathcal{E}_h^I} \Bigl( \big\lambdangle \avrg{\curl \mathbf{E}\times \mathbf{n}u_\mathcal{F}},
\jump{\mathbf{F}_T}\bigr\rangle_{\mathcal{F}} \\
+&\varepsilonsilon \big\lambdangle \jump{\mathbf{E}_T},\avrg{\curl\mathbf{F}\times \mathbf{n}u_\mathcal{F}}\bigr\rangle_{\mathcal{F}}
\Bigr)-{\rm\mathbf i}\lambdambda \lambdangle \mathbf{E}_T, \mathbf{F}_T\rangle_{\Gammama}
-k^2 (\mathbf{E},\mathbf{F})_\Omegagae
=(\mathbf{f},\mathbf{F})_\Omegagae + \lambdangle\mathbf{g}, \mathbf{F}_T\rangle_{\Gammama} \nonumber
\end{align}
where $\varepsilonsilon=-1, 0, 1$.
The most important and tricky issue for designing an IPDG method is
how to introduce suitable {\em interior penalty} term(s)
on the left-hand side of \eqref{e3.4}. Obviously, different
interior penalty terms will result in different numerical methods.
As it was proved in \cite{HPSS05}, using the standard interior
penalty terms will lead to IPDG methods which require
a restrictive mesh constraint to ensure the stability and
accuracy in the case of large wave number $k$. Inspired
by our previous work \cite{fw08a} on IPDG methods for the Helmholtz
equation and guided by our stability analysis
(see section \ref{sec-4}), here we introduce some non-standard
interior penalty terms into \eqref{e3.4}, which we shall describe
below, and the IPDG method so constructed will be proved
to be absolutely stable (with respect to wave number $k$ and
mesh size $h$) in the next section.
To define our IPDG methods, we first introduce
the ``energy" space $\mathbf{V}$ and the sesquilinear
form $b_h^\varepsilonsilon(\cdot,\cdot)$ on $\mathbf{V}\times \mathbf{V}$ as follows.
\betagin{align*}
\mathbf{V}&:=\partialrimeod_{K{\rm\mathbf i}n\mathcal{T}_h} \mathbf{V}_K,\quad \mathbf{V}_K:=\bigl\{ \mathbf{v}{\rm\mathbf i}n \mathbf{H}(\curl,K);\, \mathbf{v}|_{\partial K} {\rm\mathbf i}n \mathbf{L}^2(\partial K),\,
\curl \mathbf{v}|_{\partial K} {\rm\mathbf i}n \mathbf{L}^2(\partial K) \bigr\}.
\end{align*}
\betagin{align}\lambdabel{eah}
b_h^\varepsilonsilon(\mathbf{u},\mathbf{v}):=&\sum_{K{\rm\mathbf i}n\mathcal{T}_h} (\curl\mathbf{u},\curl\mathbf{v})_K
\\
&-\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I} \Bigl( \big\lambdangle \avrg{\curl \mathbf{u}\times \mathbf{n}u_\mathcal{F}},
\jump{\mathbf{v}_T}\bigr\rangle_{\mathcal{F}} +\varepsilonsilon\big\lambdangle \jump{\mathbf{u}_T},\avrg{\curl\mathbf{v}\times \mathbf{n}u_\mathcal{F}}\bigr\rangle_{\mathcal{F}}
\Bigr)\nonumber\\
&- {\rm\mathbf i} \bigl(\mathcal{J}_0(\mathbf{u},\mathbf{v})
+\mathcal{J}_1(\mathbf{u},\mathbf{v}) \bigr), \nonumber\displaybreak[0]\\
\mathcal{J}_0(\mathbf{u},\mathbf{v}):=&\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I}\frac{\gammama_{0,\mathcal{F}}}{h_\mathcal{F}}\,
\bigl\lambdangle \jump{\mathbf{u}_T},\jump{\mathbf{v}_T}\bigr\rangle_\mathcal{F},\lambdabel{e3.6}\displaybreak[0]\\
\mathcal{J}_1(\mathbf{u},\mathbf{v}):=&\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I} \gammama_{1,\mathcal{F}} h_\mathcal{F}
\bigl\lambdangle\jump{\curl\mathbf{u}\times\mathbf{n}u_\mathcal{F}},\jump{\curl\mathbf{v}\times\mathbf{n}u_\mathcal{F}}
\bigr\rangle_\mathcal{F}, \lambdabel{cJ1}
\end{align}
where $\gammama_{0,\mathcal{F}}$ and $\gammama_{1,\mathcal{F}}$ are nonnegative numbers to be specified later.
\betagin{remark}
(a) Clearly, $b_h^\varepsilonsilon(\cdot,\cdot)$ is a consistent discretization for
$\curl\curl$ since $(\curl\curl \mathbf{u},\mathbf{v})_\Omegagae = b_h^\varepsilonsilon(\mathbf{u},\mathbf{v})$
for all $\mathbf{u}{\rm\mathbf i}n \mathbf{H}^2(\Omegagae)$ and $\mathbf{v}{\rm\mathbf i}n \mathbf{V}$ with $\mathbf{v}_T|_\Gammamamma=0$.
(b) The terms in $-{\rm\mathbf i}\bigl(\mathcal{J}_0(\mathbf{u},\mathbf{v})+\mathcal{J}_1(\mathbf{u},\mathbf{v}) \bigr)$
are called penalty terms.
The penalty parameters $-{\rm\mathbf i}\gammamamma_{0,\mathcal{F}}$ and $-{\rm\mathbf i}\gammamamma_{1,\mathcal{F}}$ are
pure imaginary numbers with {\em negative} imaginary parts. Our analysis still applies if they are taken as complex numbers of negative imaginary parts.
(c) The $\mathcal{J}_0$ term penalizes the jumps of the vector field $\mathbf{u}$
and the $\mathcal{J}_1$ term penalizes the jumps of the
tangential component of the vector field $\curl \mathbf{u}$,
which, to the best of our knowledge, has not been used before in the
context of IPDG methods for the Maxwell equations.
They play a vital role for our IPDG methods being
absolutely stable, see section \ref{sec-4}.
(d) $\varepsilonsilon=-1,0,1$ correspond to the nonsymmetric, incomplete, and
symmetric IPDG methods for the Poisson problem. In the remainder of
this paper, we shall only consider the symmetric case $\varepsilonsilon=1$
and set $b_h(\cdot,\cdot)=b_h^1(\cdot,\cdot)$ for notation brevity.
\end{remark}
With the help of the sesquilinear form $b_h(\cdot,\cdot)$ we now
introduce the following weak formulation for \eqref{e1.1}--\eqref{e1.2}:
Find $\mathbf{E}{\rm\mathbf i}n \mathbf{V}\cap \mathbf{H}(\curl,\Omegagae)$ such that
\betagin{equation} \lambdabel{e3.16}
a_h(\mathbf{E},\mathbf{F}) =(\mathbf{f},\mathbf{F})_\Omegagae +\lambdangle \mathbf{g}, \mathbf{F}_T\rangle_{\Gammama}
\qquad \forall \mathbf{F}{\rm\mathbf i}n \mathbf{V}\cap \mathbf{H}(\curl,\Omegagae),
\end{equation}
where
\betagin{equation} \lambdabel{e3.16a}
a_h(\mathbf{E},\mathbf{F}):= b_h(\mathbf{E},\mathbf{F}) - k^2(\mathbf{E},\mathbf{F})_\Omegagae
-{\rm\mathbf i} \lambdambda \lambdangle \mathbf{E}_T,\mathbf{F}_T\rangle_{\Gammama}.
\end{equation}
From \eqref{e3.4}, it is clear that, if $\mathbf{E}{\rm\mathbf i}n \mathbf{H}^2(\Omegagae)$ is the
solution of \eqref{e1.1}--\eqref{e1.2}, then \eqref{e3.16} holds
for all $\mathbf{F}{\rm\mathbf i}n \mathbf{V}$.
For any $K{\rm\mathbf i}n \mathcal{T}_h$, let $P_r(K)$ denote the set of all complex-valued polynomials
whose degrees in all variables (total degrees) do not exceed $r (\geq 1)$.
We define our IPDG approximation space $\mathbf{V}_h$ as
\[
\mathbf{V}_h:=\partialrimeod_{K{\rm\mathbf i}n \mathcal{T}_h} \mathbf{P}_r(K).
\]
Clearly, $\mathbf{V}_h\subset \mathbf{V}\subset \mathbf{L}^2(\Omegagae)$. But $\mathbf{V}_h\not\subset
\mathbf{H}(\curl,\Omegagae)$.
We are now ready to define our IPDG methods based on the weak formulation
\eqref{e3.16}: Find $\mathbf{E}_h{\rm\mathbf i}n \mathbf{V}_h$ such that for all $\mathbf{F}_h{\rm\mathbf i}n \mathbf{V}_h$
\betagin{equation}\lambdabel{e3.17}
a_h(\mathbf{E}_h,\mathbf{F}_h)
=(f,\mathbf{F}_h)_\Omegagae + \bigl\lambdangle \mathbf{g}, (\mathbf{F}_h)_{T} \bigr\rangle_{\Gammama}.
\end{equation}
We note that \eqref{e3.17} defines a family of IPDG methods
for $r\geq 1$. For the ease of presentation and to better
present ideas, in the rest of this paper we only consider the
case $r=1$, the linear element case.
In the next two sections, we shall study the stability and
error estimates for the above IPDG method with $r=1$. Especially,
we are interested in knowing how the stability constants and error
constants depend on the wave number $k$ (and mesh size $h$, of course)
and what is the ``optimal'' relationship between the mesh size $h$ and
the wave number $k$. We remark that the IPDG method with $r=1$ uses piecewise linear polynomials even for Cartesian meshes. By contrast, for the corresponding linear conforming edge element method on Cartesian meshes, the trial functions have to be chosen as piecewise trilinear polynomials.
We also note that the linear system resulted from \eqref{e3.17} is
ill-conditioned and strongly indefinite because the coefficient
matrix has many eigenvalues with very large negative real
parts. Solving such a large linear system is another challenging problem
associated with time harmonic Maxwell problems, which will be addressed
in a future work.
For further analysis we introduce the following semi-norms/norms on $\mathbf{V}$:
\betagin{align} \lambdabel{e3.11}
\|\curl\mathbf{v}\|_{\mathbf{L}^2(\mathcal{T}_h)}^2
:=&\sum_{K{\rm\mathbf i}n\mathcal{T}_h} \norml{\curl \mathbf{v}}{K}^2, \displaybreak[0]\\
\norm{\mathbf{v}}_{DG}^2 :=&\|\curl\mathbf{v}\|_{\mathbf{L}^2(\mathcal{T}_h)}^2 +\norml{\mathbf{v}}{\Omegaga}^2
\lambdabel{e3.12} \\
&+
\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I} \Bigl( \frac{\gammama_{0,\mathcal{F}}}{h_\mathcal{F}} \norml{\jm{\mathbf{v}_T}}{\mathcal{F}}^2
+\gammama_{1,\mathcal{F}} h_\mathcal{F} \norml{\jm{\curl\mathbf{v}\times \mathbf{n}u_\mathcal{F}}}{\mathcal{F}}^2 \Bigr)
\nonumber\displaybreak[0]\\
=& \|\curl\mathbf{v}\|_{\mathbf{L}^2(\mathcal{T}_h)}^2 +\norml{\mathbf{v}}{\Omegaga}^2+\mathcal{J}_0(\mathbf{v},\mathbf{v}) + \mathcal{J}_1(\mathbf{v},\mathbf{v}), \nonumber \displaybreak[0]\\
\norme{\mathbf{v}}^2 :=&\norm{\mathbf{v}}_{DG}^2 +\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I}
\frac{h_\mathcal{F}}{\gammama_{0,\mathcal{F}}}\norml{\av{\curl \mathbf{v}\times \mathbf{n}u_\mathcal{F}}}{\mathcal{F}}^2.
\lambdabel{e3.13}
\end{align}
Clearly, the sesquilinear form $b_h(\cdot,\cdot)$ satisfies:
For any $\mathbf{v}{\rm\mathbf i}n \mathbf{V}$
\betagin{align} \lambdabel{e3.14}
\re b_h(\mathbf{v},\mathbf{v})&=\|\curl\mathbf{v}\|_{\mathbf{L}^2(\mathcal{T}_h)}^2
-2\re \sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I} \bigl\lambdangle \avrg{\curl \mathbf{v}\times\mathbf{n}u_\mathcal{F}},
\jump{\mathbf{v}_T} \bigr\rangle_\mathcal{F}, \\
{\rm\mathbf i}m b_h(\mathbf{v},\mathbf{v})&=-\mathcal{J}_0(\mathbf{v},\mathbf{v})-\mathcal{J}_1(\mathbf{v},\mathbf{v}). \lambdabel{e3.15}
\end{align}
\section{Discrete coercivity and stability estimates}\lambdabel{sec-4}
In this section we shall prove that the discrete sesquilinear form
$a_h(\cdot,\cdot)$ satisfies a discrete coercivity, which
is slightly stronger than the generalized inf-sup
condition proved in the previous section for the sesquilinear form
$a(\cdot,\cdot)$. Such a discrete coercivity is possible for
the linear element because $\curl\curl \mathbf{v}_h=0$ (defined element-wise) for all
$\mathbf{v}_h{\rm\mathbf i}n \mathbf{V}_h$. As an immediate corollary of the discrete
coercivity, we shall derive a priori estimates for solutions of
\eqref{e3.17} for all $h,k>0$, which then imply
the well-posedness of \eqref{e3.17}.
We state the first main theorem of this section which
establishes a coercivity for the discrete sesquilinear
form $a_h(\cdot,\cdot)$.
\betagin{theorem}\lambdabel{discrete_coercivity}
Let $\gammama_0=\min_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I} \set{\gammama_{0,\mathcal{F}}}$,
$\gammama_1=\min_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I} \set{\gammama_{1,\mathcal{F}}}$, and
$h_{\min}= \min_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h} \set{h_{\mathcal{F}}}$.
Then there exists a constant $0<C<1$ such that
\begin{align}\label{e4.5a}
|a_h(\mathbf{u}_h,\mathbf{u}_h)| &\geq \frac{C}{\gamma_h} \|\mathbf{u}_h\|_{E,h}^2
\qquad \forall \mathbf{u}_h\in \mathbf{V}_h
\end{align}
for all $k, \gamma_{0}, \gamma_{1}>0$, where
\begin{align}\label{e4.5b}
\gamma_h &:= \frac{1}{\lambda h_{\min}} + \frac{1}{\gamma_1 k^2 h_{\min}^2}
+\frac{1}{\gamma_0}+1, \\
\|\mathbf{u}_h\|_{E,h}&:=\Bigl(\|\curl\mathbf{u}_h\|_{\mathbf{L}^2(\mathcal{T}_h)}^2
+ k^2 \|\mathbf{u}_h\|_{\mathbf{L}^2(\Omega)}^2 \label{e4.5c} \\
&\qquad+\gamma_h\big(\mathcal{J}_0(\mathbf{u}_h,\mathbf{u}_h)+\mathcal{J}_1(\mathbf{u}_h,\mathbf{u}_h)+\lambda \|(\mathbf{u}_h)_T\|_{\mathbf{L}^2(\Gamma)}^2\big) \Bigr)^{\frac12}. \nonumber
\end{align}
\end{theorem}
\betagin{proof}
For any $\mathcal{F}{\rm\mathbf i}n \mathcal{E}_h$, define $\Omegagae_\mathcal{F}:=\bigcup\big\{K{\rm\mathbf i}n\mathcal{T}_h; \, \partialartial K\cap
\mathcal{F}\neq \emptyset \big\}$. By \eqref{e3.16a}, \eqref{e3.14}, and the following trace inequality
\betagin{equation}\lambdabel{trace_ineq}
\|\{\mathbf{v}_h\}\|_{\mathbf{L}^2(\mathcal{F})} \leq C h_\mathcal{F}^{-\frac12}\|\mathbf{v}_h\|_{\mathbf{L}^2(\Omegagae_\mathcal{F})}
\qquad\forall \mathbf{v}_h{\rm\mathbf i}n \mathbf{V}_h
\end{equation}
for some $h_\mathcal{F}$-independent positive constant $C$, we get
\betagin{align}\lambdabel{e4.6}
&\re a_h(\mathbf{u}_h, \mathbf{u}_h)
\leq\|\curl\mathbf{u}_h\|_{\mathbf{L}^2(\mathcal{T}_h)}^2 - k^2\|\mathbf{u}_h\|_{\mathbf{L}^2(\Omegagae)}^2\\
&\hskip 1.2in
+2\sum_{\mathcal{F}{\rm\mathbf i}n \mathcal{E}_h^I}
\|\{\curl\mathbf{u}_h\times \mathbf{n}u_\mathcal{F}\}\|_{\mathbf{L}^2(\mathcal{F})} \|[(\mathbf{u}_h)_T]\|_{\mathbf{L}^2(\mathcal{F})}\nonumber\\
&\hskip 0.5in
\leq\|\curl\mathbf{u}_h\|_{\mathbf{L}^2(\mathcal{T}_h)}^2 - k^2\|\mathbf{u}_h\|_{\mathbf{L}^2(\Omegagae)}^2 \nonumber\\
&\hskip 1.2in
+C \sum_{\mathcal{F}{\rm\mathbf i}n \mathcal{E}_h^I} h_\mathcal{F}^{-\frac12} \|\curl\mathbf{u}_h\|_{\mathbf{L}^2(\Omegagae_\mathcal{F})}
\|[(\mathbf{u}_h)_T]\|_{\mathbf{L}^2(\mathcal{F})}\nonumber\\
&\hskip 0.5in
\leq \frac32 \|\curl\mathbf{u}_h\|_{\mathbf{L}^2(\mathcal{T}_h)}^2 - k^2\|\mathbf{u}_h\|_{\mathbf{L}^2(\Omegagae)}^2
+C \sum_{\mathcal{F}{\rm\mathbf i}n \mathcal{E}_h^I} h_\mathcal{F}^{-1} \|[(\mathbf{u}_h)_T]\|_{\mathbf{L}^2(\mathcal{F})}^2.
\nonumber
\end{align}
Since $\mathbf{u}_h\in \mathbf{V}_h$ is piecewise linear, we have $\curl\curl\mathbf{u}_h=0$
in each $K\in \mathcal{T}_h$. By integrating by parts and using the trace inequality
\eqref{trace_ineq} we obtain
\betagin{align*}
&\norml{\curl\mathbf{u}_h}{\mathcal{T}_h}^2
=\sum_{K{\rm\mathbf i}n\mathcal{T}_h} \bigl( \curl\mathbf{u}_h, \curl\mathbf{u}_h \bigr)_K\\
&\qquad
=\sum_{K{\rm\mathbf i}n\mathcal{T}_h}\Bigl( \bigl(\curl\curl\mathbf{u}_h, \mathbf{u}_h \bigr)_K
-\Lambdangle\curl\mathbf{u}_h\times\mathbf{n}u_K,(\mathbf{u}_h)_T\mathbb{R}angle_{\partialartial K} \Bigr) \nonumber \displaybreak[0]\\
&\qquad
=-\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^B} \Lambdangle\curl\mathbf{u}_h\times\mathbf{n}u_K,(\mathbf{u}_h)_T\mathbb{R}angle_{\mathcal{F}} \nonumber \\
&\qquad\qquad
-\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I}\Bigl(\Lambdangle[\curl\mathbf{u}_h\times\mathbf{n}u_\mathcal{F}],\{(\mathbf{u}_h)_T\}
\mathbb{R}angle_{\mathcal{F}}+\Lambdangle \{\curl\mathbf{u}_h\times\mathbf{n}u_\mathcal{F}\},[(\mathbf{u}_h)_T]\mathbb{R}angle_{\mathcal{F}}\Bigr)
\nonumber \displaybreak[0]\\
&\qquad
\leq C\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^B} h_\mathcal{F}^{-\frac12} \|\curl \mathbf{u}_h\|_{\mathbf{L}^2(\Omegagae_\mathcal{F})}
\|(\mathbf{u}_h)_T\|_{\mathbf{L}^2(\mathcal{F})} \nonumber \\
&\qquad\qquad
+ C\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I} h_\mathcal{F}^{-\frac12}
\|[\curl\mathbf{u}_h\times\mathbf{n}u_\mathcal{F}]\|_{\mathbf{L}^2(\mathcal{F})} \|\mathbf{u}_h\|_{\mathbf{L}^2(\Omegagae_\mathcal{F})} \nonumber \\
&\qquad\qquad
+ C\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I} h_\mathcal{F}^{-\frac12}
\|\curl\mathbf{u}_h\|_{\mathbf{L}^2(\Omegagae_\mathcal{F})} \|[(\mathbf{u}_h)_T]\|_{\mathbf{L}^2(\mathcal{F})} \nonumber \displaybreak[0]\\
&\qquad
\leq \frac13 \|\curl\mathbf{u}_h\|_{\mathbf{L}^2(\mathcal{T}_h)}^2 + \frac{k^2}{6} \|\mathbf{u}_h\|_{\mathbf{L}^2(\Omegagae)}^2
+C \sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^B} h_\mathcal{F}^{-1} \|(\mathbf{u}_h)_T\|_{\mathbf{L}^2(\mathcal{F})}^2 \nonumber\\
&\qquad\qquad
+C\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I} h_\mathcal{F}^{-1}\Bigl( \|[(\mathbf{u}_h)_T]\|_{\mathbf{L}^2(\mathcal{F})}^2
+k^{-2} \|[\curl\mathbf{u}_h\times\mathbf{n}u_\mathcal{F}]\|_{\mathbf{L}^2(\mathcal{F})}^2 \Bigr). \nonumber
\end{align*}
Hence,
\betagin{align}\lambdabel{e4.7}
&2\norml{\curl\mathbf{u}_h}{\mathcal{T}_h}^2 \leq \frac{k^2}{2} \|\mathbf{u}_h\|_{\mathbf{L}^2(\Omegagae)}^2
+C \sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^B} h_\mathcal{F}^{-1} \|(\mathbf{u}_h)_T\|_{\mathbf{L}^2(\mathcal{F})}^2 \\
&\hskip 1.0in
+C\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I} h_\mathcal{F}^{-1}\Bigl( \|[(\mathbf{u}_h)_T]\|_{\mathbf{L}^2(\mathcal{F})}^2
+ k^{-2} \|[\curl\mathbf{u}_h\times\mathbf{n}u_\mathcal{F}]\|_{\mathbf{L}^2(\mathcal{F})}^2 \Bigr). \nonumber
\end{align}
Adding \eqref{e4.6} and \eqref{e4.7} and rearranging the terms yield
\betagin{align}\lambdabel{e4.8}
&\|\curl\mathbf{u}_h\|_{\mathbf{L}^2(\mathcal{T}_h)}^2 + k^2\|\mathbf{u}_h\|_{\mathbf{L}^2(\Omegagae)}^2 \\
&
\leq -2\re a_h(\mathbf{u}_h, \mathbf{u}_h)
+\frac{C}{\lambdambda h_{\min}} \lambdambda \|(\mathbf{u}_h)_T\|_{\mathbf{L}^2(\Gammamamma)}^2+
\frac{C}{\gammamamma_0}\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I} \frac{\gammama_{0,\mathcal{F}}}{h_\mathcal{F}} \norml{\jm{(\mathbf{u}_h)_T}}{\mathcal{F}}^2 \nonumber \\
&\quad+ \frac{C}{\gammamamma_1 k^2 h_{\min}^2} \sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I}\gammama_{1,\mathcal{F}} h_\mathcal{F} \norml{\jm{\curl\mathbf{u}_h\times\mathbf{n}u_\mathcal{F}}}{\mathcal{F}}^2 .\nonumber
\end{align}
Therefore, by the definitions of $\mathcal{J}_0(\cdot,\cdot)$ and $\mathcal{J}_1(\cdot,\cdot)$ and
the identity \eqref{e3.15} we get
\betagin{align*}
&\|\curl\mathbf{u}_h\|_{\mathbf{L}^2(\mathcal{T}_h)}^2 + k^2\|\mathbf{u}_h\|_{\mathbf{L}^2(\Omegagae)}^2
+\gammama_h\big(\mathcal{J}_0(\mathbf{u}_h,\mathbf{u}_h)+\mathcal{J}_1(\mathbf{u}_h,\mathbf{u}_h)+\lambdambda \|(\mathbf{u}_h)_T\|_{\mathbf{L}^2(\Gammamamma)}^2\big)\displaybreak[0]\\
&\qquad
\leq -2\re a_h(\mathbf{u}_h, \mathbf{u}_h)
+ C\gammama_h\big(\mathcal{J}_0(\mathbf{u}_h,\mathbf{u}_h)+\mathcal{J}_1(\mathbf{u}_h,\mathbf{u}_h)+\lambdambda \|(\mathbf{u}_h)_T\|_{\mathbf{L}^2(\Gammamamma)}^2\big) \nonumber \displaybreak[0]\\
&\qquad
= -2\re a_h(\mathbf{u}_h, \mathbf{u}_h) - C \gammamamma_h {\rm\mathbf i}m a_h(\mathbf{u}_h, \mathbf{u}_h) \nonumber \displaybreak[0]\\
&\qquad
\leq C \gammamamma_h \bigl(|\re a_h(\mathbf{u}_h,\mathbf{u}_h)|+|{\rm\mathbf i}m a_h(\mathbf{u}_h, \mathbf{u}_h)|\bigr) \nonumber\displaybreak[0]
\leq C \gammamamma_h |a_h(\mathbf{u}_h, \mathbf{u}_h)|, \nonumber
\end{align*}
where $\gammamamma_h$ is defined by \eqref{e4.5b}. Hence, \eqref{e4.5a} holds.
The proof is completed.
\end{proof}
\betagin{remark}
(a) The discrete sesquilinear form $a_h(\cdot,\cdot)$ satisfies a stronger
coercivity than its continuous counterpart $a(\cdot,\cdot)$ does,
see Theorem \ref{inf-sup}. Moreover, the proof of Theorem
\ref{discrete_coercivity} is simpler than that of Theorem \ref{inf-sup}.
All of this is possible because of the special form of $a_h(\cdot,\cdot)$
and the fact that $\curl\curl \mathbf{v}_h=0$ in each $K\in \mathcal{T}_h$ for all piecewise
linear functions $\mathbf{v}_h\in \mathbf{V}_h$. However, only a weak coercivity is
expected to hold in the case of high-order elements.
(b) It is also important to point out that Theorem \ref{discrete_coercivity}
holds without assuming that $\Omegagae$ is a star-shaped domain.
\end{remark}
Immediate consequences of the above discrete coercivity are the
following a priori estimates for solutions to the IPDG method \eqref{e3.17}.
\begin{theorem}\label{discrete_stability}
Every solution $\mathbf{E}_h$ of the IPDG method \eqref{e3.17} satisfies the
following stability estimates:
\betagin{align*}
&\norml{\curl\mathbf{E}_h}{\mathcal{T}_h}+ k\norml{\mathbf{E}_h}{\Omegaga}
\lesssim k^{-1}\gammama_h \|\mathbf{f}\|_{\mathbf{L}^2(\Omegagae)}+(\lambda^{-1}\gammama_h)^{\frac12}\|\mathbf{g}\|_{\mathbf{L}^2(\Gammamamma)}, \\
&\bigl( \mathcal{J}_0(\mathbf{E}_h,\mathbf{E}_h) + \mathcal{J}_1(\mathbf{E}_h,\mathbf{E}_h)+\lambdambda \|(\mathbf{E}_h)_T\|_{\mathbf{L}^2(\Gammamamma)}^2 \bigr)^{\frac12}
\lesssim k^{-1}\gammama_h^\frac12 \|\mathbf{f}\|_{\mathbf{L}^2(\Omegagae)}+\lambda^{-\frac12}\|\mathbf{g}\|_{\mathbf{L}^2(\Gammamamma)}.
\end{align*}
\end{theorem}
\betagin{proof}
By \eqref{e3.17} and Schwarz inequality we get
\betagin{align*}
\abs{a_h(\mathbf{E}_h,\mathbf{E}_h)}&=\bigl|(\mathbf{f},\mathbf{E}_h)_\Omegagae + \lambdangle \mathbf{g},(\mathbf{E}_h)_T \rangle_\Gammamamma \bigr|
\leq \|\mathbf{f}\|_{\mathbf{L}^2(\Omegagae)} \|\mathbf{E}_h\|_{\mathbf{L}^2(\Omegagae)}
+ \|\mathbf{g}\|_{\mathbf{L}^2(\Gammamamma)} \|(\mathbf{E}_h)_T\|_{\mathbf{L}^2(\Gammamamma)} \\
&
\leq \bigl( k^2 \|\mathbf{E}_h\|_{\mathbf{L}^2(\Omega)}^2
+ \lambda\gamma_h \|(\mathbf{E}_h)_T\|_{\mathbf{L}^2(\Gamma)}^2\bigr)^{\frac12}
\bigl( k^{-2} \|\mathbf{f}\|_{\mathbf{L}^2(\Omegagae)}^2
+ (\lambdambda\gammama_h)^{-1} \|\mathbf{g}\|_{\mathbf{L}^2(\Gammamamma)}^2 \bigr)^{\frac12} \\
&
\leq \|\mathbf{E}_h\|_{E,h}\, \bigl( k^{-1} \|\mathbf{f}\|_{\mathbf{L}^2(\Omegagae)}
+ (\lambdambda\gammama_h)^{-\frac12} \|\mathbf{g}\|_{\mathbf{L}^2(\Gammamamma)} \bigr).
\end{align*}
The desired estimates follow from combining the above inequality
with \eqref{e4.5a}. The proof is completed.
\end{proof}
The above discrete stability estimates in turn immediately imply the
well-posedness of the IPDG method \eqref{e3.17}.
\betagin{corollary}\lambdabel{existence}
There exists a unique solution to \eqref{e3.17} for any fixed set of
parameters $k, h_\mathcal{F}, \gammama_{0,\mathcal{F}}, \gammama_{1,\mathcal{F}}>0$.
\end{corollary}
\section{Error estimates}\lambdabel{sec-5}
In what follows, we suppose $\gammama_{0,\mathcal{F}}\sigmameq \gammama_0$ and
$\gammama_{1,\mathcal{F}}\sigmameq \gammama_1$ for brevity. For simplicity, we assume
that ${\rm div\,}\mathbf{f}=0$ and that $\mathcal{T}_h$ is a quasi-uniform
partition of $\Omegagae$ consisting of tetrahedrons. Let $h:=\max\{h_K;\, K{\rm\mathbf i}n \mathcal{T}_h\}$.
\subsection{$\mathbf{H}(\curl,\Omegagae)$-elliptic projection and its error estimates}
\lambdabel{sec-5.1}
Let $\mathbf{E}$ be the solution to problem \eqref{e1.1}--\eqref{e1.2}
and $\widetilde\mathbf{E}_h{\rm\mathbf i}n\mathbf{V}_h$ be its IPDG $\mathbf{H}(\curl,\Omegagae)$-elliptic
projection defined as follows.
\betagin{equation}\lambdabel{e5.1}
b_h(\mathbf{E}-\widetilde\mathbf{E}_h,\mathbf{v}_h) +(\mathbf{E}-\widetilde\mathbf{E}_h,\mathbf{v}_h)_\Omegagae=0
\quad\forall \mathbf{v}_h{\rm\mathbf i}n \mathbf{V}_h.
\end{equation}
The following lemma establishes the continuity and coercivity
for the discrete sesquilinear form $b_h(\cdot,\cdot)$.
\betagin{lemma}\lambdabel{lem5.1} For any $\mathbf{v}, \mathbf{w}{\rm\mathbf i}n\mathbf{V}$, the sesquilinear form
$b_h(\cdot,\cdot)$ satisfies
\betagin{equation}\lambdabel{e5.2}
\abs{b_h(\mathbf{v},\mathbf{w})+(\mathbf{v},\mathbf{w})_\Omegaga},\quad \abs{b_h(\mathbf{w},\mathbf{v})+(\mathbf{w},\mathbf{v})_\Omegaga}\lesssim\norme{\mathbf{v}}\norme{\mathbf{w}}.
\end{equation}
In addition, there exists a positive constant $\underline{\gamma}$ such that, for $\gammama_0\ge\underline{\gamma}$,
\betagin{equation}\lambdabel{e5.3}
\re b_h(\mathbf{v}_h,\mathbf{v}_h)-{\rm\mathbf i}m b_h(\mathbf{v}_h,\mathbf{v}_h)+(\mathbf{v}_h,\mathbf{v}_h)_\Omegaga
\ge\frac12\norme{\mathbf{v}_h}^2 \qquad\forall \mathbf{v}_h{\rm\mathbf i}n\mathbf{V}_h.
\end{equation}
\end{lemma}
\betagin{proof}
Clearly, \eqref{e5.2} follows from the definitions \eqref{eah}--\eqref{cJ1}, \eqref{e3.11}--\eqref{e3.13}, and Schwarz inequality. It remains to prove \eqref{e5.3}.
From \eqref{e3.11}--\eqref{e3.15} we have
\betagin{align*}
\re b_h(\mathbf{v}_h,\mathbf{v}_h)&-{\rm\mathbf i}m b_h(\mathbf{v}_h,\mathbf{v}_h)+(\mathbf{v}_h,\mathbf{v}_h)_\Omegaga\\
=& \norme{\mathbf{v}_h}^2
-2\re \sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I} \partialdaj{\curl \mathbf{v}_h\times\mathbf{n}u_\mathcal{F}}{(\mathbf{v}_h)_T}\\
&-\sum_{\mathcal{F}\in\mathcal{E}_h^I}
\frac{h_\mathcal{F}}{\gamma_{0,\mathcal{F}}}\norml{\av{\curl \mathbf{v}_h\times \boldsymbol{\nu}_\mathcal{F}}}{\mathcal{F}}^2.
\end{align*}
It follows from the derivation of \eqref{e4.6} that
there exists a constant $c_0>0$ such that
\betagin{align*}
2\re \sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I}& \partialdaj{\curl \mathbf{v}_h\times\mathbf{n}u_\mathcal{F}}{(\mathbf{v}_h)_T}\\
&\leq \frac{1}{4}\norml{\curl \mathbf{v}_h}{\mathcal{T}_h}^2 +\frac{c_0}{\gammama_0}\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I}\frac{\gammama_{0,\mathcal{F}}}{h_\mathcal{F}}
\norml{\jm{(\mathbf{v}_h)_T}}{\mathcal{F}}^2.
\end{align*}
On the other hand, from \eqref{trace_ineq}, there exists a constant $c_1>0$ such that
\begin{align*}
\sum_{\mathcal{F}\in\mathcal{E}_h^I}
\frac{h_\mathcal{F}}{\gamma_{0,\mathcal{F}}}\norml{\av{\curl \mathbf{v}_h\times \boldsymbol{\nu}_\mathcal{F}}}{\mathcal{F}}^2\le \frac{c_1}{\gamma_0}\norml{\curl \mathbf{v}_h}{\mathcal{T}_h}^2.
\end{align*}
Therefore,
\betagin{align*}
\re b_h(\mathbf{v}_h,\mathbf{v}_h)-{\rm\mathbf i}m b_h(\mathbf{v}_h,\mathbf{v}_h)+(\mathbf{v}_h,\mathbf{v}_h)_\Omegaga
\ge\Big(1-\frac14-\frac{c_0+c_1}{\gammama_0}\Big)\norme{\mathbf{v}_h}^2
\end{align*}
which gives \eqref{e5.3} if $\gammama_0$ large enough. The proof is completed.
\end{proof}
\betagin{remark}\lambdabel{rem5.1}
The coercivity and continuity of $b_h(\cdot,\cdot)$ ensure that
the above $\mathbf{H}(\curl,\Omegagae)$-elliptic projection is well defined.
\end{remark}
The following lemma establishes error estimates for $\mathbf{E}-\widetilde\mathbf{E}_h$.
\betagin{lemma}\lambdabel{lem5.3}
Suppose problem \eqref{e1.1}--\eqref{e1.2} is $H^2$-regular. Then, under the conditions of Lemma~\ref{lem5.1},
the following estimates hold:
\betagin{align}\lambdabel{e5.6}
&\norme{\mathbf{E}-\widetilde\mathbf{E}_h}
\lesssim h\,(1+\gammama_1)^\frac12 \|\mathbf{E}\|_{\mathbf{H}^1(\curl,\Omegagae)},\\
&\|\mathbf{E}-\widetilde\mathbf{E}_h\|_{\mathbf{L}^2(\Omegaga)} \lesssim h^2 (1+\gammama_1) \mathcal R(\mathbf{E}), \lambdabel{e5.7}\\
& \|\mathbf{E}-\widetilde\mathbf{E}_h\|_{\mathbf{L}^2(\Gammama)}\lesssim h^\frac32 (1+\gammama_1) \mathcal R(\mathbf{E}), \lambdabel{e5.8}
\end{align}
where
\betagin{align}\lambdabel{e5.7a}
\mathcal R(\mathbf{E}) &:= (1+\gammama_1)^{\frac12} \|\mathbf{E}\|_{\mathbf{H}^1(\curl,\Omegagae)}
+ \|\mathbf{E}\|_{H^2(\Omegagae)}.
\end{align}
\end{lemma}
\betagin{proof}
{\em Step 1:} It follows from \cite{Monk03, nedelec1986, gm11} that there exists
$\widehat\mathbf{E}_h{\rm\mathbf i}n \mathbf{V}_h\cap \mathbf{H}(\curl,\Omegagae)$ (i.e., the conforming N\'ed\'elec interpolation
of $\mathbf{E}$) such that the following estimates hold:
\betagin{align}\lambdabel{e5.8a}
\|\mathbf{E}-\widehat\mathbf{E}_h\|_{\mathbf{L}^2(\Omegaga)}
&\lesssim h^2\|\mathbf{E}\|_{H^2(\Omegaga)},\\
\|\mathbf{E}-\widehat\mathbf{E}_h\|_{\mathbf{L}^2(\Gammamamma)} &\lesssim h^{\frac32} \|\mathbf{E}\|_{H^2(\Omegaga)},
\lambdabel{e5.8b}\\
\|\mathbf{E}-\widehat\mathbf{E}_h\|_{\mathbf{H}(\curl,\Omegaga)} &\lesssim h\|\mathbf{E}\|_{\mathbf{H}^1(\curl,\Omegagae)},
\lambdabel{e5.8c}\\
\norme{\mathbf{E}-\widehat\mathbf{E}_h}&\lesssim h\, (1+\gammama_1)^\frac12 \|\mathbf{E}\|_{\mathbf{H}^1(\curl,\Omegagae)},\lambdabel{e5.8d}
\end{align}
where \eqref{e5.8d} can be proved by \eqref{e5.8c}, the commuting property between the curl-conforming interpolation operator and the div-conforming interpolation
operator \cite[Lemma~8.13]{Monk03}, and the trace inequality.
Let $\boldsymbol{\Phi}_h:=\widetilde\mathbf{E}_h - \widehat\mathbf{E}_h$
and $\boldsymbol{\Psi}_h := \mathbf{E}-\widehat\mathbf{E}_h$,
then $\mathbf{E}- \widetilde\mathbf{E}_h= \boldsymbol{\Psi}_h - \boldsymbol{\Phi}_h$. By \eqref{e5.1} we have
\betagin{align}\lambdabel{e5.9}
b_h(\boldsymbol{\Phi}_h,\boldsymbol{\Phi}_h) +(\boldsymbol{\Phi}_h,\boldsymbol{\Phi}_h)_\Omegagae
=b_h(\boldsymbol{\Psi}_h,\boldsymbol{\Phi}_h) +(\boldsymbol{\Psi}_h,\boldsymbol{\Phi}_h)_\Omegagae.
\end{align}
{\em Step 2:} From Lemma \ref{lem5.1} and \eqref{e5.9} we get
\betagin{align}\lambdabel{e5.10}
\frac12 \norme{\boldsymbol{\Phi}_h}^2
\leq& \re b_h(\boldsymbol{\Phi}_h,\boldsymbol{\Phi}_h) -{\rm\mathbf i}m b_h(\boldsymbol{\Phi}_h,\boldsymbol{\Phi}_h)+ \norml{\boldsymbol{\Phi}_h}{\Omegaga}^2 \\
=&\re \bigl( b_h(\boldsymbol{\Psi}_h,\boldsymbol{\Phi}_h) + (\boldsymbol{\Psi}_h,\boldsymbol{\Phi}_h)_\Omegagae \bigr) - {\rm\mathbf i}m \bigl( b_h(\boldsymbol{\Psi}_h,\boldsymbol{\Phi}_h) + (\boldsymbol{\Psi}_h,\boldsymbol{\Phi}_h)_\Omegagae\bigr) \nonumber\\
\lesssim& \norme{\boldsymbol{\Phi}_h}\, \norme{\boldsymbol{\Psi}_h}.\nonumber
\end{align}
Therefore, it follows from
\eqref{e5.8d} that
\betagin{align}\lambdabel{e5.11}
\norme{\boldsymbol{\Phi}_h}
&\lesssim \norme{\boldsymbol{\Psi}_h}\lesssim h\, (1+\gammama_1)^\frac12 \|\mathbf{E}\|_{\mathbf{H}^1(\curl,\Omegagae)},
\end{align}
which together with the relation $\mathbf{E}-\widetilde\mathbf{E}_h =\boldsymbol{\Psi}_h - \boldsymbol{\Phi}_h$ and
the triangle inequality immediately infer \eqref{e5.6}.
{\em Step 3:} To show \eqref{e5.7}, we first need the following results that can be proved by following the proof of
\cite[Proposition 4.5]{HPSS05} and their proofs are omitted: for any $\mathbf{v}_h{\rm\mathbf i}n \mathbf{V}_h$ there exists
$\mathbf{v}_h^c{\rm\mathbf i}n \mathbf{V}_h\cap \mathbf{H}(\curl, \Omegagae)$ such that
\betagin{align}
\|\mathbf{v}_h-\mathbf{v}_h^c\|_{\mathbf{L}^2(\Omegagae)}^2
&\leq C\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I} h_\mathcal{F} \norml{\jm{(\mathbf{v}_h)_T}}{\mathcal{F}}^2, \lambdabel{e5.11c} \\
\|\curl(\mathbf{v}_h-\mathbf{v}_h^c)\|_{\mathbf{L}^2(\mathcal{T}_h)}^2 &\leq C\sum_{\mathcal{F}{\rm\mathbf i}n\mathcal{E}_h^I} h_\mathcal{F}^{-1} \norml{\jm{(\mathbf{v}_h)_T}}{\mathcal{F}}^2 . \lambdabel{e5.11d}
\end{align}
Let $\boldsymbol{\Phi}_h^c{\rm\mathbf i}n \mathbf{V}_h\cap \mathbf{H}(\curl, \Omegagae)$
be the conforming approximation of $\boldsymbol{\Phi}_h$ as defined above. Then it follows from the definition of the norm $\norm{\cdot}_{DG}$ (cf. \eqref{e3.12}), the above two estimates, and \eqref{e5.11} that
\betagin{align} \lambdabel{e5.11a}
\|\boldsymbol{\Phi}_h-\boldsymbol{\Phi}_h^c\|_{\mathbf{L}^2(\Omegagae)}& +h\|\curl(\boldsymbol{\Phi}_h-\boldsymbol{\Phi}_h^c)\|_{\mathbf{L}^2(\mathcal{T}_h)} \lesssim\gammama_0^{-\frac12} h \norm{\boldsymbol{\Phi}_h}_{DG}
\lesssim h^2 \mathcal R(\mathbf{E}).
\end{align}
Noting that
\betagin{align*}
&\|\mathbf{E}-\widetilde{\mathbf{E}}_h\|_{\mathbf{L}^2(\Omegagae)}^2
= \bigl(\mathbf{E}-\widetilde{\mathbf{E}}_h, \mathbf{E}-\widehat{\mathbf{E}}_h \bigr)_\Omegagae
- \bigl(\mathbf{E}-\widetilde{\mathbf{E}}_h, \boldsymbol{\Phi}_h^c \bigr)_\Omegagae
- \bigl(\mathbf{E}-\widetilde{\mathbf{E}}_h, \boldsymbol{\Phi}_h-\boldsymbol{\Phi}_h^c \bigr)_\Omegagae,
\end{align*}
we have
\betagin{align*}
\|\mathbf{E}-\widetilde{\mathbf{E}}_h\|_{\mathbf{L}^2(\Omegagae)}&
\leq \|\mathbf{E}-\widehat{\mathbf{E}}_h\|_{\mathbf{L}^2(\Omegagae)} -
\frac{\bigl(\mathbf{E}-\widetilde{\mathbf{E}}_h, \boldsymbol{\Phi}_h^c \bigr)_\Omegagae}{\norml{\mathbf{E}-\widetilde\mathbf{E}_h}{\Omegagae}}
+ \|\boldsymbol{\Phi}_h-\boldsymbol{\Phi}_h^c\|_{\mathbf{L}^2(\Omegagae)},
\end{align*}
which together with \eqref{e5.8a} and \eqref{e5.11a} yields
\betagin{align}\lambdabel{e5.11e}
\|\mathbf{E}-\widetilde{\mathbf{E}}_h\|_{\mathbf{L}^2(\Omegagae)}
\lesssim h^2 \mathcal R(\mathbf{E})
-\frac{\bigl(\mathbf{E}-\widetilde{\mathbf{E}}_h, \boldsymbol{\Phi}_h^c \bigr)_\Omegagae}{\norml{\mathbf{E}-\widetilde\mathbf{E}_h}{\Omegagae}}.
\end{align}
{\em Step 4:} We need to bound the last term on the
right-hand side of \eqref{e5.11e}. Notice that
$\boldsymbol{\Phi}_h^c{\rm\mathbf i}n \mathbf{V}_h\cap \mathbf{H}(\curl,\Omegagae)$, by
using a standard duality argument, see Appendix,
based on the Helmholtz decomposition of $\boldsymbol{\Phi}_h^c$, we can show that
\betagin{align} \lambdabel{e5.11f}
-\frac{\bigl(\mathbf{E}-\widetilde{\mathbf{E}}_h, \boldsymbol{\Phi}_h^c \bigr)_\Omegagae}{\norml{\mathbf{E}-\widetilde\mathbf{E}_h}{\Omegagae}}\lesssim (1+\gammama_1)h^2\mathcal R(\mathbf{E}).
\end{align}
{\em Step 5:} The desired estimate \eqref{e5.7} follows from combining
\eqref{e5.11e} and \eqref{e5.11f}. Finally, \eqref{e5.8} follows from $\|\mathbf{E}-\widetilde\mathbf{E}_h\|_{\mathbf{L}^2(\Gammama)}\le\|\mathbf{E}-\widehat\mathbf{E}_h\|_{\mathbf{L}^2(\Gammama)}+\|\widehat\mathbf{E}_h-\widetilde\mathbf{E}_h\|_{\mathbf{L}^2(\Gammama)}$, \eqref{e5.8b}, the trace inequality, \eqref{e5.8a}, and \eqref{e5.7}. The proof is complete.
\end{proof}
\betagin{remark}
The $P_1$-conforming N\'ed\'elec edge element (of second type)
projection $\widehat\mathbf{E}_h$ of $\mathbf{E}$
is introduced and used in the proof to simplify the analysis at the expense
of requiring $\mathcal{T}_h$ to be a quasi-uniform and conforming mesh.
We note that the proof is still valid if one replaces the
$P_1$-conforming N\'ed\'elec edge element
projection by the $P_1$-IPDG projection without assuming $\mathcal{T}_h$
is a quasi-uniform or conforming mesh. As expected, the new
proof will be more complicated and technical, and is left for
the interested reader to explore.
\end{remark}
\subsection{Error estimates for IPDG method \eqref{e3.17}}
The goal of this subsection is to derive error estimates for scheme
\eqref{e3.17}. Instead of using the well-known Schatz argument
\cite{Schatz74,HPSS05,HPS04,ZSWX09},
which is the (only) technique of choice for deriving error estimates
for indefinite problems in the literature, we shall obtain
our error estimates by exploiting the linearity of the Maxwell equations
and making strong use of the discrete stability estimates proved in
Theorem \ref{discrete_stability}
and the projection error estimates established in Lemma \ref{lem5.3}.
This new technique, which is adapted from \cite{fw08a}, allows
us to derive error estimates for
$\mathbf{e}_h:=\mathbf{E}-\mathbf{E}_h$ without imposing any mesh constraint.
It is easy to check that there holds the following error equation:
\betagin{equation*}
a_h(\mathbf{e}_h,\mathbf{v}_h)=0
\quad\forall \mathbf{v}_h{\rm\mathbf i}n \mathbf{V}_h.
\end{equation*}
Let $\boldsymbol{\eta}_h:= \mathbf{E}-\widetilde\mathbf{E}_h$ and $\boldsymbol{\xi}_h:= \mathbf{E}_h-\widetilde\mathbf{E}_h$,
then $\mathbf{e}_h=\boldsymbol{\eta}_h-\boldsymbol{\xi}_h$. From \eqref{e5.1} we get
\begin{align}\label{e5.13}
a_h(\boldsymbol{\xi}_h,\mathbf{v}_h) &=a_h(\boldsymbol{\eta}_h,\mathbf{v}_h) =b_h(\boldsymbol{\eta}_h,\mathbf{v}_h) -k^2 (\boldsymbol{\eta}_h,\mathbf{v}_h)_\Omega
-\mathrm{i}\lambda \langle (\boldsymbol{\eta}_h)_T,(\mathbf{v}_h)_T \rangle_\Gamma \\
&=-(k^2+1) (\boldsymbol{\eta}_h,\mathbf{v}_h)_\Omega-\mathrm{i}\lambda \langle (\boldsymbol{\eta}_h)_T,(\mathbf{v}_h)_T \rangle_\Gamma \qquad\forall \mathbf{v}_h\in \mathbf{V}_h. \nonumber
\end{align}
The above equation implies that $\boldsymbol{\xi}_h\in \mathbf{V}_h$ is the solution
of scheme \eqref{e3.17} with the source functions
$\mathbf{f} = -(k^2+1) \boldsymbol{\eta}_h$ and $\mathbf{g}=-\lambda(\boldsymbol{\eta}_h)_T$. Hence, an application of
Theorem~\ref{discrete_stability} and Lemma~\ref{lem5.3} immediately
yields the following estimate for $\boldsymbol{\xi}_h$.
\begin{lemma}\label{lem5.4}
Under the conditions of Lemma~\ref{lem5.1}, there holds
\begin{align}\label{e5.14}
&\|\boldsymbol{\xi}_h\|_{DG} + k \|\boldsymbol{\xi}_h\|_{\mathbf{L}^2(\Omega)}
\lesssim \widehat{C}_{\rm sta} (1+\gamma_1) \big(k^2 h^2+\lambda\, h^{\frac32}\big) \mathcal R(\mathbf{E}),
\end{align}
where
\begin{align}\label{e5.15}
\widehat{C}_{\rm sta}:= \max\Big(k^{-1}(1+\gamma_h), \big(\lambda^{-1}(1+\gamma_h)\big)^{\frac12}\Big),
\end{align}
and $\gamma_h$ is defined by \eqref{e4.5b}.
\end{lemma}
By Lemmas \ref{lem5.3} and \ref{lem5.4} and the triangle inequality
we then obtain the following main theorem of this section.
\betagin{theorem}\lambdabel{main_theorem}
Let $\mathbf{E}$ and $\mathbf{E}_h$ be the solutions to problem \eqref{e1.1}--\eqref{e1.2}
and scheme \eqref{e3.17}, respectively. Assume $\mathbf{E}{\rm\mathbf i}n \mathbf{H}^2(\Omegagae)$.
Then, under the conditions of Lemma~\ref{lem5.1}, there hold the following error estimates:
\betagin{align}\lambdabel{e5.16}
&\|\mathbf{E}-\mathbf{E}_h\|_{DG}\lesssim \big(h +\widehat{C}_{\rm sta} (1+\gammama_1) \big(k^2 h^2+\lambda\, h^{\frac32}\big)\big) \, \mathcal R(\mathbf{E}),\\
&\|\mathbf{E}-\mathbf{E}_h\|_{\mathbf{L}^2(\Omegagae)}
\lesssim \bigl(h^2+\widehat{C}_{\rm sta} k^{-1} \big(k^2 h^2+\lambda\, h^{\frac32}\big)\bigr) (1+\gammama_1)
\, \mathcal R(\mathbf{E}). \lambdabel{e5.17}
\end{align}
\end{theorem}
To bound $\mathcal R(\mathbf{E})$ in terms of the source functions $\mathbf{f}$ and $\mathbf{g}$,
we need to bound $\|\mathbf{E}\|_{\mathbf{H}^2(\Omegagae)}$ and $\|\mathbf{E}\|_{\mathbf{H}^1(\curl,\Omegagae)}$
by the source functions. To that end, we appeal to the solution estimate
\eqref{e2.100a} to get
\begin{align}\label{e5.18}
\mathcal R(\mathbf{E}) \lesssim (\lambda+k)M(\mathbf{f},\mathbf{g}) + \|\mathbf{g}\|_{H^{\frac12}(\Gamma)}.
\end{align}
Substituting \eqref{e5.18} into \eqref{e5.16} and \eqref{e5.17} yields
the following explicit in all parameter error bounds for $\mathbf{E}-\mathbf{E}_h$.
\betagin{corollary}\lambdabel{cor_main} Suppose $k, \lambda\gtrsim 1$, and $ 0<\gammama_1\lesssim 1$.
Under the assumptions of Theorem \ref{main_theorem}, there exist constants $C_1$ and $C_2$ independent of $k, \lambda$, and $h$ such that
\betagin{align}\lambdabel{e5.19}
\|\mathbf{E}-\mathbf{E}_h\|_{DG}&\le C_1(k+\lambda) h + C_2 \widehat{C}_{\rm sta} (k+\lambda)\big(k^2 h^2+\lambda\, h^{\frac32}\big), \\
\|\mathbf{E}-\mathbf{E}_h\|_{\mathbf{L}^2(\Omegagae)}
&\le C_1(k+\lambda) h^2+ C_2 \widehat{C}_{\rm sta} k^{-1}(k+\lambda)\big(k^2 h^2+\lambda\, h^{\frac32}\big).\lambdabel{e5.20}
\end{align}
\end{corollary}
\betagin{remark} \lambdabel{r5.3}
(a) If $\lambda=O(k)$ and $h$ is in the pre-asymptotic range given by $k^2h\gtrsim 1$, then $\lambda\, h^{\frac32}\lesssim k\, h^{\frac32}\lesssim k^2h^2$ and the $H^1$-estimate \eqref{e5.19} becomes
\[\|\mathbf{E}-\mathbf{E}_h\|_{DG}\le C_1k h + C_2 \widehat{C}_{\rm sta} k^3 h^2.\]
(b) For asymptotic error estimates we refer to \cite[section 7.2]{Monk03}. When $k^3h^2$ is small, it is possible to improve the discrete stability estimates as well as the error estimates via the technique of stability-error iterative improvement from \cite{fw08b, Wu11}.
\end{remark}
\section{Numerical experiments}\lambdabel{sec-6}
Throughout this section, we consider the following Maxwell problem on
the unit cube $\Omegaga=(0,1)\times(0,1)\times(0,1)$:
\begin{align}
\curl\curl\mathbf{E}- k^2\mathbf{E}&=\mathbf{0} \qquad\mbox{in }\Omega,\label{e7.1} \\
\curl\mathbf{E}\times\boldsymbol{\nu}-\mathrm{i} k \mathbf{E}_T&=\mathbf{g}
\qquad\mbox{on }\Gamma:=\partial\Omega,\label{e7.2}
\end{align}
where $\mathbf{g}$ is so chosen that the exact solution is
$\mathbf{E}=\big(e^{\mathrm{i} k z}, e^{\mathrm{i} k x}, e^{\mathrm{i} k y}\big)^T$.
Notice that we have chosen $\lambda=k$ for simplicity.
For any positive integer $m$, let $\mathcal{T}_{1/m}$ denote the Cartesian mesh that
consists of $m^3$ congruent cubes of edge length $h=1/m$. We adopt the IPDG method
using piecewise linear polynomials. We remark that the
number of total DOFs of the IPDG method on $\mathcal{T}_{1/m}$ is
$12m^3$ which is the about twice of that of the corresponding conforming
edge element method (EEM) which uses piecewise trilinear polynomials.
\subsection{Stability}\label{ssec-1} Given a Cartesian mesh $\mathcal{T}_h$, recall
that $\mathbf{E}_h$ denotes the IPDG solution. Let $\mathbf{E}_h^{\mathrm{EEM}}$ denote the trilinear conforming
edge element approximation of the problem \eqref{e7.1}--\eqref{e7.2}.
In this subsection, we use the following penalty parameters in
the IPDG method \eqref{e3.17}:
\begin{equation}\label{e7.4}
\gamma_{0,\mathcal{F}}\equiv\gamma_0=100\quad\text{ and }\quad \gamma_{1,\mathcal{F}}\equiv\gamma_1=0.1 \quad\forall \mathcal{F}\in\mathcal{E}_h^I.
\end{equation}
We plot in Figure~\ref{fsta} the following two ratios
\[
\dfrac{\norm{\mathbf{E}_h}_{H(\curl,\mathcal{T}_h)}}{\norm{\mathbf{E}}_{H(\curl,\mathcal{T}_h)}} \qquad
\mbox{and}\qquad
\dfrac{\norm{\mathbf{E}_h^{\mathrm{EEM}}}_{H(\curl,\mathcal{T}_h)}}{\norm{\mathbf{E}}_{H(\curl,\mathcal{T}_h)}}
\]
versus $k$ for $k=1, 2, \cdots, 200$ with $h=0.1, 0.05$, respectively. It is
shown that
\[
\norm{\mathbf{E}_h}_{H(\curl,\mathcal{T}_h)}\lesssim\norm{\mathbf{E}}_{H(\curl,\mathcal{T}_h)},
\]
which is also implied by Theorem~\ref{discrete_stability} and
Theorem~\ref{stability}. The $H(\curl)$-norm of the edge element
solution oscillates for $k$ near $3/h$ but is still bounded
by $\norm{\mathbf{E}}_{H(\curl,\mathcal{T}_h)}$.
\begin{figure}[hbt]
\centerline{\includegraphics[scale=0.50]{fstadg}
\includegraphics[scale=0.50]{fsta}}
\caption{$\norm{\mathbf{E}_h}_{H(\curl,\mathcal{T}_h)}\big/\norm{\mathbf{E}}_{H(\curl,\mathcal{T}_h)}$ (left)
and $\norm{\mathbf{E}_h^{\mathrm{EEM}}}_{H(\curl,\mathcal{T}_h)}\big/\norm{\mathbf{E}}_{H(\curl,\mathcal{T}_h)}$ (right)
versus $k$ for $k=1, 2, \cdots, 200$ with $h=0.1, 0.05$, respectively.}\label{fsta}
\end{figure}
\subsection{Error estimates}\label{ssec-2} In this subsection, we use
the same penalty parameters as given in \eqref{e7.4}.
In the left graph of Figure~\ref{ferr1}, the relative $H(\curl)$-error
of the IPDG solution and the relative $H(\curl)$-error of the edge element
interpolant are displayed in one plot. As the mesh size decreases,
the relative error of the
IPDG solution stays around $100\%$ before it drops below $100\%$,
then decays slowly on a range increasing with $k$, and then decays at a
rate greater than $-1$ in the log-log scale but converges as fast as the
edge element interpolant (with slope $-1$) for small $h$. The relative
error grows with $k$ along the line $k h=1$. By contrast, as shown in
the right graph of Figure~\ref{ferr1}, the relative error of the finite
element solution first stays around $100\%$ but oscillates for large $k$,
then decays at a rate greater than $-1$ in the log-log scale but converges
as fast as the edge element interpolant (with slope $-1$) for small $h$.
The relative error of the edge element solution also grows with $k$
along the line $k h=1$.
\begin{figure}[ht]
\centerline{
\includegraphics[scale=0.45]{ferr1}
\includegraphics[scale=0.45]{ferr1f}
}
\caption{Left graph: the relative error of the IPDG solution with parameters
given in \eqref{e7.4} (solid) and the relative error of the edge element
interpolant (dotted) in $H(\curl)$-norm for $k=5, k=10, k=15,$ and $k=30$,
respectively. The dashed line gives reference slope of $-1$. Right graph:
corresponding plots for edge element solutions.}\label{ferr1}
\end{figure}
Unlike the error of the edge element interpolant, both the error of the
IPDG solution and that of the edge element solution are not controlled by
the magnitude of $k h$, as indicated by the two graphs in Figure~\ref{ferr2}.
It is shown that when $h$ is determined according to the ``rule of thumb",
the relative error of the IPDG solution remains less than $100\%$, which means
that the IPDG solution has some accuracy even for large $k$, while the
edge element solution is unusable for large $k$. We remark that the
accuracy of the IPDG solution can be further improved by tuning
the penalty parameter ${\rm i}\gamma_1$; see Subsection~\ref{ssec-3} below.
\begin{figure}[ht]
\centerline{
\includegraphics[scale=0.48]{ferr2}
\includegraphics[scale=0.48]{ferr2f}
}
\caption{The relative error of the IPDG solution (left) with parameters given
in \eqref{e7.4} and that of the edge element solution (right) in $H(\curl)$-norm
computed for $k=2, 4, \cdots, 72$ with mesh size $h$ determined by $kh=2$.}
\label{ferr2}
\end{figure}
Next we verify more precisely the pollution errors. To do so, we recall
the definition of the critical mesh size with respect to a given relative
tolerance (cf. \cite[Definition 7.1]{Wu11}).
\begin{definition}
Given a relative tolerance $\varepsilon$ and a wave number $k$, the critical mesh
size $h(k,\varepsilon)$ with respect to the relative tolerance $\varepsilon$ is defined
as the maximum mesh size such that the relative error of the IPDG solution
(or the edge element solution) in $H(\curl)$-norm is less than or equal to $\varepsilon$.
\end{definition}
It is clear that, if the pollution terms are of order $k^\beta h^\alpha$,
then $h(k,\varepsilon)$ should be proportional to $k^{-\beta/\alpha}$ for $k$ large
enough. Figure~\ref{ferr3} plots $h(k,0.5)$ versus $k$ for the
IPDG solution (left) with parameters given in \eqref{e7.4} and for the edge
element solution (right), respectively. Both decay at a
rate of $O(k^{-3/2})$, just like the linear FEM for the Helmholtz
problem (cf. \cite{Wu11}). The results of this subsection indicate
that both methods satisfy the following pre-asymptotic error bounds (cf. Remark~\ref{r5.3}(a)):
\begin{align*}
\Bigl\{\norm{\mathbf{E}-\mathbf{E}_h}_{H(\curl,\mathcal{T}_h)},\quad
\norm{\mathbf{E}-\mathbf{E}_h^{\mathrm{EEM}}}_{H(\curl,\Omega)} \Bigr\} \le C_1kh+C_2k^3h^2.
\end{align*}
\begin{figure}[ht]
\centerline{
\includegraphics[scale=0.48]{ferr3}
\includegraphics[scale=0.48]{ferr3f}
}
\caption{$h(k,0.5)$ versus $k$ for the IPDG solution (left) with parameters
given in \eqref{e7.4} and for the edge element solution (right), respectively.
The dotted lines give lines of slope $-1.5$ in the log-log scale.}\label{ferr3}
\end{figure}
\subsection{Reduction of the pollution effect}\label{ssec-3}
In this subsection, we show that an appropriate choice of the penalty parameters
can significantly reduce the pollution error of the IPDG method.
We use the following parameters:
\begin{equation}\label{e7.8}
\gamma_{0,\mathcal{F}}\equiv\gamma_0=100\quad\text{ and }\quad {\rm i}\gamma_{1,\mathcal{F}}
\equiv{\rm i}\gamma_1=0.08+0.01{\rm i} \quad\forall \mathcal{F}\in\mathcal{E}_h^I.
\end{equation}
We remark that ${\rm i}\gamma_{1,\mathcal{F}}$ is simply chosen from the set
$\set{0.01(p+q{\rm i}), -50\le p,q\le 50}$ to minimize the relative error of the
IPDG solution in $H(\curl)$-norm with $\gamma_0=100$ for
wave number $k=20$ and mesh size $h=1/10$. The optimal penalty parameter
can also be obtained by the dispersion analysis (cf. \cite{Ains04})
and will be considered in a future work.
The relative error of the IPDG solution with parameters given in \eqref{e7.8}
and the relative error of the edge element interpolant are displayed in the
left graph of Figure~\ref{ferr12b}. The IPDG method with parameters given
in \eqref{e7.8} is much better than both the IPDG method using parameters given
in \eqref{e7.4} and the EEM (cf. Figure~\ref{ferr1} and Figure~\ref{ferr2}).
The relative error does not increase much with the change of $k$ along
line $k h=1$ for $k\le 30$. But this does not mean that the pollution error
has been eliminated.
\begin{figure}[ht]
\centerline{
\includegraphics[scale=0.46]{ferr1b}
\includegraphics[scale=0.46]{ferr2b}
}
\caption{Left graph: the relative error of the IPDG solution with parameters given
in \eqref{e7.8} (solid) and the relative error of the edge element
interpolant (dotted) in $H(\curl)$-norm for $k=5, k=10, k=15,$ and $k=30$,
respectively. Right graph: the relative error of the IPDG solution with
parameters given in \eqref{e7.8} in $H(\curl)$-norm computed for $k=2, 4,
\cdots, 72$ with mesh size $h$ determined by $kh=2$.}\label{ferr12b}
\end{figure}
For more detailed observation, the relative error of the IPDG solution with
parameters given in \eqref{e7.8} in $H(\curl)$-norm computed for $k=2, 4, \cdots, 72$
with mesh size $h$ determined by $kh=2$, are plotted in
the right graph of Figure~\ref{ferr12b}. It is shown that the pollution error is
reduced significantly.
Figure~\ref{ferr3b} plots $h(k,0.5)$, the critical mesh size with respect to the
relative tolerance $50\%$, versus $k$ for the IPDG method with parameters given
in \eqref{e7.8}. We recall that $h(k,0.5)$ is the maximum mesh size such that
the relative error of the IPDG solution in $H(\curl)$-norm is less than or
equal to $50\%$. The decreasing rate of $h(k,0.5)$ in the log-log scale is
less than $-1.5$, which means that the pollution effect is reduced.
\begin{figure}[ht]
\centerline{
\includegraphics[scale=0.46]{ferr3b}
}
\caption{$h(k,0.5)$ versus $k$ for the IPDG method with parameters given
in \eqref{e7.8}. The dotted line gives a line of slope $-1.5$ in the
log-log scale.}\label{ferr3b}
\end{figure}
For a more detailed comparison between the IPDG method and the EEM, we consider the problem
\eqref{e7.1}--\eqref{e7.2} with wave number $k=36$.
The real parts of $\mathbf{E}_{hx}(0.5,0.5,z)$ with parameters given in
\eqref{e7.8} (left, solid), $\mathbf{E}_{hx}^{\mathrm{EEM}}(0.5,0.5,z)$ (right, solid),
and $\mathbf{E}_x(0.5,$ $0.5,z)$ (dotted) with mesh sizes $h=1/18$
and $1/36$ are plotted in Figure~\ref{fz}. Here $\mathbf{E}_{hx}$,
$\mathbf{E}_{hx}^{\mathrm{EEM}}$, and $\mathbf{E}_x$ are the $x$ components of the
IPDG solution, the edge element solution, and the exact solution, respectively.
The shape of the IPDG solution is roughly the same as that of the exact solution
for $h=1/18$ and matches very well for $h=1/36$, while the edge element
solution has a wrong shape for $h=1/18$ and $z>0.5$ and has a correct
shape for $h=1/36$ but suffers an apparent phase error.
\begin{figure}[ht]
\centerline{\includegraphics[scale=0.48]{fzb1}
\includegraphics[scale=0.46]{fzf1}}
\centerline{\includegraphics[scale=0.48]{fzb2}
\includegraphics[scale=0.46]{fzf2}}
\caption{The real parts of $\mathbf{E}_{hx}(0.5,0.5,z)$ with parameters
given in \eqref{e7.8} (left, solid), $\mathbf{E}_{hx}^{\mathrm{EEM}}(0.5,0.5,z)$
(right, solid), and $\mathbf{E}_x(0.5,0.5,z)$ (dotted) for $k=36$ and $h=1/18$, $1/36$,
respectively.} \label{fz}
\end{figure}
Table~\ref{table} shows the numbers of total DOFs needed for $50$\%
relative errors in $H(\curl)$-norm for the edge element interpolant, the
IPDG solution with parameters given in \eqref{e7.8}, and the edge element
solution, respectively. The IPDG method needs fewer DOFs than the EEM does
for $k\ge 10$ and far fewer for large wave number $k$.
\begin{table}[hbt]
\centering
\begin{tabular}{|l|r|r|r|r|r|}
\hline
$k$ & 10 & 20 & 30 & 40 & 50 \\\hline
Interpolation & 1,764& 12,168 & 33,048 & 79,488 & 141,288 \\\hline
IPDG & 2,592 & 20,736 & 69,984 & 187,500 & 393,216 \\\hline
EEM & 2,688 & 45,600 & 249,900 & 876,408 & 2,398,488 \\\hline
\end{tabular}
\caption{Numbers of total DOFs needed for 50\% relative errors
in $H(\curl)$-norm for the edge element interpolant, the IPDG solution with
parameters given in \eqref{e7.8}, and the edge element solution respectively.}
\label{table}
\end{table}
\textbf{Acknowledgments.} The authors would like to thank Dr. Huangxin Chen
of Xiamen University of China for his helpful suggestions on the construction
and analysis of the $\mathbf{H}(\curl,\Omega)$-elliptic projection in section \ref{sec-5.1}.
\begin{thebibliography}{99}
\bibitem{Ains04}
{\sc M.~Ainsworth}, {\em Dispersive properties of high order
{N}\'ed\'elec/edge element approximation of the time-harmonic Maxwell
equations}, Phil. Trans. R. Soc. Lond. A, 362 (2004),
pp.~471--491.
\bibitem{bs00}
I.~M. Babu{\v{s}}ka and S.~A. Sauter.
\newblock Is the pollution effect of the {F}{E}{M} avoidable for the
{H}elmholtz equation considering high wave numbers?
\newblock {\em SIAM Rev.}, 42(3):451--484, 2000.
\bibitem{bs94} S. Brenner and R. Scott.
\newblock The Mathematical Theory of Finite Element Methods.
\newblock Springer-Verlag, New York, 1994.
\bibitem{ciarlet78} P. G. Ciarlet.
\newblock The Finite Element Method for Elliptic Problems.
\newblock North-Holland, Amsterdam, 1978.
\bibitem{CLS04}
B. Cockburn, F. Li and C.-W. Shu.
\newblock Locally divergence-free discontinuous Galerkin methods for
the Maxwell equations.
\newblock {\em J. Comput. Phys.}, 194:588-610, 2004.
\bibitem{Colton_Kress99}
D. L. Colton and R. Kress.
\newblock {\em Inverse Acoustic and Electromagnetic Scattering Theory}.
\newblock Springer, New York, 1999.
\bibitem{Cummings_Feng06} P. Cummings and X. Feng.
\newblock Sharp regularity coefficient estimates for complex-valued
acoustic and elastic Helmholtz equations.
\newblock {\em M$^3$AS}, 16:139--160, 2006.
\bibitem{Feng10}
X.~Feng.
\newblock Wave number-explicit a priori estimates for the time-harmonic
Maxwell equations, preprint, July 15, 2010.
\bibitem{Feng10b}
X.~Feng and C. Lorton.
\newblock Generalized inf-sup conditions and wave-number and domain-size
explicit a priori estimates for the time-harmonic acoustic, elastic
and electromagnetic wave equations, in preparation.
\bibitem{fw08a}
X.~Feng and H. Wu.
\newblock Discontinuous Galerkin methods for the Helmholtz equation with large
wave number.
\newblock {\em SIAM J. Numer. Anal.}, 47:2872--2896, 2009.
\bibitem{fw08b}
X.~Feng and H. Wu.
\newblock $hp$-discontinuous Galerkin methods for the Helmholtz equation
with large wave number.
\newblock {\em Math. Comp.}, 80:997--2024, 2011.
\bibitem{gm11}
G.N.~Gatica and S.~Meddahi.
\newblock Finite element analysis of a time harmonic Maxwell problem with an impedance boundary condition.
\newblock {\em IMA Journal of Numerical Analysis}, 32:534--552, 2011.
\bibitem{HMP10}
R. Hiptmair, A. Moiola and I. Perugia.
\newblock Stability results for the time-harmonic Maxwell equations with
impedance boundary conditions.
\newblock {\em Math. Models Methods Appl. Sci.} 21:2263--2287, 2011.
\bibitem{HMP11}
R. Hiptmair, A. Moiola and I. Perugia.
\newblock Error analysis of Trefftz-discontinuous Galerkin methods for
the time-harmonic Maxwell equations.
\newblock {\em Math. Comp.} 82:247--268, 2013.
\bibitem{HPSS05}
P. Houston, I. Perugia, A. Schneebeli and D. Sch\"otzau.
\newblock Interior penalty method for the indefinite time-harmonic
Maxwell equations.
\newblock {\em Numer. Math.}, 100:485--518, 2005.
\bibitem{HPS04}
P. Houston, I. Perugia and D. Sch\"otzau.
\newblock Mixed discontinuous Galerkin approximation of the Maxwell operator.
\newblock {\em SIAM J. Numer. Anal.}, 42:434--459, 2004.
\bibitem{Monk03}
P. Monk.
\newblock {\em Finite Element Methods for {M}axwell's Equations}.
\newblock Oxford University Press, New York, 2003.
\bibitem{nedelec1986}
J.C. N\'{e}d\'{e}lec.
\newblock A new family of mixed finite elements in $R^3$.
\newblock {\em Numerische Mathematik}, 50:57--81, 1986.
\bibitem{NPC11}
N. C. Nguyena, J. Perairea and B. Cockburn.
\newblock Hybridizable discontinuous Galerkin methods for the time-harmonic
Maxwell's equations.
\newblock {\em J. Comput. Phys.}, 230:7151--7175, 2011.
\bibitem{Rellich40} F. Rellich.
\newblock Darstellung der Eigenwerte von $\Delta u+\lambda u=0$ durch ein
Randintegral.
\newblock {\em Math. Z.}, 46:635--636, 1940.
\bibitem{Schatz74}
A.~H. Schatz.
\newblock An observation concerning {R}itz--{G}alerkin methods with indefinite
bilinear forms.
\newblock {\em Math. Comp.}, 28:959--962, 1974.
\bibitem{TW05}
A. Toselli and O. Widlund,
\newblock Domain Decomposition Methods - Algorithms and Theory.
\newblock Springer, New York, 2005.
\bibitem{Wu11} H. Wu.
\newblock Pre-asymptotic error analysis of {CIP-FEM} and {FEM} for
{H}elmholtz equation with high wave number. {P}art {I}: Linear version.
\newblock to appear. (See also arXiv:1106.4079v1).
\bibitem{ZSWX09}
L. Zhong, S. Shu, G. Wittum and J. Xu.
\newblock Optimal error estimates for N\'ed\'elec edge elements for
time-harmonic Maxwell's equations.
\newblock {\em J. Comput. Math.}, 27:563--572, 2009.
\end{thebibliography}
\appendix
\section{Proof of \eqref{e5.11f}}
The proof follows the same lines
as those given in \cite[pages 502--505]{HPSS05} and in \cite{Monk03,ZSWX09}. Let
\[
U_h=\bigl\{ v_h\in H^1(\Omega);\, v_h|_K\in P_2(K), \forall K\in\mathcal{T}_h \bigr\}
\]
be the $H^1$-conforming linear finite element space. It follows from \eqref{e5.1} that $\mathbf{E}-\widetilde{\mathbf{E}}_h$
is discrete divergence-free, that is,
\[
\bigl(\mathbf{E}-\widetilde{\mathbf{E}}_h, \nabla \varphi_h\bigr)_\Omega=0
\qquad\forall \varphi_h\in U_h.
\]
Noting that $\boldsymbol{\Phi}_h^c\in \mathbf{V}_h\cap \mathbf{H}(\curl,\Omega)$,
we have the following discrete Helmholtz decomposition of
$\boldsymbol{\Phi}_h^c$:
\begin{align}
\boldsymbol{\Phi}_h^c=\mathbf{w}_h+\nabla r_h,
\end{align}
where $r_h\in U_h$ and $\mathbf{w}_h\in \mathbf{V}_h\cap \mathbf{H}(\curl,\Omega)$ is also discrete
divergence-free. It is easy to check that
\begin{align*}
\norml{\nabla r_h}{\Omega}\le\norml{\boldsymbol{\Phi}_h^c}{\Omega},
\qquad \norml{\mathbf{w}_h}{\Omega}\lesssim \norml{\boldsymbol{\Phi}_h^c}{\Omega}.
\end{align*}
Then from \cite[Lemma~7.6]{Monk03} and on noting that the domain $\Omega$ is convex, there exists $\mathbf{w}\in \mathbf{H}^1(\Omega)$ such that $\mathbf{w}\cdot\mathbf{n}=0$ on $\Gamma$ and
\begin{align}
&\curl\mathbf{w}=\curl\mathbf{w}_h,\quad {\rm div\,}\mathbf{w}=0,\label{e6.8}\quad
\norml{\mathbf{w}_h-\mathbf{w}}{\Omega}
\lesssim h\norml{\curl \boldsymbol{\Phi}_h^c}{\Omega}.
\end{align}
Thus, it follows from the identity
\begin{align*}
\bigl(\mathbf{E}-\widetilde{\mathbf{E}}_h, \boldsymbol{\Phi}_h^c \bigr)_\Omega
=\bigl(\mathbf{E}-\widetilde{\mathbf{E}}_h, \mathbf{w}_h \bigr)_\Omega
=\bigl(\mathbf{E}-\widetilde{\mathbf{E}}_h, \mathbf{w}_h-\mathbf{w} \bigr)_\Omega
+\bigl(\mathbf{E}-\widetilde{\mathbf{E}}_h, \mathbf{w} \bigr)_\Omega
\end{align*}
that
\begin{align}\label{e6.9}
-\frac{\bigl(\mathbf{E}-\widetilde{\mathbf{E}}_h, \boldsymbol{\Phi}_h^c \bigr)_\Omega}{\norml{\mathbf{E}-\widetilde{\mathbf{E}}_h}{\Omega}}
&\le \norml{\mathbf{w}_h-\mathbf{w}}{\Omega}+\norml{\mathbf{w}}{\Omega}\\
&\lesssim h\norml{\curl \boldsymbol{\Phi}_h^c}{\Omega}+\norml{\mathbf{w}}{\Omega}\nonumber.
\end{align}
The first term on the right-hand side of \eqref{e6.9} can be bounded as follows:
\begin{align}\label{e6.10}
&h\norml{\curl\boldsymbol{\Phi}_h^c}{\Omega}
=h\norml{\curl(\widehat{\mathbf{E}}_h-\mathbf{E}+\mathbf{E}-\widetilde{\mathbf{E}}_h
+\boldsymbol{\Phi}_h- \boldsymbol{\Phi}_h^c)}{\Omega}\\
\le& h\|\widehat{\mathbf{E}}_h- \mathbf{E}\|_{\mathbf{H}(\curl,\Omega)}
+h\|\mathbf{E}-\widetilde{\mathbf{E}}_h\|_{DG}
+h\|\curl(\boldsymbol{\Phi}_h-\boldsymbol{\Phi}_h^c)\|_{\mathbf{L}^2(\mathcal{T}_h)}\lesssim h^2\mathcal R(\mathbf{E}),\nonumber
\end{align}
where we have used \eqref{e5.6}, \eqref{e5.8c}, and \eqref{e5.11a} to derive the last inequality.
To estimate $\norml{\mathbf{w}}{\Omega}$, we appeal to a duality argument to be
described next. Let $\mathbf{z}$ be the solution of the following auxiliary problem:
\begin{align}
\curl\curl\mathbf{z}+\mathbf{z}&=\mathbf{w} \quad\mbox{in }\Omega, \qquad\curl\mathbf{z}\times\nu=\mathbf{0}
\quad\mbox{on }\Gamma:=\partial\Omega. \label{e:z}
\end{align}
Noting that $\Omega$ is convex, the above problem has a unique solution $\mathbf{z}\in \mathbf{H}^1(\curl,\Omega)$ which satisfies the following regularity estimate (cf. \cite{HMP10,Monk03}):
\begin{equation}\label{C_la}
\|\mathbf{z}\|_{\mathbf{H}^1(\curl,\Omega)} \lesssim \|\mathbf{w}\|_{\mathbf{L}^2(\Omega)}.
\end{equation}
Define the sesquilinear forms
\begin{align*}
A(\mathbf{u},\mathbf{v})&:=(\curl\mathbf{u},\curl\mathbf{v})_\Omega +(\mathbf{u},\mathbf{v})_\Omega \qquad\forall \mathbf{u},\mathbf{v}\in \hat{\mathbf{V}},\\
A_h(\mathbf{u},\mathbf{v})&:=b_h(\mathbf{u},\mathbf{v})+(\mathbf{u},\mathbf{v})_\Omega \qquad\forall \mathbf{u},\mathbf{v}\in \mathbf{V}.
\end{align*}
Let $\mathbf{z}_h^c\in \mathbf{V}_h\cap \mathbf{H}(\curl,\Omega)$ and $\mathbf{z}_h\in \mathbf{V}_h$ denote the edge finite element approximation
and the IPDG approximation to $\mathbf{z}$, respectively, that is,
\begin{align*}
A(\mathbf{v}_h,\mathbf{z}_h^c)&=(\mathbf{v}_h,\mathbf{w})_\Omega
\qquad\forall\mathbf{v}_h\in \mathbf{V}_h\cap \mathbf{H}(\curl,\Omega),\\
A_h(\mathbf{v}_h,\mathbf{z}_h)&=(\mathbf{v}_h,\mathbf{w})_\Omega \qquad\forall\mathbf{v}_h\in \mathbf{V}_h.
\end{align*}
It can be shown that the following estimates hold (cf. \eqref{e5.6}):
\begin{align}\label{C1}
\|\mathbf{z}-\mathbf{z}_h^c\|_{\mathbf{H}(\curl,\Omega)}
&\lesssim h\, \|\mathbf{z}\|_{\mathbf{H}^1(\curl,\Omega)}
\lesssim h\, \norml{\mathbf{w}}{\Omega}, \nonumber \\
\norme{\mathbf{z}-\mathbf{z}_h^c},\quad\norme{\mathbf{z}-\mathbf{z}_h} &\lesssim h\,(1+\gamma_1)^\frac12 \|\mathbf{z}\|_{\mathbf{H}^1(\curl,\Omega)}
\lesssim h\,(1+\gamma_1)^\frac12 \norml{\mathbf{w}}{\Omega}. \nonumber
\end{align}
Since
\begin{align*}
\norml{\mathbf{w}}{\Omega}^2&=A(\mathbf{w},\mathbf{z})=A(\mathbf{w},\mathbf{z}-\mathbf{z}_h^c)+A(\mathbf{w},\mathbf{z}_h^c),
\end{align*}
on noting that $\mathbf{w}, \mathbf{w}_h\in \mathbf{H}(\curl,\Omega)$, from \eqref{e6.8} and \eqref{e6.10}, we have
\begin{align*}
A(\mathbf{w},\mathbf{z}-\mathbf{z}_h^c)&=A(\mathbf{w}-\mathbf{w}_h,\mathbf{z}-\mathbf{z}_h^c)
= (\mathbf{w}-\mathbf{w}_h,\mathbf{z}-\mathbf{z}_h^c)_\Omega\\
&\lesssim h\norml{\curl\boldsymbol{\Phi}_h^c}{\Omega}\, h\,\|\mathbf{w}\|_{\mathbf{L}^2(\Omega)}\lesssim h^3\mathcal R(\mathbf{E}) \norml{\mathbf{w}}{\Omega}.
\end{align*}
On the other hand,
\begin{align*}
A(\mathbf{w},\mathbf{z}_h^c)&=(\curl \mathbf{w},\curl\mathbf{z}_h^c)_\Omega
+(\mathbf{w},\mathbf{z}_h^c)_\Omega =\bigl(\curl \boldsymbol{\Phi}_h^c,\curl\mathbf{z}_h^c\bigr)_\Omega
+(\mathbf{w},\mathbf{z}_h^c)_\Omega
\displaybreak[0]\\
&=A(\boldsymbol{\Phi}_h^c,\mathbf{z}_h^c)
+\bigl(\mathbf{w}-(\mathbf{w}_h+\nabla r_h),\mathbf{z}_h^c\bigr)_\Omega=A(\boldsymbol{\Phi}_h^c,\mathbf{z}_h^c)+(\mathbf{w}-\mathbf{w}_h,\mathbf{z}_h^c)_\Omega.
\end{align*}
From the definitions of $A, A_h$ and $b_h$, we get
\begin{align*}
&A(\boldsymbol{\Phi}_h^c,\mathbf{z}_h^c)
=A_h(\boldsymbol{\Phi}_h^c,\mathbf{z}_h^c)
+{\rm i} J_1(\boldsymbol{\Phi}_h^c,\mathbf{z}_h^c)\\
&\quad
=A_h(\mathbf{E}-\widehat{\mathbf{E}}_h,\mathbf{z}_h^c)
+A_h(\widetilde{\mathbf{E}}_h-\mathbf{E},\mathbf{z}_h^c)
+A_h(\boldsymbol{\Phi}_h^c-\boldsymbol{\Phi}_h,\mathbf{z}_h^c)
+{\rm i} J_1(\boldsymbol{\Phi}_h^c,\mathbf{z}_h^c)\\
&\quad
=A_h(\mathbf{E}-\widehat{\mathbf{E}}_h,\mathbf{z}_h^c)
+A_h(\boldsymbol{\Phi}_h^c-\boldsymbol{\Phi}_h,\mathbf{z}_h^c)
+{\rm i} J_1(\boldsymbol{\Phi}_h^c,\mathbf{z}_h^c-\mathbf{z}).
\end{align*}
Therefore
\begin{align*}
A(\mathbf{w},\mathbf{z}_h^c)=&A_h(\mathbf{E}-\widehat{\mathbf{E}}_h,\mathbf{z}_h^c-\mathbf{z})
+A_h(\mathbf{E}-\widehat{\mathbf{E}}_h,\mathbf{z})+A_h(\boldsymbol{\Phi}_h^c-\boldsymbol{\Phi}_h,\mathbf{z}_h^c-\mathbf{z}+\mathbf{z}-\mathbf{z}_h)
\\
&+A_h(\boldsymbol{\Phi}_h^c-\boldsymbol{\Phi}_h,\mathbf{z}_h)+{\rm i} J_1(\boldsymbol{\Phi}_h^c,\mathbf{z}_h^c-\mathbf{z})
+(\mathbf{w}-\mathbf{w}_h,\mathbf{z}_h^c)_\Omega.
\end{align*}
Since
\begin{align*}
A_h(\mathbf{E}-\widehat{\mathbf{E}}_h,\mathbf{z})=(\mathbf{E}-\widehat{\mathbf{E}}_h,\mathbf{w})_\Omega,
\qquad
A_h(\boldsymbol{\Phi}_h^c-\boldsymbol{\Phi}_h,\mathbf{z}_h)
=(\boldsymbol{\Phi}_h^c-\boldsymbol{\Phi}_h,\mathbf{w})_\Omega,
\end{align*}
we have from Lemma~\ref{lem5.1} and the local trace inequality,
\begin{align*}
&A(\mathbf{w},\mathbf{z}_h^c) \lesssim\norme{\mathbf{E}-\widehat{\mathbf{E}}_h} \norme{\mathbf{z}_h^c-\mathbf{z}}
+\norml{\mathbf{E}-\widehat{\mathbf{E}}_h}{\Omega}\norml{\mathbf{w}}{\Omega}\\
&\hskip 46pt+ \norme{\boldsymbol{\Phi}_h^c-\boldsymbol{\Phi}_h}
\big(\norme{\mathbf{z}_h^c-\mathbf{z}}+\norme{\mathbf{z}-\mathbf{z}_h}\big)
+\norml{\boldsymbol{\Phi}_h^c-\boldsymbol{\Phi}_h}{\Omega} \norml{\mathbf{w}}{\Omega}\\
&\hskip 46pt+\gamma_1\norml{\curl\boldsymbol{\Phi}_h^c}{\Omega}
\big(\norml{\curl(\mathbf{z}_h^c-\mathbf{z})}{\Omega}+h\|\curl(\mathbf{z}_h^c-\mathbf{z})\|_{H^1(\mathcal{T}_h)}\big)
\\
&\hskip 46pt+\norml{\mathbf{w}-\mathbf{w}_h}{\Omega} \norml{\mathbf{z}_h^c}{\Omega}\\
&\lesssim\norml{\mathbf{w}}{\Omega}\Big(h\,(1+\gamma_1)^\frac12 \norme{\mathbf{E}-\widehat{\mathbf{E}}_h}
+\norml{\mathbf{E}-\widehat{\mathbf{E}}_h}{\Omega}+h\,(1+\gamma_1)^\frac12
\norme{\boldsymbol{\Phi}_h^c-\boldsymbol{\Phi}_h} \\
&\quad+\norml{\boldsymbol{\Phi}_h^c-\boldsymbol{\Phi}_h}{\Omega}+\gamma_1h\norml{\curl\boldsymbol{\Phi}_h^c}{\Omega}+h\norml{\curl \boldsymbol{\Phi}_h^c}{\Omega}\Big).
\end{align*}
Moreover, from \eqref{e5.11a}, \eqref{e5.11}, $\gamma_0\gtrsim 1$, and the local trace inequality, we get
\begin{align*}
\norme{
\boldsymbol{\Phi}_h^c-\boldsymbol{\Phi}_h}&\lesssim (1+\gamma_1)^\frac12
\|\curl(\boldsymbol{\Phi}_h-\boldsymbol{\Phi}_h^c)\|_{\mathbf{L}^2(\mathcal{T}_h)}+\|\boldsymbol{\Phi}_h\|_{DG}+\norml{\boldsymbol{\Phi}_h^c-\boldsymbol{\Phi}_h}{\Omega}\\
&\lesssim (1+\gamma_1)^\frac12 h\,\mathcal R(\mathbf{E}).
\end{align*}
Thus, it follows from \eqref{e5.11}, \eqref{e6.10}, \eqref{e5.11a}, and the above estimate that
\begin{align*}
A(\mathbf{w},\mathbf{z}_h^c) \lesssim &(1+\gamma_1) h^2 \mathcal R(\mathbf{E})\norml{\mathbf{w}}{\Omega}.
\end{align*}
Then we obtain the following estimate for $\norml{\mathbf{w}}{\Omega}$:
\begin{align*}
\norml{\mathbf{w}}{\Omega} \lesssim (1+\gamma_1)h^2\mathcal R(\mathbf{E}),
\end{align*}
which together with \eqref{e6.9} and \eqref{e6.10} implies that \eqref{e5.11f} holds.
The proof is complete.
\end{document} |
\begin{document}
\title{The Series Product for Gaussian Quantum Input Processes }
\author{John E. Gough \\
Aberystwyth University, SY23 3BZ, Wales, United Kingdom \\
e-mail: [email protected] \\
[2ex] Matthew R. James \\
Australian National University, Canberra, ACT 0200, Australia \\
e-mail: [email protected]}
\maketitle
\begin{abstract}
We present a theory for connecting quantum Markov components into a network
with quantum input processes in a Gaussian state (including thermal and
squeezed). One would expect on physical grounds that the connection rules
should be independent of the state of the input to the network. To compute
statistical properties, we use a version of Wick's Theorem involving
fictitious vacuum fields (Fock space based representation of the fields) and
while this aids computation, and gives a rigorous formulation, the various
representations need not be unitarily equivalent. In particular, a naive
application of the connection rules would lead to the wrong answer. We
establish the correct interconnection rules, and show that while the quantum
stochastic differential equations of motion display explicitly the
covariances (thermal and squeezing parameters) of the Gaussian input fields,
the Wick-Stratonovich form that we introduce leads to a way of writing
these equations that does not depend on these covariances and so corresponds
to the universal equations written in terms of formal quantum input
processes.
in series can be developed in this way, and as required physically, is
universal and in particular representation-free.
\end{abstract}
\noindent \textbf{Keywords:} Gaussian Wick Theorem, Wick-Stratonovich Form,
Quantum Gaussian Feedback Networks.
\section{Introduction}
The quantum input-output theory has had an immense impact on quantum optics,
and in recent years has extended to opto-mechanical systems and beyond. The
prospect of routing the inputs through a network, or indeed using feedback
has lead to a burgeoning field of quantum feedback control \cite{I_12}-\cite
{JAM_14}. The development of a systems engineering approach to quantum
technology has benefited from having a systematic framework in which
traditional open quantum systems models can be combined according to
physical connection architectures.
The initial work on how to cascade two quantum input-output systems can be
traced back to Gardiner \cite{Gardiner_cascade} and Carmichael \cite
{Carmichael_cascade}. More generally, the authors have introduced the theory
of \textit{Quantum Feedback Networks} (QFN) which generalizes this to
include cascading, feedback, beam-splitting and general scattering of
inputs, etc., \cite{GJ-QFN}, \cite{GJ-Series}. One of the basic constructs
is the series product which gives the instantaneous feedforward limit of two
components connected in series via quantum input processes: in fact, the
systems need not necessarily be distinct and the series product generalizes
cascading by allowing for feedback. The original work was done for input
processes where the input fields were in the Fock vacuum field state. A
generalization to squeezed fields and squeezing components has been given
\cite{GJN_squeeze}, however this was restricted to the case of linear
coupling and dynamics: there it was shown that the resulting transform
analysis could be applied in a completely consistent manner. More recent
work has shown that non-classical states for the input fields, such as
shaped single-photon or multi-photon states, or cat states of coherent
fields, may in principle be generated from signal models \cite{GJNC}, \cite
{GZ_filter} - that is, where a field in the Fock vacuum state was passed
through an ancillary dynamical system (the signal generator) which is then
to be cascaded to the desired system. Quantum feedback network (QFN) theory
concerns the interconnection of open quantum systems. The interconnections
are mediated by quantum fields in the input-output theory, \cite
{GarZol00,GJ-QFN,GJ-Series}. The idea is that an output from one node is fed
back in as input to another (not necessarily distinct) node, the simplest
case being the cascade connection (e.g., light exiting one cavity being
directed into another). The components are specified by Markovian models
determined by SLH parameters which describe the self-energy of the system
and how the system interacts with the fields (via idealized Jaynes-Cummings
type interactions and scattering).
Here we turn to the problem of the general class of Gaussian states for
quantum fields. This includes thermal fields, and of course squeezed fields.
In principle, these may be approximated as the output of a degenerate
parametric amplifier (DPA) driven by vacuum input, see \cite{GarZol00}. In a
sense, a singular DPA may serve as the appropriate signal
generator to modify a vacuum field into a squeezed field before passing into
a given network. We will exploit this in the paper, however, we will have to
pay attention to the operator ordering problem when inserting these
approximations into quantum dynamical equations of motion and input-output
relations.
The programme turns out to be rather more involved than one might expect at
first glance. It is always possible to represent a collection of $d$
Gaussian fields using $2d$ vacuum fields (a Bogoliubov transformation!) and
one might hope that the corresponding connection rules applied to the
representation in terms of vacuum fields would agree with the intuitive
rules one would desire. This turns out not to be the case, and the various
feedback constraints cannot be naively applied to the representing fields:
the reason is that the representations are a linear combination of creation
and annihilation operators for the representing vacuum fields, and we have
broken the Wick ordered form of the original equations.
If applied naively, the series product would predict a contribution to the
global network model that depended on the covariance parameters of the
state. From the physical point of view, this ought to be spurious. In
comparison with classical analog linear electronics, we see that the
components (e.g. resistors, capacitors, inductors) are described by
impedances. When components are interconnected to form a network, the
network may be described by an equivalent impedance, derived through an
application of Kirchhoff laws. Impedances do not depend on the applied
currents or voltages, and are therefore intrinsic to the device or network.
Similarly the rules for connecting a quantum feedback network should be
intrinsic, and not depend on the state of the noise fields.
\section{Background and Problem statement}
Let us begin in the concrete setting of the quantum stochastic calculus of
Hudson and Parthasarathy \cite{HP} with a fixed initial space $\mathfrak{h}
_{0}$ and a noise space that is the (Bose) Fock space over $\mathbb{C}^{d}$
-valued $L^{2}$-functions on the time interval $[0,\infty )$. In the
language of Hudson and Parthasarathy, we have a multiplicity space of
dimension $d$ and we select an orthonormal basis which determines $d$
channels. We denote by $A_{k}\left( t\right) $, $A_{j}\left( t\right) ^{\ast
}$, and $\Lambda _{jk}\left( t\right) $ the processes of annihilation (for
channel $k$), creation (for channel $j$) and scattering (from channel $k$ to
channel $j$).
In the following, we shall introduce an Einstein summation convention for
repeated channel indices. We will deal with the class of quantum stochastic
integrals processes satisfying the appropriate conditions of local
integrability, square-integrability \cite{HP} without explicit reference.
We have for instance the QSDE
\begin{equation}
dX\left( t\right) =x_{jk}\left( t\right) d\Lambda _{jk}\left( t\right)
+x_{j0}\left( t\right) dA_{j}\left( t\right) ^{\ast }+x_{0k}\left( t\right)
dA_{k}\left( t\right) +x_{00}\left( t\right) dt
\end{equation}
where the coefficients are adapted and the increments are (quantum) It\={o}.
We have the quantum It\={o} product formula
\begin{equation}
d\left( X\left( t\right) Y\left( t\right) \right) =dX\left( t\right)
\,Y\left( t\right) +X\left( t\right) \,dY\left( t\right) +dX\left( t\right)
\,dY\left( t\right)
\end{equation}
where the It\={o} correction comes from the quantum It\={o} table \cite{HP}
\begin{eqnarray}
d\Lambda _{jk}\left( t\right) \,d\Lambda _{lm}\left( t\right) &=&\delta
_{kl}d\Lambda _{jm}\left( t\right) ,\quad d\Lambda _{jk}\left( t\right)
\,dA_{l}\left( t\right) ^{\ast }=\delta _{kl}dA_{j}\left( t\right) ^{\ast }, \nonumber
\\
dA_{k}\left( t\right) \,d\Lambda _{lm}\left( t\right) &=&\delta
_{kl}dA_{m}\left( t\right) ,\quad dA_{j}\left( t\right) \,dA_{k}\left(
t\right) ^{\ast }=\delta _{jk}dt,
\end{eqnarray}
with all other products of the fundamental increments vanishing.
\begin{definition}{Definition}
The Stratonovich integral is defined algebraically via
\begin{eqnarray}
X\left( t\right) \circ dY\left( t\right) &=&X\left( t\right) dY\left(
t\right) +\frac{1}{2}dX\left( t\right) \,dY\left( t\right) \\
dX\left( t\right) \circ Y\left( t\right) &=&dX\left( t\right) \,Y\left(
t\right) +\frac{1}{2}dX\left( t\right) \,dY\left( t\right) .
\end{eqnarray}
\end{definition}
This turns out to be equivalent to a mid-point rule
\cite{Chebotarev}.
If we consider the QSDE $dU\left( t\right) =-idE\left( t\right) \circ
U\left( t\right) $, with $U\left( 0\right) $ the identity and $E\left(
t\right) =E_{jk}\Lambda _{jk}\left( t\right) +E_{j0}A_{j}\left( t\right)
^{\ast }+E_{0k}A_{k}\left( t\right) +E_{00}$ a self-adjoint quantum
stochastic integral process, then we may convert to the It\={o} form to get
\begin{equation}
dU\left( t\right) =\bigg\{\left( S_{jk}-\delta _{jk}\right) d\Lambda
_{jk}\left( t\right) +L_{j}dA_{j}^{\ast }\left( t\right) -L_{j}^{\ast
}S_{jk}dA_{k}\left( t\right) +Kdt\bigg\}\,U\left( t\right) ,
\end{equation}
where (setting $E_{\ell \ell }$ to be the $d\times d$ matrix with entries $
E_{jk}$)
\begin{equation}
S=\left[
\begin{array}{ccc}
S_{11} & \cdots & S_{1d} \\
\vdots & \ddots & \vdots \\
S_{d1} & \cdots & S_{dd}
\end{array}
\right] =\frac{I-\frac{i}{2}E_{\ell \ell }}{I+\frac{i}{2}E_{\ell \ell }}
\end{equation}
is called the matrix of scattering coefficients unitary (that is, $
S_{jk}^{\ast }S_{jl}=\delta _{kl}=S_{lj}S_{kj}^{\ast }$),
\begin{equation}
L=\left[
\begin{array}{c}
L_{1} \\
\vdots \\
L_{d}
\end{array}
\right] =\frac{i}{I+\frac{i}{2}E_{\ell \ell }}\left[
\begin{array}{c}
E_{10} \\
\vdots \\
E_{d0}
\end{array}
\right]
\end{equation}
which is the column vector of coupling operators, and
\begin{equation}
K=-\frac{1}{2}L_{k}^{\ast }L_{k}-iH,
\label{eq:K_Fock}
\end{equation}
where $H$ is the Hamiltonian ($H^{\ast }=H=E_{00}+\frac{1}{2}E_{0j}\left[
\text{Im}\left\{ \frac{1}{I+\frac{i}{2}E_{\ell \ell }}\right\} \right]
_{jk}E_{k0}$). For simplicity we will assume that the terms $S_{jk},L_{j}$
and $H$ are bounded operators on the system Hilbert space $\mathfrak{h}_{0}$.
We generally refer to the triple $\mathbf{G}\sim \left( S,L,H\right) $ as
the Hudson-Parthasarathy parameters, or informally the ``SLH'' parameters
specifying the model. The unitary process they generate may be denoted as $
U^{\mathbf{G}}\left( t\right) $ if we wish to emphasize the dependence on
these parameters.
For $X$ an operator of the initial space, we introduce $j_{t}\left( X\right)
=U\left( t\right) ^{\ast }X\,U\left( t\right) $ and from the quantum It\={o}
rule obtain the Heisenberg-Langevin equation
\begin{equation}
dj_{t}(X)=j_{t}\left( \mathcal{L}_{jk}X\right) \,d\Lambda _{jk}+j_{t}(
\mathcal{L}_{j0}X)\,dA_{j}^{\ast }+j_{t}(\mathcal{L}_{0k}X)\,dA_{k}+j_{t}(
\mathcal{L}_{00}X)dt
\end{equation}
where
\begin{equation}
\mathcal{L}_{jk}X=S_{lj}^{\ast }XS_{lk}-\delta _{jk}X,\quad \mathcal{L}
_{j0}X=S_{lj}^{\ast }\left[ X,L_{l}\right] ,\quad \mathcal{L}_{0k}X=\left[
L_{l}^{\ast },X\right] S_{lk}
\end{equation}
and the Lindblad generator $\mathcal{L}_{00} \equiv \mathcal{L}$ is
\begin{equation}
\mathcal{L} X=\frac{1}{2}L_{k}^{\ast }\left[ X,L_{k}\right] +\frac{1}{2}
\left[ L_{k}^{\ast },X\right] L_{k}-i\left[ X,H\right] .
\label{eq:Linblad_Fock}
\end{equation}
The maps $\mathcal{L}_{\alpha \beta }$ are known as the \textit{Evans-Hudson
super-operators}. We shall occasionally write $j_{t}^{\mathbf{G}}\left(
X\right) $ for the dynamical flow of $X$ when we wish to emphasis the
dependence on the SLH parameters $\mathbf{G}$.
Let us now write the input processes as $A_{\mathrm{in},j}\left( t\right)
=A_{j}\left( t\right) $ and introduce the output processes as $A_{\mathrm{out
},j}\left( t\right) =U\left( t\right) ^{\ast }A_{\mathrm{in},j}\left(
t\right) U\left( t\right) $ then from the quantum It\={o} rule we see that
\begin{equation}
dA_{\mathrm{out},j}\left( t\right) =j_{t}\left( S_{jk}\right) \,dA_{\mathrm{
in},k}\left( t\right) +j_{t}\left( L_{j}\right) \,dt.
\end{equation}
\subsection{Thermal Fields}
Considering the single channel $\left( d=1\right) $ case for the moment, we
may introduce non-Fock quantum stochastic processes as follows \cite{HL}. For $n>0$,
we set
\begin{equation}
B\left( t\right) =\sqrt{n+1}A_{+}\left( t\right) +\sqrt{n}A_{-}\left(
t\right) ^{\ast },\quad \tilde{B}\left( t\right) =\sqrt{n}A_{+}\left(
t\right) +\sqrt{n+1}A_{-}\left( t\right) ^{\ast }
\end{equation}
which are canonical fields on the Fock space with a pair of channels
labeled as $k=\pm $. In fact, the map $\left( A_{+},A_{-}\right) \mapsto
\left( B,\tilde{B}\right) $ is a Bogoliubov transformation with inverse
\begin{equation}
\left[
\begin{array}{c}
A_{+} \\
A_{-}
\end{array}
\right] =\left[
\begin{array}{cc}
\sqrt{\left( n+1\right) } & -\sqrt{n} \\
-\sqrt{n} & \sqrt{\left( n+1\right) }
\end{array}
\right] \left[
\begin{array}{c}
B \\
\tilde{B}
\end{array}
\right] .
\end{equation}
This is of course based on an Araki-Woods representation of the fields \cite{AW63}. As is well known, these transformations cannot be implemented unitarily.
However, from a quantum optics point of view, devices transforming or even
squeezing fields in this manner are frequently considered, and it is useful
to imagine a hypothetical device - a Bogoliubov box - performing such a
canonical transformation on our idealized fields.
Ignoring the second process $\tilde{B}$, we obtain the non-Fock quantum
It\={o} table
\begin{equation}
dB\left( t\right) dB\left( t\right) ^{\ast }=\left( n+1\right) dt,\quad
dB\left( t\right) ^{\ast }dB\left( t\right) =ndt.
\end{equation}
It is problematic (read impossible) to incorporate a scattering process $
\Lambda $ into this table. We refer to $B$ as non-Fock quantum noise.
We need to drop the scattering term from the unitary evolution equation,
i.e. set $S\equiv I$, and with
\begin{equation}
L=\left[
\begin{array}{c}
L_{+} \\
L_{-}
\end{array}
\right] =\left[
\begin{array}{c}
\sqrt{n+1}L \\
-\sqrt{n}L^{\ast }
\end{array}
\right]
\end{equation}
we have
\begin{eqnarray}
dU\left( t\right) &=&\bigg\{LdB\left( t\right) ^{\ast }-L^{\ast }dB\left(
t\right) +K^{\text{th}}dt\bigg\}\,U\left( t\right) \nonumber \\
&=&\bigg\{L_{j}dA_{j}^{\ast }\left( t\right) -L_{j}^{\ast }dA_{j}\left(
t\right) +Kdt\bigg\}\,U\left( t\right) ,
\end{eqnarray}
where
\begin{equation}
K^{\text{th}}=-\frac{1}{2}L_{+}^{\ast }L_{+}-\frac{1}{2}L_{-}^{\ast
}L_{-}-iH=-\frac{n+1}{2}L^{\ast }L-\frac{n}{2}LL^{\ast }-iH.
\end{equation}
For the flow, we need that the Evans-Hudson super-operators associated with
the scattering terms are trivial. This is the case when the entries of the
scattering matrix $S$ are (e.g. scalars) commuting with operators of the
initial space, but we can get away without assuming that $\left[
\begin{array}{cc}
S_{++} & S_{+-} \\
S_{-+} & S_{--}
\end{array}
\right] $ is the identity. By inspection we find that the flow equation will
take the form
\begin{equation}
dj_{t}\left( X\right) =j_{t}\left( \left[ X,L\right] \right) S^{\ast
}dB\left( t\right) ^{\ast }+j_{t}\left( \left[ L,X\right] \right) SdB\left(
t\right) +j_{t}\left( \mathcal{L}^{\text{th}}X\right) dt
\end{equation}
if and only if we take $\left[
\begin{array}{cc}
S_{++} & S_{+-} \\
S_{-+} & S_{--}
\end{array}
\right] \equiv \left[
\begin{array}{cc}
S & 0 \\
0 & S^{\ast }
\end{array}
\right] $ - otherwise we obtain the other noise $\tilde{B}$ - and in which
case the Lindbladian is
\begin{eqnarray}
\mathcal{L}^{\text{th}}X &=&\frac{1}{2}\left[ L_{+}^{\ast },X\right] L_{+}+
\frac{1}{2}L_{+}^{\ast }\left[ X,L_{+}\right] +\frac{1}{2}\left[ L_{-}^{\ast
},X\right] L_{-}+\frac{1}{2}L_{-}^{\ast }\left[ X,L_{-}\right] -i[X,H] \nonumber \\
&=&\frac{n+1}{2}\left\{ \left[ L^{\ast },X\right] L+L^{\ast }\left[ X,L
\right] \right\} +\frac{n}{2}\left\{ \left[ L,X\right] L^{\ast }+L\left[
X,L^{\ast }\right] \right\} -i[X,H].
\end{eqnarray}
\subsection{The Series Product - Vacuum Inputs}
In \cite{GJ-Series} the authors introduce a rule for combining SLH models in
series. For instance, we have the output
of the $\mathbf{G}_{\mathscr{A}}\sim \left( S_{\mathscr{A}},L_{\mathscr{A}
},H_{\mathscr{A}}\right) $ fed instantaneously as input to $\mathbf{G}_{
\mathscr{B}}\sim \left( S_{\mathscr{B}},L_{\mathscr{B}},H_{\mathscr{B}
}\right) $ and it is shown that this is equivalent to the model generated by
\begin{eqnarray}
\mathbf{G}_{\mathscr{B}}\vartriangleleft \mathbf{G}_{\mathscr{A}} &\sim
&\left( S_{\mathscr{B}},L_{\mathscr{B}},H_{\mathscr{B}}\right)
\vartriangleleft \left( S_{\mathscr{A}},L_{\mathscr{A}},H_{\mathscr{A}
}\right) \notag \\
&=&\bigg(S_{\mathscr{B}}S_{\mathscr{A}},L_{\mathscr{B}}+S_{\mathscr{B}}L_{
\mathscr{A}},H_{\mathscr{A}}+H_{\mathscr{B}}+\mathrm{Im}\left\{ L_{
\mathscr{B}}^{\ast }S_{\mathscr{B}}L_{\mathscr{A}}\right\} \bigg).
\label{eq:series_prod}
\end{eqnarray}
Here $\mathrm{Im}\left\{ C\right\} $ means $\frac{1}{2i}\left( C-C^{\ast
}\right) $. We note that every model may be written as a purely scattering
component and a non-scattering component in series, since we have the law $
(S,L,H)=(I,L,H)\vartriangleleft (S,0,0)$.
We should remark that it is not necessary to view the two systems $
\mathscr{A}$ and $\mathscr{B}$ as separate systems - specifically, in the
derivation of the series product\cite{GJ-Series} it is not assumed that the $
\mathscr{A}$ and $\mathscr{B}$ operators need commute!
\subsection{Statement of the Problem}
If we wish to have a pair of systems $\mathscr{A}$ and $\mathscr{B}$ (both
accepting $d$ inputs) in series, then we obtain an equivalent Markov model
in the limit where the intervening connection is instantaneous. Let $L_{
\mathscr{A}}$ be the column of the $d$ operators $L_{\mathscr{A},k}$, $
k=1,\cdots , d$, and similar for system $\mathscr{B}$. The series product
says that the equivalent model has coupling $L_{\mathscr{A}}+L_{\mathscr{B}}$
and Hamiltonian
\begin{eqnarray}
H_{\mathscr{A}}+H_{\mathscr{B}}+\mathrm{Im}\left\{ L_{
\mathscr{B}}^{\ast }L_{\mathscr{A}}\right\} .
\end{eqnarray}
Suppose we were to apply the series product to two systems with the same
single thermal input $B$, and try and describe this as a series connection
using the two vacuum inputs $A_{+}$ and $A_{-}$. Naively applying the series
product to the construction in the $A_{\pm }$ format leads to the correct
rule $L_{\mathscr{A}}+L_{\mathscr{B}}$ for the coupling terms, but
\begin{equation}
H_{\mathscr{A}}+H_{\mathscr{B}}+\mathrm{Im}\left\{ L_{\mathscr{B}}^{\ast }L_{
\mathscr{A}}\right\} +n\mathrm{Im}\left[ L_{\mathscr{B}}^{\ast },L_{
\mathscr{A}}\right] .
\end{equation}
We have picked up an $n$-dependent term. For pure cascading, the systems $
\mathscr{A}$ and $\mathscr{B}$ are distinct and so $\left[ L_{\mathscr{B}
}^{\ast },L_{\mathscr{A}}\right] =0$. However, the series product should
also apply to the situation where the systems share degrees of freedom. In
such cases the additional term is physically unreasonable as it depends on
the state of the noise.
It is not immediately obvious what is wrong with the construction. Going to
the double Fock vacuum representations and then using the vacuum version of
the series product would seem a reasonable thing to do. However, a fully
quantum description would involve the $\tilde{B}$ fields as well, and at a
schematic level this would involve one or more Bogoliubov boxes --- something
conspicuously unphysical. We will give the correct procedure in this paper.
\section{Multi-Dimensional Gaussian Processes}
\subsection{Notation}
We will use the symbol $\triangleq $ to signify a defining equation. We will
denote the operations of complex conjugation, hermitean conjugation, and
more generally adjoint by *. For $X=[x_{ij}]$ an $n\times m$ array with
complex-valued entries, or more generally operator-valued entries, we write $
X^{\ast }$ for the $m\times n$ array obtained by transposition of the array
and conjugation of the entries: that is the $ij$ entry is $x_{ji}^{\ast }$.
The transpose alone will be denoted as $X^{\top }$, that is the $m\times n$
array with $ij$ entry $x_{ji}$. We will also use the notation $
X^{\#}=(X^{\top })^{\ast }$ which is the $n\times m$ array with $ij$ entry $
x_{ij}^{\ast }$.
\subsection{Finite Dimensional Gaussian States}
\label{sec:FD_Gaussian}Let $a_{1},\cdots ,a_{d}$ be the annihilation
operators for $d$ independent oscillators. We consider a mean zero Gaussian
state with second moments
\begin{equation}
n_{ij}=\langle a_{i}^{\ast }a_{j}\rangle ,\quad m_{ij}=\langle
a_{i}a_{j}\rangle ,
\end{equation}
which we assemble into a hermitean $d\times d$ matrix, $N$, with entries $
n_{ji}^{\ast }=n_{ij}$, and a symmetric $d\times d$ matrix, $M$, with
entries $m_{ij}=m_{ji}$. The \emph{covariance matrix} is
\begin{equation}
F=\left[
\begin{array}{cc}
I+N^{\top } & M \\
M^{\ast } & N
\end{array}
\right] .
\label{eq:cov}
\end{equation}
In order to yield mathematically correct variances, we must have both $F$
and $N$ positive. The vacuum state is characterized by having $N=M=0$, that
is
\begin{equation}
F_{\mathrm{vac}}\equiv \left[
\begin{array}{cc}
I & 0 \\
0 & 0
\end{array}
\right] .
\end{equation}
The covariance matrix $F$ defined by (\ref{eq:cov}) must be positive
semi-definite, as will be the matrices $N$ and $I+N^{\top }$. We must also
have ran$\left( M\right) \subseteq $ran$\left( I+N^{\top }\right) $ and $
MN^{-}M^{\ast }\leq I+N$, where $N^{-}$ is the Moore-Penrose inverse of $N$.
A linear transformation of the form
\begin{eqnarray}
\tilde{a}=Ua+Va^{\#},
\end{eqnarray}
that is $\tilde{a}_{j}=\sum_{k}\left( U_{jk}a_{k}+V_{jk}a_{k}^{\ast }\right)
$, is called a \textit{Bogoliubov transformation} if we have again the
canonical commutation relations for the transformed operators $\tilde{a}_{j}$.
The transformation $\tilde{a}=Ua+Va^{\#}$ is Bogoliubov if and only if the
following identities hold $UU^{\ast }=I+VV^{\ast },\quad UV^{\top }=VU^{\top
}.$
This is easily established by inspection, as are the following.
\begin{lemma}{Lemma}
Let $\tilde{a}=Ua+Va^{\#}$ be a Bogoliubov transformation, then the
covariance matrix for $\tilde{a}$ is
\begin{eqnarray}
\tilde{F}=WFW^{\ast }
\end{eqnarray}
where $W=\Delta \left( U,V\right) $. In particular, the new matrices are
\begin{eqnarray}
\tilde{N} &=&V^{\#}V^{\top }+V^{\#}N^{\top }V^{\top }+U^{\#}M^{\ast
}V^{\top } +V^{\#}MU^{\top }+U^{\#}NU^{\top }, \nonumber\\
\tilde{M} &=&UV^{\top }+UN^{\top }V^{\top }+VM^{\ast }V^{\top } +UM^{\ast }U^{\top }+VNU^{\top }.
\end{eqnarray}
\end{lemma}
\begin{lemma}{Lemma}
\label{Prop:W_vac}
Given $a_{\mathrm{vac}}$ with the choice of the vacuum state, the
Bogoliubov transformation $a=Ua_{\mathrm{vac}}+Va_{\mathrm{vac}}^{\#}$ leads to
operators with the covariance matrix
\begin{eqnarray}
F=WF_{\mathrm{vac}}W^{\ast }=\left[
\begin{array}{cc}
I+N^{\top } & M \\
M^{\ast } & N
\end{array}
\right]
\end{eqnarray}
where $W=\Delta \left( U,V\right) $ and
\begin{eqnarray}
N=V^{\#}V^{\top },\quad M=UV^{\top }.
\end{eqnarray}
\end{lemma}
We note that the determinant of the covariance matrix is preserved under
Bogoliubov transformations. In particular, if we have $F=WF_{\mathrm{vac}
}W^{\ast }$, as in the last Lemma, then $F$ must also be singular.
This means that if we wish to obtain a given covariance matrix $F$ for $d$
modes by a Bogoliubov transformation of vacuum modes, we will typically need
a larger number $D$ of these modes with $F$ being a sub-block of a
transformed matrix $WF_{\mathrm{vac}}W^{\ast }$. The example in the Theorem
shows that in order to obtain the $d=1$ covariance
\begin{eqnarray}
F=\left[
\begin{array}{cc}
1+n & 0 \\
0 & n
\end{array}
\right]
\end{eqnarray}
we need a Bogoliubov transformation of $D=2$ modes. We remark that we may
obtain the covariance
\begin{eqnarray}
F=\left[
\begin{array}{cc}
1+n & m \\
m^{\ast } & n
\end{array}
\right] ,
\end{eqnarray}
with the constraint $\left| m\right| ^{2}\leq n\left( n+1\right) $ ensuring
positivity, from 2 vacuum modes via \cite{HHKKR02,G_QWN_ME}
\begin{eqnarray}
\tilde{a}=\sqrt{n+1-\frac{1}{n}\left| m\right| ^{2}}a_{1}+\sqrt{n}
a_{2}^{\ast }+\frac{m}{\sqrt{n}}a_{2}. \label{eq:bog_m}
\end{eqnarray}
The maximal case $\left| m\right| ^{2}=n\left( n+1\right) $ may be obtained
from a \textit{single} mode $a_{1}$ via $a=\sqrt{n+1}a_{1}+e^{i\theta }\sqrt{
n}a_{1}^{\ast }$ where $m\equiv \sqrt{n\left( n+1\right) }e^{i\theta }$.
\subsection{Quantum Ito Calculus: Gaussian Noise}
One would like to extend this to non-vacuum inputs, in particular, those
with general flat power Gaussian states for the noise. (We restrict to a
single noise channel for transparency but the generalization is
straightforward enough.) It is possible to construct noises having the
following quantum It\={o} table
\begin{eqnarray}
dB_{i}dB_{j}^{\ast } &=&\left( n_{ji}+\delta _{ij}\right) dt,\quad
dB_{i}^{\ast }dB_{j}=n_{ij}dt, \notag \\
dB_{i}dB_{j} &=&m_{ij}dt,\quad dB_{i}^{\ast }dB_{j}^{\ast }=m_{ji}^{\ast }dt,
\label{eq:table_non_Fock}
\end{eqnarray}
where $N=[n_{ij}]$ and $M=[m_{ij}]$ have the same properties and constraints
as introduced above.
In reality, we are assuming that the fields $B_{j}\left( t\right) $
correspond to a representation on a double Fock space, say,
\begin{equation}
B(t)=U\left[
\begin{array}{c}
A_{+}\left( t\right) \otimes I \\
I\otimes A_{-}(t)
\end{array}
\right] +V\left[
\begin{array}{c}
A_{+}\left( t\right) ^{\#}\otimes I \\
I\otimes A_{-}(t)^{\#}
\end{array}
\right]
\end{equation}
where $A_{k}\left( t\right) =\left[
\begin{array}{c}
A_{k,1}\left( t\right) \\
\vdots \\
A_{k,d}\left( t\right)
\end{array}
\right] $ are copies of the Fock fields encountered above, and where $N=V^{\#}V^{\top },M=UV^{\top }$ as in Lemma
\ref{Prop:W_vac}.
The underlying mathematical problem is that we are trying to implement a
canonical transformation that is not inner \cite{partha,Shale,DG} --- specifically the various
representations for different pairs $\left( N,M\right) $ are not unitarily
equivalent.
Instead we must restrict to QSDE models in the general Gaussian case which
are driven by $B$ and $B^{\ast }$ only. We in fact find the class of QSDEs
\begin{equation}
dU\left( t\right) =\left\{ L_{k}dB_{k}^{\ast }\left( t\right) -L_{k}^{\ast
}dB_{k}\left( t\right) +K^{\left( N,M\right) }dt\right\} \,U\left( t\right)
\label{eq:QSDE_non_Fock1}
\end{equation}
generating unitaries and we now require that
\begin{equation}
K^{\left( N,M\right) }=-\frac{1}{2}(\delta _{ij}+n_{ji})L_{i}^{\ast }L_{j}-
\frac{1}{2}n_{ij}L_{i}L_{j}^{\ast }+\frac{1}{2}m_{ij}L_{i}^{\ast
}L_{j}^{\ast }+\frac{1}{2}m_{ji}^{\ast }L_{i}L_{j}-iH,
\end{equation}
with $H$ again self-adjoint.
Let us denote the conditional expectation from the algebra of
operators on the system-tensor-Fock Hilbert space down to the system
operators (i.e., the partial trace over the Gaussian state) as $\mathbb{E}_{\left(
N,M\right) }\left[ \cdot |\mathrm{sys}\right] $. As the differentials $dB_{k}\left( t\right) $ and $
dB_{k}\left( t\right) ^{\ast }$ are It\={o} (future pointing) their products
with adapted operators will have conditional expectation zero. Therefore
\begin{equation}
\mathbb{E}_{\left( N,M\right) }\left[ dU_{t}|\mathrm{sys}\right] =K^{\left(
N,M\right) }\,\mathbb{E}_{\left( N,M\right) }\left[ U_{t}|\mathrm{sys}\right]
\,dt
\end{equation}
and we deduce that
\begin{equation}
\mathbb{E}_{\left( N,M\right) }\left[ U_{t}|\mathrm{sys}\right]
=e^{tK^{\left( N,M\right) }}.
\end{equation}
The corresponding Heisenberg-Langevin equations are of the form
\begin{equation}
dj_{t}(X)=j_{t}(\left[ X,L_{k}\right] )dB_{k}^{\ast }+j_{t}(\left[
L_{k}^{\ast },X\right] )dB_{k}+j_{t}(\mathcal{L}^{\left( N,M\right) }X)dt
\end{equation}
where the new Lindbladian is
\begin{eqnarray}
\mathcal{L}^{\left( N,M\right) }X &=&\frac{1}{2}(\delta _{ij}+n_{ji})\big\{
L_{i}^{\ast }\left[ X,L_{j}\right] +\left[ L_{i}^{\ast },X\right] L_{j}\big\}
\notag \\
&&+\frac{1}{2}n_{ij}\big\{L_{i}\left[ X,L_{j}^{\ast }\right] +\left[ L_{i},X
\right] L_{j}^{\ast }\big\} \notag \\
&&-\frac{1}{2}m_{ij}\big\{L_{i}^{\ast }\left[ X,L_{j}^{\ast }\right] +\left[
L_{i}^{\ast },X\right] L_{j}^{\ast }\big\} \notag \\
&&-\frac{1}{2}m_{ji}^{\ast }\big\{L_{i}\left[ X,L_{j}\right] +\left[ L_{i},X
\right] L_{j}\big\}-i\left[ X,H\right] . \notag \\
&& \label{eq:Lindblad_non_Fock}
\end{eqnarray}
Likewise, we find that
\begin{equation}
\mathbb{E}_{\left( N,M\right) }\left[ j_{t}\left( X\right) |\mathrm{sys}
\right] =e^{t\mathcal{L}^{\left( N,M\right) }}X.
\end{equation}
A little algebra allows us to relate these to the vacuum expressions:
\begin{eqnarray}
K^{\left( N,M\right) } =K-\frac{1}{2}n_{ji}L_{i}^{\ast }L_{j}-\frac{1}{2}
n_{ij}L_{i}L_{j}^{\ast } +\frac{1}{2}m_{ij}L_{i}^{\ast }L_{j}^{\ast }+\frac{1
}{2}m_{ji}^{\ast }L_{i}L_{j}, \label{eq:K_form}
\end{eqnarray}
\begin{eqnarray}
\mathcal{L}^{\left( N,M\right) }X &=&\mathcal{L}X+\frac{1}{2}n_{ji}\big\{
L_{i}^{\ast }\left[ X,L_{j}\right] +\left[ L_{i}^{\ast },X\right] L_{j}\big\}
\notag \\
&&+\frac{1}{2}n_{ij}\big\{L_{i}\left[ X,L_{j}^{\ast }\right] +\left[ L_{i},X
\right] L_{j}^{\ast }\big\} \notag \\
&&-\frac{1}{2}m_{ij}\big\{L_{i}^{\ast }\left[ X,L_{j}^{\ast }\right] +\left[
L_{i}^{\ast },X\right] L_{j}^{\ast }\big\} \notag \\
&&-\frac{1}{2}m_{ji}^{\ast }\big\{L_{i}\left[ X,L_{j}\right] +\left[ L_{i},X
\right] L_{j}\big\} \notag \\
&\equiv &\mathcal{L}X+\frac{1}{2}n_{ji}\big\{\left[ L_{i}^{\ast },\left[
X,L_{j}\right] \right] +\left[ \left[ L_{i}^{\ast },X\right] ,L_{j}\right]
\big\} \notag \\
&&+\frac{1}{2}m_{ij}\left[ L_{j}^{\ast },\left[ L_{i}^{\ast },X\right] \right]
+\frac{1}{2}m_{ij}^{\ast }\left[ \left[ X,L_{i}\right] ,L_{j}\right] .
\notag \\
&& \label{eq:Lind_form}
\end{eqnarray}
\section{Representation-Free Form}
\label{Sec:Rep_Free} Returning to the problem stated in the Introduction, we
have that \textit{all} the $U_{t}^{\left( N,M\right) }$ arise from the \textit{same} physical dynamical evolution $U_{t}$,
and the dynamics should not depend on the state! The $U_{t}^{\left( N,M\right) }$ unfortunately belong to representations that are
\textit{not} generally unitarily equivalent! There should nevertheless be some sense in which the QSDEs for the
various $U_{t}^{\left( N,M\right) }$ are equivalent.
These QSDEs will depend explicitly on the state parameters $\left(
N,M\right) $ of the input field, but what we would like to do is to show
that there is nevertheless a representation-free version of each of these
QSDEs in each fixed representation.
We now show that there is a way of presenting the unitary (\ref
{eq:QSDE_non_Fock1}) and Heisenberg (\ref{eq:Lindblad_non_Fock}) QSDEs so as
to be independent of the state parameters $(N,M)$.
\begin{theorem}{Theorem}
\textbf{(Representation-Free Form)}
The non-Fock QSDEs (\ref{eq:QSDE_non_Fock1}) and (\ref{eq:Lindblad_non_Fock}
) may be written in the equivalent Stratonovich forms
\begin{eqnarray}
dU &=&dA_{k}^{\ast }\circ L_{k}U-L_{k}^{\ast }U\circ d A_{k}+KU\left( t\right) \circ dt, \label{eq:Strat_QSDE} \\
dj_{t}(X) &=& d A_{k}^{\ast }\circ j_{t}(\left[ X,L_{k}\right] )+j_{t}(
\left[ L_{k}^{\ast },X\right] )\circ d A_{k} +j_{t}(\mathcal{L}X)\circ dt, \label{eq:Strat_Heis}
\end{eqnarray}
respectively, where $K$ and $\mathcal{L}$ are the Fock representation
expressions (\ref{eq:K_Fock}) and (\ref{eq:Linblad_Fock}).
\end{theorem}
\begin{proof}
We first observe that
\begin{equation}
dB_{k}^{\ast }\circ L_{k}U=dB_{k}^{\ast }L_{k}U+\frac{1}{2}dB_{k}^{\ast
}L_{k}dU
\end{equation}
and substituting the QSDE (\ref{eq:QSDE_non_Fock1}) for $dU$ and using the
quantum It\={o} table (\ref{eq:table_non_Fock}) gives
\begin{equation}
dB_{k}^{\ast }\circ L_{k}U=L_{k}UdB_{k}^{\ast }+\frac{1}{2}L_{k}\left(
m_{kj}^{\ast }L_{j}-n_{kj}L_{j}^{\ast }\right) Udt,
\end{equation}
and similarly
\begin{equation}
-L_{k}^{\ast }U\circ dB_{k}=-L_{k}^{\ast }UdB_{k}-\frac{1}{2}L_{k}^{\ast
}dUdB_{k}=-L_{k}^{\ast }UdB_{k}-\frac{1}{2}L_{k}^{\ast }\left(
n_{jk}L_{j}-m_{jk}L_{j}^{\ast }\right) Udt.
\end{equation}
Combining these terms and using the identity (\ref{eq:K_form}) shows that (
\ref{eq:Strat_QSDE}) is equivalent to (\ref{eq:QSDE_non_Fock1}).
For the Heisenberg equation, we first note that
\begin{eqnarray}
dB_{k}^{\ast }\circ j_{t}(\left[ X,L_{k}\right] ) &=&dB_{k}^{\ast }j_{t}(
\left[ X,L_{k}\right] )+\frac{1}{2}dB_{k}^{\ast }dj_{t}(\left[ X,L_{k}\right]
) \nonumber\\
&=&j_{t}(\left[ X,L_{k}\right] )dB_{k}^{\ast } \nonumber\\
&&+\frac{1}{2}dB_{k}^{\ast }
\bigg\{j_{t}(\left[ \left[ X,L_{k}\right] ,L_{j}\right] )dB_{j}^{\ast
}+j_{t}\left( \left[ L_{j}^{\ast },\left[ X,L_{k}\right] \right] \right)
dB_{j}\bigg\} \nonumber\\
&=&j_{t}(\left[ X,L_{k}\right] )dB_{k}^{\ast }+j_{t}\big(\frac{1}{2}
m_{kj}^{\ast }\big[\left[ X,L_{k}\right] ,L_{j}\big]+\frac{1}{2}n_{kj}\left[
L_{j}^{\ast },\left[ X,L_{k}\right] \right] \big)dt, \nonumber \\
\quad
\end{eqnarray}
and similarly
\begin{equation}
j_{t}(\left[ L_{k}^{\ast },X\right] )\circ dB_{k}=j_{t}(\left[ L_{k}^{\ast
},X\right] )dB_{k}+j_{t}\bigg(\frac{1}{2}n_{jk}\big[\left[ L_{k}^{\ast },X
\right] ,L_{j}\big]+\frac{1}{2}m_{jk}\big[ L_{j}^{\ast },\left[ L_{k}^{\ast
},X\right] \big]\bigg)dt.
\end{equation}
Combining these terms and using the identity (\ref{eq:Lind_form}) shows that
(\ref{eq:Strat_Heis}) is equivalent to (\ref{eq:Lindblad_non_Fock}).
\end{proof}
Note that in both equations (\ref{eq:Strat_QSDE}) and (\ref{eq:Strat_Heis})
the Stratonovich differentials occur in Wick order relative to the integrand
terms. What is remarkable about these relations is that they are structurally the same as the Fock vacuum form of the QSDEs with $S=I$.
We say that the equations (\ref{eq:Strat_QSDE}) and (\ref{eq:Strat_Heis})
are \textit{representation-free} in the sense that they do not depend on the
parameters $N$ and $M$ determining the state of the noise.
\section{White Noise Description}
We now present a more formal, but insightful account of quantum stochastic
processes. Consider a collection of quantum noise input processes $
\{b_{k}\left( t\right) :t\in \mathbb{R},k=1,\cdots ,d\}$ obeying the
commutation relations
\begin{equation}
\left[ b_{j}\left( t\right) ,b_{k}^{\ast }\left( s\right) \right] =\delta
\left( t-s\right) ,\qquad \left[ b_{j}^{\ast }\left( t\right) ,b_{k}^{\ast
}\left( s\right) \right] =\left[ b_{j}\left( t\right) ,b_{k}\left( s\right)
\right] =0.
\end{equation}
We wish to model the interaction of a quantum mechanical system driven by
these processes, and to this end introduce a unitary dynamics given by
\begin{equation}
U\left( t\right) =\vec{\mathbf{T}}\exp \left\{ -i\int_{0}^{t}\Upsilon
_{s}ds\right\}
\end{equation}
where (with an implied summation convention with range 1,$\cdots ,d$)
\begin{equation}
-i\Upsilon _{t}=L_{k}\otimes b_{k}^{\ast }\left( t\right) -L_{k}^{\ast
}\otimes b_{k}\left( t\right) -iH\otimes I.
\end{equation}
Here $L_{k}$ and $H=H^{\ast }$ are system operators. The time ordering $\vec{
\mathbf{T}}$ is understood in the usual sense of a Dyson series expansion.
From this we may arrive at
\begin{equation}
\dot{U}\left( t\right) =L_{k}b_{k}^{\ast }\left( t\right) U\left( t\right)
-L_{k}^{\ast }b_{k}\left( t\right) U\left( t\right) -iHU\left( t\right) .
\label{eq:SCHROd}
\end{equation}
We claim that $U\left( t\right) $ should correspond to the evolution
operator for $\mathbf{G}\sim \left( S=I,L,H\right) $ without due reference
to a particular state for the noise. If we fix the state, say the vacuum,
then we use Wick ordering to compute the partial expectations with respect
to that state.
To see how to proceed, let us consider a general quantum stochastic integral
$X\left( t\right) $ described by a formal equation
\begin{equation}
\dot{X}\left( t\right) =b_{j}\left( t\right) ^{\ast }x_{jk}\left( t\right)
b_{k}\left( t\right) +b_{j}\left( t\right) ^{\ast }x_{j0}\left( t\right)
+x_{0k}\left( t\right) b_{k}\left( t\right) +x_{00}\left( t\right) .
\label{eq:wn_qsi}
\end{equation}
where the terms $x_{\alpha \beta }\left( t\right) $ are ``adapted'' in the
formal sense that they do not depend on the noises $b_{k}\left( s\right) $
for $s>t$. As we are talking about the vacuum representation for the time
being, we can bootstrap from the vacuum $|\Omega \rangle $ to
construct the Fock space as the completion of the span of all vectors of the
type $\int f_{k\left( 1\right) }\left( t_{1}\right) b_{k\left( 1\right)
}\left( t_{1}\right) ^{\ast }\cdots f_{k\left( n\right) }\left( t_{n}\right)
b_{k\left( n\right) }\left( t_{n}\right) ^{\ast }|\Omega \rangle $, and
moreover we can build up the domain of exponential vectors. We quickly see
that (\ref{eq:wn_qsi}), with Wick ordered right hand side, corresponds to
the QSDE
\begin{equation}
dX\left( t\right) =x_{jk}\left( t\right) d\Lambda _{jk}\left( t\right)
+x_{j0}\left( t\right) dB_{j}\left( t\right) ^{\ast }+x_{0k}\left( t\right)
dB_{k}\left( t\right) +x_{00}\left( t\right) dt.
\end{equation}
Our issue however is how do we put to Wick order a given expression, for
instance, the right hand side of (\ref{eq:SCHROd}).
\begin{proposition}{Proposition}
For the process $X\left( t\right) $ described by (\ref{eq:wn_qsi}), we have
\begin{eqnarray}
b_{k}\left( t\right) X\left( t\right) &=&X\left( t\right) b_{k}\left(
t\right) +\frac{1}{2}x_{kl}\left( t\right) b_{l}\left( t\right) +\frac{1}{2}
x_{k0}\left( t\right) , \notag \\
X\left( t\right) b_{k}\left( t\right) ^{\ast } &=&b_{k}\left( t\right)
^{\ast }X\left( t\right) +\frac{1}{2}b_{j}\left( t\right) ^{\ast
}x_{j0}\left( t\right) +\frac{1}{2}x_{0k}\left( t\right) .
\label{eq:wn_Strat}
\end{eqnarray}
\end{proposition}
We may justify this as follows:
\begin{eqnarray}
\left[ b_{k}\left( t\right) ,X\left( t\right) \right] &=&\int_{0}^{t}\left[
b_{k}\left( t\right) ,\dot{X}\left( s\right) \right] ds=\int_{0}^{t}\delta
\left( t-s\right) \left\{ x_{kl}\left( s\right) b_{l}\left( s\right)
+x_{k0}\left( s\right) \right\} ds \nonumber \\
&=&\frac{1}{2}x_{kl}\left( t\right) b_{l}\left( t\right) +\frac{1}{2}
x_{k0}\left( t\right)
\end{eqnarray}
with the factor of $\frac{1}{2}$ coming from the half-contribution of the $
\delta $-function. Evidently what the equations in (\ref{eq:wn_Strat})
correspond to is our definition of a Stratonovich differential - at least
for the Fock vacuum representation. While we can make a connection between (
\ref{eq:wn_qsi}) and the rigorously defined Hudson-Parthasarathy processes,
it should be appreciated at the very least that (\ref{eq:wn_Strat}) is the
correct mnemonic for doing the Wick ordering - an attempt to convert into a
Dyson-type series expansion and Wick ordering under the iterated integral
signs to get a Maassen-Meyer kernel expansion shows this. At work here is an
old principle that ``It\^{o}’s formula is the chain rule with Wick
ordering'' \cite{HS}. Let us now examine
(\ref{eq:SCHROd}) and put it to Wick ordered form. By a similar argument,
we have
\begin{equation}
\left[ b_{k}\left( t\right) ,U\left( t\right) \right] =-i\int_{0}^{t}\left[
b_{k}\left( t\right) ,\Upsilon _{s}\right] U\left( s\right)
ds\equiv \frac{1}{2}L_{k}U\left( t\right) ,
\end{equation}
or $b_{k}\left( t\right) U\left( t\right) =U\left( t\right) b_{k}\left(
t\right) +\frac{1}{2}L_{k}U\left( t\right) $. By means of this we may place (
\ref{eq:SCHROd}) into the Wick-ordered form
\begin{equation}
\dot{U}\left( t\right) =L_{k}b_{k}^{\ast }\left( t\right) U\left( t\right)
-L_{k}^{\ast }U\left( t\right) b_{k}\left( t\right) -(\frac{1}{2}L_{k}^{\ast
}L_{k}+iH)U\left( t\right) ,
\end{equation}
and picking up the correct vacuum damping (\ref{eq:K_Fock}), $K$, as a result.
Setting $X_{t}=U\left( t\right) ^{\ast }(X\otimes I)U\left( t\right) $, the same
Wick ordering rule can be applied to the Heisenberg equations to obtain
\begin{equation}
\dot{X}_{t}=\left\{ b_{k}^{\ast }\left( t\right) +\frac{1}{2}L_{k,t}^{\ast
}\right\} \left[ X,L_{k}\right] _{t}+\left[ L_{k}^{\ast },X\right]
_{t}\left\{ b_{k}\left( t\right) +\frac{1}{2}L_{k,t}\right\} +\frac{1}{i}
U\left( t\right) \left[ X,H\right] U\left( t\right) .
\end{equation}
Here we use the notation $L_{k,t}=U\left( t\right) ^{\ast }(L_{k}\otimes
I)U\left( t\right) $, etc.
We also remark that we may define the corresponding \textit{output fields}
by
\begin{eqnarray}
b^{\mathrm{out}}_k (t) \triangleq U^\ast_T \, b_k(t) \, U_T,
\end{eqnarray}
where $T>t$. One may show that the input-output relations are
\begin{eqnarray}
b^{\mathrm{out}}_k (t) \equiv b_k (t) + L_{k,t}. \label{eq:i-o}
\end{eqnarray}
If, on the other hand, we want the state of the noise to be a mean-zero
Gaussian with correlations, say
\begin{equation}
\left\langle b_{j}\left( t\right) ^{\ast }b_{k}\left( s\right) \right\rangle
=n_{jk}\,\delta \left( t-s\right) ,\quad \left\langle b_{j}\left( t\right)
b_{k}\left( s\right) \right\rangle =m_{jk}\,\delta \left( t-s\right) ,
\label{eq:cov_flat}
\end{equation}
then we represent the noise as
\begin{equation}
b_{j}\left( t\right) =U_{jk}a_{+,k}\left( t\right) +V_{jk}a_{-,k}\left(
t\right) ^{\ast } \label{eq:wn_bog}
\end{equation}
employing a suitable Bogoliubov transformation. Here we now have double the
number of quantum white noises $a_{+,k}$ and $a_{-,k}$ but these are
represented as Fock processes.
If we now substitute (\ref{eq:wn_bog}) into (\ref{eq:SCHROd}) we see
explicitly that the $a_{\pm ,k}$ are out of Wick order, but this can be
rectified by the same sort of manipulation as above. Once the $a_{\pm
,k}\left( t\right) $ are Wick ordered, we have an equation which we can
interpret as the It\^{o} non-Fock QSDE, and this leads to the correct
expressions $K^{\left( N,M\right) }$ and $\mathcal{L}^{\left( N,M\right) }$
in the unitary and flow equations respectively.
Given a Gaussian state $\left\langle \cdot \right\rangle $ on the noise, we
may introduce a conditional expectation according to $\mathbb{E}\left[ \cdot
|\mathrm{sys}\right] :A\otimes B\mapsto \left\langle B\right\rangle \,A$.
For instance, $\mathbb{E}\left[ U\left( t\right) |\mathrm{sys}\right] $ then
defines a contraction on the system Hilbert space and we have
\begin{equation}
\mathbb{E}\left[ U\left( t\right) |\mathrm{sys}\right] =I_{\mathrm{sys}
}+\sum_{n\geq 1}\left( -i\right) ^{n}\int_{\Delta _{n}\left( t\right) }
\mathbb{E}\left[ \Upsilon _{s_{n}}\cdots \Upsilon _{s_{1}}|\mathrm{sys}
\right] .
\end{equation}
Now the expression $\mathbb{E}\left[ \Upsilon _{s_{n}}\cdots \Upsilon
_{s_{1}}|\mathrm{sys}\right] $ will be a sum of products of the operators $
L,-L^{\ast }$ and $H$ times an $n$-point function in the fields. Similarly,
we obtain a reduced Heisenberg equation. To compute these averages we need
to be able to calculate $n$-point functions of chronologically ordered
Gaussian fields - this is the realm of Wick's Theorem, so what we have
presented may be interpreted as a Gaussian Wick's Theorem \cite{Evans_Steer}. We of course recover the
partial traces appearing in the previous section.
\section{Approximate Signal Generator for Thermal States}
In this section we show how to go from a general SLH model driven by
the output of a Degenerate Parametric Amplifier (DPA) to the limit where the same SLH model is driven by a
thermal white noise. We start with the single channel for simplicity.
\subsection{The Thermal White Noise as Idealization of the Output of a
Degenerate Parametric Amplifier}
We now show that in the strong coupling limit the output of a degenerate
parametric amplifier approximates a thermal white noise. The model consists
of a system of two cavities modes $c_{+}$ and $c_{-}$ coupled to input
processes $A_{+}\left( t\right) $ and $A_{-}\left( t\right) $ respectively.
Both inputs are taken to be in the vacuum state and the Schr\"{o}dinger
equation is
\begin{equation}
dU_{t}=\sum_{i=+,-}L_{i}U_{t}\,dA_{i}\left( t\right) ^{\ast
}-\sum_{i=+,-}L_{i}^{\ast }U_{t}\,dA_{i}\left( t\right) -iH_{
\mathrm{amp}}U_{t}\,dt,
\end{equation}
with initial condition $U_{0}=I$ and
\begin{equation}
L_{+}=\sqrt{2\kappa k}c_{+},\quad L_{-}=\sqrt{2\kappa k}c_{-}\text{ and }H_{
\mathrm{amp}}=\frac{\varepsilon k}{i}\left( c_{+}c_{-}-c_{+}^{\ast
}c_{-}^{\ast }\right) .
\end{equation}
Here $\varepsilon >\kappa $ and $k>0$ is a scaling parameter which we
eventually model to be large. It is more convenient to work with the white noises
$a_{\pm }\left( t\right) $.
The model is linear and we obtain the input-output relations in the Laplace
domain to be \cite{GJN_squeeze}
\begin{equation}
\left[
\begin{array}{c}
b\left[ s\right] \\
\tilde{b}\left[ s\right]
\end{array}
\right] =\Xi _{-}^{\left( k\right) }\left( s\right) \left[
\begin{array}{c}
a_{+}\left[ s\right] \\
a_{-}\left[ s\right]
\end{array}
\right] +\Xi _{+}^{\left( k\right) }\left( s\right) \left[
\begin{array}{c}
a_{+}\left[ s\right] \\
a_{-}\left[ s\right]
\end{array}
\right]
\end{equation}
where $\Xi _{-}^{\left( k\right) }\left( s\right) =\left[
\begin{array}{cc}
u\left( s/k\right) & 0 \\
0 & u\left( s/k\right)
\end{array}
\right] ,\quad \Xi _{+}^{\left( k\right) }\left( s\right) =\left[
\begin{array}{cc}
0 & v\left( s/k\right) \\
v\left( s/k\right) & 0
\end{array}
\right] $ with the functions $u\left( s\right) =\frac{s^{2}-\kappa
^{2}-\varepsilon ^{2}}{s^{2}+2s\kappa +\kappa ^{2}-\varepsilon ^{2}},\quad
v\left( s\right) =\frac{2\kappa \varepsilon }{s^{2}+2s\kappa +\kappa
^{2}-\varepsilon ^{2}}$.
In the limit $k\rightarrow \infty $ we find the static ($s$-independent)
coefficients
\begin{equation}
\lim_{k\rightarrow \infty }\Xi _{-}^{\left( k\right) }\left( s\right) =\frac{
\varepsilon ^{2}+\kappa ^{2}}{\varepsilon ^{2}-\kappa ^{2}}\left[
\begin{array}{cc}
1 & 0 \\
0 & 1
\end{array}
\right] ,\quad \lim_{k\rightarrow \infty }\Xi _{+}^{\left( k\right) }\left(
s\right) =\frac{2\varepsilon \kappa }{\varepsilon ^{2}-\kappa ^{2}}\left[
\begin{array}{cc}
0 & 1 \\
1 & 0
\end{array}
\right] .
\end{equation}
and returning to the time domain, the limit output fields are just a
Bogoliubov transform of the inputs
\begin{equation}
b\left( t\right) =\sqrt{n+1}a_{+}\left( t\right) +\sqrt{n}a_{-}\left(
t\right) ^{\ast },\quad \tilde{b}\left( t\right) =\sqrt{n}a_{+}\left(
t\right) ^{\ast }+\sqrt{n+1}a_{-}\left( t\right) .
\end{equation}
Here the parameter $n$ is given by $n=\left( \frac{2\varepsilon \kappa }{
\varepsilon ^{2}-\kappa ^{2}}\right) ^{2}.$
It is instructive to look closely at the finite $k$ equations. We have the
Heisenberg equations
\begin{eqnarray}
\dot{c}_{+}\left( t\right) &=&-k\kappa c_{+}\left( t\right) +k\varepsilon
c_{-}\left( t\right) ^{\ast }-\sqrt{2\kappa k}a_{+}\left( t\right) , \nonumber \\
\dot{c}_{-}\left( t\right) &=&-k\kappa c_{-}\left( t\right) +k\varepsilon
c_{+}\left( t\right) ^{\ast }-\sqrt{2\kappa k}a_{-}\left( t\right) ,
\end{eqnarray}
and for $k$ large we may ignore the $\dot{c}_{+}\left( t\right) $ and $
\dot{c}_{-}\left( t\right) $ terms leaving a pair of simultaneous equations
which we may solve to get
\begin{eqnarray}
\sqrt{k}c_{+}\left( t\right) &\simeq &\frac{\sqrt{2\kappa }}{\varepsilon
^{2}-\kappa ^{2}}\left[ \kappa a_{+}\left( t\right) +\varepsilon a_{-}\left(
t\right) ^{\ast }\right] ,\nonumber \\
\sqrt{k}c_{-}\left( t\right) &\simeq &\frac{
\sqrt{2\kappa }}{\varepsilon ^{2}-\kappa ^{2}}\left[ \kappa a_{-}\left(
t\right) +\varepsilon a_{+}\left( t\right) ^{\ast }\right] .
\label{eq:approx_eq}
\end{eqnarray}
The output is then
\begin{eqnarray}
b\left( t\right) &=&a_{+}\left( t\right) +\sqrt{2\kappa k}c_{+}\left(
t\right) \simeq a_{+}\left( t\right) +\frac{2\kappa }{\varepsilon
^{2}-\kappa ^{2}}\left[ \kappa a_{+}\left( t\right) +\varepsilon a_{-}\left(
t\right) ^{\ast }\right] \nonumber \\
&\equiv &\sqrt{n+1}a_{+}\left( t\right) +\sqrt{n}a_{-}\left( t\right) ^{\ast },
\end{eqnarray}
and likewise
\begin{eqnarray}
\tilde{b}\left( t\right) &=&a_{-}\left( t\right) +\sqrt{2\kappa k}
c_{-}\left( t\right) \simeq a_{-}\left( t\right) +\frac{2\kappa }{
\varepsilon ^{2}-\kappa ^{2}}\left[ \kappa a_{-}\left( t\right) +\varepsilon
a_{+}\left( t\right) ^{\ast }\right] \nonumber \\
&\equiv &\sqrt{n}a_{+}\left( t\right) ^{\ast }+\sqrt{n+1}a_{-}\left( t\right) .
\end{eqnarray}
It is relatively straightforward to find a multi-dimensional version of this
for a general Bogoliubov transformation
\begin{equation}
\left[
\begin{array}{c}
b\left( t\right) \\
\tilde{b}\left( t\right)
\end{array}
\right] =U\left[
\begin{array}{c}
a_{+}\left( t\right) \\
a_{-}\left( t\right)
\end{array}
\right] +V\left[
\begin{array}{c}
a_{+}\left( t\right) \\
a_{-}\left( t\right)
\end{array}
\right] .
\end{equation}
\subsection{Cascade Approximation}
The DPA is described by
\begin{equation}
\mathbf{G}_{DPA}\sim \left( \left[
\begin{array}{cc}
1 & 0 \\
0 & 1
\end{array}
\right] ,\left[
\begin{array}{c}
\sqrt{2\kappa k}c_{+} \\
\sqrt{2\kappa k}c_{-}
\end{array}
\right] ,H_{\mathrm{amp}}\right)
\end{equation}
driven by the (vacuum) input pair $\left[
\begin{array}{c}
a_{+}\left( t\right) \\
a_{-}\left( t\right)
\end{array}
\right] $. It is then put in series with
\begin{equation}
\mathbf{G}\sim \left( S,L,H\right) \boxplus \left( 1,0,0\right) =\left(
\left[
\begin{array}{cc}
S & 0 \\
0 & 1
\end{array}
\right] ,\left[
\begin{array}{c}
L \\
0
\end{array}
\right] ,H\right)
\end{equation}
which means that the output $a_{+}\left( t\right) $ is fed in as input to
the system $\mathbf{G}\sim \left( S,L,H\right) $ and $a_{-}\left( t\right) $
is left to go away unhindered, $\mathbf{G}_{\mathrm{trivial}}\sim \left(
1,0,0\right) $. According to the series product rule, the DPA and system
in series are described by
\begin{equation}
\mathbf{G}\vartriangleleft \mathbf{G}_{DPA}\sim \bigg(\left[
\begin{array}{cc}
S & 0 \\
0 & 1
\end{array}
\right] ,\left[
\begin{array}{c}
L+S\sqrt{2\kappa k}c_{+} \\
\sqrt{2\kappa k}c_{-}
\end{array}
\right] ,H+H_{\mathrm{amp}}+\frac{\sqrt{\kappa k}}{\sqrt{2}i}\left( L^{\ast
}Sc_{+}-c_{+}^{\ast }S^{\ast }L\right) \bigg).
\end{equation}
From this we obtain the Heisenberg equations
\begin{eqnarray}
\dot{X}_{t} &=&a_{+}\left( t\right) ^{\ast }\left( S^{\ast }XS-X\right)
_{t}a_{+}\left( t\right) +a_{+}\left( t\right) ^{\ast }S_{t}^{\ast }\left[
X,L\right] _{t}+\left[ L^{\ast },X\right] _{t}S_{t}a_{+}\left( t\right) \nonumber \\
&&+\frac{1}{2}\left[ L^{\ast },X\right] _{t}\left( L+S\sqrt{2\kappa k}
c_{+}\right) _{t}+\frac{1}{2}\left( L+S\sqrt{2\kappa k}c_{+}\right)
_{t}^{\ast }\left[ X,L\right] _{t} \nonumber \\
&&-i\left[ X,H+\frac{\sqrt{2\kappa k}}{2i}\left( L^{\ast }Sc_{+}-c_{+}^{\ast
}S^{\ast }L\right) \right] _{t}.
\end{eqnarray}
We now make the approximation $\sqrt{k}c_{+}\left( t\right) \simeq \frac{
\sqrt{2\kappa }}{\varepsilon ^{2}-\kappa ^{2}}\left[ \kappa a_{+}\left(
t\right) +\varepsilon a_{-}\left( t\right) ^{\ast }\right] $ which leads to
\begin{eqnarray}
\dot{X}_{t} &\simeq &a_{+}\left( t\right) ^{\ast }\left( S^{\ast
}XS-X\right) _{t}a_{+}\left( t\right) +a_{+}\left( t\right) ^{\ast
}S_{t}^{\ast }\left[ X,L\right] _{t}+\left[ L^{\ast },X\right]
_{t}S_{t}a_{+}\left( t\right) +\mathcal{L}\left( X\right) _{t} \nonumber \\
&&+\left\{ \left[ L^{\ast },X\right] _{t}S_{t}+\frac{1}{2}L_{t}^{\ast }\left[
S,X\right] _{t}\right\} \left[ \left( \sqrt{n+1}-1\right) a_{+}\left(
t\right) +\sqrt{n}a_{-}\left( t\right) ^{\ast }\right] \nonumber \\
&&+\left[ \left( \sqrt{n+1}-1\right) a_{+}\left( t\right) ^{\ast }+\sqrt{n}
a_{-}\left( t\right) \right] \left\{ S_{t}^{\ast }\left[ X,L\right] _{t}+
\frac{1}{2}\left[ X,S^{\ast }\right] _{t}L_{t}\right\} .
\end{eqnarray}
Here we have $n=\left( \frac{2\varepsilon \kappa }{\varepsilon
^{2}-\kappa ^{2}}\right) ^{2}$, as before.
We now make a key assumption: \textbf{the scattering term} $S$ \textbf{
corresponds to a static element}. In this case $S\equiv e^{i\theta }$ for
some real $\theta $. The limit Heisenberg equation therefore simplifies to
\begin{eqnarray}
\dot{X}_{t} &=&a_{+}\left( t\right) ^{\ast }S^{\ast }\left[ X,L\right] _{t}+
\left[ L^{\ast },X\right] _{t}Sa_{+}\left( t\right) +\mathcal{L}\left(
X\right) _{t} \nonumber \\
&&+\left[ L^{\ast },X\right] _{t}S\left[ \left( \sqrt{n+1}-1\right)
a_{+}\left( t\right) +\sqrt{n}a_{-}\left( t\right) ^{\ast }\right] \nonumber \\
&&+\left[
\left( \sqrt{n+1}-1\right) a_{+}\left( t\right) ^{\ast }+\sqrt{n}a_{-}\left(
t\right) \right] S^{\ast }\left[ X,L\right] _{t} \nonumber \\
&=&\sqrt{n+1}a_{+}\left( t\right) ^{\ast }S^{\ast }\left[ X,L\right] _{t}+
\sqrt{n+1}\left[ L^{\ast },X\right] _{t}Sa_{+}\left( t\right) \nonumber \\
&&+\sqrt{n}\left[ L^{\ast },X\right] _{t}Sa_{-}\left( t\right) ^{\ast }+
\sqrt{n}a_{-}\left( t\right) S^{\ast }\left[ X,L\right] _{t}+\mathcal{L}
\left( X\right) _{t}.
\end{eqnarray}
We are not quite finished as the operators $a_{-}\left( t\right) $ and $
a_{-}\left( t\right) ^{\ast }$ are out of Wick order. However, this is easily
remedied. For instance, we easily deduce that
\begin{eqnarray}
\left[ Y_{t},a_{-}\left( t\right) ^{\ast }\right] &=&\int_{0}^{t}\left[
\dot{Y}_{s},a_{-}\left( t\right) ^{\ast }\right] ds \nonumber \\
&=&\int_{0}^{t}\left[ \sqrt{n}a_{-}\left( s\right) S^{\ast }\left[ Y,L\right]
_{s},a_{-}\left( t\right) ^{\ast }\right] ds \nonumber \\
&=&\frac{1}{2}\sqrt{n}S^{\ast }\left[ Y,L\right] _{t}
\end{eqnarray}
so that we arrive at
\begin{equation}
\left[ L^{\ast },X\right] _{t}Sa_{-}\left( t\right) ^{\ast }=a_{-}\left(
t\right) ^{\ast }\left[ L^{\ast },X\right] _{t}S+\frac{1}{2}\sqrt{n}\left[
\left[ L^{\ast },X\right] ,L\right] _{t}.
\end{equation}
Similarly $\left[ a_{-}\left( t\right) ,Y_{t}\right] =\frac{1}{2}\sqrt{n}
\left[ L^{\ast },Y\right] _{t}$ and therefore we get the Wick re-ordering
\begin{eqnarray}
a_{-}\left( t\right) S^{\ast }\left[ X,L\right] _{t}=S^{\ast }\left[ X,L
\right] _{t}a_{-}\left( t\right) +\frac{1}{2}\sqrt{n}\left[ L^{\ast },\left[
X,L\right] \right] _{t}.
\end{eqnarray}
This leads to the form of the quantum white noise equation with both $a_{+}$
and $a_{-}$ Wick ordered as
\begin{eqnarray}
\dot{X}_{t} &=&\sqrt{n+1}a_{+}\left( t\right) ^{\ast }S^{\ast }\left[ X,L
\right] _{t}+\sqrt{n+1}\left[ L^{\ast },X\right] _{t}Sa_{+}\left( t\right)
\notag \\
&&+\sqrt{n}a_{-}\left( t\right) ^{\ast }\left[ L^{\ast },X\right] _{t}S+
\sqrt{n}S^{\ast }\left[ X,L\right] _{t}a_{-}\left( t\right) \notag \\
&&+\mathcal{L}\left( X\right) _{t}+\frac{1}{2}n\left[ \left[ L^{\ast },X
\right] ,L\right] _{t}+\frac{1}{2}n\left[ L^{\ast },\left[ X,L\right] \right]
_{t}. \label{eq:approx_Heis}
\end{eqnarray}
At this stage we recognize (\ref{eq:approx_Heis}) as the equivalent form of
the Heisenberg quantum stochastic differential equation for thermal noise.
We also remark that the output process determined by systems in series is $
B^{\mathrm{out}}\left( t\right) =U_{t}^{\ast }A_{+}\left( t\right) U_{t}$,
and from the quantum stochastic calculus we have
\begin{equation}
dB^{\mathrm{out}}\left( t\right) =dA_{+}\left( t\right) +\left( L+S\sqrt{
2\kappa k}c_{+}\right) _{t}dt.
\end{equation}
Using (\ref{eq:approx_eq}) we approximate this as
\begin{equation}
dB^{\mathrm{out}}\left( t\right) \simeq dA_{+}\left( t\right) +L_{t}dt+S
\frac{2\kappa }{\varepsilon ^{2}-\kappa ^{2}}\left[ \kappa dA_{+}\left(
t\right) +\varepsilon dA_{-}\left( t\right) ^{\ast }\right] \equiv SdB^{
\mathrm{in}}\left( t\right) +L_{t}dt,
\end{equation}
that is, the thermal input $B^{\mathrm{in}}\left( t\right) =\sqrt{n+1}
A_{+}\left( t\right) +\sqrt{n}A_{-}\left( t\right) ^{\ast }$ produces the
output $B^{\mathrm{out}}\left( t\right) $ according to the usual rules one
would expect of a quantum Markov component with the parameters $\mathbf{G}
\sim \left( S,L,H\right) $.
Therefore the description of a component with the parameters $\mathbf{G}\sim
\left( S,L,H\right) $, at least in the case where $S$ is a static
beam-splitter matrix, with Gaussian input processes may be considered as the
same component cascaded with a degenerate parametric amplifier with vacuum
inputs in the singular coupling limit of the DPA.
\section{The General Series Product}
\subsection{\label{Sec:NS_SP}Without Scattering}
Let us now consider the situation where a Gaussian input $B_{\mathrm{in}}=B_{
\mathrm{in}}^{\left( \mathscr{A}\right) }$ is driving a system with SLH
parameters $\left( I,L_{\mathscr{A}},H_{\mathscr{A}}\right) $ and that its
output $B_{\mathrm{out}}^{\left( \mathscr{A}\right) }$ acts as input $B_{
\mathrm{in}}^{\left( \mathscr{B}\right) }$ to a second system $\left( I,L_{
\mathscr{B}},H_{\mathscr{B}}\right) $. (We do not assume that any of the
various SLH operators commute!)
\textbf{(Components in Series: The no scattering case)} The Heisenberg QSDE
for the systems $\left( I,L_{\mathscr{A}},H_{\mathscr{A}}\right) $ and $
\left( I,L_{\mathscr{B}},H_{\mathscr{B}}\right) $ given by
\begin{equation}
dj_{t}(X)=\sum_{\mathscr{S}=\mathscr{A},\mathscr{B}}\bigg\{dB_{\mathrm{in}
}^{\left( \mathscr{S}\right) \ast }\circ j_{t}(\left[ X,L_{\mathscr{S}}
\right] )+j_{t}(\left[ L_{\mathscr{S}}^{\ast },X\right] )\circ dB_{\mathrm{in
}}^{\left( \mathscr{S}\right) }+j_{t}(\mathcal{L}_{\mathscr{S}}X)\circ dt
\bigg\},
\end{equation}
where
\begin{equation}
\mathcal{L}_{\mathscr{S}}X=\frac{1}{2}L_{\mathscr{S}}^{\ast }\left[ X,L_{
\mathscr{S}}\right] +\frac{1}{2}\left[ L_{\mathscr{S}}^{\ast },X\right] L_{
\mathscr{S}}-i\left[ X,H_{\mathscr{S}}\right] .
\end{equation}
and we have the constraints $B_{\mathrm{in}}^{\left( \mathscr{A}\right) }=B_{
\mathrm{in}}$ and $dB_{\mathrm{in}}^{\left( \mathscr{B}\right) }=dB_{\mathrm{
in}}^{\left( \mathscr{A}\right) }+j_{t}\left( L_{\mathscr{A}}\right) dt$,
consistent with $B_{\mathrm{in}}$ driving system $\mathscr{A}$ which in turn
drives $\mathscr{B}$, corresponds to the dynamics given by the intrinsic
series product (\ref{eq:series_prod}).
\begin{proof}
We have to show consistency of the quantum stochastic Heisenberg evolution $
j_{t}(\cdot )$. To this end we take the open loop equations and impose the
constraint $dB_{\mathrm{in}}^{\left( \mathscr{B}\right) }=dB_{\mathrm{in}
}^{\left( \mathscr{A}\right) }+j_{t}\left( L_{\mathscr{A}}\right) dt$ giving
\begin{eqnarray}
dj_{t}\left( X\right) &=&dB_{\mathrm{in}}^{\ast }\circ j_{t}(\left[ X,L_{
\mathscr{A}}\right] )+j_{t}(\left[ L_{\mathscr{A}}^{\ast },X\right] )\circ
dB_{\mathrm{in}} \notag \\
&+&\left( dB_{\mathrm{in}}+j_{t}(L_{\mathscr{A}})\right) ^{\ast }\circ j_{t}(
\left[ X,L_{\mathscr{B}}\right] ) \notag \\
&+&j_{t}(\left[ L_{\mathscr{B}}^{\ast },X\right] )\circ \left( dB_{\mathrm{in
}}+j_{t}(L_{\mathscr{A}})dt\right) \notag \\
&+&j_{t}(\mathcal{L}_{\mathscr{A}}X)\circ dt+j_{t}(\mathcal{L}_{\mathscr{B}
}X)\circ dt,
\end{eqnarray}
which we may rearrange as
\begin{eqnarray}
dj_{t}(X) &=&dB_{\mathrm{in}}^{\ast }\circ j_{t}(\left[ X,L_{\mathscr{A}}+L_{
\mathscr{B}}\right] )+j_{t}(\left[ L_{\mathscr{A}}^{\ast }+L_{\mathscr{B}
}^{\ast },X\right] )\circ dB_{\mathrm{in}} \nonumber \\
&&+j_{t}\bigg(\mathcal{L}_{\mathscr{A}}X+\mathcal{L}_{\mathscr{B}}X+L_{
\mathscr{A}}^{\ast }\left[ X,L_{\mathscr{B}}\right] +\left[ L_{\mathscr{B}
}^{\ast },X\right] L_{\mathscr{A}}\bigg)\circ dt.
\end{eqnarray}
However, the $dt$ term can be recast using the identity
\begin{eqnarray}
&&\mathcal{L}_{\mathscr{A}}X+\mathcal{L}_{\mathscr{B}}X+L_{\mathscr{A}
}^{\ast }\left[ X,L_{\mathscr{B}}\right] +\left[ L_{\mathscr{B}}^{\ast },X
\right] L_{\mathscr{A}} \nonumber \\
&=&\frac{1}{2}\left( L_{\mathscr{A}}+L_{\mathscr{B}}\right) ^{\ast }\left[
X,L_{\mathscr{A}}+L_{\mathscr{B}}\right] +\frac{1}{2}\left[ L_{\mathscr{A}
}^{\ast }+L_{\mathscr{B}}^{\ast },X\right] \left( L_{\mathscr{A}}+L_{
\mathscr{B}}\right) \nonumber \\
&&-i\left[ X,H_{\mathscr{A}}+H_{\mathscr{B}}+\frac{1}{2i}\left( L_{
\mathscr{B}}^{\ast }L_{\mathscr{A}}-L_{\mathscr{A}}^{\ast }L_{\mathscr{B}
}\right) \right] .
\end{eqnarray}
The resulting Heisenberg dynamics is therefore the same as for the model $
(I,L,H)$ with $L=L_{\mathscr{A}}+L_{\mathscr{B}}$, and $H=H_{\mathscr{A}}+H_{
\mathscr{B}}+\mathrm{Im}\{L_{\mathscr{B}}^{\ast }L_{\mathscr{A}}\}$. This
is, of course, the form predicted by the series product in the Fock case (
\ref{eq:series_prod}).
\end{proof}
\subsection{Including Scattering}
As mentioned above, it is not possible to construct a well defined
scattering processes $\Lambda _{jk}$ in the non-Fock theory. Nevertheless,
the effects of static beam-splitter scattering $S$ may be included in a
straightforward manner without directly considering unitary QSDE models
involving the scattering processes. A clue on how to proceed is given by
our earlier observation that if the scattering matrix $S$ entries commute with systems operators - physically, a static beam-splitter -
the scattering processes disappears.
In the Fock representation, we could always take the input field $A_{\mathrm{
in}}$ and apply a unitary rotation $A=SA_{\mathrm{in}}$ before passing it
through as the drive for the component. As we have seen, this
will require a compensating rotation of the coupling operators, but no
change to the Lindbladian. There is also a rotation of the output, however,
anticipating this we make the following definition.
\begin{definition}{Definition}
Let $\mathbf{G}$ and $\mathbf{\tilde{G}}$ be SLH model parameters which, for
given input noise $A_{\mathrm{in}}=\tilde{A}_{\mathrm{in}}$ lead to output
noises $A_{\mathrm{out}}$ and $\tilde{A}_{\mathrm{out}}$ respectively. We say
that the models' input-output relations are \textbf{related by a static
beam-splitter matrix} $S$ if we have
\begin{eqnarray}
A_{\mathrm{out}}=S\,\tilde{A}_{\mathrm{out}}.
\end{eqnarray}
\end{definition}
The following result shows that for the Fock representation, if the
scattering is just a static beam-splitter, then we can produce a related
model which avoids the use of the scattering processes.
\begin{theorem}{Theorem}
\label{Thm:S} Let $S$ be a static beam-splitter matrix and set $\mathbf{G}
\sim \left( S,L,H\right) $ and $\mathbf{\tilde{G}}\sim \left( I,S^{\ast
}L,H\right) $. Then the model parameters $\mathbf{G}$ and $\mathbf{\tilde{G}}
$ generate the same Heisenberg dynamics. Moreover, their input-output
relations are related by the static beam-splitter matrix $S$.
\end{theorem}
\begin{proof}
The Heisenberg dynamics generated by $\mathbf{G}$ is (the scattering terms
vanish for a static beam-splitter)
\begin{equation}
dj_{t}^{\mathbf{G}}(X)=\sum_{j}j_{t}(\mathcal{L}_{j0}^{\mathbf{G}
}X)\,dA_{j}^{\ast }+\sum_{k}j_{t}(\mathcal{L}_{0k}^{\mathbf{G}
}X)\,dA_{k}+j_{t}(\mathcal{L}^{\mathbf{G}}X)dt
\end{equation}
where
\begin{equation}
\mathcal{L}_{j0}^{\mathbf{G}}X=S_{lj}^{\ast }\left[ X,L_{l}\right] ,\quad
\mathcal{L}_{0k}^{\mathbf{G}}X=\left[ L_{l}^{\ast },X\right] S_{lk}
\end{equation}
and the Lindblad generator is $\mathcal{L}^{\mathbf{G}}X=\frac{1}{2}
L_{k}^{\ast }\left[ X,L_{k}\right] +\frac{1}{2}\left[ L_{k}^{\ast },X\right]
L_{k}-i\left[ X,H\right] $. The Heisenberg dynamics for $\mathbf{\tilde{G}}$
similarly has no scattering terms in its QSDE, and we see that
\begin{equation}
\mathcal{L}_{j0}^{\mathbf{G}}X=[X,S_{lj}^{\ast }L_{l}]\equiv \mathcal{L}
_{j0}^{\mathbf{\tilde{G}}}X,\quad \mathcal{L}_{0k}^{\mathbf{G}
}X=[L_{l}^{\ast }S_{lk},X]\equiv \mathcal{L}_{0k}^{\mathbf{\tilde{G}}}X.
\end{equation}
From the unitarity and scalar nature of $S$ we have that
\begin{eqnarray}
\mathcal{L}^{\mathbf{\tilde{G}}}X &=&\frac{1}{2}L_{k}^{\ast }S_{kl}\left[
X,S_{jl}^{\ast }L_{j}\right] +\frac{1}{2}\left[ L_{k}^{\ast }S_{kl},X\right]
S_{jl}^{\ast }L_{j}-i\left[ X,H\right] \nonumber\\
&=&\frac{1}{2}L_{k}^{\ast }\left[ X,L_{k}\right] +\frac{1}{2}\left[
L_{k}^{\ast },X\right] L_{k}-i\left[ X,H\right] \nonumber \\
&\equiv &\mathcal{L}^{\mathbf{G}}X.
\end{eqnarray}
Therefore the QSDEs corresponding to the Heisenberg dynamics for $\mathbf{G}$
and $\mathbf{\tilde{G}}$ are identical.
The input-output relations for $\mathbf{G}$ are
\begin{equation}
dA_{\mathrm{out},j}\left( t\right) =S_{jk}\,dA_{\mathrm{in},k}+j_{t}\left(
L_{j}\right) \,dt
\end{equation}
while for $\mathbf{\tilde{G}}$ we have
\begin{equation}
dB_{\mathrm{out},j}\left( t\right) =dB_{\mathrm{in},j}+S_{jk}\,j_{t}\left(
L_{k}\right) \,dt.
\end{equation}
If we require the inputs to be the same ($A_{\mathrm{in}}=B_{\mathrm{in}}$)
then we have $A_{\mathrm{out}}=S\,B_{\mathrm{out}}$.
\end{proof}
Our strategy for introducing static beam-splitter scattering into the
situation where we have non-Fock noise input fields is to say that the
initial input $A_{\mathrm{in}}$ be replaced by the rotated input $SA_{
\mathrm{in}}$, and exploit the fact that the Heisenberg dynamics no longer
involves the scattering processes $\Lambda _{jk}$ explicitly.
\begin{lemma}{Lemma}
\textbf{(The Universal Heisenberg QSDE Description)} The Heisenberg dynamics
for a general $\left( S,L,H\right) $ model with a static beam-splitter
matrix $S$ are given by the QSDE
\begin{eqnarray}
dj_{t}(X)&=&dA_{\mathrm{in}}^{\ast }\circ S^{\ast }j_{t}(\left[ X,L\right]
)+j_{t}(\left[ L^{\ast },X\right] )S\circ dA_{\mathrm{in}} +j_{t}(\mathcal{L}X)\circ dt
\end{eqnarray}
for all mean-zero Gaussian input fields $A_{\mathrm{in}}$.
\end{lemma}
This is of course just the equation (\ref{eq:approx_Heis}) written in the Wick-Stratonovich form so as to be
representation free!
Now let us try and repeat our analysis from Section \ref{Sec:NS_SP}. Let us
now consider the situation where a Gaussian input $A_{\mathrm{in}}=A_{
\mathrm{in}}^{\left( 1\right) }$ is driving a system with SLH parameters $
\left( S_{\mathscr{A}},L_{\mathscr{A}},H_{\mathscr{A}}\right) $ and that its
output $A_{\mathrm{out}}^{\left( 1\right) } $ acts as input for a second
system $\left( S_{\mathscr{B}},L_{\mathscr{B}},H_{\mathscr{B}}\right) $.
\begin{lemma}{Lemma}
\label{prop:fig} \textbf{(Components in series: With a static beam-splitter
scattering)} The Heisenberg QSDE for a pair of systems $\left( S_{\mathscr{A}
},L_{\mathscr{A}},H_{\mathscr{A}}\right) $ and $\left( S_{\mathscr{B}},L_{
\mathscr{B}},H_{\mathscr{B}}\right) $ in series is
\begin{eqnarray}
dj_{t}(X)=\sum_{\mathscr{S}=\mathscr{A},\mathscr{B}}\bigg\{dA_{\mathrm{in}
}^{\left( \mathscr{S}\right) \ast }\circ j_{t}(\left[ X,L_{\mathscr{S}}
\right] )
+j_{t}(\left[ L_{\mathscr{S}}^{\ast },X\right] )\circ dA_{\mathrm{in}
}^{\left( \mathscr{S}\right) }+j_{t}(\mathcal{L}_{\mathscr{S}}X)\circ dt
\bigg\} ,
\end{eqnarray}
where $A_{\mathrm{in}}^{\left( \mathscr{A}\right) }=S_{\mathscr{A}}A_{\mathrm{in}
}$ and $A_{\mathrm{in}}^{\left( \mathscr{B}\right) }=S_{\mathscr{B}}A_{\mathrm{
out}}^{\left( \mathscr{A}\right) }$ where $dA_{\mathrm{out}}^{\left(
\mathscr{A}\right) }=S_{\mathscr{A}}dA_{\mathrm{in}}^{\left( \mathscr{A}
\right) }+j_{t}\left( L_{\mathscr{A}}\right) dt$, and the Lindbladians $\mathcal{L}_{\mathscr{S}}$ are as before.
\end{lemma}
\begin{proof}
Substituting the processes into the QSDEs yields
\begin{eqnarray}
dj_{t}(X) &=&\left( S_{\mathscr{A}}dA_{\mathrm{in}}\right) ^{\ast }\circ
j_{t}(\left[ X,L_{\mathscr{A}}\right] ) +j_{t}(\left[ L_{\mathscr{A}}^{\ast
},X\right] )\circ S_{\mathscr{A}}dA_{\mathrm{in}} \nonumber\\
&&+\left( S_{\mathscr{B}}S_{\mathscr{A}}dA_{\mathrm{in}}+S_{\mathscr{B}}L_{
\mathscr{A}}dt\right) ^{\ast }\circ j_{t}(\left[ X,L_{\mathscr{B}}\right] ) \nonumber
\\
&& +j_{t}(\left[ L_{\mathscr{B}}^{\ast },X\right] )\circ \left( S_{
\mathscr{B}}S_{\mathscr{A}}dA_{\mathrm{in}}+S_{\mathscr{B}}L_{\mathscr{A}
}dt\right) \nonumber \\
&&+j_{t}(\mathcal{L}_{\mathscr{A}}X)\circ dt+j_{t}(\mathcal{L}_{\mathscr{B}
}X)\circ dt, \nonumber \\
&=&\left( dA_{\mathrm{in}}\right) ^{\ast }\circ j_{t}(\left[ X,S_{\mathscr{A}
}^{\ast }L_{\mathscr{A}}+S_{\mathscr{A}}^{\ast }S_{\mathscr{B}}^{\ast }L_{
\mathscr{B}}\right] ) +j_{t}(\left[ L_{\mathscr{A}}^{\ast }S_{\mathscr{A}
}+L_{\mathscr{B}}^{\ast }S_{\mathscr{B}}S_{\mathscr{A}},X\right] )\circ dA_{
\mathrm{in}} \nonumber \\
&&+j_{t}(\mathcal{L}_{\mathscr{A}}X+\mathcal{L}_{\mathscr{B}}X +L_{
\mathscr{A}}^{\ast }S_{\mathscr{B}}^{\ast }\left[ X,L_{\mathscr{B}}\right] +
\left[ L_{\mathscr{B}}^{\ast },X\right] S_{\mathscr{B}}L_{\mathscr{A}})\circ
dt.
\end{eqnarray}
A similar calculation to before shows that
\begin{eqnarray}
&&\mathcal{L}_{\mathscr{A}}X+\mathcal{L}_{\mathscr{B}}X+L_{\mathscr{A}
}^{\ast }S_{\mathscr{B}}^{\ast }\left[ X,L_{\mathscr{B}}\right] +\left[ L_{
\mathscr{B}}^{\ast },X\right] S_{\mathscr{B}}L_{\mathscr{A}} \nonumber \\
&=&\frac{1}{2}\left( S_{\mathscr{B}}L_{\mathscr{A}}+L_{\mathscr{B}}\right)
^{\ast }\left[ X,S_{\mathscr{B}}L_{\mathscr{A}}+L_{\mathscr{B}}\right] +
\frac{1}{2}\left[ L_{\mathscr{A}}^{\ast }S_{\mathscr{B}}^{\ast }+L_{
\mathscr{B}}^{\ast },X\right] \left( S_{\mathscr{B}}L_{\mathscr{A}}+L_{
\mathscr{B}}\right) \nonumber \\
&&-[iX,H_{\mathscr{A}}+H_{\mathscr{B}}+\frac{1}{2i}\left( L_{\mathscr{B}
}^{\ast }S_{\mathscr{B}}L_{\mathscr{A}}-L_{\mathscr{A}}^{\ast }S_{\mathscr{B}
}^{\ast }L_{\mathscr{B}}\right) ].
\end{eqnarray}
The resulting Heisenberg dynamics is therefore the same as for the model $
\mathbf{\tilde{G}}\sim (I,\tilde{L},H)$ with coupling operators $\tilde{L}
=S_{\mathscr{A}}^{\ast }L_{\mathscr{A}}+S_{\mathscr{A}}^{\ast }S_{\mathscr{B}
}^{\ast }L_{\mathscr{B}}\equiv S_{\mathscr{A}}^{\ast }S_{\mathscr{B}}^{\ast
}\left( S_{\mathscr{B}}L_{\mathscr{A}}+L_{\mathscr{B}}\right) $, and
Hamiltonian $H=H_{\mathscr{A}}+H_{\mathscr{B}}+\mathrm{Im}\{L_{\mathscr{B}
}^{\ast }S_{\mathscr{B}}L_{\mathscr{A}}\}$.
The output is then $B_{\mathrm{out}}$ where
\begin{equation}
dB_{\mathrm{out}}\left( t\right) =dA_{\mathrm{in}}\left( t\right)
+j_{t}\left( S_{\mathscr{A}}^{\ast }L_{\mathscr{A}}+S_{\mathscr{A}}^{\ast
}S_{\mathscr{B}}^{\ast }L_{\mathscr{B}}\right) dt.
\end{equation}
The correct output for this should however be $A_{\mathrm{out}}=S_{
\mathscr{B}}S_{\mathscr{A}}B_{\mathrm{out}}$ so that
\begin{eqnarray}
dA_{\mathrm{out}}\left( t\right) =S_{\mathscr{B}}S_{\mathscr{A}}dA_{\mathrm{
in}}\left( t\right) +j_{t}\left( S_{\mathscr{B}}L_{\mathscr{A}}+L_{
\mathscr{B}}\right) dt
\end{eqnarray}
and we have the desired matrix $S_{\mathscr{B}}S_{\mathscr{A}}$ multiplying the
inputs, corresponding to scattering first by the matrix $S_{\mathscr{A}}$ and
then by $S_{\mathscr{B}}$. The model $\mathbf{G}$ obtained from postulate Ia
is then the one related to $\mathbf{\tilde{G}}$ by the static beam-splitter
matrix $S_{\mathscr{B}}S_{\mathscr{A}}$, that is (from Theorem \ref{Thm:S}
with $S=S_{\mathscr{B}}S_{\mathscr{A}}$ and $\tilde{L}=S^\ast L$)
\begin{eqnarray}
\mathbf{G} & \sim & \left( S,L,H\right) =\left( S_{\mathscr{B}}S_{\mathscr{A}
},S_{\mathscr{B}}S_{\mathscr{A}}\tilde{L},H\right) \notag \\
&=&
\left( S_{\mathscr{B}}S_{\mathscr{A}},S_{\mathscr{B}}L_{\mathscr{A}}+L_{
\mathscr{B}},H_{\mathscr{A}}+H_{\mathscr{B}}+\mathrm{Im}\{L_{\mathscr{B}
}^{\ast }S_{\mathscr{B}}L_{\mathscr{A}}\}\right) , \notag
\end{eqnarray}
and again we have the same form as the series product in the Fock case (\ref
{eq:series_prod}).
\end{proof}
\section{Conclusions}
We have shown that there is a consistent theory for quantum input-output
models in series when the driving input processes are in general Gaussian
states with a flat power spectrum. This emerges fairly explicitly at the
level of the singular input processes $b_k (t)$ themselves, but to have a
working theory we need to make the connection to the Hudson-Parthasarathy
quantum stochastic calculus. This involves quantum stochastic differential
equations on the Fock spaces used to represent the noise (which are a
mathematical convenience and not physical objects) with the result that the
associated dynamical equations appear to depend on the choice of Gaussian
state of the noise. In reality this is a mathematical artifact and we show
that even here there is a way of expressing the quantum stochastic
differential equations (the Wick-Stratonovich form introduced in this paper)
which removes these terms. In effect, it is the Wick-Stratonovich form that
translates into the physically relevant dynamical equations written in terms
of the quantum input processes $b_k (t)$.
The connection rules are then shown to be genuinely independent of the
choice of state. We were also able to include the effects of a static
beam-splitter component. At first sight this would seem problematic as the
scattering terms $\Lambda _{jk}(t)$ are not well-defined for non-vacuum
states, however, it is possible to omit them from the model: in fact we
need to work at the level of the Heisenberg flow and the input-output
relations, neither of which involve the scattering terms. The result is that
we may account for static scattering and we find that the series product of
\cite{GJ-Series} again gives the correct rule. In this way we extend the
series product to deal with quantum feedback networks driven by general
Gaussian input processes.
We have restricted our analysis to Bose systems, however, there is an Araki-Woods
type double Fock space representation for Fermi fields with quasi-free states as well,
which is applicable to Fermi stochastic processes \cite{HP_Fermi}, \cite{BSW}.
The network rules for Fermi stochastic processes can be similarly derived
and one would naturally expect these to again be state-independent.
\end{document} |
\begin{document}
\title{Approximating the Integral Fr\'{e}chet Distance}
\author{\begin{tabular}{ c c c }
Anil Maheshwari & J\"{o}rg-R\"{u}diger Sack & Christian Scheffer \\
School of Computer Science & School of Computer Science & Department of Computer Science \\
Carleton University & Carleton University & TU Braunschweig\\
Ottawa, Canada K1S5B6 & Ottawa, Canada K1S5B6 & M\"{u}hlenpfordtstr. 23,\\
&& 38106 Braunschweig, Germany \\
\texttt{[email protected]} & \texttt{[email protected]} & \texttt{[email protected]}
\end{tabular}
}
\date{}
\maketitle
\begin{abstract}
A pseudo-polynomial time $(1 + \varepsilon)$-approximation algorithm is presented for computing the integral and average Fr\'{e}chet distance between two given polygonal curves $T_1$ and $T_2$. In particular, the running time is upper-bounded by $\mathcal{O}( \zeta^{4}n^4/\varepsilon^{2})$ where $n$ is the complexity of $T_1$ and~$T_2$ and $\zeta$ is the maximal ratio of the lengths of any pair of segments from $T_1$ and~$T_2$. The Fr\'{e}chet distance captures the minimal cost of a continuous deformation of $T_1$ into $T_2$ and vice versa and defines the cost of a deformation as the maximal distance between two points that are related. The integral Fr\'{e}chet distance defines the cost of a deformation as the integral of the distances between points that are related. The average Fr\'{e}chet distance is defined as the integral Fr\'{e}chet distance divided by the lengths of $T_1$ and $T_2$.
Furthermore, we give relations between weighted shortest paths inside a single parameter cell~$C$ and the monotone free space axis of $C$. As a result we present a simple construction of weighted shortest paths inside a parameter cell. Additionally, such a shortest path provides an optimal solution for the partial Fr\'{e}chet similarity of segments for all leash lengths. These two aspects are related to each other and are of independent interest.
\end{abstract}
\section{Introduction}\label{sec:intro}
Measuring similarity between geometric objects is a fundamental problem in many areas of science and engineering. Applications arise e.g., when studying animal behaviour, human movement, traffic management, surveillance and security, military and battlefield, sports scene analysis, and movement in abstract spaces~\cite{gudmundsson:movement,gudmundsson:gpu,gudmundsson:football}. Due to its practical relevance, the resulting algorithmic problem of curve matching has become one of the well-studied problems in computational geometry. One of the prominent measures of similarities between curves is given by the \emph{Fr\'{e}chet distance} and its variants. \emph{Fr\'{e}chet} measures have been applied e.g., in hand-writing recognition~\cite{DBLP:conf/icdar/SriraghavendraKB07}, protein structure alignment~\cite{DBLP:journals/jbcb/JiangXZ08}, and vehicle tracking~\cite{wenk:vehicle}.
In the well-known dog-leash metaphor, the (standard) \emph{Fr\'{e}chet distance} is described as follows:
suppose a person walks a dog, while both have to move from the starting point to the ending point on their respective curves~$T_1$ and~$T_2$. The \emph{Fr\'{e}chet} distance is the minimum leash length required over all possible pairs of walks, if neither person nor dog is allowed to move backwards. Here, we see the \emph{Fr\'{e}chet distance} as capturing the cost of a continuous deformation of~$T_1$ into $T_2$ and vice versa. (A deformation is required to maintain the order along~$T_1$ and~$T_2$.) A specific deformation induces a relation $R \subset T_1 \times T_2$ such that ``$p \in T_1$ is deformed into $q \in T_2$''. For $(p,q) \in R$ we say \emph{$p$ is related to~$q$} and vice versa. The \emph{Fr\'{e}chet distance} defines the cost of a deformation as the maximal distance between two related points.
\begin{figure}
\caption{A deformation between $T_1$ and $T_2$ and the relation between $T_1$ and $T_2$. The deformation maintains the order of points along the curves. The distances between related points on the peak are larger than the distances between related points that do not lie on the peak.
}
\label{fig:deformationDistanceVSintegral}
\end{figure}
In this paper, we study the integral and average Fr\'{e}chet distance originally introduced by Buchin~\cite{buchin:phd}. The \emph{integral Fr\'{e}chet distance} defines the cost of a deformation as the integral of the distances between points that are related. The \emph{average Fr\'{e}chet distance} is defined as the integral Fr\'{e}chet distance divided by the lengths of $T_1$ and~$T_2$. Next, we define these notions formally.
\subsection{Problem Definition}
Let $T_1,T_2: [0,n] \rightarrow \mathbb{R}^2$ be two polygonal curves. We denote the first derivative of a function~$f$ by $f'$. By, $|| \cdot ||_p$, we denote the $p$-norm and by $d_p( \cdot, \cdot)$ its induced $L_p$ metric. The \emph{lengths~$|T_1|$ and~$|T_2|$} of $T_1$ and~$T_2$ are defined as $\int^n_0 ||(T_1)'(t)||_2\ dt$ and $\int^n_0 ||(T_2)'(t)||_2\ dt$, respectively. To simplify the exposition, we assume that $|T_1| = |T_2| = n$ and that $T_1$ and $T_2$ each have $n$ segments. A \emph{reparametrization} is a continuous function $\alpha: [0,n] \rightarrow [0,n]$ with $\alpha(0) = 0$ and $\alpha(n)= n$. A reparameterization $\alpha$ is \emph{monotone} if $\alpha(t_1) \leq \alpha(t_2)$ holds for all $0 \leq t_1 \leq t_2 \leq n$. A \emph{(monotone) matching} is a pair of (monotone) reparametrizations $(\alpha_1,\alpha_2)$. The \emph{Fr\'echet distance} of $T_1$ and~$T_2$ w.r.t. $d_2$ is defined as $\mathscr{D} \left( T_1, T_2 \right)=\inf_{(\alpha_1,\alpha_2)} \max_{t \in [0,n]} d_2 (T_1(\alpha_1(t)), T_2(\alpha_2(t)))$.
For a given leash length $\delta \geq 0$, Buchin et al.~\cite{buchin:exact} define the \emph{partial Fr\'{e}chet similarity $\mathcal{P}_{(\alpha_1,\alpha_2)}(T_1,T_2)$ w.r.t. a matching $(\alpha_1,\alpha_2)$} as
\begin{equation*}
\int_{d_2( T_1 \left( \alpha_1 \left( t \right) \right), T_2 \left( \alpha_2 \left( t \right) \right) ) \leq \delta} \left( || \left( T_1 \circ \alpha_1 \right)' \left( t \right)||_2 + || \left( T_2 \circ \alpha_2 \right)' \left( t \right)||_2 \right) dt
\end{equation*}
and the \emph{partial Fr\'{e}chet similarity} as $\mathcal{P}_{\delta}(T_1,T_2)=\sup_{\alpha_1,\alpha_2} \mathcal{P}_{(\alpha_1,\alpha_2)} (T_1,T_2)$.
Given a monotone matching $\left( \alpha_1, \alpha_2 \right)$, the \emph{integral Fr\'echet distance $\mathcal{F}_{\mathcal{S},(\alpha_1,\alpha_2)} \left( T_1, T_2 \right)$ of $T_1$ and $T_2$ w.r.t. $\left( \alpha_1,\alpha_2 \right)$} is defined as:
\begin{equation*}
\int_{0}^n d_2( T_1 \left( \alpha_1 \left( t \right) \right), T_2 \left( \alpha_2 \left( t \right) \right) ) \left( || \left( T_1 \circ \alpha_1 \right)' \left( t \right)||_2 + || \left( T_2 \circ \alpha_2 \right)' \left( t \right)||_2 \right) dt
\end{equation*}
and the \emph{integral Fr\'{e}chet distance} as $\mathcal{F}_{\mathcal{S}} \left( T_1, T_2 \right)=\inf_{(\alpha_1,\alpha_2)} \mathcal{F}_{\mathcal{S},(\alpha_1,\alpha_2)} \left( T_1, T_2 \right)$~\cite{buchin:phd}. Note that the derivatives of $(T_1 \circ \alpha_1)(\cdot)$ and $(T_2 \circ \alpha_2)(\cdot)$ are measured w.r.t. the $L_2$-norm because the lengths of $T_1$ and $T_2$ are measured in Euclidean space. The \emph{average Fr\'{e}chet distance} is defined as $\mathcal{F}_S (T_1,T_2) / (|T_1| + |T_2|)$~\cite{buchin:phd}.
While the integral Fr\'{e}chet distance has been studied~\cite[p. 860]{wenk:vehicle}, no efficient algorithm exists to compute this distance measure (see Subsection~\ref{subsec:rel} for details). In this paper, we design the first pseudo-polynomial time algorithm for computing a $(1+ \varepsilon)$-approximation of the integral Fr\'{e}chet distance and consequently of the average Fr\'{e}chet distance.\\
\subsection{Related Work} \label{subsec:rel}
In their seminal paper, Alt and Godau~\cite{alt:computing} provided an algorithm that computes the Fr\'{e}chet distance between two polygonal curves $T_1$ and $T_2$ in $\mathcal{O}(n^2 \log (n))$ time, where $n$ is the complexity of $T_1$ and $T_2$. In the presence of outliers though, the Fr\'{e}chet distance may not provide an appropriate result. This is due to the fact that the Fr\'{e}chet distance measures the maximum of the distances between points that are related. This means that already one large "peak" may substantially increase the Fr\'{e}chet distance between $T_1$ and $T_2$ when the remainder of $T_1$ and $T_2$ are similar to each other, see Figure~\ref{fig:deformationDistanceVSintegral} for an example.
\begin{figure}
\caption{An optimal deformation between $T_1$ and $T_2$ for both the Fr\'{e}chet distance and the partial Fr\'{e}chet similarity.}% NOTE(review): caption was truncated in the source; reconstructed from context -- verify against original
\label{fig:outlier}
\end{figure}
To overcome the issue of outliers, Buchin et al.~\cite{buchin:exact} introduced the notion of \emph{partial Fr\'{e}chet similarity} and gave an algorithm running in $\mathcal{O}(n^3 \log (n))$ time, where distances are measured w.r.t. the $L_1$ or $L_{\infty}$ metric. The partial Fr\'{e}chet similarity measures the cost of a deformation as the lengths of the parts of $T_1$ and $T_2$ which are made up of points that fulfill the following: The distances that are induced by straightly deforming points into their related points are upper-bounded by a given threshold $\delta \geq 0$, see Figure~\ref{fig:outlier}. De Carufel et al.~\cite{carufel:similarity} showed that the partial Fr\'{e}chet similarity w.r.t. the $L_2$ metric cannot be computed exactly over the rational numbers. Motivated by that, they gave a $(1 \pm \varepsilon)$-approximation algorithm guaranteeing a pseudo-polynomial running time. An alternative perspective on the partial Fr\'{e}chet similarity is the partial Fr\'{e}chet dissimilarity, i.e., the minimization of the portions on $T_1$ and $T_2$ which are involved in distances that are larger than $\delta$. Observe that an exact solution for the similarity problem directly leads to an exact solution for the dissimilarity problem. In particular, the sum of both values is equal to the sum of the lengths of $T_1$ and $T_2$.
Unfortunately, both the partial Fr\'{e}chet similarity and dissimilarity are highly dependent on the choice of $\delta$ as provided by the user. As a function of $\delta$, the partial Fr\'{e}chet distance is unstable, i.e., arbitrarily small changes of $\delta$ can result in arbitrarily large changes of the partial Fr\'{e}chet (dis)similarity, see Figure~\ref{fig:outlier}. In particular, noisy data may yield incorrect similarity results. For noisy data, the computation of the Fr\'{e}chet distance in the presence of imprecise points has been explored in~\cite{ahn:imprecise}. The idea behind this approach is to model signal errors by replacing each vertex $p$ of the considered chain $T_1$ by a small ball centered at $p$. Unfortunately, the above described outlier-problem cannot be resolved by such an approach because the distance of an outlier to the other chain $T_2$ could be arbitrarily large. This would mean that the radii of the corresponding balls would have to be chosen extremely large.
An approach related to the integral Fr\'{e}chet distance is dynamic time warping (DTW), which arose in the context of speech recognition~\cite{rabiner:fundamentals}. Here, a discrete version of the integral Fr\'{e}chet distance is computed via dynamic programming. This is not suitable for general curve matching (see~\cite[p. 204]{efrat:mathching}). Efrat et al.~\cite{efrat:mathching} worked out an extension of the idea of DTW to a continuous version. In particular, they compute shortest path distances on a combinatorial piecewise linear $2$-manifold that is constructed by taking the Minkowski sum of $T_1$ and $T_2$. Furthermore, they gave two approaches dealing with that manifold. The first one does not yield an approximation of the integral Fr\'{e}chet distance. The second one does not lead to theoretically provable guarantees regarding both: polynomial running time and approximation quality of the integral Fr\'{e}chet distance.
More specifically, ~\cite{efrat:mathching} designed two approaches for continuous curve matching by computing shortest paths on a combinatorial piecewise linear $2$-manifold $\mathcal{M}(T_1,T_2) := T_1 \ominus T_2 := \{ T_1(\mu) - T_2(\lambda) | \lambda,\mu \in [0,n] \}$. In particular, they consider shortest path lengths between the points $T_1(0) - T_2(0)$ and $T_1(n) - T_2(n)$ on the polyhedral structure which is induced by $\mathcal{M}(T_1,T_2)$. The first approach is to compute in polynomial time the unweighted monotone shortest path length on $\mathcal{M}(T_1,T_2)$ w.r.t. $d_2$. This approach does not take into account the weights in form of the considered leash length. Therefore, it does not yield an approximation of the integral Fr\'{e}chet distance. In contrast to this, the second approach considers an arbitrarily chosen weight function $f$ such that the minimum path integral over all connecting curves on $\mathcal{M}(T_1,T_2)$ is approximated. In terms of Fr\'{e}chet distances, this approach is an approximation of the integral Fr\'{e}chet distances as described next. By flattening and rectifying $\mathcal{M}(T_1,T_2)$, we have a representation of the parameter space in the space of $T_1$ and $T_2$, such that by setting $f = w$ and considering shortest path length w.r.t. $d_1$ instead of $d_2$, we obtain the problem setting of computing the integral Fr\'{e}chet distance (the function $w$ is defined in Section \ref{sec:prelim}). However, to compute the weighted shortest path length on $\mathcal{M}(T_1,T_2)$, Efrat et al. apply the so-called \emph{Fast Marching Method}, ``to solve the Eikonal equation numerically''~\cite[p. 211]{efrat:mathching}. While ``the solution it (ed.: the algorithm) provides converges monotonically''~\cite[p. 211]{efrat:mathching}, the solution does not give a $(1+\varepsilon)$ approximation with pseudo-polynomial running-time.
\subsection{Contributions}
\label{sec:our-result}
\begin{itemize}
\item
We present a (pseudo-)polynomial time algorithm that approximates the integral Fr\'{e}chet Distance, $\mathcal{F}_S(T_1,T_2)$, up to a multiplicative error of $(1+\varepsilon)$. This measure is desirable because it integrates the inter-curve distances along the curve traversals, and is thus more stable (w.r.t. the choice of $\delta$) than other Fr\'{e}chet Distance measures defined by the maximal such distance.
\item
The running time of our approach is $\Oh{ \zeta^{4}n^4/\varepsilon^{2} \log (\zeta n /\varepsilon) }$, where~$\zeta$ is the maximal ratio of the lengths of any pair of segments from $T_1$ and $T_2$. Note that achieving a running time that is independent of $|T_1| + |T_2|$ seems to be quite challenging as $\mathcal{F}_S(T_1,T_2)$ could be arbitrarily small compared to $|T_1| + |T_2|$.
\item This guarantees a $(1+\varepsilon)$-approximation within pseudo-polynomial running time, which has not been achieved by the approach of \cite{efrat:mathching}.
\item Our results thus answer the implicit question raised in \cite{wenk:vehicle}: ``Unfortunately there is no algorithm known that computes the integral Fr\'{e}chet distance.''
\item As a by-product, we show that a shortest weighted path $\pi_{ab}$ between two points $a$ and~$b$ inside a parameter cell $C$ can be computed in constant time. We also make the observation that $\pi_{ab}$ provides an optimal matching for the partial Fr\'{e}chet similarity for all leash length thresholds. This provides a natural extension of locally correct Fr\'{e}chet matchings that were first introduced by Buchin et al.~\cite{buchin:locally}. They suggest to: ``restrict to the locally correct matching that decreases the matched distance as quickly as possible.''\cite[p. 237]{buchin:locally}. The matching induced by $\pi_{ab}$ fulfils this requirement.
\end{itemize}
\section{Preliminaries} \label{sec:prelim}
The \emph{parameter space $P$} of $T_1$ and~$T_2$ is an axis aligned rectangle. The bottom-left corner $\mathfrak{s}$ and upper-right corner $\mathfrak{t}$ correspond to $(0,0)$ and $(n,n)$, respectively. We denote the $x$- and the $y$-coordinate of a point $a \in P$ by $a.x$ and $a.y$, respectively. A point $b \in P$ \emph{dominates} a point $a \in P$, denoted by $a \leq_{xy} b$, if $a.x \leq b.x$ and $a.y \leq b.y$ hold. A path~$\pi$ is \emph{($xy$-) monotone} if $\pi(t_1) \leq \pi(t_2)$ holds for all $0\leq t_1\leq t_2 \leq n$. Thus a monotone matching corresponds to a monotone path $\pi$ with $\pi(0) = \mathfrak{s}$ and $\pi(n) = \mathfrak{t}$. By inserting $n+1$ vertical and $n+1$ horizontal \emph{parameter lines}, we refine $P$ into $n$ rows and $n$ columns such that the $i$-th row (column) has a height (resp., width) that corresponds to the length of the $i$-th segment on $T_1$ (resp., $T_2$). This induces a partitioning of $P$ into cells, called \emph{parameter cells}.
For $a,b \in P$ with $a \leq_{xy} b$, we have $||ab||_1 = \int_{a.x}^{b.x} ||(T_1)'(t)||_2 \ dt + \int_{a.y}^{b.y} ||(T_2)'(t)||_2 \ dt$. This is equal to the sum of the lengths of the subcurves between $T_1(a.x)$ and $T_1(b.x)$ and between $T_2(a.y)$ and $T_2(b.y)$. Thus, we define the \emph{length $|\pi|$ of a path $\pi: [0,n] \rightarrow P$} as $\int_{0}^{n}||(\pi)'(t)||_1 \ dt$. Note that for the paths inside the parameter space the $1$-norm is applied, while the lengths of the curves in the Euclidean space are measured w.r.t. the $2$-norm. As $\mathcal{F}_S(T_1,T_2)$ measures the length of $T_1$ and $T_2$ at which each $(T_1(\alpha_1(t)),T_2(\alpha_2(t)))$ is weighted by $d_2 (T_1(\alpha_1(t)),T_2(\alpha_2(t)))$, we consider the \emph{weighted length} of $\pi$ defined as follows:
Let $w(\cdot) : P \rightarrow \mathbb{R}_{\geq 0}$ be defined as $w((x,y)) := d_2 (T_1(x), T_2(y))$ for all $(x,y) \in P$. The weighted length $|\pi|_w$ of a path $\pi : [a,b] \rightarrow P$ is defined as $\int_a^b w \left( \pi \left( t \right) \right) || (\pi)' \left( t \right) ||_1 dt.$
\begin{observation}[\cite{buchin:phd}]\label{obs:dualpaths}
Let $\pi$ be a shortest weighted monotone path between $\mathfrak{s}$ and~$\mathfrak{t}$ inside~$P$. Then, we have $|\pi|_w = \mathcal{F}_{\mathcal{S}} \left( T_1, T_2 \right)$.
\end{observation}
Motivated by Observation~\ref{obs:dualpaths}, we approximate $\mathcal{F}_S(T_1,T_2)$ by approximating the length of a shortest weighted monotone path $\pi \subset P$ connecting $\mathfrak{s}$ and $\mathfrak{t}$.
Let $\delta \geq 0$ be chosen arbitrarily but fixed. Inside each parameter cell~$C$, the union of all points $p$ with $w(p) \leq \delta$ is equal to the intersection of an ellipse $\mathcal{E}$ with $C$. Observe that $\mathcal{E}$ can be computed in constant time~\cite{alt:computing}. $\mathcal{E}$ is characterized by two focal points $F_1$ and $F_2$ and a radius $r$ such that $\mathcal{E} = \{ x \in \mathbb{R}^2 \mid d_2 (x,F_1) + d_2 (x,F_2) \leq r \}$. The two axes $\ell$ (monotone) and $\hbar$ (not monotone) of $\mathcal{E}$, called the \emph{free space axes}, are defined as the line induced by $F_1$ and $F_2$ and the bisector between $F_1$ and $F_2$. If $\mathcal{E}$ is a disc, $\ell$ and $\hbar$ are the lines with gradients $1$ and $-1$ and which cross each other in the middle of $\mathcal{E}$. Note that the axes are independent of the value of $\delta$.
\begin{figure}
\caption{A weighted shortest $xy$-monotone path $\pi_{ab}$ between $a$ and $b$ inside a parameter cell $C$ and the monotone free space axis $\ell$ of $C$.}% NOTE(review): caption was truncated in the source; reconstructed from context -- verify against original
\label{fig:shortestVSaxis}
\end{figure}
To approximate $|\pi|_w$ efficiently we make the following observation that is of independent interest: Let $a,b$ be two parameter points that lie in the same parameter cell $C$ such that~$a \leq_{xy} b$. The shortest weighted monotone path $\pi_{ab}$ between $a$ and $b$ (that induces an optimal solution for the integral Fr\'{e}chet distance) is the monotone path between $a$ and $b$ that maximizes its subpaths that lie on $\ell$ (see Figure~\ref{fig:shortestVSaxis} and Lemma~\ref{lem:key}). Another interesting aspect of $\pi_{ab}$ is that it also provides an optimal matching for the partial Fr\'{e}chet similarity (between the corresponding (sub-)segments) for all leash lengths, as $\pi \cap \mathcal{E}_{\delta}$ has the maximal length for all $\delta \geq 0$, where $\mathcal{E}_{\delta} := \mathcal{E}$ for a specific $\delta \geq 0$. Next, we discuss our algorithms.
\section{An Algorithm for Approximating Integral Fr\'{e}chet Distance}\label{sec:pre}
We approximate the length of a shortest weighted monotone path between $\mathfrak{s}$ and $\mathfrak{t}$ as follows: We construct two weighted, directed, geometric graphs $G_1 = (V_1,E_1,w_1)$ and $G_2 = (V_2,E_2,w_2)$ that lie embedded in $P$ such that $\mathfrak{s},\mathfrak{t} \in V_1$ and $\mathfrak{s},\mathfrak{t} \in V_2$. Then, in parallel, we compute for $G_1$ and $G_2$ the lengths of the shortest weighted paths between $\mathfrak{s}$ and $\mathfrak{t}$. Finally, we output the minimum of both values as an approximation for $\mathcal{F}_S(T_1,T_2)$.
We introduce some additional terminology. A \emph{geometric graph $G = (V,E)$} is a graph where each $v \in V$ is assigned to a point $p_v \in P$, its \emph{embedding}. The \emph{embedding} of an edge $(u,v) \in E$ (into $P$) is $p_{u}p_{v}$. The \emph{embedding of $G$ (into $P$)} is $\bigcup_{(u,v) \in E} p_up_v$. For $v \in V$ and $e \in E$, we denote simultaneously the vertex $v \in V$, the edge $e \in E$, and the graph $(V,E)$ and their embeddings by $v$, $e$, and $G$, respectively. $G$ is \emph{monotone (directed)} if $p_u \leq_{xy} p_v$ holds for all $(u,v) \in E$. Let $R \subseteq P$ be an arbitrarily chosen axis aligned rectangle with height $h$ and width $b$. The \emph{grid (graph) of $R$ with mesh size $\sigma$} is the geometric graph that is induced by the segments that are given as the intersections of $R$ with the following lines: Let $h_1, \dots,h_{k_1}$ be the $\lceil \frac{h}{\sigma} \rceil+1$ equidistant horizontal lines and let $b_1, \dots,b_{k_2}$ be the $\lceil \frac{b}{\sigma} \rceil+1$ equidistant vertical lines such that $\partial R = R \cap (h_1 \cup h_{k_1} \cup b_1 \cup b_{k_2})$. \\ \\
\noindent {\bf Construction of $G_1$:} Let $\mu$ be the length of a smallest segment from $T_1$ and $T_2$. We construct $G_1=(V_1,E_1) \subset P$ as the monotone directed grid graph of $P$ with a mesh size of $\frac{\varepsilon \mu}{40000 (|T_1| + |T_2|)}$. Furthermore, we set $w_1((u,v)):=|uv|_w$ for all $(u,v) \in E_1$.\\ \\
\noindent{\bf Construction of $G_2$: } For $u \in P$ and $r \geq 0$, we consider the ball $B_r(u)$ with its center at $u$ and a radius of $r$ w.r.t. the $L_{\infty}$ metric.
For the construction of $G_2$ we need the free space axes of the parameter cells and so called grid balls:
\begin{definition}\label{def:gridball}
Let $u \in P$ and $r \geq 0$ be chosen arbitrarily. The \emph{grid ball $G_r(u)$} is defined as the grid of $B_r(u)$ that has a mesh size of $\frac{\varepsilon}{456}w(u)$. We say~$G_r(u)$ \emph{approximates}~$B_r(u)$.
\end{definition}
We define $G_2$ as the monotone directed graph that is induced by the arrangement that is made up of the following components restricted to $P$:
\noindent\begin{minipage}{0.52\linewidth}\vspace*{2ex}
\begin{itemize}
\item (1) All monotone free space axes restricted to their corresponding parameter cell.
\item (2) All grid balls $G_{62w(u)}(u)$ for $u := \arg \min_{p \in e}w(p)$ and any parameter edge $e$.
\item (3) The segments $\mathfrak{s}c_{\mathfrak{s}}$ and $\mathfrak{t}c_{\mathfrak{t}}$ if the parameter cells $C_{\mathfrak{s}}$ and $C_{\mathfrak{t}}$ that contain $\mathfrak{s}$ and $\mathfrak{t}$ are intersected by their corresponding monotone free space axes $\ell_{\mathfrak{s}}$ and $\ell_{\mathfrak{t}}$, where $c_{\mathfrak{s}}$ and $c_{\mathfrak{t}}$ are defined as the bottom-leftmost and top-rightmost point of $\ell_{\mathfrak{s}} \cap C_{\mathfrak{s}}$ and $\ell_{\mathfrak{t}} \cap C_{\mathfrak{t}}$.
\end{itemize}
\end{minipage}
\begin{minipage}{0.4\linewidth}\vspace*{2ex}
\begin{center}
\begin{tabular}{p{6cm}}
\includegraphics[height=2.3cm]{exampleG2merged.pdf}\\
{\small Exemplified construction of $G_2$ for two given polygonal curves $T_1$ and $T_2$. For simplicity we only illustrate four grid balls (with reduced radii) and the corresponding point pairs from $T_1 \times T_2$.}
\end{tabular}
\end{center}
\end{minipage}\vspace*{2ex}
Finally, we set $w_2((v_1,v_2)):= |v_1v_2|_w$ for all $(v_1,v_2) \in E_2$.
For each edge $e \in G$ we choose the point $u \in e$ as the center of the corresponding grid ball because the free space axes of the parameter cells adjacent to $e$ lie close to $u$. \\ \\
\noindent{\bf Analysis of our approach: }
Since $G_1$ is monotone and each edge $(p_1,p_2) \in E_1$ is assigned to $|p_1p_2|_w$, we obtain that for each path $\widetilde{\pi} \subset G_1$ between~$\mathfrak{s}$ and $\mathfrak{t}$ holds $|\pi|_w \leq |\widetilde{\pi}|_w$. The same argument applies to $G_2$. Hence, we still have to ensure that there is a path $\widetilde{\pi} \subset G_1$ or $\widetilde{\pi} \subset G_2$ such that $|\widetilde{\pi}|_w \leq (1+\varepsilon)|\pi|_w$. We say that a path $\pi \subset P$ is \emph{low} if $w(p) \leq \frac{\mu}{100}$ holds for all $p \in \pi$. For our analysis, we show the following:\\
{\bf Case A: } There is a $\widetilde{\pi} \subset G_1$ with $|\widetilde{\pi}|_w \leq (1+\varepsilon)|\pi|_w$ if there is a shortest path $\pi \subset P$ that is not low (see Subsection~\ref{subsubsec:anaG1}).\\
{\bf Case B:} Otherwise, there is a $\widetilde{\pi} \subset G_2$ with $|\widetilde{\pi}|_w \leq (1+\varepsilon)|\pi|_w$ (see Subsection~\ref{subsubsec:anaG2}).
\subsection{Analysis of Case A}\label{subsubsec:anaG1}
In this subsection, we assume that there is a shortest path $\pi$ between $\mathfrak{s}$ and $\mathfrak{t}$ that is not low, i.e., there is a $p \in \pi$ with $w(p) \geq \frac{\mu}{100}$. Furthermore, for any $o,p\in \pi$, we denote the subpath of~$\pi$ which is between $o$ and~$p$ by $\pi_{op}$.
First we prove a lower bound for $|\pi|_w$ (Lemma~\ref{lem:lowerBoundForSummedFDcase1}). This lower bound ensures that the approximation error that we make for a path in $G_1$ is upper-bounded by $\varepsilon |\pi|_w$ (Lemma~\ref{lem:apprQualityG1}).
A \emph{cell $C$ of $G_1$} is the convex hull of four vertices $v_1,v_2,v_3,v_4 \in V_1$ such that $C \cap V_1 = \{ v_1,v_2,v_3,v_4 \}$. As the mesh size of $G_1$ is $\frac{\varepsilon \mu}{40000 (|T_1| + |T_2|)}$, we have $d_1(p_1,p_2) \leq \frac{\varepsilon \mu}{20000 (|T_1| + |T_2|)}$ for any two points $p_1$ and $p_2$ that lie in the same cell of $G_1$. The following property of $w(\cdot)$ is the key in the analysis of the weighted shortest path length of $G_1$:
\begin{definition}[\cite{funke:smooth}]\label{def:lip}
$f: P \rightarrow \mathbb{R}_{\geq 0}$ is $1$-Lipschitz if $f(x) \leq f(y) + d_1(x,y)$ for all $x,y \in P$~\footnote{The requirement $|f(x)-f(y)|\leq d_1(x,y)$ is also occasionally used to define $1$-Lipschitz continuity. Note that this alternative definition is equivalent to Definition~\ref{def:lip}.}.
\end{definition}
\begin{lemma}\label{lem:lip}
$w(\cdot)$ is $1$-Lipschitz w.r.t. $L_1$.
\end{lemma}
\begin{proof}
Let $(a_1,a_2), (b_1,b_2) \in P$ be chosen arbitrarily.
The subcurves $t_{T_1(a_1)T_1(b_1)} \subset T_1$ between $T_1(a_1)$ and $T_1(b_1)$ and $t_{T_2(a_2)T_2(b_2)} \subset T_2$ between $T_2(a_2)$ and $T_2(b_2)$ have lengths no larger than $|a_1 - b_1|$ and $|a_2 - b_2|$. Thus $d_2 (T_1(a_1), T_1(b_1)) \leq |a_1 - b_1|$ and $d_2 (T_2(a_2), T_2(b_2)) \leq |a_2 - b_2|$. Furthermore, $w((a_1,a_2))$ is equal to $d_2 (T_1(a_1), T_2 (a_2) )$. By triangle inequality, $w((b_1,b_2)) = d_2 \left( T_1(b_1), T_2(b_2) \right) \leq d_2( T_2(b_2), T_2(a_2) )+ d_2 (T_2(a_2), T_1(a_1) ) + d_2 (T_1(a_1), T_1(b_1) ) \leq d_1 ((a_1,a_2), (b_1,b_2)) + w ((a_1,a_2))$, because $d_2(T_2(b_2), T_2(a_2)) \leq |b_2 - a_2|$, $d_2(T_2(a_2), T_1(a_1)) = w((a_1,a_2))$, $d_2(T_1(a_1), T_1(b_1)) \leq |b_1 - a_1|$, and $d_1((a_1,a_2), (b_1,b_2)) = |b_1-a_1| + |b_2-a_2|$.
\end{proof}
Lemma~\ref{lem:lip} allows us to prove the following lower bound for the weighted length of $\pi$.
\begin{lemma}\label{lem:lowerBoundForSummedFDcase1}
$|\pi|_w \geq \frac{\mu}{20000}$
\end{lemma}
\begin{proof}
Let $p \in \pi$ such that $w(p) \geq \frac{\mu}{100}$. Let $\psi := \pi \cap B_{\frac{\mu}{100}}(p)$. We have $|\psi|_w \geq \frac{\mu}{200}$ because $w(\cdot)$ is $1$-Lipschitz. Furthermore, $\psi \subset \pi$ implies $|\psi|_w \leq |\pi|_w$ which yields $|\pi|_w \geq \frac{\mu}{200} \geq \frac{\mu}{20000}$.
\end{proof}
\begin{lemma}\label{lem:apprQualityG1}
There is a path $\widetilde{\pi} \subset G_1$ that connects $\mathfrak{s}$ and $\mathfrak{t}$ such that $|\widetilde{\pi}|_w \leq (1 + \varepsilon) |\pi|_w$.
\end{lemma}
\begin{proof} Starting from $\mathfrak{s}$, we construct $\widetilde{\pi}$ inductively as follows: If $\pi$ crosses a vertical
\noindent\begin{minipage}{0.8\linewidth}\vspace*{0.5ex}
(horizontal) parameter line next, $\widetilde{\pi}$ goes one step to the right (top). For $p \in \pi$ let $h_p$ be the line with gradient $-1$ such that $p \in h_p$ (see the figure on the right). As $\pi$ and $\widetilde{\pi}$ are monotone, $\widetilde{p} := h_p \cap \widetilde{\pi}$ is unique and well defined. For all $p$, $p$ and $\widetilde{p}$ lie in the same cell of $G_1$ and thus, $w(\widetilde{p}) \leq w(p) + \frac{\varepsilon \mu}{20000 (|T_1| + |T_2|)}$. This implies $|\widetilde{\pi}|_w \leq (1+\varepsilon) |\pi|_w$ because $|\widetilde{\pi}| = |\pi|$. To be more precise, we consider $\widetilde{\pi}, \pi: [0,1] \rightarrow P$ to be parametrized such that $d_1(\mathfrak{s},\widetilde{\pi}(t)) = d_1(\mathfrak{s},\pi(t)) = t d_1( \mathfrak{s}, \mathfrak{t})$. We obtain, $||(\widetilde{\pi})'(t)||_1 =d_1(\mathfrak{s},\mathfrak{t})= ||(\pi)'(t)||_1$ for all $t \in [0,1]$.
\end{minipage}
\begin{minipage}{0.2\linewidth}
\begin{center}
\includegraphics[height=3cm]{constructionEidetildePiCase1}
\end{center}
\end{minipage}\vspace*{0.5ex}
Furthermore, the above implies $w(\widetilde{\pi}(t)) \leq w(\pi(t)) + \frac{\varepsilon \mu}{20000 (|T_1| + |T_2|)}$ $(\star)$. Thus:
\begin{eqnarray*}
|\widetilde{\pi}|_w & = & \int_{0}^1 w (\widetilde{\pi}(t)) || (\widetilde{\pi})'(t)||_1\ dt \stackrel{(\star)}{\leq} \int_{0}^1 \left( w(\pi(t)) + \frac{\varepsilon \mu}{20000 (|T_1| + |T_2|)} \right) || (\pi)'(t)||_1\ dt\\
& = & \int_{0}^1 w(\pi(t)) || (\pi)'(t)||_1\ dt + \frac{\varepsilon \mu \int_{0}^1 1 \ || (\pi)'(t)||_1\ dt}{20000 (|T_1| + |T_2|)}\\
& =& |\pi|_w + \frac{\varepsilon \mu}{20000} \stackrel{\textit{Lemma~\ref{lem:lowerBoundForSummedFDcase1}}}{\leq} |\pi|_w + \varepsilon |\pi|_w = (1+\varepsilon) |\pi|_w.
\end{eqnarray*}
\end{proof}
\subsection{Analysis of Case B}\label{subsubsec:anaG2}
In this subsection, we assume that there is a monotone low path $\pi$ between $\mathfrak{s}$ and $\mathfrak{t}$.
First, we make a key observation that is also of independent interest. It states that a shortest path (that is not necessarily low) inside a parameter cell is uniquely determined by its monotone free space axis.
\begin{lemma}\label{lem:key}
Let $C$ be an arbitrarily chosen parameter cell and $a, b \in C$ such that $a \leq_{xy} b$. Furthermore, let $\ell$ be the monotone free space axis of $C$ and $R$ the rectangle that is induced by $a$ and $b$. The shortest path $\pi_{ab} \subset C$ between $a$ and $b$ is given as:
\begin{itemize}
\item $ac_1 \cup c_1c_2 \cup c_2b$, if $\ell$ intersects $R$ in $c_1$ and $c_2$ such that $c_1 <_{xy} c_2$ and as
\item $ac \cup cb$, otherwise, where $c$ is defined as the closest point from $R$ to $\ell$.
\end{itemize}
\begin{figure}
\caption{A shortest weighted $xy$-monotone path between two points $a$ and $b$ with $a \leq_{xy} b$.}
\label{fig:smallerWeightProjection}
\end{figure}
\end{lemma}
\begin{proof} Let $\psi_{ab} \subset C$ be an arbitrary monotone path that connects $a$ and $b$. In the following, we show that $|\pi_{ab}|_w \leq |\psi_{ab}|_w$. For this, we prove the following: Let $p \in C$ be chosen arbitrarily and $q$ be its orthogonal projection onto $\ell$ (see Figure~\ref{fig:smallerWeightProjection}(b)). We show $w(r) \leq w(p)$ for $r \in pq$. This implies that there is an injective, continuous function $\bot: \psi_{ab} \rightarrow \pi_{ab}$ with $w(\bot(p)) \leq w(p)$ for all~$p \in \psi_{ab}$. In particular, $\bot(p)$ is defined as the intersection point of $\pi_{ab}$ and the line $d$ that lies perpendicular to $\ell$ such that $p \in d$. The function $\bot(\cdot)$ is well defined and injective as both $\psi_{ab}$ and $\pi_{ab}$ are monotone paths that connect $a$ and $b$. Similarly, as in the proof of Lemma~\ref{lem:apprQualityG1}, this implies $|\pi_{ab}|_w \leq |\psi_{ab}|_w$ because $|\pi_{ab}| = |\psi_{ab}|$.
To be more precise, consider $\psi, \pi: [0,1] \rightarrow C$ to be parametrized such that $d_1(a,\psi(t)) = d_1(a,\pi(t)) = td_1(a, b)$. This implies $||(\psi)'(t)||_1 =d_1(a,b)= ||(\pi)'(t)||_1$ for all $t \in [0,1]$. Thus:
\begin{eqnarray*}
|\psi_{ab}|_w & = & \int_{0}^1 w (\psi_{ab}(t)) || (\psi_{ab})'(t)||_1\ dt \geq \int_{0}^1 w (\bot (\psi_{ab}(t))) || (\pi_{ab})'(t)||_1\ dt\\
& =& \int_{0}^1 w (\pi_{ab}(t)) || (\pi_{ab})'(t)||_1\ dt = | \pi_{ab}|_w.
\end{eqnarray*}
Finally, we show: $w(r) \leq w(p)$, for $r \in pq$. Note that $w(r)$ and $w(p)$ are the leash lengths for $r$ and $p$ that lie on the boundary of the white space inside $C$, i.e., on the boundary of the ellipses $\mathcal{E}_{r}$ and~$\mathcal{E}_p$, resp. (see Figure~\ref{fig:smallerWeightProjection}). Since $r \in pq$ we get $\mathcal{E}_r \subseteq \mathcal{E}_p$, which implies $w(r) \leq w(p)$.
\end{proof}
We call a point $p \in C$ \emph{canonical} if $p \in \ell$. Let $C_o$ and $C_p$ be two parameter cells that share a parameter edge $e$. Furthermore, let $o \in \ell_o \subset C_o$ and $p \in \ell_p \subset C_p$ be two canonical parameter points such that $o \leq_{xy} p$ where $\ell_o$ and $\ell_p$ are the monotone free space axes of $C_o$ and $C_p$, respectively. Let $c_o$ be the top-right end point of $\ell_o$ and $c_p$ the bottom-left end point of $\ell_p$. The following lemma is based on Lemma~\ref{lem:key} and characterizes how a shortest path passes through the parameter edges.\\
\noindent\begin{minipage}{0.5\linewidth}\vspace*{0.5ex}
\begin{lemma}\label{lem:canonicalOneVertex} If $c_o,c_p \in e$ and $c_o \leq_{xy} c_p$, $\pi_{op}$ is equal to the concatenation of the segments $oc_o$, $c_oc_p$, and $c_pp$ (see figure~(a) on right). Otherwise, there is a $z \in e$ such that $\pi_{op}$ is equal to the concatenation of the segments $oz_o$, $z_oz_p$, and $z_pp$, where $z_o \in \ell_{C_o}$ and $z_p \in C_p$ such that $z$ is the orthogonal projection of $z_o$ and $z_p$ onto $e$ (see figure~(b)).
\end{lemma}
\end{minipage}
\begin{minipage}{0.4\linewidth}
\begin{center}
\begin{tabular}{ccccccc}
\includegraphics[height=2.8cm]{crossingParameterEdgeA.pdf} & &
\includegraphics[height=2.8cm]{crossingParameterEdgeB.pdf}&&\\
{\small (a) $\pi_{op}$ for $o \leq_{xy} p$} & &
{\small (b) $\pi_{op}$ for $o \nleq_{xy} p$}&&
\end{tabular}
\end{center}
\end{minipage}
\\
\noindent {\bf Outline of the analysis of Case B: }
In the following, we apply Lemmas~\ref{lem:key} and~\ref{lem:canonicalOneVertex} to subpaths $\pi_{ab}$ of $\pi$ in order to ensure that~$\pi_{ab}$ is a subset of the union of a constant number of balls (that are approximated by grid balls in our approach) and monotone free space axes. In particular, we construct a discrete sequence of points from $\pi$ which lie on the free space axes, see Subsection~\ref{subsec:Sep}.
For each induced subpath~$\pi_{ab}$, we ensure that $\pi_{ab}$ crosses one or two perpendicular parameter edges. For the analysis we distinguish between the two cases which we consider separately:\\
{\bf Case 1:} $\pi_{ab}$ crosses one parameter edge and
{\bf Case 2:} $\pi_{ab}$ crosses two parameter edges.
\begin{figure}
\caption{Three different subcases in which we ensure, differently, that we capture a subpath $\pi_{ab}$.}
\label{fig:captureTheSubpath}
\end{figure}
For Case 1, we show that, if $\pi_{ab}$ crosses one edge ($e$) then $\pi_{ab}$ is a subset of the union of the two monotone free space axes of the parameter cells that share $e$ and the ball $B_{62w(u)}(u)$ for $u := \arg \min_{p \in e}w(p)$ (see Figure~\ref{fig:captureTheSubpath}(a) and Subsection~\ref{subsec:anaOneCrossing}).
For Case 2, (see Subsection~\ref{subsec:anaTwoCrossing}), we consider the case that $\pi_{ab}$ crosses two parameter edges~$e_1$ and~$e_2$. In particular, $\pi_{ab}$ runs through three parameter cells $C_q$, $C_r$, and $C_s$, where $C_q$ and~$C_r$ share $e_1$ and~$C_r$ and $C_s$ share $e_2$.
We further distinguish between two subcases. For this, let $u_1 := \arg \min_{p \in e_1} w(p)$ and $u_2 := \arg \min_{p \in e_2} w(p)$. \\
{\bf Case 2.1:} We show that, if $d_1(u_1,u_2) \geq 6 \max \{ w(u_1),w(u_2) \}$, then $\pi_{ab}$ is a subset of the union of the balls $B_{62w(u_1)}(u_1)$ and $B_{62w(u_2)}(u_2)$ and the monotone free space axes of $C_q$, $C_r$, and $C_s$ (see Figure~\ref{fig:captureTheSubpath}(b) and Lemma~\ref{lem:shortestPathOneCrossing}).\\
{\bf Case 2.2:} We show that, if $d_1(u_1,u_2) \leq 6 \max \{ w(u_1),w(u_2) \}$, then $\pi_{ab}$ is a subset of the union of the ball $B_{62w(u)}(u)$ and the monotone free space axes of $C_q$ and $C_s$ (see Figure~\ref{fig:captureTheSubpath}(c) and Lemma~\ref{lem:twoCrossingComplex}).
For the analysis of the length of a shortest path $\widetilde{\pi} \subset G_2$ that lies between $\mathfrak{s}$ and $\mathfrak{t}$, we construct for $\pi_{ab} \subset \pi$ a path $\widetilde{\pi}_{ab} \subset G_2$ between $a$ and $b$ such that $|\widetilde{\pi}_{ab}|_{w} \leq (1 + \varepsilon) |\pi_{ab}|_w$. In particular, $\widetilde{\pi}_{ab}$ is a subset of the grid balls that approximate the above considered balls and the free space axes that are involved in the individual (sub-)case for $\pi_{ab}$ (see, Figure~\ref{fig:captureTheSubpath}). Finally, we define $\widetilde{\pi} \subset G_2$ as the concatenation of the approximations $\widetilde{\pi}_{ab}$ for all $\pi_{ab}$.
\subsubsection{Separation of a shortest path}\label{subsec:Sep}
In the following, we determine a discrete sequence of canonical points $\mathfrak{s} = p_1,...,p_k = \mathfrak{t} \in \pi$ such that $\pi_{p_ip_{i+1}}$ crosses at most two parameter lines for each $i \in \{ 1,...,k-1 \}$. First we need the following supporting lemma:
\begin{lemma}\label{lem:tech}
For all $q_1, q_2 \in \pi$ that lie in the same parameter cell with $q_1 \leq_{xy} q_2$ we have $q_2.y-q_1.y - \frac{\mu}{50} \leq q_2.x-q_1.x \leq q_2.y-q_1.y + \frac{\mu}{50}$.
\end{lemma}
\begin{proof}
The triangle inequality implies:\\
$d_2(T_2(q_2.y),T_2(q_1.y)) \leq d_2(T_2(q_2.y),T_1(q_2.x)) + d_2(T_1(q_2.x),T_1(q_1.x)) + d_2(T_1(q_1.x),T_2(q_1.y))$. This implies $d_2(T_2(q_2.y),T_2(q_1.y)) - \frac{\mu}{50} \leq d_2(T_1(q_2.x),T_1(q_1.x))$,
because\\ $d_2(T_2(q_2.y),T_1(q_2.x)), d_2(T_1(q_1.x), T_2(q_1.y)) \leq \frac{\mu}{100}$. Furthermore, $d_2(T_2(q_2.y),T_2(q_1.y)) = q_2.y-q_1.y$ and $d_2(T_1(q_2.x), T_1(q_1.x)) = q_2.x - q_1.x$ because $q_1$ and $q_2$ lie in the same cell. This implies $q_2.y-q_1.y - \frac{\mu}{50} \leq q_2.x-q_1.x$. A corresponding argument yields $q_2.x-q_1.x \leq q_2.y-q_1.y + \frac{\mu}{50}$.
\end{proof}
\begin{lemma}\label{lem:separatingPoints}
There are canonical points $\mathfrak{s} = p_1,\dots p_k = \mathfrak{t} \in \pi$ such that for all $i \in \{ 1,\dots,k-1 \}$ the following holds: (P1) $\pi_{p_{i}p_{i+1}}$ crosses at most one vertical and at most one horizontal parameter line which are both not part of $\partial P$ and (P2) the distance of $p_i$ to a parameter line is lower-bounded by $\frac{\mu}{6}$ for all $i \in \{ 2,\dots,k-1 \}$.
\end{lemma}
\begin{proof} First, we give the construction of $p_2,\dots,p_{k-1}$. After that, we establish Properties (P1) and (P2), for each $i \in \{ 1,\dots,k-1 \}$.
\begin{itemize}
\item Construction of $p_2,\dots,p_{k-1}$: We construct $p_2,\dots,p_{k-1}$ iteratively with $p_1 := \mathfrak{s}$. Point $p_2$ is defined as the first point on $\pi$ such that $p_2.x = \frac{\mu}{2}$ or $p_2.y = \frac{\mu}{2}$. For $i \in \{ 2,\dots,k-3 \}$, let $p_1,\dots,p_i$ be defined, $c$ be the top-right corner of the parameter cell that contains $p_{i}$, and $u_1$ be the next intersection point of $\pi$ (behind $p_i$) with the parameter grid, see Figure~\ref{fig:constructionSequenceCanonicalPoints}. W.l.o.g., we assume that $u_1$ lies on a vertical parameter line.
If $d_1(c,u_1) \geq \frac{\mu}{2}$, we define $p_{i+1}$ as the first point on $\pi$ with $p_{i+1}.y = u_1.y + \frac{c.y-u_1.y}{2}$ or $p_{i+1}.x = u_1.x + \frac{\mu}{4}$, see Figure~\ref{fig:constructionSequenceCanonicalPoints}(a).
If $d_1(c,u_1) < \frac{\mu}{2}$, we consider the next intersection point $u_2$ of $\pi$ with a horizontal parameter line such that $u_1 \leq_{xy} u_2$. We define $p_{i+1}$ as the first point behind $u_2$ such that $p_{i+1}.y = u_2.y + \frac{\mu}{4}$.
\begin{figure}
\caption{The iterative construction of $p_{i+1}$.}
\label{fig:constructionSequenceCanonicalPoints}
\end{figure}
\item (P1) and (P2): W.l.o.g., we assume $u_1.x = c.x$. For the configurations of $u_1.y = c.y$ a symmetric argument applies. Assume $p_{i-1}$ and $\pi_{p_{i-1}p_{i}}$ fulfil (P1) and (P2) for $i \in \{ 2,\dots, k-2 \}$. We show that $\pi_{p_{i}p_{i+1}}$ fulfils (P1) and $p_{i+1}$ fulfils (P2) for the two cases $d_1(c,u_1) \geq \frac{\mu}{2}$ and $d_1(c,u_1) < \frac{\mu}{2}$ separately. By induction, the statement of the lemma follows.
\begin{itemize}
\item $d_1(c,u_1) \geq \frac{\mu}{2}$:
\begin{itemize}
\item (P1): In both subcases $p_{i+1}.y = u_1.y + \frac{c.y-u_1.y}{2}$ or $p_{i+1}.x = u_1.x + \frac{\mu}{4}$ it follows that $p_{i+1}$ lies in the parameter cell $C_r$ that lies to the right of the parameter cell that contains $p_1$. In particular, in the first (second) subcase $p_{i+1}$ lies by construction in the same parameter row (column). As $\pi$ is monotone and $p_{i+1}$ is defined as the first point that fulfils one of the two constraints, $p_{i+1} \in C_r$. This implies (P1).
\item (P2): The above argument implies that the distances of $p_{i+1}$ to the right and the top parameter line are lower-bounded by $\frac{\mu}{2}$. If $p_{i+1}.y = u_1.y + \frac{c.y-u_1.y}{2}$, we have $p_{i+1}.y - u_1.y = \frac{c.y-u_1.y}{2} \geq \frac{\mu}{4}$. Thus, Lemma~\ref{lem:tech} implies $p_{i+1}.x-u_1.x \geq \frac{\mu}{4} - \frac{\mu}{50} \geq \frac{\mu}{6}$ which yields that the distance of $p_{i+1}$ to the left parameter line is lower-bounded by $\frac{\mu}{6}$. If $p_{i+1}.x = u_1.x + \frac{\mu}{4}$, we have $p_{i+1}.x - u_1.x = \frac{\mu}{4}$. Thus, Lemma~\ref{lem:tech} implies $p_{i+1}.y - u_1.y \geq \frac{\mu}{4} - \frac{\mu}{50} \geq \frac{\mu}{6}$ which yields that the distance of $p_{i+1}$ to the bottom parameter line is lower-bounded by $\frac{\mu}{6}$. Hence, (P2) is fulfilled.
\end{itemize}
\item $d_1(c,u_1) < \frac{\mu}{2}$:
\begin{itemize}
\item (P1): Assume $\pi$ crosses another vertical parameter line in a point $u$ that lies before~$u_2$. This implies, $u.x - u_1.x \geq \mu$ and $u.y - u_1.y \leq \frac{\mu}{2}$. Hence, $u.x - u_1.x \geq 2 (u.y - u_1.y)$ which is a contradiction to Lemma~\ref{lem:tech}. Thus, (P1) is fulfilled.
\item (P2): The construction of $u_2$ implies that the distances to the bottom and to the top parameter line is lower-bounded by $\frac{\mu}{4},\frac{3\mu}{4} \geq \frac{\mu}{6}$. Finally, we lower-bound the distances of $p_{i+1}$ to the left and to the right parameter line as follows: By combining $u_2.y-u_1.y \leq \frac{\mu}{2}$ and Lemma~\ref{lem:tech} we obtain $c.x \leq u_2.x \leq c.x + \frac{\mu}{2}+\frac{\mu}{50}$. Another application of Lemma~\ref{lem:tech}, combined with $p_{i+1}.y = u_2.y + \frac{\mu}{4}$ leads to $c.x + \frac{\mu}{4}-\frac{\mu}{50} \leq p_{i+1}.x \leq c.x + \frac{\mu}{2}+\frac{\mu}{50} + \frac{\mu}{4} + \frac{\mu}{50}$. Thus, the distances of $p_{i+1}$ to the left and to the right parameter line are lower-bounded by $\frac{\mu}{6}$. Hence, (P2) is fulfilled.
\end{itemize}
\end{itemize}
\end{itemize}
\end{proof}
\subsubsection{Analysis of subpaths that cross one parameter edge}\label{subsec:anaOneCrossing}
We need to show that those parts of $\pi$ that do not lie on the free space axes are covered by the balls~$B_{62w(u)}$. For this, we use the following geometrical interpretation of the free space axes $\ell$ and $\hbar$ of a parameter cell $C$. Let $t_1 \in T_1$ and $t_2 \in T_2$ be the segments that correspond to $C$. We denote the angular bisectors of $t_1$ and $t_2$ by $d_{\ell}$ and $d_{\hbar}$ such that the start points $t_1(0)$ and $t_2(0)$ of $t_1$ and $t_2$ lie on different sides w.r.t. $\ell$, see Figure~\ref{fig:dual}(b). If $t_1$ and~$t_2$ are parallel, $d_{\ell}$ denotes the line between $t_1$ and $t_2$ and we declare $d_{\hbar}$ as undefined\footnote{There is a corresponding definition of $\hbar$ in the case of $t_1 \parallel t_2$. However, considering $\hbar$ for $t_1 \parallel t_2$ would unnecessarily complicate the presentation because $\hbar$ is not required.}. We observe (see Figure~\ref{fig:dual}):
\begin{observation}\label{obs:dual}
$q \in \ell \Leftrightarrow T_1(q.x)T_2(q.y) \bot d_{\ell}$ and $p \in \hbar \Leftrightarrow T_1(p.x)T_2(p.y) \bot d_{\hbar}$ .
\end{observation}
\begin{figure}
\caption{Duality of parameter points from $\ell$ ($\hbar$) and leashes that lie perpendicular to $d_{\ell}$ ($d_{\hbar}$).}
\label{fig:dual}
\end{figure}
From now on, let $o,p \in \pi$ be two consecutive, canonical points that are given via Lemma~\ref{lem:separatingPoints} such that $o \leq_{xy} p$. Furthermore, let $\ell_o$ and $\ell_p$ be the free space axes of the parameter cells~$C_o$ and $C_p$ such that $o \in \ell_o \subset C_o$ and $p \in \ell_p \subset C_p$.
\begin{lemma}\label{lem:oneCrossing}
If $\pi_{op}$ crosses one parameter edge $e$, $c_o,c_p \in e$ exist and we have $d_{\infty}(c_o,c_p) \leq \frac{w(u)}{2}$ where $u = \arg \min_{p \in e} w(p)$.
\end{lemma}
\begin{proof} W.l.o.g., we assume that $e$ is horizontal. Let $t_1,t_2 \in T_1$ and $t_3 \in T_2$ be the segments that induce parameter cells $C_o$ and $C_p$. First, we show $\angle (t_1,t_3), \angle (t_2,t_3) \leq 7^{\circ}$ and, then, that $d_1(c_o,c_p) \leq w(u)$. Let $q_1 \in \ell_o$ and $q_2 \in \ell_p$ such that $q_1.x = c_p.x$ and $q_2.x = c_o.x$, see Figure~\ref{fig:oneCrossing}(a). $\angle (t_1,t_3) \leq 7^{\circ}$ implies $\angle (T_1(u.x)T_2(u.y), T_1(u.x)T_2(q_2.y))\leq 3.5^{\circ}$. Furthermore, $c_p = e \cap \ell_p$ implies: $c_p$ corresponds to a leash $l_p = (T_1(c_p.x), T_2(c_p.y))$ such that $T_1(c_p.x) = T_1(u.x)$ and $T_1(c_p.x)T_2(c_p.y) \bot d_{\ell_p}$, see Figure~\ref{fig:oneCrossing}(b). Thus, $d_2(T_2(q_2.y),T_2(u.y))$ is upper-bounded by $d_2(T_2(u.y), T_2(q_2.y))$ which is upper-bounded by $d_2(T_1(u.x),T_2(u.y))\tan (3.5^{\circ}) \leq 0.065 w(u) < \frac{w(u)}{2}$.
\begin{figure}
\caption{Configuration of the Lemmas~\ref{lem:oneCrossing} and~\ref{lem:shortestPathOneCrossing}.}
\label{fig:oneCrossing}
\end{figure}
Finally, we show $\angle (t_1,t_3), \angle (t_2,t_3) \leq 7^{\circ}$: We have $d_2(T_1(o.x), T_2(o.y)), d_2(T_1(u.x), T_2(u.y)) \leq \frac{\mu}{100}$ because $\pi$ is low. Lemma~\ref{lem:separatingPoints} implies $d_2(T_1(o.x),T_1(u.x)), d_2(T_2(o.y), T_2(u.y)) \geq \frac{\mu}{6}$. Thus, $\angle (t_1,t_3) \leq \arcsin \frac{6}{50} \leq 7^{\circ}$. A similar argument implies that $\angle (t_2,t_3) \leq \arcsin \frac{6}{50} \leq 7^{\circ}$.
\end{proof}
\begin{lemma}\label{lem:shortestPathOneCrossing}
$\pi_{op} \subset \ell_o \cup B_{w(u)}(u) \cup \ell_p$ (see Figure~\ref{fig:captureTheSubpath}(a)).
\end{lemma}
\begin{proof}
We combine Lemmas~\ref{lem:canonicalOneVertex} and~\ref{lem:oneCrossing}. Lemma~\ref{lem:canonicalOneVertex} implies that $\pi_{op}$ orthogonally crosses $e$ at a point $z$ that lies between $c_o$ and $c_p$ such that $z \in z_oz_p \subset \pi_{op}$. Lemma~\ref{lem:oneCrossing} implies $d_1(c_o,c_p)\leq\frac{w(u)}{2}$. Thus, $z_oz_p \subset B_{w(u)}(u)$. Furthermore, $oz_o \subset \ell_o$ and $z_pp \subset \ell_p$. This implies $\pi_{op} \subset \ell_o \cup B_{w(u)}(u) \cup \ell_p$ because $\pi_{op} = oz_o \cup z_oz_p \cup z_pp$.
\end{proof}
\begin{lemma}\label{lem:shortestPathOneCrossingAppr}
There is a path $\widetilde{\pi}_{op} \subset G_2$ between $o$ and $p$ such that $|\widetilde{\pi}_{op}|_w \leq (1+\varepsilon) |\pi_{op}|_w$.
\end{lemma}
\begin{proof} By Lemma~\ref{lem:shortestPathOneCrossing}, the two following intersection points are well defined: Let $z_o$ be the intersection point of $\ell_o$ and $\partial B_{62w(u)}(u)$ which lies on the left or bottom edge of $\partial B_{62w(u)}(u)$. Analogously, let $z_p$ be the intersection point of $\ell_p$ and $\partial B_{62w(u)}(u)$ which lies on the right or top edge of $\partial B_{62w(u)}(u)$. By Lemma~\ref{lem:shortestPathOneCrossing}, we can subdivide $\pi_{op}$ into the three pieces $oz_o \subset \ell_o$, $\pi_{z_oz_p}$, and $z_pp \subset \ell_p$. As $oz_o,z_pp \subset G_2$, we just have to construct a path $\widetilde{\pi}_{z_oz_p} \subset G_2$ between~$z_o$ and $z_p$ such that $|\widetilde{\pi}_{z_oz_p}|_w \leq (1 + \varepsilon) |\pi_{z_oz_p}|_w$.
We construct $\widetilde{\pi}_{z_oz_p}$ by applying the same approach as used in the proof of Lemma~\ref{lem:apprQualityG1}, see Figure~\ref{fig:captureTheSubpath}(a).
To upper-bound $|\widetilde{\pi}_{z_oz_p}|_w$ by $(1+\varepsilon)|\pi_{z_oz_p}|_w$ we first lower-bound $|\pi_{z_oz_p}|_w$ by $\frac{1}{2}w^2(u)$. Then, we apply an approach that is similar to the approach used in the proof of Lemma~\ref{lem:apprQualityG1}.
\begin{itemize}
\item $|\pi_{z_oz_p}|_w \geq \frac{1}{2}w^2(u)$: Let $\psi := \pi_{z_oz_p} \cap B_{w(u)}(u)$. As $|\psi| \geq w(u)$ and $w(\cdot)$ is $1$-Lipschitz, we obtain $|\psi|_w \geq \frac{1}{2}w^2(u)$. This implies $|\pi_{z_oz_p}|_w \geq \frac{1}{2}w^2(u)$ because~$\psi \subset \pi_{z_oz_p}$.
\item $|\widetilde{\pi}_{z_oz_p}|_w \leq (1+\varepsilon) |\pi_{z_oz_p}|_w$: We observe that $|\pi_{z_oz_p}| \leq 114 w(u)$ $(\ddagger)$ because $\pi_{z_oz_p}$ is monotone and $\pi_{z_oz_p} \subset B_{62w(u)}(u)$. Furthermore, we parametrize $\widetilde{\pi}_{z_oz_p}, \pi_{z_oz_p}: [0,1] \rightarrow P$ such that $d_1(z_o, \widetilde{\pi}_{z_oz_p}(t)) = d_1(z_o,\pi_{z_oz_p}(t))$. This implies $w(\widetilde{\pi}_{z_oz_p}(t)) \leq w(\pi_{z_oz_p}(t)) + \frac{\varepsilon w(u)}{228}$ $(\dagger)$ and $||(\widetilde{\pi}_{z_oz_p})'(t)||_1 = ||(\pi_{z_oz_p})'(t)||_1$ $(\star)$ for all $t \in [0,1]$. Thus:
\begin{eqnarray*}
|\widetilde{\pi}_{z_oz_p}|_w & = & \int_0^1 w(\widetilde{\pi}_{z_oz_p}(t)) ||(\widetilde{\pi}_{z_oz_p})'(t)||_1 \ dt \\
& \stackrel{(\dagger) + (\star)}{\leq} & \int_0^1 w(\pi_{z_oz_p}(t))||(\pi_{z_oz_p})'(t)||_1 \ dt + \frac{\varepsilon w(u)}{228}\int_0^1 ||(\pi_{z_oz_p})'(t)||_1 \ dt\\
& \stackrel{(\ddagger)}{\leq} & |\pi_{z_oz_p}|_w + \frac{\varepsilon}{2}w^2(u)\\
& \stackrel{|\pi_{z_oz_p}|_w \geq \frac{1}{2}w^2(u)}{\leq}& (1+\varepsilon)|\pi_{z_oz_p}|_w.
\end{eqnarray*}
\end{itemize}
\end{proof}
\subsubsection{Analysis of subpaths that cross two parameter edges}\label{subsec:anaTwoCrossing}
Let $q$ and $s$ be two consecutive parameter points from $\{ p_2,\dots,p_{k-1} \}$ such that $\pi_{qs}$ crosses two parameter edges $e_1$ and $e_2$. By Lemma~\ref{lem:separatingPoints}, $e_1$ and $e_2$ are perpendicular to each other and are adjacent at a point $c$. Let $C_r$ be the parameter cell such that $e_1$ and $e_2$ are part of the boundary of $C_r$. Furthermore, let $C_q$ and $C_s$ be the parameter cells such that $q \in C_q$ and $s \in C_{s}$. We denote the monotone free space axis of $C_q$, $C_r$, and $C_s$ by $\ell_q$, $\ell_r$, and $\ell_s$, respectively. Let $u_1 := \arg \min_{a \in e_1}w(a)$ and $u_2 := \arg \min_{a \in e_2}w(a)$.
\begin{lemma}\label{lem:twoCrossing}
If $d_1(u_1,u_2) \geq 6 \max \{ w(u_1), w(u_2) \}$, there is another canonical parameter point $r \in \ell_r$ such that $\pi_{qs} \subset \ell_{q} \cup B_{w(u_1)}(u_1) \cup \ell_{r} \cup B_{w(u_2)}(u_2) \cup \ell_s$.
\begin{figure}
\caption{Configuration of Lemma~\ref{lem:twoCrossing}.}
\label{fig:twoCrossing}
\end{figure}
\end{lemma}
\begin{proof}
W.l.o.g., assume that $\pi_{qs}$ crosses first a vertical parameter edge. Let $t_1,t_2 \in T_1$ and $t_3,t_4 \in T_2$ be the segments that induce parameter cells $C_q$, $C_r$, and $C_s$, see Figure~\ref{fig:twoCrossing}. Let $c_q$ and $c_r^2$ be the top-right end points of $\ell_q$ and $\ell_r$, respectively. Let $c_r^1$ and $c_s$ be the bottom-left end points of $\ell_r$ and $\ell_s$, respectively (see Figure~\ref{fig:twoCrossing}(a)). Let $a_1,a_2 \in \ell_r$ such that $c_q.y = a_1.y$ and $a_2.x = c_s.x$. Furthermore, let $b_1 \in \ell_q$ and $b_2 \in \ell_s$ such that $b_1.y = c_r^1.y$ and $c_r^2.x = b_2.x$. In the following, we show that $qb_1, a_1a_2, b_2s \subset \pi_{qs}$, $b_1,a_1 \in B_{w(u_1)}(u_1)$, and $b_2,a_2 \in B_{w(u_2)}(u_2)$. This implies $\pi_{qs} \subset \ell_{q} \cup B_{w(u_1)}(u_1) \cup \ell_{r} \cup B_{w(u_2)}(u_2) \cup \ell_s$ and concludes the proof.
Below, we show $\angle (t_2,t_3) \leq 42^{\circ}$. Then, a similar argument as used in Lemma~\ref{lem:oneCrossing} implies $d_1(c_r^1,u_1),d_1(c_q,u_1) \leq \frac{w(u_1)}{2}$ and $d_1(c_s,u_2),d_1(c_r^2,u_2) \leq \frac{w(u_2)}{2}$.
Finally, we show $\angle (t_2,t_3) \leq 42^{\circ}$: $d_1(u_1,u_2) \geq 6 \max \{ w(u_1), w(u_2) \}$ implies that $d_2(T_1(u_1.x),T_2(u_1.y))$ and $ d_2(T_1(u_2.x),T_2(u_2.y))$ are upper-bounded by $3 \min \{ d_2(T_1(u_1.x), T_1(u_2.x)), d_2(T_2(u_1.y), T_2(u_2.y)) \}$. Thus, we obtain $\angle (t_2,t_3) \leq \arcsin (\frac{2}{3}) < 42^{\circ}$.
\end{proof}
\begin{lemma}\label{lem:shortestPathTwoCrossingApprSimplie}
If $d_1(u_1,u_2) \geq 6 \max \{ w(u_1), w(u_2) \}$, there is a path $\widetilde{\pi}_{qs} \subset G_2$ between $q$ and $s$ such that $|\widetilde{\pi}_{qs}|_w \leq (1+\varepsilon) |\pi_{qs}|_w$.
\end{lemma}
\begin{proof}
Lemma~\ref{lem:twoCrossing} implies that the following constructions are unique and well defined: Let $z_1$ ($z_2$) be the intersection point of $\partial B_{w(u_1)}(u_1)$ and $\ell_q$ ($\ell_r$) that lies on the left or bottom (respectively, right or top) edge of $\partial B_{w(u_1)}(u_1)$. Analogously, let $z_3$ ($z_4$) be the intersection point of $\partial B_{w(u_2)}(u_2)$ and $\ell_r$ ($\ell_s$) that lies on the left or bottom (respectively, right or top) edge of $\partial B_{w(u_2)}(u_2)$.
By applying the approach of Lemma~\ref{lem:shortestPathOneCrossingAppr}, for $\pi_{z_1z_2}$ and $\pi_{z_3z_4}$, we obtain a path $\widetilde{\pi}_{z_1z_2} \subset G_2$ between $z_1$ and $z_2$ and a path $\widetilde{\pi}_{z_3z_4} \subset G_2$ between $z_3$ and $z_4$ such that $|\widetilde{\pi}_{z_1z_2}|_w \leq (1 + \varepsilon) |\pi_{z_1z_2}|_w$ and $|\widetilde{\pi}_{z_3z_4}|_w \leq (1 + \varepsilon) |\pi_{z_3z_4}|_w$.
This concludes the proof because $qz_1, z_2z_3, z_4s \subset G_2$.
\end{proof}
\begin{lemma}\label{lem:twoCrossingComplex}
If $d_1(u_1,u_2) \leq 6 \max \{ w(u_1), w(u_2) \}$, we have $\pi_{qs} \subset \ell_{q} \cup B_{62 w(u)}(u) \cup \ell_s$ where $u := \arg \max_{v \in \{ u_1,u_2 \}} w(v)$.
\end{lemma}
\begin{proof} Let $a \in \pi_{qs}$ be the last point that lies on $\ell_q$, i.e., there is no point $d \in \pi \cap \ell_q \setminus \{ a \}$ such that $a \leq_{xy} d$, see Figure~\ref{fig:twoCrossingComplex}(b). In the following, we show $d_1(a,c) \leq 56 \max \{ w(u_1), w(u_2) \}$. Analogously, we construct the first point $b \in \pi_{qs}$ that lies on $\ell_s$, i.e., there is no point $d \in \pi \cap \ell_s \setminus \{ b \}$, see Figure~\ref{fig:twoCrossingComplex}(b). A similar argument as above implies $d_1 (b,c) \leq 56 \max \{ w(u_1),w(u_2) \}$. The triangle inequality implies $d_1(d,u) \leq d_1(d,c)+d_1(c,u) \leq 62 \max \{ w(u_1),w(u_2) \}$ for all $d \in \pi_{ab}$ and $u \in \{ u_1,u_2 \}$. This concludes the proof.
For the sake of contradiction we assume $d_1(a,c) > 56 \max \{ w(u_1), w(u_2) \}$. Lemma~\ref{lem:key} implies that $\pi_{ab}$ crosses the boundary $\partial C_q$ of $C_q$ in the orthogonal projection $a_1$ of $a$ onto the top edge of $\partial C_q$ or in the orthogonal projection $a_2$ of $a$ onto the right edge of $\partial C_q$, see Figure~\ref{fig:twoCrossingComplex}(b). Thus, either $aa_1 \subset \pi_{ab}$ or $aa_2 \subset \pi_{ab}$ because $\pi_{ab}$ is monotone. In the following, we show $|aa_1|_w, |aa_2|_w \geq 0.4874 \lambda^2 (\max \{ w(u_1),w(u_2)\})^2$ where $\lambda \geq 0$ such that $ d_1(u_1,a) = \lambda \max \{ w(u_1), w(u_2) \}$. This implies $|\pi_{ab}|_w \geq 0.4874 \lambda^2 \max^2 \{ w(u_1), w(u_2) \}$.
Furthermore, we construct another path $\widetilde{\pi}_{ab}$ connecting $a$ and $b$ such that $|\widetilde{\pi}_{ab}|_w < ( 4 \lambda (1.01 + 0.09 \lambda) + 114) (\max \{ w(u_1), w(u_2)\})^2$. Additionally, we show $\lambda \geq 50$. This is a contradiction to the fact that $\pi_{ab}$ is a shortest path between $a$ and $b$:
\begin{alignat*}{4}
&|\pi_{ab}|_w&\leq&|\widetilde{\pi}_{ab}|_w\\
\Rightarrow & 0.4874 \lambda^2 (\max \{ w(u_1), w(u_2) \})^2 & \leq & ( 4 \lambda (1.01 + 0.09 \lambda) + 114) (\max \{ w(u_1), w(u_2)\})^2\\
\Leftrightarrow & 0.4874 \lambda^2 & \leq & \lambda^2 (\frac{4.04}{\lambda} + 0.36 + \frac{114}{\lambda^2})\\
\stackrel{\lambda \geq 50}{\Rightarrow} & 0.4874 & \leq & 0.0808 + 0.36 + 0.0456 = 0.4864
\end{alignat*}
\begin{figure}
\caption{Two paths $\widetilde{\pi}_{ab}$ and $\pi_{ab}$ between $a$ and $b$.}
\label{fig:twoCrossingComplex}
\end{figure}
\begin{itemize}
\item Construction of $\widetilde{\pi}_{ab}$: Let $c_q$ be the top-right end point of $\ell_q$ and $c_s$ the bottom-left end point of $\ell_s$, see Figure~\ref{fig:twoCrossingComplex}(d). We define $\widetilde{\pi}_{ab} := ac_q \cup c_qc \cup cc_s \cup c_sb$.
\item Upper bound for $|\widetilde{\pi}_{ab}|_w$: First of all we show $d_1(a,c_q) \leq 2 \lambda \max \{w(u_1),w(u_2) \}$. After that we show $w(d) \leq (1.01 + 0.09 \lambda) \max \{ w(u_1), w(u_2) \}$ for all $d \in a c_q$. This implies $|\widetilde{\pi}_{ac_q}|_w \leq 2 \lambda (1.01 + 0.09 \lambda) (\max \{w (u_1),w(u_2) \})^2$. A similar argument implies $|\widetilde{\pi}_{c_sb}|_w \leq 2 \lambda (1.01 + 0.09 \lambda) (\max \{ w(u_1),w (u_2) \})^2$. Furthermore, we show $d_1(c_q, c) \leq 7 \max \{ w(u_1),w (u_2) \}$ and $w(d) \leq 8.01 \max \{ w(u_1), w(u_2)\}$ for all $d \in c_q c$. This implies $|\widetilde{\pi}_{c_qc}|_w \leq 57 (\max \{w(u_1),w(u_2)\})^2$. A similar argument implies $|\widetilde{\pi}_{cc_s}|_w \leq 57 (\max \{ w(u_1),w(u_2) \})^2$. Finally, we upper-bound
\begin{eqnarray*}
|\widetilde{\pi}_{ab}|_ w & = & |\widetilde{\pi}_{ac_q}|_ w + |\widetilde{\pi}_{c_qc}|_ w + |\widetilde{\pi}_{cc_s}|_ w + |\widetilde{\pi}_{c_sb}|_ w\\
&\leq& ( 4 \lambda (1.01 + 0.09 \lambda) + 114) (\max \{ w(u_1), w(u_2)\})^2.
\end{eqnarray*}
\begin{itemize}
\item $d_1(a,c_q) \leq 2 \lambda \max \{w(u_1),w(u_2) \}$: $d_1(u_1,a) = \lambda \max \{ w(u_1),w(u_2) \}$ implies $d_1(a,e_1) \leq \lambda \max \{ w(u_1),w(u_2) \}$. As the gradient of $\ell_q$ is $1$ and $c_q \in \ell_q$, we have $d_1(a,c_q) \leq 2 \lambda \max \{ w(u_1), w(u_2) \}$.
\item $w(d) \leq (1.01 + 0.09 \lambda) w(u_1)$ for all $d \in a c_q$: First we show $\angle (t_1,t_3) \leq 10^{\circ}$: By Lemma~\ref{lem:separatingPoints} we know $d_1(q,e_1) \geq \frac{\mu}{6}$. Let $d := \pi_{ab} \cap e_1$. This implies $d_1(q,d)\geq \frac{\mu}{2}$. Furthermore, we have $w(q),w(d) \leq \frac{\mu}{100}$ because $\pi$ is low. This implies $\angle (t_1,t_3) \leq \arcsin (\frac{\mu}{100}/\frac{2 \mu}{6}) \leq 10^{\circ}$. Thus we have $\angle (t_1,d_{\ell_q}), \angle (t_3,d_{\ell_q}) \leq 5^{\circ}$. This implies $w(c_q) = d_2(T_1(c_q.x), T_2(c_q.y)) \leq \frac{1}{\cos (5^{\circ})} d_2(T_1(u_1.x), T_2(u_1.y)) \leq 1.01 w(u_1)$. As $\angle (t_1, d_{\ell_q}), \angle (t_3,d_{\ell}) \leq 5^{\circ}$, we get $w(d) \leq w(c_q) + 2 \sin (5^{\circ}) \lambda \max \{ w(u_1), w(u_2) \} \leq (1.01 + 0.09 \lambda) \max \{ w(u_1), w(u_2) \}$.
\item $d_1(c_q,c) \leq 7 \max \{ w(u_1), w(u_2) \}$: By $d_1(u_1,u_2) \leq 6 \max \{ w(u_1), w(u_2) \}$ it follows that $d_1(u_1,c) \leq 6 \max \{ w(u_1), w(u_2) \}$ holds. Furthermore, we have $d_1(u_1,c_q) \leq \sin (5^{\circ}) w(u_1)$ because $\angle (d_{\ell_q},t_1),\angle (d_{\ell_q}, t_3) \leq 5^{\circ}$. The triangle inequality implies $d_1(c,c_q) \leq d_1(c,u_1) + d_1(u_1,c_q) \leq 7 \max \{ w(u_1), w(u_2) \}$.
\item $w(d) \leq 8.01 \max \{ w(u_1), w(u_2) \}$ for all $d \in c_qc$: Above we already showed $w(c_q) \leq 1.01 w(u_1) \leq 1.01 \max \{ w(u_1),w(u_2) \}$. By combining the $1$-Lipschitz continuity of $w(\cdot)$ and $d_1(c_q,c) \leq 7 \max \{ w(u_1), w(u_2) \}$ we obtain $w(d) \leq 8.01 \max \{ w(u_1), w(u_2) \}$.
\end{itemize}
\item $\lambda \geq 50$: Above we showed $d_1(c,u_1) \geq 6 \max \{ w(u_1), w(u_2) \}$. The triangle inequality implies $d_1(u_1,a) + d_1(u_1,c) \geq d_1(a,c) \Rightarrow \lambda \max \{ w(u_1), w(u_2) \} \geq (56 - 6) \max \{ w(u_1),w(u_2) \}$.
\item $|aa_1|_w, |aa_2|_w \geq 0.4874 \lambda^2 (\max \{ w(u_1),w(u_2) \})^2$: First we lower-bound $d_1(a,a_1), d_1(a,a_2) \geq 0.98 \lambda \max \{ w(u_1),w(u_2) \}$: Above we already showed $d_1(u_1,c_q) \leq 0.09 \max \{ w(u_1),w(u_2) \}$. Combining this with $d_1(u_1,a) = \lambda \max \{ w(u_1),w(u_2) \}$ and the triangle inequality yields $d_1(a,c_q) \geq (\lambda-0.09) \max \{ w(u_1), w(u_2) \}$. Thus $d_1(a,a_1), d_1(a,a_2) \geq 0.98 \lambda \max \{ w(u_1),w(u_2) \}$ because $\ell_q$ has a gradient of $1$ and $\lambda \geq 50$.
Above we already showed $w(a) \leq (1.01 + 0.09 \lambda) \max \{ w(u_1), w(u_2) \} \leq 0.12 \lambda \max \{ w(u_1),w(u_2) \}$ because $\lambda \geq 50$. This implies $w(a_1) \geq d_1(a,a_1) - w(a) \geq 0.86 \lambda \max \{ w(u_1),w(u_2) \}$ because $a.x = a_1.x$.
By combining $w(a_1) \geq 0.86 \lambda \max \{ w(u_1),w(u_2) \}$ and $d_1(a,a_1) \geq 0.98 \lambda \max \{ w(u_1),w(u_2) \}$ we get $|aa_1|_w \geq 0.4874 \lambda^2 (\max \{ w(u_1),w(u_2)\})^2$ as follows: Consider the subsegment $\overline{a}_1a_1 := B_{0.86 \lambda \max \{ w(u_1),w(u_2) \}}(a) \cap aa_1$. By $w(a_1) \geq 0.86 \lambda \max \{ w(u_1),w(u_2) \}$ it follows $w(d) \geq 0.98\lambda \max \{ w(u_1),w(u_2) \} - d_1(a_1,d)$ for all $d \in \overline{a}_1a_1$ because $w(\cdot)$ is $1$-Lipschitz. This implies $|aa_1|_w \geq (0.3698 + 0.1176)\lambda^2 (\max \{ w(u_1),w(u_2)\})^2 = 0.4874\lambda^2 (\max \{ w(u_1),w(u_2)\})^2$.
\end{itemize}
\end{proof}
Lemma~\ref{lem:twoCrossingComplex} implies that the approach taken in the proof of Lemma~\ref{lem:shortestPathOneCrossingAppr} yields that there is a path $\widetilde{\pi}_{qs} \subset G_2$ between $q$ and $s$ such that $|\widetilde{\pi}_{qs}|_w \leq (1+\varepsilon) |\pi_{oqs}|_w$ if $d_1(u_1,u_2) < 6 \max \{ w(u_1), w(u_2) \}$. Combining this with Lemmas~\ref{lem:shortestPathOneCrossingAppr} and~\ref{lem:shortestPathTwoCrossingApprSimplie} yields the following corollary:
\begin{corollary}\label{cor:apprC2}
Let $\widetilde{\pi} \subset G_2$ be a shortest path. We have $|\pi|_w \leq |\widetilde{\pi}|_w \leq (1+\varepsilon)|\pi|_w$.
\end{corollary}
\subsection{``Bringing it all together''}
In Subsections~\ref{subsubsec:anaG1} and~\ref{subsubsec:anaG2}, we showed that in both cases, Case A and B, the minimum of the shortest path lengths in $G_1$ and $G_2$ is upper-bounded by $(1+\varepsilon)|\pi|_w$, where $\pi$ is a shortest path in $P$.
Next, we discuss that our algorithm has a running time of $\mathcal{O}(\frac{\zeta^4 n^4}{\varepsilon^2})$. Graph $G_1$ is given by the arrangement that is induced by $\Theta(\frac{\zeta^2 n^2}{\varepsilon})$ horizontal and $\Theta(\frac{\zeta^2 n^2}{\varepsilon})$ vertical lines because the corresponding grid has a mesh of size $\frac{\varepsilon \mu}{40000 (|T_1| + |T_2|)}$. Thus, $|E_1| \in \Theta(\frac{\zeta^4 n^4}{\varepsilon^2})$. Graph $G_2$ is given by the arrangement that is induced by $\mathcal{O}(n^2)$ free space axes and $\Theta(n^2)$ grid balls. Each grid ball has a complexity of $\Theta (\frac{1}{\varepsilon})$. Thus, $|E_2| \in \mathcal{O}(\frac{n^4}{\varepsilon^2})$. Applying Dijkstra's shortest path algorithm on $G_1$ and $G_2$ takes time proportional to $\mathcal{O}(|E_1|)$ and $\mathcal{O}(|E_2|)$.
As $|E_1| \in \Theta(\frac{\zeta^4 n^4}{\varepsilon^2})$ and $|E_2| \in \mathcal{O}(\frac{n^4}{\varepsilon^2})$ we have to ensure that each edge of $E_1 \cup E_2$ can be computed in constant time to guarantee an overall running time of $\mathcal{O}(\frac{\zeta^4 n^4}{\varepsilon^2})$.
\begin{lemma}\label{lem:edgesComputable}
All edges of $G_1$ and $G_2$ can be computed in $\mathcal{O}(1)$ time.
\end{lemma}
\begin{proof}
There are two types of edges used in $G_1$ and $G_2$: (1.) axis aligned edges and (2.) edges that lie on a monotone free space axis. We consider both cases separately:
\begin{itemize}
\item Axis aligned edge $e \subset P$, see Figure~\ref{fig:edgeWeight}(a): W.l.o.g., we assume that $e = (o,p)$ is horizontal. We have $|op|_w = \int_{0}^{|op|} w(o + t (p-o)) \ dt = \int_{o.x}^{p.x}d_2(T_1(t),T_2(p.y)) \ dt$, see Figure~\ref{fig:edgeWeight}(a). Let $s \subset T_1$ be the segment such that $T_1(t) \in s$ for $t \in [o.x, p.x]$. W.l.o.g., we assume that $s$ lies on the $x$-axis. $\int_{o.x}^{p.x}d_2(T_1(t),T_2(p.y)) \ dt$ can be calculated as follows:
\begin{eqnarray*}
\int_{o.x}^{p.x}d_2(T_1(t),T_2(p.y)) \ dt & = & \int_{o.x}^{p.x} \sqrt{(T_1(t).x)^2 + (T_2(p.y).y)^2} \ dt\\
& =&\left. \frac{1}{2}\left( \begin{matrix}
(T_2(p.y).y)^2 \operatorname{arsinh} \left( \frac{T_1(t).x}{T_2(p.y).y} \right) +\\
T_1(t).x \sqrt{(T_1(t).x)^2 + (T_2(p.y).y)^2} \end{matrix} \right) \right|^{p.x}_{o.x}.
\end{eqnarray*}
That value can be calculated in constant time.
\item Edge $e$ on a free space axis, see Figure~\ref{fig:edgeWeight}(b): Let $\ell$ be the free space axis such that $e \subset \ell \subset P$ and $d_{\ell} \subset \mathbb{R}^2$ the corresponding angular bisector that corresponds to $\ell$. By Observation~\ref{obs:dual}, we have $|op|_w = \int_{0}^{|op|}w(o + t(p-o)) \ dt = \int d_2(T_1(t), T_2(t)) \ dt$ where $T_1(t) T_2(t)$ lies perpendicular to $d_{\ell}$, see Figure~\ref{fig:edgeWeight}(b). Thus, $|op|_w$ is equal to the area that is bounded by $T_1(o.x)T_1(p.x)$, $T_2(o.y)T_2(p.y)$, $T_1(o.x)T_2(o.y)$, and $T_1(p.x)T_2(p.y)$ which can be computed in $\mathcal{O}(1)$ time.
\end{itemize}
\begin{figure}
\caption{The two types of matchings that correspond to the two types of edges from $G_1$ and~$G_2$.}
\label{fig:edgeWeight}
\end{figure}
\end{proof}
This leads to our main result.
\begin{theorem}
We can compute an $(1+\varepsilon)$-approximation of the integral Fr\'echet distance $\mathcal{F}_{\mathcal{S}} \left( T_1, T_2 \right)$ in $\mathcal{O}(\frac{\zeta^4 n^4}{\varepsilon^2})$ time.
\end{theorem}
\section{Locally optimal Fr\'{e}chet matchings}
In this section, we discuss an application of Lemma~\ref{lem:key} to so-called \emph{locally correct (Fr\'{e}chet) matchings} as introduced by Buchin et al.~\cite{buchin:locally}. For $i \in \{ 1,2 \}$ and $0 \leq a \leq b \leq n$, we denote the subcurve between $T_i(a)$ and $T_i(b)$ by $T_i[a,b]$.
\begin{definition}[\cite{buchin:locally}]\label{def:correct}
A matching $(\alpha_1,\alpha_2)$ is \emph{locally correct} if
$\mathscr{D} \left( T_1[\alpha_1(a),\alpha_1(b)], T_2[\alpha_2(a),\alpha_2(b)] \right) = \max_{t \in [a,b]} d_2(T_1(\alpha_1(t)),T_2(\alpha_2(t)))$, for all $0 \leq a \leq b \leq n$.
\end{definition}
Buchin et al.~\cite{buchin:locally} suggested to extend the definition of locally correct (Fr\'{e}chet) matchings to ``locally optimal'' (Fr\'{e}chet) matchings as future work. ``The idea is to restrict to the locally correct matching that decreases the matched distance as quickly as possible.''\cite[p. 237]{buchin:locally}. To the best of our knowledge, such an extension of the definition of locally correct matchings has not been given until now. In the following, we give a definition of locally optimal matchings and show that each locally correct matching
can be transformed, in $\mathcal{O}(n)$ time, into a locally optimal matching.
Buchin et al.~\cite{buchin:locally} require the leash length to decrease as fast as possible. In general though, there is no matching that ensures a monotonically decreasing leash length. We therefore also consider increasing the leash length and extend the objective as follows: ``Computing a locally correct matching that locally decreases and increases the leash length as fast as possible between two maxima''. We measure how fast the leash length decreases (increases) as the sum of the lengths of the subcurves that are needed to achieve a leash length of $\delta \geq 0$ (the next (local) maximum), then we continue from the point pair that realizes $\delta \geq 0$.
Thus, it seems to be natural to consider a matched point pair from $T_1 \times T_2$ in that a local maxima is achieved as fixed. Note that requiring a fast reduction and a fast enlargement of the leash length between two pairs $(T_1(\alpha_1(t_1)),T_2(\alpha_2(t_{1})))$ and $(T_1(\alpha_1(t_{2})),T_2(\alpha_2(t_{2})))$ of fixed points is equivalent to requiring a matching that is optimal w.r.t. the partial Fr\'{e}chet similarity between the curves between the points $T_1(\alpha_1(t_1))$ and $T_2(\alpha_2(t_{1}))$ and $T_1(\alpha_1(t_{2}))$ and $T_2(\alpha_2(t_{2}))$ for all thresholds $\delta \geq 0$.
In the following, we give a definition of locally correct matchings that considers the above described requirements.
Let $f:[0,n] \rightarrow \mathbb{R}_{\geq0}$. $t \in [0,n]$ is the parameter of a \emph{local maximum} of $f$ if the following is fulfilled: there is a $\delta_t > 0$ such that for all $0 \leq \delta\leq \delta_t: f(t \pm \delta) \leq f(t)$ and $f(t + \delta) < f(t)$ or $f(t-\delta) < f(t)$.
Given a matching $(\alpha_1,\alpha_2)$, let $t_1, \dots , t_k$ be the ordered sequence of parameters for all local maxima of the function $t \mapsto d_2(T_1(\alpha_1(t)), T_2(\alpha_2(t)))$. For any $t_i,t_{i+1}$, we denote the restrictions of $\alpha_1$ and $\alpha_2$ to $[t_i,t_{i+1}]$ as $\alpha_1[t_i,t_{i+1}]: [t_i,t_{i+1}] \rightarrow [\alpha_1(t_i), \alpha_1(t_{i+1})]$ and $\alpha_2[t_i,t_{i+1}]: [t_i,t_{i+1}] \rightarrow [\alpha_2(t_i), \alpha_2(t_{i+1})]$.
We say $(\alpha_1,\alpha_2)$ is \emph{locally optimal} if it is locally correct and for all $t_i,t_{i+1}$, $\mathcal{P}_{\delta}(T_1[t_i,t_{i+1}],T_2[t_i,t_{i+1}]) = \mathcal{P}_{(\alpha_1[t_i,t_{i+1}],\alpha_2[t_i,t_{i+1}])}(T_1,T_2)$ for all $\delta \geq 0$.
By applying a similar approach as in the proof of Lemma~\ref{lem:key} we obtain the following:
\begin{lemma}\label{cor:partFS}
Let $C$ be an arbitrarily chosen parameter cell and $a, b \in C$ such that $a \leq_{xy} b$ and $\pi_{ab}$ the path induced by Lemma~\ref{lem:key}. Then, $\mathcal{P}_{\delta}(T_1[a.x,b.x], T_2[a.y,b.y]) = |\mathcal{E}_{\delta} \cap \pi_{ab}|$ for all $\delta \geq 0$, where $\mathcal{E}_{\delta}$ is the free space ellipse of $C$ for the distance threshold $\delta$.
\end{lemma}
Lemma~\ref{cor:partFS} implies that each locally correct matching $\pi$ can be transformed into a locally optimal Fr\'{e}chet matching in $\mathcal{O}(n)$ time as follows: Let $p_1,\dots,p_{2n} \in \pi$ be the intersection points with the parameter grid. For each $i \in \{ 1,...,2n-1 \}$ we substitute the subpath $\pi_{p_ip_{i+1}}$ by the path between $p_{i}$ and $p_{i+1}$ which is induced by Lemma~\ref{lem:key}.
The algorithm from~\cite{buchin:locally} computes a locally correct matching in $\mathcal{O}(n^3 \log n)$ time. Thus, a locally optimal matching can be computed in $\mathcal{O}(n^3 \log n)$ time.
\section{Conclusion}
We presented pseudo-polynomial $(1+\varepsilon)$-approximation algorithms for the integral and average Fr\'{e}chet distance which have a running time of $\mathcal{O}(\frac{\zeta^4 n^4}{\varepsilon^2})$. In particular, in our approach we compute two geometric graphs and their weighted shortest path lengths in parallel. It remains open if one can reduce the complexity of $G_1$ to polynomial with respect to the input parameters such that $G_1 \cup G_2$ still ensures an $(1+\varepsilon)$-approximation.
\end{document}
\begin{document}
\begin{abstract}
We investigate the geometry of hyperbolic knots and links whose
diagrams have a high amount of twisting of multiple strands. We find
information on volume and certain isotopy classes of geodesics for the
complements of these links, based only on a diagram. The results are
obtained by finding geometric information on generalized augmentations
of these links.
\end{abstract}
\maketitle
\section{Introduction}
\label{sec:intro}
By Mostow--Prasad rigidity and work of Gordon and Luecke
\cite{gordon-luecke}, the hyperbolic structure on the complement of a
hyperbolic knot is a knot invariant, and ought to be useful in
problems of knot and link classification. In practice, this structure
seems difficult to compute.
In recent years, some geometric properties of hyperbolic knots and
links have been discovered for links admitting certain types of
diagrams, such as alternating links \cite{lackenby:alt-volume}, and
highly twisted knots and links \cite{purcell:cusps, purcell:volume,
fkp}. However, many knots that are of interest to topologists and
hyperbolic geometers do not fall into these classes. These include
Berge knots \cite{berge, baker:I, baker:II}, twisted torus knots and
Lorenz knots \cite{birman-kofman}, which contain many of the smallest
volume hyperbolic knots \cite{champanerkar-kofman}. These knots often
have diagrams that are highly non-alternating, with few twists per
twist region, but contain regions where multiple strands of the
diagram twist around each other some number of times. We would like
to be able to understand and estimate geometric properties of these
``multiply twisted'' knots and links, given only a diagram, but
currently we do not have the tools to do so.
In this paper, we take a first step toward such an understanding. We
investigate the geometry of knots and links with diagrams with a high
amount of twisting of multiple strands. We find information on the
geometry of these knots, including volume bounds and certain isotopy
classes of geodesics, based only on a diagram.
The results are obtained \emph{augmenting} the knot and link diagrams.
That is, we encircle each twist of multiple strands by a simple closed
curve, unknotted in $S^3$. The resulting link is called a generalized
augmented link, generalizing a construction of Adams in which two
twisting strands are encircled by an unknotted component
\cite{adams:aug}. When one performs $1/n$ Dehn filling on the
augmentation components of these links, one adds $n$ full twists to
the strands. All diagrams can be obtained by such twisting. (See
section \ref{sec:character} for a more careful discussion.) Hence
geometric information on a generalized augmented link, combined with
geometric information under Dehn filling, leads to geometric results
on knot complements.
Regular augmented links have a very nice hyperbolic structure,
including a decomposition into right angled ideal polyhedra, first
written down by Agol and Thurston
\cite[Appendix]{lackenby:alt-volume}. Generalized augmented links do
not have as nice structure, but still contain enough symmetry to
obtain geometric estimates. To obtain geometric information on Dehn
fillings of these manifolds, one may turn to results on cone
deformations due to Hodgson and Kerckhoff \cite{hk:rigid, hk:univ,
hk:shape}, or hyperbolike filling of Agol and Lackenby
\cite{agol:bounds, lackenby:word}, or volume change results due to
Futer, Kalfagianni, and the author \cite{fkp}.
We have investigated generalized augmented links elsewhere. In
\cite{purcell:slopes}, we bounded the lengths of certain slopes on
these links, and showed that many knots obtained by their Dehn
fillings have meridian length approaching $4$ from below. With Futer
and Kalfagianni, in \cite{fkp:coils} we investigated properties of
volumes of a very particular class of these links. Here, we broaden
the results to larger classes of knots and links.
Finally, note that the focus of this paper is on geometric information
on hyperbolic generalized augmented links and their Dehn fillings. In
a companion paper, we discuss results for generalized augmented links
which are not hyperbolic \cite{purcell:geom-aug}.
\section{Characterization of generalized augmented links}
\label{sec:character}
We will be analyzing twisting and twist regions in a knot diagram.
Twist regions and generalized twist regions are defined carefully in
\cite{purcell:slopes}. We review definitions here for convenience.
\begin{define}
Let $K$ be a link in $S^3$, and let $D$ be a diagram of the link.
We may view $D$ as a 4--valent graph with over--under crossing
information at each vertex. A \emph{twist region} of the diagram
$D$ is a sequence of bigon regions of $D$ arranged end
to end, which is maximal in the sense that there are no other bigons
on either end of the sequence. A single crossing adjacent to no
bigons is also a twist region.
We will assume throughout that the diagram is alternating within a
twist region, else replace it with a diagram with fewer crossings in
the twist region.
\label{def:twist}
\end{define}
In a \emph{twist region} of a diagram, two strands twist around each
other maximally, as in Figure \ref{fig:twist}(a), and bound a
``ribbon'' surface.
\begin{define}
A \emph{generalized twist region} of $D$ is a region of the diagram
where two or more strands twist around each other maximally, as in
Figure \ref{fig:twist}(b). More precisely, a generalized twist region
is a region of the diagram consisting of $m\geq 2$ parallel strands.
When all the strands except the outermost two are removed from this
region of the diagram, the remaining two strands form a twist region.
In $S^3$, these two strands bound a ribbon surface between them.
Remaining strands of the generalized twist region can be isotoped to
lie parallel to each other, embedded on this ribbon surface.
\label{def:gen-twist-region}
\end{define}
\begin{figure}
\caption{(a) A twist region. (b) A generalized twist region.
Multiple strands lie on the twisted ribbon surface.}
\label{fig:twist}
\end{figure}
The amount of twisting in each twist region is also important. We
describe the amount of twisting in terms of half--twists and
full--twists.
\begin{define}
Let $K$ be a link in $S^3$. A \emph{half--twist} of a generalized
twist region of a diagram consists of a single crossing of the two
outermost strands. The ribbon surface they bound, containing other
strands of the twist region, flips over once in a half--twist.
A \emph{full--twist} consists of two half--twists. Figure
\ref{fig:twist}(b) shows a single full--twist, or two half--twists,
of five strands.
\label{def:half-twist}
\end{define}
Given a diagram of a link in $S^3$, group crossings into generalized
twist regions, such that each crossing is contained in exactly one
generalized twist region. Call such a choice of generalized twist
regions a \emph{maximal twist region selection}. Note the choice is
not necessarily unique. For example, in Figure \ref{fig:twist}(b), we
could group the crossings shown into a single generalized twist region
containing a full--twist of five strands, or into twenty regular twist
regions, each containing a single half--twist of two strands. Either
choice is a valid maximal twist region selection, although the former
seems more correct.
Now, at each generalized twist region in the maximal twist region
selection, insert a \emph{crossing circle}, that is, a simple closed
curve $C_i$ encircling the strands of the twist region, and bounding a
disk $D_i$ in $S^3$, perpendicular to the projection plane. The $D_i$
are called \emph{twisting disks}. See Figure \ref{fig:cross-cir}(a).
We can select the $C_i$ and the $D_i$ such that the collection of all
$D_i$ is a collection of disjoint disks in $S^3$.
When crossing circles are inserted at each twist region in the maximal
twist region selection, we obtain a new link, with components $K_j$
from the original link $K$, and crossing circles $C_i$. The
complement of this link is homeomorphic to the complement of the link
$L$ obtained by untwisting at each $C_i$. That is, we may remove all
full--twists from each generalized twist region of the link diagram
without changing the homeomorphism type of the link complement. See
Figure \ref{fig:cross-cir}(b).
\begin{figure}
\caption{(a) Encircle each twist region with a crossing circle. (b)
Link $L$ given by removing full--twists from the diagram.}
\label{fig:cross-cir}
\end{figure}
The resulting diagram of $L$ consists of unknotted link components
$C_i$ and components obtained from untwisting $K$, which we will call
$K_1, \dots, K_p$. In the diagram of $L$, the components of $K$ will
either lie flat on the projection plane, or may have single
half--twists encircled by crossing circles.
\begin{define}
We call the link $L$ an \emph{augmentation} of the diagram $D$ of $K$,
or we say $L$ is the augmentation of the diagram $D$ corresponding to
a maximal twist region selection. We also say that $L$ is obtained by
\emph{augmenting} $K$, and that $L$ is a \emph{generalized augmented}
link.
\label{def:augmentation}
\end{define}
For brevity, we often drop the adjective ``generalized'' from the term
generalized augmented links, since all augmented links we discuss here
are of this form.
The connection between $S^3-L$ and the original link complement is
given by Dehn filling. Any slope $s$ on a torus $T^2$ is
parameterized by two relatively prime integers $p, q$, where $s =
p\mu+q\lambda$, and $\mu, \lambda$ generate $H_1(T^2; {\mathbb{Z}})$. When $M$
is the link complement $S^3-L$, at the $i$-th crossing circle $C_i$,
let $\mu_i, \lambda_i$ denote the meridian and longitude of $\partial
N(C_i)$, respectively. Then Dehn filling along the slope $\mu_i +
n_i\lambda_i$ gives a new link whose diagram no longer contains $C_i$,
and the strands previously encircled by $C_i$ run through $n_i$
full--twists (see, for example, Rolfsen \cite{rolfsen}). Thus Dehn
filling connects $S^3-K$ and the complement of the augmented link $L$.
\subsection{Reflection}
The link $L$ admits a reflection, as follows. Arrange the diagram of
$L$ such that crossing circles of $L$ lie perpendicular to the
projection plane, and reflect the diagram of $L$ in the projection
plane. The crossing circle components $C_i$ are taken to themselves.
Outside of twist regions, the diagram of $L$ is preserved. If the
components $K_j$ lie flat on the projection plane, they are also
preserved by the reflection.
If some components $K_j$ run through a single half--twist at a twist
region, then the reflection will reverse all the crossings of the
half--twist, changing the direction of half--twist. Apply a twist
homeomorphism, twisting one full twist at each half--twist in the
opposite direction. This reverses the direction of the half--twist.
Thus the composition of the reflection and the twist homeomorphism is
an orientation reversing involution of $S^3-L$.
There is a surface which can be isotoped to be fixed pointwise by this
involution, namely, the projection plane outside of half--twists, and
the ribbon surfaces inside half twists, as well as a half--twisted
surface between $C_i$ and the knot strands.
The above discussion is a proof of the following, which is also
Proposition 3.1 of \cite{purcell:slopes}.
\begin{prop}
Let $L$ be an augmentation of a diagram of a link in $S^3$. Then
$S^3-L$ admits a reflection, i.e. an orientation reversing
involution with fixed point set a surface.
\label{prop:reflect}
\end{prop}
\section{Slope lengths and hyperbolicity}
In this section, we prove results on slope lengths of generalized
augmented links. Our methods generalize to hyperbolic manifolds which
admit a reflection, and we state the more general results.
\begin{lemma}
Let $M$ be a $3$--manifold with torus boundary components with the
following properties:
\begin{enumerate}
\item $M$ admits an orientation reversing involution $\sigma$ whose
fixed point set is an embedded surface $P$ in $M$.
\item Some boundary component $T$ of $M$ meets $P$, and for some
slope $\lambda$ on $T$, $\sigma$ is an orientation reversing
involution of $\lambda$. (Write $\sigma(\lambda) = -\lambda$.)
\end{enumerate}
Then $\lambda$ meets $P$ exactly twice.
\label{lemma:lambda-P}
\end{lemma}
When our manifold is in fact a generalized augmented link, $\lambda$
may be the slope $\partial D_i$ on $\partial N(C_i)$, for example, or
a slope $\partial D_i$ on $\partial N(K_j)$.
\begin{proof}
Since $\sigma$ takes $\lambda$ to $-\lambda$, a representative
of $\lambda$ (which, by abuse of notation, we will also call
$\lambda$) has a fixed point under $\sigma$. Thus $\lambda$
meets $P$. Additionally, since the only orientation reversing
involutions of $S^1$ that fix a point must actually fix two points,
$\lambda$ must meet $P$ twice.
\end{proof}
\begin{lemma}
Let $M$ be as in Lemma \ref{lemma:lambda-P}. Then the torus $T$
is tiled by rectangles, each with one side parallel to the surface
$P$, and one side orthogonal to $P$. The lift of these rectangles
to the universal cover $\widetilde{T}$ gives a lattice in ${\mathbb{R}}^2$.
\label{lemma:lattice}
\end{lemma}
\begin{proof}
Consider the universal cover ${\mathbb{R}}^2$ of the torus boundary component
$T$. As $P$ is embedded, the slopes $P \cap T$ lift to give parallel
lines in ${\mathbb{R}}^2$. A simple curve representing the slope $\lambda$
lifts to give parallel lines perpendicular to the lines from $P$,
since $\lambda$ is taken to $-\lambda$ by the involution $\sigma$
fixing $P$. The projection of these lines to $T$ gives a tiling of
$T$ by rectangles. Together, the intersection points of these sets of
lines form a lattice ${\mathbb{Z}}^2$ of ${\mathbb{R}}^2$.
\end{proof}
Construct a basis of the lattice of Lemma \ref{lemma:lattice} by
letting ${\boldsymbol{p}}$ be a step parallel to a side from $P \cap T$, and by
letting ${\boldsymbol{o}}$ be a step orthogonal to ${\boldsymbol{p}}$.
\begin{lemma}
Let $M$ be as in Lemma \ref{lemma:lambda-P}, and let $\{{\boldsymbol{p}}, {\boldsymbol{o}}\}$
be the basis for the lattice on $\widetilde{T}$ as above. Then the
curve $\lambda$, which serves as one generator of $H_1(T;{\mathbb{Z}})$, is
given by $2{\boldsymbol{o}}$. Another generator of $H_1(T;{\mathbb{Z}})$ is given by ${\boldsymbol{p}}
+ \epsilon \,{\boldsymbol{o}}$, where $\epsilon = 0$ if there are two components
of $P\cap T$, and $\epsilon = 1$ if there is one component of $P\cap
T$.
\label{lemma:generators}
\end{lemma}
\begin{proof}
By Lemma \ref{lemma:lambda-P}, $\lambda$ intersects $P$ twice. Thus
its representative must cross lifts of $P$ twice in the lattice, and
be taken to itself under the involution in $P$, so it is $2{\boldsymbol{o}}$.
Note this implies that all corners of the rectangles formed by ${\boldsymbol{p}}$
and ${\boldsymbol{o}}$ project to just two distinct points on $T$ under the
covering transformation. These two points are the projection of ${\boldsymbol{o}}$
and the projection of $2{\boldsymbol{o}}$. Additionally, the fact that
$\lambda=2{\boldsymbol{o}}$ implies that $T$ is tiled by exactly two rectangles. To
determine generators of $H_1(T;{\mathbb{Z}})$, we determine if these
rectangles are glued with or without shearing on $T$.
Another obvious closed curve on $T$ besides $\lambda$ is given by a
single component of $P\cap T$. Call the corresponding slope
$\alpha$. It does not necessarily generate $H_1(T;{\mathbb{Z}})$ with
$\lambda$. Since $\lambda$ intersects $P$ twice, either $\alpha$
intersects $\lambda$ once, in which case $P \cap T$ has two
components, there is no shearing, and ${\boldsymbol{p}}$ is a generator; or
$\alpha$ intersects $\lambda$ twice, and $P\cap T$ has one component.
If $P\cap T$ has one component, then $\alpha=2{\boldsymbol{p}}$, and $\alpha$ is
not a generator with $\lambda$. Then ${\boldsymbol{p}}$ must project to the same
point as ${\boldsymbol{o}}$ under the covering projection, so ${\boldsymbol{p}} + {\boldsymbol{o}}$ will give
a closed curve on $T$. Since it has intersection number $1$ with
$2{\boldsymbol{o}} = \lambda$, ${\boldsymbol{p}}+{\boldsymbol{o}}$ will be a generator.
\end{proof}
When $M$ is known to admit a hyperbolic structure, we can find lower
bounds on the lengths of the arcs ${\boldsymbol{o}}$ and ${\boldsymbol{p}}$ in the lattice.
Recall that when a manifold has multiple cusps, lengths depend on a
choice of maximal cusps, i.e. a collection of disjoint horoball
neighborhoods, one for each cusp. Lengths of arcs are measured on the
horospherical tori that form the boundaries of the horoball
neighborhoods. To ensure lengths on a torus boundary are long, we
need to ensure that we can choose maximal cusps appropriately.
\begin{theorem}
Let $M$ be a 3--manifold with torus boundary components which admits a
complete finite volume hyperbolic structure, and has the following
additional properties:
\begin{enumerate}
\item $M$ admits an orientation reversing involution $\sigma$ whose
fixed point set is an embedded surface $P$ in $M$.
\item Boundary components $T_1, \dots, T_t$ of $M$ meet $P$, and for
each $T_i$, there is a slope $\lambda_i$ that is taken to
$-\lambda_i$ under $\sigma$.
\end{enumerate}
Let $\{{\boldsymbol{p}}_i, {\boldsymbol{o}}_i\}$ generate the lattice on the universal cover
$\widetilde{T_i}$ of $T_i$, of intersections of lines which project to $P$
and lines which project orthogonal to $P$, respectively, as in Lemma
\ref{lemma:generators}. Then there exists a choice of maximal cusps
of $M$ such that, when measured on these maximal cusps, the length of
each ${\boldsymbol{o}}_i$ is at least $1$, and the length of ${\boldsymbol{p}}_i$ is at least
$1/2$.
\label{thm:hyp-result}
\end{theorem}
Similar results were shown for particular classes of links in $S^3$ in
\cite{purcell:slopes}, using techniques of Adams \emph{et al.}
\cite{adams:II}. We give a different proof here.
\begin{proof}
By Mostow--Prasad rigidity, the involution of $M$ is isotopic to an
isometry of $M$ under the hyperbolic metric. The surface $P$, since
it is fixed pointwise, is isotopic to a totally geodesic surface in
$M$ (see for example \cite{menasco-reid}, \cite{leininger}).
Lift to the universal cover ${\mathbb{H}}^3$, which we view as the upper half
space ${\mathbb{H}}^3=\{(x,y,z)|z>0\}$. For any $j$, we may conjugate such
that the cusp corresponding to $T_j$ lifts to the point at infinity.
The surface $P$ lifts to a collection of disjoint, totally geodesic
planes.
Since $P$ meets the cusp corresponding to $T_j$, copies of $P$ will
lift in ${\mathbb{H}}^3$ to parallel vertical planes through infinity. Because
$P$ is fixed under the involution $\sigma$, the collection of parallel
vertical planes must be preserved by a reflection of ${\mathbb{H}}^3$ in any
one of the planes. Hence the (Euclidean) distance between any two
adjacent planes must be constant. Without loss of generality, we will
conjugate such that these vertical planes are the planes $y=n$, $n\in
{\mathbb{Z}}$, in ${\mathbb{H}}^3 = \{(x,y,z)|z>0\}$, so that their Euclidean distance
is $1$.
The length of ${\boldsymbol{o}}_j$ will be given by $1/c$, where $c$ is the
height of the horosphere bounding the horoball about infinity. We
will show that we can always take $c$ to be less than or equal to $1$.
Define the horoball expansion about cusps of $T_1,
\dots, T_t$ such that the lengths of the ${\boldsymbol{o}}_j$ agree for every $j$
simultaneously. That is, there exists some (possibly large) $c$ such
that when each ${\boldsymbol{o}}_j$ has length $1/c$, the horoballs about the cusps
corresponding to $T_1, \dots, T_t$ are disjoint. Continue to decrease
$c$ keeping all the ${\boldsymbol{o}}_j$ of equal length, until the value $1/c$ is
as large as possible. If there are remaining cusps disjoint from the
$T_j$, these may then be expanded in any way.
To prove the theorem, we must prove that the value of $c$ which
maximizes the length of the ${\boldsymbol{o}}_j$ is less than or equal to $1$.
Suppose not. Suppose $c>1$. Since $c$ is minimal, horoballs about
cusps corresponding to some $T_i$ and $T_j$ must abut. Conjugate such
that the cusp corresponding to $T_i$ is at infinity in ${\mathbb{H}}^3$, with
lifts of $P$ corresponding to the planes $y=n, n\in{\mathbb{Z}}$. The horoball
about infinity will have height $c$. It will be tangent to some
horoball $H$ over a point $w$ on the boundary ${\mathbb{C}} = \{(x,y,0)\}$ of
${\mathbb{H}}^3$, where $w$ projects to the cusp corresponding to $T_j$. Since
$c>1$, note $H$ is a ball of Euclidean diameter $c>1$.
Because the diameter of $H$ is greater than $1$, $H$ must intersect a
plane $y=n$. Because the reflection through the plane $y=n$ projects
to an isometry of $M$, the image of $H$ under this reflection must be
a horoball in ${\mathbb{H}}^3$ disjoint from all other horoballs in the lift of
the maximal cusps. Thus if $H$ lies over some point $w$ which is
\emph{not} on the plane $y=n$, then the image of $H$ under the
reflection through $y=n$ will give a horoball distinct from $H$, which
intersects $H$. This is impossible.
So $H$ is centered at a point $w\in {\mathbb{C}}$ which lies on a plane $y=n$.
Without loss of generality, assume $w=0$. Thus we are assuming $0$
projects to some cusp corresponding to $T_j$ under the covering map.
Now consider $T_j$. There is some isometry $S$ of ${\mathbb{H}}^3$ taking $0$
to infinity and infinity to $0$, and taking lifts of $P$ which meet
the cusp $T_j$ to planes $y=n, n\in{\mathbb{Z}}$. Note by the definition of
our horoball expansion, this isometry $S$ takes $H$ to a horoball of
height $z=c>1$ about infinity.
Consider $q = S^{-1}(i) = S^{-1}((0,1,0))$ on the boundary ${\mathbb{C}}$ of
${\mathbb{H}}^3$. This point lies on the boundary of some plane $Q$ of ${\mathbb{H}}^3$
which projects to $P$ under the covering map. This plane $Q$ is a
Euclidean hemisphere tangent to the plane $y=0$. It has diameter at
most $1$, since it cannot intersect the plane $y=1$, which also
projects to $P$ under the covering map.
Consider the vertical geodesic in ${\mathbb{H}}^3$ lying above $0$ in ${\mathbb{C}}$.
There is a unique geodesic $\gamma$ from the point $q$ which meets
this vertical geodesic at a right angle. The point $r$, where
$\gamma$ intersects the vertical geodesic, is of (Euclidean) height
$|q|$, where $|q|$ denotes the (Euclidean) distance of $q$ from $0$.
Because $q$ lies on the circle $Q$ of diameter at most $1$, $|q|$ is
at most $1$. Because $H$ is of diameter $c>1$, $r$ must be contained
in $H$. See Figure \ref{fig:r-in-H}.
\begin{figure}
\caption{Note $r$ is contained in the horosphere $H$.}
\label{fig:r-in-H}
\end{figure}
But now consider the effect of the isometry $S$ on the geodesic
$\gamma$. Since $S$ preserves the vertical geodesic above $0$ in
${\mathbb{C}}$, $S$ must take $\gamma$ to a geodesic from $S(q) = i \in {\mathbb{C}}$ to
one meeting the vertical geodesic above $0$ at a right angle. Thus
$S(r)$ will be of height exactly $1$. On the other hand, $S(H)$ is of
height $c>1$, and $S(H)$ must contain $S(r)$. This is impossible.
Thus all horoballs can be expanded to height $c\leq 1$. It follows
that each ${\boldsymbol{o}}_i$ has length at least $1$.
Finally, ${\boldsymbol{p}}_i$ or $2{\boldsymbol{p}}_i$ projects to a closed curve on $T_i$.
Hence translation along ${\boldsymbol{p}}_i$ or $2{\boldsymbol{p}}_i$ is a covering
transformation. It must take a maximal horoball centered at a point
on ${\mathbb{C}}$ to a disjoint maximal horoball. Thus the translation length
is at least $1$, so ${\boldsymbol{p}}_i$ has length at least $1/2$.
\end{proof}
We wish to study what happens when we twist along the disks $D_1,
\dots, D_t$, i.e. when we perform Dehn filling on slopes $1/n_1,
\dots, 1/n_t$ on the cusps corresponding to $C_1, \dots, C_t$,
respectively. First, we give the following result about the lengths
of such slopes. Note the following theorem applies to links in
general 3--manifolds, not just $S^3$.
\begin{prop}
Let $L = C_1 \cup \dots \cup C_t$ be a link in a $3$--manifold $M$, such
that $M-L$ admits a complete, finite volume hyperbolic structure,
admits an orientation reversing involution $\sigma$ whose fixed
point set is a surface $P$, and for each component $C_i$ of $L$,
there is a slope $\lambda_i$ taken to $-\lambda_i$ by $\sigma$.
Let $\mu_i$ be the other generator of $H_1(\partial N(C_i))$ as in
Lemma \ref{lemma:generators}. Then the slope $\mu_i + n_i
\,\lambda_i$
has length at least $\sqrt{(1/4) + c_i^2}$. Here:
\begin{enumerate}
\item $c_i = 2|n_i|$ if $P \cap \partial N(C_i)$ consists of two
curves, or
\item $c_i = 2|n_i|-1$ if $P \cap \partial N(C_i)$ consists of one
curve.
\end{enumerate}
\label{prop:slope-lengths}
\end{prop}
\begin{proof}
$M-L$ fits the requirements of the lemmas above. So in particular, by
Lemma \ref{lemma:generators}, $H_1(\partial N(C_i);{\mathbb{Z}})$ is generated
by $2{\boldsymbol{o}}_i$ and ${\boldsymbol{p}}_i + \epsilon_i \,{\boldsymbol{o}}_i$; the generator $2{\boldsymbol{o}}_i$
corresponds to the curve $\lambda_i$; if $P \cap \partial N(C_i)$ has
two components, then one such component is a generator ${\boldsymbol{p}}_i =
\mu_i$; and if $P \cap \partial N(C_i)$ has one component, then the
other generator is ${\boldsymbol{p}}_i + {\boldsymbol{o}}_i = \mu_i$.
Suppose first that $P \cap \partial N(C_i)$ has two components. Then
the slope $\mu_i + n_i\, \lambda_i$ is given by ${\boldsymbol{p}}_i + n_i\,
(2\,{\boldsymbol{o}}_i)$. Since ${\boldsymbol{p}}_i$ and ${\boldsymbol{o}}_i$ are orthogonal, by Theorem
\ref{thm:hyp-result} this slope has length at least
$\sqrt{(1/4) + 4\,n_i^2} = \sqrt{(1/4) + c_i^2}.$
Now suppose that $P \cap \partial N(C_i)$ has one component. Then the
slope $\mu_i + n_i \,\lambda_i$ is given by ${\boldsymbol{p}}_i + {\boldsymbol{o}}_i +
n_i\,(2\,{\boldsymbol{o}}_i) = {\boldsymbol{p}}_i + (1 + 2\,n_i){\boldsymbol{o}}_i$. It must have length at
least $\sqrt{(1/4) + (1-2|n_i|)^2} = \sqrt{(1/4) + c_i^2}.$
\end{proof}
\begin{define}
If $P \cap \partial N(C_i)$ consists of one curve, as in case (2) of
Proposition \ref{prop:slope-lengths}, we say there is a \emph{half--twist}
at $D_i$.
\label{def:half-twist-general}
\end{define}
This terminology comes from considering a neighborhood of $D_i$ in
$M$. In this neighborhood, a half--twist at $D_i$ is identical to a
neighborhood of a half--twist of an augmented link in $S^3$, as in
Definition \ref{def:half-twist}. See Figure \ref{fig:half-twist}.
Two half--twists in a row in a neighborhood of $D_i$ again yield a
full--twist in this neighborhood. Thus Proposition
\ref{prop:slope-lengths} implies that the squared length of the slope
$\mu_i + n_i\lambda_i$ on $C_i$ is at least one more than the squared
number of half--twists inserted at $D_i$.
\begin{figure}
\caption{Left: $P\cap \partial N(C_i)$ has two components, shown in
dotted lines. Right: $P\cap \partial N(C_i)$ has one component,
giving a half--twist. }
\label{fig:half-twist}
\end{figure}
\begin{theorem}
Let $K$ be a knot or link in $S^3$ which has a diagram $D$ and a
maximal twist region selection with at least $6$ half--twists in each
generalized twist region, and such that the corresponding augmentation
is hyperbolic. Then $S^3-K$ is also hyperbolic.
\label{thm:hyp-knot}
\end{theorem}
\begin{proof}
The augmentation is a link with hyperbolic complement, by assumption.
It admits an orientation reversing involution $\sigma$ fixing a
surface $P$, and the cusps corresponding to crossing circles each have
a slope $\lambda_i$ which is taken to $-\lambda_i$ by $\sigma$:
namely, the slope of the longitude of the crossing circle.
The original knot or link complement is obtained from this link
complement by Dehn filling slopes on crossing circles. The longitude
of a crossing circle is given by $\lambda_i$. The meridian is the
generator $\mu_i$ of Proposition \ref{prop:slope-lengths}. If the
knot has $c_i$ half twists in the $i$-th twist region, then the Dehn
filling slope is $\mu_i + n_i \lambda_i$, where $n_i = c_i/2$ if $c_i$
is even, $n_i = (c_i+1)/2$ if $c_i$ is odd.
By Proposition \ref{prop:slope-lengths}, the slope of the Dehn filling
has length at least $\sqrt{(1/4)+c_i^2} > 6$, since the diagram of $K$
has at least $6$ half--twists in each generalized twist region. Thus
by the $6$--Theorem (\cite{agol:bounds}, \cite{lackenby:word}), the
manifold resulting from Dehn filling is hyperbolic.
\end{proof}
\section{Volumes}
The existence of a reflection gives information about the volumes of
augmented links as well. Theorem \ref{thm:hyp-vol}, below, is an
immediate generalization of a similar theorem in \cite{fkp}.
\begin{lemma}
Let $K$ be a knot or link in $S^3$ which has a diagram $D$ and a
maximal twist region selection such that the corresponding
augmentation yields a link $L$ in $S^3$ whose complement is
hyperbolic. Then the volume satisfies
$$ \mathop{\rm vol}\nolimits(S^3-L) \geq 2\,v_8\,(\mathop{\rm tw}\nolimits(D)-1), $$
where $v_8 \approx 3.66386$ is the volume of a regular hyperbolic
octahedron, and $\mathop{\rm tw}\nolimits(D)$ is the number of generalized twist regions of
the maximal twist region selection of $D$.
\label{lemma:vol-refl}
\end{lemma}
\begin{proof}
By assumption, $S^3-L$ admits a complete hyperbolic structure. By
Proposition \ref{prop:reflect}, it admits a reflective symmetry. Thus
$S^3-L$ contains a surface $P$ fixed pointwise under the reflection.
Cut $S^3-L$ along this surface. This produces a (possibly
disconnected) manifold $N$ with totally geodesic boundary. By a
theorem of Miyamoto \cite{miyamoto}, the volume of $N$ is at least
$-v_8\,\chi(N)$, where $\chi(N)$ denotes the Euler characteristic of
$N$.
Now, in the case that $P$ is the projection plane (i.e. no
half--twists), cutting along $P$ splits $S^3-L$ into two balls, with
half arcs corresponding to crossing circles drilled out of the ball.
This is a handlebody. Since there are $\mathop{\rm tw}\nolimits(D)$ crossing circles, the
genus of the handlebody is $\mathop{\rm tw}\nolimits(D)$. Thus we obtain the volume
estimate:
$$ \mathop{\rm vol}\nolimits(S^3-L) \geq 2\,v_8\,(\mathop{\rm tw}\nolimits(D)-1). $$
When the diagram has half--twists, let $L'$ denote the link obtained
by removing all half--twists from the diagram of $L$. Topologically,
$S^3-L'$ is obtained from $S^3-L$ by cutting $S^3-L$ along the disks
bounded by crossing circles, and regluing with a half--twist.
Note $S^3-L'$ has the following description as a gluing of ideal
polyhedra. Cut $S^3-L'$ along the projection plane. This slices each
of the disks bounded by crossing circles in half. Now cut along each
of these half disks and pull the disks apart. See Figure
\ref{fig:top-decomp}.
\begin{figure}
\caption{Decomposing $S^3-L'$ into ideal polyhedra. First, cut along
$P$. Second, cut along half disks. Finally, shrink remaining link
components to ideal vertices.}
\label{fig:top-decomp}
\end{figure}
This separates $S^3-L'$ into two identical ideal polyhedra with faces
given by crossing disks and by the projection plane. We may glue
these polyhedra back in the manner in which we cut them to obtain
$S^3-L'$. We may also change the gluing on crossing disks only to
obtain $S^3-L$, as follows. Rather than glue crossing disks straight
across where $L$ has a half--twist, glue a half crossing disk on one
polyhedron to the opposite half crossing disk on the opposite
polyhedron, inserting the half--twist. See Figure
\ref{fig:poly-half-twist}.
\begin{figure}
\caption{Left: Gluing without a half twist. Right: Inserting a half--twist.}
\label{fig:poly-half-twist}
\end{figure}
Compute the Euler characteristic of the cut manifold $(S^3-L)-P$ by
reading it off this polyhedral decomposition. Since $(S^3-L)-P$ has
boundary, it retracts onto a one--skeleton. Build the one--skeleton
by including a vertex for each ideal polyhedron (two vertices). Edges
run through the half crossing disks which we glue. There will be one
edge per glued pair of half crossing disks. Since there are $\mathop{\rm tw}\nolimits(D)$
crossing disks, the Euler characteristic is $2 - 2\mathop{\rm tw}\nolimits(D)$. Thus by
Miyamoto's theorem, the volume satisfies: $\mathop{\rm vol}\nolimits(S^3-L) \geq
2\,v_8\,(\mathop{\rm tw}\nolimits(D)-1).$
\end{proof}
Lemma \ref{lemma:vol-refl} should be compared to Proposition 3.1 of
\cite{fkp}. The proof above is an immediate extension of the proof of
that theorem to this more general case. For links with two strands
per twist region, we showed in \cite{fkp} that Lemma
\ref{lemma:vol-refl} is sharp.
In general, when crossing circles have more than two strands per twist
region, Lemma \ref{lemma:vol-refl} seems to actually be far from
sharp. With Futer and Kalfagianni we have been able to develop better
bounds on volumes of a certain class of knots \cite{fkp:coils}.
Meanwhile, Lemma \ref{lemma:vol-refl} gives a working lower bound on
volumes.
\begin{theorem}
Let $K$ be a knot or link in $S^3$ which has a diagram $D$ and a
maximal twist region selection with at least $7$ half--twists in each
generalized twist region, and such that the corresponding augmentation
is hyperbolic. Let $\mathop{\rm tw}\nolimits(D)$ denote the number of generalized twist
regions in the maximal twist region selection. Then \[ \mathop{\rm vol}\nolimits(S^3-K)
\geq 0.64756\,(\mathop{\rm tw}\nolimits(D) -1). \]
\label{thm:hyp-vol}
\end{theorem}
\begin{proof}
Let $L$ be the augmentation; $S^3-L$ is hyperbolic by assumption. By
Lemma \ref{lemma:vol-refl}, the volume satisfies:
$$\mathop{\rm vol}\nolimits(S^3-L) \geq 2\,v_8\,(\mathop{\rm tw}\nolimits(D)-1).$$
Now, $S^3-K$ is obtained by Dehn filling $S^3-L$. Since there are at
least $7$ half--twists per twist region, by Proposition
\ref{prop:slope-lengths}, the Dehn filling is along slopes of length
at least $\sqrt{49.25} > 2\pi$. Apply Theorem 1.1 of \cite{fkp}.
This theorem states that if $M$ is a hyperbolic manifold, and $s_1,
\dots, s_k$ are slopes on cusps of $M$ with minimum length
$\ell_{min}$ at least $2\pi$, then the Dehn filled manifold $M(s_1,
\dots, s_k)$ is hyperbolic with volume bounded below by
$$ \mathop{\rm vol}\nolimits(M(s_1,\dots, s_k)) \geq
\left(1-\left(\frac{2\pi}{\ell_{min}}\right)^2\right)^{3/2}
\,\mathop{\rm vol}\nolimits(M). $$
In our case, $\ell_{min} \geq \sqrt{49.25}$ and the volume of the
unfilled manifold $S^3-L$ satisfies $\mathop{\rm vol}\nolimits(S^3-L) \geq
2\,v_8\,(\mathop{\rm tw}\nolimits(D)-1)$. Thus the volume of $S^3-K$ satisfies
\begin{eqnarray*}
\mathop{\rm vol}\nolimits(S^3-K) &\geq&
\left(1-\left(\frac{2\pi}{\sqrt{49.25}}\right)^2\right)^{3/2}
\,2\,v_8\,(\mathop{\rm tw}\nolimits(D)-1)\\ &> & 0.64756\,(\mathop{\rm tw}\nolimits(D)-1).
\end{eqnarray*}
\end{proof}
\section{Geodesics}
We now give information on classes of geodesics in knot complements.
Our tools are those of cone manifolds and cone deformations. We
briefly review the definitions and results we use.
\begin{define}
A \emph{hyperbolic cone manifold} is a $3$--manifold $M$ and a link
$\Sigma$ in $M$ such that $M-\Sigma$ admits an incomplete hyperbolic
metric, with cone singularities along $\Sigma$. That is, a
neighborhood of $\Sigma$ in $M$ has a metric whose cross section is
a hyperbolic cone, with cone angle $\alpha$ at the core.
A \emph{hyperbolic cone deformation} is a one--parameter family of
hyperbolic cone manifold structures on $M-\Sigma$.
\end{define}
In special cases, a Dehn filling can be realized geometrically as a
cone deformation, as follows. Suppose $M$ is a $3$--manifold with
torus boundary which admits a complete hyperbolic metric. Let $s$ be
a slope on $\partial M$. Then we may view the complete hyperbolic
structure on $M$ as a hyperbolic cone manifold structure on $M(s)$
with cone angle zero along the link at the core of the attached solid
torus in $M(s)$.
We may always increase the cone angle from $\alpha=0$ to
$\alpha=\varepsilon$, for some $\varepsilon>0$ via cone deformation,
by work of Hodgson and Kerckhoff \cite{hk:rigid}.
When $\alpha=\varepsilon$, in the hyperbolic cone metric, the slope $s$
will bound a singular disk. That is, a representative of $s$ can be
isotoped to bound a disk $D$ which admits a smooth hyperbolic metric
everywhere except at the core of $D$, where $D$ intersects the
singular locus $\Sigma$. Thus this manifold with the hyperbolic cone
metric is homeomorphic to $M(s)$.
In case there is a cone deformation starting at cone angle $\alpha=0$
and extending to $\alpha=2\pi$, the final structure when $\alpha=2\pi$
gives a complete, non-singular hyperbolic metric on the manifold
$M(s)$. In this case, we say the Dehn filling is \emph{realized by
cone deformation}.
The benefit of a cone deformation is that one obtains some geometric
control on the hyperbolic structure of the manifold. In particular,
when we have a single filling slope, the core of the Dehn filled solid
torus is a closed geodesic in the hyperbolic structure given by cone
angle $\alpha=2\pi$. Thus this core is isotopic to a geodesic
provided we can show a Dehn filling is realized by cone deformation.
Hodgson and Kerckhoff analyzed conditions which guarantee the
existence of a cone deformation \cite{hk:univ}. We will apply their
results, but first we need the following definition.
\begin{define}
Let $M$ be a $3$--manifold with torus boundary $\partial M = T$
admitting a complete hyperbolic metric. Let $s$ be a slope on $T$.
In the hyperbolic structure on $M$, $T$ becomes a cusp. Take any
embedded horoball neighborhood of this cusp and consider its
boundary. This inherits a Euclidean metric from the hyperbolic
structure on $M$. Thus we may measure the length of $s$ and the
area of the Euclidean torus $T$ with respect to this metric.
Define the \emph{normalized length} of $s$ to be
\[ \ell_{norm}(s) = \frac{\mathrm{length}(s)}{\sqrt{\mathrm{area}(T)}}. \]
Here $\mathrm{length}(s)$ is the length of a geodesic representing $s$.
Note that unlike the lengths of Theorem \ref{thm:hyp-result}, the
normalized length of a slope is independent of choice of horoball
neighborhood about the cusp corresponding to $T$.
\end{define}
The following is a consequence of Theorem 1.2 of \cite{hk:shape}.
\begin{theorem}[(Hodgson--Kerckhoff)]
Consider a complete, finite volume hyperbolic structure on the
interior of a compact, orientable 3--manifold $M$ with $k\geq 1$ torus
boundary components. Let $T_1, \dots, T_k$ be horospherical tori
which are embedded as cross--sections to the cusps of the complete
structure. Let $s_1, \dots, s_k$ be slopes, $s_i$ on $T_i$. Then
$M(s_1, \dots, s_k)$ admits a complete hyperbolic structure in which
the core curves of the Dehn filled solid tori are isotopic to
geodesics, provided the normalized lengths $\hat{L_i} =
\ell_{norm}(s_i)$ satisfy
\[ \sum_{i=1}^k \frac{1}{\hat{L_i}^2} < \frac{1}{(7.5832)^2}. \]
\label{thm:hk}
\end{theorem}
Theorem 1.2 of \cite{hk:shape} is actually a more general theorem
about Dehn filling space for manifolds with multiple cusps. However,
in the proof of that theorem it is shown that under the above
assumptions on normalized lengths of slopes, a cone deformation exists
from cone angle $0$ to $2\pi$ for which each component of the singular
locus has a tube about it of radius at least
$\mathrm{arctanh}(1/\sqrt{3})$ (page 36 of \cite{hk:shape}). The
components of the singular locus correspond to the cores of the filled
solid tori. Since each has a tube about it throughout the
deformation, the cores remain isotopic to geodesics. See also the
explanation in \cite{hk:shape} on page 5, after the statement of
Theorem 1.2.
\begin{lemma}
Let $M$, $L$, $\lambda_i$, and $\mu_i$ be as in Proposition
\ref{prop:slope-lengths}. Then the normalized length of each slope
$s_i = \mu_i + n_i\,\lambda_i$ is at least
\[ \ell_{norm}(s_i) \geq \sqrt{c_i}, \]
where again $c_i$ is the number of half--twists inserted by the Dehn
filling along slope $s_i$.
\label{lemma:norm-lengths}
\end{lemma}
The proof of Lemma \ref{lemma:norm-lengths} is similar to that of
Proposition \ref{prop:slope-lengths}, except with the added difficulty
that we are considering normalized lengths, and not actual lengths.
Compare to \cite[Proposition 6.5]{purcell:cusps}.
\begin{proof}
Write the slope $s_i=\mu_i + n_i\,\lambda_i$ in terms of the lengths
of ${\boldsymbol{o}}_i$ and ${\boldsymbol{p}}_i$, of Lemma \ref{lemma:generators}. In
particular, as in Proposition \ref{prop:slope-lengths}, the slope is
given by ${\boldsymbol{p}}_i + c_i\,{\boldsymbol{o}}_i$, where $c_i$ is the number of
half--twists inserted by the Dehn filling, and since ${\boldsymbol{o}}_i$ and
${\boldsymbol{p}}_i$ are orthogonal, its length is given by $\sqrt{p_i^2 +
c_i^2\,o_i^2}$, where $p_i$ and $o_i$ denote the lengths of geodesic
representatives of ${\boldsymbol{p}}_i$ and ${\boldsymbol{o}}_i$. By Lemma
\ref{lemma:generators}, the area of the cusp torus is given by
$2o_ip_i$.
Thus the normalized length of $s_i= \mu_i + n_i\,\lambda_i$ is given
by
\[ \ell_{norm}(s_i) =
\frac{\sqrt{p_i^2 + c_i^2o_i^2}}{\sqrt{2p_io_i}} =
\sqrt{\frac{p_i}{2o_i} + \frac{c_i^2\,o_i}{2p_i}}. \]
Minimize the normalized length with respect to $p_i/o_i$. We
find that its value is minimum when the ratio $p_i/o_i$ equals
$c_i$. In this case, the normalized length will be $\sqrt{c_i}$.
\end{proof}
We may now prove Theorem \ref{thm:geodesics}, giving results on
isotopy classes of geodesics in generalized augmented links.
\begin{theorem}
Let $K$ be a knot or link in $S^3$ which has a diagram $D$ and a
maximal twist region selection with $\mathop{\rm tw}\nolimits(D)$ twist regions, such that
the corresponding augmentation is hyperbolic. Let $c_i$ be the number
of half--twists in the $i$-th twist region. Then each crossing circle
is isotopic to a geodesic in the hyperbolic structure on $S^3-K$,
provided
\[ \sum_{i=1}^{\mathop{\rm tw}\nolimits(D)} \frac{1}{c_i} < \frac{1}{(7.5832)^2}.\]
\label{thm:geodesics}
\end{theorem}
\begin{proof}
$S^3-K$ is obtained from $S^3-L$ by Dehn filling the crossing circles.
By Lemma \ref{lemma:norm-lengths}, the normalized lengths of the
slopes of the Dehn filling are at least $\sqrt{c_i}$, where $c_i$ is
the number of half--twists in the $i$-th generalized twist region of
$D$. By Theorem \ref{thm:hk}, the cores of the filled solid tori are
isotopic to geodesics provided
\begin{eqnarray*}
\sum_{i=1}^{\mathop{\rm tw}\nolimits(D)} \frac{1}{c_i} &<& \frac{1}{(7.5832)^2}.
\end{eqnarray*}
\end{proof}
\end{document}
\begin{document}
\title{Rank Polynomials of Fence Posets are Unimodal}
\begin{abstract}
We prove a conjecture of Morier-Genoud and Ovsienko that says that rank polynomials of the distributive lattices of lower ideals of fence posets are unimodal. We do this by introducing a related class of \emph{circular} fence posets and proving a stronger version of the conjecture due to McConville, Sagan and Smyth. We show that the rank polynomials of circular fence posets are symmetric and conjecture that unimodality holds except in some particular cases. We also apply the recent work of Elizalde, Plante, Roby and Sagan on rowmotion on fences and show many of their homomesy results hold for the circular case as well.
\end{abstract}
\section{Introduction}
Fence posets are a natural class of posets that appear in the study of cluster algebras, quiver representations and other areas of enumerative combinatorics, see \cite{Saganpaper} for an overview. Let $\alpha=(\alpha_1,\alpha_2,\ldots,\alpha_s)$ be a composition of $n$. The fence poset of $\alpha$, denoted $F(\alpha)$, is the poset on $x_1,x_2,\ldots,x_{n+1}$ with the order relations:
\begin{equation*}
x_1\preceq x_2 \preceq \cdots\preceq x_{\alpha_1+1}\succeq x_{\alpha_1+2}\succeq \cdots\succeq x_{\alpha_1+\alpha_2+1}\preceq x_{\alpha_1+\alpha_2+2}\preceq\cdots\preceq x_{\alpha_1+\alpha_2+\alpha_3+1}\succeq \cdots
\end{equation*}
The relations describe a poset with $n+1$ nodes, where $n = \alpha_1 + \cdots + \alpha_s$ is the \emph{size} of $\alpha$, schematically depicted in Figure~\ref{fig:first} below.
\begin{figure}
\caption{The fence poset $F(\alpha)$.}
\label{fig:first}
\end{figure}
We call the $s$ maximal chains of this poset corresponding to parts of $\alpha$ its \emph{segments}. Lower order ideals of $F(\alpha)$ ordered by inclusion give a distributive lattice which we denote by $J(\alpha)$. The lattice $J(\alpha)$ is ranked by the size of the ideals, with a generating polynomial $R(\alpha;q)= \sum_{I \in J(\alpha)} q^{|I|}$, called the \emph{rank polynomial}. We will use $r(\alpha)$ to denote the corresponding \emph{rank sequence} given by the coefficients of the powers of $q$.
\begin{example} \label{ex:2113} The fence poset for $\alpha=(2,1,1,3)$ is given in the left part of Figure \ref{fig:2113}. Note that the ideals of maximal and minimal rank are unique. Ideals of rank $1$ and rank $7$ are given by minima and complements of maxima respectively, and there are five ideals of rank $2$, depicted in Figure \ref{fig:2113}, right. The full rank sequence is $(1,3,5,6,6,5,3,2,1)$.
\begin{figure}
\caption{The fence poset $F(2,1,1,3)$ (left) and its five ideals of rank $2$ (right).}
\label{fig:2113}
\end{figure}
\end{example}
\begin{comment}
\begin{example}\label{F2113} The fence poset for $\overline{\alpha}pha=(2,1,1,3)$ is given in Figure \ref{fig:2113intro}.
\begin{figure}
\caption{The fence poset $F(2,1,1,3)$}
\label{fig:2113intro}
\end{figure}
The full rank sequence for this poset is $(1,3,5,6,6,5,3,2,1)$, which is unimodal.
\end{example}
\end{comment}
The rank sequences of fence posets were used by Morier-Genoud and Ovsienko in \cite{originalconj} in their recent work defining $q$-analogues of the rational numbers. Their $q$-rationals are defined by the ratio of the rank polynomials for two compositions given by the continued fraction expression of the rationals considered and enjoy several interesting properties including a type of convergence which allows one to extend their definition to obtain $q$-real numbers. They also proposed the following conjecture in their paper, the proof of which is the main result in this paper.
\begin{thm}[Conjecture $1.4$ in \cite{originalconj}]\label{thm:main0}
The rank polynomials of fence posets are unimodal.
\end{thm}
While there was no a priori reason for the authors to expect that this conjecture holds, there was ample numerical evidence. Results predating the conjecture itself were given by Salvi and Munarini \cite{crown}, who considered the case when all parts equal to $1$. Claussen \cite{claussen2020expansion} showed that the conjecture holds when the composition has at most $4$ parts. Further partial progress was made by McConville, Sagan and Smyth \cite{Saganpaper}, who proved the conjecture in the case where the first segment is larger than the sum of the others and proposed the following strengthening of this conjecture. The various interlacing properties referred to in the next theorem are defined in the next section.
\begin{thm}[Conjecture 1.4 in \cite{Saganpaper}]\label{thm:main} Suppose $\alpha=(\alpha_1,\alpha_2,\ldots,\alpha_s)$.
\begin{enumerate}
\item[(a)] If $s=1$ then $r(\alpha) = (1,1,\ldots,1)$ is symmetric.
\item[(b)] If $s$ is even, then $r(\alpha)$ is bottom interlacing.
\item[(c)] If $s\ge 3$ is odd we have:
\begin{enumerate}
\item[(i)] If $\alpha_1>\alpha_s$ then $r(\alpha)$ is bottom interlacing.
\item[(ii)] If $\alpha_1<\alpha_s$ then $r(\alpha)$ is top interlacing.
\item[(iii)] If $\alpha_1=\alpha_s$ then $r(\alpha)$ is symmetric, bottom interlacing, or top interlacing depending on whether
$r(\alpha_2,\alpha_3,\ldots,\alpha_{s-1})$ is symmetric, top interlacing, or bottom interlacing, respectively.
\end{enumerate}
\end{enumerate}
\end{thm}
One of the challenges in proving the above theorem comes from the feature that there are fence posets whose rank sequences can have long flat parts.
\begin{example}
The rank sequence of the composition $\alpha = (a, 1, 1, 1)$ where $a > 2$ is
\[r(\alpha) = (1, 3, 4, \overbrace{5}^{a-2}, 4, 3, 2, 1).\]
\end{example}
We will describe the main ideas in our proof later but it is noteworthy that our proof is purely combinatorial and essentially constructive, in that we can effectively describe injections that realize the desired unimodality. Unimodality of combinatorial sequences is often deduced by first proving stronger properties of the sequence such as log concavity, ultra log concavity or even real rootedness, but for this problem, none of these stronger properties need hold. Indeed, to see that even log concavity need not hold, observe that for the fence poset $F(\alpha) = F(2, 1, 1, 3)$ described in Example~\ref{ex:2113}, we have
\[9 = r(\alpha)[6]^2 < r(\alpha)[5]\, r(\alpha)[7] = 5\cdot 2 = 10,\] where we use the notation $r(\alpha)[k]$ to refer to the number of ideals of rank $k$ of the fence poset $F(\alpha)$.
The key idea in our proof is to navigate between the properties of fence posets and those of the closely related class of \emph{circular fence posets}. For a composition $\alpha=(\alpha_1,\alpha_2,\ldots,\alpha_{2s})$ of $n$ we define the \emph{circular fence poset} of $\alpha$, denoted $\overline{F}(\alpha)$, as the fence poset of $\alpha$ where $x_{n+1}$ and $x_1$ are taken to be equal, so we get a circular poset with $n$ nodes.
\begin{example}
The circular fence poset $\bar{F}(2, 1, 1, 3)$ is obtained from the regular fence poset $F(2, 1, 1, 3)$ (see Figure~\ref{fig:2113}) by identifying the vertices $x_1$ and $x_8$, yielding a poset on $7$ elements. Referring once again to Figure~\ref{fig:2113}, given that we have identified $x_1$ and $x_8$, two of the five ideals of size two are identical in the circular version and the ideal $\{x_1, x_8\}$ does not appear. Thus, the number of rank $2$ ideals in $\bar{F}(2, 1, 1, 3)$ is $3$. The full rank sequence for $\bar{F}(2, 1, 1, 3)$ is $(1, 2, 3, 4, 4, 3, 2, 1)$.
\end{example}
We will use $\bar{J}(\alpha)$ to refer to the lattice of lower ideals of $\bar{F}(\alpha)$, $\bar{R}(\alpha; q)$ to refer to the rank polynomial of $\bar{J}(\alpha)$ and $\bar{r}(\alpha)$ to refer to the rank sequence. Rank polynomials for circular fence posets behave slightly differently from those for regular fence posets; there are examples where they fail to be unimodal, see Section~4 for a discussion and a characterization. However, they do satisfy a highly convenient property.
\begin{thm}
Rank polynomials of circular fence posets are \emph{symmetric}.
\end{thm}
Given a fence poset, there are several naturally related circular fence posets. Our proof consists of relating the rank polynomials of these various posets and inductively proving a number of ancillary results. One of the byproducts of our proof is the following result that might be of independent interest.
\begin{thm}
Let $\alpha = (\alpha_1, \ldots, \alpha_{2s})$ be a composition with an even number of parts and consider any cyclic shift of $\alpha$, $\beta = (\alpha_k, \alpha_{k+1}, \ldots, \alpha_{2s}, \alpha_1, \alpha_2, \ldots, \alpha_{k-1})$. Then
\[\bar{R}(\alpha;q) = \bar{R}(\beta;q).\]
In other words, the rank polynomial of a circular fence poset is well defined over \emph{circular} compositions.
\end{thm}
As mentioned above, when it comes to circular fence posets, unimodality need not hold.
\begin{example}
Let $\alpha = (1, a, 1, a)$ be a composition. A direct calculation shows that the rank sequence is
\[\bar{r}(\alpha) = (1, 2, \ldots, a, a+1, a, a+1, a, a-1, \ldots, 1).\]
This sequence has a dip in the middle term and is not unimodal.
\end{example}
\section{Notation and Terminology}
Let $P$ be a finite poset. A subset $I$ of $P$ is said to be a lower order ideal (resp. upper order ideal) if when $x \in I$, any $y \preceq x$ (resp. any $y \succeq x$) lies in $I$ as well.
We will use the word ``ideal'' to denote a lower order ideal, unless stated otherwise, and use the notation $I \trianglelefteq P$. Ideals (or upper order ideals) of a poset $P$ ordered by inclusion give the structure of a distributive lattice $J(P)$, ranked by the number of elements. See \cite{Stanley} Chapter 3.4 for a detailed discussion. For the purposes of this work, we will use the word ``rank'' exclusively to refer to the rank structure of the order ideal lattice. Note that taking the setwise complement of an ideal gives an upper order ideal of complementary rank.
We will be interested in the case where $P$ is a fence, or a circular fence, and consider the corresponding rank sequence and rank polynomial. The fences are defined to start with an up step, but as flipping a fence vertically only reverses the rank sequence, their structure can be inferred easily. Fences that start with a down step will come up at a few instances in our proofs, but instead of developing a separate notation for upside down fences, we will allow the first part of the composition to be zero in those instances.
A sequence is called \emph{unimodal} if there exists an index $m$ such that
\[
a_0\le a_1 \le \cdots \le a_m \ge a_{m+1}\ge \cdots \ge a_{n}.
\]
It was conjectured in \cite{originalconj} that the rank sequence of $J(\alpha)$ is unimodal. A more specific conjecture about the behaviour of the coefficients was given in \cite{Saganpaper}.
A sequence is called {\em top interlacing} if
$$
a_0\le a_n \le a_1\le a_{n-1} \le \ldots\le a_{\ce{n/2}}$$
where $\ce{\cdot}$ is the ceiling function. Similarly, the sequence is {\em bottom interlacing} if
$$
a_n\le a_0 \le a_{n-1} \le a_1 \le \ldots \le a_{\fl{n/2}}
$$
with $\fl{\cdot}$ being the floor function. Note that top interlacing as well as bottom interlacing sequences are unimodal.
To prove Theorem \ref{thm:main}, we will define a circular version of the fence poset, where the first and last node are related.
\section{Circular Fences}
For a composition $\alpha=(\alpha_1,\alpha_2,\ldots,\alpha_{2s})$ of $n$ we define the \emph{circular fence poset} of $\alpha$, denoted $\overline{F}(\alpha)$, to be the fence poset of $\alpha$ with the additional relation $x_{n+1}=x_1$, so that we end up with a circular poset with $n$ nodes. We will denote the corresponding order ideal lattice, rank polynomial and rank sequence by $\overline{J}(\alpha), \overline{R}(\alpha;q)$ and $\overline{r}(\alpha)$ respectively. We will call the nodes that correspond to $\alpha_i$ the $i$th \emph{segment} of $\overline{F}(\alpha)$.
Circular fences have substantial intrinsic symmetry. Shifting the parts cyclically by two steps gives the same object and reversing the order of the parts preserves the rank sequence. In the special case when all the segments are of size $1$, the object we obtain is called a \emph{crown}. Crowns were previously studied in \cite{crown} where it was shown that the corresponding rank polynomials are symmetric, and that they are unimodal when the number of segments is different than $4$. Examining the one step shift allows us to directly say that the symmetry holds when one of the segments is larger as well. This will serve as the basis to prove that in fact, for any circular fence poset we get a rank symmetric lattice.
\begin{lemma} Shifting the parts of $\alpha$ cyclically by one step reverses the rank sequence $\overline{r}(\alpha)$. In particular \label{lemma:basis}$\overline{R}((k,1,1,1,\ldots,1);q)$ where the number of segments is even is symmetric for any $k\in \mathbb{N}$.
\end{lemma}
\begin{proof} This follows as a cyclic shift of one step on a circular fence is equivalent to reversing the order relation or flipping the poset upside down. Making a cyclic shift of one step followed by reversing the parts of $(k,1,1,1,\ldots,1)$ gives $(1,1,1,1,\ldots,1,k,1)$ which has the exact same structure but a reversed rank sequence.
\end{proof}
In general, rank polynomials for circular fences are no easier to calculate than their non circular counterparts and we only have formulas for a limited number of cases. The case when $\overline{\alpha}pha=(1,a,1,a,\ldots,1,a)$ was considered in \cite{crown2}. They were able to formulate the rank polynomial in terms of Chebyshev polynomials of the first kind, defined recursively by $T_0(q)=1$, $T_1(q)=q$ and $T_{n+2}(q) = 2q \,T_{n+1}(q) - T_n(q)$.
\begin{prop}[\cite{crown2}] We have
\[\overline{R}((1,a,1,a,\ldots,1,a);q)=2q^{({(a-1)s})/{2}}\,{T}_{a-1}\left(\frac{1+q+q^2+\cdots+q^s}{2q^{{s}/{2}}}\right)\]
where $2s$ is the number of segments of $(1,a,1,a,\ldots,1,a)$.
\end{prop}
Note that when $s=2$, we get the polynomial $1+2q+3q^2+\cdots+(a+1)q^a+aq^{a+1}+(a+1)q^{a+2}+(a)q^{a+3}+\cdots+2q^{2a+1}+q^{2a+2}$, which is not unimodal.
Some other small cases that can be easily calculated by hand are listed in Table \ref{tab:smallcases} below.
\begin{table}[ht]
\centering
\begin{tabular}{||c c c||}
\hline
$\alpha$ & Ideal Count & Rank Polynomial \\ [0.5ex]
\hline\hline
$(a,b)$ & $ab+2$ & $1+q[a]_q[b]_q+q^{a+b}$ \\ [0.5ex]
\hline
$(a,1,b,1)$ & $ab+2a+2b+2$ & $[a+2]_q[b+2]_q-q^{a+1}-q^{b+1}$ \\[0.5ex]
\hline
$(a,b,c,d)$ & $abcd+ab+cd+ad+bc+2$ & \begin{tabular}{c}
$1+q[a]_q[d]_q+q[b]_q[c]_q+q^{a+b+1}[c]_q[d]_q$ \\
$ +q^{c+d+1}[a]_q[b]_q+q^{a+b+c+d}$
\end{tabular} \\
\hline
$(a,a,a,a)$ & $a^4+4a^2+2$ & $1+([a]_q)^4+(2q^{2a+1}+2q)([a]_q)^2+q^{4a}$ \\[0.5ex]
\hline
\end{tabular}
\caption{Ideal count and rank polynomial for small cases}
\label{tab:smallcases}
\end{table}
The cases of $(a,b)$ and $(1,a,1,b)$ are indeed quite straightforward. The lattice formed by the ideals of $\overline{F}(a,b)$ is formed by the direct product of two chains of lengths $a$ and $b$, with an added minimum element (for the empty ideal) and maximum element (for the full ideal): $\hat{0} \oplus C_{a}\times C_{b} \oplus \hat{1}$. Here, the position on $C_{a}$ corresponds to the number of unshared elements in the segment of size $a$, whereas the position on $C_{b}$ describes the number of unshared elements in the segment of size $b$. The natural symmetric chain decomposition on $C_{a}\times C_{b}$ can easily be extended to accommodate the two added nodes, as seen in Figure \ref{fig:48latticechains} for the example of $(5,8)$. We get $ab+2$ ideals with the corresponding rank polynomial $\overline{R}((a,b);q)=1+q[a]_q[b]_q+q^{a+b}$.
\begin{figure}
\caption{The lattice $J((5,8))$ (left) has a natural symmetric chain decomposition (right)}
\label{fig:48latticechains}
\end{figure}
When we have $(1,a,1,b)$, any ideal of size $k$ is a partitioning of $k$ into two parts $p_1\leq a$ and $p_2\leq b $ such that $p_1=a \Rightarrow p_2 \neq 0$ and $p_2=b \Rightarrow p_1\neq 0$. The lattice we obtained can be visualised as $C_{a+1} \times C_{b+1}$ with the two opposite corners deleted. When $a\neq b$ this also has a natural symmetric chain decomposition. When $a=b$ however, we have no such decomposition as the resulting rank polynomial is not unimodal, see Figure \ref{fig:1417}. We have $(a+1)(b+1)-2$ ideals, with $\overline{R}((1,a,1,b);q)=[a+2]_q[b+2]_q-q^{a+1}-q^{b+1}$.
\begin{figure}
\caption{The lattice $J((1,3,1,6))$ (left) has a natural symmetric chain decomposition (middle) whereas $J((1,4,1,4))$ (right) can not be decomposed into symmetric chains.}
\label{fig:1417}
\end{figure}
\renewcommand{\arraystretch}{1.5}
\section{An Example} \label{sec:example}
In this section, we will consider ways of closing up a fence poset to get a rounded fence through the example of $\alpha=(2,1,1,3)$. The ideas illustrated here will be the backbone of the proofs that will be given in the upcoming sections.
\begin{method}Letting $x_1=x_8$. \end{method}
\begin{figure}
\caption{The ideals of $\overline{F}(2,1,1,3)$}
\end{figure}
This natural choice of setting $x_1=x_8$ gives us the rounded fence poset $\overline{F}(\alpha)$, which has the disadvantage of having only $7$ nodes, so that we do not have all the structure of our original poset included in this circular version. In particular, we lose the ideals that contain only one of $x_1$ and $x_8$.
For the ideals of $F(2,1,1,3)$ that contain $x_1$ but not $x_8$, any node above $x_8$ is also not included, and there is no effect on nodes above $x_1$, so we get a bijection with ideals of $F(1,1)$ as depicted in Figure \ref{fig:2113left} below.
\begin{figure}
\caption{The ideals of ${F}(2,1,1,3)$ that contain $x_1$ but not $x_8$, in bijection with the ideals of $F(1,1)$}
\label{fig:2113left}
\end{figure}
Similarly, ideals that contain $x_8$ but not $x_1$ are in bijection with ideals of $F(1,2)$, see Figure \ref{fig:2113right}.
\begin{figure}
\caption{The ideals of ${F}(2,1,1,3)$ that contain $x_8$ but not $x_1$, in bijection with the ideals of $F(1,2)$}
\label{fig:2113right}
\end{figure}
The connection between the rank polynomials, consequently, is a bit tricky. The ideals of $F(2,1,1,3)$ that do not contain $x_1$ or $x_8$ do not contain any node above them, so we have only two such ideals, the empty one and the one that consists of just $x_4$. Subtracting all these gives us the contribution of the ideals that contain both $x_1$ and $x_8$, which can also be calculated via adding $2$ nodes to each ideal of $F(1,1,1,2)$.
\begin{eqnarray*}
q^2 R(1,1,1,2)&=& R(2,1,1,3)-q R(1,1) - q R(1,2) - (1+q)\\
&=& (1+3q+5q^2+ 6q^3 + 6q^4 + 5q^5 + 3q^6 + 2q^7 + q^8)-q(1+2q+q^2+q^3)\\&&-q(1+2q+2q^2+q^3+q^4)-(1+q)\\
&=& q^2+3q^3+4q^4+4q^5+3q^6+2q^7+q^8.
\end{eqnarray*}
These ideals are shifted by $q^{-1}$ to give the ideals of $\overline{F}(2,1,1,3)$ that contain $x_1=x_8$. The two ideals that do not contain $x_1=x_8$ contribute $1+q$, so that we get the following rank symmetric polynomial:
\begin{eqnarray*}
\overline{R}(2,1,1,3) &=& (q^{-1})(q^2+3q^3+4q^4+4q^5+3q^6+2q^7+q^8)+1+q\\
&=& 1+2q+3q^2+4q^3+4q^4+3q^5+2q^6+q^7.
\end{eqnarray*}
Adding the relation that $x_1$ is above (or below) $x_8$ allows us to get a circular fence with the same number of nodes.
\begin{method} \label{method:connect} Connecting $x_1$ and $x_8$.\end{method}
\begin{figure}
\caption{The circular fence $\overline{F}(3,1,1,3)$ given by assuming $x_1$ is above $x_8$}
\end{figure}
Note that the ideals of $\overline{F}(3,1,1,3)$ give all ideals $I$ of $F(2,1,1,3)$ satisfying $x_1 \in I \Rightarrow x_8 \in I$. The ones that are left over are exactly the ones that contain $x_1$ but not $x_8$ that correspond to the ideals of $F(1,1)$ as we discussed above (See Figure \ref{fig:2113left}).
The corresponding rank polynomials are also related:
\begin{eqnarray*}
R(2,1,1,3)&=& \overline{R}(3,1,1,3)+ qR(1,1)\\
&=& (1+2q+3q^2+ 5q^3 + 5q^4 + 5q^5 + 3q^6 + 2q^7 + q^8)+q(1+2q+q^2+q^3)\\
&=& 1+3q+5q^2+ 6q^3 + 6q^4 + 5q^5 + 3q^6 + 2q^7 + q^8.
\end{eqnarray*}
Alternatively we can add a new node $x_0$ to complete the cycle.
\begin{method}Adding a new $x_0$ above $x_1$ and $x_8$. \end{method}
\begin{figure}
\caption{The ideals of $\overline{F}(2,1,1,3,1,1)$}
\end{figure}
Adding $x_0$ gives us the rounded fence poset $P_T:=\overline{F}(2,1,1,3,1,1)$ with 9 nodes. Ideals of our original poset are exactly the ideals of $P_T$ that do not contain $x_0$. Any ideal that contains $x_0$ also contains $x_1$ and $x_8$ but puts no other restrictions on the inclusion of the other nodes, so these ideals are in bijection with those of $F(1,1,1,2)$. On the rank polynomials side, we get the identity:
\begin{eqnarray*}
&&R(2,1,1,3)= \overline{R}(2,1,1,3,1,1)-q^3 R(1,1,1,2)\\
&&= (1+3q+5q^2+ 7q^3 + 9q^4 + 9q^5 + 7q^6 + 5q^7 + 3q^8+q^9 )-q^3(1+3q+4q^2+4q^3+3q^4+2q^5+q^6)\\
&&= 1+3q+5q^2+ 6q^3 + 6q^4 + 5q^5 + 3q^6 + 2q^7 + q^8.
\end{eqnarray*}
\begin{method}Adding a new $x_0$ below $x_1$ and $x_8$.\end{method}
\begin{figure}
\caption{The ideals of $\overline{F}(3,1,1,4)$}
\end{figure}
Adding $x_0$ below gives us the rounded fence poset $P_B:=\overline{F}(3,1,1,4)$ with 9 nodes. Ideals that contain $x_0$ are in bijection with our original poset. Any ideal that does not contain $x_0$ can not contain anything above either, and there are only two such ideals, the empty ideal and the rank $1$ ideal that only contains $x_4$:
\begin{eqnarray*}
R(2,1,1,3)&=& (q^{-1})( \overline{R}(3,1,1,4)- R(1))\\
&=& (q^{-1}) ((1+2q+3q^2+ 5q^3 + 6q^4 + 6q^5 + 5q^6 + 3q^7 + 2q^8+q^9 )-(1+q))\\
&=& 1+3q+5q^2+ 6q^3 + 6q^4 + 5q^5 + 3q^6 + 2q^7 + q^8 .
\end{eqnarray*}
\section{Rank Symmetry in Circular Fences}
\begin{comment}
In this section, we will prove that the rank polynomial for circular fences is always symmetrical:
\begin{thm} \label{thm:sym} For any composition $\overline{\alpha}pha$ of $n$ with an even number of segments, the rank polynomial of $\overline{\alpha}pha$ is symmetric with center of symmetry at $n/2$.
\end{thm}
As we already showed symmetry holds in the case $(k,1,1,\ldots,1)$ for any $k$, for any given number of beads we have a case where we already know the rank polynomial is symmetric, so it suffices to show that moving beads around does not break the symmetry. The constructions given in the previous section will be our main tool, with which we will go back and forth between the circular and the non-circular cases. We will also make use of the following auxiliary statements about regular (non-circular) fences:
\begin{itemize} \setlength\itemsep{3mm}
\item[ ]
\item[ \textbf{A(n):}] Given a composition $\beta=(\beta_1,\beta_2,\ldots,\beta_{2s})$ of $n-1$, let $\mathfrak{I}_L$ be the set of ideals of $F(\overline{\alpha}pha)$ that include the leftmost node $x_1$, but not the rightmost node $x_n$. Similarly let $\mathfrak{I}_R$ be the set of ideals of $F(\overline{\alpha}pha)$ that include the rightmost node but not the leftmost. The polynomial $$ \displaystyle \sum_{I\in \mathfrak{I}_L }q^{|I|}-\sum_{J\in \mathfrak{I}_R }q^{|J|}$$ is symmetric with center of symmetry $n/2$.
\item[ \textbf{B(n):}] Given a composition $\beta=(\beta_1,\beta_2,\ldots,\beta_{2s})$ of $n-1$, where $\beta_1$ and $\beta_{2s}$ are allowed to be $0$ with the convention that when $\beta_1$ is $0$, we get a fence that starts with a down step instead of an up step. Then \[R((\beta_1+1,\beta_2,\ldots,\beta_{2s});q)-R((\beta_1,\beta_2,\ldots,\beta_{2s}+1);q)\] is symmetric around $n/2$.
\item [\textbf{C(n):}] Given a composition $\beta=(\beta_1,\beta_2,\ldots,\beta_{2s})$ of $n$. Then
\[\bar{R}((\beta_1+1,\beta_2,\ldots,\beta_{2s});q)-\bar{R}((\beta_1,\beta_2,\ldots,\beta_{2s}+1);q)\] is symmetric around $(n+1)/2$.
\item[ \textbf{D(n):}] Given a composition $\beta = (\beta_1, \ldots, \beta_{2s})$ of $n$, the rank polynomial of the associated circular fence poset $\bar{R}(\beta)$ is symmetric.
\end{itemize}
The outline of the proof is as follows. We will show in sequence,
\begin{itemize}
\item $\mathbf{D(n)}$ implies $\mathbf{A(n)}$.
\item $\mathbf{A(n)}$ implies $\mathbf{B(n)}$.
\item $\mathbf{B(n)}$ implies $\mathbf{C(n+1)}$.
\item $\mathbf{C(n+1)}$ implies $\mathbf{D(n+2)}$.
\end{itemize}
\begin{proof}[Proof of Theorem \ref{thm:sym}] If we have just $2$ beads, we have the symmetric rank polynomial $\overline{R}((1,1);q)=1+q+q^2$. Assume that symmetry holds when we have $n\leq M$ nodes. We Let $\overline{\alpha}pha=(\overline{\alpha}pha_1,\overline{\alpha}pha_2,\ldots,\overline{\alpha}pha_{2s})$ be a composition with $M$ nodes. We will show that if the rank polynomial of $\overline{\alpha}pha_L=(\overline{\alpha}pha_1+1,\overline{\alpha}pha_2,\ldots,\overline{\alpha}pha_{2s})$ is symmetric with center of symmetry given by $(M+1)/2$, then so is the one for $\overline{\alpha}pha_R=(\overline{\alpha}pha_1,\overline{\alpha}pha_2,\ldots,\overline{\alpha}pha_{2s}+1)$. Equivalently, moving nodes from one side of a minimal point to the other preserves symmetry. This would also imply we can move nodes over maximal points, as turning the fence upside down preserves symmetric rank sequences.
By Lemma \ref{lemma:basis} the result follows.
\begin{claim}\label{claim1} $A(n)$ holds for any $n \leq M$.
\end{claim}
\begin{claimproof} We will consider two natural circular fences related to $\beta$: $\overline{R}((\beta_1+1,\beta_2,\ldots,\beta_{2s}))$ given by adding the relation $x_{n}\preceq x_1$ to the fence of $\beta$ and $\overline{R}((\beta_1,\beta_2,\ldots,\beta_{2s}+1))$ given by adding the relation $x_{n}\succeq x_1$ to the fence of $\beta$ (See Method~\ref{method:connect} from Section~\ref{sec:example}). Let us denote their rank polynomials by $\overline{\mathfrak{R}}_L(q)$ and $\overline{\mathfrak{R}}_R(q)$ respectively.
Note that:
\begin{eqnarray*}
R(\beta;q)&=&\overline{\mathfrak{R}}_L(q)+\sum_{\substack{I\trianglelefteq F(\beta)\\x_{n} \in I,\, x_1 \notin I} }q^{|I|}=\overline{\mathfrak{R}}_R(q)+\sum_{\substack{J\trianglelefteq F(\beta)\\x_{1} \in J,\, x_{n} \notin J} }q^{|J|}
\end{eqnarray*}
\begin{eqnarray*}
\sum_{I\in \mathfrak{I}_L }q^{|I|}-\sum_{J\in \mathfrak{I}_R }q^{|J|}&=&\overline{\mathfrak{R}}_R(q)-\overline{\mathfrak{R}}_L(q).
\end{eqnarray*}
Both rank polynomials belong to circular fences with $n$ nodes, which are symmetric around $n/2$ by our hypothesis.
\end{claimproof}
\begin{claim} \label{claim2} $B(n)$ holds for any $n \leq M$.
\end{claim}
\begin{claimproof} Let $\mathfrak{F}_L=F((\beta_1+1,\beta_2,\ldots,\beta_{2s}))$ be the fence with $n+1$ nodes given by incrementing the leftmost segment of $\beta$ by $1$. Similarly, let $\mathfrak{F}_R=F((\beta_1,\beta_2,\ldots,\beta_{2s}+1))$.
We want to show that the following polynomial is symmetric around $n/2$:
$$ \displaystyle \sum_{I \trianglelefteq \mathfrak{F}_L }q^{|I|}-\sum_{J \trianglelefteq \mathfrak{F}_R }q^{|J|}.$$
We will make use of the circular fence $\overline{\mathfrak{RF}}$ for $(\beta_1+1,\beta_2,\ldots,\beta_{2s}+1)$. Note that we can obtain $\overline{\mathfrak{RF}}$ from $\mathfrak{F}_L$ by adding the relation $x_1 \preceq x_{n+1}$ so that:
$$\displaystyle \sum_{I\trianglelefteq \overline{\mathfrak{RF}} } q^{|I|} =\sum_{I\trianglelefteq \mathfrak{F}_L }q^{|I|}-\sum_{\substack{I\trianglelefteq \mathfrak{F}_L\\x_{n+1} \in I,\, x_1 \notin I} }q^{|I|}.$$
Similarly we have:
$$\displaystyle \sum_{I\trianglelefteq \overline{\mathfrak{RF}} } q^{|I|} =\sum_{J\trianglelefteq \mathfrak{F}_R }q^{|J|}-\sum_{\substack{J\trianglelefteq \mathfrak{F}_R\\x_1 \in J,\, x_{n+1} \notin J} }q^{|J|}.$$
Now we will try to describe the difference in terms of the ideals of $F(\beta)$:
\begin{eqnarray*}
\displaystyle \sum_{I \trianglelefteq \mathfrak{F}_L }q^{|I|}-\sum_{J \trianglelefteq \mathfrak{F}_R }q^{|J|} &=&\sum_{\substack{J\trianglelefteq \mathfrak{F}_R\\x_1 \in J,\, x_{n+1} \notin J} }q^{|J|}-\sum_{\substack{I\trianglelefteq \mathfrak{F}_L\\x_{n+1} \in I,\, x_1 \notin I} }q^{|I|}\\
&=& \sum_{\substack{J\trianglelefteq F(\beta)\\x_1 \in J,\, x_{n} \notin J} }q^{|J|}-\sum_{\substack{I\trianglelefteq F(\beta)\\x_{n} \in I,\, x_1 \notin I} }q^{|I|}.
\end{eqnarray*}
By Claim 1, this difference is symmetric with center of symmetry at $n/2$. \end{claimproof}
Let let $\mathfrak{I}_L$ be the set of ideals of $\overline{F}(\overline{\alpha}pha_L)$ and $\mathfrak{I}_R$ be the set of ideals of $\overline{F}(\overline{\alpha}pha_R)$. The ideals that do not contain $x_1=x_{m+2}$ are in bijection as they contain no nodes from the segments $\overline{\alpha}pha_1$ or $\overline{\alpha}pha_{2s}$. That means we can limit our attention to the ideals that include $x_1=x_{m+2}$, which can be seen as ideals of regular fences.
The ideals of $\overline{F}(\overline{\alpha}pha_L)$ that contain $x_1$ are exactly those of $F((\overline{\alpha}pha_1,\overline{\alpha}pha_2,\ldots,\overline{\alpha}pha_{2s}-1))$ shifted by q. Similarly, the ideals of $\overline{F}(\overline{\alpha}pha_R)$ that do not contain $x_1$ correspond to those of $F((\overline{\alpha}pha_1-1,\overline{\alpha}pha_2,\ldots,\overline{\alpha}pha_{2s}))$ shifted by q (Here, again we abuse notation a bit and allow the first or last segments to possibly be $0$.).
These are both partitions of $M-1$, so by Claim \ref{claim2} the difference between the generating polynomials of their ideals is symmetric around $(M-1)/2$.
Shifting by $q$ gives a rank sequence symmetric around $(M+1)/2$ as desired.
\end{proof}
\begin{corollary} The polynomial $\overline{R}(\overline{\alpha}pha;q)$ is invariant under cyclic shift of segments of $\overline{\alpha}pha$, so it is well defined over cyclic compositions.
\end{corollary}
\end{comment}
In this section, we will prove that the rank polynomial for circular fences is always symmetric.
\begin{thm} \label{thm:sym} For any composition $\alpha$ of $n$ with an even number of segments, the rank polynomial of $\alpha$ is symmetric with center of symmetry at $n/2$.
\end{thm}
As we already showed symmetry holds in the case $(k,1,1,\ldots,1)$ for any $k$, for any given number of beads we have a case where we already know the rank polynomial is symmetric, so it suffices to show that moving beads around does not break the symmetry. The constructions given in the previous section will be our main tool, with which we will go back and forth between the circular and the non-circular cases. Consider the following statements:
\begin{itemize} \setlength\itemsep{3mm}
\item[ ]
\item[ \textbf{A(n):}] Given a composition $\beta=(\beta_1,\beta_2,\ldots,\beta_{2s})$ of $n-1$, let $\mathfrak{I}_L$ be the set of ideals of $F(\beta)$ that include the leftmost node $x_1$, but not the rightmost node $x_n$. Similarly let $\mathfrak{I}_R$ be the set of ideals of $F(\beta)$ that include the rightmost node but not the leftmost. The polynomial $$ \displaystyle \sum_{I\in \mathfrak{I}_L }q^{|I|}-\sum_{J\in \mathfrak{I}_R }q^{|J|}$$ is symmetric with center of symmetry $n/2$.
\item[ \textbf{B(n):}] Given a composition $\beta=(\beta_1,\beta_2,\ldots,\beta_{2s})$ of $n-1$, where $\beta_1$ and $\beta_{2s}$ are allowed to be $0$ with the convention that when $\beta_1$ is $0$, we get a fence that starts with a down step instead of an up step. Then \[R((\beta_1+1,\beta_2,\ldots,\beta_{2s});q)-R((\beta_1,\beta_2,\ldots,\beta_{2s}+1);q)\] is symmetric around $n/2$.
\item [\textbf{C(n):}] Given a composition $\beta=(\beta_1,\beta_2,\ldots,\beta_{2s})$ of $n$, the following difference is symmetric around $(n+1)/2$
\[\bar{R}((\beta_1+1,\beta_2,\ldots,\beta_{2s});q)-\bar{R}((\beta_1,\beta_2,\ldots,\beta_{2s}+1);q).\]
\item[ \textbf{D(n):}] Given a composition $\beta = (\beta_1, \ldots, \beta_{2s})$ of $n$, the rank polynomial of the associated circular fence poset $\bar{R}(\beta)$ is symmetric.
\end{itemize}
We will prove the rank symmetry of circular fences by showing that $\mathbf{D(n)} \Rightarrow \mathbf{A(n)} \Rightarrow \mathbf{B(n)} \Rightarrow \mathbf{C(n+1)}$ which in turn implies $\mathbf{D(n+2)}$. Note that as a byproduct we obtain the statements $\mathbf{A(n)}$ and $\mathbf{B(n)}$ about the structure of non-circular fences, which may be of independent interest.
\begin{proof}[Proof of Theorem \ref{thm:sym}] We will use induction on the size of the composition. If $\alpha$ is a composition of $n\leq 3$ with an even number of parts we have only three choices, each of which gives us a symmetric rank polynomial:
\[\overline{R}((1,1);q)=1+q+q^2, \qquad \overline{R}((2,1);q)=\overline{R}((1,2);q)=1+q+q^2 + q^3.\]
Now, let us assume that $\mathbf{D(n)}$ holds, that is, for any composition $\alpha = (\alpha_1, \ldots, \alpha_{2s})$ of $n$, the rank polynomial $\bar{R}(\alpha)$ is symmetric.
\begin{claim}\label{claim1} $\mathbf{A(n)}$ holds.
\end{claim}
\begin{claimproof} We will consider two natural circular fences related to $\beta$: $\overline{R}((\beta_1+1,\beta_2,\ldots,\beta_{2s}))$ given by adding the relation $x_{n}\preceq x_1$ to the fence of $\beta$ and $\overline{R}((\beta_1,\beta_2,\ldots,\beta_{2s}+1))$ given by adding the relation $x_{n}\succeq x_1$ to the fence of $\beta$. Let us denote their rank polynomials by $\overline{\mathfrak{R}}_L(q)$ and $\overline{\mathfrak{R}}_R(q)$ respectively.
Note that
\begin{eqnarray*}
R(\beta;q)&=&\overline{\mathfrak{R}}_L(q)+\sum_{\substack{I\trianglelefteq F(\beta)\\x_{n} \in I,\, x_1 \notin I} }q^{|I|}=\overline{\mathfrak{R}}_R(q)+\sum_{\substack{J\trianglelefteq F(\beta)\\x_{1} \in J,\, x_{n} \notin J} }q^{|J|}
\end{eqnarray*}
Consequently
\begin{eqnarray*}
\sum_{I\in \mathfrak{I}_L }q^{|I|}-\sum_{J\in \mathfrak{I}_R }q^{|J|}&=&\overline{\mathfrak{R}}_R(q)-\overline{\mathfrak{R}}_L(q).
\end{eqnarray*}
Both rank polynomials belong to circular fences with $n$ nodes, which are symmetric around $n/2$ by our hypothesis.
\end{claimproof}
\begin{claim} \label{claim2} $\mathbf{B(n)}$ holds.
\end{claim}
\begin{claimproof} Let $\mathfrak{F}_L=F((\beta_1+1,\beta_2,\ldots,\beta_{2s}))$ be the fence with $n+1$ nodes given by adding a new node to the (possibly empty) leftmost segment of $\beta$. Similarly, let $\mathfrak{F}_R=F((\beta_1,\beta_2,\ldots,\beta_{2s}+1))$.
We want to show that the following polynomial is symmetric around $n/2$:
$$ \displaystyle \sum_{I \trianglelefteq \mathfrak{F}_L }q^{|I|}-\sum_{J \trianglelefteq \mathfrak{F}_R }q^{|J|}.$$
We will make use of the circular fence $\overline{\mathfrak{RF}}$ for $(\beta_1+1,\beta_2,\ldots,\beta_{2s}+1)$. Note that we can obtain $\overline{\mathfrak{RF}}$ from $\mathfrak{F}_L$ by adding the relation $x_1 \preceq x_{n+1}$ (see Method~\ref{method:connect} from Section~\ref{sec:example} for an example) so that:
$$\displaystyle \sum_{I\trianglelefteq \overline{\mathfrak{RF}} } q^{|I|} =\sum_{I\trianglelefteq \mathfrak{F}_L }q^{|I|}-\sum_{\substack{I\trianglelefteq \mathfrak{F}_L\\x_{n+1} \in I,\, x_1 \notin I} }q^{|I|}.$$
Similarly we have:
$$\displaystyle \sum_{I\trianglelefteq \overline{\mathfrak{RF}} } q^{|I|} =\sum_{J\trianglelefteq \mathfrak{F}_R }q^{|J|}-\sum_{\substack{J\trianglelefteq \mathfrak{F}_R\\x_1 \in J,\, x_{n+1} \notin J} }q^{|J|}.$$
This yields
\begin{eqnarray*}
\displaystyle \sum_{I \trianglelefteq \mathfrak{F}_L }q^{|I|}-\sum_{J \trianglelefteq \mathfrak{F}_R }q^{|J|} &=&\sum_{\substack{J\trianglelefteq \mathfrak{F}_R\\x_1 \in J,\, x_{n+1} \notin J} }q^{|J|}-\sum_{\substack{I\trianglelefteq \mathfrak{F}_L\\x_{n+1} \in I,\, x_1 \notin I} }q^{|I|}
\end{eqnarray*}
We observe that
\[\sum_{\substack{I\trianglelefteq \mathfrak{F}_L\\x_{n+1} \in I,\, x_1 \notin I} }q^{|I|} = \sum_{\substack{I\trianglelefteq F(\beta)\\x_{n} \in I,\, x_1 \notin I} }q^{|I|}, \qquad \sum_{\substack{J\trianglelefteq \mathfrak{F}_R\\x_1 \in J,\, x_{n+1} \notin J} }q^{|J|} = \sum_{\substack{J\trianglelefteq F(\beta)\\x_1 \in J,\, x_{n} \notin J} }q^{|J|}.\]
This is simply because if $x_1 \not\in I \trianglelefteq \mathfrak{F}_L$, then no nodes from the first segment can be in the ideal and this sets up a bijection between the two sets of ideals in the left equation above. The second equation may be similarly justified. We conclude that
\begin{eqnarray*}
\sum_{\substack{J\trianglelefteq \mathfrak{F}_R\\x_1 \in J,\, x_{n+1} \notin J} }q^{|J|}-\sum_{\substack{I\trianglelefteq \mathfrak{F}_L\\x_{n+1} \in I,\, x_1 \notin I} }q^{|I|} = \sum_{\substack{J\trianglelefteq F(\beta)\\x_1 \in J,\, x_{n} \notin J} }q^{|J|}-\sum_{\substack{I\trianglelefteq F(\beta)\\x_{n} \in I,\, x_1 \notin I} }q^{|I|}.
\end{eqnarray*}
By Claim 1, this difference is symmetric with center of symmetry at $n/2$.
\end{claimproof}
\begin{claim} \label{claim3} $\mathbf{C(n+1)}$ holds.
\end{claim}
\begin{claimproof}
Let $\alpha = (\alpha_1, \ldots, \alpha_{2s})$ be a composition of $n+1$ and let $\alpha_L = (\alpha_1+1, \ldots, \alpha_{2s})$ and $\alpha_R = (\alpha_1, \ldots, \alpha_{2s}+1)$. Let $\mathfrak{I}_L$ be the set of ideals of $\overline{F}(\alpha_L)$ and $\mathfrak{I}_R$ be the set of ideals of $\overline{F}(\alpha_R)$. The ideals that do not contain $x_1=x_{n+3}$ are in bijection as they contain no nodes from the first or last segments. That means we can limit our attention to the ideals that include $x_1=x_{n+3}$ and these can be seen as ideals of regular fences. Let $\tilde{\alpha}_L = (\alpha_1, \ldots, \alpha_{2s}-1)$ and $\tilde{\alpha}_R = (\alpha_1-1, \ldots, \alpha_{2s})$. We claim that
\[ \sum_{\substack{J\trianglelefteq \bar{\mathfrak{F}}(\alpha_L)\\x_1 = x_{n+3} \in J} }q^{|J|} = q\,\sum_{J\trianglelefteq \mathfrak{F}(\tilde{\alpha}_L) }q^{|J|}, \qquad \sum_{\substack{J\trianglelefteq \bar{\mathfrak{F}}(\alpha_R)\\x_1 = x_{n+3} \in J} }q^{|J|} = q\,\sum_{J\trianglelefteq \mathfrak{F}(\tilde{\alpha}_R) }q^{|J|}. \]
This is because the ideals of $\overline{F}(\alpha_L)$ that contain $x_1$ correspond exactly to ideals of $F((\alpha_1,\alpha_2,\ldots,\alpha_{2s}-1))$, only shifted by $q$. The other equality is similarly justified. Here, we slightly abuse notation to permit the first or last segments to possibly be $0$, something that claim $\mathbf{B(n)}$ permits us to do. Consequently
\begin{eqnarray*}
\sum_{J\trianglelefteq \bar{\mathfrak{F}}(\alpha_L)}q^{|J|} - \sum_{J\trianglelefteq \bar{\mathfrak{F}}(\alpha_R) }q^{|J|} &=& \sum_{\substack{J\trianglelefteq \bar{\mathfrak{F}}(\alpha_L)\\x_1 = x_{n+3} \in J} }q^{|J|} - \sum_{\substack{J\trianglelefteq \bar{\mathfrak{F}}(\alpha_R)\\x_1 = x_{n+3} \in J} }q^{|J|} \\
&=& q\left(\sum_{J\trianglelefteq \mathfrak{F}(\tilde{\alpha}_L) }q^{|J|} - \sum_{J\trianglelefteq \mathfrak{F}(\tilde{\alpha}_R) }q^{|J|}\right).
\end{eqnarray*}
In the final expressions, we have compositions of $n$, so by Claim \ref{claim2} the difference between the generating polynomials of their ideals is symmetric around $n/2$. Shifting by $q$ gives a rank sequence symmetric around $(n+2)/2$ as desired.
\end{claimproof}
\begin{claim} \label{claim4} $\mathbf{D(n+2)}$ holds.
\end{claim}
\begin{claimproof}
Let $\alpha = (\alpha_1, \ldots, \alpha_{2s})$ be a composition of $n+2$. Claim \ref{claim3} says that moving an element across a valley (as long as the number of parts does not change) preserves symmetry. Taking the vertical reflection of the poset (equivalently, shifting the parts of $\alpha$ cyclically by one step, cf.\ Lemma~\ref{lemma:basis}) yields another circular fence poset; denote the resulting composition by $\alpha^{(1)}$. Its rank polynomial is the reflection of the original rank polynomial.
\begin{eqnarray}\label{flip}
\bar{R}(\alpha) = \sum_0^{n+2} r_k q^k \Longrightarrow \bar{R}(\alpha^{(1)}) = \sum_0^{n+2} r_{n+2-k} q^k.\end{eqnarray}
This is because for any $k$, there is a bijection between lower ideals of size $k$ of $\bar{F}(\alpha)$ and the lower ideals of size $n+2-k$ for $\bar{F}(\alpha^{(1)})$, which is achieved by taking the set complement.
Claim \ref{claim3} then shows that moving an element of $\alpha$ across a peak preserves symmetry as well. Applying these operations consecutively, we may transform $\alpha$ to a composition of the form $(k, 1, \ldots, 1)$ with $2s$ parts. By Lemma~\ref{lemma:basis}, this has a symmetric rank polynomial.
\end{claimproof}
As noted above, $\mathbf{D(2)}$ and $\mathbf{D(3)}$ are true by direct computations and the implications
\[\mathbf{D(n)} \implies \mathbf{A(n)} \implies \mathbf{B(n)} \implies \mathbf{C(n+1)} \implies \mathbf{D(n+2)},\]
yield our theorem for all values of $n$.
\end{proof}
\begin{corollary} The polynomial $\overline{R}(\overline{\alpha}pha;q)$ is invariant under cyclic shifts of segments of $\overline{\alpha}pha$, so it is well defined over cyclic compositions.
\label{cor:cyclicshift}
\end{corollary}
\begin{proof}
If $\overline{\alpha}pha = (\overline{\alpha}pha_1, \ldots, \overline{\alpha}pha_{2s})$ is a composition of $n$ and $\beta = (\overline{\alpha}pha_2, \ldots, \overline{\alpha}pha_{2s}, \overline{\alpha}pha_1)$, then we have that their rank polynomials are mirror images,
\[\bar{R}(\overline{\alpha}pha) = \sum_{k=0}^n r_k q^k \Longrightarrow \bar{R}(\beta) = \sum_{k=0}^n r_{n-k} q^k,\]
as noted above in (\ref{flip}). Theorem \ref{thm:sym} yields the result.
\end{proof}
\section{Proof of Main Theorem}
Given a rank sequence $(r_0,r_1,\ldots,r_{n+1})$, the properties of it being top interlacing, bottom interlacing or symmetric and unimodal are determined by the relationship between elements whose indices are equidistant from $(n+1)/2$, which we will call $\text{mid}(\overline{\alpha}pha)$. In all three cases, if $|j-\text{mid}(\overline{\alpha}pha)|>|i-\text{mid}(\overline{\alpha}pha)|$, then $r_j\leq r_i$.
To this end, we will partition the inequalities that correspond to interlacing into two parts; the part that holds for both bottom and top interlacing sequences and the one that separates bottom and top interlacing sequences.
\begin{eqnarray*}
\text{(ineqA)} & & r_0 \le r_{n},\, r_1 \le r_{n-1}\ldots \qquad \quad r_{n+1} \le r_{1},\, r_{n} \le r_{2}\ldots\\
\text{(ineqB)}& & r_0\ge r_{n+1},\, r_1\ge r_{n},\, \ldots\\
\text{(ineqT)}& & r_0\le r_{n+1},\,r_1\le r_{n},\, \ldots
\end{eqnarray*}
Bottom interlacing sequences are ones that satisfy (ineqA) and (ineqB), top interlacing sequences are ones that satisfy (ineqA) and (ineqT), and symmetric unimodal ones are the ones that satisfy all three sets of inequalities.
\begin{proof}[Proof of Theorem \ref{thm:main}] Assume that the theorem holds for all compositions of length at most $n-1$. Let $\overline{\alpha}pha$ be a composition of size $n$.
\begin{claim} The rank sequence $r(\overline{\alpha}pha)=(r_0,r_1,\ldots,r_{n+1})$ satisfies (ineqA).
\end{claim}
\begin{claimproof} Following Methods $3$ and $4$ from Section \ref{sec:example}, we will consider two circular fences obtained by adding a new node to the fence of $\overline{\alpha}pha$. Let $\overline{F}(\overline{\alpha}pha_T)$ be given by adding a node $x_{0}$ lying above both $x_1$ and $x_{n+1}$. Let $(t_0,t_1,\ldots,t_{n+2})$ be the corresponding rank sequence, which is symmetric by Theorem \ref{thm:sym}.
Note that the ideals of the fence poset of $\overline{\alpha}pha$ correspond exactly to the ideals of $\overline{F}(\overline{\alpha}pha_T)$ that do not contain $x_0$. The ideals that contain $x_0$ also contain $x_1, x_{n+1}$ and anything that is lying below them.
\begin{eqnarray*}\displaystyle \overline{R}(\overline{\alpha}pha_T;q)={R}(\overline{\alpha}pha;q) +\sum_{\substack{I\trianglelefteq \overline{F}(\overline{\alpha}pha_T)\\x_{0} \in I}}q^{|I|}\\
{R}(\overline{\alpha}pha;q)=\overline{R}(\overline{\alpha}pha_T;q)-q^k R(\beta;q).
\end{eqnarray*} where $\beta$ is obtained from $\overline{\alpha}pha$ by deleting $x_1$, $x_{n+1}$ and anything below them, and $k$ is the number of nodes deleted $+1$.
Note that by the induction hypothesis, $R(\beta;q)$ is bottom or top interlacing, with $n-k+1$ nodes. For each symmetric pair $t_i$ and $t_{n+2-i}$, as ${n+2-i}$ is closer to the shifted center $k+(n-k+1)/2$, the amount subtracted from $t_{n+2-i}$ is at least as large as the amount subtracted from $t_i$, implying $r_i \geq r_{n+2-i}$ for $1\leq i \leq \lceil n/2 \rceil$.
Similarly we can add a node $x_0$ that lies below both $x_1$ and $x_{n+1}$. Let $\overline{F}(\overline{\alpha}pha_B)$ be the corresponding circular fence poset with rank sequence $(b_0,b_1,\ldots,b_{n+2})$. By the same reasoning as above we get:
\begin{eqnarray*}\displaystyle \overline{R}(\overline{\alpha}pha_B;q)=q{R}(\overline{\alpha}pha;q) +\sum_{\substack{I\trianglelefteq \overline{F}(\overline{\alpha}pha_B)\\x_{0} \notin I}}q^{|I|}\\
q{R}(\overline{\alpha}pha;q)=\overline{R}(\overline{\alpha}pha_B;q)- R(\beta;q).
\end{eqnarray*} where $\beta$ is obtained from $\overline{\alpha}pha$ by deleting $x_1$, $x_{n+1}$ and anything above them. Now the center is shifted left, so that the amount subtracted from $b_{n+2-i}$ is less than or equal to the amount subtracted from $b_i$. As we shifted by $q$, this means $r_{i-1}\leq r_{n+1-i}$ for $1\leq i \leq \lceil n/2 \rceil$.
\end{claimproof}
To finish our proof, we will look at whether the rank sequence is bottom interlacing, top interlacing, or symmetric.
To this end, we will add the new relation $x_1 \succeq x_{n+1}$, as in Method $2$ from Section \ref{sec:example}.
\begin{claim} If $\overline{\alpha}pha$ has an even number of segments, $R(\overline{\alpha}pha;q)$ is bottom interlacing.
\end{claim}
\begin{claimproof} Let us add the relation $x_1\succeq x_{n+1}$ to $\overline{\alpha}pha$. The resulting circular fence contains all ideals of ${F}(\overline{\alpha}pha)$ satisfying $x_1 \in I \Rightarrow x_{n+1} \in I$. The ones that are left over are exactly the ones that contain $x_1$ but not $x_{n+1}$. The inclusion of $x_1$ is equivalent to deleting the node $x_1$ and shifting by $q$ and not including $x_{n+1}$ is equivalent to deleting the node $x_{n+1}$ as well as anything above it. What we are left with is the rank polynomial of a smaller composition $\beta$ shifted by $q$. As we have an even number of parts, there is at least one node above $x_{n+1}$ (See Figure \ref{fig:2113left} for an example) which means $\beta$ has at most $n-2$ nodes. So $\text{mid}(\beta)$, even when shifted by $q$ lies strictly to the left of $\frac{n+1}{2}$.
Let $(c_0,c_1,\ldots,c_{n+1})$ be the rank sequence of the circular fence, symmetric by Theorem \ref{thm:sym}. In particular, for each $i\leq \frac{n+1}{2}$, $c_i=c_{n+1-i}$. Adding the rank sequence for $\beta$ to this gives the rank sequence of $\overline{\alpha}pha$. But $\text{mid}(\beta)$ laying strictly to the left of $\frac{n+1}{2}$ means for each $i$, what we add to $c_i$ is at least as large as what we add to $c_{n+1-i}$, giving us $r_i\geq r_{n+1-i}$.
\end{claimproof}
\begin{claim} If $\overline{\alpha}pha$ has an odd number of segments, $R(\overline{\alpha}pha;q)$ is bottom interlacing (respectively top interlacing) if and only if $R(\overline{\alpha}pha';q)$ is bottom interlacing (respectively top interlacing) where $\overline{\alpha}pha'=(\overline{\alpha}pha_1-1,\overline{\alpha}pha_2,\overline{\alpha}pha_3,\ldots,\overline{\alpha}pha_{s-1},\overline{\alpha}pha_s -1)$ is the composition of $n-2$ obtained from $\overline{\alpha}pha$ by subtracting $1$ from first and last segments (the fence of $\overline{\alpha}pha'$ starts with a downwards segment if the first part is zero).
\end{claim}
\begin{claimproof} Again, we consider the circular fence obtained by adding the relation $x_1\succeq x_{n+1}$ to $\overline{\alpha}pha$. The ideals of the circular fence are in bijection with ideals of ${F}(\overline{\alpha}pha)$ satisfying $x_1 \in I \Rightarrow x_{n+1} \in I$. We will count the ones containing $x_1$ but not $x_{n+1}$ separately. As we have an odd number of parts, $x_1$ is below $x_2$ and $x_{n+1}$ is above $x_{n}$, so that the ideals containing $x_1$ but not $x_{n+1}$ are in bijection with the ideals of $\overline{\alpha}pha'$ described above, with the rank sequence shifted by one. Deleting two nodes and shifting by one means that $\text{mid}(\overline{\alpha}pha')=\text{mid}(\overline{\alpha}pha)=\frac{n+1}{2}$. The rank polynomial of the circular fence is symmetric around $\frac{n+1}{2}$. Adding a bottom interlacing (respectively top interlacing) polynomial with the same $\text{mid}$ value gives us a bottom interlacing (resp. top interlacing) polynomial.
\end{claimproof}
Note that in the case of odd parts, if $\overline{\alpha}pha_1>\overline{\alpha}pha_s$ then removing pairs from both ends eventually gives us a fence with an even number of parts that is bottom interlacing, so $r(\overline{\alpha}pha)$ is bottom interlacing. If $\overline{\alpha}pha_1<\overline{\alpha}pha_s$, looking at $\overline{\alpha}pha^r=(\overline{\alpha}pha_s,\overline{\alpha}pha_{s-1},\ldots,\overline{\alpha}pha_1)$ reverses the rank sequence, so $r(\overline{\alpha}pha)$ is top interlacing. When $\overline{\alpha}pha_1=\overline{\alpha}pha_s$, removing pairs of nodes from both ends eventually gives us the fence for $(\overline{\alpha}pha_2,\overline{\alpha}pha_3,\ldots,\overline{\alpha}pha_{s-1})$ turned upside down, whose rank sequence is the reverse of $r(\overline{\alpha}pha_2,\overline{\alpha}pha_3,\ldots,\overline{\alpha}pha_{s-1})$.
\end{proof}
\section{Rank Unimodality of Circular Fences}\label{sect:rankuni}
Unlike the regular case, the rank polynomial of circular fences is not always unimodal. In the case of $\overline{\alpha}pha=( 1 , k , 1 , k )$, we get the rank sequence $[1, 2, 3, 4,\ldots, k, k+1, k, k+1, k,\ldots, 3, 2, 1]$ which makes a slight dip in the middle (refer to Figure \ref{fig:1417} for the rank lattice of $(1,5,1,5)$). We will next see that this issue can only happen when we have an even number of nodes, and a dip can only happen in the middle term of the rank sequence.
\begin{prop} \label{prop:unimod} If $\overline{\alpha}pha=(\overline{\alpha}pha_1,\overline{\alpha}pha_2,\ldots,\overline{\alpha}pha_{2s})$ has an odd number of nodes, then $\overline{R}(\overline{\alpha}pha;q)$ is unimodal. If $\overline{\alpha}pha$ is of size $2t$ for some $t \in \mathbb{N}$, then we have $r_i\geq r_{i-1}$ for all $i<t$.\end{prop}
\begin{proof} Take a composition $\overline{\alpha}pha$ of $n$ and let $T$ be a maximal node in $\overline{F}(\overline{\alpha}pha)$. We can partition ideals of $\overline{\alpha}pha$ into two parts: those that contain $T$ (and necessarily anything below it), and those that do not contain $T$. As deleting $T$ does not place any restrictions on other nodes, the ones that do not contain $T$ correspond to a regular fence of a composition $\beta$ with $n-1$ nodes, unimodal with $\text{mid}(\beta)=\frac{n-1}{2}$ by Theorem \ref{thm:main}. The ones that contain $T$ also contain the $k\geq 2$ nodes that lie below $T$ in $\overline{F}(\overline{\alpha}pha)$, and they are in bijection with the ideals of $F(\gamma)$ obtained from $\overline{F}(\overline{\alpha}pha)$ by deleting those nodes. The rank polynomial for $\overline{F}(\overline{\alpha}pha)$ satisfies:
\begin{eqnarray*}
\overline{R}(\overline{\alpha}pha;q)&={R}(\beta;q)+q^{k+1}R(\gamma;q).
\end{eqnarray*}
Denote the rank sequences of $\overline{\alpha}pha$ and $\beta$ by $(r_0,r_1,\ldots,r_n)$ and $(b_0,b_1,\ldots,b_{n-1})$ respectively. We have $b_{n-i}\geq b_{n-i+1}$ for all $1\leq i\leq\frac{n-1}{2}$ by unimodality (we take $b_n=0$). As $R(\gamma;q)$ is also rank unimodal, and $\text{mid}(\gamma)$, after the shift by $q^{k+1}$, lies strictly to the right of $\text{mid}(\beta)=\frac{n-3}{2}$, the value we add to $b_{n-i}$ is at least as large as the value we add to $b_{n-i-1}$, giving us $r_{n-i}\geq r_{n-i+1}$ and by symmetry $r_i\geq r_{i-1}$ for all $i$ satisfying $1\leq i\leq\frac{n-3}{2}$.
If $n$ is odd, this means unimodality. If $n=2t$ is even, we do not get any information about the ordering of $r_{t-1}$ and $r_{t}$, so it is possible to have a dip in the middle, which indeed happens for $\overline{\alpha}pha = ( 1 , k , 1 , k )$ and $( k , 1 , k, 1)$ for $k \in \mathbb{N}$.
\end{proof}
\begin{conj} For any $\overline{\alpha}pha \neq ( 1 , k , 1 , k )$ or $( k , 1 , k, 1)$ for some $k$, the rank sequence $\overline{R}(\overline{\alpha}pha;q)$ is unimodal.
\end{conj}
If the segments were fully independent, we would naturally end up with a unimodal polynomial. The connections of maximal and minimal entries work to add some additional relations so that some configurations are not allowed, a relatively small number. What this conjecture is saying is, when we look at a larger number of parts, the configurations disallowed are not sufficient to offset the underlying unimodality. Though we were unable to prove this in all generality, the next result shows that if there are exceptions, they are indeed very rare.
\begin{lemma} Let $T$ be a maximal node in the cyclic fence $\overline{F}(\overline{\alpha}pha)$, and let $F_{T^-}$ be the (possibly upside down) fence obtained by deleting $T$. If the rank polynomial $R_{T^-}(q)$ corresponding to $F_{T^-}$ is top interlacing, then $\overline{R}(\overline{\alpha}pha;q)$ is rank unimodal.
\end{lemma}
\begin{proof} We have already shown unimodality when the number of nodes is odd, so let us focus on the case $\overline{\alpha}pha$ is a composition of $2t$. Let $F_{T^+}$ denote the fence obtained by deleting $T$ and any node below $T$ with the corresponding rank polynomial $R_{T^+}(q)$ so that we have:
$$\overline{R}(\overline{\alpha}pha;q)= R_{T^-}(q)+q^{k+1}R_{T^+}(q) $$ where $k$ is the number of nodes below $T$ in $\overline{F}(\overline{\alpha}pha)$.
As $F_{T^-}$ is top interlacing, its rank sequence $(r_0,r_1,\ldots,r_{2t-1})$ satisfies $r_{t-1}\leq r_{t}$. The rank sequence of $q^{k+1}R_{T^+}(q)$ is unimodal with the largest entry falling strictly to the right of position $t$, so that the number we add to $r_{t-1}$ to obtain the $t-1$st entry of the rank sequence of $\overline{F}(\overline{\alpha}pha)$ is at least as large as the number we add to $r_{t}$. As we already showed the only issue might be in the middle in Proposition \ref{prop:unimod}, we are done.
\end{proof}
\begin{corollary} If $\overline{\alpha}pha=(\overline{\alpha}pha_1,\overline{\alpha}pha_2,\ldots,\overline{\alpha}pha_{2s} )$ has two consecutive segments larger than one, or $3$ consecutive segments $k, 1, l$ with $|k-l|>1$, then $\overline{R}(\overline{\alpha}pha;q)$ is unimodal.
\end{corollary}
\begin{proof} If $\overline{\alpha}pha$ has two consecutive segments larger than one, we can assume without loss of generality, by Corollary \ref{cor:cyclicshift}, that they meet at a top bead $T$. Deleting $T$ gives an upside down fence of an odd number of parts, so it is top interlacing. Similarly, in the case where we have consecutive segments $k, 1, l$ with $|k-l|>1$ by symmetry we can assume that $k$ is larger and $k$ and $1$ meet in a top node $T$. Deleting $T$ gives a fence with an even number of parts, first of which is $l$ and the last is $k-1$. As $k-1$ is strictly larger than $l$, the corresponding rank polynomial is top interlacing.
\end{proof}
The leftover cases can be fully analysed when we have a small number of parts. For example, if we have four parts, the only cases that are not covered are of forms $(1,k,1,k)$ and $(1,k,1,k+1)$. If we have $6$ parts, possible counter examples to unimodality must be of one of these forms: $(1,k,1,k,1,k)$,$(1,k,1,k,1,k+1)$,$(1,k,1,k+1,1,k+1)$,$(1,1,2,1,1,2)$.
\section{Rowmotion on Circular Fences}
We can identify the ideals of a fence with antichains on that fence, as any ideal is uniquely described by its maximal elements. Rowmotion acts on ideals by taking an ideal $I$ to the ideal $\rho(I)$ corresponding to the antichain given by the minimal elements of the complement of $I$. In their recent paper \cite{rowmotion}, Elizalde, Plante, Roby and Sagan explored rowmotion on fences, and gave homomesy and orbomesy results, many of which hold for the circular case as well.
In particular they gave a bijection between the orbits of rowmotion on $F(\overline{\alpha}pha)$ and an object called an $\overline{\alpha}pha$-tiling. Here, we introduce a natural analogue, the class of \emph{circular} $\overline{\alpha}pha$-tilings:
\begin{defn} For a composition $\overline{\alpha}pha=(\overline{\alpha}pha_1,\overline{\alpha}pha_2,\ldots,\overline{\alpha}pha_{2s})$, a circular $\overline{\alpha}pha$-tiling is a tiling of a rectangle $R_{2s}$ with $2s$ rows labeled $1,2,\ldots,2s$ from top to bottom and an infinite number of columns with yellow $1 \times 1$ tiles, red $2 \times 1$ tiles which are allowed to wrap around and black $1\times (\overline{\alpha}pha_i-1)$ tiles in row $i$ satisfying the following properties:
\begin{enumerate}[label=\textbf{(\alph*)}]
\item If there is at least one black tile in a row, then when the red tiles are ignored, the black and yellow tiles alternate in that row.
\item If $i$ is odd, there is a red tile in a column covering rows $i$ and $i+1$ if and only if the next column contains two yellow tiles in those two rows.
\item If $i$ is even, there is a red tile covering rows $i$ and $i+1$ if $i<2s$ and wrapping around to cover $2s$ and $1$ if and only if the previous column contains two yellow tiles in those rows.
\end{enumerate}
\end{defn}
We say that a red tile \emph{starts} at row $i$ if it covers $i, i+1$ or $i=2s$ and it covers $2s$ and $1$. Though it is by no means clear from the definition, the connection with rowmotion orbits which we will prove next in Lemma~\ref{lem:rowmotionbijection} implies that all such tilings are periodic. The period of an orbit $\mathcal{O}$ will be called the \emph{size} of $\mathcal{O}$, denoted $|\mathcal{O}|$. We will visually represent tilings by drawing one such period and identify tilings that are cyclic shifts of each other horizontally.
Let the map $\overline{\phi}$ take an ideal of $\overline{F}(\overline{\alpha}pha=(\overline{\alpha}pha_1,\overline{\alpha}pha_2,\ldots,\overline{\alpha}pha_{2s}))$ to a $2s\times 1$ rectangle where box $i$ is colored yellow if the $i$th segment contains no maximal elements of $I$, red if it contains a shared maximal element and black if it contains an unshared maximal element. $\overline{\phi}$ can be seen as a map taking orbits of rowmotion to infinite rectangles of $2s$ rows by seeing each iteration of the rowmotion operation as a new column (see Figure \ref{fig:rowmotion2113} for an example). The following result directly follows from the proof of the corresponding Lemma 2.2 in \cite{rowmotion} and contains no new ideas. The proof is therefore omitted.
\begin{lemma}\label{lem:rowmotionbijection} The map $\overline{\phi}$ is a bijection between orbits of rowmotion on $\overline{F}(\overline{\alpha}pha)$ and circular $\overline{\alpha}pha$-tilings.
\end{lemma}
For the following discussion, we will identify each tiling with its corresponding orbit and use the two interchangeably. Note that the placement of red tiles uniquely determines an orbit as long as there is at least one red tile in a row, as yellow and black tiles alternate in the leftover spaces. When describing all orbits of a particular fence, we will often talk about the placement of the red tiles, leaving it up to the reader to verify that the construction indeed gives a valid orbit.
\begin{figure}
\caption{A circular $(2,1,1,3)$-tiling and the corresponding orbit of rowmotion on $\overline{F}(2,1,1,3)$.}
\label{fig:rowmotion2113}
\end{figure}
A statistic $\text{st}$ is said to be $d$-mesic (with respect to a group operation) if its average is $d$ on every orbit, and it is said to be homomesic if it is $d$-mesic for some $d$. We will consider the following statistics on orbits of rowmotion on cyclic fences:
\begin{eqnarray*}
\overline{\mathcal{M}}_x(\mathcal{O})=\text{number of times $x$ occurs as a maximal element in } \mathcal{O},\qquad &&\overline{\mathcal{M}}(\mathcal{O})=\sum_x\overline{\mathcal{M}}_x(\mathcal{O}),\\
\overline{\chi}_x(\mathcal{O})=\text{number of times $x$ occurs in } \mathcal{O},\qquad &&\overline{\chi}(\mathcal{O})=\sum_x\overline{\chi}_x(\mathcal{O}).
\end{eqnarray*}
We can read the values of the statistics described directly from the tiling. Let $b_i$ and $w_i$ denote the numbers of black and white tiles on row $i$ in one period of $\mathcal{O}$ respectively, and $r_i$ denote the number of red tiles starting in row $i$. Note that as black tiles only occur alternating with white tiles, $b_i=w_i$ in any row where $b_i$ is non-zero. So, for any row $i$, the total $w_i(\overline{\alpha}pha_i)+r_i+r_{i-1}$ is equal to the period of $\mathcal{O}$. An unshared element $x$ on segment $i$ occurs as a maximal element once per every black tile on row $i$, and the occurrences of shared elements correspond to red tiles.
If $\overline{\alpha}pha_i\geq 2$ and $x$ is the $j$th smallest unshared element on segment $i$, we get:
\begin{eqnarray*}
\overline{\mathcal{M}}_x(\mathcal{O})= b_i, \qquad \overline{\chi}_x(\mathcal{O})=\begin{cases}
b_i(\overline{\alpha}pha_i-j)+r_i& \text{if $i$ is odd}\\
b_i(\overline{\alpha}pha_i-j)+r_{i-1}& \text{if $i$ is even}.
\end{cases}
\end{eqnarray*}
Similarly for a maximal element $T$ lying between segments $2i+1$ and $2i+2$, and a minimal element $B$ lying between segments $2i$ and $2i+1$ (cyclically), we have:
\begin{eqnarray*}
\overline{\chi}_T(\mathcal{O})&=& r_{2i+1}=\overline{\mathcal{M}}_T(\mathcal{O}), \quad \quad
\overline{\mathcal{M}}_B(\mathcal{O})=r_{2i}, \\ \overline{\chi}_B(\mathcal{O})&=&|\mathcal{O}|-r_{2i}=r_{2i-1}+w_{2i}(\overline{\alpha}pha_{2i})=r_{2i+1}+w_{2i+1}(\overline{\alpha}pha_{2i+1}).
\end{eqnarray*}
By summing up these values over all nodes of the fence, we get the following formulas (using the convention $\overline{\alpha}pha_{2s+1}=\overline{\alpha}pha_1$):
\begin{eqnarray}
\overline{\mathcal{M}}(\mathcal{O})&=&\sum_{i\leq2s}b_i(\overline{\alpha}pha_i-1)+r_i,\label{eq:maxorbit}\\
\overline{\chi}(\mathcal{O})&=&s|\mathcal{O}|+\sum_{i\leq2s}b_i\binom{\overline{\alpha}pha_i}{2}+\sum_{i\leq s}r_{2i-1}(\overline{\alpha}pha_{2i-1}+\overline{\alpha}pha_{2i}-1)-r_{2i}.\\
&=&m/2\,|\mathcal{O}|-\sum_{i\leq2s}(-1)^i r_i(\overline{\alpha}pha_i+\overline{\alpha}pha_{i+1})/2\label{eq:sumorbit}
\end{eqnarray}
We have shown in Theorem \ref{thm:sym} that the rank polynomial for circular fences is always symmetric. This means that if the statistic $\overline{\chi}$ is homomesic, it is necessarily $m\slash 2$-mesic. So in a way the last part of Equation \ref{eq:sumorbit} describes how far from a homomesy an orbit is. If $\overline{\alpha}pha_i+\overline{\alpha}pha_{i+1}$ is the same for all $i$, as in the example of $(3,1,3,1)$ below, then $\overline{\chi}$ is $m\slash 2$-mesic if and only if $\sum_{i\leq s} r_{2i}-r_{2i-1}=0$ for all orbits.
\begin{example}[$\overline{F}(3,1,3,1)$.]\label{ex:3131}
On the small case $(3,1,3,1)$, row motion has $3$ orbits, one of size $5$ and two of size $9$.
\begin{tikzpicture}[scale=.45]
\draw(0,0)grid(5,-4);
\rrec{1}{1} \rrec{3}{1} \rrec{2}{3} \rrrec{4}{3} \yrec{1}{2} \yrec{2}{2} \yrec{3}{2} \yrec{4}{2}
\brec{1}{4}{2} \brec{3}{4}{2} \yrec{2}{4} \yrec{2}{5} \yrec{4}{4} \yrec{4}{5}
\draw node at (2.5,-4.5) {$\mathcal{O}_1$};
\end{tikzpicture} \qquad \qquad
\begin{tikzpicture}[scale=.45]
\draw(0,0)grid(9,-4);
\rrec{1}{1}\rrrec{4}{3}\rrec{2}{4}\rrec{1}{6}\rrec{3}{7}
\yrec{4}{1}\yrec{4}{2}\yrec{4}{5}\yrec{4}{6}\yrec{4}{4} \yrec{4}{8}
\yrec{2}{2}\yrec{2}{3}\yrec{2}{5}\yrec{2}{7}\yrec{2}{8}
\yrec{1}{2}\yrec{1}{7}\yrec{3}{3}\yrec{3}{8}\brec{1}{4}{2}\brec{1}{8}{2}\brec{3}{1}{2}
\brec{3}{5}{2} \yrec{2}{9} \yrec{4}{9} \rrec{2}{9}
\draw node at (4.5,-4.5) {$\mathcal{O}_2$};
\end{tikzpicture}
\qquad \qquad
\begin{tikzpicture}[scale=.45]
\draw(0,0)grid(9,-4);
\rrec{3}{1}\rrec{2}{3}\rrrec{4}{4}\rrec{3}{6}\rrec{1}{7}
\yrec{2}{1}\yrec{2}{2}\yrec{2}{5}\yrec{2}{6}\yrec{2}{4} \yrec{2}{8}
\yrec{4}{2}\yrec{4}{3}\yrec{4}{5}\yrec{4}{7}\yrec{4}{8}
\yrec{3}{2}\yrec{3}{7}\yrec{1}{3}\yrec{1}{8}\brec{3}{4}{2}\brec{3}{8}{2}\brec{1}{1}{2} \rrrec{4}{9}
\brec{1}{5}{2} \yrec{2}{9}
\draw node at (4.5,-4.5) {$\mathcal{O}_3$};
\end{tikzpicture} \centering
\begin{flushleft}
We can calculate the values of $\overline{\chi}$ and $\overline{\mathcal{M}}$ statistics via Equations $\ref{eq:maxorbit}$-$\ref{eq:sumorbit}$: \end{flushleft}
\begin{eqnarray*}
\overline{\mathcal{M}}(\mathcal{O})=2(b_1+b_3)+(r_1+r_2+r_3+r_4),& \quad \quad &\overline{\chi}(\mathcal{O})=4|\mathcal{O}|+4(r_1-r_2+r_3-r_4),\\
\overline{\mathcal{M}}(\mathcal{O}_1)=2(2)+4=8,& \quad \quad & \overline{\mathcal{M}}(\mathcal{O}_2)=\overline{\mathcal{M}}(\mathcal{O}_3)=2(4)+6=14,\\
\overline{\chi}(\mathcal{O}_1)=4(5)=24,& \quad \quad & \overline{\chi}(\mathcal{O}_2)=\overline{\chi}(\mathcal{O}_3)=4(9)=36.
\end{eqnarray*}
\begin{flushleft}
Note that the second $9$-orbit can be obtained from the first by shifting rows cyclically by $2$ so it makes sense that they have the same statistics. The statistic $\overline{\chi}$ is $4$-mesic.
\end{flushleft}
\end{example}
Applying the formulas for the $\overline{\mathcal{M}}$ and $\overline{\chi}$ statistics, we see that many homomesy results from the non-circular fences also apply for the circular ones:
\begin{prop}\label{prop:homomesy} For a composition $\overline{\alpha}pha=(\overline{\alpha}pha_1,\overline{\alpha}pha_2,\ldots,\overline{\alpha}pha_{2s})$, rowmotion operation on the circular fence $\overline{F}(\overline{\alpha}pha)$ has the following properties:
\begin{enumerate}
\item If $x$ and $y$ are unshared elements on the same segment, $\overline{\mathcal{M}}_x-\overline{\mathcal{M}}_y$ is $0$-mesic.
\item For an unshared element $x$ of segment $i$ that lies between a maximal element $T$ and a minimal element $B$, $\overline{\mathcal{M}}_x \overline{\alpha}pha_i+\overline{\mathcal{M}}_{T}+\overline{\mathcal{M}}_{B}$ is $1$-mesic.
\item For a maximal element $T$ lying between segments $2i+1$ and $2i+2$, and a minimal element $B$ lying between segments $2j$ and $2j+1$ (cyclically), if $r_{2i+1}=r_{2j}$ for all orbits $\mathcal{O}$, then $\overline{\chi}_T+\overline{\chi}_B$ is $1$-mesic.
\item If $\overline{\alpha}pha_i=2$ for all $i$, then $\overline{\mathcal{M}}$ is $s$-mesic.
\end{enumerate}
\end{prop}
We have previously noted that taking setwise complements maps the ideals of $\overline{F}(\overline{\alpha}pha)$ to ideals of $\overline{\text{sh}(\overline{\alpha}pha)}$, the fence of a cyclic shift of $\overline{\alpha}pha$ by one step. We can also see $\kappa$ as the map taking a circular $\overline{\alpha}pha$-tiling, doing a vertical cyclic shift of one step and a horizontal flip to get a circular $\text{sh}(\overline{\alpha}pha)$-tiling. Figure \ref{fig:shrowmotion2113} shows the action of $\kappa$ on the orbit seen in Figure \ref{fig:rowmotion2113}.
As the rowmotion is defined via the complement operation, it is quite well behaved under this map.
\begin{lemma} \label{lem:rowmotionflip} Let $\kappa$ denote the complement map between ideals of $\overline{F}(\overline{\alpha}pha)$ and $\overline{F}(\text{sh}(\overline{\alpha}pha))$. Then for any ideal $I$ we have $\kappa(\partial(I))=\partial^{-1}(\kappa(I))$, meaning $\kappa$ maps orbits to orbits. In particular, if $|\overline{\alpha}pha|=m-n$, for any orbit $\mathcal{O}$ of rowmotion on $\overline{F}(\overline{\alpha}pha)$ we have:
\begin{eqnarray*}
\overline{\mathcal{M}}(\mathcal{O})&=&\overline{\mathcal{M}}(\kappa(\mathcal{O}))\\
\overline{\chi}(\mathcal{O})+ \overline{\chi}(\kappa(\mathcal{O}))&=&{n}|\mathcal{O}|.
\end{eqnarray*}
\end{lemma}
\begin{proof} As $\overline{\chi}(I)+\overline{\chi}(\kappa(I))=m$ for any ideal $I$, the second statement is trivial. The first is slightly more complicated as we do not necessarily have $\overline{\mathcal{M}}(I)=\overline{\mathcal{M}}(\kappa(I))$; for example if $I$ is the empty ideal, $\overline{\mathcal{M}}(I)=0$ whereas $\overline{\mathcal{M}}(\kappa(I))=s$, where $2s$ is the length of $\overline{\alpha}pha$. However, as the total number of red and black tiles remains unchanged under $\kappa$, the result follows by Equation \ref{eq:maxorbit}.
\end{proof}
\begin{figure}
\caption{A circular $(3,2,1,1)$-tiling and the corresponding orbit}
\label{fig:shrowmotion2113}
\end{figure}
When we have only two parts of sizes $a$ and $b$, the rank lattice becomes an $a\times b$ lattice with added minimum and maximum elements (see Figure \ref{fig:48latticechains} for an example). The bijection with tilings allows us to easily describe the orbits in terms of the lcm and gcd of the two segments.
\begin{prop} Let $d=\text{gcd}(a,b)$ and $m=\text{lcm}(a,b)$. Then, rowmotion on $\overline{F}(a,b)$ has a unique orbit of size $m+2$ and $d-1$ orbits of size $m$, where $\overline{\mathcal{M}}$ takes the values $2(a+b)(m+2)/d$ and $2m-(a+b)/d$ respectively. The statistic $\overline{\chi}$ is $(a+b)/2$-mesic.
\end{prop}
\begin{proof} There is a unique tiling of size $m+2$ that contains red tiles, one starting on the first row of column $1$, and the other starting at the third row of column $3$. The rest of the orbits contain no red tiles, and are given by the $d-1$ ways of placing the white tiles so that they always fall on different columns (see Figure \ref{fig:48latticeorbits} for an example). In all orbits $w_1=b/d$ and $w_2=a/d$, and plugging in these values to equations \ref{eq:maxorbit} and \ref{eq:sumorbit} allows us to calculate $\overline{\mathcal{M}}$ and $\overline{\chi}$.
\end{proof}
Another way to visualise the orbits in this case is to think of them as walks on the rank lattice using the moves $(1,1)$,$(a,-1)$,$(-1,b)$ and the special move that connects the maximum and the minimum, refer to Figure \ref{fig:48latticeorbits} for the example of $\overline{F}(4,8)$.
\begin{figure}
\caption{The four orbits of rowmotion on $\overline{F}(4,8)$.}
\label{fig:48latticeorbits}
\end{figure}
\subsection{Other Cases With Few Segments}
When the number of segments is small, it is often possible to build all orbits from ones of smaller size, by \emph{dilating} orbits for partitions of small size, where we add new columns that lengthen the black tiles without creating problems. Figure \ref{fig:4dilation} shows an example where adding new columns to the marked spaces is how we build the orbits for larger partitions. In this section, we will use ``dilation'' arguments to fully describe the action of rowmotion on $\overline{F}(1,1,a,1)$ and $\overline{F}(a,1,a,1)$. The idea can be extended to use the orbits for $(k,1,a,1)$ to build the orbits of $(k,1,a+k+3,1)$ for general $k$.
\begin{figure}
\caption{We can add black-yellow-black-yellow columns to the $6$-orbit for $\overline{F}(4,1,4,1)$.}
\label{fig:4dilation}
\end{figure}
\begin{thm} For $a\geq 2$, row motion on $\overline{F}(a,1,a,1)$ has $a-2$ small orbits $\mathcal{O}_s$ of size $a+2$ with $\overline{\mathcal{M}}(\mathcal{O}_s)=2a+2$ and $2$ large orbits $\mathcal{O}_l$ of size $2a+3$ with $\overline{\mathcal{M}}(\mathcal{O}_l)=4a+2$. The statistic $\overline{\chi}$ is $(a+1)$-mesic.
\end{thm}
\begin{proof} The orbits for the case $a=3$ were already examined in Example \ref{ex:3131}. For larger $a$, all small orbits have one red tile starting at each row. For any $0\leq k\leq a-4$ we get a unique $(a+2)$-orbit with red tiles starting on rows $1,2,3,4$ placed on columns $1$, $6+k$, $4+k$ and $3$ respectively. All these orbits can be obtained by dilating the $6$-orbit of rowmotion on $\overline{F}(4,1,4,1)$ shown on Figure \ref{fig:4dilation}, left by adding columns with alternating black and yellow tiles. The only other $(a+2)$-orbit has red tiles starting on rows $1$ and $3$ on column $1$, and red tiles starting on rows $2$ and $4$ on column $3$.
The larger orbits can similarly be obtained by dilating the $9$-orbits shown in Example \ref{ex:3131}. The placement of starting position of red tiles on one of the large orbits is : columns $1$ and $a+3$ on row $1$, $4$ in row $2$,$a+4$ in row $3$ and $3$ in row $4$. The other orbit is obtained by shifting rows cyclically by $2$, so the positions for rows $1$ and $2$ are flipped with the positions for rows $3$ and $4$ respectively.
As all $(a+2)^2$ elements are represented, there can be no more orbits. Equations \ref{eq:maxorbit}-\ref{eq:sumorbit} give the data about the statistics.
\end{proof}
\begin{wrapfigure}{r}{0.35\linewidth}\centering
\begin{tikzpicture}[scale=.45]
\draw (-4,0) rectangle(4,-4);
\draw (-4,0) grid (-3,-4);
\draw (-1,0)grid(4,-4);
\rrec{1}{2}\rrrec{4}{4}\yrec{1}{3}\yrec{2}{1}\yrec{2}{3}\yrec{2}{4}\yrec{4}{1}\yrec{2}{-3}\yrec{4}{-3}\yrec{2}{0}\yrec{4}{0}
\yrec{4}{2}\yrec{4}{3}
\brec{3}{-.5}{5.5}\brec{1}{-.5}{2.5}
\brec{3}{-3}{3.5}\brec{1}{-3}{3.5}
\draw[ultra thick,dashed](-3,-1.5)--(-1,-1.5)(-3,-3.5)--(-1,-3.5);\draw[thick,yellow,dashed](-3,-1.5)--(-1,-1.5)(-3,-3.5)--(-1,-3.5);
\draw [
thick,
decoration={
brace,
mirror,
},
decorate
] (-4,-4.2) -- (1,-4.2);
\node at (-1.5,-4.8) {$k-1$};
\end{tikzpicture} \end{wrapfigure}
For a fixed $k$ value, we can obtain all orbits of $\overline{F}(k,1,a+k+2,1)$ from orbits of $\overline{F}(k,1,a,1)$ by adding new pieces that extend the black tiles on the third row, while adding an extra black tile to row two. The addition of the $4\times (k+2)$ piece on the right to extend each black tile in row $3$, shifting cyclically if necessary achieves exactly this. We will now use this process to calculate the orbits of rowmotion on $\overline{F}(1,1,a,1)$.
\begin{thm} If $a\equiv 0$ or $2$ mod $3$, rowmotion on $\overline{F}(a,1,1,1)$ has a unique orbit $\mathcal{O}$ of size $3a+4$ and $\overline{\chi}$ is homomesic. If $a\equiv 1$ mod $3$, then rowmotion has $3$ orbits, of sizes $a+2$, $a+1$ and $a+1$ and $\overline{\chi}$ values $(a+2)(a+3)/2$, $(a+2)(a+3)/2$ and $a(a+3)/2$ respectively.
\end{thm}
\begin{proof} For the cases when $a\equiv 0$ or $2$ mod $3$, it is possible to extend the unique orbits for $\overline{F}(1,1,2,1)$ and $\overline{F}(1,1,3,1)$ (see Figure~\ref{fig:rowmotionona111}) to get the orbits for $\overline{F}(1,1,2+3t,1)$ and $\overline{F}(1,1,3+3t,1)$, $t\in\mathbb{N}$. Considering the sizes shows us that no other orbits exist. Similarly, the three orbits of $\overline{F}(1,1,1,1)$ can be extended to get orbits for $\overline{F}(1,1,1+3t,1)$ as seen in Figure~\ref{fig:rowmotion2ona111}. From Table~\ref{tab:smallcases} we can see that $\overline{F}(1,1,a,1)$ has a total of $3a+4$ ideals, so these are all the orbits. To calculate the $\overline{\chi}$ values, we can use Equation~\ref{eq:sumorbit}, which simplifies to $\overline{\chi}(\mathcal{O})=(a+3)|\mathcal{O}|/2 +(r_1-r_4)+(a+1)/2(r_3-r_2)$ for the particular case of $\overline{F}(1,1,a,1)$.
\end{proof}
\begin{figure}
\caption{Orbits for $\overline{F}(1,1,2,1)$ and $\overline{F}(1,1,3,1)$.}
\label{fig:rowmotionona111}
\end{figure}
\begin{figure}
\caption{Orbits for $\overline{F}(1,1,1,1)$.}
\label{fig:rowmotion2ona111}
\end{figure}
A similar process shows that for $a\geq 2$, the number of orbits of rowmotion on $\overline{F}(2,1,a,1)$ depends on what $a$ is modulo $4$. If $a\equiv 1$ we get a unique orbit of size $4a+6$. If $a \equiv 3$, we get three orbits, two of size $a+1$ and one of size $2a+4$. If $a$ is even, we get two orbits of sizes $a+2$ and $3a+4$ respectively. The statistics for these orbits can be found in Table~\ref{tab:rowmotion}. As mentioned before, a similar process can be used to characterize orbits of fences of size $(k,1,a,1)$ where even more cases would be involved.
\begin{table}\center
\begin{tabular}{||c|c|c|c|c||}
\hline
Composition& Orbit Count& $|\mathcal{O}|$& $\overline{\mathcal{M}}(\mathcal{O})$&$\overline{\chi}(\mathcal{O})$ \\ [0.5ex]
\hline\hline
\multirow{2}{*}{\begin{tabular}{c}
$(a,b)$: \\ gcd$(a,b)=m$
\end{tabular}}& $1$&$m+2$&$2m(a+b)(m+2)/ab$&$|\mathcal{O}|n/2$\\\cline{2-5}
& $(ab/m) -1$ &$m$&$2m-(a+b)m/ab$&$|\mathcal{O}|n/2$\\\hline
\multirow{1}{*}{$(a\neq3t+1,1,1,1)$}& $1$&$3a+4$&$5a+6$&$|\mathcal{O}|n/2$\\\hline
\multirow{3}{*}{$(a=3t+1,1,1,1)$}& $1$&$a+2$&$5t+4$&$|\mathcal{O}|n/2$\\\cline{2-5}
& $1$&$a+1$&$5t+2$&$(|\mathcal{O}|+1)n/2$\\\cline{2-5}
& $1$&$a+1$&$5t+2$&$(|\mathcal{O}|-1)n/2$\\\hline
\multirow{2}{*}{$(a=4t-2,1,2,1)$}& $1$&$a+2$&$7t-1$&$|\mathcal{O}|n/2$\\\cline{2-5}& $1$&$3a+4$&$21t-7$&$|\mathcal{O}|n/2$\\\hline
\multirow{3}{*}{$(a=4t-1,1,2,1)$}& $1$&$a+1$&$7t-1$&$|\mathcal{O}|n/2-(a+1)/2$\\\cline{2-5}&
$1$&$a+1$&$7t-1$&$|\mathcal{O}|n/2+(a+1)/2$\\\cline{2-5}&
$1$&$2a+4$&$14t+1$&$|\mathcal{O}|n/2$\\\hline
\multirow{2}{*}{$(a=4t,1,2,1)$}& $1$&$a+2$&$7t+2$&$|\mathcal{O}|n/2$\\\cline{2-5}&
$1$&$3a+4$&$21t+4$&$|\mathcal{O}|n/2$\\\hline
\multirow{1}{*}{$(a=4t+1,1,2,1)$}& $1$&$4a+6$&$7a+6$&$|\mathcal{O}|n/2$\\\hline
\multirow{2}{*}{$(a,1,a,1)$}& $a-2$&$a+2$&$2a+2$&$|\mathcal{O}|n/2$\\\cline{2-5}
& $2$&$2a+3$&$4a+2$&$|\mathcal{O}|n/2$\\\hline
\multirow{5}{*}{$(a,a,a,a)$}& $a^3-4a^2+6a-3$&$a$&$4a-4$&$|\mathcal{O}|n/2$\\\cline{2-5}
& $a$&$a+1$&$4a-2$&$(|\mathcal{O}|-1)n/2$\\\cline{2-5}
& $a$&$a+1$&$4a-2$&$(|\mathcal{O}|+1)n/2$\\\cline{2-5}
& $1$&$a+2$&$4a$&$|\mathcal{O}|n/2$\\\cline{2-5}
& $2a-2$&$2a^2$&varied&varied \\\hline
\end{tabular}
\caption{The behaviour of rowmotion on circular fences for small examples, where $n$ denotes the size of $\overline{\alpha}$}
\label{tab:rowmotion}
\end{table}
Looking at Table~\ref{tab:rowmotion}, we see that even when we focus on the cases with at most $4$ parts, it is difficult to predict the numbers and lengths of the orbits in general. The connection to modular arithmetic is prevalent, but it is not always as simple as looking at the gcd of the parts, as in the case of two parts. When we consider partitions of type $(a,1,k,1)$ with $a>k$ for example, the structure seems to depend on $\text{gcd}(a,k+2)$ instead. Note that the statistic $\overline{\chi}$ is quite well behaved, with orbits or pairs of orbits averaging out to $|\mathcal{O}|n/2$ where $n=|\overline{\alpha}|$.
\subsection{The case of $(a,a,a,a)$ and a note on orbomesy}
When a statistic has the same average on all orbits of the same size, it is called \emph{orbomesic}. Extending the idea of homomesy, the orbomesy phenomenon is introduced in \cite{rowmotion} and illustrated through a number of cases it applies to. The rowmotion on fences is a periodic operation, and except when we get shared elements it applies to each segment independently according to their own size. The orbit structure therefore is determined by how often we get shared elements---how in sync the action on different segments are. In the examples of orbomesy given in \cite{rowmotion}, we often get groups of orbits that, though not isomorphic in a well defined sense, are structurally equivalent and are formed by picking different pairings of moduli that are out of sync. As a result, they naturally have the same length, $\overline{\mathcal{M}}$ value and $\overline{\chi}$ value, resulting in an apparent orbomesy. In the circular case, we see that the orbomesy of $\overline{\chi}$ breaks down completely, in that we either get a full homomesy or we get pairs of orbits of the same size with different $\overline{\chi}$ values. Now we will look at the case of $\overline{F}(a,a,a,a)$ where this is especially visible.
\begin{thm} Rowmotion on $\overline{F}(a,a,a,a)$ has:
\begin{itemize}
\item $a^3-4 a^2 +6a -3$ orbits of size $a$, satisfying $\overline{\mathcal{M}}(\mathcal{O})=4a-4$, $\overline{\chi}(\mathcal{O})=2a|\mathcal{O}|$,
\item $2a$ orbits of size $a+1$ with $\overline{\mathcal{M}}(\mathcal{O})=4a-2$, $a$ of them satisfying $\overline{\chi}(\mathcal{O})=2a(|\mathcal{O}|+1)$, the other $a$ satisfying $\overline{\chi}(\mathcal{O})=2a(|\mathcal{O}|-1)$.
\item $1$ orbit of size $a+2$, $\overline{\mathcal{M}}(\mathcal{O})=4a$, $\overline{\chi}(\mathcal{O})=2a|\mathcal{O}|$,
\item $2a-2$ orbits of size $2a^2$ with $\overline{\mathcal{M}}(\mathcal{O})=8a^2-10a+4$, where for each $r \in \{0,1,\ldots,a-2\}$ we get two orbits whose $\overline{\chi}$ value is equal to $4a^3+2a^2-4a+4ra$.
\end{itemize}
\end{thm}
\begin{proof} We will describe each of these orbits. As the total number of ideals represented, $a^4+4a^2+2$, matches $\overline{R}((a,a,a,a);1)$, there can be no other orbits. After describing the orbits, the statistics can be calculated via the simplified formulas: \[\overline{\mathcal{M}}(\mathcal{O})=\sum_{i\leq4}b_i(a-1)+r_i\qquad \overline{\chi}(\mathcal{O})=a(2|\mathcal{O}|+r_1-r_2+r_3-r_4).\]
\begin{itemize}
\item The size $a$ orbits are the ones that contain no red tile. Each row contains one white and one black tile, where white tiles on consecutive rows fall on different columns, including rows $1$ and $4$. Placing the white tile on position $1$ of the first row, the rest of the white tiles can be placed in $(a-1)^3-(a-1)(a-2)=a^3-4 a^2 +6a -3$ ways.
\item The size $a+1$ orbits have two red tiles that lie in different columns, which we can choose in $a$ ways. They either start on rows $1$ and $3$ or rows $2$ and $4$, giving us $2a$ such orbits in total.
\item The unique size $a+2$ orbit contains $4$ red tiles: starting at rows $1$ and $3$ of column $1$ and rows $2$ and $4$ of column $3$.
\item The largest orbits can be indexed with $r \in \{0,1,\ldots,a-2\}$, where we get a pair of orbits $\mathcal{O}^1_r$ and $\mathcal{O}^2_r$ of size $2a^2$ for each choice of $r$. $\mathcal{O}^1_r$ has the following positions for the red tiles:
\begin{minipage}{\dimexpr\textwidth-3cm}
\begin{itemize}
\item [ROW 1:] $1+(a+1)t$ for $0\leq t\leq r$,
\item [ROW 2:] $a^2-(a+1)t$ for $1\leq t\leq a-1-r$,
\item [ROW 3:] $a^2+(a+1)t$ for $0\leq t\leq r$,
\item [ROW 4:] $2a^2-(a+1)t$ for $1\leq t\leq a-1-r$.
\end{itemize}
\xdef\tpd{\the\prevdepth}
\end{minipage} $\mathcal{O}^2_r$ has the values for rows $2$ and $4$ flipped. \end{itemize}\end{proof}
The value $\overline{\mathcal{M}}$ seems to be orbomesic in this example, in fact that is the case in all examples listed on Table~\ref{tab:rowmotion}. That could be indicative of a general pattern or could be because we are only looking at a very limited sample of examples. For $(a,a,a,a)$, the $\overline{\chi}$ values of orbits of size $2a^2$ are not just paired up but actually follow an arithmetic progression centered around $|\mathcal{O}|n/2$. It would be interesting to see if this trend continues in larger examples.
\section{Comments, Questions and Future Directions}\label{sec:further}
We list some questions and observations here that are of natural interest.
\begin{itemize}
\item \textbf{Bijective proofs:} The original starting point of this work was finding a bijective proof for the symmetry of the rank sequences of lower ideals of circular fences. The setwise complement of a size $k$ lower ideal is a size $n-k$ upper ideal. The tantalizingly simple notion of taking this setwise complement and then letting the beads (nodes belonging to the ideal) fall with gravity unfortunately did not work. When there are filled or empty sections, the algorithm can not be described locally section by section, which makes both for a tricky description and a complicated proof. It is possible however, that another perspective on the objects might lead to a more natural approach to the proof.
Fence posets are in bijective correspondence with a variety of combinatorial objects. An example is given by perfect matchings in snake graphs. Circular fence posets, similarly, can be viewed as a circular analogue to snake graphs, with two ends identified. It is possible to get a bijection if two ends are identified in a \emph{parity reversing} way to avoid any extra matchings forming, or disallowing matchings that do not work in the uncircular case as done in \cite{bandgraphs}. The natural symmetries of this object are different and can possibly provide new insight.
\item \textbf{Rowmotion orbits under shifting:} We have seen in Lemma~\ref{lem:rowmotionflip} that the setwise complement map $\kappa$ gives a natural bijection between orbits of $\overline{F}(\overline{\alpha})$ and $\overline{F}(\text{sh}(\overline{\alpha}))$ that takes $\overline{\chi}$ to $n|\mathcal{O}|-\overline{\chi}$, while fixing orbit length and $\overline{\mathcal{M}}$. The pairing up of $\overline{\chi}$ statistics seen in Table~\ref{tab:rowmotion} suggests that it might be possible to find a bijection that also \emph{fixes} $\overline{\chi}$. That would be exciting on two levels. It would confirm that in the circular case, $\overline{\chi}$ is never truly orbomesic, it is either a true homomesy or we have orbits of the same size with different $\overline{\chi}$ values. It would also provide a bijective proof for the symmetry of the rank polynomial, possibly making way to a bijective proof of unimodality in the non-circular case.
\item \textbf{A Polyhedral Perspective: } A related and probably simpler question that we were unable to answer goes as follows.
Given a composition $\overline{\alpha}$ of $n$, consider the polytope $\overline{P}_{\overline{\alpha}} \subset \mathbb{R}^n$ given by the indicator vectors of $\overline{J}(\overline{\alpha})$, the set of all lower ideals of the associated circular fence poset. Consider the sections of the polytope:
\[\overline{P}_{\overline{\alpha}}^t = \overline{P}_{\overline{\alpha}} \cap \{x\in \mathbb{R}^n, \, \sum_{i} x_i = t\}.\]
We have observed that the function $t \rightarrow \operatorname{Vol} \, \overline{P}_{\overline{\alpha}}^t$
is symmetric about the point $n/2$, that is, \[\operatorname{Vol}(\overline{P}_{\overline{\alpha}}^t) = \operatorname{Vol}(\overline{P}_{\overline{\alpha}}^{n-t}),\qquad 0 \leq t \leq n.\]
Interestingly, these polytopes are not necessarily combinatorially equivalent. The special case when we look at compositions $(\overline{\alpha}_1, 1, \ldots, \overline{\alpha}_s, 1)$ of $n$ is especially interesting. Note that these are of special interest (at least to the authors) as these are potentially the only compositions where we are yet to settle the unimodality problem for rank sequences. In this case, we can parse the question as follows. Define the polytope $H_{\overline{\alpha}}$ by
\[H_{\overline{\alpha}} = \{x \in \prod_{i = 1}^{s} [0, \overline{\alpha}_i + 1], \,\,\, x_i - x_{i+1\, (\operatorname{mod}\,s)\,} \leq \overline{\alpha}_i, \, i = 1, \ldots, s\}.\]
Then the above conjecture in this special case reduces to the claim that
\[\operatorname{Vol}(\overline{H}_{\overline{\alpha}}^t) = \operatorname{Vol}(\overline{H}_{\overline{\alpha}}^{n-t}), \qquad 0 \leq t \leq n,\] where these polytopes are defined similarly to above. Again, we have equality of volumes despite the polytopes not necessarily being isomorphic. A natural explanation of this would be interesting. \item \textbf{Refinements of Unimodality: } In their paper \cite{Saganpaper}, McConville, Sagan and Smyth investigated the existence of chain decompositions as a possible method of proving unimodality. Though unable to make progress in this direction, the examples we considered led us to believe that for circular fences---apart from the case $\overline{\alpha} = (a, 1, a, 1)$ (see Figure~\ref{fig:1417})---the associated lattices admit \emph{symmetric chain decompositions} and are thus \emph{strongly Sperner}. A resolution of this would be satisfying and would go some way towards elucidating the structure of fence and circular fence posets.
\item\textbf{Skew Young-Posets:} The boxes on the Ferrers diagram of a partition $\lambda$ have a natural poset structure, where ideals are in bijection with partitions whose Ferrers diagrams fit inside $\lambda$. From this viewpoint, fence posets can be viewed as the posets for certain skew-diagrams $\lambda\backslash \mu$ corresponding to maximal border strips. The unimodality of the corresponding rank polynomial in the non-skew case was studied previously by Stanton in 1990 (see \cite{stanton}), where he conjectured that self dual partitions give rise to unimodal polynomials. He also provided several examples where unimodality fails. These examples are of a similar flavor to the examples in the circular case in that the two largest entries are separated by a slightly smaller entry which provides the only violation to unimodality.
Progress towards a general classification has been limited since then. Zbarsky showed in \cite{nearrectangular} that when the partition $\lambda$ satisfies certain properties to ensure the parts are of similar sizes the rank polynomial is unimodal. He also provided further examples where unimodality fails and conjectured that in any example where unimodality fails the rank polynomial is bimodal with the two modes being separated by one entry only.
The tricky part about the case of the (possibly skew) diagrams compared to the fences is that the position of the mode (or modes) is trickier to determine and does not necessarily lie around $q^{|\lambda|/2}$. Nevertheless, there is enough similarity to warrant a new look at this problem through the lens of skew-diagrams and possible circular analogues.
\item \textbf{Extremal Ranks: } For a fixed size, how does changing the shape of the fence affect the resulting rank sequence? It is a simple observation to see that any maximum is achieved at a composition with parts $\leq 2$:
\begin{prop} Let $\overline{\alpha}'$ be obtained from a partition $\overline{\alpha}$ by replacing a part of size $t\geq 3$ by parts $t-2,1,1$. Then $\overline{R}(\overline{\alpha}';q)-\overline{R}(\overline{\alpha};q)$ has non-negative integer coefficients.
\end{prop}
\begin{proof} As the rank polynomial is invariant under cyclic shifts, we can assume that the part of size $t$ is the first part. Let $x\preceq y \preceq z$ be the maximum nodes on the first segment. The ideals of $\overline{\alpha}'$ can be obtained by replacing these relations with the weaker set $y\succeq x \preceq z$, where $x$ and $z$ are incomparable.
\end{proof}
Our experiments suggest that the following is true.
\begin{itemize}
\item Given any composition $\overline{\alpha}$ of $n$, we conjecture that $r(\overline{\alpha}) \leq r(1^n)$, where the inequalities are pointwise i.e. we believe that for every $k$, the number of rank $k$ down ideals of $F(\overline{\alpha})$ is at most the number of rank $k$ down ideals of $F(1^n)$. \item More generally, we conjecture that for any fixed $k$ and $n$ where $k$ divides $n$ and any composition of $n$ with $k$ parts, we have that $r(\overline{\alpha}) \leq r(n/k, \ldots, n/k)$.
\end{itemize}
\end{itemize}
\end{document} |
\begin{document}
\mainmatter
\title{Integrating Topological Proofs with Model Checking to Instrument Iterative Design}
\titlerunning{Integrating model checking and topological proofs}
\author{Claudio Menghi\inst{1}
\and Alessandro Maria Rizzi\inst{2}
\and Anna Bernasconi\inst{2}
}
\authorrunning{Integrating Topological Proofs with Model Checking}
\institute{
University of Luxembourg, Luxembourg\\
\mailsa\\
\and
Politecnico di Milano, Italy\\% Dipartimento di Elettronica, Informazione e Bioingegneria\\
\mailsb\\
}
\toctitle{Lecture Notes in Computer Science}
\tocauthor{Authors' Instructions}
\maketitle
\begin{abstract}
System development is not a linear, one-shot process. It proceeds through refinements and revisions.
To support assurance that the system satisfies its requirements, it is desirable that continuous verification can be performed after each refinement or revision step.
To achieve practical adoption, formal system modeling and verification must accommodate continuous verification efficiently and effectively.
Our proposal to address this problem is TOrPEDO, a verification approach where models are given via Partial Kripke Structures (PKSs) and requirements are specified as Linear-time Temporal Logic (LTL) properties.
PKSs support refinement, by deliberately indicating unspecified parts of the model that are later completed.
We support verification in two complementary forms: via model checking and proofs. Model checking is useful to provide counterexamples, i.e., pinpoint model behaviors that violate requirements. Proofs are instead useful since they can explain why requirements are satisfied. In our work, we introduce a specific concept of proof, called topological proof (TP). A TP produces a slice of the original PKS which justifies the property satisfaction. Because models can be incomplete, TOrPEDO\ supports reasoning on requirements satisfaction, violation, and possible satisfaction (in the case where the satisfaction depends on unknown parts).
\end{abstract}
\section{Introduction}
One of the goals of software engineering and formal methods is to provide automated verification tools that support designers in producing models of an envisioned system, which follows a set of properties of interest.
Many automated verification methods are available to help and guide the designer through this complex task. These methods include, among others, model checking and theorem proving. Typically, the designer benefits from automated support to understand why her system does not behave as expected (e.g., counterexamples), but she might find useful also information retrieved when the system already follows the specified requirements. While model checkers provide the former, theorem provers sustain the latter.
These usually rely on some form of deductive mechanism that, given a set of axioms, iteratively applies a set of rules until a theorem is proved. The proof consists of the specific sequence of deductive rules applied to prove the theorem. In literature, many approaches have dealt with integration of model checking and theorem proving at various levels (e.g.,~\cite{namjoshi2001certifying,cleaveland2002evidence,rajan1995integration,kupferman2005complementation}). These approaches are oriented to provide \textit{certified model checking} procedures rather than tools which actually help the design process.
Even when the idea is to provide a practically useful framework~\cite{PPZ01,PZ01}, the output consists of deductive proofs which are usually difficult to understand and hardly connectable with the designer's modeling choices.
Moreover, verification techniques which only take into account completely specified designs do not comply with modern agile software design techniques.
In a recent work (\cite{Bernasconi2017,bernasconi2017arxiv}), we have considered cases in which a partial knowledge of the system model is available. However, the presented approach was mainly theoretical and lacked a practical implementation.
With the intent to provide a valuable support for a flexible design process, we formulate the initial problem on models that contain uncertain parts. Partial specification may be used, for instance, to represent the uncertainty of introducing, keeping, excluding particular portions of the design with respect to the complete model of the system.
For this reason, we chose Partial Kripke Structures (PKSs) as a formalism to represent general models.
PKSs are a standard formalism used to reason on incomplete systems.
Among several applications, they have been used in requirement elicitation to reason about the system behavior from different points of view~\cite{easterbrook2001framework,brunet2006manifesto}.
Furthermore, other modeling formalisms such as Modal Transition Systems~\cite{larsen1988modal} (MTSs), can be converted into PKSs through a simple transformation~\cite{godefroid2003expressiveness}.
Thus, the proposed solution can also be easily applied on models specified using MTSs, which are commonly used in software development~\cite{foster2006ltsa,UchitelFm}.
Kripke Structures (KSs) are particular instances of PKSs used to represent complete models. Requirements on the model are expressed in Linear-time Temporal Logic (LTL).
Verification techniques that consider PKSs return three alternative values: \emph{true} if the property holds in the partial model, \emph{false} if it does not hold, and \emph{maybe} if the property possibly holds, i.e., its satisfaction depends on the parts that still need to be refined.
Methods for verifying partial models naturally fit in modern software design processes~\cite{FASE18,foster2006ltsa,UchitelFm}.
In the iterative design process, the designer starts from a high level model of the system in which some portions can be left unspecified, representing design decisions that may be taken in later development steps.
As development proceeds, the initial model can be \emph{refined}, by filling parts that are left unspecified, or \emph{revised}, by changing parts that were already specified.
In a PKS a refinement is performed by associating a true/false value to previously unknown propositions, while revising may also involve adding or removing existing states or transitions, or changing values already assigned to propositions (i.e., a refinement is a type of revision).
A comprehensive integrated design framework able to support software designers in understanding \emph{why} properties are (possibly) satisfied -- as models are specified, refined, or revised -- is still missing.
We tackle this problem by presenting TOrPEDO\ (TOpological Proof drivEn Development framewOrk), a novel automated verification framework, that:
\begin{itemize}
\item[(i)] supports a modeling formalism which allows a partial specification of the system design;
\item[(ii)] allows performing analysis and verification in the context of systems in which ``incompleteness'' represents a conceptual uncertainty;
\item[(iii)] provides guidance in the refinement process through complementary outputs: counterexamples and topological proofs;
\item[(iv)] when the system is completely specified, allows understanding which changes impact or not on certain properties.
\end{itemize}
TOrPEDO\ is based on the novel notion of \textit{topological proof} (TP), which tries to overcome the complexity of deductive proofs and is designed to make proofs understandable on the original system design.
A TP is a \textit{slice} of the original model that specifies which part of it influences the property satisfaction.
If the slice defined by the TP is not preserved during a refinement or a revision,
there is no assurance that the property holds (possibly holds) in the refined or revised model.
This paper proposes an algorithm to compute topological proofs---which relies on the notion of \textit{unsatisfiable cores} (UCs) \cite{SCHUPPAN2016155}---and proves its correctness on PKSs.
TOrPEDO\ has been implemented on top of NuSMV~\cite{nusmv} and PLTL-MUP~\cite{sergeantfinding}.
The implementation has been used to evaluate how TOrPEDO\ helps software designers by considering a set of examples coming from literature including both completely specified and partially specified models.
The paper is structured as follows.
Section~\ref{sec:torpedo} describes TOrPEDO.
Section~\ref{sec:background} discusses the background.
Sections~\ref{sec:topologicalproof} and \ref{sec:automatedsupport} present the theoretical results and the algorithms that support TOrPEDO.
Section~\ref{sec:evaluation} evaluates the achieved results.
Section~\ref{sec:related} discusses related work.
Section~\ref{sec:conclusions} concludes.
\section{TOrPEDO}
\label{sec:torpedo}
TOrPEDO\ is a proof based development framework which allows verifying initial designs and evaluating their revisions.
To illustrate TOrPEDO, we use an example: a designer needs to develop a simple vacuum-cleaner robot which has to satisfy the requirements in Table~\ref{tab:motivatinproperties}, specified through LTL formulae and plain English text.
These are simple requirements that will be used for illustration purposes.
Informally, when turned on, the vacuum-cleaner agent can move with the purpose to reach a site which may be cleaned in case it is dirty.
\begin{figure}
\caption{Model of a vacuum-cleaner robot.}
\label{fig:motivatingmodel}
\caption{Sample requirements.}
\label{tab:motivatinproperties}
\end{figure}
The TOrPEDO\ framework is illustrated in Fig.~\ref{fig:renovated} and it is made of four phases: \textsc{initial design}, \textsc{analysis}, \textsc{revision}, and \textsc{re-check}.
Dashed boxes marked with the person icon represent phases performed manually by the designer, while dashed boxes marked with the gears icon contain phases operated using automated support.
\textbf{\textsc{Initial design.}} This phase concerns the initial definition of the model of the system, formalized as a PKS (marked in Fig.~\ref{fig:renovated} with \circled{\scriptsize{1}}) along with the property of interest, in LTL (\circled{\scriptsize{2}}).
In the vacuum-cleaner example, the designer has identified two actions the robot can perform:
$\mathit{move}$, i.e., the agent travels to the cleaning site;
$\mathit{suck}$, i.e., the agent is drawing the dust.
She has also identified two conditions that can trigger actions:
$\mathit{on}$, true when the robot is turned on;
$\mathit{reached}$, true when the robot has reached the cleaning site.
These actions and conditions determine the designer's description of the preliminary model presented in Fig.~\ref{fig:motivatingmodel}.
The model is made by four states representing the configuration of the vacuum-cleaner robot.
The state $\mathit{OFF}$ represents the robot being shut down,
$\mathit{IDLE}$ the robot being tuned in w.r.t. a cleaning call,
$\mathit{MOVING}$ the robot reaching the cleaning site,
and $\mathit{CLEANING}$ the robot performing its duty.
Each state is labeled with the actions and conditions that are true in that state.
Given an action or condition $\alpha$ and a state $s$, we use the notation:
$\alpha=\LTLtrue$ to indicate that $\alpha$ occurs when the robot is in state $s$;
$\alpha=\LTLfalse$ to indicate that $\alpha$ does not occur when the robot is in state $s$;
$\alpha=?$ to indicate that there is uncertainty on whether $\alpha$ occurs when the robot is in state $s$.
In the first two cases the designer is sure that an action must (must not) be performed or a condition must be true (false) in a state; in the third case the designer is uncertain about the design.
Specifically, she does not know whether the robot should perform an action or whether a condition should be true in order for a state to be entered.
\begin{figure*}
\caption{TOrPEDO\ structure. Continuous arrows represent inputs and outputs to phases. Circled numbers are used to reference the image in the text.}
\label{fig:renovated}
\end{figure*}
\textbf{\textsc{Analysis.}} TOrPEDO\ assists the designer with an automated analysis, which includes the following elements:
\begin{enumerate}
\item[(i)] information about \emph{what is wrong} in the current design. This information includes a definitive-counterexample, which indicates a behavior that depends on already performed design choices and violates the properties of interest. The definitive-counterexample (i.e., Def-CE \circled{\scriptsize{3}}) can be used to improve $M$ and produce a revised version $M^\prime$ that satisfies or possibly satisfies the property of interest.
\item[(ii)] information about \emph{what is correct} in the current design. This information includes definitive-topological proofs (i.e., Def-TP \circled{\scriptsize{4}}) that indicate a portion of the design that ensures property satisfaction;
\item[(iii)] information about \emph{what could be wrong}/\emph{correct} in the current design depending on how uncertainty is removed. This information includes: a possible-counterexample (i.e., Poss-CE \circled{\scriptsize{5}}), indicating a behavior (which depends on uncertain actions and conditions) that violates the properties of interest, and a possible-topological proof (i.e., Poss-TP \circled{\scriptsize{6}}), indicating a portion of the design that ensures the possible satisfaction of the property of interest. The designer can use the possible-counterexample and the possible-topological proof to improve $M$.
\end{enumerate}
In the following we will use the notation $x$-topological proofs or $x$-TP to indicate arbitrarily definitive-topological or possible-topological proofs.
In the vacuum-cleaner example, the designer analyzes her proposed design using TOrPEDO.
\begin{sloppypar}
Property $\phi_1$ is possibly satisfied. TOrPEDO\ returns the possible-counterexample $\mathit{OFF}$, $\mathit{IDLE}$, $(\mathit{MOVING})^{\omega}$.
This possible-counterexample shows a run that violates the property of interest.
TOrPEDO\ also returns a possible-topological proof showing that the property remains possibly satisfied given that $\mathit{OFF}$ remains the only initial state, $reached$ still holds in $\mathit{CLEANING}$, and $suck$ does not hold in $\mathit{OFF}$ and $\mathit{IDLE}$,
while unknown in $\mathit{MOVING}$ (note that, if $suck$ was set to $\bot$ in this state we would indeed obtain a proof). In addition, all transitions must be preserved.
\end{sloppypar}
Property $\phi_2$ is satisfied. TOrPEDO\ returns the definitive-topological proof, which shows that the property remains satisfied given that $\mathit{OFF}$ remains the only initial state, $on$ still holds in $\mathit{MOVING}$ and $\mathit{CLEANING}$, and $move$ does not hold in $\mathit{OFF}$ and $\mathit{IDLE}$. In addition, all transitions must be preserved.
Property $\phi_3$ is not satisfied. TOrPEDO\ returns a definitive-counterexample, e.g., $\mathit{OFF},\mathit{IDLE}^\omega$.
The counterexample shows that it is not true that always a robot which is operative and not moving is drawing dust.
Property $\phi_4$ is possibly satisfied. TOrPEDO\ returns the possible-counterexample $\mathit{OFF}$, $(\mathit{IDLE},\,\mathit{MOVING},\,\mathit{CLEANING},\,\mathit{IDLE},\,\mathit{OFF})^{\omega}$, which specifies a sample run for which it is not true that the robot is only moving (and not cleaning) before it can draw dust.
The topological proof shows that the property remains possibly satisfied given that the following characteristics of the model are preserved:
from the only initial state $\mathit{OFF}$ one can loop or move to $\mathit{IDLE}$, from which one can loop, return to $\mathit{OFF}$, or go to $\mathit{MOVING}$; in addition $move$ must hold in $\mathit{MOVING}$ and $suck$ must not occur in $\mathit{OFF}$ and $\mathit{IDLE}$, while unknown in $\mathit{MOVING}$.
\textbf{\textsc{Revision.}} As development proceeds, the designer may want to revise the existing model by
changing some of its parts: adding/removing states and transitions or by changing propositions labelling inside states.
Revision may include refinement, i.e., replacing with $\LTLtrue$ and $\LTLfalse$ some unknown values in the atomic propositions.
The inputs of this phase are the initial model $M$ (\circled{\scriptsize{1}}), or an already revised model (\circled{\scriptsize{7}}),
and the $x$-TP that can be used by the designer as a guideline for the revision (\circled{\scriptsize{9}}).
The output is another revised model $M'$ (\circled{\scriptsize{8}}).
\textit{Revision 1.} The designer would like her model to not violate any property of interest.
She examines the counterexample of $\phi_3$ to understand why it is not satisfied and envisions a revised model that could satisfy the property.
She also consults the $x$-TPs of properties $\phi_1$, $\phi_2$, and $\phi_4$ in order to be sure to preserve their verification results.
She thus decides to change the value of $move$ in state $\mathit{IDLE}$ from $\bot$ to $\top$.
Since she foresees $\phi_3$ is now satisfied, she reruns the \textsc{analysis} for this property.
TOrPEDO\ provides the corresponding $x$-TP.
\textit{Revision 2.} The designer decides to further improve her model by proposing a refinement: $move$ becomes $\top$ in state $\mathit{CLEANING}$ and $reached$ becomes $\bot$ in state $\mathit{IDLE}$.
Since $\phi_1$, $\phi_2$, $\phi_3$, and $\phi_4$ were previously not violated, TOrPEDO\ performs the \textsc{re-check} phase for each property.
\textbf{\textsc{Re-check.}} The automated verification tool provided by TOrPEDO\ checks whether all the changes in the current model revision (\circled{\scriptsize{8}}) are compliant with the $x$-TPs (\circled{\scriptsize{10}}), i.e., changes applied to the revised model do not include parts that had to be preserved according to the proof.
If a property of interest is (possibly) satisfied in a previous model (\circled{\scriptsize{1}}), and the revision of the model is compliant with the property $x$-TP, the designer has the guarantee that the property is (possibly) satisfied in the revision.
Thus, she can perform another model revision round (\circled{\scriptsize{12}}) or approve the current design (\circled{\scriptsize{13}}).
Otherwise, TOrPEDO\ re-executes the \textsc{analysis} (\circled{\scriptsize{11}}).
In the vacuum-cleaner case, the second revision passes the \textsc{re-check} and the designer proceeds to a new revision phase.
\section{Background}
\label{sec:background}
We present some background and notation necessary to understand the rest of the paper.
First, we describe how to model the system under development and its properties.
Then, we present the unsatisfiable core which is the element upon which our algorithm for computing topological proofs is based.
\subsection{Modeling systems and properties}
\label{sub:modeling}
We first describe Partial Kripke Structures (PKS)
and Kripke Structures (KS), formalisms that allow modeling the systems under development.
Then, we briefly introduce the semantics for LTL properties on PKSs and KSs and how to perform model checking on these structures.
PKSs are a modeling formalism that can be adopted when the value of some propositions is uncertain in selected states.
\begin{definition}[\cite{bruns1999model}]
A \emph{Partial Kripke Structure} $M$ is a tuple $\langle S, R,S_0,AP,$ $L \rangle$, where:
$S$ is a set of states;
$R\subseteq S\times S$ is a left-total transition relation on $S$;
$S_0$ is a set of initial states;
$AP$ is a set of atomic propositions;
$L: S\times AP\rightarrow \{\top,?,\bot\}$ is a function that, for each state in $S$, associates a truth value to every atomic proposition in $AP$.
\end{definition}
Informally, a PKS represents a system as a set of states and transitions between these states.
Uncertainty on the $AP$ is represented through the value $?$.
The model of the vacuum-cleaner agent presented in Fig.~\ref{fig:motivatingmodel} is a PKS where propositions in $AP$ are used to model actions and conditions.
\begin{definition}[\cite{Kripke1963-KRISCO}]
A \emph{Kripke Structure} $M$ is a PKS $\langle S, R,S_0,AP,L \rangle$, where $L: S\times AP\rightarrow \{\top,\bot\}$.
\end{definition}
PKSs can be related to other PKSs or to KSs respectively through \textit{refinement} and \textit{completion}.
\begin{definition}[\cite{bruns2000generalized}]
\label{def:refinement}
Let $M=\langle S, R,S_0,AP,L \rangle$ be a PKS.
A refinement of $M$ is a PKS $M^\prime=\langle S, R, S_0,$ $AP,$ $L^\prime \rangle$
where $L^\prime$ is such that
\begin{itemize}
\item for all $s\in S$ and $\alpha \in AP$, if $L(s, \alpha)=\LTLtrue$ then $L^\prime(s, \alpha)=\LTLtrue$;
\item for all $s\in S$ and $\alpha \in AP$, if $L(s, \alpha)=\LTLfalse$ then $L^\prime(s, \alpha)=\LTLfalse$.
\end{itemize}
\end{definition}
We indicate that $M^\prime$ is a refinement of $M$ using the notation $M \preceq M^\prime$.
Intuitively, the notion of refinement allows assigning a $\LTLtrue$ or a $\LTLfalse$ value to an atomic proposition $\alpha$ in a state $s$ s.t. $L(s, \alpha)=?$.
\begin{definition}[\cite{bruns2000generalized}]
Let $M$ be a PKS and let $M^\prime$ be a KS.
Then $M^\prime$ is a \emph{completion} of $M$ if and only if $M \preceq M^\prime$.
\end{definition}
Intuitively, a completion of a PKS is a KS obtained by assigning a $\LTLtrue$ or a $\LTLfalse$ value to every atomic proposition $\alpha$ and state $s$ s.t. $L(s, \alpha)=?$.
\noindent
\textbf{Semantics of LTL properties.}
For KSs we consider the classical LTL semantics $[M \models \phi]$ over infinite words that associates to a model $M$ and a formula $\phi$ a truth value in the set $\{ \bot, \top \}$. The interested reader may refer, for example, to~\cite{katoen2008}.
Let $M$ be a KS and $\phi$ be an LTL property. We assume that the function $\textsc{Check}$, such that $\langle res, c \rangle =\textsc{Check} (M $, $\phi)$, returns a tuple $\langle res, c \rangle$, where $res$ is the model checking result in $\{ \top, \bot \}$ and, if $res=\bot$, $c$ is the counterexample.
Instead, when the satisfaction of LTL over PKSs is of interest, two semantics can be considered: the three-valued or the thorough semantics.
The \emph{three-valued LTL semantics}~\cite{bruns1999model} $[M \models_{3} \phi]$ associates to a model $M$ and a formula $\phi$ a truth value in the set $\{ \bot, ?, \top \}$ and is defined based on the information ordering $\top >\, ? > \bot$, i.e., on the assumption that $\top$ ``provides more information'' than $?$ and $?$ ``provides more information'' than $\bot$~\cite{bruns1999model}.
The three-valued LTL semantics is defined by considering paths of the model $M$. A path $\pi$ is a sequence of states $s_0,s_1,\ldots$ such that, for all $i \geq 0$, $(s_i, s_{i+1}) \in R$.
\begin{definition}[\cite{bruns1999model}]
Let $M = \langle S, R,$ $S_0, AP, L \rangle$ be a PKS,
let $\pi=s_0,s_1,\ldots$ be a path, and
let $\phi$ be an LTL formula. Then, the \emph{three-valued semantics} $[(M,\pi)\models_3\phi]$ is defined inductively as follows:
\begin{align*}
&[(M,\pi) \models_3 p] & = &&& L(s_0,p)\\
&[(M,\pi) \models_3 \lnot\phi] & = &&& comp([(M,\pi) \models_3 \phi]) \\
&[(M,\pi) \models_3 \phi_1 \LTLand \phi_2] & = &&& \min([(M,\pi) \models_3 \phi_1],[(M,\pi) \models_3 \phi_2])\\
&[(M,\pi) \models_3 \LTLnext \phi] & = &&& [(M,\pi^1) \models_3 \phi]\\
&[(M,\pi) \models_3 \phi_1 \LTLuntil \phi_2] & = &&& \max_{j\geq 0}(\min(\{[(M,\pi^i) \models_3 \phi_1]|i<j\} \cup \{[(M,\pi^j) \models_3 \phi_2]\}))
\end{align*}
\end{definition}
The conjunction (resp. disjunction) is defined as the minimum (resp. maximum) of its arguments, following the order $\bot<\,?<\top$. These functions are extended to sets with min($\emptyset$)=$\top$ and max($\emptyset$)=$\bot$. The $comp$ operator maps $\top$ to $\bot$, $\bot$ to $\top$, and $?$ to $?$.
\begin{definition}[\cite{bruns1999model}]
Let $M = \langle S, R,$ $S_0, AP, L \rangle$ be a PKS,
$s$ be a state of $M$
and $\phi$ be an LTL formula. Then
$ [(M, s) \models_3 \phi]=\min(\{[(M, \pi) \models_3 \phi] \mid \pi^0=s\})$.
\end{definition}
Intuitively this means that, given a formula $\phi$, each state $s$ of $M$ is associated with the minimum of the values obtained considering the LTL semantics over any path $\pi$ that starts in $s$.
When the three-valued semantics is considered, if $[M \models_{3} \phi] = \top$ then in every completion of $M$ formula $\phi$ is true and if $[M \models_{3} \phi] = \bot$, then in every completion of $M$ formula $\phi$ is false.
In general, when $[M \models_{3} \phi] = ?$, there exist both completions of $M$ that satisfy and do not satisfy $\phi$.
However, there are cases in which all the completions of $M$ satisfy (or do not satisfy) $\phi$.
For this reason, the alternative thorough LTL semantics~\cite{bruns2000generalized} has been proposed ($[M \models_{T} \phi]$).
The \emph{thorough LTL semantics}~\cite{bruns2000generalized} dictates that $[M \models_{T} \phi] = \top$ if in every completion of $M$ formula $\phi$ is true and $[M \models_{T} \phi] = \bot$ if in every completion of $M$ formula $\phi$ is false.
This ensures that, when $[M \models_{T} \phi] =?$, there exist both completions of $M$ that satisfy $\phi$ and completions of $M$ that do not satisfy $\phi$.
\begin{definition}[\cite{bruns2000generalized}]
Let $M$ be a PKS and
let $\phi$ be an LTL formula.
Then,
\begin{equation}
[M \models_T \phi] \buildrel \text{def}\over =
\begin{cases}
\top & \quad \text{if}\ M^\prime \models \phi \text{ for every completion } M^\prime \text{ of } M\\
\bot & \quad \text{if}\ M^\prime \not\models \phi \text{ for every completion } M^\prime \text{ of } M \\
? & \quad \text{otherwise}
\end{cases}
\end{equation}
\end{definition}
Note that, when a PKS is a KS, $[M \models_{3} \phi]= [M \models_{T} \phi]=[M \models \phi]$.
\begin{lemma}[\cite{godefroid2011ltl}]
Let $M$ be a PKS and
let $\phi$ be an LTL formula.
Then
\begin{enumerate*}
\item[] $[M \models_3 \phi] =$ $ \top \Rightarrow$ $ [M\models_T \phi] = \top$; and
\item[] $[M\models_3\phi] = \bot$ $\Rightarrow$ $[M\models_T \phi] = \bot$.
\end{enumerate*}
\end{lemma}
That is, a formula which is true (false) under the three-valued semantics is also true (false) under the thorough semantics.
There exists a subset of LTL formulae, known in the literature as \emph{self-minimizing}~\cite{godefroid2005model}, such that the two semantics coincide, i.e.,
given a model $M$ and a self-minimizing LTL property $\phi$, then $[M\models_3\phi]=[M\models_T\phi]$. It has been observed that most practically useful LTL formulae belong to this subset~\cite{godefroid2005model}.
\iffalse
\color{red}
To define three-valued LTL semantics we consider a path $\pi$ as a sequence of states $s_0,s_1,s_2\ldots$ such that for all $i \geq 0$, $(s_i, s_{i+1}) \in R$.
\color{black}
\color{red}
\begin{definition}
Given a PKS $M = \langle S, R,$ $S_0, AP, L \rangle$, a path $\pi=s_0,s_1,s_2\ldots$, and a formula $\phi$, the \emph{three-valued semantics}~\cite{godefroid2011ltl} $[(M,\pi)\models\phi]$ is defined inductively as follows:
\begin{align*}
&[(M,\pi) \models \alpha] & \buildrel \text{def}\over = &&& L(s_0,\alpha)\\
&[(M,\pi) \models \lnot\phi] &\buildrel \text{def}\over = &&& comp([(M,\pi) \models \phi]) \\
&[(M,\pi) \models \phi_1 \LTLand \phi_2] & \buildrel \text{def}\over = &&& \min([(M,\pi) \models \phi_1],[(M,\pi) \models \phi_2])\\
&[(M,\pi) \models \LTLnext \phi] & \buildrel \text{def}\over = &&& [(M,\pi^1) \models \phi]\\
&[(M,\pi) \models \phi_1 \LTLuntil \phi_2] & \buildrel \text{def}\over = &&& \max_{j\geq 0}(\min(\{[(M,\pi^i) \models \phi_1]|i<j\} \cup \{[(M,\pi^j) \models \phi_2]\}))
\end{align*}
\end{definition}
The conjunction (disjunction) is defined as the minimum (maximum) of its arguments, following the order $\bot<\,?<\top$. These functions are extended to sets with min($\emptyset$)=$\top$ and max($\emptyset$)=$\bot$. The symbol $\pi^1$ indicates the subpath $s_1,s_2\ldots$ of $\pi$.
\begin{definition}
Given a PKS $M = \langle S, R,$ $S_0, AP, L \rangle$, a state $s$, and an LTL formula $\phi$:
\begin{align*}
& [(M, s) \models \phi] \buildrel \text{def}\over = \min(\{[(M, \pi) \models \phi] \mid \pi^0=s\})
\end{align*}
\end{definition}
Intuitively this means that, given a formula $\phi$, each state $s$ of $M$ is associated with the minimum of the values obtained considering the LTL semantics over any path $\pi$ that starts in $s$.
\color{black}
\fi
\noindent
\textbf{Model checking.}
Checking KSs with respect to LTL properties can be done by using classical model checking procedures.
For example, the model checking problem of property $\phi$ on a KS $M$ can be reduced to the satisfiability problem of the LTL formula $\Phi_M \LTLand \neg \phi$, where $\Phi_M$ represents the behaviors of model $M$.
If $\Phi_M \LTLand \neg \phi$ is satisfiable, then $[M \models \phi]=\bot$, otherwise $[M \models \phi]=\top$.
Checking a PKS $M$ with respect to an LTL property $\phi$ considering the three-valued semantics can be done by performing twice the classical model checking procedure for KSs~\cite{bruns2000generalized}, one considering an optimistic approximation $M_{opt}$ and one considering a pessimistic approximation $M_{pes}$.
These two procedures consider the LTL formula $\phi^\prime=\magicfunction(\phi)$, where \magicfunction\ transforms $\phi$ with the following steps:
\begin{enumerate}
\item[(i)] negate $\phi$;
\item[(ii)] convert $\neg \phi$ in negation normal form\footnote{An LTL formula $\phi$ is in \emph{negation normal form} if negations are applied only to atomic propositions.
Conversion of an LTL formula into its negation normal form can be achieved by pushing negations inward and replacing them with their duals---for details see \cite{katoen2008}.};
\item[(iii)] replace every subformula $\neg \alpha$, where $\alpha$ is an atomic proposition, with a new atomic proposition $\overline{\alpha}$.
\end{enumerate}
To create the optimistic and pessimistic approximations $M_{opt}$ and $M_{pes}$,
the PKS $M=\langle S, R,S_0,AP,L \rangle$ is first converted into its \emph{complement-closed} version $M_c=\langle S, R, S_0, AP_c, L_c \rangle$ where the set of atomic propositions $AP_c = AP \cup \overline{AP}$
is such that $\overline{AP}=\{\overline{\alpha} \mid \alpha \in AP \}$.
Atomic propositions in $\overline{AP}$ are called complement-closed propositions.
Function $L_c$ is such that for all $s \in S$ and $\alpha \in AP$, $L_c(s, \alpha)=L(s,\alpha)$
and for all $s\in S$ and $\overline{\alpha} \in \overline{AP}$, $L_c(s,\overline{\alpha})=comp(L(s,\alpha))$.
For example, the complement-closed PKS of the vacuum-cleaner agent in Fig.~\ref{fig:motivatingmodel}, in the state $IDLE$ presents eight propositional assignments: $move= \bot$, $\overline{move}=\top$, $suck=\bot$, $\overline{suck}=\top$, $on=\top$, $\overline{on}=\bot$, $reached=?$, and $\overline{reached}=?$.
The two model checking runs for a PKS $M=\langle S, R,S_0,AP,L \rangle$ are based respectively on an optimistic ($M_{opt}=\langle S, R,S_0,AP_c,L_{opt} \rangle$) and a pessimistic ($M_{pes}=\langle S, R,S_0,AP_c,L_{pes} \rangle$) approximation of $M$'s relative complement-closed $M_c=\langle S, R, S_0, AP_c, L_c \rangle$.
Function $L_{pes}$ (resp. $L_{opt}$) is such that for all $s \in S$, $\alpha \in AP_c$, and $L_c(s, \alpha) \in \{\top, \bot \}$, then $L_{pes}(s, \alpha)=L_c(s,\alpha)$ (resp. $L_{opt}(s, \alpha)=L_c(s,\alpha)$),
and for all $s\in S$, $\alpha \in AP_c$, and $L_c(s, \alpha)=?$, then $L_{pes}(s,\alpha)=\bot$ (resp. $L_{opt}(s,\alpha)=\top$).
Let $A$ be a KS and let $\phi$ be an LTL formula; $A \models^\ast \phi$ is true if no path of $A$ satisfies the formula $\magicfunction(\phi)$.
\begin{theorem}[\cite{bruns1999model}]
\label{th:threevaluedMC}
Let $\phi$ be an LTL formula,
let $M=\langle S, R, S_0, AP, L\rangle $ be a PKS,
and let $M_{pes}$ and $M_{opt}$ be the pessimistic and optimistic approximations of $M$'s relative complement-closed $M_c$. Then
\begin{equation}
[M \models_3\phi]\buildrel \text{def}\over =
\begin{cases}
\top & \quad \text{if}\ M_{pes}\models^\ast\phi\\
\bot & \quad \text{if}\ M_{opt}\not\models^\ast\phi\\
? & \quad otherwise
\end{cases}
\end{equation}
\end{theorem}
We assume that the function $\textsc{Check}^\ast$ computes the result of operator $\models^\ast$. It takes as input either $M_{pes}$ or $M_{opt}$ and the property $\magicfunction(\phi)$, and returns a tuple $\langle res, c \rangle$,
where $res$ is the model checking result in $\{ \top, \bot \}$, and $c$ can be an empty set (when $M$ satisfies $\phi$), a \textit{definitive}-counterexample (when $M$ violates $\phi$), or a \textit{possible}-counterexample (when $M$ possibly-satisfies $\phi$).
\subsection{Unsatisfiable core}
\label{sub:UC}
Given a set of atomic propositions $AP$, $AP^{L}$ is the minimum set of elements such that for all $ p \in AP$, the following holds: $p \in AP^{L}$ and $(\neg p) \in AP^{L}$.
\begin{definition}[\cite{SCHUPPAN2016155}]
Let $AP$ be a set of atomic propositions.
A Separated Normal Form (SNF) clause is an LTL formula, which is either an SNF initial clause, an SNF global clause or an SNF eventuality clause, where:
\begin{itemize}
\item an \textit{SNF initial clause} $\underset{p \in P}{\bigvee}p$ where $P \subseteq AP^L$;
\item an \textit{SNF global clause} $\LTLg(\underset{p \in P}{\bigvee}p \lor \LTLx(\underset{q \in Q}{\bigvee}q))$ where $P,Q \subseteq AP^L$;
\item an \textit{SNF eventuality clause} $\LTLg(\underset{p \in P}{\bigvee}p \lor \LTLf(l))$ where $P \subseteq AP^L, l \in AP^L$.
\end{itemize}
\end{definition}
\noindent
Given a set $C$ of LTL formulae we denote the LTL formula $\underset{c \in C}\bigwedge c$ with $\eta(C)$.
\begin{definition}[\cite{SCHUPPAN2016155}]
Let $C$ be a set of SNF clauses.
Then the formula $\eta(C)$ is in SNF.
\end{definition}
In the following we assume that the property $\phi$ is an LTL formula in SNF, since any LTL formula can be transformed into an equisatisfiable SNF formula (for example, by using the procedure in~\cite{fisher2001clausal}).
\begin{definition}[\cite{SCHUPPAN2016155}]
Let $C$ be a set of SNF clauses.
Then $C$ is \emph{unsatisfiable} if the corresponding SNF formula $\eta(C)$ is unsatisfiable.
\end{definition}
\begin{definition}[\cite{SCHUPPAN2016155}]
Let $C$ be an unsatisfiable set of SNF clauses and
let $C^{uc}$ be an unsatisfiable subset of $C$.
Then
$C^{uc}$ is an \emph{Unsatisfiable Core (UC)} of $C$.
\end{definition}
\section{Revising and refining models}
\label{sec:topologicalproof}
First, we define how models can be revised and refined.
Then, we define the notion of \emph{topological proof}, that is used to describe why a property $\phi$ is satisfied in a KS $M$.
Furthermore, we describe how the defined notion of proof can be exploited to show why a property is satisfied or possibly satisfied in a PKS.
\noindent
\textbf{Revisions and refinements.}
During a revision, a designer can add and remove states and transitions or change the labeling of the atomic propositions in some of the states of the structure.
\begin{definition}
\label{def:revision}
Let $M=\langle S,$ $R,$ $S_0,$ $AP,$ $L \rangle$ and
$M^\prime=\langle S^\prime,$ $R^\prime,$ $S^\prime_0,$ $AP^\prime,$ $L^\prime \rangle$ be two PKSs.
Then $M^\prime$ is a \emph{revision} of $M$ if and only if $AP \subseteq AP^\prime$.
\end{definition}
Informally, the only constraint the designer has to respect during a revision is not to remove propositions from the set of atomic propositions.
\begin{restatable}{lemma}{refinementisrevision}
\label{refinementisrevision}
Let $M=\langle S,$ $R,$ $S_0,$ $AP,$ $L \rangle$ be a PKS and
let $M^\prime=\langle S,$ $R,$ $S_0,$ $AP,$ $L^\prime\rangle$ be a refinement of $M$.
Then $M^\prime$ is a revision of $M$.
\end{restatable}
\noindent
\textbf{Topological proofs.}
The pursued proof is made of a set of clauses specifying certain topological properties of $M$, which ensure that the property is satisfied.
\begin{definition}
\label{def:ksclause}
Let $M=\langle S,$ $R,$ $S_0,$ $AP,$ $L \rangle$ be a PKS.
A \emph{topological proof clause} (TP-clause) $\gamma$ for $M$ is either:
\begin{itemize}
\item a \textit{topological proof propositional clause} (TPP-clause), i.e., a triad $\langle s, \alpha, v \rangle$ where $s \in S$, $\alpha \in AP$, and $v \in \{ \LTLtrue, ?, \LTLfalse \}$;
\item a \textit{topological proof transitions-from-state clause} (TPT-clause), i.e., an element $\langle s, T \rangle$, such that $s \in S, T \subseteq S$;
\item a \textit{topological proof initial-states clause} (TPI-clause), i.e., an element $\langle S_0 \rangle$.
\end{itemize}
\end{definition}
These clauses indicate \textit{topological properties} of a PKS $M$.
Informally, TPP-, TPT-, and TPI-clauses describe how states are labeled, how states are connected, and from which states the runs on the model begin, respectively.
\begin{definition}
\label{def:gammarelated}
Let $M=\langle S, R, S_0, AP, L \rangle$ be a PKS and
let $\Gamma$ be a set of TP-clauses for $M$.
Then a \emph{$\Gamma$-related} PKS is a PKS $M^\prime=\langle S^\prime, R^\prime, S^\prime_0, AP^\prime, L^\prime \rangle$, such that the following conditions hold:
\begin{itemize}
\item $AP \subseteq AP^\prime$;
\item for every TPP-clause $\langle s, \alpha, v \rangle \in \Gamma$, $v=L^\prime(s, \alpha)$;
\item for every TPT-clause $\langle s, T \rangle \in \Gamma$, $T=\{s^\prime \in S^\prime | (s,s^\prime)\in\ R^\prime\}$;
\item for every TPI-clause $\langle S_0 \rangle \in \Gamma$, $S_0 = S^\prime_0$.
\end{itemize}
\end{definition}
Intuitively, a $\Gamma$-related PKS of $M$ is a PKS obtained from $M$ by changing any topological aspect that does not impact on the set of TP-clauses $\Gamma$.
Any transition whose source state is not the source state of a transition included in the TPT-clauses can be added or removed from the PKS and any value of a proposition that is not constrained by a TPP-clause can be changed.
States can be always added and they can be removed if they do not appear in any TPT-, TPP-, or TPI-clause. Initial states cannot be changed if $\Gamma$ contains a TPI-clause.
\begin{definition}
\label{def:topologicalproof}
Let $M=\langle S, R, S_0, AP, L \rangle$ be a PKS,
let $\phi$ be an LTL property,
let $\Omega$ be a set of TP-clauses,
and let $x$ be a truth value in $\{\top,?\}$.
A set of TP-clauses $\Omega$ is an \emph{$x$-topological proof} (or $x$-TP) for $\phi$ in $M$ if
$[M \models \phi] = x$ and
every $\Omega$-related PKS $M^\prime$ is such that $[M^\prime \models \phi]\geq x$.
\end{definition}
Note that, in this definition---and in the rest of the paper, unless otherwise specified---$\models$ indicates either $\models_3$ or $\models_T$.
Intuitively, an \emph{$x$-topological proof} is a set $\Omega$ such that every PKS $M^\prime$ that satisfies the conditions specified in Definition~\ref{def:gammarelated} is such that $[M^\prime \models \phi]\geq x$.
We call $\top$-TP a \emph{definitive-topological proof} and
$?$-TP a \emph{possible-topological proof}.
In Definition~\ref{def:topologicalproof} the operator $\geq$, assumes that values $\top, ?, \bot$ are ordered considering the classical information ordering $\top >\, ? > \bot$ among the truth values~\cite{bruns1999model}.
A $?$-TP for the PKS in Fig.~\ref{fig:motivatingmodel} and property $\phi_4$ is composed by the TP-clauses shown in Table~\ref{tab:motivatingExampleProof}.
\begin{table}[t]
\begin{tabular}{lc}
\toprule
\textbf{Proof generated for property $\phi_4$ } & \textbf{Clause}\\
\midrule
$\langle \mathit{OFF}, \mathit{suck}, \bot \rangle$,
$\langle \mathit{IDLE}, \mathit{suck}, \bot \rangle$,
$\langle \mathit{MOVING}, \mathit{suck},$ $?$ $\rangle$,
$\langle \mathit{MOVING}, \mathit{move}, \top \rangle$ & TPP \\
$\langle \mathit{OFF},\{\mathit{OFF}, \mathit{IDLE}\} \rangle$,
$\langle \mathit{IDLE},\{\mathit{OFF}, \mathit{IDLE}, \mathit{MOVING}\} \rangle$ & TPT \\
$\langle \{\mathit{OFF}\} \rangle$ & TPI \\
\bottomrule
\end{tabular}
\caption{An example of proof for the vacuum-cleaner example.}
\label{tab:motivatingExampleProof}
\end{table}
\begin{definition}
\label{def:omegaRevisionDef}
Let $M$ and $M^\prime$ be two PKSs,
let $\phi$ be an LTL property, and
let $\Omega$ be an $x$-TP.
Then $M^\prime$ is an \emph{$\Omega_x$-revision} of $M$ if
$M^\prime$ is $\Omega$-related to $M$.
\end{definition}
Intuitively, since the \emph{$\Omega_x$-revision} $M^\prime$ of $M$ is such that $M^\prime$ is $\Omega$-related w.r.t. $M$, it is obtained by changing the model $M$ while preserving the statements that are specified in the $x$-TP.
A revision $M^\prime$ of $M$ is \emph{compliant} with the $x$-TP for a property $\phi$ in $M$ if it is an \emph{$\Omega_x$-revision} of $M$.
\begin{restatable}{lemma}{revisingpreservespropertysatisfaction}
\label{revisingpreservespropertysatisfaction}
Let $M$ be a PKS,
let $\phi$ be an LTL property such that $[M \models \phi]=\top$, and
let $\Omega$ be a $\top$-TP.
Then every $\Omega_\top$-revision $M^\prime$ is such that $[M^\prime \models \phi] =\top$.
\noindent
Let $M$ be a PKS,
let $\phi$ be an LTL property such that $[M \models \phi]=?$, and
let $\Omega$ be a $?$-TP.
Then every $\Omega_?$-revision $M^\prime$ is such that $[M^\prime \models \phi] \in \{ \top, ?\}$.
\end{restatable}
\begin{proof}
We prove the first statement of the Lemma; the proof of the second statement is obtained by following the same steps.
If $\Omega$ is a $\top$-TP,
it is a $\top$-TP for $\phi$ in $M^\prime$,
since $M^\prime$ is an $\Omega_\top$-revision of $M$ (by Definition~\ref{def:omegaRevisionDef}).
Since $\Omega$ is a $\top$-TP for $\phi$ in $M^\prime$, then $[M^\prime \models \phi] \geq \top$ (by Definition~\ref{def:topologicalproof}).
\qed
\end{proof}
\section{TOrPEDO\ automated support}
\label{sec:automatedsupport}
This section describes the algorithms that support the \textsc{analysis} and \textsc{re-check} phases of TOrPEDO .
\begin{algorithm}[t]
\captionof{algorithm}{The algorithm that supports the \textsc{analysis} phase.}
\label{alg:analyze}
\begin{algorithmic}[1]
\Function{\textsc{Analyze}}{$M$, $\phi$}
\State $\langle res, c \rangle$ = \textsc{Check}$^\ast$($M_{opt} $, $\phi)$
\label{line:check_opt}
\If{$res == \bot$} \Return $\langle \bot, \{ c \} \rangle$
\label{line:not_satisfied}
\Else
\State $\langle res^\prime, c^\prime \rangle$ = \textsc{Check}$^\ast$($M_{pes} $, $\phi$)
\label{line:check_pes}
\If{$res^\prime ==\top$} \Return $\langle \top, \{ \tpcompute(M, M_{pes} , \magicfunction(\phi)) \} \rangle$
\label{line:satisfied}
\Else
\State \Return $\langle ?, \{ c^\prime, \tpcompute(M, M_{opt} $, $\magicfunction(\phi))\} \rangle$
\label{line:possiblysatisfied}
\EndIf
\EndIf
\EndFunction
\end{algorithmic}
\end{algorithm}
\textbf{\textsc{Analysis.}}
To analyze a PKS $M=\langle S, R,S_0,AP,L \rangle$, TOrPEDO\ uses the three-valued model checking framework based on Theorem~\ref{th:threevaluedMC}.
The model checking result is provided as output by the \textsc{analysis} phase of TOrPEDO , whose behavior is described in Algorithm~\ref{alg:analyze}.
Specifically, the algorithm returns a tuple $\langle x, y \rangle$, where $x$ is the verification result and $y$ is a set containing the counterexample, the topological proof or both of them.
The algorithm first checks whether the optimistic approximation $M_{opt}$ of the PKS $M$ satisfies property $\phi$ (Line~\ref{line:check_opt}).
If this is not the case, the property is violated by the PKS and the definitive-counterexample $c$ is returned (Line~\ref{line:not_satisfied}).
Then, it checks whether the pessimistic approximation $M_{pes}$ of the PKS $M$ satisfies property $\phi$ (Line~\ref{line:check_pes}).
If this is the case, the property is satisfied and the value $\top$ is returned along with the definitive-topological proof ($\top$-TP) computed by the \tpcompute\ procedure applied on the pessimistic approximation $M_{pes}$ and the property $\magicfunction(\phi)$.
If this is not the case, the property is possibly satisfied and the value $?$ is returned along with the possible-counterexample $c'$ and the possible-topological proof ($?$-TP) computed by the \tpcompute\ procedure applied to $M_{opt}$ and $\magicfunction(\phi)$.
\begin{algorithm}[t]
\captionof{algorithm}{Compute Topological Proofs}
\label{alg:computetpp}
\begin{algorithmic}[1]
\Function{\tpcompute}{$M$, $\mathcal{A}$, $\psi$}
\State $\eta(C)= \textsc{Sys2Snf} (\mathcal{A}, \psi)$ \label{step1}
\State $C^{uc} =\textsc{GetUC} (\eta(C))$ \label{step2}
\State $TP= \textsc{GetTP} (M, C^{uc})$ \label{step3}
\State \Return $TP$
\EndFunction
\end{algorithmic}
\end{algorithm}
The procedure \tpcompute\ (Compute Topological Proofs) to compute $x$-TPs is described in Algorithm~\ref{alg:computetpp}.
It takes as input a PKS $M$, its optimistic/pessimistic approximation, i.e., the KS $\mathcal{A}$, and an LTL formula $\psi$---satisfied in $\mathcal{A}$--- corresponding to the transformed property \magicfunction($\phi$). The three steps are described in the following.
\textsc{Sys2Snf}. \textit{Encoding of the KS $\mathcal{A}$ and the LTL formula $\psi$ into an LTL formula $\eta(C)$ in SNF.}
The KS $\mathcal{A}$ and the LTL formula $\psi$ are used to generate an SNF formula
$\eta(C_{\mathcal{A}} \cup C_{\psi})$, where $C_{\mathcal{A}}$ and $C_{\psi}$ are sets of SNF clauses obtained from the KS $\mathcal{A}$ and the LTL formula $\psi$.
The clauses in $C_{\psi}$ are computed from $\psi$ as specified in~\cite{SCHUPPAN2016155}.
The set of clauses that encodes the KS is $C_{\mathcal{A}}=C_{\mathit{KS}} \cup C_{\mathit{REG}}$, where $C_{\mathit{KS}}=\{ c_i \} \cup CR_{\mathcal{A}} \cup CL_{\top,\mathcal{A}} \cup CL_{\bot,\mathcal{A}}$
and $c_i$, $CR_{\mathcal{A}}$, $CL_{\top,\mathcal{A}}$ and $CL_{\bot,\mathcal{A}}$
are defined as specified in Table~\ref{tab:kstosnf}.
Note that the clauses in $C_{\mathcal{A}}$ are defined on the set of atomic propositions $AP_S=AP_{\mathcal{A}} \cup \{ p(s) | s \in S_{\mathcal{A}}\}$, i.e., $AP_S$ includes an additional atomic proposition $p(s)$ for each state $s$, which is true when the KS is in state $s$.
\begin{table}[t]
\caption{Rules to transform the KS in SNF formulae.}
\label{tab:kstosnf}
\begin{tabular}{l}
\toprule
$c_i=\underset{s \in S_{0,\mathcal{A}}}{\bigvee} p(s)$ \\ The KS is initially in one of its initial states.\\
\midrule
$CR_{\mathcal{A}}=\{\LTLg(\neg p(s) \lor\LTLx( \underset{(s, s^\prime) \in R_{\mathcal{A}}}{\bigvee} p(s^\prime)) ) \mid s \in S_{\mathcal{A}} \}$ \\ If the KS is in state $s$ in the current time instant, in the next time instant it is in \\ one of its successors $s^\prime$ of $s$.\\
\midrule
$CL_{\top,\mathcal{A}}=\{\LTLg(\neg p(s) \lor \alpha ) \mid s \in S_{\mathcal{A}}, \alpha \in AP_{\mathcal{A}}, L_{\mathcal{A}}(s, \alpha)=\top \}$ \\
If the KS is in state $s$ s.t. $L_{\mathcal{A}}(s, \alpha)=\top$, the atomic proposition $\alpha$ is true.\\
\midrule
$CL_{\bot,\mathcal{A}}=\{\LTLg(\neg p(s) \lor \neg \alpha ) \mid s \in S_{\mathcal{A}}, \alpha \in AP_{\mathcal{A}}, L_{\mathcal{A}}(s, \alpha)=\bot \}.$ \\ If the KS is in state $s$ s.t. $L_{\mathcal{A}}(s, \alpha)=\bot$, the atomic proposition $\alpha$ is false.\\
\midrule
$C_{\mathit{REG}}=\{\LTLg(\neg p(s) \lor \neg p(s^\prime)) \mid s,s^\prime \in S_{\mathcal{A}} \text{ and } s\neq s^\prime\}$ \\ Ensures that the KS is in at most one state at any time.\\
\bottomrule
\end{tabular}
\end{table}
\textsc{GetUC}. \textit{Computation of the unsatisfiable core (UC) $C^{uc}$ of $C$.}
Since the property $\psi$ is satisfied on $\mathcal{A}$, as recalled in Section~\ref{sec:background}, $\eta(C_{\mathcal{A}} \cup C_{\psi})$ is unsatisfiable and the computation of its UC core is performed as specified in~\cite{SCHUPPAN2016155}.
The procedure returns an SNF formula $\eta(C_{\mathit{KS}}^\prime \cup C_{\mathit{REG}}^\prime \cup C^\prime_{\psi})$ that is unsatisfiable and such that $C_{\mathit{KS}}^\prime \subseteq C_{\mathit{KS}}$,
$C_{\mathit{REG}}^\prime \subseteq C_{\mathit{REG}}$ and $C_{\psi}^\prime \subseteq C_{\psi}$.
\textsc{GetTP}. \textit{Analysis of the UC $C^{uc}$ and extraction of the topological proof.}
Formula $\eta(C_{\mathcal{A}}^\prime \cup C^\prime_{\psi})$, where $C_{\mathcal{A}}^\prime=C_{\mathit{KS}}^\prime \cup C_{\mathit{REG}}^\prime$,
contains clauses regarding the KS ($C^\prime_{\mathit{KS}}$), the fact that the model is a KS ($C^\prime_{\mathit{REG}}$), and the property of interest ($C_{\psi}^\prime$) that made the formula $\eta(C_{\mathcal{A}} \cup C_{\psi})$ unsatisfiable.
Since we are interested in clauses related to the KS that caused unsatisfiability, we extract the topological proof $\Omega$, whose topological proof clauses are obtained from the clauses in $C_{\mathit{KS}}^\prime$ as specified in Table~\ref{tab:snftotp}.
Since the set of atomic propositions of $\mathcal{A}$ is $AP_{\mathcal{A}}= AP \cup \overline{AP}$, in the table we use $\alpha$ for propositions in $AP$ and $\overline{\alpha}$ for propositions in $\overline{AP}$.
\begin{table}[t]
\caption{Rules to extract the TP-clauses from the UC SNF formula.}
\label{tab:snftotp}
\begin{tabular}{ p{0.4\textwidth} p{0.4\textwidth} p{0.2\textwidth}}
\toprule
\textbf{SNF clause} & \textbf{TP clause} & \textbf{TP clause type} \\
\midrule
$c_i=\underset{s \in S_{0,\mathcal{A}}}{\bigvee} p(s)$ & $\langle S_0 \rangle$ & TPI-clause \\
\midrule
$\LTLg(\neg p(s) \lor \LTLx(\underset{(s, s^\prime) \in R_{\mathcal{A}}}{\bigvee} p(s^\prime)))$ & $\langle s, T \rangle$ where $T=\{s^\prime | (s,s^\prime) \in R \}$ & TPT-clause \\
\midrule
$\LTLg(\neg p(s) \lor \alpha )$ & $\langle s, \alpha, L(s, \alpha) \rangle$ & TPP-clause \\
\midrule
$\LTLg(\neg p(s) \lor \neg \alpha )$ & $\langle s, \alpha, comp(L(s, \alpha)) \rangle$ & TPP-clause \\
\midrule
$\LTLg(\neg p(s) \lor \overline\alpha )$ & $\langle s, \alpha, comp(L(s, \alpha)) \rangle$ & TPP-clause \\
\midrule
$\LTLg(\neg p(s) \lor \neg \overline\alpha )$ & $\langle s, \alpha, L(s, \alpha) \rangle$ & TPP-clause \\
\bottomrule
\end{tabular}
\end{table}
Note that elements in $C_{\mathit{REG}}^\prime$ are not considered in the TP computation.
Indeed, given an SNF clause $\LTLg(\neg p(s) \lor \neg p(s^{\prime}))$, either state $s$ or $s^\prime$ is included in other SNF clauses, thus it will be mapped on TP-clauses that will be preserved in the model revisions.
\begin{lemma}
\label{lemma:creg}
Let $\mathcal{A}$ be a KS
and let $\psi$ be an LTL property.
Let also $\eta(C_{\mathcal{A}} \cup C_{\psi})$ be the SNF formula computed in the step \textsc{Sys2Snf} of the algorithm, where $C_{\mathcal{A}}=C_{\mathit{REG}}\cup C_{\mathit{KS}}$, and
let $C_{\mathcal{A}}^\prime \cup C_{\psi}^\prime$ be an unsatisfiable core, where $C_{\mathcal{A}}^\prime=C_{\mathit{REG}}^\prime\cup C_{\mathit{KS}}^\prime$.
Then, if $\LTLg(\neg p(s) \lor \neg p(s^{\prime})) \in C^\prime_{\mathit{REG}}$,
either
\begin{enumerate}
\item[(i)] there exists an SNF clause in $C_{\mathit{KS}}^\prime$ that predicates on state $s$ (or on state $s^{\prime}$);
\item[(ii)] $C_{\mathcal{A}}^{\prime\prime} \cup C_{\psi}^\prime$, s.t. $C_{\mathcal{A}}^{\prime\prime}=C_{\mathcal{A}}^{\prime} \setminus \{ \LTLg(\neg p(s) \lor \neg p(s^{\prime})) \}$, is an unsatisfiable core of $\eta(C^\prime_{\mathcal{A}} \cup C^{\prime}_{\psi})$.
\end{enumerate}
\end{lemma}
\begin{proof}
We indicate $\LTLg(\neg p(s) \lor \neg p(s^{\prime}))$ as $\tau(s,s^\prime)$.
Assume per absurdum that conditions (i) and (ii) are violated, i.e., no SNF clause in $C^\prime_{\mathit{KS}}$ predicates on state $s$ or $s^\prime$ and
$C_{\mathcal{A}}^{\prime\prime} \cup C_{\psi}^\prime$ is not an unsatisfiable core of $C^\prime_{\mathcal{A}} \cup C^{\prime}_{\psi}$.
Since $C_{\mathcal{A}}^{\prime\prime} \cup C_{\psi}^\prime$ is not an unsatisfiable core of $C^\prime_{\mathcal{A}} \cup C^{\prime}_{\psi}$, $C_{\mathcal{A}}^{\prime\prime} \cup C^{\prime}_{\psi}$ is satisfiable since $C_{\mathcal{A}}^{\prime\prime} \subset C^\prime_{\mathcal{A}}$.
Since $C_{\mathcal{A}}^{\prime\prime} \cup C_{\psi}^\prime$ is satisfiable,
$C_{\mathcal{A}}^{\prime} \cup C_{\psi}^\prime$ s.t. $C_{\mathcal{A}}^{\prime}=C_{\mathcal{A}}^{\prime\prime} \cup \{\tau(s,s^\prime)\}$ must also be satisfiable. Indeed, it does not exist any SNF clause that predicates on state $s$ (or on state $s^{\prime}$) and, in order to generate a contradiction, the added SNF clause must generate it with the SNF clauses obtained from the LTL property $\psi$.
This is a contradiction.
Thus, conditions (i) and (ii) must be satisfied.
\qed
\end{proof}
The \textsc{Analyze}\ procedure in Algorithm~\ref{alg:analyze} has shown how we obtain a TP for a PKS by
first computing the related optimistic or pessimistic approximation (i.e., a KS) and
then exploiting the computation of the TP for this KS.
\begin{restatable}{theorem}{topologicalproofcorrectness}
\label{topologicalproofcorrecntess}
Let $M=\langle S, R,S_0,AP,L \rangle$ be a PKS,
let $\phi$ be an LTL property,
and let $x\in\{\top,?\}$ be an element such that $[M \models_{3} \phi]=x$.
If the procedure \textsc{Analyze}\, applied to the PKS $M$ and the LTL property $\phi$, returns a TP $\Omega$, this is an $x$-TP for $\phi$ in $M$.
\end{restatable}
\begin{proof}
Assume that the
\textsc{Analyze}\ procedure returns the value $\top$ and a $\top$-TP.
We show that every $\Omega$-related PKS $M^\prime$ is such that $[M^\prime \models \phi]\geq x$ (Definition~\ref{def:topologicalproof}).
If \textsc{Analyze}\ returns the value $\top$, it must be that
$M_{\mathit{pes}}\models^\ast \phi$ by Lines~\ref{line:check_pes} and~\ref{line:satisfied} of Algorithm~\ref{alg:analyze}.
Furthermore, by Line~\ref{line:satisfied}, $\psi=\magicfunction(\phi)$ and $\mathcal{A}=M_{\mathit{pes}}$.
Let $N=\langle S_N, R_N, S_{0,N}, AP_N,L_N \rangle$ be a PKS $\Omega$-related to $M$.
Let $\eta(C_{\mathcal{A}} \cup C_{\psi})$ be the SNF formula associated with $\mathcal{A}$ and $\psi$ and
let $\eta(C_{\mathcal{B}} \cup C_{\psi})$ be the SNF formula associated with $\mathcal{B}=N_{\mathit{pes}}$ and $\psi$.
Let us consider an UC $C^\prime_{\mathcal{A}}$ $\cup C_{\psi}^\prime$ of $C_{\mathcal{A}} \cup C_{\psi}$,
where $C^\prime_{\mathcal{A}}=C_{\mathit{KS}}^\prime \cup C_{\mathit{REG}}^\prime$,
$C_{\mathit{KS}}^\prime \subseteq C_{\mathit{KS}}$,
$C_{\mathit{REG}}^\prime \subseteq C_{\mathit{REG}}$, and
$C_{\psi}^\prime \subseteq C_{\psi}$.
We show that $C^\prime_{\mathcal{A}} \subseteq C_{\mathcal{B}}$ and $C_{\psi}^\prime \subseteq C_{\psi}$, i.e., the UC is also an UC for the
SNF formula associated with the approximation $\mathcal{B}$ of the PKS $N$.
\begin{itemize}
\item $C_{\psi}^\prime \subseteq C_{\psi}$ is trivially verified since property $\psi$ does not change.
\item $C^\prime_{\mathcal{A}} \subseteq C_{\mathcal{B}}$, i.e.,
$(C^\prime_{\mathit{KS}} \cup C_{\mathit{REG}}^\prime) \subseteq C_{\mathcal{B}}$.
By Lemma~\ref{lemma:creg} we can avoid considering $C_{\mathit{REG}}^\prime$.
By construction (see Line~\ref{step1} of Algorithm~\ref{alg:computetpp}) any clause $c \in C_{\mathit{KS}}^\prime$ belongs to one rule among $CR$, $CL_{pes,\top}$, $ CL_{pes,\bot}$ or $c=c_i$:
\begin{itemize}
\item if $c = c_i$ then, by the rules in Table~\ref{tab:snftotp}, there is a TPI-clause $\langle S_0 \rangle \in \Omega$. By Definition~\ref{def:gammarelated}, $S_0=S_0^\prime$.
Thus, $c_i \in C_{\mathcal{B}}$ since $N$ is $\Omega$-related to $M$.
\item if $c \in CR$ then, by rules in Table~\ref{tab:snftotp}, there is a TPT-clause $\langle s, T \rangle \in \Omega$ where $s\in S$ and $T \subseteq R$.
By Definition~\ref{def:gammarelated}, $T=\{s^\prime \in S^\prime | (s,s^\prime)\in R^\prime\}$.
Thus, $c \in C_{\mathcal{B}}$ since $N$ is $\Omega$-related to $M$.
\item if $c \in CL_{\mathcal{A},\top}$ or $c \in CL_{\mathcal{A},\bot}$, by rules in Table~\ref{tab:snftotp}, there is a TPP-clause $\langle s, \alpha, L(s,\alpha) \rangle \in \Omega$ where $s\in S$ and $\alpha \in AP$.
By Definition~\ref{def:gammarelated}, $L^\prime(s,\alpha)=L(s,\alpha)$.
Thus, $c \in C_{\mathcal{B}}$ since $N$ is $\Omega$-related to $M$.
\end{itemize}
\end{itemize}
Since $N$ is $\Omega$-related to $M$, it has preserved the elements of $\Omega$. Thus $C^\prime_{\mathcal{A}}\cup C_{\psi}^\prime$ is also an UC of $C_{\mathcal{B}}$.
It follows that $[N \models \phi] = \top$.
The proof for the case in which the \textsc{Analyze}\ procedure returns the value $?$ and a $?$-TP can be derived from the first case.
\qed
\end{proof}
\textbf{\textsc{Re-check.}}
Let $M$ be a PKS. The \textsc{re-check} algorithm verifies whether a revision $M^\prime$ of $M$ is an $\Omega$-revision.
Let $M=\langle S, R, S_0, AP, L \rangle$ be a PKS,
let $\Omega$ be an $x$-TP for $\phi$ in $M$, and
let $M^\prime=\langle S^\prime, R^\prime, S_0^\prime, AP^\prime, L^\prime \rangle$ be a revision of $M$.
The \textsc{re-check} algorithm returns \texttt{true} if and only if the following holds:
\begin{itemize}
\item $AP \subseteq AP^\prime$;
\item for every TPP-clause $\langle s, \alpha, v \rangle \in \Omega$, $v=L^\prime(s, \alpha)$;
\item for every TPT-clause $\langle s, T \rangle \in \Omega$,
$T=\{s^\prime \in S^\prime|(s,s^\prime)\in\ R^\prime\}$;
\item for every TPI-clause $\langle S_0 \rangle \in \Omega$,
$S_0 = S^\prime_0$.
\end{itemize}
\begin{lemma}
\label{lemma:omega-related}
Let $M=\langle S, R, S_0, AP, L \rangle$ and $M^\prime=\langle S^\prime, R^\prime,$ $S^\prime_0,$ $AP^\prime, L^\prime \rangle$ be two PKSs
and let $\Omega$ be an $x$-TP.
The \textsc{re-check} algorithm returns \texttt{true} if and only if $M^\prime$ is $\Omega$-related to $M$.
\end{lemma}
\begin{proof}
Since $M^\prime$ is $\Omega$-related to $M$,
the conditions of Definition~\ref{def:gammarelated} hold.
Each of these conditions corresponds to a condition of the \textsc{re-check} algorithm.
Thus, if $M^\prime$ is $\Omega$-related to $M$, the \textsc{re-check} returns \texttt{true}.
Conversely, if \textsc{re-check} returns \texttt{true}, each condition of the algorithm is satisfied and, since each of this conditions is mapped to a condition of Definition~\ref{def:gammarelated},
$M^\prime$ is $\Omega$-related to $M$. \qed
\end{proof}
\begin{restatable}{theorem}{topologicalproofcorrecntess}
\label{recheckcorrectness}
Let $M$ be a PKS,
let $\phi$ be a property,
let $\Omega$ be an $x$-TP for $\phi$ in $M$ where $x \in \{\top,?\}$, and
let $M^\prime$ be a revision of $M$.
The \textsc{re-check} algorithm returns \texttt{true} if and only if $M^\prime$ is an $\Omega$-revision of $M$.
\end{restatable}
\begin{proof}
By applying Lemma~\ref{lemma:omega-related}, the \textsc{re-check} algorithm returns \texttt{true} if and only if $M^\prime$ is
$\Omega$-related to $M$.
By Definition~\ref{def:omegaRevisionDef},
since $\Omega$ is an $x$-TP, the \textsc{re-check} algorithm returns \texttt{true} if and only if $M^\prime$ is an $\Omega$-revision of $M$. \qed
\end{proof}
The \textsc{analysis} and \textsc{re-check} algorithms assume that the three-valued LTL semantics is considered.
Indeed, the algorithm upon which the proof generation framework is developed is the three-valued model checking algorithm~\cite{bruns2000generalized}.
However, the proposed results are also valid considering the thorough semantics if the properties of interest are self-minimizing.
This is not a strong limitation since, as shown in~\cite{godefroid2005model}, most practically useful LTL formulae are self-minimizing.
Future work will consider how to extend the \textsc{analysis} and \textsc{re-check} to completely support the thorough LTL semantics.
\section{Evaluation}
\label{sec:evaluation}
To evaluate how TOrPEDO\ supports designers, we implemented TOrPEDO\ as a Scala stand alone application which is available at \url{http://goo.gl/V4fSjG}.
Specifically, we considered the following research questions:
\begin{enumerate*}
\item[] \textbf{RQ1:} How does the \textsc{analysis} help in creating models revisions?
\item[] \textbf{RQ2:} How does the \textsc{re-check} help in creating models revisions?
\end{enumerate*}
\noindent
\textbf{Evaluation setup.}
To answer RQ1 and RQ2 we considered a set of example PKSs proposed in literature to evaluate the $\chi\mathit{Chek}$~\cite{1201295} model checker.
These examples are divided into three categories.
Each category represents an aspect of a phone call, i.e.,
the \texttt{callee}, \texttt{caller}, and \texttt{caller-callee} categories include PKSs modeling respectively the callee process, the caller process, and the overall caller-callee process.
To simulate iterative design performed using the TOrPEDO\ framework all examples were slightly modified.
Uncertainty on the transitions was removed in all the PKSs of the considered examples.
In addition, the examples in the \texttt{callee} category were modified such that the designer iteratively generates refinements of PKSs, i.e., $?$ is assigned to atomic propositions to generate abstractions of the final KS \texttt{callee-4}.
Instead, the examples in the \texttt{caller-callee} category were modified since the original examples were proposed to evaluate designer disagreement about the value to assign to a proposition in a state~\cite{Chechik2006}.
Thus, in a state $s$ a proposition could be assigned to multiple values in the set $\{\top, ?, \bot\}$.
To generate a PKS useful for our evaluation, we used the following transformations:
when, in a state $s$, a proposition was assigned to the values $\{\top,\bot \}$, $\{ \top, ?\}$, or $\{\bot, ? \}$ these values were replaced with $?$; when, in a state $s$, a proposition was assigned values $\{\top,\top\}$ (resp. $\{\bot,\bot\}$), these values were replaced with $\top$ (resp. $\bot$).
We defined a set of properties based on well known LTL property patterns~\cite{dwyer1998property}.
This choice was motivated by the fact that the original properties used in the examples were specified in Computation Tree Logic (CTL), which is not supported by TOrPEDO.
The defined properties were inspired by the original properties from the examples and are listed in Table~\ref{tab:properties}.
\color{red}
\begin{table}[t]
\small
\caption{Properties considered in the evaluation}
\begin{tabular}{llll}
\toprule
$\phi_1$: &&& $\LTLg (\neg \mathit{OFFHOOK}) \lor (\neg\mathit{OFFHOOK}~ \LTLu ~\mathit{CONNECTED})$ \\
$\phi_2$: &&& $\neg\mathit{OFFHOOK}~\LTLw~(\neg\mathit{OFFHOOK} \land \mathit{CONNECTED})$\\
$\phi_3$: &&& $\LTLg (\mathit{CONNECTED} \rightarrow \mathit{ACTIVE})$ \\
$\phi_4$: &&& $\LTLg (\mathit{OFFHOOK} \land \mathit{ACTIVE} \land \neg\mathit{CONNECTED} \rightarrow \LTLx(\mathit{ACTIVE}))$ \\
$\phi_5$ &&& $\LTLg(\mathit{CONNECTED} \rightarrow \LTLx(\mathit{ACTIVE}))$ \\
\midrule
$\psi_1$: &&& $\LTLg(\mathit{CONNECTED} \rightarrow \mathit{ACTIVE})$ \\
$\psi_2$: &&& $\LTLg(\mathit{CONNECTED} \rightarrow \LTLx(\mathit{ACTIVE}))$\\
$\psi_3$: &&& $\LTLg(\mathit{CONNECTED}) \lor (\mathit{CONNECTED}~\LTLu~\neg\mathit{OFFHOOK})$ \\
$\psi_4$: &&& $\neg\mathit{CONNECTED}~\LTLw~(\neg\mathit{CONNECTED} \land \mathit{OFFHOOK})$ \\
$\psi_5$: &&& $\LTLg(\mathit{CALLEE\_SEL} \rightarrow \mathit{OFFHOOK})$ \\
\midrule
$\eta_1$: &&& $\LTLg((\mathit{OFFHOOK} \land \mathit{CONNECTED}) \rightarrow \LTLx(\mathit{OFFHOOK} \lor \neg\mathit{CONNECTED}))$ \\
$\eta_2$: &&& $\LTLg(\mathit{CONNECTED}) \lor (\mathit{CONNECTED}~\LTLw~\neg\mathit{OFFHOOK})$\\
$\eta_3$: &&& $\neg\mathit{CONNECTED}~\LTLw~(\neg\mathit{CONNECTED} \land \mathit{OFFHOOK})$ \\
$\eta_4$: &&& $\LTLg(\mathit{CALLEE\_FREE} \lor \mathit{LINE\_SEL})$ \\
$\eta_5$: &&& $\LTLg(\LTLx(\mathit{OFFHOOK}) \land \neg\mathit{CONNECTED})$ \\
\bottomrule
\end{tabular}
\label{tab:properties}
\end{table}
\color{black}
\noindent
\textbf{RQ1.} To answer RQ1 we checked how the proofs output by the \textsc{analysis} algorithm were useful in producing PKSs revisions.
To evaluate the usefulness we checked how easy it was to analyze the property satisfaction on the proofs w.r.t. the original models.
This was done by comparing the size of the proofs and the size of the original models.
The size of a PKS $M=\langle S, R, S_0, AP, L \rangle$ was defined as $|M|=|AP|*|S|+|R|+|S_0|$.
The size of a proof $\Omega$ was defined as $|\Omega|=\underset{c \in \Omega}{\sum} |c|$ where:
$|c|=1$ if $c=\langle s,\alpha,v \rangle$;
$|c|=|T|$ if $c=\langle s,T \rangle$, and
$|c|=|S_0|$ if $c=\langle S_0\rangle$.
Table~\ref{tab:experiments} summarizes the obtained results, indicated in the columns under the label `RQ1'.
We show the cardinalities
$|S|$, $|R|$ and $|AP|$ of the sets of states, transitions, and atomic propositions of each considered PKS $M$, the number $|?|$ of couples of a state $s$ with an atomic proposition $\alpha$ such that $L(s,\alpha)=?$, the total size $|M|$ of the model, and the size $|\Omega_p|_x$ of the proofs, where $p$ indicates the considered LTL property and $x$ indicates whether $p$ is satisfied ($x=\top$) or possibly satisfied ($x=?$).
Cells labeled with the symbol $\times$ indicate that a property was not satisfied in that model and thus a proof was not produced by the \textsc{analysis} algorithm.
It can be observed that the size of the proof was always lower than the size of the initial model.
This is an indicator that shows that proofs are easier to understand than the original models, since they include a subset of the elements of the models that ensure that a property is satisfied (resp. possibly satisfied).
\begin{table}[t]
\caption{Cardinalities $|S|$, $|R|$, $|AP|$, $|?|$, and $|M|$ are those of the evaluated model $M$.
$|\Omega_p|_x$ is the size of proof $\Omega_p$ for a property $p$;
$x$ indicates if $\Omega_p$ is a $\top$-TP or a $?$-TP.}
\begin{tabular}{lcccccccccc|ccccl}
\toprule
& \multicolumn{10}{c}{\textbf{RQ1}} & \multicolumn{5}{c}{\textbf{RQ2}}\\
\toprule
Model & $|S|$ & $|R|$ & $|AP|$ & $|?|$ & $|M|$ & $|\Omega_{\phi_1}|$ & $|\Omega_{\phi_2}|$ & $|\Omega_{\phi_3}|$ & $|\Omega_{\phi_4}|$ & $|\Omega_{\phi_5}|$ & $\phi_1$ & $\phi_2$ & $\phi_3$ &$\phi_4$ & $\phi_5$ \\
\midrule
callee-1 & 5 & 15 & 3 & 7 & 31 & $7_?$ & $9_?$ & $21_?$ & $23_?$ & $23_?$ & \textbf{-} & \textbf{-} & \textbf{-} & \textbf{-} & \textbf{-} \\
callee-2 & 5 & 15 & 3 & 4 & 31 & $7_?$ & $9_?$ & $21_?$ & $22_\top$ & $\times$ & \ding{51}\ & \ding{51}\ & \ding{51}\ & \ding{51}\ & \ding{55}\ \\
callee-3 & 5 & 15 & 3 & 2 & 31 & $7_?$ & $9_?$ & $21_?$ & $23_\top$ & $\times$ & \ding{51}\ & \ding{51}\ & \ding{51}\ & \ding{51}\ & \textbf{-}\\
callee-4 & 5 & 15 & 3 & 0 & 31 & $\times$ & $\times$ & $23_\top$ & $21_\top$ & $\times$ & \ding{55}\ & \ding{55}\ & \ding{51}\ & \ding{51}\ & \textbf{-} \\
\midrule
Model & $|S|$ & $|R|$ & $|AP|$ & $|?|$ & $|M|$ & $|\Omega_{\psi_1}|$ & $|\Omega_{\psi_2}|$ & $|\Omega_{\psi_3}|$ & $|\Omega_{\psi_4}|$ & $|\Omega_{\psi_5}|$ & $\psi_1$ & $\psi_2$ & $\psi_3$ &$\psi_4$ & $\psi_5$ \\
\midrule
caller-1 & 6 & 21 & 5 & 4 & 52 & $28_?$ & $\times$ & $2_\top$ & $9_?$ & $28_?$ & \textbf{-} & \textbf{-} & \textbf{-} & \textbf{-} & \textbf{-} \\
caller-2 & 7 & 22 & 5 & 4 & 58 & $30_?$ & $\times$ & $2_\top$ & $9_?$ & $30_?$ & \ding{51}\ & \textbf{-} & \ding{51}\ & \ding{51}\ & \ding{51}\ \\
caller-3 & 6 & 19 & 5 & 1 & 50 & $26_\top$ & $28_\top$ & $2_\top$ & $11_\top$ & $26_\top$ & \ding{51}\ & \textbf{-} & \ding{51}\ & \ding{51}\ & \ding{51}\ \\
caller-4 & 6 & 21 & 5 & 0 & 52 & $28_\top$ & $\times$ & $2_\top$ & $9_\top$ & $28_\top$ & \ding{51}\ & \ding{55}\ & \ding{51}\ & \ding{51}\ & \ding{51}\ \\
\midrule
Model & $|S|$ & $|R|$ & $|AP|$ & $|?|$ & $|M|$ & $|\Omega_{\eta_1}|$ & $|\Omega_{\eta_2}|$ & $|\Omega_{\eta_3}|$ & $|\Omega_{\eta_4}|$ & $|\Omega_{\eta_5}|$ & $\eta_1$ & $\eta_2$ & $\eta_3$ &$\eta_4$ & $\eta_5$ \\
\midrule
caller-callee-1 & 6 & 30 & 6 & 30 & 61 & $37_?$ & $2_\top$ & $15_?$ & $37_?$ & $\times$ & \textbf{-} & \textbf{-} & \textbf{-} & \textbf{-} & \textbf{-} \\
caller-callee-2 & 7 & 35 & 6 & 36 & 78 & $43_?$ & $2_\top$ & $18_?$ & $43_?$ & $\times$ & \ding{51}\ & \ding{51}\ & \ding{51}\ & \ding{51}\ & \textbf{-} \\
caller-callee-3 & 7 & 45 & 6 & 38 & 88 & $53_?$ & $2_\top$ & $53_?$ & $53_?$ & $53_?$ & \ding{51}\ & \ding{51}\ & \ding{51}\ & \ding{51}\ & \textbf{-} \\
caller-callee-4 & 6 & 12 & 4 & 0 & 42 & $\times$ & $\times$ & $\times$ & $19_\top$ & $\times$ & \ding{55}\ & \ding{55}\ & \ding{55}\ & \ding{51}\ & \ding{55}\ \\
\bottomrule
\end{tabular}
\label{tab:experiments}
\end{table}
\noindent
\textbf{RQ2.}
To answer RQ2 we checked how the results produced by the \textsc{re-check} algorithm were useful in producing PKSs revisions.
To evaluate the usefulness we assumed that, for each category of examples, the designer produced revisions following the order specified in Table~\ref{tab:experiments}.
The columns under the label `RQ2' contain the different properties that have been analyzed for each category.
A cell contains \ding{51}\ if the \textsc{re-check} was passed by the considered revised model, i.e., a \texttt{true} value was returned by the \textsc{re-check} algorithm, \ding{55}\ otherwise. The \textit{dash} symbol \textbf{-} is used when the model of the corresponding line is not a revision (i.e., the first model of each category) or when the observed property was false in the previous model, i.e., an $x$-TP was not produced.
We inspect results produced by the \textsc{re-check} algorithm to evaluate their utility in verifying if revisions were violating the proofs.
A careful observation of Table \ref{tab:experiments} reveals that, in many cases, the TOrPEDO\ \textsc{re-check} notifies the designer that the proposed revision violates some of the clauses contained in the $\Omega$-proof.
This suggests that the \textsc{re-check} is effective in helping the designer in creating model revisions.
\noindent
\textbf{Threats to validity.}
The changes applied to the existing models are a threat to construct validity since they may generate models which are not realistic.
To mitigate this threat, we referred to textual explanations in the papers in which the examples were presented to generate reasonable revisions.
Biases in the creation of PKSs are a threat to internal validity.
To mitigate this threat, we designed our models starting from already existing models.
The limited number of examples is a threat to external validity.
To mitigate this threat, we verified that all the possible output cases of TOrPEDO\ were obtained at least once.
\noindent
\textbf{Scalability.}
Three-valued model checking is as expensive as classical model checking~\cite{bruns1999model}, which is commonly employed in real world problems analysis~\cite{Woodcock09}.
Unsatisfiability checking and UCs computation have been employed to verify digital hardware and software systems~\cite{hustadt2003trp++}.
The \textsc{analysis} phase of TOrPEDO\ simply combines three-valued model checking and UCs computation, therefore
its scalability improves as the performance of the employed integrated frameworks enhances.
Future investigation may evaluate if the execution of the \textsc{re-check} algorithm (which is a simple syntactic check) speeds up the verification framework by avoiding the re-execution of the \textsc{analysis} algorithm in the cases in which revisions satisfy the proofs.
\section{Related work}
\label{sec:related}
\begin{sloppypar}
Partial knowledge has been considered in requirement analysis and elicitation~\cite{menghi2017integrating,menghi2017cover,letier2008deriving},
in novel robotic planners~\cite{10.1007/978-3-319-95582-7_24,menghi2018towards},
and in the production of software models that satisfy a set of desired properties~\cite{uchitel2009synthesis,uchitel2013supporting,famelis2012partial,albarghouthi2012under}.
Several researchers analyzed the model checking problem for partially specified systems~\cite{menghi2016dealing,chechik2004multi},
some considering three-valued~\cite{larsen1988modal,godefroid2001abstraction,bruns1999model,bruns2000generalized,godefroid2011ltl},
others multi-valued~\cite{gurfinkel2003multi,bruns2004MCmultivalued} scenarios.
Other works apply model checking to incremental program development~\cite{henzinger2003extreme,beyer2007software}.
However, all these model checking approaches do not provide an \emph{explanation} on why a property is satisfied, by means of a \emph{certificate} or \emph{proof}.
Although several works have tackled this problem~\cite{Bernasconi2017,cleaveland2002evidence,PZ01,PPZ01,griggio2018certifying,deng2017witnessing},
they aim mostly to automate proof reproducibility rather than actually helping the design process and they usually produce deductive proofs which are different from the topological proofs presented in this paper.
\end{sloppypar}
Tao and Li~\cite{tao2017complexity} propose a theoretical solution to a related issue: \textit{model repair} is the problem of finding the minimum set of states in a KS which makes a formula satisfiable. However, this problem is different from the one addressed in this paper. Furthermore, the framework is only theoretical and based on complete systems.
\emph{Witnesses} have also been proposed in literature as an instrument to explain why a property is satisfied~\cite{Biere:1999:SMC:309847.309942,hong2002temporal,namjoshi2001certifying}.
Given an existential LTL formula and a model $M$, a witness is usually defined as a path that satisfies that formula.
This is different than the notion of topological proof proposed in this work, where a proof is defined as a slice of the model $M$.
We are not aware of any work, except for \cite{Bernasconi2017},
that combines model checking and proofs in a multi-valued context.
In that work, the proposed proofs are verbose, obtained manually, and their effectiveness is not shown in practical applications.
This paper extends~\cite{Bernasconi2017}
by defining topological proofs and model revisions,
and by providing a working and practical environment.
\section{Conclusions}
\label{sec:conclusions}
We have proposed TOrPEDO , an integrated framework that allows a software designer to refine and revise her model proposal in a continuous verification setting. The framework, implemented in a tool for practical use, allows to specify partial models and properties to be verified. It checks these models against the requirements and provides a guide for the designer who wishes to preserve slices of her model that contribute to satisfy fundamental requirements while other components are modified.
For these purposes, the novel notion of topological proof has been formally and algorithmically described. This corresponds to a set of constraints that, if kept when changing the proposed model, ensure that the behavior of the model w.r.t. the property of interest is preserved.
TOrPEDO\ was evaluated by showing the effectiveness of the \textsc{analysis} and \textsc{re-check} algorithms included in the framework. Results showed that proofs are in general easier to understand than the original models and thus TOrPEDO\ can help the design process effectively.
Future work will consider supporting LTL thorough semantics in all phases of the framework and providing a deeper evaluation to show the speed-up offered by the \textsc{re-check} phase w.r.t. \textsc{analysis} re-execution.
\end{document} |
\begin{document}
\begin{abstract}
We consider in the whole plane the following Hamiltonian coupling of Schr\"{o}dinger equations
\begin{equation*}\left\{
\begin{array}{ll}
-\Delta u+V_0u=g(v)\\
-\Delta v+V_0v=f(u)\\
\end{array}
\right. \end{equation*}
where $V_0>0$, $f,g$ have critical growth in the sense of Moser. We prove that the (nonempty) set $S$ of ground state solutions is compact in $H^1(\mathbb R^2)\times H^1(\mathbb R^2)$ up to translations. Moreover, for each $(u,v)\in S$, one has that $u,v$ are uniformly bounded in $L^\infty(\mathbb R^2)$ and uniformly decaying at infinity. Then we prove that actually the ground state is positive and radially symmetric. We apply those results to prove the existence of semiclassical ground state solutions to the singularly perturbed system
\begin{equation*}
\begin{cases}
-\varepsilon^2\Delta \varphi+V(x)\varphi=g(\psi)&\\
-\varepsilon^2\Delta \psi+V(x)\psi=f(\varphi)
\end{cases}
\end{equation*}
where $V\in \mathcal{C}(\mathbb{R}^2)$ is a Schr\"{o}dinger potential bounded away from zero. Namely, as the adimensionalized Planck constant $\varepsilon\to 0$, we prove the existence of minimal energy solutions which concentrate around the closest local minima of the potential with some precise asymptotic rate.
\end{abstract}
\maketitle
\section{Introduction}
\noindent
Consider in the whole $\mathbb{R}^2$ the following system of coupled Schr\"odinger equations
\begin{equation}\label{q1} \left\{
\begin{array}{ll}
\displaystyle -\varepsilon^2\Delta \varphi+V(x)\varphi=\frac{\partial H(\varphi,\psi)}{\partial\psi}\\
\displaystyle -\varepsilon^2\Delta \psi+V(x)\psi=-\frac{\partial H(\varphi,\psi)}{\partial\varphi}
\end{array}
\right.
\end{equation}
where $\varepsilon>0$, the external Schr\"odinger potential $V\in C(\mathbb R^2,\mathbb R)$ enjoys the following condition:
\begin{itemize}
\item [$(V)$] $0<V_0:=\inf_{\mathbb R^2}V(x)<\lim_{|x|\rightarrow\infty}V(x)=V_\infty\leq\infty$.
\end{itemize}
The Hamiltonian has the following form $H(\varphi,\psi)=G(\psi)-F(\varphi)$, with $F(t)=\int_0^t f(\tau)\, \mathrm{d} \tau$ and $G(t)=\int_0^t g(\tau)\, \mathrm{d} \tau$ and the nonlinearities $f,g\in C(\mathbb R,\mathbb R)$ satisfy the following hypotheses:
\begin{itemize}
\item [$(H1)$] $f(t)=o(t)$ and $g(t)=o(t)$, as $t\rightarrow0$;
\item [$(H2)$] There exists $\theta>2$ such that for any $t\not=0$,
\begin{center}
$0<\theta F(t)\le f(t)t$ and $0<\theta G(t)\le g(t)t$;
\end{center}
\item [$(H3)$] There exists $M>0$ such that for any $t\not=0$,
\begin{center}
$0<F(t)\le Mf(t)$ and $0<G(t)\le Mg(t)$;
\end{center}
\item [$(H4)$] $f(t)/|t|$ and $g(t)/|t|$ are strictly increasing for $t\not=0$.
\end{itemize}
\noindent As a consequence of the Pohozaev-Trudinger-Moser inequality for which the Sobolev space $H^1$ embeds into the space of functions such that $e^{\alpha u^2}\in L^1$, the following notion of critical growth in dimension two was first introduced in \cite{AY,DMR} (in the case of bounded domains):
\begin{definition}\label{cgdef} A function $f:\mathbb R^+\rightarrow\mathbb R^+$ has critical growth in the sense of Pohozaev-Trudinger-Moser inequality, if there exists $\alpha_0>0$ such that
\begin{align*}
\lim_{t\to + \infty} \frac {f(t)}{e^{\alpha t^2}}=
\begin{cases}
0 & \text{if } \alpha > \alpha_0
\\
+ \infty & \text{if } \alpha < \alpha_0
\end{cases}
\end{align*}
\end{definition}
\noindent The following growth assumptions will be crucial in what follows:
\begin{itemize}
\item [$(H5)$] $\displaystyle\liminf_{|t|\rightarrow\infty}\frac{tf(t)}{e^{\alpha_0t^2}}\ge\beta_0>\frac{2e}{\alpha_0}V_0$ and $\displaystyle\liminf_{|t|\rightarrow\infty}\frac{tg(t)}{e^{\alpha_0t^2}}\ge\beta_0>\frac{2e}{\alpha_0}V_0.$
\end{itemize}
\noindent It is well known, both from the theoretical point of view as well as from that of applications, that minimal energy solutions, the so-called ground states, play a fundamental role, see e.g. \cite{BF}. In what follows we will focus on this class of solutions. In particular, to investigate the sign of ground state solutions to \eqref{q1}, we require in addition the following condition:
\begin{itemize}
\item [$(H6)$] There exist $p, q>1$ such that $f(t)\geq t^q$ and $g(t)\geq t^p$ for small $t>0$;
\end{itemize}
\noindent As a reference model take $F(t)=|t|^p (\mathrm{e}^{4\pi t^2}-1)$ and $G(t)=|t|^q(\mathrm{e}^{4\pi t^2}-1)$ with $p,q>2$ and $\alpha_0=4\pi$ which clearly satisfy $(H1)$--$(H6)$.
Our main result reads as follows:
\begin{theorem}\label{Th1} Assume condition $(V)$ and that $f,g$ have critical growth in the sense of Definition \ref{cgdef} and satisfy $(H1)$--$(H5)$. Then, for sufficiently small $\varepsilon>0$, \eqref{q1} admits a least energy solution $z_\varepsilon=(\varphi_\varepsilon,\psi_\varepsilon)\in H^1(\mathbb R^2)\times H^1(\mathbb R^2)$. Moreover, the following properties hold:
\begin{itemize}
\item [$(i)$] let $x_\varepsilon^1, x_\varepsilon^2, x_\varepsilon$ be any maximum point of $|\varphi_\varepsilon|, |\psi_\varepsilon|, |\varphi_\varepsilon|+|\psi_\varepsilon|$ respectively, then, setting $$\mathcal{M}\equiv\{x\in \mathbb R^2: V(x)=V_0\}$$ one has
$$
\lim_{\varepsilon\rightarrow
0}\mbox{dist}(x_\varepsilon,\mathcal{M})=0\:\text{ and }\: \lim_{\varepsilon\rightarrow0}|x_\varepsilon^i-x_\varepsilon|=0,\quad i=1,2.
$$
Furthermore, $(\varphi_\varepsilon(\varepsilon x+x_\varepsilon),\psi_\varepsilon(\varepsilon x+x_\varepsilon))$ converges (up to a subsequence) as $\varepsilon\to 0$ to a ground state solution of
\begin{align*}
\left\{
\begin{array}{ll}
-\Delta u+V_0u=g(v)\\
-\Delta v+V_0v=f(u)
\end{array}
\right.
\end{align*}
\item [$(ii)$] if in addition $(H6)$ holds, then replacing $f$ and $g$ above with their odd extensions, for $\varepsilon>0$ small enough, up to changing sign $u_\varepsilon, v_\varepsilon>0$ in $\mathbb R^2$ and $x_\varepsilon^1,x_\varepsilon^2$ are the unique global maximum points of $u_\varepsilon,v_\varepsilon$ respectively and which also enjoy the following
$$\lim_{\varepsilon\rightarrow0}|x_\varepsilon^1-x_\varepsilon^2|/\varepsilon=0.$$
Moreover, for some $c,C>0$ one has $$|\varphi_\varepsilon(x)|\le C\exp(-\frac{c}{\varepsilon}|x-x_\varepsilon^1|),\,\,|\psi_\varepsilon(x)|\le C\exp(-\frac{c}{\varepsilon}|x-x_\varepsilon^2|), \,\, x\in\mathbb R^2;$$
\end{itemize}
\end{theorem}
(Without loss of generality, throughout the paper we may assume $0\in\mathcal{M}$.)
\begin{remark}\label{remark1} Let us point out a few comments on the conditions we assume in Theorem \ref{Th1}:
\begin{itemize}
\item Actually the Ambrosetti-Rabinowitz condition $(H2)$ can be replaced by the following slightly weaker assumption:
\begin{itemize}
\item [$(H2)'$] There exists $\theta>2$ such that for any $t\not=0$,
\begin{center}
$0<2F(t)\le f(t)t$ and $0<\theta G(t)\le g(t)t$,
\end{center}
or equivalently
\begin{center}
$0<\theta F(t)\le f(t)t$ and $0<2G(t)\le g(t)t$.
\end{center}
\end{itemize}
\item We also point out that conditions $(H2)$ and $(H4)$ are weaker than the following assumption:
\begin{itemize}
\item [$(H)$] $f,g\in C^1(\mathbb R,\mathbb R)$ and there exists $\delta'>0$ such that for any $s\not=0$,
$$
0<(1+\delta')f(s)s\le f'(s)s^2\,\,\mbox{and}\,\,\, 0<(1+\delta')g(s)s\le g'(s)s^2
$$
which appears in the literature, see \cite{dsr,Pisto,Ramos1}.
\end{itemize}
\item Hypotheses $(H6)$ and $(H)$ can be also found in \cite{dsr}. Clearly hypothesis $(H)$ yields $sf(s)\le f(1)|s|^{2+\delta'}$ and $sg(s)\le g(1)|s|^{2+\delta'}$ if $|s|\le1$. Let us point out that in the present paper we do not require $sf(s),sg(s)$ to be less than $|s|^r$ near the origin for some $r>2$.
\end{itemize}
\end{remark}
Systems of the form \eqref{q1} have been largely investigated in the last three decades being a prototype in many different applications, where they model for instance the minimal energy interaction between nonlinear fields, see \cite{BF,yang}. The scenario changes remarkably from the higher dimensional case $N\geq 3$ to the planar case $N=2$. In particular, $N=2$ affects the notion of critical growth which is the maximal admissible growth for the nonlinearities in order to preserve the variational structure of the problem; we refer to \cite{dcassani2,CST,CT} for a discussion on this topic and to \cite{bst,ruf} for a survey on systems of the form \eqref{q1} in the case of bounded domains. As far as we are concerned with minimal energy solutions in the whole space, existence results have been first established in \cite{boyan}, see also \cite{Weth}, in the higher dimensional case and then recently extended to $N=2$ in \cite{DJJ}, where the Trudinger-Moser critical case is covered, see also \cite{bsrt,dsr}. Qualitative properties of minimal energy solutions such as symmetry and positivity have been investigated in the higher dimensional case in \cite{Sirakov1,syso,dsr}, see also \cite{dgp,QS} for closely related results. Still in dimension $N\geq 3$, a priori estimates have been obtained in \cite{DY}. A priori bounds open the way to investigate the existence and concentrating behavior, as $\varepsilon\to 0$, of the so-called semiclassical states. From the point of view of Physics, these solutions live on the interface between quantum and classical Mechanics, in the sense that the field behaves like a Newtonian particle as $\varepsilon\to 0$, see \cite{EvansZ} for a survey on the topic and references therein. Semiclassical states for singularly perturbed Schr\"{o}dinger systems have been studied in the higher dimensional case in \cite{ASY,Ramos1,dlz}.
\noindent Finally, let us mention that the question whether the ground state we find is unique seems to be out of reach at the moment. This is still a challenging open problem even in the subcritical case, as well as in higher dimensions, in which uniqueness of positive solutions (not necessarily ground states) is known just in a few particular cases such as Lane-Emden systems \cite{rd}. More generally, the matter of uniqueness of ground states, even in cases in which one has multiplicity of positive solutions, remains open even for the single equation.
\subsection*{Overview} The paper is organized as follows: in Section \ref{lp} we begin with studying a limit problem for system \eqref{q1}. Here we complete the work initiated in \cite{DJJ}, where the existence of ground states is proved, by establishing a priori estimates, regularity, symmetry and qualitative properties of solutions. Here we use a suitable Nehari manifold approach in the spirit of Pankov \cite{Pankov} combined with Moser type techniques, as everything is set in dimension two and in presence of Moser critical growth. In particular we exploit those preliminary results to prove positivity of ground states solutions in a quite general setting, as developed throughout Section \ref{sign_s}. Then, Section \ref{semiclassical_s} is devoted to applying the information previously obtained on the limit problem, to analyze the concentrating behavior of semiclassical solutions from the point of view of localizing bumps as well as of deriving the asymptotic rate of concentration. Here the presence of critical Moser's growth requires some delicate energy estimates which we then apply to establish compactness.
\section{The limit problem}\label{lp}
\noindent
By denoting $u_\varepsilon(x)=\varphi(\varepsilon x),v_\varepsilon(x)=\psi(\varepsilon x)$ and $V_\varepsilon(x)=V(\varepsilon x)$, \re{q1} is equivalent to
\begin{align*} \left\{
\begin{array}{ll}
-\Delta u_\varepsilon+V_\varepsilon(x)u_\varepsilon=g(v_\varepsilon)\\
-\Delta v_\varepsilon+V_\varepsilon(x)v_\varepsilon=f(u_\varepsilon)
\end{array}
\right. \end{align*}
in the whole plane. Let $x_0\in\mathbb R^2$ and assume $u_\varepsilon(\cdot+\frac{x_0}{\varepsilon})\rightarrow u$, $v_\varepsilon(\cdot+\frac{x_0}{\varepsilon})\rightarrow v$ in
$C^1_{loc}(\mathbb R^2)$; if $V_0=V(x_0)$ then one has
\begin{equation}\label{q11} \left\{
\begin{array}{ll}
-\Delta u+V_0u=g(v)\\
-\Delta v+V_0v=f(u)
\end{array}
\right. \end{equation}
which is the so-called limit problem of \re{q1}. Recently, D.\ G. De\ Figueiredo, J. M. do \'O and J. Zhang established in \cite{DJJ} the existence of ground state solutions to \re{q11}, precisely
\begin{theoremletter}\label{a}
{\rm (Theorem 1.3 in \cite{DJJ})} {\it Suppose that $f,g$ have critical growth and satisfy $(H1)$--$(H5)$. Then \eqref{q11} admits a ground state solution $(u,v)\in H^1(\mathbb R^2)\times H^1(\mathbb R^2)$.}
\end{theoremletter}
\noindent Denote by $\mathcal{S}$ the set of ground state solutions to system \re{q11}, then by Theorem A $\mathcal{S}\not=\emptyset$. Here we investigate the regularity and qualitative properties of the ground state solutions to \re{q11}. Precisely, we prove the following results:
\begin{theorem}\label{Th2} Suppose $f,g$ have critical growth and satisfy $(H1)$--$(H5)$. Then the following hold true:
\begin{itemize}
\item [$(i)$] $(u,v)\in \mathcal{S}\Rightarrow u,v\in L^{\infty}(\mathbb R^2)\cap C_{loc}^{1,\gamma}(\mathbb R^2)$ for some $\gamma\in(0,1)$;
\item [$(ii)$] let $x_z\in\mathbb R^2$ be the maximum point of $|u(x)|+|v(x)|$, then the set $$\{(u(\cdot+x_z),v(\cdot+x_z))\,|\, (u,v)\in \mathcal{S}\}$$ is compact in $H^1(\mathbb R^2)\times H^1(\mathbb R^2)$;
\item [$(iii)$] $0<\inf\{\|u\|_{\infty},\|v\|_{\infty}\,|\, (u,v)\in \mathcal{S}\}\le \sup\{\|u\|_{\infty},\|v\|_{\infty}\,|\, (u,v)\in \mathcal{S}\}<\infty$;
\item [$(iv)$] $u(x+x_z)\rightarrow 0$ and $v(x+x_z)\rightarrow 0$, as $|x|\rightarrow\infty$ uniformly for any $z=(u,v)\in \mathcal{S}$, where $x_z$ is given in $(ii)$;
\item [$(v)$] for any $(u,v)\in \mathcal{S}$, the following Pohozaev-type identity holds
$$
\int_{\mathbb R^2}(F(u)+G(v)-V_0uv)\,\mathrm{d} x=0.
$$
\end{itemize}
\end{theorem}
\begin{theorem}\label{sign} Assume in addition to the hypotheses of Theorem \ref{Th2} that also $(H6)$ holds. Then, replacing $f$ and $g$ in Theorem \ref{Th2} with their odd extensions, for any $(u,v)\in\mathcal{S}$ one has $u,v\in C^2(\mathbb R^2)$ and $uv>0$ in $\mathbb R^2$. Moreover, there exists some point
$x_0\in\mathbb R^2$ such that $u,v$ are radially symmetric with respect to the same point $x_0$, namely $u(x)=u(|x-x_0|)$, $v(x)=v(|x-x_0|)$ and setting $r=|x-x_0|$, one has for $r>0$
$$
\frac{\partial u}{\partial r}<0\quad \text{ and }\quad \frac{\partial v}{\partial r}<0
$$
as well as
$$\Delta u(x_0)<0\quad \text{ and }\quad \Delta v(x_0)<0.$$
Moreover, there exist $C,c>0$, independent of $z=(u,v)\in \mathcal{S}$, such that $$|D^{\alpha}u(x)|+|D^{\alpha}v(x)|\le C\exp(-c|x-x_0|),\quad x\in \mathbb R^2,\,|\alpha|=0,1.$$
\end{theorem}
\subsection{The functional setting: a generalized Nehari manifold}
\renewcommand{\theequation}{2.\arabic{equation}}
Let $H^1(\mathbb R^2)$ be the usual Sobolev space endowed with the inner product
$$
(u,v)_{H^1}:=\int_{\mathbb R^2}\nabla u\nabla v+V_0uv,\ \ \|u\|_{H^1}^2:=(u,u)_{H^1},\quad u,v\in H^1(\mathbb R^2),
$$
and set $E=H^1(\mathbb R^2)\times H^1(\mathbb R^2)$ with the inner product
$$
(z_1,z_2):=(u_1,u_2)_{H^1}+(v_1,v_2)_{H^1},\ \ z_i=(u_i,v_i)\in E, i=1,2.
$$
Clearly we have the space decomposition $E=E^+\oplus E^-$, where
$$
E^+:=\{(u,u)\,|\, u\in H^1(\mathbb R^2)\}\ \ \ \mbox{and}\ \ \ E^-:=\{(u,-u)\,|\, u\in H^1(\mathbb R^2)\}.
$$
For each $z=(u,v)\in E$, one has $$z=z^++z^-=((u+v)/2,(u+v)/2)+((u-v)/2,(v-u)/2).$$
\vskip0.1in
Weak solutions to \eqref{q11} are the critical points of the associated energy functional
$$
\Phi(z):=\int_{\mathbb R^2}\nabla u\nabla v+V_0uv-I(z),\ \ z=(u,v)\in E,
$$
where $I(z)=\int_{\mathbb R^2}F(u)+G(v)$.
Using the above notation we have
\begin{equation}\label{y1}
\Phi(z):=\frac{1}{2}\|z^+\|^2-\frac{1}{2}\|z^-\|^2-I(z),
\end{equation}
which emphasizes the strongly indefinite nature of $\Phi$ which however, by the hypotheses on $f$ and $g$, is of class $C^1(E,\mathbb R)$ and
\begin{equation}\label{y2}
I(0)=0, \ \langle I'(z), z\rangle>2I(z)>0,\ \ \mbox{for all}\ \ z\in E\setminus\{0\}.
\end{equation}
On one hand, if $z=(u,v)\in E\setminus \{0\}$ is such that $\Phi'(z)=0$, then by $(H2)$
\begin{equation*}
\Phi(z)=\Phi(z)-\frac{1}{2}\langle \Phi'(z),z\rangle=\int_{\mathbb R^2}\frac{1}{2}f(u)u-F(u)+\frac{1}{2}g(v)v-G(v)>0.
\end{equation*}
On the other hand, if $z=(u,-u)\in E^-$, we have by $(H2)$
$$
\Phi(z)=-\int_{\mathbb R^2}(|\nabla u|^2+V_0u^2)-\int_{\mathbb R^2} F(u)+G(-u)\le 0.
$$
As a consequence, if $z\in E$ is a nontrivial critical point of $\Phi$, then necessarily $z\in E\setminus E^-$. This motivates the introduction of the following generalized Nehari manifold, due to Pankov \cite{Pankov} and then used also in \cite{Szulkin, Weth, DJJ}:
$$
\mathcal{N}:=\{z\in E\setminus E^-: \langle \Phi'(z),z\rangle=0, \langle \Phi'(z),\varphi\rangle=0\ \mbox{for all}\ \ \varphi\in E^-\}.
$$
Let
$$
c_\ast:=\inf_{z\in\mathcal{N}}\Phi(z),
$$
then $c_\ast$ is called the least energy level of system \re{q11}. In \cite{DJJ} the authors proved that $c_\ast\in(0,4\pi/{\alpha_0})$ and that it is achieved on $\mathcal{N}$.
\subsection{Proof of Theorem \ref{Th2}}
\renewcommand{\theequation}{3.\arabic{equation}}
Let $\{z_n\}\subset \mathcal{S}$, namely
\begin{equation}\label{pss}
\Phi(z_n)=c_\ast \ \ \mbox{and}\ \ \Phi'(z_n)=0, \quad \forall n\in\mathbb{N}.
\end{equation}
We carry out the proof of $(ii)$ of Theorem \ref{Th2} through the following four steps:
\begin{itemize}
\item We first prove that $\{z_n\}$ is bounded in $E$ (Proposition \ref{o1});
\item In Proposition \ref{nv} we prove that there exist $\{y_n\}\subset\mathbb R^2$ and $z_0\not={\bf 0}$ such that $z_n(\cdot+y_n)\rightharpoonup z_0$ in $E$ and $z_n(\cdot+y_n)\xrightarrow{a.e.}z_0$ in
$\mathbb R^2$, as $n\rightarrow\infty$;
\item In Proposition \ref{o11} we show that $z_0$ is actually a critical point of $\Phi$;
\item Finally in Proposition \ref{con} we prove that $z_0\in \mathcal{S}$ and that actually $z_n(\cdot+y_n)\longrightarrow z_0$ strongly in $E$, as $n\to \infty$.
\end{itemize}
In the proof of Proposition \ref{o1} below we will use the following lemma which we borrow from \cite{Fi}:
\begin{lemmaletter}\label{RUF1} {\it The following inequality holds
\[
s\,t\leq \left\{
\begin{array}{ll}
(e^{t^{2}}-1)+s(\log s)^{1/2}, \; & \text{ for
all }t\geq 0 \text{ and }s\geq e^{1/4}; \\
(e^{t^{2}}-1)+\frac{1}{2}s^{2}, \; & \text{ for all } t \geq 0
\text{ and }0 \leq s\leq e^{1/4}.
\end{array}
\right.
\]}
\end{lemmaletter}
\noindent The proofs of Propositions \ref{o1} and \ref{o11} are similar to those in \cite{DJJ}; however, for the sake of completeness we give the details.
\begin{proposition}\label{o1} There exists $C>0$ such that for all $n\in\mathbb{N}$:
\begin{itemize}
\item [$1)$] $\|z_n\|=\|(u_n,v_n)\|\le C$;
\item [$2)$] $\int_{\mathbb R^2}f(u_n)u_n\, \mathrm{d} x\le C$ and $\int_{\mathbb R^2}g(v_n)v_n\, \mathrm{d} x\le C$;
\item [$3)$] $\int_{\mathbb R^2}F(u_n)\, \mathrm{d} x\le C$ and $\int_{\mathbb R^2}G(v_n)\, \mathrm{d} x\le C$.
\end{itemize}
\end{proposition}
\begin{proof}
From $\langle\Phi'(z_n),z_n\rangle=0$ we have
\begin{equation}\label{pitomba}
2 \int_{\mathbb R^2} (\nabla u_n \nabla v_n+V_0u_nv_n)\, \mathrm{d} x - \int_{\mathbb R^2}
f(u_n)u_n\, \mathrm{d} x - \int_{\mathbb R^2} g(v_n)v_n \, \mathrm{d} x =0.
\end{equation}
Recalling that
$$
\Phi(z_n)=\int_{\mathbb R^2}(\nabla u_n \nabla v_n+V_0u_nv_n)\, \mathrm{d} x-\int_{\mathbb R^2}(F(u_n)+G(v_n))\, \mathrm{d} x=c_\ast
$$
we obtain by $(H3)$ the following
\begin{align*}
\int_{\mathbb R^2} [f(u_n)u_n + g(v_n)v_n]\, \mathrm{d} x &= 2\int_{\mathbb R^2} [F(u_n) + G(v_n)]\, \mathrm{d} x + 2c_\ast \\
& \leq \frac{2}{\theta}\int_{\mathbb R^2} [f(u_n)u_n + g(v_n)v_n]\, \mathrm{d} x + 2c_\ast.
\end{align*}
Thus
\begin{equation}\label{goiaba}
\int_{\mathbb R^2} [f(u_n)u_n + g(v_n)v_n]\, \mathrm{d} x \leq \frac{2c_\ast\theta}{\theta-2}.
\end{equation}
From $ \langle\Phi'(z_n),(v_n,0)\rangle=0$ and $ \langle\Phi'(z_n),(0,u_n)\rangle=0$, we have
$$
\| v_n \|^2-\int_{\mathbb R^2}
f(u_n)v_n\, \mathrm{d} x=0\ \ \mbox{and}\ \ \| u_n \|^2-\int_{\mathbb R^2}
g(v_n)u_n\, \mathrm{d} x=0.
$$
Let $U_n=u_n/ \| u_n\|$ and $ V_n = v_n / \| v_n \| $, then
\begin{align}
\| v_n \| & = \int_{\mathbb R^2}
f(u_n)V_n\, \mathrm{d} x \label{laranja},\\
\| u_n \| & = \int_{\mathbb R^2}
g(v_n)U_n\, \mathrm{d} x . \label{limao}
\end{align}
By $(H1)$, there exist $\beta>0$ and $C_\beta>0$ such that
$$
f(t)\le C_\beta e^{\beta t^2}\quad \text{ and }\quad g(t)\le C_\beta e^{\beta t^2}\ \ \mbox{for all} \ \ t\ge0.
$$
Moreover, there exists $C_1>0$ such that for all $n$
$$
f(u_n(x))\le C_1 u_n(x)\ \ \mbox{for}\ \ x\in\{x \in \mathbb R^2 : f(u_n(x))/C_\beta \leq e^{1/4}\}.
$$
Setting $ t = V_n $ and $ s =
f(u_n)/C_\beta $ in Lemma \ref{RUF1}, then by $(H1)$-$(H2)$ together with the Pohozaev-Trudinger-Moser
inequality, we get
\begin{align*}
\int_{\mathbb R^2} f(u_n)V_n\, \mathrm{d} x& \leq C_\beta \int_{\{x \in \mathbb R^2 : f(u_n(x))/C_\beta \geq e^{1/4} \}}
\frac{1}{C_\beta}f(u_n) \left[\log (\frac{1}{C_\beta} f(u_n))\right]^{1/2}\, \mathrm{d} x \\
&+ \frac{1}{2}\int_{\{x \in \mathbb R^2 : f(u_n(x))/C_\beta \leq e^{1/4} \}}
\frac{1}{C_\beta} (f(u_n))^2\, \mathrm{d} x+C_\beta \int_{\mathbb R^2} (e^{V_n^{2}}-1)
\, \mathrm{d} x \\
& \leq C_2 + (\beta^{1/2}+C_1/(2C_\beta)) \int_{ \mathbb R^2 } f(u_n) u_n \, \mathrm{d} x,
\end{align*}
for some constant $ C_2>0$. This estimate together with
(\ref{laranja}) implies, for some constant $ c_1 > 0 $, that
\begin{equation}\label{maravilha}
\| v_n \| \leq c_1(1 + \int_{ \mathbb R^2 } f(u_n) u_n\, \mathrm{d} x)
\end{equation}
and similarly
\begin{equation}\label{macacheira}
\| u_n \| \leq c_1(1 + \int_{ \mathbb R^2 } g(v_n) v_n\, \mathrm{d} x).
\end{equation}
From \re{maravilha}, \re{macacheira} and \re{goiaba} the first claim $1)$ follows. Then, by (\ref{goiaba}) and $(H3)$ we
obtain the remaining bounds $2)$ and $3)$.
\end{proof}
Next we prove that, up to translations, $\{z_n\}$ has a nontrivial weak limit. Clearly $(u_n,v_n)$ satisfies just one of the following conditions:
\begin{itemize}
\item[] ({\it Vanishing})
$
\quad \lim_{n\rightarrow\infty}\sup_{y\in\mathbb R^2}\int_{B_R(y)}(u_n^2+v_n^2)\, \mathrm{d} x=0\ \ \mbox{for all}\ \ R>0;
$
\vspace{0.1cm}
\item[] ({\it Nonvanishing}) there exist $\nu>0$, $R_0>0$ and $\{y_n\}\subset\mathbb R^2$ such that
$$
\lim_{n\rightarrow\infty}\int_{B_{R_0}(y_n)}(u_n^2+v_n^2)\, \mathrm{d} x\ge\nu.
$$
\end{itemize}
We borrow from \cite{Fi} the following lemma:
\begin{lemmaletter}\label{l3.3}{\it
Let $\Omega\subset\mathbb R^2$ be a bounded domain and $f\in C(\mathbb R,\mathbb R)$. Let $\{u_n\}\subset L^1(\Omega)$ be such that $u_n\rightarrow u$ strongly in $L^1(\Omega)$,
$$
f(u_n)\in L^1(\Omega)\ \ \mbox{and}\ \ \ \int_{\Omega}|f(u_n)u_n|\, \mathrm{d} x\le C, \quad n\ge1,
$$
for some $C>0$. Then, up to a subsequence we have
$$
f(u_n)\rightarrow f(u)\ \ \mbox{strongly in}\ \ L^1(\Omega)\ \ \mbox{as}\ \ n\rightarrow\infty.
$$}
\end{lemmaletter}
\begin{proposition}\label{nv}
Vanishing does not occur.
\end{proposition}
\begin{proof} We know from \cite{DJJ} that $c_\ast\in(0,4\pi/\alpha_0)$, hence for some $\delta>0$ sufficiently small one has $c_\ast\in(0,4\pi/\alpha_0-\delta)$.
Assume by contradiction that vanishing occurs, namely
$$
\lim_{n\rightarrow\infty}\sup_{y\in\mathbb R^2}\int_{B_R(y)}(u_n^2+v_n^2)\, \mathrm{d} x=0\ \ \mbox{for all}\ \ R>0,
$$
then Lions's lemma \cite{lionslemma} yields $u_n\rightarrow0, v_n\rightarrow0$ strongly in $L^s(\mathbb R^2)$ for any $s>2$.
Let us divide the proof into two steps:
\vskip0.1in
{\bf Step 1.} We claim
$$
\lim_{n\rightarrow\infty}\int_{\mathbb R^2}F(u_n)\, \mathrm{d} x=0\ \ \mbox{and}\ \ \lim_{n\rightarrow\infty}\int_{\mathbb R^2}G(v_n)\, \mathrm{d} x=0.
$$
Indeed, by Lemma \ref{l3.3}, for any $R>0$ one has $f(u_n)\rightarrow0$ and $g(v_n)\rightarrow0$ strongly in $L^1(B_R(0))$ as $n\rightarrow\infty$. Then by $(H3)$ and the Lebesgue dominated convergence theorem,
\begin{equation}\label{y4.1}
\lim_{n\rightarrow\infty}\int_{B_R(0)}F(u_n)\, \mathrm{d} x=0\ \ \mbox{and}\ \ \lim_{n\rightarrow\infty}\int_{B_R(0)}G(v_n)\, \mathrm{d} x=0.
\end{equation}
In order to prove the claim, it is enough to prove that for any $\varepsilon>0$, there exists $R>0$ such that for $n$ large enough,
\begin{equation}\label{y4.2}
\int_{\mathbb R^2\setminus B_R(0)}F(u_n)\, \mathrm{d} x\le\varepsilon\quad\text{and}\quad \int_{\mathbb R^2\setminus B_R(0)}G(v_n)\, \mathrm{d} x\le\varepsilon.
\end{equation}
By $(H3)$ and Proposition \ref{o1}, for any $K>0$ and $n$,
$$
\int_{\{x\in\mathbb R^2\setminus B_R(0): |u_n(x)|\ge K\}}F(u_n)\le\frac{M}{K}\int_{\{x\in\mathbb R^2\setminus B_R(0): |u_n(x)|\ge K\}}f(u_n)u_n\le\frac{MC}{K}.
$$
Then choosing $K>0$ large enough, we get that for all $n$
\begin{equation}\label{y4.3}
\int_{\{x\in\mathbb R^2\setminus B_R(0): |u_n(x)|\ge K\}}F(u_n)\le\frac{\varepsilon}{2}.
\end{equation}
By $(H1)$, for any $\rho>0$ there exists $C_{\rho,K}>0$ such that
$$
F(t)\le \rho t^2+C_{\rho,K}t^4,\ \ \ |t|\le K.
$$
Recalling that $u_n\rightarrow 0$ strongly in $L^4(\mathbb R^2)$, we obtain
$$
\limsup_{n\rightarrow\infty}\int_{\{x\in\mathbb R^2\setminus B_R(0): |u_n(x)|\le K\}}F(u_n)\le\rho\sup_{n}\|u_n\|_2^2.
$$
By Proposition \ref{o1} and since $\rho$ is arbitrary, for $n$ large enough we get
\begin{equation}\label{y4.4}
\int_{\{x\in\mathbb R^2\setminus B_R(0): |u_n(x)|\le K\}}F(u_n)\le\frac{\varepsilon}{2}.
\end{equation}
Thus \re{y4.3} and \re{y4.4} yield the first bound in \re{y4.2} and similarly one gets the second bound.
\vskip0.1in
{\bf Step 2.} We claim that $c_\ast=0$, from which the contradiction follows as we know $c_*>0$. We need the following inequality used in \cite[Lemma 4.1]{Souza}
\begin{equation}\label{inequa}
t\ s\le t^2(e^{t^2}-1)+s(\log{s})^{\frac{1}{2}},\ \ \mbox{for all}\ \ (t,s)\in[0,\infty)\times[e^{\frac{1}{\sqrt[3]{4}}},\infty).
\end{equation}
By Step 1,
\begin{equation}\label{y4.5}
\lim_{n\rightarrow\infty}\int_{\mathbb R^2}(\nabla u_n\nabla v_n+V_0u_nv_n)=c_*\,.
\end{equation}
If $u_n\rightarrow 0$ or $v_n\rightarrow0$ strongly in $H^1(\mathbb R^2)$ as $n\rightarrow\infty$, then \re{y4.5} directly yields $c_*=0$. Therefore, let us assume $\inf_{n\ge1}\|u_n\| \ge b > 0$. Note that
\begin{equation}\label{unvn4}
\| u_n \|^2 = \int_{\mathbb R^2} g(v_n) u_n \, \mathrm{d} x.
\end{equation}
By $(H1)$, for any fixed $\varepsilon>0$, there exists $C_\varepsilon>0$ such that
$$
f(t), g(t)\le C_\varepsilon e^{(\alpha_0+\varepsilon)t^2}\ \ \mbox{for}\ \ t\ge 0.
$$
Let $ \overline{u}_n = (4\pi/\alpha_0 - \delta)^{1/2} u_n / \|
u_n \|$ and using the inequality \re{inequa} with $s =
g(v_n)/C_\varepsilon$ and $t = \sqrt{\alpha_0}\, |\overline{u}_n|$,
{\allowdisplaybreaks
\begin{align*}
&(4\pi/\alpha_0 - \delta)^{1/2} \| u_n \| \le \int_{\mathbb R^2}g(v_n)|\overline{u}_n|\, \mathrm{d} x \\
&=\frac{C_\varepsilon}{\sqrt{\alpha_0}}\int_{\mathbb R^2}\frac{g(v_n)}{C_\varepsilon}\sqrt{\alpha_0}|\overline{u}_n|\, \mathrm{d} x \\
& \leq\frac{C_\varepsilon}{\sqrt{\alpha_0}}\int_{\{x \in\mathbb R^2 : g(v_n(x))/C_\varepsilon \geq e^{1/\sqrt[3]{4}} \}}
\frac{g(v_n)}{C_\varepsilon} [\log(\frac{ g(v_n)}{C_\varepsilon})]^{1/2}\, \mathrm{d} x \\
&\ \ +\int_{\{x \in\mathbb R^2 : g(v_n(x))/C_\varepsilon \leq e^{1/\sqrt[3]{4}} \}}g(v_n)|\overline{u}_n|\, \mathrm{d} x+C_\varepsilon\sqrt{\alpha_0}\int_{\mathbb R^2}\overline{u}_n^2 ( e^{\alpha_0 \overline{u}_n^{2}} - 1 )\, \mathrm{d} x\\
& \leq\sqrt{\frac{\alpha_0+\varepsilon}{\alpha_0}}\int_{\{x \in\mathbb R^2 : g(v_n(x))/C_\varepsilon \geq e^{1/\sqrt[3]{4}} \}}
g(v_n)v_n\, \mathrm{d} x+C_\varepsilon\sqrt{\alpha_0}\int_{\mathbb R^2}\overline{u}_n^2 ( e^{\alpha_0 \overline{u}_n^{2}} - 1 )\, \mathrm{d} x \\
&\ \ +\int_{\{x \in\mathbb R^2 : g(v_n(x))/C_\varepsilon \leq e^{1/\sqrt[3]{4}} \}}g(v_n)|\overline{u}_n|\, \mathrm{d} x\\
&\le \sqrt{\frac{\alpha_0+\varepsilon}{\alpha_0}}\int_{\mathbb R^2}g(v_n)v_n\, \mathrm{d} x+I_{1,n}+I_{2,n}.
\end{align*}
}
Recall that $\overline{u}_n\rightarrow0$ strongly in $L^s(\mathbb R^2)$ for any $s>2$. Since $\| \overline{u}_n \|^2 = 4\pi/\alpha_0 - \delta$, there exists $p>1$ (close to 1) such that $p\alpha_0(4\pi/\alpha_0 - \delta)<4\pi$. Thus,
by the Pohozaev-Trudinger-Moser inequality, as $n\rightarrow\infty$,
$$
I_{1,n}\le C_\varepsilon\sqrt{\alpha_0}\left(\int_{\mathbb R^2}|\overline{u}_n|^{2q}\right)^{1/q}\left(\int_{\mathbb R^2}( e^{p\alpha_0 \overline{u}_n^{2}}-1)\right)^{1/p}\rightarrow0,
$$
where $1/p+1/q=1$, namely, $I_{1,n}=o_n(1)$. Note that by $(H1)$-$(H2)$, for any $\rho>0$, there exists $C_{\rho,\varepsilon}>0$ such that
$$
g(v_n(x))\le\rho|v_n(x)|+C_{\rho,\varepsilon}v_n^2,\ \ \mbox{for any}\ x\in\mathbb R^2\ \mbox{with}\ \ g(v_n(x))/C_\varepsilon \leq e^{1/\sqrt[3]{4}}.
$$
Then
\begin{align*}
I_{2,n}\le\int_{\mathbb R^2}(\rho|v_n|+C_{\rho,\varepsilon}v_n^2)|\overline{u}_n|\le\left[\rho\left(\int_{\mathbb R^2}|v_n|^2\right)^{1/2}+
C_{\rho,\varepsilon}\left(\int_{\mathbb R^2}|v_n|^4\right)^{1/2}\right]\left(\int_{\mathbb R^2}|\overline{u}_n|^2\right)^{1/2}.
\end{align*}
Recalling $v_n\rightarrow0$ strongly in $L^4(\mathbb R^2)$,
$$
\limsup_{n\rightarrow\infty}I_{2,n}\le C'\rho,
$$
where $C'>0$ is independent of $\rho$. By the arbitrary choice of $\rho$, $I_{2,n}=o_n(1)$. Hence,
\begin{equation}\label{barao4}
(4\pi/\alpha_0 - \delta)^{1/2} \| u_n \| \leq o_n(1) +
(1 + \frac{\varepsilon}{\alpha_0})^{1/2}\int_{\mathbb R^2 } g(v_n) v_n .
\end{equation}
Similarly, we have
\begin{equation}\label{CPV4}
(4\pi/\alpha_0 - \delta)^{1/2} \| v_n \| \leq o_n(1) +
(1 + \frac{\varepsilon}{\alpha_0})^{1/2} \int_{\mathbb R^2 } f(u_n) u_n.
\end{equation}
Note that
$$
\langle\Phi'(z_n),z_n\rangle=2\int_{\mathbb R^2}(\nabla u_n \nabla v_n+V_0u_nv_n)-\int_{ \mathbb R^2 } f(u_n) u_n - \int_{ \mathbb R^2 } g(v_n) v_n=0
$$
and that by \re{y4.5} we get
$$\int_{ \mathbb R^2 } f(u_n) u_n+\int_{ \mathbb R^2 } g(v_n) v_n=2c_\ast+o_n(1).$$
It follows from (\ref{barao4})-(\ref{CPV4}) that
$$
(4\pi/\alpha_0 - \delta)^{1/2}(\| u_n \|_{H^1} + \| v_n \|_{H^1})\le 2c_\ast(1 + \frac{\varepsilon}{\alpha_0})^{1/2}+o_n(1).
$$
Since $c_\ast<4\pi/\alpha_0 - \delta$, for $\varepsilon > 0$ sufficiently small, when $n$ is large enough we have
$$
\| u_n \|_{H^1} + \| v_n \|_{H^1}\le 2(4\pi/\alpha_0 - \delta/2)^{1/2}.
$$
Then similarly as above, by the Trudinger-Moser inequality and $u_n\rightarrow0$ strongly in $L^q(\mathbb R^2)$ for any $q>2$, we have $\int_{\mathbb R^2}g(v_n)u_n\rightarrow0$, which implies by (\ref{unvn4}) that
$ u_n \rightarrow 0 $ strongly in $ H^1(\mathbb R^2)$. Thus, it follows from \re{y4.5} that $c_\ast=0$, a contradiction; hence vanishing does not occur.
\end{proof}
\noindent As a consequence of Proposition \ref{nv}, up to a subsequence, there exist $\{y_n\}\subset\mathbb R^2$ and $z_0\not\equiv 0$ such that $z_n(\cdot+y_n)\rightharpoonup z_0$ in $E$ and $z_n(\cdot+y_n)\xrightarrow{a.e.}z_0$ in $\mathbb R^2$, as $n\rightarrow\infty$.
\begin{proposition}\label{o11}
The weak limit $z_0$ is a critical point of $\Phi$.
\end{proposition>
\begin{proof}
By $(H1)$, there exist $a>0$ and $\alpha>\alpha_0$ such that
$$
|f(t)|\le a|t|+(e^{\alpha t^2}-1)\ \ \mbox{for all} \ \ t\in\mathbb R.
$$
Then by the Pohozaev-Trudinger-Moser inequality $f(\bar{u}_n)\in L_{loc}^1(\mathbb R^2)$ and $g(\bar{v}_n)\in L_{loc}^1(\mathbb R^2)$, where $\bar{z}_n=(\bar{u}_n,\bar{v}_n)=(u_n(\cdot+y_n),v_n(\cdot+y_n))$. From Lemma \ref{l3.3} and Proposition \ref{o1} we get, as $n\rightarrow\infty$,
$$
\int_{\mathbb R^2}(f(\bar{u}_n)\varphi+g(\bar{v}_n)\phi)\rightarrow\int_{\mathbb R^2}(f(u_0)\varphi+g(v_0)\phi),
$$
for any $(\varphi,\phi)\in C_0^\infty(\mathbb R^2)\times C_0^\infty(\mathbb R^2)$. Noting that $\Phi'(\bar{z}_n)=0$, it follows that
$$
\int_{\mathbb R^2}(\nabla u_0\nabla \phi+\nabla v_0\nabla \varphi+V_0u_0\phi+V_0v_0\varphi)\, \mathrm{d} x=\int_{\mathbb R^2}(f(u_0)\varphi+g(v_0)\phi)\, \mathrm{d} x,
$$
for any $(\varphi,\phi)\in C_0^\infty(\mathbb R^2)\times C_0^\infty(\mathbb R^2)$. Thus, $\Phi'(z_0)=0$ in $E$ and $z_0=(u_0,v_0)$ is a critical point of $\Phi$.
\end{proof}
\vskip0.1in
\begin{proposition}\label{con}
$z_0\in \mathcal{S}$ and $z_n(\cdot+y_n)\longrightarrow z_0$ in $E$, as $n\rightarrow\infty$; thus $\mathcal{S}$ is a compact set.
\end{proposition}
\begin{proof} Thanks to the invariance of $\Phi$ by translation, let us write for simplicity $z_n$ in place of $z_n(\cdot+y_n)$ and let $z_n=(u_n,v_n)$, $z_0=(u_0,v_0)$. By $(H2)$, $f(s)s-2F(s)\ge0$ and $g(s)s-2G(s)\ge0$ for any $s\in\mathbb R$. Then by Fatou's Lemma,
{\allowdisplaybreaks
\begin{align}\label{fatou}
c_\ast&=\Phi(z_n)-\frac{1}{2}\langle \Phi'(z_n),z_n\rangle\nonumber\\
&=\lim_{n\rightarrow\infty}\left(\int_{\mathbb R^2}\frac{1}{2}f(u_n)u_n-F(u_n)+\int_{\mathbb R^2}\frac{1}{2}g(v_n)v_n-G(v_n)\right)\nonumber\\
&\ge\limsup_{n\rightarrow\infty}\int_{\mathbb R^2}\frac{1}{2}f(u_n)u_n-F(u_n)+\liminf_{n\rightarrow\infty}\int_{\mathbb R^2}\frac{1}{2}g(v_n)v_n-G(v_n)\nonumber\\
&\ge\liminf_{n\rightarrow\infty}\int_{\mathbb R^2}\frac{1}{2}f(u_n)u_n-F(u_n)+\liminf_{n\rightarrow\infty}\int_{\mathbb R^2}\frac{1}{2}g(v_n)v_n-G(v_n)\\
&\ge\int_{\mathbb R^2}\frac{1}{2}f(u_0)u_0-F(u_0)+\int_{\mathbb R^2}\frac{1}{2}g(v_0)v_0-G(v_0)\nonumber\\
&=\Phi(z_0)-\frac{1}{2}\langle \Phi'(z_0),z_0\rangle=\Phi(z_0).\nonumber
\end{align}}
On the other hand, since $z_0\not\equiv 0$ and $\Phi'(z_0)=0$ one has $\Phi(z_0)\ge c_\ast$. Therefore,
$z_0$ is a ground state solution of \eqref{q11}, namely, $z_0\in \mathcal{S}$.
\vskip0.1in
\noindent Next we prove that $z_n\rightarrow z_0$ in $E$. By \re{fatou} and $\Phi(z_0)=c_\ast$ we have
\begin{equation}\label{ff}
\lim_{n\rightarrow\infty}\int_{\mathbb R^2}\frac{1}{2}f(u_n)u_n-F(u_n)=\int_{\mathbb R^2}\frac{1}{2}f(u_0)u_0-F(u_0)
\end{equation}
and
\begin{equation}\label{gg}
\lim_{n\rightarrow\infty}\int_{\mathbb R^2}\frac{1}{2}g(v_n)v_n-G(v_n)=\int_{\mathbb R^2}\frac{1}{2}g(v_0)v_0-G(v_0).
\end{equation}
By $(H2)$ we get
$$
0\le\frac{\theta-2}{2}F(u_n)\le \frac{1}{2}f(u_n)u_n-F(u_n),\quad 0\le\frac{\theta-2}{2}G(v_n)\le \frac{1}{2}g(v_n)v_n-G(v_n), \quad n\geq 1
$$
and the Lebesgue dominated convergence theorem, together with \re{ff} and \re{gg} yields
\begin{equation}\label{fg1}
\lim_{n\rightarrow\infty}\int_{\mathbb R^2}F(u_n)=\int_{\mathbb R^2}F(u_0),\quad \lim_{n\rightarrow\infty}\int_{\mathbb R^2}G(v_n)=\int_{\mathbb R^2}G(v_0).
\end{equation}
Then, by \re{ff} and \re{gg} one has
\begin{equation}\label{fg2}
\lim_{n\rightarrow\infty}\int_{\mathbb R^2}f(u_n)u_n=\int_{\mathbb R^2}f(u_0)u_0,\quad \lim_{n\rightarrow\infty}\int_{\mathbb R^2}g(v_n)v_n=\int_{\mathbb R^2}g(v_0)v_0.
\end{equation}
Since $z_n,z_0\in \mathcal{S}$, we have
$$
\int_{\mathbb R^2}\nabla u_n\nabla v_n+V_0u_nv_n=c_\ast+\int_{\mathbb R^2}F(u_n)+G(v_n),
$$
$$
\int_{\mathbb R^2}\nabla u_0\nabla v_0+V_0u_0v_0=c_\ast+\int_{\mathbb R^2}F(u_0)+G(v_0).
$$
Thanks to \re{fg1},
\begin{equation}\label{fg3}
\lim_{n\rightarrow\infty}\int_{\mathbb R^2}\nabla u_n\nabla v_n+V_0u_nv_n=\int_{\mathbb R^2}\nabla u_0\nabla v_0+V_0u_0v_0.
\end{equation}
By $\langle\Phi'(u_n,v_n),(u_n,u_n)\rangle=0$ and \re{fg2}-\re{fg3} we obtain
\begin{equation}\label{fg4}
\int_{\mathbb R^2}|\nabla u_n|^2+V_0u_n^2=\int_{\mathbb R^2}f(u_0)u_0+g(v_n)u_n-\int_{\mathbb R^2}\nabla u_0\nabla v_0+V_0u_0v_0+o_n(1).
\end{equation}
At the same time from $\langle\Phi'(u_n,v_n),(u_n,-u_n)\rangle=0$ and $\langle\Phi'(u_0,v_0),(u_0,-u_0)\rangle=0$, we have
\begin{equation}\label{fg5}
\int_{\mathbb R^2}f(u_n)u_n=\int_{\mathbb R^2}g(v_n)u_n,\quad \int_{\mathbb R^2}f(u_0)u_0=\int_{\mathbb R^2}g(v_0)u_0.
\end{equation}
This implies by \re{fg2} that $\lim_{n\rightarrow\infty}\int_{\mathbb R^2}g(v_n)u_n=\int_{\mathbb R^2}g(v_0)u_0$. As a consequence, by \re{fg4} we obtain
$$
\lim_{n\rightarrow\infty}\int_{\mathbb R^2}|\nabla u_n|^2+V_0u_n^2=\int_{\mathbb R^2}f(u_0)u_0+g(v_0)u_0-\int_{\mathbb R^2}\nabla u_0\nabla v_0+V_0u_0v_0.
$$
Recalling that $\langle\Phi'(u_0,v_0),(u_0,u_0)\rangle=0$, namely
$$\int_{\mathbb R^2}|\nabla u_0|^2+V_0u_0^2=\int_{\mathbb R^2}f(u_0)u_0+g(v_0)u_0-\int_{\mathbb R^2}\nabla u_0\nabla v_0+V_0u_0v_0,$$
we deduce that $$\lim_{n\rightarrow\infty}\int_{\mathbb R^2}|\nabla u_n|^2+V_0u_n^2=\int_{\mathbb R^2}|\nabla u_0|^2+V_0u_0^2$$ and hence $u_n\rightarrow u_0$ in $H^1(\mathbb R^2)$. Similarly, $v_n\rightarrow v_0$ in $H^1(\mathbb R^2)$.
\end{proof}
\noindent
Next we prove $(i), (iii)$ of Theorem \ref{Th2} through the following three steps:
\begin{itemize}
\item In Proposition \ref{bo1} we prove regularity, namely for any fixed $z=(u,v)\in \mathcal{S}$ we prove that $u,v\in L^{\infty}(\mathbb R^2)\cap C_{loc}^{1,\gamma}(\mathbb R^2)$ for some $\gamma\in(0,1)$;
\item In Proposition \ref{bo2} we prove that for any $\{z_n\}\subset \mathcal{S}$, $z_n=(u_n,v_n)$, for which there exists $y_n\in \mathbb{R}^2$ such that $z_n(\cdot +y_n)\to z_0\in \mathcal{S}$, one has
$$\sup_{n\ge1}(\|u_n\|_\infty+\|v_n\|_\infty)<\infty;$$
\item Finally, in Proposition \ref{pro_apriori} we prove the following a priori estimates
$$0<\inf_{z=(u,v)\in \mathcal{S}}\min\{\|u\|_\infty,\|v\|_\infty\}< \sup_{z=(u,v)\in \mathcal{S}}(\|u\|_\infty+\|v\|_\infty)<\infty.$$
\end{itemize}
\begin{proposition}\label{bo1}
Let $(u,v)\in \mathcal{S}$, then $u,v\in L^{\infty}(\mathbb R^2)\cap C_{loc}^{1,\gamma}(\mathbb R^2)$ for some $\gamma\in(0,1)$.
\end{proposition}
\begin{proof} For any $r>0$, let $B_1=B_r(0), B_2=B_{2r}(0)$. Noting that $u$ is a weak solution of the following problem
\begin{equation}\label{v0}
-\Delta U+V_0U=g(v)\ \mbox{in}\ B_2,\ U-u\in H_0^1(B_2),
\end{equation}
by the Pohozaev-Trudinger-Moser inequality one has $g(v)\in L^p(B_2)$ for all $p\ge2$. By the Calderon-Zygmund inequality, see e.g. \cite[Theorem 9.9]{GT}, one has $u\in W^{2,p}(B_2)$. It follows from classical interior $L^p$-estimates that
\begin{equation}\label{v1}
\|u\|_{W^{2,p}(B_1)}\le C\left(\|g(v)\|_{L^p(B_2)}+\|u\|_{L^p(B_2)}\right),
\end{equation}
where $C$ only depends on $r,p$. Meanwhile, by the Sobolev embedding theorem, if $p>2$ we get that $u\in C^{1,\gamma}(\overline{B_1})$ for some $\gamma\in(0,1)$ and there exists $c$ (independent of $u$) such that
\begin{equation}\label{v2}\|u\|_{C^{1,\gamma}(\overline{B_1})}\le c\|u\|_{W^{2,p}(B_1)}.
\end{equation}
\noindent Next we prove that $u$ vanishes at infinity, namely that for any $\delta>0$, there exists $R>0$ such that $|u(x)|\le \delta,\ \forall |x|\ge R$. Indeed, otherwise there exists $\{x_j\}\subset\mathbb R^2$ with $|x_j|\rightarrow\infty$, as
$j\rightarrow\infty$ and $\liminf_{j\rightarrow\infty}|u(x_j)|>0$. Let $u_j(x)=u(x+x_j)$ and $v_j(x)=v(x+x_j)$, then $\|u_j\|=\|u\|$ and
\begin{equation}\label{v3}
-\Delta u_j+V_0u_j=g(v_j),\quad u_j\in H^1(\mathbb R^2).
\end{equation}
We may assume $u_j \rightharpoonup u_0$ weakly in $H^1(\mathbb R^2)$; we claim that $u_0\not\equiv 0$. In fact, noting that $u_j$ is a weak solution of \eqref{v0} with $g(v)$ replaced by $g(v_j)$, it follows from \eqref{v1} and
\eqref{v2} that, up to a subsequence, $u_j\rightarrow u_0$ uniformly in $\overline{B_1}$. Hence,
$$
u_0(0)=\liminf_{j\rightarrow\infty}u_j(0)=\liminf_{j\rightarrow\infty}u(x_j)\not=0,
$$
which implies that $u_0\not\equiv 0$. On the other hand, for any fixed $R>0$ and $j$ large enough, we have
\begin{align*}
\int_{\mathbb R^2}u^2 \, \mathrm{d} x &\ge \int_{B_R(0)}u^2 \, \mathrm{d} x +\int_{B_R(x_j)}u^2 \, \mathrm{d} x\\
&=\int_{B_R(0)}u^2 \, \mathrm{d} x+\int_{B_R(0)}u_j^2\, \mathrm{d} x\\
&=\int_{B_R(0)}u^2\, \mathrm{d} x +\int_{B_R(0)}u_0^2\, \mathrm{d} x +o_j(1),
\end{align*}
where $o_j(1)\rightarrow 0$, as $j\rightarrow\infty$.
Since $R$ is arbitrary, we get $u_0\equiv 0$, which is a contradiction. Thus, $u(x)\rightarrow 0$, as $|x|\rightarrow\infty$. Moreover, since $u\in C(B_r)$ for any $r>0$, we have $u\in L^\infty(\mathbb R^2)$. Similarly, $v\in L^\infty(\mathbb R^2)$.
\end{proof}
\begin{proposition}\label{bo2}
Let $\{z_n\}\subset \mathcal{S}$, $z_n=(u_n,v_n)$, be such that $\bar{z}_n=z_n(\cdot+y_n)\rightarrow z_0=(u_0,v_0)\in \mathcal{S}$ in $E$. Then
$$\sup_{n\ge1}(\|u_n\|_\infty+\|v_n\|_\infty)<\infty.$$
\end{proposition}
\begin{proof} Let $\bar{u}_n=u_n(\cdot+y_n), \bar{v}_n=v_n(\cdot+y_n)$. Similarly as above, $\bar{u}_n$ is a weak solution of the following problem
\begin{equation}\label{vb0}
-\Delta U+V_0U=g(\bar{v}_n)\ \mbox{in}\ B_2,\ U-\bar{u}_n\in H_0^1(B_2).
\end{equation} Moreover, for any $p\ge2$ we have
\begin{equation}\label{vb1}
\|\bar{u}_n\|_{W^{2,p}(B_1)}\le C\left(\|g(\bar{v}_n)\|_{L^p(B_2)}+\|\bar{u}_n\|_{L^p(B_2)}\right),
\end{equation}
where $C$ only depends on $r,p$. By the Sobolev embedding theorem, if $p>2$ we get $\bar{u}_n\in C^{1,\gamma}(\overline{B_1})$ for some $\gamma\in(0,1)$ and there exists $c$ (independent of $n$) such that
\begin{equation}\label{vb2}\|\bar{u}_n\|_{C^{1,\gamma}(\overline{B_1})}\le c\|\bar{u}_n\|_{W^{2,p}(B_1)}.
\end{equation}
Then by \re{vb1}-\re{vb2}, we get
\begin{equation}\label{vb3}\|\bar{u}_n\|_{C^{1,\gamma}(\overline{B_1})}\le c\left(\|g(\bar{v}_n)\|_{L^p(\mathbb R^2)}+\|\bar{u}_n\|_{L^p(\mathbb R^2)}\right).
\end{equation}
\noindent By $(H1)$, for $\beta>\alpha_0$ and some $C>0$, we have $|g(t)|\le C(|t|+\exp(\beta t^2)-1), t\in\mathbb R$. Recalling that $\bar{v}_n\rightarrow v_0$ in
$H^1(\mathbb R^2)$, we next prove that
\begin{equation}\label{yf1}
\lim_{n\rightarrow\infty}\int_{\mathbb R^2}|\exp(p\beta \bar{v}_n^2)-\exp(p\beta v_0^2)| \, \mathrm{d} x=0.
\end{equation}
In fact, since $v_0\in L^{\infty}(\mathbb R^2)$, there exists $c>0$ such that
\begin{align*}
&\int_{\mathbb R^2}|e^{(p\beta \bar{v}_n^2)}-e^{(p\beta v_0^2)}| \, \mathrm{d} x \\
&\le c\int_{\mathbb R^2}e^{(2p\beta |\bar{v}_n-v_0|^2)}|\bar{v}_n^2-v_0^2| \, \mathrm{d} x \\
&= c\int_{\mathbb R^2}[e^{(2p\beta |\bar{v}_n-v_0|^2)}-1]|\bar{v}_n^2-v_0^2| \, \mathrm{d} x +o_n(1)\\
&\le c\left(\int_{\mathbb R^2}\left[e^{(4p\beta |\bar{v}_n-v_0|^2)}-1\right]\, \mathrm{d} x \right)^{{1}/{2}}\left(\int_{\mathbb R^2}\left|\bar{v}_n^2-v_0^2\right|^2\, \mathrm{d} x \right)^{{1}/{2}}+o_n(1),
\end{align*}
where $o_n(1)\rightarrow 0$, as $n\rightarrow\infty$. From $\|\bar{v}_n-v_0\|_1\rightarrow0$, as $n\rightarrow\infty$ and the Pohozaev-Trudinger-Moser inequality, there exists $C$ such that $$\int_{\mathbb R^2}\left[e^{(4p\beta
|\bar{v}_n-v_0|^2)}-1 \right]\, \mathrm{d} x \le C$$ as $n$ is large enough; thus \eqref{yf1} follows.
Recalling that $\bar{z}_n\rightarrow z_0$ in $E$, by \re{yf1} $\|g(\bar{v}_n)\|_{L^p(\mathbb R^2)}\rightarrow\|g(v_0)\|_{L^p(\mathbb R^2)}$ as $n\rightarrow\infty$. Finally we have
\begin{equation}\label{vb4}
\sup_{n\ge1}\|\bar{u}_n\|_{C^{1,\gamma}(\overline{B_1})}<\infty.
\end{equation}
\vskip0.1in
\noindent Next we prove that $\bar{u}_n(x)\rightarrow0$, uniformly as $|x|\rightarrow\infty$. It is enough to prove that for any $\delta>0$, there exists $R>0$ such that $|\bar{u}_n(x)|\le \delta,\ \forall n\ge1, |x|\ge R$. Suppose this does not occur, so that there exists
$\{x_n\}\subset\mathbb R^2$ with $|x_n|\rightarrow\infty$, as $n\rightarrow\infty$ and $\liminf_{n\rightarrow\infty}|\bar{u}_n(x_n)|>0$. Let $\tilde{u}_n(x)=\bar{u}_n(x+x_n)$ and $\tilde{v}_n(x)=\bar{v}_n(x+x_n)$, then
\begin{equation}\label{vb5}
-\Delta \tilde{u}_n+V_0\tilde{u}_n=g(\tilde{v}_n),\quad \tilde{u}_n\in H^1(\mathbb R^2).
\end{equation}
We may assume $\tilde{u}_n \rightharpoonup \tilde{u}_0$ weakly in $H^1(\mathbb R^2)$ and we claim $\tilde{u}_0\not\equiv 0$. For any $n\ge1$, $\tilde{u}_n$ is a weak solution to the following problem
\begin{equation}\label{vbb0}
-\Delta U+V_0U=g(\tilde{v}_n)\ \mbox{in}\ B_2,\ U-\tilde{u}_n\in H_0^1(B_2).
\end{equation} Moreover,
\begin{equation}\label{vbb1}
\|\tilde{u}_n\|_{W^{2,4}(B_1)}\le C\left(\|g(\tilde{v}_n)\|_{L^4(B_2)}+\|\tilde{u}_n\|_{L^4(B_2)}\right),
\end{equation}
where $C$ depends on $r$ only. At the same time, by the Sobolev embedding theorem, we get $\tilde{u}_n\in C^{1,\gamma}(\overline{B_1})$ for some $\gamma\in(0,1)$ and there exists $c$ (independent of $n$) such that
\begin{equation}\label{vbb2}\|\tilde{u}_n\|_{C^{1,\gamma}(\overline{B_1})}\le c\|\tilde{u}_n\|_{W^{2,4}(B_1)}.
\end{equation}
Then by \re{vbb1}-\re{vbb2}, we get
$$\|\tilde{u}_n\|_{C^{1,\gamma}(\overline{B_1})}\le c\left(\|g(\tilde{v}_n)\|_{L^4(\mathbb R^2)}+\|\tilde{u}_n\|_{L^4(\mathbb R^2)}\right).
$$
Then similarly to \re{vb4}, $\sup_{n\ge1}\|\tilde{u}_n\|_{C^{1,\gamma}(\overline{B_1})}<\infty$. Hence up to a subsequence, $\tilde{u}_n\rightarrow \tilde{u}_0$ uniformly in $\overline{B_1}$. Thus,
$$
\tilde{u}_0(0)=\liminf_{n\rightarrow\infty}\tilde{u}_n(0)=\liminf_{n\rightarrow\infty}\bar{u}_n(x_n)\not=0,
$$
which implies that $\tilde{u}_0\not\equiv 0$. On the other hand, for any fixed $R>0$ and $n$ large enough, we have
\begin{align*}
&o_n(1)+\int_{\mathbb R^2}u_0^2 \, \mathrm{d} x=\int_{\mathbb R^2}\bar{u}_n^2 \, \mathrm{d} x\\
&\ge \int_{B_R(0)}\bar{u}_n^2 \, \mathrm{d} x +\int_{B_R(x_n)}\bar{u}_n^2 \, \mathrm{d} x\\
&=\int_{B_R(0)}\bar{u}_n^2 \, \mathrm{d} x+\int_{B_R(0)}\tilde{u}_n^2\, \mathrm{d} x\\
&=\int_{B_R(0)}u_0^2\, \mathrm{d} x +\int_{B_R(0)}\tilde{u}_0^2\, \mathrm{d} x +o_n(1),
\end{align*}
where $o_n(1)\rightarrow 0$, as $n\rightarrow\infty$ and we have used the fact that $\bar{u}_n=u_n(\cdot+y_n)\rightarrow u_0$ in $H^1(\mathbb R^2)$.
Since $R$ is arbitrary, we get $\tilde{u}_0\equiv 0$, which is a contradiction. Thus, $\bar{u}_n(x)\rightarrow 0$, uniformly as $|x|\rightarrow\infty$, which immediately implies by \re{vb4} that
$\sup_{n\ge1}\|u_n\|_\infty=\sup_{n\ge1}\|\bar{u}_n\|_\infty<\infty$. Similarly, $\sup_{n\ge1}\|v_n\|_\infty<\infty$.
\end{proof}
\begin{proposition}\label{pro_apriori} The following a priori estimates hold
\begin{equation}\label{aprioribound}
0<\inf_{z=(u,v)\in \mathcal{S}}\min\{\|u\|_\infty,\|v\|_\infty\}<\sup_{z=(u,v)\in \mathcal{S}}(\|u\|_\infty+\|v\|_\infty)<\infty.
\end{equation}
\end{proposition}
\begin{proof}
The upper bound is a consequence of Proposition \ref{bo2} and the fact that $\mathcal{S}$ is compact.
\noindent In order to prove the lower bound we argue by contradiction and thus assume $$\inf_{z=(u,v)\in \mathcal{S}}\min\{\|u\|_\infty,\|v\|_\infty\}=0.$$
Then, there exists $\{z_n\}\subset \mathcal{S}$ such that, without loss of generality, $\|v_n\|_\infty\rightarrow0$, as $n\rightarrow\infty$. From
$$
\int_{\mathbb R^2}|\nabla u_n|^2+V_0u_n^2=\int_{\mathbb R^2}g(v_n)u_n,
$$
by $(H1)$ we have
$$
\int_{\mathbb R^2}|\nabla u_n|^2+V_0u_n^2\leq o_n(1)\left(\int_{\mathbb R^2}v_n^2\right)^{1/2}\left(\int_{\mathbb R^2}u_n^2\right)^{1/2}
$$
and hence $u_n\rightarrow0$ in $H^1(\mathbb R^2)$. From
$$
\int_{\mathbb R^2}|\nabla v_n|^2+V_0v_n^2=\int_{\mathbb R^2}f(u_n)v_n\le\left(\int_{\mathbb R^2}v_n^2\right)^{1/2}\left(\int_{\mathbb R^2}[f(u_n)]^2\right)^{1/2},
$$
together with the fact that $u_n\rightarrow0$ in $H^1(\mathbb R^2)$, which implies $\int_{\mathbb R^2}[f(u_n)]^2\rightarrow 0$, we have also $v_n\rightarrow0$ in $H^1(\mathbb R^2)$. Finally, as $(u_n,v_n)\in \mathcal{S}$, we obtain a contradiction from the following:
$$
0<c_\ast=\lim_{n\rightarrow\infty}\left(\int_{\mathbb R^2}\nabla u_n\nabla v_n+V_0u_nv_n-\int_{\mathbb R^2}F(u_n)+G(v_n)\right)=0.
$$
\end{proof}
In order to complete the proof of Theorem \ref{Th2} it remains to show that ground states vanish at infinity and that they enjoy a suitable Pohozaev-type identity in the whole plane; we prove these results in Propositions \ref{vanishing_R} and \ref{stanislav} of the next Section.
\subsection{Vanishing and Pohozaev-type identity}
\begin{proposition}{\rm(Uniform vanishing)}\label{vanishing_R}
Let $x_z\in\mathbb R^2$ be a maximum point of $|u(x)|+|v(x)|$, $z=(u,v)\in \mathcal{S}$. Then $u(x+x_z)\to 0$ and $v(x+x_z)\rightarrow 0$, as $|x|\rightarrow\infty$, uniformly for any $(u,v)\in \mathcal{S}$.
\end{proposition}
\noindent In order to prove Proposition \ref{vanishing_R} we need the following technical lemma.
\begin{lemma}\label{boo4}
For any $\{z_n\}\subset \mathcal{S}, z_n=(u_n,v_n)$, up to a subsequence, $z_n(\cdot+x_n)\rightarrow z_1$ in $E$, as $n\rightarrow\infty$, where $\{x_n\}\subset\mathbb R^2$ is such that $|u_n(x_n)|+|v_n(x_n)|=\max_{x\in\mathbb R^2}(|u_n(x)|+|v_n(x)|).$
\end{lemma}
\begin{proof}
We first claim that there exist $\mu>0$ and $R_1>0$ such that
\begin{equation}\label{nvv}
\lim_{n\rightarrow\infty}\int_{B_{R_1}(x_n)}(u_n^2+v_n^2)\, \mathrm{d} x\ge\mu.
\end{equation}
Let us argue by contradiction: if not, for some $\{z_n\}\subset\mathcal{S}$ and any $R>0$, we get
$$
\lim_{n\rightarrow\infty}\int_{B_R(x_n)}(u_n^2+v_n^2)\, \mathrm{d} x=0.
$$
Let $\hat{u}_n=u_n(\cdot+x_n)$ and $\hat{v}_n=v_n(\cdot+x_n)$, then $\hat{u}_n,\hat{v}_n\rightarrow0$ in $L_{loc}^2(\mathbb R^2)$, as $n\rightarrow\infty$. Similarly as above, $\hat{u}_n$ is a weak solution of the following problem
$$
-\Delta U+V_0U=g(\hat{v}_n)\ \mbox{in}\ B_2,\ U-\hat{u}_n\in H_0^1(B_2).
$$ By standard elliptic regularity we get $\hat{u}_n\in C^{1,\gamma}(\overline{B_1})$ for some $\gamma\in(0,1)$ and there exists $c$ (independent of $n$) such that for $p>2$,
\begin{equation}\label{vbb3}\|\hat{u}_n\|_{C^{1,\gamma}(\overline{B_1})}\le c\left(\|g(\hat{v}_n)\|_{L^p(\mathbb R^2)}+\|\hat{u}_n\|_{L^p(\mathbb R^2)}\right).
\end{equation}
By Proposition \ref{bo2}, $\hat{z}_n\rightarrow z_0$ in $E$, and by \re{yf1} $\|g(\hat{v}_n)\|_{L^p(\mathbb R^2)}\rightarrow\|g(v_0)\|_{L^p(\mathbb R^2)}$, as $n\rightarrow\infty$. Then we have
\begin{equation}\label{vbb4}
\sup_{n\ge1}\|\hat{u}_n\|_{C^{1,\gamma}(\overline{B_1})}<\infty,
\end{equation}
which implies by $\hat{u}_n\rightarrow0$ in $L^2(B_1)$ that $\hat{u}_n\rightarrow0$ uniformly in $B_1$. In particular, $\hat{u}_n(0)=u_n(x_n)\rightarrow0$. Similarly, we have $\hat{v}_n(0)=v_n(x_n)\rightarrow0$. Finally we obtain
$$
\lim_{n\rightarrow\infty}\max_{x\in\mathbb R^2}(|u_n(x)|+|v_n(x)|)=\lim_{n\rightarrow\infty}(|u_n(x_n)|+|v_n(x_n)|)=0,
$$
which implies
$$
\lim_{n\rightarrow\infty}\min\{\|u_n\|_\infty,\|v_n\|_\infty\}=0
$$
and thus a contradiction.
Now by \re{nvv} $\lim_{n\rightarrow\infty}\int_{B_{R_1}(0)}(\hat{u}_n^2+\hat{v}_n^2)\, \mathrm{d} x\ge\mu$, which combined with the local compactness of the embedding $H^1(\mathbb R^2)\hookrightarrow L^2(\mathbb R^2)$, yields up to a subsequence, $z_n(\cdot+x_n)=(\hat{u}_n,\hat{v}_n)\rightharpoonup z_1\not={0}$ in $E$
and $z_n(\cdot+x_n)\to z_1$ a.e. in $\mathbb R^2$, as $n\rightarrow\infty$. Then arguing as in Propositions \ref{o11}-\ref{con}, we get $z_1\in \mathcal{S}$ and $z_n(\cdot+x_n)\rightarrow z_1$ in $E$, as $n\rightarrow\infty$, and this completes the proof.
\end{proof}
\noindent{\it Proof of Proposition \ref{vanishing_R}.}
We prove that for any $\delta>0$, there exists $R>0$ such that $|u(x+x_z)|+|v(x+x_z)|\le \delta, |x|\ge R$ for any $z=(u,v)\in \mathcal{S}$, where $x_z\in\mathbb R^2$ is a maximum point of $|u(x)|+|v(x)|$. If not, there exist $z_n=(u_n,v_n)\in \mathcal{S}$ and $\{x_n\}\subset\mathbb R^2$ such that $|x_n|\rightarrow\infty$ as $n\rightarrow\infty$ and $$\liminf_{n\rightarrow\infty}(|u_n(x_n+x_{z_n})|+|v_n(x_n+x_{z_n})|)>0,$$ where $x_{z_n}\in\mathbb R^2$ is a maximum point of $|u_n(x)|+|v_n(x)|$. Without loss of generality, we may assume $\liminf_{n\rightarrow\infty}|u_n(x_n+x_{z_n})|>0$. Let $\tilde{u}_n(x)=u_n(x+x_n+x_{z_n})$ and $\tilde{v}_n(x)=v_n(x+x_n+x_{z_n})$. Assume $\tilde{u}_n \rightharpoonup \tilde{u}_0$ weakly in $H^1(\mathbb R^2)$; in the following we claim $\tilde{u}_0\not\equiv 0$. Indeed, by Lemma \ref{boo4}, up to a subsequence, there exists $z\in\mathcal{S}$ such that $(u_n(\cdot+x_{z_n}),v_n(\cdot+x_{z_n}))\rightarrow z$ strongly in $E$. Then as in the proof of the above Lemma, by the elliptic estimates, up to a subsequence, for some $\tilde{u}_0\in H^1(\mathbb R^2)$ and $\gamma\in(0,1)$, $\tilde{u}_n\rightarrow \tilde{u}_0$ in $C_{loc}^{1,\gamma}(\mathbb R^2)$, as $n\rightarrow\infty$. Hence,
$$
\tilde{u}_0(0)=\liminf_{n\rightarrow\infty}\tilde{u}_n(0)=\liminf_{n\rightarrow\infty}u_n(x_n+x_{z_n})\not=0,
$$
which implies that $\tilde{u}_0\not\equiv 0$. On the other hand, proceeding as in Proposition \ref{bo2}, we get $\tilde{u}_0\equiv 0$, which is a contradiction.
\qed
\begin{proposition}{\rm(Pohozaev-type identity)}\label{stanislav}
For any $z=(u,v)\in \mathcal{S}$, the following Pohozaev-type identity holds true:
\begin{equation}\label{idpo}
\int_{\mathbb R^2}(F(u)+G(v)-V_0uv)\,\mathrm{d} x=0.
\end{equation}
\end{proposition}
\begin{proof}
By the proof of Proposition \ref{bo1} we know $u,v\in W_{\operatorname{\rm loc}}^{2,p}(\mathbb R^2)$ for any $p\ge2$. Then $\Delta u=V_0u-g(v)$ a.e. in $\mathbb R^2$ and $\Delta v=V_0v-f(u)$ a.e. in $\mathbb R^2$. Following \cite{Pucci, van} we get
\begin{align}\label{poha}
&\oint_{\partial B_r}\nabla u\nabla v\cdot(x,{\bf n})\,\mathrm{d} s-\oint_{\partial B_r}\left(\sum_{i,j=1}^2 x_j\left(\frac{\partial u}{\partial x_j}\frac{\partial v}{\partial x_i}+\frac{\partial v}{\partial x_j}\frac{\partial u}{\partial x_i}\right),{\bf n}\right)\,\mathrm{d} s\\
&=2\int_{B_r}(V_0uv-F(u)-G(v))\,\mathrm{d} x,\nonumber
\end{align}
where $B_r(0):=\{x\in\mathbb R^2:|x|<r\}, r>0$ and $\bf{n}$ is the outward normal of $\partial B_r$ at $x$. From $\nabla u,\nabla v\in L^2(\mathbb R^2)$, by virtue of the coarea formula, there exists $r_n$ such that $r_n\rightarrow\infty$ and
$$
r_n\oint_{\partial B_{r_n}}\left|\frac{\partial u}{\partial x_j}\frac{\partial v}{\partial x_i}\right|\,\mathrm{d} s\rightarrow0,\,\,\hbox{for any}\,\,i,j=1,2.
$$
As a consequence, as $n\rightarrow\infty$,
$$
\left|\oint_{\partial B_{r_n}}\nabla u\nabla v\cdot(x,{\bf n})\,\mathrm{d} s\right|\le r_n\oint_{\partial B_{r_n}}\left|\nabla u\nabla v\right|\,\mathrm{d} s\rightarrow0
$$
and hence
$$
\oint_{\partial B_{r_n}}\left(\sum_{i,j=1}^2 x_j\left(\frac{\partial u}{\partial x_j}\frac{\partial v}{\partial x_i}+\frac{\partial v}{\partial x_j}\frac{\partial u}{\partial x_i}\right),{\bf n}\right)\,\mathrm{d} s\rightarrow0.
$$
Then, let $r=r_n$ in \re{poha} to get, as $n\rightarrow\infty$, identity \eqref{idpo}.
\end{proof}
\subsection{Sign and symmetry properties}\label{sign_s}
This section is devoted to proving Theorem \ref{sign}. To investigate positivity and radial symmetry of ground state solutions to \re{q11}, without loss of
generality, throughout this section we assume that $f,g$ are odd functions.
\noindent Let $$\kappa:=\sup\{\|u\|_{\infty},\|v\|_{\infty}: (u,v)\in \mathcal{S}\}<\infty$$ by Theorem \ref{Th2}. By $(H1)$ and $(H6)$, there exist small $a_0,b_0\in[0,1)$ and $k_1,k_2>0$ with $$k_1=\max_{a_0<|t|\le\kappa}|f(t)|/|t|^q,\,\, k_2=\max_{b_0<|t|\le\kappa}|g(t)|/|t|^p,$$ such that $f(t)\le t$, for $t\in[0,a_0]$ and $g(t)\le t$, for $t\in[0,b_0]$. Moreover, $f(a_0)=k_1a_0^q$ and $g(b_0)=k_2b_0^p$. In fact, if $\limsup_{t\rightarrow0}|f(t)|/|t|^q<\infty$, we can choose $a_0=0$, otherwise there exists $a_0\in(0,1)$ such that $f(a_0)/a_0^q=\max_{t\in[a_0,\kappa]}f(t)/t^q.$
Let
$$
f_k(t)=\left\{
\begin{array}{ll}
f(t),\ \ & \mbox{if}\ t\in[0,a_0]\\
\min\{f(t),k_1t^q\},\ \ \ & \mbox{if}\ t\in(a_0,\infty)
\end{array}
\right.
$$
and $f_k(t)=-f_k(-t)$ for $t\le0$, and similarly for $g$. Then, $f_k,g_k\in C(\mathbb R,\mathbb R)$ and $f_k(t)=f(t),g_k(t)=g(t)$ if $|t|\le\kappa$, $0<f_k(t)\le f(t),0<g_k(t)\le g(t)$ for all $t>0$. At the same time, there exists $\beta>0$ such that
\begin{equation}\label{gj}
\left\{
\begin{array}{ll}
|f_k(t)|\ge\beta|t|^q\text{ and }|g_k(t)|\ge\beta|t|^p,\ \ \mbox{for any}\ t\in\mathbb R\\
|f_k(t)|=|f(t)|\le|t|\,\,\,\mbox{if}\ |t|\le a_0,\ \ |g_k(t)|=|g(t)|\le|t|\,\ \mbox{if}\ |t|\le b_0\\
|f_k(t)|\le k_1|t|^q\,\,\mbox{if}\ |t|\ge a_0,\ \ |g_k(t)|\le k_2|t|^p\,\ \mbox{if}\ |t|\ge b_0.
\end{array}
\right.
\end{equation}
Moreover, it is easy to check that $f_k, g_k$ satisfy $(H1)$, $(H4)$ and
\begin{equation}\label{am1}
0<2F_k(t)\le f_k(t)t,\,\,0<2G_k(t)\le g_k(t)t,\,\,t\not=0,
\end{equation}
\begin{equation}\label{am2}
\lim_{|t|\rightarrow\infty}\frac{F_k(t)}{t^2}=\infty,\,\,\lim_{|t|\rightarrow\infty}\frac{G_k(t)}{t^2}=\infty,
\end{equation}
where $F_k(t)=\int_0^tf_k(\tau)\,\mathrm{d}\tau$ and $G_k(t)=\int_0^tg_k(\tau)\,\mathrm{d}\tau$.
Now consider the truncated problem
\begin{equation}\label{qk1} \left\{
\begin{array}{ll}
&-\Delta u+V_0u=g_k(v)\\
&-\Delta v+V_0v=f_k(u)
\end{array}
\right. \end{equation}
whose associated energy functional is
$$
\Phi_k(z):=\int_{\mathbb R^2}(\nabla u\nabla v+V_0uv)\,\mathrm{d} x-\int_{\mathbb R^2}(F_k(u)+G_k(v))\,\mathrm{d} x,\ \ z=(u,v)\in E.
$$
Recall the generalized Nehari manifold
$$
\mathcal{N}_k:=\{z\in E\setminus E^-: \langle \Phi_k'(z),z\rangle=0, \langle \Phi_k'(z),\varphi\rangle=0\ \mbox{for all}\ \ \varphi\in E^-\}
$$
and the least energy
$$
c_\ast^k:=\inf_{z\in\mathcal{N}_k}\Phi_k(z).
$$
Noting that any $(u,v)\in \mathcal{S}$ is a solution to \re{qk1}, we have $c_\ast^k\le c_\ast$. For $z\in E\setminus E^-$, set
$$
\hat{E}(z)=E^-\oplus\mathbb R^+z=E^-\oplus\mathbb R^+z^+.
$$
From \cite{DJJ,Szulkin,Weth} we have
\begin{lemma}\label{lk54.1}\
\begin{itemize}
\item [1)] For any $z\in \mathcal{N}_k$, $\Phi_k|_{\hat{E}(z)}$ has a unique maximum point which occurs exactly at $z$;
\item [2)] For any $z\in E\setminus E^-$, the set $\hat{E}(z)$ intersects $\mathcal{N}_k$ at exactly one point $\hat{m}_k(z)$, which is the unique global maximum point of $\Phi_k|_{\hat{E}(z)}$;
\item [3)]
$$
c_\ast^k=\inf_{z\in E\setminus E^-}\max_{\omega\in\hat{E}(z)}\Phi_k(\omega).
$$
\end{itemize}
\end{lemma}
\noindent From $0\le G_k(t)\le G(t)$ and $0\le F_k(t)\le F(t)$ for any $t\in\mathbb R$, we have
$$
c_\ast^k\ge\inf_{z\in E\setminus E^-}\max_{\omega\in\hat{E}(z)}\Phi(\omega)=c_\ast,
$$
thus $c_\ast^k=c_\ast>0$.
\noindent Next define
$$
\hat{m}_k: z\in E\setminus E^-\mapsto\hat{m}_k(z)\in\hat{E}(z)\cap\mathcal{N}_k.
$$
There exists $\delta>0$ such that $\|z^+\|_\varepsilon\ge\delta$ for all $z\in\mathcal{N}_k$; in particular one has
$$
\|\hat{m}_k(z)^+\|_\varepsilon\ge\delta\ \ \ \mbox{for all}\ \ z\in E\setminus E^-.
$$
Moreover, for each compact subset $\mathcal{W}\subset E\setminus E^-$, there exists a constant $C_{\mathcal{W}}>0$ such that
$$
\|\hat{m}_k(z)\|\le C_{\mathcal{W}}\ \ \ \mbox{for all}\ \ z\in\mathcal{W}.
$$
\noindent Define
$$
S^+:=\{z\in E^+: \|z\|=1\},
$$
then, $S^+$ is a $C^1$-submanifold of $E^+$ and the tangent space of $S^+$ at $z\in S^+$ is given by
$$
T_z(S^+)=\{\omega\in E^+: (\omega,z)=0\}.
$$
Let
$$
m_k:=\hat{m}_k|_{S^+}: S^+\longrightarrow\mathcal{N}_k,
$$
then $\hat{m}_k$ is continuous and $m_k$ is a homeomorphism between $S^+$ and $\mathcal{N}_k$. Define
$$
\Psi_k: S^+\longrightarrow\mathbb R, \Psi_k(z):=\Phi_k(m_k(z)), z\in S^+;
$$
then, by \cite[Corollary 4.3]{Weth} we have
\begin{proposition}\label{pk5.5}\noindent
\begin{itemize}
\item [1)] $\Psi_k\in C^1(S^+,\mathbb R)$ and
$$
\langle\Psi_k'(z),\omega\rangle=\|m_k(z)^+\|\langle\Phi_k'(m_k(z)),\omega\rangle,\ \ \mbox{for all}\ \ \omega\in T_z(S^+);
$$
\item [2)] If $\{\omega_n\}\subset S^+$ is a Palais-Smale sequence for $\Psi_k$, then $\{m_k(\omega_n)\}\subset \mathcal{N}_k$ is a Palais-Smale sequence for $\Phi_k$. Namely, if $\Psi_k(\omega_n)\rightarrow d$ for some $d>0$ and $\|\Psi_k'(\omega_n)\|_\ast\rightarrow 0$ as $n\rightarrow\infty$, then $\Phi_k(m_k(\omega_n))\rightarrow d$ and $\|\Phi_k'(m_k(\omega_n))\|\rightarrow0$ as $n\rightarrow\infty$, where
$$
\|\Psi_k'(\omega_n)\|_\ast=\sup_{\stackrel{\phi\in T_{\omega_n}(S^+)}{\|\phi\|=1}}\langle\Psi_k'(\omega_n),\phi\rangle\ \ \mbox{and}\ \ \|\Phi_k'(m_k(\omega_n))\|=\sup_{\stackrel{\phi\in E}{\|\phi\|=1}}\langle\Phi_k'(m_k(\omega_n)),\phi\rangle;
$$
\item [3)] $\omega\in S^+$ is a critical point of $\Psi_k$ if and only if $m_k(\omega)\in \mathcal{N}_k$ is a critical point of $\Phi_k$;
\item [4)] $\inf_{S^+}\Psi_k=\inf_{\mathcal{N}_k}\Phi_k$.
\end{itemize}
\end{proposition}
\noindent It follows from the Ekeland Variational Principle (see \cite[Theorem 3.1]{E}) that there exists $\{z_n^k\}\subset\mathcal{N}_k$ such that
\begin{equation}\label{pkss4}
\Phi_k(z_n^k)\rightarrow c_\ast>0 \ \ \mbox{and}\ \ \Phi_k'(z_n^k)\rightarrow 0,\ \ \mbox{as}\ \ n\rightarrow\infty.
\end{equation}
Next we prove that $\{z_n^k\}$ is uniformly bounded in $E$. Precisely, we have the following
\begin{lemma}\label{o14}
There exists $C>0$ such that $\|z_n^k\|=\|(u_n^k,v_n^k)\|\le C$, for all $n\in\mathbb{N}$.
\end{lemma}
\begin{proof}
Let $z_n^k=z_n^++z_n^-$, where $z_n^+\in E^+,\,\, z_n^-\in E^-$. Noting that $z_n^k\in\mathcal{N}_k$, we have $\|z_n^+\|^2\ge\|z_n^k\|^2/2$ for all $n\in\mathbb{N}$. Let $w_n^k=w_n^++w_n^-=z_n^k/\|z_n^k\|$, where $w_n^+\in\hat{E}(z_n^k)\subset E^+,w_n^-\in E^-$ and $w_n^+=(\tilde{w}_n,\tilde{w}_n)$, then $\|w_n^+\|^2\ge1/2$. By Lemma \ref{lk54.1}, for some $R>2\sqrt{c_\ast}$, we have
\begin{align*}
c_\ast+o_n(1)&=\Phi_k(z_n^k)=\max_{w\in\hat{E}(z_n^k)}\Phi_k(w)\ge\Phi_k(R w_n^+)\\
&\ge R^2/4-\int_{\mathbb R^2}F_k(R\tilde{w}_n)+G_k(R\tilde{w}_n),
\end{align*}
which implies
$$
\liminf_{n\rightarrow\infty}\int_{\mathbb R^2}F_k(R\tilde{w}_n)+G_k(R\tilde{w}_n)>0.
$$
By Lions' Lemma, up to translations, $\tilde{w}_n\rightharpoonup w\not=0$ weakly in $H^1(\mathbb R^2)$ as $n\rightarrow\infty$. Assume that $w_n^k\rightharpoonup (u,v)$ weakly in $H^1(\mathbb R^2)$ as $n\rightarrow\infty$, then $u+v=2w$. If $\|z_n^k\|\rightarrow\infty$ as $n\rightarrow\infty$, then $u_n^k(x)\rightarrow\infty$ if $u(x)\not=0$ as $n\rightarrow\infty$ and by Fatou's Lemma and \re{am2},
$$
\liminf_{n\rightarrow\infty}\int_{\mathbb R^2}\left(\frac{F_k(u_n^k)}{\|z_n^k\|^2}+\frac{G_k(v_n^k)}{\|z_n^k\|^2}\right)=+\infty,
$$
which yields $\Phi_k(z_n^k)\rightarrow-\infty$ as $n\rightarrow\infty$. This is a contradiction and therefore $\{z_n^k\}$ stays bounded in $E$.
\end{proof}
Up to a subsequence, we may assume $z_n^k\rightharpoonup z^k$ weakly in $E$, as $n\rightarrow\infty$. It is standard to check that $\Phi_k'(z^k)=0$.
\begin{proposition}\label{tk1}
The truncated problem \re{qk1} admits a ground state solution.
\end{proposition>
\begin{proof}
If $z^k\not=0$, then by \re{am2} and Fatou's Lemma one has
\begin{align*}
c_\ast+o_n(1)&=\Phi_k(z_n^k)-\frac{1}{2}\langle \Phi_k'(z_n^k),z_n^k\rangle\\
&=\int_{\mathbb R^2}\frac{1}{2}f_k(u_n^k)u_n^k-F_k(u_n^k)+\int_{\mathbb R^2}\frac{1}{2}g_k(u_n^k)u_n^k-G_k(u_n^k)\\
&\ge\int_{\mathbb R^2}\frac{1}{2}f_k(u^k)u^k-F_k(u^k)+\int_{\mathbb R^2}\frac{1}{2}g_k(u^k)u^k-G_k(u^k)+o_n(1)\\
&=\Phi_k(z^k)-\frac{1}{2}\langle \Phi_k'(z^k),z^k\rangle+o_n(1)\\
&=\Phi_k(z^k)\ge c_\ast+o_n(1),
\end{align*}
from which $z^k$ is a ground state solution to \re{qk1}.
\noindent If $z^k=0$, we claim there exist $\nu>0$, $R_0>0$ and $\{y_n\}\subset\mathbb R^2$ such that
\begin{equation}\label{nonvanishing}
\lim_{n\rightarrow\infty}\int_{B_{R_0}(y_n)}(|u_n^k|^2+|v_n^k|^2)\, \mathrm{d} x\ge\nu.
\end{equation}
Suppose the claim holds true and set $\tilde{u}_n^k(\cdot):=u_n^k(\cdot+y_n)$ and $\tilde{v}_n^k(\cdot):=v_n^k(\cdot+y_n)$, so that
\begin{equation}\label{yk22}
\lim_{n\rightarrow\infty}\int_{B_{R_0}(0)}(|\tilde{u}_n^k|^2+|\tilde{v}_n^k|^2)\, \mathrm{d} x\ge\nu,
\end{equation}
and $\Phi_k(\tilde{z}_n^k)\rightarrow c_\ast>0$ and $\Phi_k'(\tilde{z}_n^k)\rightarrow 0$, as $n\rightarrow\infty$ where $\tilde{z}_n^k=(\tilde{u}_n^k,\tilde{v}_n^k)$. Clearly $\{\tilde{z}_n^k\}$ is bounded in $E$ and up to a subsequence, by \re{yk22} we may assume that $\tilde{z}_n^k\rightarrow \tilde{z}^k\not=0$ weakly in $E$ to a ground state solution of \re{qk1}.
\noindent Hence let us prove by contradiction the claim \re{nonvanishing}. Indeed, if \re{nonvanishing} does not hold we have
$$
\lim_{n\rightarrow\infty}\sup_{y\in\mathbb R^2}\int_{B_R(y)}(|u_n^k|^2+|v_n^k|^2)\, \mathrm{d} x=0\ \ \mbox{for all}\ \ R>0,
$$
then by Lions's Lemma, $u_n^k\rightarrow0, v_n^k\rightarrow0$ strongly in $L^s(\mathbb R^2)$ for any $s>2$. By $(H1)$ and \re{gj} we have
$$
\int_{\mathbb R^2}(|\nabla u_n^k|+V_0|u_n^k|)\,\mathrm{d} x=\int_{\mathbb R^2}g_k(v_n^k)u_n^k\,\mathrm{d} x\rightarrow 0,\,\, n\rightarrow\infty.
$$
Namely, $u_n^k\rightarrow0$ strongly in $E$, as $n\rightarrow\infty$. It follows that
$$
\int_{\mathbb R^2}(|\nabla v_n^k|+V_0|v_n^k|)\,\mathrm{d} x=\int_{\mathbb R^2}f_k(u_n^k)v_n^k\,\mathrm{d} x\rightarrow 0,\,\, n\rightarrow\infty.
$$
Namely, $v_n^k\rightarrow0$ strongly in $E$, as $n\rightarrow\infty$. So we get $c_\ast+o_n(1)=\Phi_k(z_n^k)\rightarrow0$, as $n\rightarrow\infty$, which is a contradiction.
\end{proof}
\noindent Denote by $\mathcal{S}_k$ the set of ground state solutions to system \re{qk1}, then $\mathcal{S}_k\not=\emptyset$. Similarly as above,
for any $z=(u,v)\in \mathcal{S}_k$, $u,v\in L^{\infty}(\mathbb R^2)\cap C_{loc}^{1,\gamma}(\mathbb R^2)$ for some $\gamma\in(0,1)$. Recalling that $c_\ast=c_\ast^k$,
we get $\mathcal{S}\subseteq \mathcal{S}_k$. In order to prove the reverse inclusion let us recall the following results from \cite{DJJ}.
\begin{lemma}\label{l54.1}{\rm \cite{DJJ}} With the assumptions in Theorem \ref{Th2}, we have:
\begin{itemize}
\item [1)] for any $z\in \mathcal{N}$, $\Phi|_{\hat{E}(z)}$ admits a unique maximum point which is precisely at $z$;
\item [2)] for any $z\in E\setminus E^-$, the set $\hat{E}(z)$ intersects $\mathcal{N}$ at exactly one point $\hat{m}(z)$, which is the unique globally maximum point of $\Phi|_{\hat{E}(z)}$;
\item [3)]
$$c_\ast=\inf_{z\in E\setminus E^-}\max_{\omega\in\hat{E}(z)}\Phi(\omega).$$
\end{itemize}
\end{lemma}
\noindent Let $m:=\hat{m}|_{S^+}: S^+\mapsto\mathcal{N}$ and
$$
\Psi: S^+\mapsto\mathbb R, \Psi(z):=\Phi(m(z)), z\in S^+,
$$
then $\hat{m}$ is continuous and $m$ is a homeomorphism between $S^+$ and $\mathcal{N}$. As in \cite{Weth}, $m$ is invertible and the inverse is given
by $$m^{-1}(z)=\frac{z^+}{\|z\|},\,\, z=z^++z^-\in\mathcal{N},\,\, z^+\in E^+,\,\, z^-\in E^-.$$
Similar to Proposition \ref{pk5.5}, we have
\begin{proposition}\label{pk5.6}\noindent
\begin{itemize}
\item [1)] $\Psi\in C^1(S^+,\mathbb R)$ and
$$
\langle\Psi'(z),\omega\rangle=\|m(z)^+\|\langle\Phi'(m(z)),\omega\rangle\ \ \mbox{for all}\ \ \omega\in T_z(S^+);
$$
\item [2)] If $\{\omega_n\}\subset S^+$ is a Palais-Smale sequence for $\Psi$, then $\{m(\omega_n)\}\subset \mathcal{N}$ is a Palais-Smale sequence for $\Phi$. Namely, if $\Psi(\omega_n)\rightarrow d$ for some $d>0$ and $\|\Psi'(\omega_n)\|_\ast\rightarrow 0$ as $n\rightarrow\infty$, then $\Phi(m(\omega_n))\rightarrow d$ and $\|\Phi'(m(\omega_n))\|\rightarrow0$ as $n\rightarrow\infty$, where
$$
\|\Psi'(\omega_n)\|_\ast=\sup_{\stackrel{\phi\in T_{\omega_n}(S^+)}{\|\phi\|=1}}\langle\Psi'(\omega_n),\phi\rangle\ \ \mbox{and}\ \ \|\Phi'(m(\omega_n))\|=\sup_{\stackrel{\phi\in E}{\|\phi\|=1}}\langle\Phi'(m(\omega_n)),\phi\rangle;
$$
\item [3)] $\omega\in S^+$ is a critical point of $\Psi$ if and only if $m(\omega)\in \mathcal{N}$ is a critical point of $\Phi$;
\item [4)] $\inf_{S^+}\Psi=\inf_{\mathcal{N}}\Phi$.
\end{itemize}
\end{proposition}
\begin{proposition}\label{sk}
$$\mathcal{S}_k=\mathcal{S}.$$
\end{proposition}
\begin{proof}
For any $z^k\in \mathcal{S}_k$, we know $z^k\in \mathcal{N}_k$; by Lemma \ref{lk54.1}, $\Phi_k|_{\hat{E}(z^k)}$ admits a unique maximum point at $z^k$ and
$$
c_\ast^k:=\inf_{z\in E\setminus E^-}\max_{\omega\in\hat{E}(z)}\Phi_k(\omega)=\max_{\omega\in\hat{E}(z^k)}\Phi_k(\omega).
$$
Since $z^k\in E\setminus E^-$, by Lemma \ref{l54.1} the set $\hat{E}(z^k)$ intersects $\mathcal{N}$ just at one point $\hat{m}(z^k)$, which is the unique global maximum of $\Phi|_{\hat{E}(z^k)}$. Let $\hat{m}(z^k)=(\hat{u}^k,\hat{v}^k)$, then by $0\le f_k(t)\le f(t)$ and $0\le g_k(t)\le g(t)$, for $t\ge0$ we have
\begin{align*}
c_\ast^k&=\max_{\omega\in\hat{E}(z^k)}\Phi_k(\omega)\ge\Phi_k(\hat{m}(z^k))\\
&=\Phi(\hat{m}(z^k))+\int_{\mathbb R^2}[F(\hat{u}_k)-F_k(\hat{u}_k)]\,\mathrm{d} x+\int_{\mathbb R^2}[G(\hat{v}_k)-G_k(\hat{v}_k)]\,\mathrm{d} x\\
&=\max_{\omega\in\hat{E}(z^k)}\Phi(\omega)+\int_{\mathbb R^2}[F(\hat{u}_k)-F_k(\hat{u}_k)]\,\mathrm{d} x+\int_{\mathbb R^2}[G(\hat{v}_k)-G_k(\hat{v}_k)]\,\mathrm{d} x\\
&\ge\inf_{z\in E\setminus E^-}\max_{\omega\in\hat{E}(z)}\Phi(\omega)\ge c_\ast,
\end{align*}
which implies $F(\hat{u}_k(x))\equiv F_k(\hat{u}_k(x))$ and $G(\hat{v}_k(x))\equiv G_k(\hat{v}_k(x))$ for all $x\in\mathbb R^2$ and
$$\max_{\omega\in\hat{E}(z^k)}\Phi_k(\omega)=\Phi_k(\hat{m}(z^k))=\Phi(\hat{m}(z^k))=c_\ast.$$
Then $\Psi(m^{-1}(\hat{m}(z^k))):=\Phi(\hat{m}(z^k))=c_\ast$. Notice that $m^{-1}(\hat{m}(z^k))\in S^+$. Then, by Proposition \ref{pk5.6}, $m^{-1}(\hat{m}(z^k))$ is
a minimizer of $\Psi$ on the $C^1$-manifold $S^+$. Thus
$$
\langle\Psi'(m^{-1}(\hat{m}(z^k))),\omega\rangle=0\ \ \mbox{for all}\ \ \omega\in T_{m^{-1}(\hat{m}(z^k))}(S^+).
$$
It follows from $3)$ of Proposition \ref{pk5.6} that $\Phi'(\hat{m}(z^k))=0$, which yields $\hat{m}(z^k)\in \mathcal{S}$. By uniqueness of the global maximum point
of $\Phi_k|_{\hat{E}(z^k)}$, we get $z^k=\hat{m}(z^k)$ and hence $z^k\in \mathcal{S}$. Therefore, $\mathcal{S}_k=\mathcal{S}$.
\end{proof}
In the last part of this section, in the spirit of \cite{dsr} we prove that $uv>0$ in $\mathbb R^2$ for any $z=(u,v)\in \mathcal{S}_k$.
\noindent Let $h(s):=g_k^{-1}(s)$ and let $H$ denote the primitive function of $h$.
By \re{gj}, for some $c,C>0$,
\begin{equation}\label{gj2}
\left\{
\begin{array}{ll}
h(s)s\le C|s|^{(p+1)/p}\,\,\ &\mbox{for}\,\,s\in\mathbb R,\\
h(s)s\ge s^2/2\,\,\,&\mbox{if}\,\,|s|\le g(b_0),\\
h(s)s\ge c|s|^{(p+1)/p}\,\,\,&\mbox{if}\,\, |s|>g(b_0),
\end{array}
\right.
\end{equation}
and clearly the same estimates hold for $H(s)$ as well. Consider the Schr\"odinger operator $L:=-\Delta +V_0$ and the Sobolev space $W^{2,(p+1)/p}(\mathbb R^2)$ endowed with the norm
$$\interleave{u\interleave}=\left(\int_{\mathbb R^2}|Lu|^{\frac{p+1}{p}}\,\mathrm{d} x\right)^{\frac{p}{p+1}}.$$
The following embeddings hold
$$
W^{2,\frac{s+1}{s}}(\mathbb R^N)\hookrightarrow L^r(\mathbb R^N),\,\, \mbox{for any}\,\, r\ge\frac{s+1}{s},\,s>1,\,\,\mbox{if}\,\, s(N-2)\le 2,
$$
in particular $W^{2,(p+1)/p}(\mathbb R^2)\hookrightarrow L^2(\mathbb R^2)\cap L^{p+1}(\mathbb R^2)\cap L^{q+1}(\mathbb R^2)$. For $u\in W^{2,(p+1)/p}(\mathbb R^2)$, define
$$
J_k(u)=\int_{\mathbb R^2}H(Lu)-F_k(u)\,\mathrm{d} x;
$$
then $J_k$ is of class $C^1$ and
$$
\langle J_k'(u),\varphi\rangle=\int_{\mathbb R^2}(h(Lu)L(\varphi)-f(u)\varphi)\,\mathrm{d} x,\,\, u,\varphi\in W^{2,(p+1)/p}(\mathbb R^2).
$$
\begin{proposition}\label{equ}
$(u,v)\in E$ is a critical point of $\Phi_k$ if and only if $u$ is a critical point of $J_k$ and $v=h(Lu)$. Moreover, one has $\Phi_k(u,v)=J_k(u)$.
\end{proposition}
\noindent Define
$$
c_1(\mathbb R^2)=\inf_{u\in\mathcal{N}_J}J_k(u),\,\,\,\mbox{where}\,\,\,\mathcal{N}_J:=\{u\in W^{2,(p+1)/p}(\mathbb R^2)\setminus\{0\}: \langle J_k'(u),u\rangle=0\},
$$
which under our assumptions might not be well defined. We overcome this difficulty by considering an approximation via bounded domains. Precisely, for any $R>0$ let us consider the problem
\begin{equation}\label{qk2} \left\{
\begin{array}{ll}
-\Delta u+V_0u=g_k(v)\\
-\Delta v+V_0v=f_k(u)
\end{array}
\right. \end{equation}
$u,v\in H_0^1(B_R(0))$, whose associated energy functional is
$$
I_R(z):=\int_{B_R(0)}(\nabla u\nabla v+V_0uv)\,\mathrm{d} x-\int_{B_R(0)}(F_k(u)+G_k(v))\,\mathrm{d} x,
$$
where $z=(u,v)\in E_R:=H_0^1(B_R(0))\times H_0^1(B_R(0))$.
\noindent We can define as above $E_R^+,E_R^-,\hat{E}_R(z)$ and
$$
\mathcal{N}_R:=\{z\in E_R\setminus E_R^-:\langle I_R'(z),z\rangle=0,\,\,\langle I_R'(z),\phi\rangle=0\,\,\, \mbox{for all}\,\,\phi\in E_R^-\}.
$$
Denote by $c_\ast(B_R(0))$ the corresponding least energy associated to the energy functional $I_R$. Similar to Lemma \ref{o14}, every Palais-Smale sequence for $I_R$ is bounded in $E_R$. Then $c_\ast(B_R(0))$ is the ground state critical level associated to $I_R$. Moreover,
$$
c_\ast(B_R(0))=\inf_{z\in E_R\setminus E_R^-}\max_{\omega\in\hat{E}_R(z)}I_R(\omega).
$$
\begin{remark}
If $z=(u,v)\in\mathcal{N}_R$, we have $\langle I_R'(z),(\varphi,-\varphi)\rangle=0$ for all $\varphi\in H_0^1(B_R(0))$. In general, $\langle I_R'(z),(\varphi,-\varphi)\rangle=0$ does not hold
for all $\varphi\in H^1(\mathbb R^2)$. Then, $\mathcal{N}_R$ is not a subset of $\mathcal{N}$, so it is not clear if $c_\ast(B_R(0))$ is greater than $c_\ast$.
\end{remark}
\noindent Let $$X_R=W^{2,(p+1)/p}(B_R(0))\cap W_0^{1,(p+1)/p}(B_R(0))$$
endowed with the norm $$\interleave{u\interleave}=\left(\int_{B_R}|Lu|^{\frac{p+1}{p}}\,\mathrm{d} x\right)^{\frac{p}{p+1}}$$ and
$$
J_R(u)=\int_{B_R(0)}H(Lu)-F_k(u)\,\mathrm{d} x,\,\,\ u\in X_R.
$$
\begin{proposition}
$z=(u,v)\in E_R$ is a critical point of $I_R$ if and only if $u$ is a critical point of $J_R$ and $v=h(Lu)$. Moreover, $I_R(u,v)=J_R(u)$.
\end{proposition}
\noindent Let
$$
\mathcal{N}_{J_R}:=\{u\in X_R\setminus\{0\}: \langle J_R'(u),u\rangle=0\},\,\, c_1(B_R(0)):=\inf_{u\in\mathcal{N}_{J_R}}J_R(u).
$$
Notice that $\mathcal{N}_{J_R}$ might not be a $C^1$-manifold, so that we next borrow some ideas of \cite{Weth} to
overcome this difficulty and prove the existence of ground states corresponding to the functional $J_R$ on $\mathcal{N}_{J_R}$ for any $R$. Then by passing to
the limit, we show that $c_1(\mathbb R^2)$ is the ground state critical value.
\begin{lemma}\label{mountain}
For any $u\in X_R\setminus\{0\}$, $J_R(tu)\rightarrow-\infty$, as $t\rightarrow+\infty$ and the set $\mathbb R^+u$ intersects
$\mathcal{N}_{J_R}$ at exactly one point denoted by $\hat{m}_R(u)$, which is the unique global maximum point of $J_R(tu)$, for $t>0$. In particular,
$\hat{m}_R(u)=1$ if and only if $u\in\mathcal{N}_{J_R}$. Moreover, there exist $a_R,b_R>0$ such that
$$
\interleave{u\interleave}\ge a_R\,\,\mbox{for any}\,\,u\in\mathcal{N}_{J_R}\,\,\mbox{and}\,\,c_1(B_R(0))\ge b_R.
$$
\end{lemma}
\begin{proof}
{\bf Step 1.} By \re{gj} and \re{gj2}, for any $u\in X_R\setminus\{0\}$ and $t>0$,
$$
J_R(tu)\le Ct^{(p+1)/p}\int_{B_R(0)}|Lu|^{(p+1)/p}-\frac{q+1}{q}\beta t^{q+1}\int_{B_R(0)}|u|^{q+1}\rightarrow-\infty,\,\,t\rightarrow+\infty,
$$
and for any $\gamma>0$ small, there exists $c_\gamma>0$ such that
\begin{align*}
J_R(tu)\ge& \frac{t^2}{2}\int_{\{|Lu|\le g(b_0)\}}|Lu|^2+ct^{(p+1)/p}\int_{\{|Lu|>g(b_0)\}}|Lu|^{(p+1)/p}\\
&-\gamma t^2\int_{B_R(0)}|u|^2-c_\gamma t^{q+1}\int_{B_R(0)}|u|^{q+1}>0,\,\,|t|\ll1,
\end{align*}
where
$$
\{|Lu|\le g(b_0)\}:=\{x\in B_R(0): |Lu(x)|\le g(b_0)\}.
$$
For any $u\in\mathcal{N}_{J_R}$, let $\theta(t)=J_R(tu)$, then $\theta(0)=0$ and $\theta'(1)=0$. Recalling that $g_k(s)/s$ is strictly increasing for $s>0$,
$h(s)/s$ is strictly decreasing for $s>0$. Obviously, $Lu=0$ if and only if $u=0$. Then for any $t>1$, thanks to $(H4)$, $(H6)$,
\begin{align*}
\theta'(t)&=\int_{B_R(0)}h(tLu)Lu-\int_{B_R(0)}f_k(tu)u\\
&=\int_{B_R(0)}h(t|Lu|)|Lu|-\int_{B_R(0)}f_k(t|u|)|u|\\
&=\int_{B_R(0)}\frac{h(t|Lu|)}{t|Lu|}t|Lu|^2-\int_{B_R(0)}\frac{f_k(t|u|)}{t|u|}t|u|^2\\
&<t\int_{B_R(0)}h(|Lu|)|Lu|-t\int_{B_R(0)}f_k(|u|)|u|\\
&=t\int_{B_R(0)}h(Lu)Lu-t\int_{B_R(0)}f_k(u)u=0.
\end{align*}
Similarly, $\theta'(t)>0$ for $t<1$. Namely, $J_R(u)=\max_{t\ge0}J_R(tu)$. Similarly, for any $u\in X_R\setminus\{0\}$, $J_R(tu)\rightarrow-\infty$ as $t\rightarrow+\infty$ and the set $\mathbb R^+u$ intersects $\mathcal{N}_{J_R}$ at exactly one point, which is the unique globally maximum point of $J_R(tu)$ for $t>0$.
\vskip0.1in
{\bf Step 2.} We prove that there exists $a_R>0$ such that
$$
\interleave{u\interleave}\ge a_R\,\,\mbox{for any}\,\,u\in\mathcal{N}_{J_R}.
$$
For any $u\in X_R\sectionetminus\{0\}$, by \re{gj2} one has
\begin{equation}gin{align}\lambdab{biao1}
\int_{B_R(0)}h(Lu)Lu&\gammae\frac{1}{2}\int_{\{|Lu|\le g(b_0)\}}|Lu|^2+c\int_{\{|Lu|>g(b_0)\}}|Lu|^{(p+1)/p}\nonumber\\
&\gammae\frac{1}{2}|B_R(0)|^{\frac{1-p}{1+p}}\left(\int_{\{|Lu|\le g(b_0)\}}|Lu|^{(p+1)/p}\right)^{2p/(p+1)}\\
&\ \ \ +c\int_{\{|Lu|>g(b_0)\}}|Lu|^{(p+1)/p}.\nonumber
\varepsilonnd{align}
Moreover, by $(H1)$, for any small $\gamma>0$, there exist $c_\gamma>0$ and $C>0$ (independent of $\gamma$) such that
\begin{equation}gin{align}\lambdab{biao2}
\int_{B_R(0)}f_k(u)u\le\int_{B_R(0)}\gamma u^2+c_\gamma |u|^{q+1}\le C\interleave{u\interleave}^2(\gamma+c_\gamma\interleave{u\interleave}^{q-1})
\varepsilonnd{align}
Here we used the embedding of $X_R$ into $L^r(B_R(0))$ for $r=2$ and $r=q+1$. By choosing
$$\gamma=2^{-\frac{4p+2}{p+1}}|B_R(0)|^{\frac{1-p}{1+p}}C^{-1},$$
and for any $u\in\mathcal{N}_{J_R}$, if $\interleave{u\interleave}^{q-1}\le\gamma c_\gamma^{-1}$, by \re{biao1} and \re{biao2},
\begin{equation}gin{align*}
&\frac{1}{4}|B_R(0)|^{\frac{1-p}{1+p}}\left(\int_{\{|Lu|\le g(b_0)\}}|Lu|^{(p+1)/p}\right)^{2p/(p+1)}+c\int_{\{|Lu|>g(b_0)\}}|Lu|^{(p+1)/p}\\
&\le C\gamma2^{2p/(p+1)}\left(\int_{\{|Lu|>g(b_0)\}}|Lu|^{(p+1)/p}\right)^{2p/(p+1)}.
\varepsilonnd{align*}
Since $u\not=0$, we have $\int_{\{|Lu|>g(b_0)\}}|Lu|^{(p+1)/p}>0$ and then
$$
\int_{\{|Lu|>g(b_0)\}}|Lu|^{(p+1)/p}\gammae\left(\frac{c}{C\gamma2^{2p/(p+1)}}\right)^{\frac{p+1}{p-1}}>0.
$$
So that for any $u\in\mathcal{N}_{J_R}$ the following holds
$$
\interleave{u\interleave}\gammae\min\left\{(\gamma c_\gamma^{-1})^{\frac{1}{q-1}},\left(\frac{c}{C\gamma2^{2p/(p+1)}}\right)^{\frac{p}{p-1}}\right\}:=a_R>0.
$$
\vskip .25inkip0.1in
{\bf Step 3.} We prove that there exists $b_R>0$ such that $c_1(B_R(0))\gammae b_R.$ Obviously, $c_1(B_R(0))\gammae0$. Assume by contradiction that there exists $\{u_n\}\sectionubset\mathcal{N}_{J_R}$ such that $J_R(u_n)\rightarrow0$, as $n\rightarrow\infty$. We claim that $\{u_n\}$ is bounded in $X_R$. Indeed, if not we may assume $\interleave{u_n\interleave}\rightarrow\infty$, as $n\rightarrow\infty$. Let $v_n=u_n/\interleave{u_n\interleave}$ and assume that $v_n\rightharpoonup v$ weakly in $X_R$. If $v=0$, then by compactness of the embedding of $X_R$ into $L^r(B_R(0))$ for $r=2$ and $r=q+1$, we get $\int_{B_R(0)}F_k(v_n)\rightarrow0$, as $n\rightarrow\infty$. Then by Step 1,
\begin{equation}gin{align*}
J_R(u_n)=\max_{t\gammae0}J_R(tu_n)\gammae J_R(v_n)=\int_{B_R(0)}H(Lv_n)+o_n(1).
\varepsilonnd{align*}
Namely, $\int_{B_R(0)}H(Lv_n)=o_n(1)$. On the other hand, similar to \re{biao1},
\begin{equation}gin{align*}
\int_{B_R(0)}H(Lv_n)&\gammae\frac{1}{2}|B_R(0)|^{\frac{1-p}{1+p}}\left(\int_{\{|Lv_n|\le g(b_0)\}}|Lv_n|^{(p+1)/p}\right)^{2p/(p+1)}\\
&\ \ \ +c\int_{\{|Lv_n|>g(b_0)\}}|Lv_n|^{(p+1)/p}.
\varepsilonnd{align*}
It follows that $v_n\rightarrow0$ strongly in $X_R$, which contradicts the fact $\interleave{v_n\interleave}=1$. So $v\not=0$ and by \re{am2}, \re{gj2} and Fatou's Lemma,
$$
o_n(1)=\frac{J_R(u_n)}{\interleave{u_n\interleave}^{\frac{p+1}{p}}}\le C-\int_{B_R(0)}\frac{F_k(u_n)}{|u_n|^{(p+1)/p}}|v_n|^{(p+1)/p}\rightarrow-\infty.
$$
This is a contradiction. Hence, $\{u_n\}$ is bounded in $X_R$. We may assume, up to a subsequence, $u_n\rightharpoonup u$ weakly in $X_R$ and strongly in $L^2(B_R(0))$. Noting that $h(t)/t$ is strictly decreasing for $t>0$, we have $0<h(t)t\le2H(t)$ for all $t\not=0$. Then by $(H2)$,
\begin{equation}gin{align*}
o_n(1)&=J_R(u_n)-\frac{1}{2}\lambdan J_R'(u_n),u_n\ranglengle\\
&=\int_{B_R(0)}H(Lu_n)-\frac{1}{2}h(Lu_n)Lu_n+\frac{1}{2}\int_{B_R(0)}f_k(u_n)u_n-2F_k(u_n)\\
&\gammae\frac{1}{2}\int_{B_R(0)}f_k(u_n)u_n-2F_k(u_n)\gammae\frac{\theta-2}{2}\int_{\{x\in B_R(0): |u_n|\le a_0\}}F(u_n)\\
&\rightarrow\frac{\theta-2}{2}\int_{\{x\in B_R(0): |u|\le a_0\}}F(u),\,\,\mbox{as}\,\,n\rightarrow\infty.
\varepsilonnd{align*}
It follows that
$$
\int_{\{x\in B_R(0): |u|\le a_0\}}F(u)=0.
$$
Since $u\in X_R$, from elliptic regularity we get $u\in C^{0,2/(p+1)}(\overlineerline{B_R(0)})$, which yields $u=0$. Analogously we get $\int_{B_R(0)}F_k(u_n)\rightarrow0$, as $n\rightarrow\infty$ and
\begin{equation}gin{align*}
\int_{B_R(0)}H(Lu_n)=J_R(u_n)+o_n(1)=o_n(1).
\varepsilonnd{align*}
Similar to \re{biao1},
\begin{equation}gin{align*}
\int_{B_R(0)}H(Lu_n)&\gammae\frac{1}{2}|B_R(0)|^{\frac{1-p}{1+p}}\left(\int_{\{|Lu_n|\le g(b_0)\}}|Lu_n|^{(p+1)/p}\right)^{2p/(p+1)}\\
&\ \ \ +c\int_{\{|Lu_n|>g(b_0)\}}|Lu_n|^{(p+1)/p}.
\varepsilonnd{align*}
Thus $u_n\rightarrow0$ strongly in $X_R$, which contradicts the fact $\interleave{u\interleave}\gammae a_R$ for all $u\in\mathcal{N}_{J_R}$.
\varepsilonp
\noindent Define
$$
\hat{m}_R: u\in X_R\sectionetminus\{0\}\mapsto\hat{m}_R(u)\in\mathbb R^+u\cap\mathcal{N}_{J_R}.
$$
Similar as in \cite{Szulkin}, we have the following
\begin{equation}gin{lemma}\lambdab{l2.4}
There exists $\delta>0$ such that $\interleave{u\interleave}\gammae\delta$ for all $u\in\mathcal{N}_{J_R}$. In particular,
$$
\interleave{\hat{m}_R(u)\interleave}\gammae\delta,\ \ \ \mbox{for all}\ \ u\in X_R\sectionetminus\{0\}.
$$
Moreover, for each compact subset $\mathcal{W}\sectionubset X_R\sectionetminus\{0\}$, there exists a constant $C_{\mathcal{W}}>0$ such that
$$
\interleave{\hat{m}_R(u)\interleave}\le C_{\mathcal{W}},\ \ \ \mbox{for all}\ \ u\in\mathcal{W}.
$$
\varepsilonl
\begin{equation}gin{proof}
By \re{gj2}, for any $u\in\mathcal{N}_{J_R}$, we have
$$
b_1\le J_R(u)\le\int_{B_R(0)}H(Lu)\le C\interleave{u\interleave}^{p/(p+1)}.
$$
Thus, there exists $\delta>0$ such that $\interleave{u\interleave}\gammae\delta$ for any $u\in\mathcal{N}_{J_R}$. Moreover, since $\hat{m}_R(u)=\hat{m}_R(u/\interleave{u\interleave})$ for any $u\not=0$, without loss generality,
we may assume $\mathcal{W}\sectionubset S_R:=\{u\in X_R: \interleave{u\interleave}=1\}$. In the following, we claim that there exists $C_{\mathcal{W}}>0$ such that
\begin{equation}\lambdab{claim}
\hbox{$J_R\le 0$ on $\mathbb R^+u\sectionetminus B_{C_{\mathcal{W}}}(0)$, for all $u\in\mathcal{W}$,}
\varepsilone
where $B_{C_{\mathcal{W}}}(0)=\{v\in X_R: \interleave{v\interleave}\le C_{\mathcal{W}}\}$.
If the claim \re{claim} is true, then noting that $J_R(\hat{m}_R(u))\gammae b_1>0$ for all $0\not=u\in X_R$, we have
$\|\hat{m}_R(u)\|=\|\hat{m}_R(u/\interleave{u\interleave})\|\le C_{\mathcal{W}}$ for any $u\in \mathcal{W}$.
\vskip .25inkip0.1in
\noindent So let us prove \re{claim}. Assume by contradiction that there exists $\{u_n\}\sectionubset\mathcal{W}\sectionubset S_R$ with $u_n\rightarrow u$ strongly
in $\mathcal{W}$ and $\Omegaega_n\in\mathbb R^+u_n$ with $\Omegaega_n=t_nu_n$, $t_n\rightarrow\infty$ such that $J_R(\Omegaega_n)\gammae0$, as $n\rightarrow\infty$.
For $n$ large enough, by \re{gj2} one has
\begin{equation}gin{align}\lambdab{y3}
0\le\frac{J_R(\Omegaega_n)}{\interleave{\Omegaega_n\interleave}^{(p+1)/p}}&\le C
-\int_{B_R(0)}\frac{F_k(t_nu_n)}{|t_nu_n|^{(p+1)/p}}|u_n|^{(p+1)/p}.
\varepsilonnd{align}
Noting that $u_n\xrightarrow{a.e.}u\not=0$, it follows from Fatou's Lemma and \re{y3} that $$\frac{J_R(\omega_n)}{\interleave{\omega_n\interleave}^{(p+1)/p}}\rightarrow-\infty$$
as $n\rightarrow\infty$, which is a contradiction.
\end{proof}
\noindent Let $m_R:=\hat{m}_R|_{S_R}: S_R\longrightarrow\mathcal{N}_{J_R}$ and
$$
K: S_R\longrightarrow \mathbb R,\thetauad K(u):=J_R(m_R(u)), u\in S_R,
$$
then $\hat{m}_R$ is continuous and $m_R$ is a homeomorphism between $S_R$ and $\mathcal{N}_{J_R}$.
\begin{equation}gin{proposition}\lambdab{pkr5.6}\noindent
\begin{equation}gin{itemize}
\item [1)] $K\in C^1(S_R,\mathbb R)$ and
$
\lambdan K'(u),\Omegaega\ranglengle=\|m_R(u)\|\lambdan J_R'(m_R(u)),\Omegaega\ranglengle$, for all $\Omegaega\in T_u(S_R)$;
\item [2)] If $\{\Omegaega_n\}\sectionubset S_R$ is a Palais-Smale sequence for $K$, then $\{m_R(\Omegaega_n)\}\sectionubset \mathcal{N}_{J_R}$
is a Palais-Smale sequence for $J_R$. Namely, if $K(\Omegaega_n)\rightarrow d$ for some $d>0$ and $\|K'(\Omegaega_n)\|_\ast\rightarrow 0$, as $n\rightarrow\infty$,
then $J_R(m_R(\Omegaega_n))\rightarrow d$ and $\|J_R'(m_R(\Omegaega_n))\|\rightarrow0$, as $n\rightarrow\infty$, where
$$
\|K'(\Omegaega_n)\|_\ast=\sectionup_{\sectiontackrel{\Phihi\in T_{\Omegaega_n}(S_R)}{\interleave{\Phihi\interleave}=1}}\lambdan K'(\Omegaega_n),\Phihi\ranglengle\ \
\mbox{and}\ \ \|J_R'(m_R(\Omegaega_n))\|=\sectionup_{\sectiontackrel{\Phihi\in X_R}{\interleave{\Phihi\interleave}=1}}\lambdan J_R'(m_R(\Omegaega_n)),\Phihi\ranglengle.
$$
\item [3)] $\Omegaega\in S_R$ is a critical point of $K$ if and only if $m_R(\Omegaega)\in \mathcal{N}_{J_R}$ is a critical point of $J_R$;
\item [4)] $\inf_{S_R}K=\inf_{\mathcal{N}_{J_R}}J_R$.
\varepsilonnd{itemize}
\varepsilono
\begin{equation}gin{lemma}
For any $R>0$, $c_1(B_R(0))\gammae c_\ast(B_R(0)).$
\varepsilonl
\begin{equation}gin{proof} Observing that $S_R$ is a $C^1$-manifold in $X_R$, by virtue of the Ekeland variational principle (see \cite[Theorem 3.1]{E}), there exists $\{u_n\}\sectionubset\mathcal{N}_{J_R}$
such that
\begin{equation}\lambdab{pkrss4}
J_R(u_n)\rightarrow c_1(B_R(0))>0 \ \ \mbox{and}\ \ J_R'(u_n)\rightarrow 0,\ \ \mbox{as}\ \ n\rightarrow\infty.
\varepsilone
It is standard to show that $\{u_n\}$ is bounded in $X_R$, thus up to a subsequence, $u_n\rightarrow u$ weakly in $X_R$, as $n\rightarrow\infty$. By means of the compactness
of $X_R\hookrightarrow L^{r}(B_R(0))$ for any $r\gammae(p+1)/p$, $u_n\rightarrow u$ strongly in $L^{q+1}(B_R(0))$. Then
\begin{equation}gin{align}\lambdab{nonzero}
\liminf_{n\rightarrow\infty}\int_{B_R(0)}h(Lu_n)Lu_n=\liminf_{n\rightarrow\infty}\int_{B_R(0)}f(u_n)u_n=\int_{B_R(0)}f(u)u.
\varepsilonnd{align}
By \re{gj2}, we also have
$$
\int_{B_R(0)}h(Lu_n)Lu_n\gammae\frac{1}{2}\int_{|Lu_n|\le g(b_0)}|Lu_n|^2+c\int_{|Lu_n|>g(b_0)}|Lu_n|^{(p+1)/p}.
$$
We claim that $u\not\varepsilonquiv0$. Indeed, otherwise by \re{nonzero} we get
$$
\lim_{n\rightarrow\infty}\int_{|Lu_n|\le g(b_0)}|Lu_n|^2=0\,\,\mbox{and}\,\,\ \lim_{n\rightarrow\infty}\int_{|Lu_n|>g(b_0)}|Lu_n|^{(p+1)/p}=0.
$$
Hence
\begin{align*}
\lim_{n\rightarrow\infty}&\int_{B_R(0)}|Lu_n|^{(p+1)/p}\le\lim_{n\rightarrow\infty}\int_{|Lu_n|>g(b_0)}|Lu_n|^{(p+1)/p}\\
&+\lim_{n\rightarrow\infty}\left(\int_{|Lu_n|\le g(b_0)}|Lu_n|^2\right)^{(p+1)/(2p)}|B_R(0)|^{(p-1)/(2p)}\rightarrow0
\end{align*}
as $n\to\infty$, which implies $J_R(u_n)\rightarrow0$, as $n\rightarrow\infty$. This is a contradiction.
\noindent Next let $u_0=\hat{m}_R(u)u$ and $v_n=\hat{m}_R(u)u_n$. By $(H7)$, $H$ is convex. Therefore
$$
\liminf_{n\rightarrow\infty}\int_{B_R(0)}H(Lv_n)\ge\int_{B_R(0)}H(Lu_0)\,\,\mbox{and}\,\, \lim_{n\rightarrow\infty}\int_{B_R(0)}F(v_n)=\int_{B_R(0)}F(u_0).
$$
As $u_0\in\mathcal{N}_{J_R}$, on the one hand one has
\begin{align*}
\liminf_{n\rightarrow\infty}J_R(v_n)\ge\int_{B_R(0)}H(Lu_0)-\int_{B_R(0)}F(u_0)\ge c_1(B_R(0)).
\end{align*}
On the other hand, it follows from Lemma \ref{mountain} and $u_n\in\mathcal{N}_{J_R}$ the following
$$
\liminf_{n\rightarrow\infty}J_R(v_n)\le\liminf_{n\rightarrow\infty}\max_{t\gammae0}J_R(tu_n)=\liminf_{n\rightarrow\infty}J_R(u_n)=c_1(B_R(0)).
$$
and in turn $J_R(u_0)=c_1(B_R(0))$. By Proposition \ref{pkrss4} $J_R'(u_0)=0$ and by Proposition \ref{equ},
$(u_0,v_0)$ is a nontrivial critical point of $I_R$, namely $(u_0,v_0)\in\mathcal{N}_R$ where $v_0=h(Lu_0)$. Finally,
$$
c_\ast(B_R(0))\le I_R(u_0,v_0)=J_R(u_0)=c_1(B_R(0)).
$$
\varepsilonp
Similar as in \cite{dsr}, one can prove the reversed inequality to get the following
\begin{equation}gin{lemma}\lambdab{crr}
For any $R>0$,
$$c_\ast(B_R(0))=c_1(B_R(0)).$$
\varepsilonl
\begin{equation}gin{lemma}\lambdab{signr}
Let $(u_R,v_R)$ be any ground state for the functional $I_R$, then $u_Rv_R>0$ in $B_R(0)$.
\varepsilonl
\begin{equation}gin{proof}
Recalling that $\mathcal{S}=\mathcal{S}_k$, it is enough to
prove $uv>0$ in $\mathbb R^2$ for any $(u,v)\in\mathcal{S}_k$. For any $R>0$ and any ground state $(u_R,v_R)$ for the functional $I_R$,
by Lemma \ref{crr} and Proposition \ref{equ}, $u_R$ is a ground state for the functional $J_R$. Let $\Omegaega=L^{-1}(|Lu_R|)$, then $\Omegaega>0$
and $\Omegaega\gammae|u_R|$. Moreover, $\lambdan J_R'(t\Omegaega),\Omegaega\ranglengle=0$, where $t=\hat{m}_R(\Omegaega)>0$. On the other hand,
\begin{equation}gin{align*}
c_1(B_R(0))&\le J_R(t\Omegaega)=J_R(tu_R)+\int_{B_R(0)}F_k(t|u_R|)-F_k(t\Omegaega)\\
&\le c_1(B_R(0))+\int_{B_R(0)}F_k(t|u_R|)-F_k(t\Omegaega).
\varepsilonnd{align*}
So that
$\int_{B_R(0)}F_k(t|u_R|)-F_k(t\Omegaega)\gammae0$. It follows from $(H7)$ that $|u_R|=\Omegaega>0$. If $u_R>0$ in $B_R(0)$, then by means of the maximum principle, $v_R>0$
in $B_R(0)$ and $u_Rv_R>0$ in $B_R(0)$. Similarly, if $u_R<0$ in $B_R(0)$, $u_Rv_R>0$ in $B_R(0)$.
\varepsilonp
\noindent As a consequence of Lemma \ref{crr} and Lemma \ref{signr}, see also \cite[Remark 4.11]{dsr}, we have
\begin{equation}gin{lemma}\lambdab{compare}
The map $R\mapsto c_\ast(B_R(0))$ is decreasing for $R>0$.
\varepsilonl
\begin{equation}gin{lemma}\lambdab{cast}
For any $R>0$, we have $c_\ast(B_R(0))\gammae c_\ast(\mathbb R^2)$.
\varepsilonl
\begin{equation}gin{proof}
For any $R>0$, let $z_R=(u_R,v_R)$ be a ground state solution of $I_R$. Namely, $I_R(z_R)=c_\ast(B_R(0))$ and $I_R'(z_R)=0$.
We extend $z_R\in E_R$ to $z_R\in E$ by zero extension outside $B_R(0)$. Then, as in Lemma \ref{o14}, $\{z_R\}$ turns out to be
bounded in $E$. Up to a subsequence, we may assume $z_R\rightharpoonup z_0$ weakly in $E$, as $R\rightarrow\infty$, then $z_0=(u_0,v_0)\in E$ is a nonnegative
solution to \re{qk1}, namely $\Phi_k'(z_0)=0$.
If $z_0\not=0$, by $(H2)$ and Fatou's Lemma, we have for any $r\le R$,
\begin{equation}gin{align*}
c_\ast(B_r(0))&\gammae\lim_{R\rightarrow\infty}c_\ast(B_R(0))=\lim_{R\rightarrow\infty}\left(I_R(z_R)-\frac{1}{2}\lambdan I_R'(z_R),z_R\ranglengle\right)\\
&=\lim_{R\rightarrow\infty}\left(\int_{B_R(0)}\frac{1}{2}f_k(u_R)u_R-F_k(u_R)+\int_{B_R(0)}\frac{1}{2}g_k(v_R)v_R-G_k(v_R)\right)\\
&\gammae\int_{\mathbb R^2}\frac{1}{2}f_k(u_0)u_0-F_k(u_0)+\int_{\mathbb R^2}\frac{1}{2}g_k(v_0)v_0-G_k(v_0)\\
&=\Phi_k(z_0)-\frac{1}{2}\lambdan \Phi_k'(z_0),z_0\ranglengle=\Phi_k(z_0)\gammae c_\ast(\mathbb R^2).
\varepsilonnd{align*}
If $z_0=0$, then $\{z_R\}$ satisfies one of the following alternatives:
\begin{equation}gin{itemize}
\item [(1)] ({\it Vanishing})
$$
\lim_{R\rightarrow\infty}\sectionup_{y\in\mathbb R^2}\int_{B_r(y)}(u_R^2+v_R^2)\, \mathrm{d} x=0,\ \ \mbox{for all}\ \ r>0;
$$
\item [(2)] ({\it Nonvanishing}) there exist $\nu>0$, $r_0>0$ and $\{y_R\}\sectionubset\mathbb R^2$ such that
$$
\lim_{R\rightarrow\infty}\int_{B_{r_0}(y_R)}(u_R^2+v_R^2)\, \mathrm{d} x\gammae\nu.
$$
\varepsilonnd{itemize}
As in Proposition \ref{nv}{\it Vanishing} does not occur. So let $\tilde{u}_R:=u_R(\cdotsot+y_R)$ and $\tilde{v}_R:=v_R(\cdotsot+y_R)$,
then $\tilde{z}_R=(\tilde{u}_R,\tilde{v}_R)$ is bounded in $H^1(\mathbb R^2)$ and $\tilde{z}_R\rightharpoonup \tilde{z}_0\not=0$ weakly in $H^1(\mathbb R^2)$.
Moreover, let $\tilde{z}_0=(\tilde{u}_0,\tilde{v}_0)$, we know $\tilde{u}_0,\tilde{v}_0$ are nonnegative. Obviously, $|y_R|\le R+r_0$. Assume that, up to a rotation,
$y_R/|y_R|\rightarrow (0,-1)\in\mathbb R^2$ and $(\tilde{u}_0,\tilde{v}_0)\in H_0^1(\Omega)\tildemes H_0^1(\Omega)$ satisfies
\begin{equation}\lambdab{qkj2} \begin{equation}gin{cases}
-\Delta \tilde{u}_0+V_0\tilde{u}_0=g_k(\tilde{v}_0)\\
-\Delta \tilde{v}_0+V_0\tilde{v}_0=f_k(\tilde{u}_0)
\varepsilonnd{cases}
\varepsilone
where $\Omega=\mathbb R^2$ or $\Omega=\{(x_1,x_2)\in\mathbb R^2:x_2>d\}$, where $d:=\liminf_{R\rightarrow\infty}\mbox{dist}(y_R,\Phiartial B_R(0))$. If $\Omega=\mathbb R^2$ the proof follows. If $\Omega=\{(x_1,x_2)\in\mathbb R^2:x_2>d\}$, then by the Hopf Lemma,
$\Phiartial\tilde{u}_0/\Phiartial \varepsilonta<0$ and $\Phiartial\tilde{v}_0/\Phiartial \varepsilonta<0$ on $\Phiartial\Omega$, where $\varepsilonta$ is the outward pointing unit normal to $\Phiartial\Omega$. Finally from the Pohozaev type identity proved in \cite[Proposition 1.2]{Lions}(see also \cite[Lemma 3.1]{Pisto}) one actually has
$$
\int_{\Phiartial\Omega}\frac{\Phiartial\tilde{u}_0}{\Phiartial n}\frac{\Phiartial\tilde{v}_0}{\Phiartial n}=0,
$$
which is a contradiction.
\varepsilonp
\begin{equation}gin{proof}[Proof of Theorem \ref{sign} completed]
Thanks to Lemma \ref{cast} any ground state solution $(u,v)$ to \re{q11} does not change sign.
Assume $u>0$ and $v>0$ in $\mathbb R^2$. Setting
$$
f_1(u,v)=g(v)-V_0u\,\,\,\mbox{and}\,\,\ f_2(u,v)=f(u)-V_0v,
$$
as a consequence of \cite[Theorem 1]{Sirakov1} and $(H1)$, $(u,v)$ is radially symmetric and strictly decreasing with respect to the same point, which we denote by $x_0$. Clearly, $\Delta u(x_0)\le0$ and $\Delta v(x_0)\le0$. To complete the proof of Theorem \ref{sign}, we next prove that actually $\Delta u(x_0)<0$ and $\Delta v(x_0)<0$. Indeed, if not, without loss of generality we may assume $\Delta u(x_0)=0$ and then $g(v(x_0))=V_0u(x_0)$. Let $u_1=u-u(x_0)$, then $u_1(x)\le0$ in $\mathbb R^2$ and
\begin{equation}gin{align*}
&-\Delta u_1=-\Delta u=g(v)-V_0u\\
&\le g(v(x_0))-V_0u(x_0)-V_0u_1\\
&=-V_0u_1.
\varepsilonnd{align*}
Namely, $-\Delta u_1+V_0 u_1\le 0$ in $\mathbb R^2$. Noting that $u_1(x_0)=0$, by the maximum principle, $u_1\equiv0$ in $\mathbb R^2$, which is a contradiction. Therefore, $\Delta u(x_0)<0$. Similarly, one has $\Delta v(x_0)<0$ as well. {Finally, by Proposition \ref{vanishing_R}, $u(x+x_z), v(x+x_z)\rightarrow 0$, as $|x|\rightarrow\infty$ uniformly for any $z=(u,v)\in \mathcal{S}$. Since $u,v$ do not change the sign, using the maximum principle, we conclude that there exist $C,c>0$, independent of $z=(u,v)\in \mathcal{S}$, such that $$|D^{\alpha}u(x)|+|D^{\alpha}v(x)|\le C\exp(-c|x-x_0|),\,x\in \mathbb R^2,\,|\alpha|=0,1.$$ }
\end{proof}
\section{Proof of Theorem \ref{Th1}}\label{semiclassical_s}
\renewcommand{\theequation}{5.\arabic{equation}}
\sectionubsection{Functional setting} By setting $u(x)=\varphi(\varepsilon x),v(x)=\Phisi(\varepsilon x)$ and $V_\varepsilon(x)=V(\varepsilon x)$, \re{q1} is equivalent to
\begin{equation}\lambdab{q51} \left\{
\begin{equation}gin{array}{ll}
-\Delta u+V_\varepsilon(x)u=g(v)\\
-\Delta v+V_\varepsilon(x)v=f(u)
\varepsilonnd{array}
\right. \varepsilone
We next consider \re{q51}. Let $H_\varepsilon$ be the completion of $C_0^\infty(\mathbb R^2)$ with respect to the inner product
$$
(u,v)_{1,\varepsilon}:=\int_{\mathbb R^2}\nabla u\nabla v+V_\varepsilon(x)uv$$
and the norm
$$\|u\|_{1,\varepsilon}^2:=(u,u)_{1,\varepsilon},\quad u,v\in H_\varepsilon.
$$
Let $E_\varepsilon:=H_\varepsilon\times H_\varepsilon$ with the inner product
$$
(z_1,z_2)_\varepsilon:=(u_1,u_2)_{1,\varepsilon}+(v_1,v_2)_{1,\varepsilon},\ \ z_i=(u_i,v_i)\in E_\varepsilon,\: i=1,2.
$$
and the norm $\|z\|_\varepsilon^2=\|(u,v)\|_\varepsilon^2=\|u\|_{1,\varepsilon}^2+\|v\|_{1,\varepsilon}^2.$ We have the orthogonal space decomposition $E_\varepsilon=E_\varepsilon^+\oplus E_\varepsilon^-$, where
$$
E_\varepsilon^+:=\{(u,u)\,|\, u\in H_\varepsilon\}\ \ \ \mbox{and}\ \ \ E_\varepsilon^-:=\{(u,-u)\,|\,u\in H_\varepsilon\}.
$$
For each $z=(u,v)\in E_\varepsilon$, $$z=z^++z^-=((u+v)/2,(u+v)/2)+((u-v)/2,(v-u)/2).$$
Weak solutions of \re{q51} are critical points of the associated energy functional
$$
\Phi_\varepsilon(z):=\int_{\mathbb R^2}\nabla u\nabla v+V_\varepsilon(x)uv-I(z),\ \ z=(u,v)\in E_\varepsilon,
$$
where $I(z)=\int_{\mathbb R^2}F(u)+G(v)$. Then $\Phi_\varepsilon\in C^1(E_\varepsilon,\mathbb R)$
and
$$
\langle\Phi_\varepsilon'(z),w\rangle=\int_{\mathbb R^2}(\nabla u\nabla w_2+\nabla v\nabla w_1+V_\varepsilon(x)uw_2+V_\varepsilon(x)vw_1)-\int_{\mathbb R^2}(f(u)w_1+g(v)w_2),
$$
for all $z=(u,v),w=(w_1,w_2)\in E_\varepsilon$. Moreover, $\Phi_\varepsilon$ can be rewritten as follows
\begin{equation}\label{y51}
\Phi_\varepsilon(z):=\frac{1}{2}\|z^+\|_\varepsilon^2-\frac{1}{2}\|z^-\|_\varepsilon^2-I(z).
\end{equation}
We know that if $z\in E_\varepsilon$ is a nontrivial critical point of $\Phi_\varepsilon$, then $z\in E_\varepsilon\setminus E_\varepsilon^-$. In the spirit of \cite{Szulkin}, we define the generalized Nehari Manifold
$$
\mathcal{N}_\varepsilon:=\{z\in E_\varepsilon\setminus E_\varepsilon^-: \langle \Phi_\varepsilon'(z),z\rangle_\varepsilon=0, \langle \Phi_\varepsilon'(z),\varphi\rangle_\varepsilon=0\ \mbox{for all}\ \ \varphi\in E_\varepsilon^-\}.
$$
Let
$$
c_\varepsilon:=\inf_{z\in\mathcal{N}_\varepsilon}\Phi_\varepsilon(z),
$$
then $c_\varepsilon$ is the least energy for system \re{q51}, the so-called ground state level.
\noindent For $z\in E_\varepsilon\setminus E_\varepsilon^-$, set
$$
\hat{E}_\varepsilon(z)=E_\varepsilon^-\oplus\mathbb R^+z=E_\varepsilon^-\oplus\mathbb R^+z^+,
$$
where $\mathbb R^+z:=\{tz: t\ge0\}$. From \cite{DJJ,Szulkin,Weth} we have the following properties of $\mathcal{N}_\varepsilon$, which will be used later.
\begin{lemma}\label{l5.1} Under the assumptions in Theorem \ref{Th1}, we have:
\begin{itemize}
\item [1)] for any $z\in \mathcal{N}_\varepsilon$, $\Phi_\varepsilon|_{\hat{E}_\varepsilon(z)}$ admits a unique maximum point which occurs precisely at $z$;
\item [2)] for any $z\in E_\varepsilon\setminus E_\varepsilon^-$, the set $\hat{E}_\varepsilon(z)$ intersects $\mathcal{N}_\varepsilon$ at exactly one point $\hat{m}_\varepsilon(z)$, which is the unique global maximum point of $\Phi_\varepsilon|_{\hat{E}_\varepsilon(z)}$.
\end{itemize}
\end{lemma}
\subsection{Lower and upper bounds for $c_\varepsilon$}
\begin{proposition}\label{co51} There exists $c_0>0$ (independent of $\varepsilon$) such that for $\varepsilon>0$ sufficiently small,
$$c_\varepsilon=\inf_{z\in E_\varepsilon\setminus E_\varepsilon^-}\max_{\omega\in\hat{E}_\varepsilon(z)}\Phi_\varepsilon(\omega)\in(c_0,4\pi/\alpha_0).$$
\end{proposition}
\begin{proof}
The min-max characterization is standard and we refer to \cite{DJJ}. Here we are concerned with estimating from below and above the critical level $c_\varepsilon$.
\noindent {\it Lower bound.} On one hand, for any $z\in E_\varepsilon$, we know $\hat{E}_\varepsilon(z)=\hat{E}_\varepsilon(z^+)$. Then, for any $a>0$
\begin{align*}
c_\varepsilon&=\inf_{z\in E_\varepsilon\setminus E_\varepsilon^-}\max_{\omega\in\hat{E}_\varepsilon(z)}\Phi_\varepsilon(\omega)=\inf_{z\in E_\varepsilon^+\setminus\{0\}}\max_{\omega\in\hat{E}_\varepsilon(z)}\Phi_\varepsilon(\omega)\\
&=\inf_{z\in S_{a,\varepsilon}^+}\max_{\omega\in\hat{E}_\varepsilon(z)}\Phi_\varepsilon(\omega)\ge\inf_{z\in S_{a,\varepsilon}^+}\max_{\omega\in\mathbb R^+ z}\Phi_\varepsilon(\omega),
\end{align*}
where
$
S_{a,\varepsilon}^+:=\{z\in E_\varepsilon^+: \|z\|_\varepsilon=a\}.
$
On the other hand, recalling that $f,g$ have critical growth with critical exponent $\alpha_0$, by $(H1)$, for some $\alpha'>\alpha_0$, there exists $C>0$ such that
\begin{equation}\label{fg}
F(t)\le \frac{1}{4}V_0|t|^2+C |t|^3\left(e^{\alpha't^2}-1\right),\, G(t)\le \frac{1}{4}V_0|t|^2+C |t|^3\left(e^{\alpha't^2}-1\right), t\in\mathbb R.
\end{equation}
By the Pohozaev-Trudinger-Moser inequality, there exists $a>0$ sufficiently small such that
$$
\int_{\mathbb R^2}\left(e^{2\alpha'u^2}-1\right)\le1,
$$
for any $u\in H^1(\mathbb R^2)$ with $\|u\|_{H^1}\le a$. Then, for any $z=(u,u)\in S_{a,\varepsilon}^+$,
{\allowdisplaybreaks
\begin{align*}
&\max_{\omega\in\mathbb R^+ z}\Phi_\varepsilon(\omega)\ge\Phi_\varepsilon(z)=\int_{\mathbb R^2}|\nabla u|^2+V_\varepsilon(x)u^2-\int_{\mathbb R^2}F(u)+G(u)\\
&\ge\|u\|_{1,\varepsilon}^2-V_0/2\int_\Omega u^2-2C\int_{\mathbb R^2} |u|^3\left(e^{\alpha'u^2}-1\right)\\
&\ge C'\|u\|_{1,\varepsilon}^2-2C\left(\int_\Omega u^6\right)^{1/2}\ge\|u\|_{1,\varepsilon}^2(C'-2C C_6^3\|u\|_{1,\varepsilon}),
\end{align*}
}
where $C'=\min\{1,V_0\}/2$ and $C_6$ is the Sobolev's constant of the embedding $H^1(\mathbb R^2)\hookrightarrow L^6(\mathbb R^2)$. Thus, taking $a>0$ fixed but small enough, for any $z=(u,u)\in S_{a,\varepsilon}^+$, we have $\|u\|_{1,\varepsilon}^2=a^2/2$ and
$$
\max_{\omega\in\mathbb R^+ z}\Phi_\varepsilon(\omega)\ge\|u\|_{1,\varepsilon}^2\left[C'-2CC_6^3\|u\|_{1,\varepsilon}\right]\ge a^2/6>0.
$$
Thus, for any $\varepsilon>0$, $c_\varepsilon\ge c_0=a^2/6$.
\noindent {\it Upper bound.} By $(H5)$ and $V(0)=V_0$, we can fix $r>0$ and $\varepsilon_0>0$ such that
\begin{equation}
\label{ChoiceOfBeta}
\beta_0>\frac{4e^{\frac{r^2}{2}\max_{|x|\le\varepsilon r}V(x)}}{\alpha_0 r^2},\,\,\varepsilon\in(0,\varepsilon_0),
\end{equation}
we consider the following so-called Moser sequence
\begin{equation}
\omega_k(x) = \frac{1}{\sqrt{2\pi}}\left\{
\begin{array}{ll}
(\log k)^{1/2}, & | x | \leq r/k;\\
\frac{ \log \frac{r}{| x |} }{(\log k)^{1/2}}, & r/k \leq
| x | \leq r; \\
0, & | x | \geq r.
\end{array}
\right.
\end{equation}
Then, one easily checks that $\|\nabla\omega_k\|_2=1$ and $\|\omega_k\|_2^2=r^2/(4\log{k})+o(r^2/\log{k})$. Let $d_k(r):= r^2/4 + o_k(1)$ where $o_k(1) \to 0$, as $k \to + \infty$ and $\tilde{\omega}_{k,\varepsilon}:=\omega_k/\|\omega_k\|_{1,\varepsilon}$, then $\|\tilde{\omega}_{k,\varepsilon}\|_{1,\varepsilon}=1$ and for $k$ large enough,
\begin{equation}
\label{EstOfwn}
\tilde{\omega}_{k,\varepsilon}^2(x)\geq \frac 1{2 \pi} \: \Bigl( \log k - \: d_{k,\varepsilon}(r) \Bigr) \quad \text{for } |x| \leq \frac r k,
\end{equation}
where $d_{k,\varepsilon}(r)=d_k(r)\max_{|x|\le\varepsilon r}V(x)\ge V_0d_k(r)$.
\noindent Suppose by contradiction that for some fixed $\varepsilon\in(0,\varepsilon_0)$ and for all $ k $,
$$
\sup_{z\in\hat{E}((\tilde{\omega}_{k,\varepsilon},\tilde{\omega}_{k,\varepsilon}))}\Phi_\varepsilon(z) \geq 4\pi/\alpha_0.
$$
Then $\Phi_\varepsilon(\hat{m}((\tilde{\omega}_{k,\varepsilon},\tilde{\omega}_{k,\varepsilon})))\geq 4\pi/\alpha_0$ for all $ k $, where $\hat{m}((\tilde{\omega}_{k,\varepsilon},\tilde{\omega}_{k,\varepsilon}))\in\mathcal{N}_\varepsilon$ and
$$
\hat{m}((\tilde{\omega}_{k,\varepsilon},\tilde{\omega}_{k,\varepsilon})) = \tau_k(\tilde{\omega}_{k,\varepsilon},\tilde{\omega}_{k,\varepsilon}) + (u_k,-u_k)\in\hat{E}((\tilde{\omega}_{k,\varepsilon},\tilde{\omega}_{k,\varepsilon})).
$$
Namely,
\begin{equation}\label{Itatiaia}
\tau^2_{k} - \int_{\mathbb R^2}(|\nabla u_{k}| ^2+V_\varepsilon(x)u_k^2) -
\int_{\mathbb R^2} [F(\tau_{k}\tilde{\omega}_{k,\varepsilon} + u_{k})+G(\tau_{k}\tilde{\omega}_{k,\varepsilon}-u_{k})] \geq 4\pi/\alpha_0
\end{equation}
and
\begin{equation}\label{Itatuba}
\tau^2_{k} -
\int_{\mathbb R^2}(|\nabla u_{k}| ^2+V_\varepsilon(x)u_k^2) =
\int_{\mathbb R^2} [f(\tau_{k}\tilde{\omega}_{k,\varepsilon} + u_{k})(\tau_{k}\tilde{\omega}_{k,\varepsilon} + u_{k})
+ g(\tau_{k}\tilde{\omega}_{k,\varepsilon} -u_{k})(\tau_{k}\tilde{\omega}_{k,\varepsilon} -u_{k})].
\end{equation}
\noindent {\it Claim:} $\lim_{k\rightarrow\infty}\tau_k^2=4 \pi / \alpha_0 $. Indeed, from (\ref{Itatiaia}), we get $\tau_{k}^2 \geq 4 \pi / \alpha_0 $.
From $(H5)$, given $ \rho>0 $, there exists $ R_\rho $ such
that
\[
tf(t) \geq (\beta_0 - \rho) e^{\alpha_0 t^2}
\mbox{ for all } t \geq R_\rho
\]
and the same holds true also for $tg(t)$.
Noting that $$\tau_k \tilde{\omega}_{k,\varepsilon}=\frac{\tau_k}{\|\omega_k\|_\varepsilon}\frac{\sqrt{\log{k}}}{\sqrt{2\pi}}\rightarrow+\infty,\ \ \mbox{as}\ \ k\rightarrow\infty,\ \ x\in B_{r/k},$$
by choosing $k$ sufficiently large, we get
$\max{\{\tau_{k}\tilde{\omega}_{k,\varepsilon} +u_{k}, \; \tau_{k}\tilde{\omega}_{k,\varepsilon} -u_{k}\}} \geq
R_\rho $ for all $ x \in B_{r/k} $. So that by \re{EstOfwn},
\begin{align}\label{bound}
\tau_k^2&\ge\int_{B_{r/k}} [f(\tau_{k}\tilde{\omega}_{k,\varepsilon} + u_{k})(\tau_{k}\tilde{\omega}_{k,\varepsilon} + u_{k})
+ g(\tau_{k}\tilde{\omega}_{k,\varepsilon} -u_{k})(\tau_{k}\tilde{\omega}_{k,\varepsilon} -u_{k})]\nonumber\\
&\geq (\beta_0- \rho) \int_{B_{r/k}}e^{\alpha_0 (\tau_k\tilde{\omega}_{k,\varepsilon})^2} \, \mathrm{d} x\nonumber\\
&\ge\pi r^2(\beta_0 - \rho)\: e^{\frac{\alpha_0}{2\pi}\tau_k^2[\log k - d_{k,\varepsilon}(r)]-2\log{k}},
\end{align}
which implies that $\{\tau_k\}$ is bounded. By \re{bound}, as a consequence of the boundedness of $\{\tau_k\}$, we know $\limsup_{k\rightarrow\infty}\tau_k^2\le4 \pi / \alpha_0$. In fact, if not we have
$$
\limsup_{k\rightarrow\infty}e^{\frac{\alpha_0}{2\pi}\tau_k^2[\log k - d_{k,\varepsilon}(r)]-2\log{k}}=\infty,
$$
which is a contradiction, and the claim is proved.
\noindent As $\omega_k \to 0 $ a.e. in $\mathbb R^2$, by the Lebesgue dominated convergence theorem
$$\int_{\{x\in B_r:\tau_k\tilde{\omega}_{k,\varepsilon} < R_\rho\}} \min\{f(\tau_k\tilde{\omega}_{k,\varepsilon}) \tau_k\tilde{\omega}_{k,\varepsilon}, g(\tau_k\tilde{\omega}_{k,\varepsilon}) \tau_k\tilde{\omega}_{k,\varepsilon}\}\, \mathrm{d} x \rightarrow0,\quad k\rightarrow\infty$$
and $$\int_{\{x\in B_r:\tau_k\tilde{\omega}_{k,\varepsilon} < R_\rho\}} e^{\alpha_0 (\tau_k\tilde{\omega}_{k,\varepsilon})^2} \, \mathrm{d} x\rightarrow\pi r^2.$$
Then, from \eqref{Itatuba} and $(H4)$ we obtain
\begin{align*}
\tau_k^2 & \ge\int_{B_r} [f(\tau_k\tilde{\omega}_{k,\varepsilon}+u_k)(\tau_k\tilde{\omega}_{k,\varepsilon}+u_k)+g(\tau_k\tilde{\omega}_{k,\varepsilon}-u_k)(\tau_k\tilde{\omega}_{k,\varepsilon}-u_k)] \, \mathrm{d} x\\
& \geq (\beta_0- \rho) \int_{B_r} e^{\alpha_0 (\tau_k\tilde{\omega}_{k,\varepsilon})^2} \, \mathrm{d} x - (\beta_0- \rho) \int_{\{x\in B_r:\tau_k\tilde{\omega}_{k,\varepsilon} < R_\rho\}} e^{\alpha_0 (\tau_k\tilde{\omega}_{k,\varepsilon})^2} \, \mathrm{d} x\\
&\ \ \ \ +\int_{\{x\in B_r:\tau_k\tilde{\omega}_{k,\varepsilon} < R_\rho\}}\min\{f(\tau_k\tilde{\omega}_{k,\varepsilon}) \tau_k\tilde{\omega}_{k,\varepsilon}, g(\tau_k\tilde{\omega}_{k,\varepsilon}) \tau_k\tilde{\omega}_{k,\varepsilon}\}\, \mathrm{d} x\\ &=(\beta_0- \rho)\Big[\int_{B_r} e^{\alpha_0 (\tau_k\tilde{\omega}_{k,\varepsilon})^2} \, \mathrm{d} x-\pi r^2\Big].
\end{align*}
In the following, we estimate the term $\int_{B_r} e^{\alpha_0 (\tau_k\tilde{\omega}_{k,\varepsilon})^2} \, \mathrm{d} x$. Observe first that from \eqref{EstOfwn} one has
$$\int_{B_{r/k}} e^{\alpha_0 (\tau_k\tilde{\omega}_{k,\varepsilon})^2} \, \mathrm{d} x \geq \pi r^2 \: e^{\frac{\alpha_0}{2\pi}\tau_k^2[\log{k}- d_{k,\varepsilon}(r)]-2\log{k}}.$$ Noting that $\tau_k^2\ge4\pi/\alpha_0$ and $\tau_k^2\rightarrow4\pi/\alpha_0$, we have
$$\liminf_{k\rightarrow\infty}\int_{B_{r/k}} e^{\alpha_0 (\tau_k\tilde{\omega}_{k,\varepsilon})^2}\, \mathrm{d} x \geq \pi r^2e^{-\max_{|x|\le\varepsilon r}V(x)r^2/2}.$$
Secondly, by using the change of variable $s=r e^{-\|\omega_k\|_\varepsilon \sqrt{\log k} \: t}$, one has
\begin{equation*}
\begin{split}
\int_{B_r \setminus B_{r/k}} e^{4 \pi (\tilde{\omega}_{k,\varepsilon})^2} \, \mathrm{d} x & = 2 \pi r^2 \|\omega_k\|_\varepsilon \sqrt{\log k} \: \int_{0}^{\frac{\sqrt{\log k}}{\|\omega_k\|_\varepsilon}} \: e^{2(\:t^2 - \|\omega_k\|_\varepsilon \sqrt{\log k} \: t \:)} \, \mathrm{d} t
\\
& \geq 2 \pi r^2 \|\omega_k\|_\varepsilon \sqrt{\log k} \: \int_{0}^{\frac{\sqrt{\log k}}{\|\omega_k\|_\varepsilon}} \: e^{-2\|\omega_k\|_\varepsilon \sqrt{\log k} \: t} \, \mathrm{d} t \\
& = \pi r^2 \bigl( 1 - e^{-2 \log k}\bigr).
\end{split}
\end{equation*}
Thus $$\liminf_{k\rightarrow\infty}\int_{B_r} e^{\alpha_0(\tau_k \tilde{\omega}_{k,\varepsilon})^2} \, \mathrm{d} x \geq \pi r^2(e^{-\max_{|x|\le\varepsilon r}V(x)r^2/2}+1),$$
which implies
$$4\pi/\alpha_0= \lim_{k \to + \infty} \tau_k^2 \geq (\beta_0 - \rho) \pi r^2e^{-\max_{|x|\le\varepsilon r}V(x)r^2/2}.$$
As $\rho$ is arbitrary, we have
$$\beta_0 \leq \frac{4e^{\frac{r^2}{2}\max_{|x|\le\varepsilon r}V(x)}}{\alpha_0 r^2},$$
which contradicts \eqref{ChoiceOfBeta}. Therefore, $c_\varepsilon<4\pi/\alpha_0$ for $\varepsilon\in(0,\varepsilon_0)$.
\end{proof}
\subsection{Existence of solutions to system \re{q51}}\
\noindent Let us define
$$
\hat{m}_\varepsilon: z\in E_\varepsilon\setminus E_\varepsilon^-\mapsto\hat{m}_\varepsilon(z)\in\hat{E}_\varepsilon(z)\cap\mathcal{N}_\varepsilon.
$$
\begin{lemma}\label{l5.4}
There exists $\delta>0$ (independent of $\varepsilon$) such that $\|z^+\|_\varepsilon\ge\delta$ for all $z\in\mathcal{N}_\varepsilon$. In particular,
$$
\|\hat{m}_\varepsilon(z)^+\|_\varepsilon\ge\delta\ \ \ \mbox{for all}\ \ z\in E_\varepsilon\setminus E_\varepsilon^-.
$$
Moreover, for each compact subset $\mathcal{W}\subset E_\varepsilon\setminus E_\varepsilon^-$, there exists a constant $C_{\mathcal{W},\varepsilon}>0$ such that
$$
\|\hat{m}_\varepsilon(z)\|_\varepsilon\le C_{\mathcal{W},\varepsilon}\ \ \ \mbox{for all}\ \ z\in\mathcal{W}.
$$
\end{lemma}
\noindent Let
$$
S_\varepsilon^+:=\{z\in E_\varepsilon^+: \|z\|_\varepsilon=1\},
$$
then $S_\varepsilon^+$ is a $C^1$-submanifold of $E_\varepsilon^+$ and the tangent manifold of $S_\varepsilon^+$ at $z\in S_\varepsilon^+$ is
$$
T_z(S_\varepsilon^+)=\{\omega\in E_\varepsilon^+: (\omega,z)_\varepsilon=0\}.
$$
Let
$$
m_\varepsilon:=\hat{m}_\varepsilon|_{S_\varepsilon^+}: S_\varepsilon^+\longrightarrow \mathcal{N}_\varepsilon,
$$
then by Lemma \ref{l5.4}, $\hat{m}_\varepsilon$ is continuous and $m_\varepsilon$ is a homeomorphism between $S_\varepsilon^+$ and $\mathcal{N}_\varepsilon$. Define
$$
\Psi_\varepsilon: S_\varepsilon^+\longrightarrow\mathbb R,\quad \Psi_\varepsilon(z):=\Phi_\varepsilon(m_\varepsilon(z)),\: z\in S_\varepsilon^+,
$$
then, as a consequence of \cite[Corollary 4.3]{Weth}, for any fixed $\varepsilon>0$, we have the following
\begin{proposition}\label{p5.5}\noindent
\begin{itemize}
\item [1)] $\Psi_\varepsilon\in C^1(S_\varepsilon^+,\mathbb R)$ and
$$
\langle\Psi_\varepsilon'(z),\omega\rangle_\varepsilon=\|m_\varepsilon(z)^+\|\langle\Phi_\varepsilon'(m_\varepsilon(z)),\omega\rangle_\varepsilon\ \ \mbox{for all}\ \ \omega\in T_z(S_\varepsilon^+);
$$
\item [2)] If $\{\omega_n\}\subset S_\varepsilon^+$ is a Palais-Smale sequence for $\Psi_\varepsilon$, then $\{m_\varepsilon(\omega_n)\}\subset \mathcal{N}_\varepsilon$ is a Palais-Smale sequence for $\Phi_\varepsilon$. Namely, if $\Psi_\varepsilon(\omega_n)\rightarrow d$ for some $d>0$ and $\|\Psi_\varepsilon'(\omega_n)\|_\ast\rightarrow 0$, as $n\rightarrow\infty$, then $\Phi_\varepsilon(m_\varepsilon(\omega_n))\rightarrow d$ and $\|\Phi_\varepsilon'(m_\varepsilon(\omega_n))\|\rightarrow0$, as $n\rightarrow\infty$, where
$$
\|\Psi_\varepsilon'(\omega_n)\|_\ast=\sup_{\stackrel{\phi\in T_{\omega_n}(S_\varepsilon^+)}{\|\phi\|_\varepsilon=1}}\langle\Psi_\varepsilon'(\omega_n),\phi\rangle_\varepsilon\ \ \mbox{and}\ \ \|\Phi_\varepsilon'(m_\varepsilon(\omega_n))\|=\sup_{\stackrel{\phi\in E_\varepsilon}{\|\phi\|_\varepsilon=1}}\langle\Phi_\varepsilon'(m_\varepsilon(\omega_n)),\phi\rangle_\varepsilon;
$$
\item [3)] $\omega\in S_\varepsilon^+$ is a critical point of $\Psi_\varepsilon$ if and only if $m_\varepsilon(\omega)\in \mathcal{N}_\varepsilon$ is a critical point of $\Phi_\varepsilon$;
\item [4)] $\inf_{S_\varepsilon^+}\Psi_\varepsilon=\inf_{\mathcal{N}_\varepsilon}\Phi_\varepsilon$.
\end{itemize}
\end{proposition}
\noindent Since $S_\varepsilon^+$ is a regular $C^1$-submanifold of $E_\varepsilon^+$, by Proposition \ref{co51} and Proposition \ref{p5.5}, it follows from the Ekeland variational principle (see \cite[Theorem 3.1]{E}) that there exists $\{\omega_n\}\subset S_\varepsilon^+$ such that
$$
\Psi_\varepsilon(\omega_n)\rightarrow c_\varepsilon>0 \ \ \mbox{and}\ \ \|\Psi_\varepsilon'(\omega_n)\|_\ast\rightarrow 0,\ \ \mbox{as}\ \ n\rightarrow\infty.
$$
Let $z_n=m_\varepsilon(\omega_n)\in\mathcal{N}_\varepsilon$, then
\begin{equation}
\Phi_\varepsilon(z_n)\rightarrow c_\varepsilon>0 \ \ \mbox{and}\ \ \|\Phi_\varepsilon'(z_n)\|\rightarrow 0,\ \ \mbox{as}\ \ n\rightarrow\infty.
\end{equation}
Similar as in \cite{DJJ}, one has the following two propositions:
\begin{proposition}\label{o54} There exists $C$ (independent of $\varepsilon$) such that for all $\varepsilon>0$ and $n\in \mathbb{N}$:
\begin{itemize}
\item [1)] $\|z_n\|_\varepsilon=\|(u_n,v_n)\|_\varepsilon\le C(1+c_\varepsilon)$;
\item [2)] $\int_{\mathbb R^2}f(u_n)u_n\, \mathrm{d} x\le C(1+c_\varepsilon)$ and $\int_{\mathbb R^2}g(v_n)v_n\, \mathrm{d} x\le C(1+c_\varepsilon)$;
\item [3)] $\int_{\mathbb R^2}F(u_n)\, \mathrm{d} x\le C(1+c_\varepsilon)$ and $\int_{\mathbb R^2}G(v_n)\, \mathrm{d} x\le C(1+c_\varepsilon)$.
\end{itemize}
\end{proposition}
\noindent Up to a subsequence, there exists $z_\varepsilon=(u_\varepsilon,v_\varepsilon)\in E_\varepsilon$ such that $z_n\rightharpoonup z_\varepsilon$ in $E_\varepsilon$ and $z_n\xrightarrow{a.e.}z_\varepsilon$ in $\mathbb R^2$, as $n\rightarrow\infty$, which is actually a weak solution to \re{q51}, precisely we have
\begin{proposition}\label{o51}
The weak limit $z_\varepsilon$ is a critical point of $\Phi_\varepsilon$.
\end{proposition}
\subsection{Asymptotic behavior of $c_\varepsilon$}
By Proposition \ref{o51}, it suffices to show $z_\varepsilon\not\equiv0$. For this purpose, in the following, we investigate the relation between $c_\ast$ and $c_\varepsilon$, where $c_\ast, c_\varepsilon$ are the corresponding least energies to Systems \re{q11} and \re{q51} respectively.
\begin{lemma}\label{l5.9}
With the assumptions of Theorem \ref{Th1}, we have $$\limsup_{\varepsilon\rightarrow0}c_\varepsilon\le c_\ast.$$
\end{lemma}
\begin{proof}
By Theorem \ref{a}, there exists $z=(u,v)\in\mathcal{N}$ such that $$c_\ast=\max_{\omega\in\hat{E}(z)}\Phi(\omega)=\max_{\omega\in\hat{E}(z^+)}\Phi(\omega).$$ Noting that $z\in E\setminus E^-$, we know for any $\varepsilon>0$, $z\in E_\varepsilon\setminus E_\varepsilon^-$. Then, by Lemma \ref{l5.1}, for any $\varepsilon>0$
\begin{align*}
c_\varepsilon\le\max_{\omega\in\hat{E}_\varepsilon(z)}\Phi_\varepsilon(\omega)=\Phi_\varepsilon(\hat{m}_\varepsilon(z)).
\end{align*}
Recalling that $\hat{m}_\varepsilon(z)\in\hat{E}_\varepsilon(z)\cap\mathcal{N}_\varepsilon$, there exist $s_\varepsilon\ge0$, $t_\varepsilon\in\mathbb R$ and $\varphi_\varepsilon\in H_\varepsilon$, $\|\varphi_\varepsilon\|_\varepsilon=1$ such that $\hat{m}_\varepsilon(z)=s_\varepsilon z+t_\varepsilon(\varphi_\varepsilon,-\varphi_\varepsilon)$.\vskip0.1in
{\bf Step 1.} We borrow some ideas from \cite{Ramos1} to prove that $t_\varepsilon,s_\varepsilon$ are bounded for $\varepsilon>0$ sufficiently small. We proceed by contradiction and distinguish between two cases.
\noindent {\it Case I.} Both $s_\varepsilon,t_\varepsilon$ are unbounded for $\varepsilon$ small. If $|t_\varepsilon|/s_\varepsilon\rightarrow\infty$, as $\varepsilon\rightarrow0$, then
\begin{align*}
c_\varepsilon&\le\Phi_\varepsilon(s_\varepsilon z+t_\varepsilon(\varphi_\varepsilon,-\varphi_\varepsilon))\\
&=s_\varepsilon^2\|z\|_\varepsilon^2-t_\varepsilon^2+t_\varepsilon s_\varepsilon O(1)-\int_{\mathbb R^2}F(s_\varepsilon u+t_\varepsilon\varphi_\varepsilon)+G(s_\varepsilon v-t_\varepsilon\varphi_\varepsilon)\\
&\le s_\varepsilon^2\|z\|_\varepsilon^2-t_\varepsilon^2+t_\varepsilon s_\varepsilon O(1)=t_\varepsilon^2(O(1)-1)\rightarrow-\infty,
\end{align*}
which contradicts the fact $c_\varepsilon\ge c_0>0$. If $|t_\varepsilon|/s_\varepsilon\rightarrow0$, as $\varepsilon\rightarrow0$, then
\begin{align*}
c_\varepsilon&\le s_\varepsilon^2\|z\|_\varepsilon^2-t_\varepsilon^2+t_\varepsilon s_\varepsilon O(1)-\int_{\mathbb R^2}F(s_\varepsilon u+t_\varepsilon\varphi_\varepsilon)+G(s_\varepsilon v-t_\varepsilon\varphi_\varepsilon)\\
&=s_\varepsilon^3\left(o(1)-\int_{\mathbb R^2}\frac{F(s_\varepsilon u+t_\varepsilon\varphi_\varepsilon)}{s_\varepsilon^3}+\frac{G(s_\varepsilon v-t_\varepsilon\varphi_\varepsilon)}{s_\varepsilon^3}\right).
\end{align*}
Since $c_\varepsilon\ge c_0>0$, as $\varepsilon\rightarrow0$ we have
$$
\int_{\mathbb R^2}\frac{F(s_\varepsilon u+t_\varepsilon\varphi_\varepsilon)}{s_\varepsilon^3}\rightarrow0,\,\,\int_{\mathbb R^2}\frac{G(s_\varepsilon v-t_\varepsilon\varphi_\varepsilon)}{s_\varepsilon^3}\rightarrow0.
$$
Recalling that $f$ has Moser critical growth at infinity, there exists $C>0$ such that $|F(t)|\ge C|t|^3$ for $|t|\ge1$. Let $A_\varepsilon:=\{x\in\mathbb R^2: |s_\varepsilon u(x)+t_\varepsilon\varphi_\varepsilon(x)|\ge1\}$, then
$$
\int_{A_\varepsilon}\frac{F(s_\varepsilon u+t_\varepsilon\varphi_\varepsilon)}{s_\varepsilon^3}\ge C\int_{A_\varepsilon}\left|u(x)+\frac{t_\varepsilon}{s_\varepsilon}\varphi_\varepsilon(x)\right|^3,
$$
where the left hand side vanishes as $\varepsilon\rightarrow0$, which yields $\lim_{\varepsilon\rightarrow0}\int_{A_\varepsilon}|u(x)|^3=0$. At the same time,
\begin{align*}
&\int_{\mathbb R^2\setminus A_\varepsilon}|u(x)|^3\le\int_{\mathbb R^2\setminus A_\varepsilon}u^2(x)\left(\frac{1}{s_\varepsilon}+\frac{|t_\varepsilon|}{s_\varepsilon}|\varphi_\varepsilon|\right)\\
&\le\frac{1}{s_\varepsilon}\int_{\mathbb R^2}u^2(x)+\frac{|t_\varepsilon|}{s_\varepsilon}\left(\int_{\mathbb R^2}u^4(x)\right)^{1/2}\left(\int_{\mathbb R^2}\varphi^2_\varepsilon(x)\right)^{1/2}\rightarrow0,\hbox{ as $\varepsilon\rightarrow0$.}
\end{align*}
Hence $\int_{\mathbb R^2}|u|^3=0$ and in turn $u\equiv0$. Similarly, $v\equiv0$. So that we get $c_\ast=0$, which is a contradiction. If $|t_\varepsilon|/s_\varepsilon\rightarrow l>0$, as $\varepsilon\rightarrow0$, then following the same line as above,
$$\int_{A_\varepsilon}\left|u(x)+\frac{t_\varepsilon}{s_\varepsilon}\varphi_\varepsilon(x)\right|^3\rightarrow0.$$
Moreover,
\begin{align*}
\int_{\mathbb R^2\setminus A_\varepsilon}\left|u(x)+\frac{t_\varepsilon}{s_\varepsilon}\varphi_\varepsilon(x)\right|^3\le\frac{1}{s_\varepsilon}\int_{\mathbb R^2\setminus A_\varepsilon}\left|u(x)+\frac{t_\varepsilon}{s_\varepsilon}\varphi_\varepsilon(x)\right|^2\rightarrow0,\hbox{ as $\varepsilon\rightarrow0$.}
\end{align*}
Then
$$
\int_{\mathbb R^2}\left|u(x)+\frac{t_\varepsilon}{s_\varepsilon}\varphi_\varepsilon(x)\right|^3\rightarrow0,\,\,\varepsilon\rightarrow0
$$
and analogously
$$
\int_{\mathbb R^2}\left|v(x)-\frac{t_\varepsilon}{s_\varepsilon}\varphi_\varepsilon(x)\right|^3\rightarrow0,\,\,\varepsilon\rightarrow0.
$$
So we get $\int_{\mathbb R^2}|u+v|^3=0$, that is $u=-v$. This implies $z=(u,v)\in E^-$ which contradicts the fact $z\in\mathcal{N}$.
\noindent {\it Case II.} Just one between $s_\varepsilon$ and $t_\varepsilon$ stays bounded for $\varepsilon$ small. If $|t_\varepsilon|/s_\varepsilon\rightarrow\infty$, as $\varepsilon\rightarrow0$, then $|t_\varepsilon|\rightarrow\infty$, as $\varepsilon\rightarrow0$ and as above one has
\begin{align*}
c_\varepsilon\le s_\varepsilon^2\|z\|_\varepsilon^2-t_\varepsilon^2+t_\varepsilon s_\varepsilon O(1)=t_\varepsilon^2(O(1)-1)\rightarrow-\infty,
\end{align*}
which contradicts the fact $c_\varepsilon\ge c_0>0$. If $|t_\varepsilon|/s_\varepsilon$ is bounded for $\varepsilon$ small, then $s_\varepsilon\rightarrow\infty$ and $|t_\varepsilon|/s_\varepsilon\rightarrow0$, as $\varepsilon\rightarrow0$. Reasoning as in {\it Case I}, we get $u=v=0$ and $c_\ast=0$, which is again a contradiction.
\vskip0.1in
{\bf Step 2.} Recall that
\begin{align*}
c_\varepsilon\le\max_{\omega\in\hat{E}_\varepsilon(z)}\Phi_\varepsilon(\omega)=\Phi_\varepsilon(\hat{m}_\varepsilon(z)),
\end{align*}
where $\hat{m}_\varepsilon(z)=s_\varepsilon z+t_\varepsilon(\varphi_\varepsilon,-\varphi_\varepsilon)$. Then
\begin{align*}
c_\varepsilon\le&\Phi_\varepsilon(s_\varepsilon z+t_\varepsilon(\varphi_\varepsilon,-\varphi_\varepsilon))=\Phi(s_\varepsilon z+t_\varepsilon(\varphi_\varepsilon,-\varphi_\varepsilon))\\
&+\int_{\mathbb R^2}(V_\varepsilon(x)-1)(s_\varepsilon u+t_\varepsilon\varphi_\varepsilon)(s_\varepsilon v-t_\varepsilon\varphi_\varepsilon)\\
\le&\max_{\omega\in\hat{E}(z)}\Phi(\omega)+I_\varepsilon=c_\ast+I_\varepsilon,
\end{align*}
where $I_\varepsilon:=\int_{\mathbb R^2}(V_\varepsilon(x)-1)(s_\varepsilon u+t_\varepsilon\varphi_\varepsilon)(s_\varepsilon v-t_\varepsilon\varphi_\varepsilon)$. Since $0\in\mathcal{M}$, by Lebesgue's dominated convergence theorem and Step 1, we have
\begin{align*}
I_\varepsilon&=\int_{\mathbb R^2}(V_\varepsilon(x)-1)[s_\varepsilon^2uv-t_\varepsilon^2\varphi_\varepsilon^2+t_\varepsilon s_\varepsilon (v-u)\varphi_\varepsilon]\\
&\le\int_{\mathbb R^2}(V_\varepsilon(x)-1)[s_\varepsilon^2uv+t_\varepsilon s_\varepsilon (v-u)\varphi_\varepsilon]\\
&\le s_\varepsilon^2\int_{\mathbb R^2}(V_\varepsilon(x)-1)uv+|t_\varepsilon s_\varepsilon|\left(\int_{\mathbb R^2}|V_\varepsilon(x)-1|^2(v-u)^2\right)^{1/2}\rightarrow0\,\,\hbox{ as $\varepsilon\rightarrow0$.}
\end{align*}
Therefore, $\limsup_{\varepsilon\rightarrow0}c_\varepsilon\le c_\ast$.
\end{proof}
\subsection{Existence of ground state solutions for \re{q51}}
For any $\lambda>0$, let us consider the following problem in $\mathbb{R}^2$
\begin{equation}\label{q110} \left\{
\begin{array}{ll}
-\Delta u+\lambda u=g(v)\\
-\Delta v+\lambda v=f(u)
\end{array}
\right. \end{equation}
whose corresponding energy functional is
$$
\Phi_\lambda(z):=\int_{\mathbb R^2}\nabla u\nabla v+\lambda uv-I(z),\ \ z=(u,v)\in E.
$$
As above one can define the generalized Nehari Manifold $\mathcal{N}_\lambda$ and the least energy
$$
c_\lambda:=\inf_{z\in\mathcal{N}_\lambda}\Phi_\lambda(z).
$$
Moreover, with the same assumptions of Theorem \ref{Th2}, if $c_\lambda\in(0,4\pi/\alpha_0)$ for some $\lambda>0$, then there exists $z_\lambda=(u_\lambda,v_\lambda)\in\mathcal{N}_\lambda$ such that $\Phi_\lambda(z_\lambda)=c_\lambda$.
\begin{lemma}\label{bj1}
With the assumptions of Theorem \ref{Th2}, for any $\lambda>0$ the map $\lambda\mapsto c_\lambda\in(0,4\pi/\alpha_0)$ is strictly increasing.
\end{lemma}
\begin{proof}
For any $\lambda>0$ with $c_\lambda\in(0,4\pi/\alpha_0)$, let $z_\lambda=(u_\lambda,v_\lambda)$ be a solution of \re{q110}, then $\tilde{z}_\lambda=(\tilde{u}_\lambda,\tilde{v}_\lambda)=(u_\lambda(\cdot/\sqrt{\lambda}),v_\lambda(\cdot/\sqrt{\lambda}))$ satisfies in the whole plane the following system
\begin{equation}\label{q111} \left\{
\begin{array}{ll}
-\Delta \tilde{u}_\lambda+\tilde{u}_\lambda=\lambda^{-1}g(\tilde{v}_\lambda)\\
-\Delta \tilde{v}_\lambda+\tilde{v}_\lambda=\lambda^{-1}f(\tilde{u}_\lambda)
\end{array}
\right. \end{equation}
whose corresponding energy functional is
$$
\tilde{\Phi}_\lambda(\tilde{z}_\lambda):=\int_{\mathbb R^2}\nabla\tilde{u}_\lambda\nabla\tilde{v}_\lambda+\tilde{u}_\lambda\tilde{v}_\lambda-\lambda^{-1}I(\tilde{z}_\lambda).
$$
Similar as above, we can define the generalized Nehari Manifold $\mathcal{\tilde{N}}_\lambda$ and the least energy
$$
\tilde{c}_\lambda:=\inf_{z\in\mathcal{\tilde{N}}_\lambda}\tilde{\Phi}_\lambda(z).
$$
We have $c_\lambda=\tilde{c}_\lambda\in(0,4\pi/\alpha_0)$. Then \re{q111} admits a ground state solution $\tilde{z}_\lambda=(\tilde{u}_\lambda,\tilde{v}_\lambda)$. Moreover,
$$
\tilde{c}_\lambda:=\inf_{z\in E\setminus E^-}\max_{\omega\in\hat{E}(z)}\tilde{\Phi}_\lambda(\omega)=\max_{\omega\in\hat{E}(\tilde{z}_\lambda)}\tilde{\Phi}_\lambda(\omega).
$$
To show that $c_\lambda$ is strictly increasing, it is enough to prove that $\tilde{c}_\lambda$ is strictly increasing. For any $0<\mu<\lambda$, the set $\hat{E}(\tilde{z}_\lambda)$ intersects $\mathcal{\tilde{N}}_\mu$ at exactly one point $\hat{m}_\mu(z)$, which is the unique global maximum point of $\tilde{\Phi}_\mu|_{\hat{E}(\tilde{z}_\lambda)}$. Since $F(s), G(s)>0$ for any $s\not=0$,
\begin{align*}
\tilde{c}_\mu&\le\max_{\omega\in\hat{E}(\tilde{z}_\lambda)}\tilde{\Phi}_\mu(\omega)=\tilde{\Phi}_\mu(\hat{m}_\mu(z))\\
&<\tilde{\Phi}_\lambda(\hat{m}_\mu(z))\le\max_{\omega\in\hat{E}(\tilde{z}_\lambda)}\tilde{\Phi}_\lambda(\omega)=\tilde{c}_\lambda.
\end{align*}
Therefore, $c_\mu<c_\lambda$.
\end{proof}
\noindent Now, we are set to prove that the weak limit obtained in Proposition \ref{o51} is non trivial, precisely
\begin{lemma}\label{bj2}
$z_\varepsilon\not\equiv0$ provided $\varepsilon>0$ is sufficiently small.
\end{lemma}
\begin{proof}
Assume by contradiction that $z_\varepsilon=0$ for $\varepsilon>0$ small, then $z_n=(u_n,v_n)\rightharpoonup 0$ in $E_\varepsilon$ and $z_n\xrightarrow{a.e.}0$ in $\mathbb R^2$, as $n\rightarrow\infty$. It is well known that $\{z_n\}$ satisfies just one of the following alternatives:
\begin{itemize}
\item [1)] (Vanishing)
$$
\lim_{n\rightarrow\infty}\sup_{y\in\mathbb R^2}\int_{B_R(y)}(u_n^2+v_n^2)\, \mathrm{d} x=0\ \ \mbox{for all}\ \ R>0;
$$
\item [2)] (Nonvanishing) there exist $\nu>0$, $R_0>0$ and $\{y_n\}\subset\mathbb R^2$ such that
$$
\lim_{n\rightarrow\infty}\int_{B_{R_0}(y_n)}(u_n^2+v_n^2)\, \mathrm{d} x\ge\nu.
$$
\end{itemize}
Due to $c_\varepsilon\in(c_0,4\pi/\alpha_0)$ we can rule out {\it Vanishing}. So that {\it Nonvanishing} occurs. Let $\tilde{u}_n(\cdot):=u_n(\cdot+y_n)$ and $\tilde{v}_n(\cdot):=v_n(\cdot+y_n)$, then $|y_n|\rightarrow\infty$, as $n\rightarrow\infty$ and
\begin{equation}\label{y522}
\lim_{n\rightarrow\infty}\int_{B_{R_0}(0)}(\tilde{u}_n^2+\tilde{v}_n^2)\, \mathrm{d} x\ge\nu.
\end{equation}
Let $\tilde{z}_n=(\tilde{u}_n,\tilde{v}_n)$; $\{\tilde{z}_n\}$ is bounded in $E$. Up to a subsequence, by \re{y522} we may assume that $\tilde{z}_n\rightarrow \tilde{z}\not=0$ weakly in $E$ for some $\tilde{z}=(\tilde{u},\tilde{v})\in E$ and $\Phi_{V_\infty}'(\tilde{z})=0$, where
$$
\Phi_{V_\infty}(z)=\int_{\mathbb R^2}\nabla u\nabla v+V_\infty uv-I(z),\,\, z=(u,v)\in E.
$$
By $(H2)$ and Fatou's Lemma, for fixed $\varepsilon>0$,
\begin{align*}
c_\varepsilon+o_n(1)&=\Phi_\varepsilon(\tilde{z}_n)-\frac{1}{2}\langle \Phi_\varepsilon'(\tilde{z}_n),\tilde{z}_n\rangle\\
&=\int_{\mathbb R^2}\frac{1}{2}f(\tilde{u}_n)\tilde{u}_n-F(\tilde{u}_n)+\int_{\mathbb R^2}\frac{1}{2}g(\tilde{v}_n)\tilde{v}_n-G(\tilde{v}_n)\\
&\ge\int_{\mathbb R^2}\frac{1}{2}f(\tilde{u})\tilde{u}-F(\tilde{u})+\int_{\mathbb R^2}\frac{1}{2}g(\tilde{v})\tilde{v}-G(\tilde{v})+o_n(1)\\
&=\Phi_{V_\infty}(\tilde{z})-\frac{1}{2}\langle \Phi_{V_\infty}'(\tilde{z}),\tilde{z}\rangle+o_n(1)\ge c_{V_\infty}+o_n(1).
\end{align*}
It follows that $c_\varepsilon\ge c_{V_\infty}$ for $\varepsilon>0$ small enough. By Lemma \ref{l5.9} and Lemma \ref{bj1}, we get $c_{V_\infty}>c_\ast$. Again by Lemma \ref{l5.9} we get a contradiction.
\end{proof}
\noindent By virtue of Lemma \ref{bj2} we get straightforwardly the following
\begin{corollary}
For $\varepsilon>0$ small enough, $\Phi_\varepsilon(z_\varepsilon)=c_\varepsilon$, namely $z_\varepsilon$ is a ground state solution of \re{q51}.
\end{corollary}
\subsection{Concentration}
Reasoning as in Proposition \ref{bo1} we have
\begin{proposition}\label{boc1}
Let $\varepsilon>0$ and $z_\varepsilon=(u_\varepsilon,v_\varepsilon)$ be a ground state solution to \re{q51}. Then, $u_\varepsilon, v_\varepsilon\in L^{\infty}(\mathbb R^2)\cap C_{loc}^{1,\gamma}(\mathbb R^2)$ for some $\gamma\in(0,1)$. {Moreover, $u_\varepsilon(x), v_\varepsilon(x)\rightarrow 0$, as $|x|\rightarrow\infty$.}
\end{proposition}
\noindent By Proposition \ref{boc1}, there exists $y_\varepsilon\in\mathbb R^2$ such that $$|u_\varepsilon(y_\varepsilon)|+|v_\varepsilon(y_\varepsilon)|=\max_{x\in\mathbb R^2}(|u_\varepsilon(x)|+|v_\varepsilon(x)|).$$ Moreover, $x_\varepsilon:=\varepsilon y_\varepsilon$ is a maximum point of $|\varphi_\varepsilon(x)|+|\psi_\varepsilon(x)|$, where $(\varphi_\varepsilon(\cdot),\psi_\varepsilon(\cdot))=(u_\varepsilon(\cdot/\varepsilon),v_\varepsilon(\cdot/\varepsilon))$ is a ground state solution of the original problem \re{q1}. We conclude the proof of Theorem \ref{Th1} by proving Propositions \ref{boc2}, \ref{boc3} and \ref{boc4} below.
{\begin{proposition}\label{boc2}\
\begin{itemize}
\item [1)] $\lim_{\varepsilon\rightarrow 0}\mbox{dist}(x_\varepsilon,\mathcal{M})=0$;
\item [2)] $(u_\varepsilon(\cdot+x_\varepsilon/\varepsilon), v_\varepsilon(\cdot+x_\varepsilon/\varepsilon))$ converges (up to a subsequence) to a ground state solution of
\begin{align}\label{luv}
\left\{
\begin{array}{ll}
-\Delta u+V_0u=g(v)&\\
&\text{ in } \mathbb{R}^2\\
-\Delta v+V_0v=f(u)
\end{array}
\right.
\end{align}
\item [3)] $u_\varepsilon(x+x_\varepsilon/\varepsilon), v_\varepsilon(x+x_\varepsilon/\varepsilon)\rightarrow 0$, uniformly as $|x|\rightarrow\infty$, for $\varepsilon>0$ sufficiently small.
\end{itemize}
\end{proposition}}
\begin{proof}
\noindent By virtue of Proposition \ref{o54} and Fatou's Lemma, there exists $C>0$ (independent of $\varepsilon$) such that $\|(u_\varepsilon,v_\varepsilon)\|_\varepsilon\le C$ for all $\varepsilon\in(0,\varepsilon_0)$.
Up to a subsequence, we may assume $z_\varepsilon=(u_\varepsilon, v_\varepsilon)\rightharpoonup z_0=(u_0, v_0)$ in $E$ and $(u_\varepsilon, v_\varepsilon)\xrightarrow{a.e.}(u_0, v_0)$ in $\mathbb R^2$, as $\varepsilon\rightarrow0$. Due to $c_\varepsilon\in(c_0,4\pi/\alpha_0)$ for $\varepsilon>0$ sufficiently small, as in Lemma \ref{bj2}, we have $u_0\not\equiv0, v_0\not\equiv0$. Moreover, $\Phi'(z_0)=0$. By $(H2)$ and Fatou's Lemma,
\begin{align*}
c_\varepsilon&=\Phi_\varepsilon(z_\varepsilon)-\frac{1}{2}\langle \Phi_\varepsilon'(z_\varepsilon),z_\varepsilon\rangle\\
&=\int_{\mathbb R^2}\frac{1}{2}f(u_\varepsilon)u_\varepsilon-F(u_\varepsilon)+\int_{\mathbb R^2}\frac{1}{2}g(v_\varepsilon)v_\varepsilon-G(v_\varepsilon)\\
&\ge\int_{\mathbb R^2}\frac{1}{2}f(u_0)u_0-F(u_0)+\int_{\mathbb R^2}\frac{1}{2}g(v_0)v_0-G(v_0)+o_\varepsilon(1)\\
&=\Phi(z_0)-\frac{1}{2}\langle \Phi'(z_0),z_0\rangle+o_\varepsilon(1)\ge c_\ast+o_\varepsilon(1).
\end{align*}
Thanks to Lemma \ref{l5.9}, $\Phi(z_0)=c_\ast$, namely $(u_0, v_0)$ is a ground state solution of \re{luv}. Thanks to Fatou's Lemma again,
$$
\lim_{\varepsilon\rightarrow0}\int_{\mathbb R^2}\frac{1}{2}f(u_\varepsilon)u_\varepsilon-F(u_\varepsilon)=\int_{\mathbb R^2}\frac{1}{2}f(u_0)u_0-F(u_0)
$$
and
$$
\lim_{\varepsilon\rightarrow0}\int_{\mathbb R^2}\frac{1}{2}g(v_\varepsilon)v_\varepsilon-G(v_\varepsilon)=\int_{\mathbb R^2}\frac{1}{2}g(v_0)v_0-G(v_0).
$$
Repeating the argument in Proposition \ref{con}, we get $\|u_\varepsilon\|_\varepsilon\rightarrow\|u_0\|_{H^1}$ and $\|v_\varepsilon\|_\varepsilon\rightarrow\|v_0\|_{H^1}$, as $\varepsilon\rightarrow0$. This implies $(u_\varepsilon,v_\varepsilon)\rightarrow (u_0,v_0)$ strongly in $E$ as $\varepsilon\rightarrow0$. Then, as in Propositions \ref{bo2} and \ref{pro_apriori}, $\{\|u_\varepsilon\|_\infty,\|v_\varepsilon\|_\infty\}$ is uniformly bounded for $\varepsilon>0$ small and $$\liminf_{\varepsilon\rightarrow0}\min\{\|u_\varepsilon\|_\infty,\|v_\varepsilon\|_\infty\}>0.$$
As in Proposition \ref{boo4}, there exists $R_2>0$ such that
$$
\lim_{\varepsilon\rightarrow0}\int_{B_{R_2}(x_\varepsilon/\varepsilon)}(u_\varepsilon^2+v_\varepsilon^2)\, \mathrm{d} x>0.
$$
Now, we claim that $\{x_\varepsilon\}$ is bounded for $\varepsilon>0$ small enough. Suppose this does not occur, so that $|x_\varepsilon|\rightarrow\infty$, as $\varepsilon\rightarrow0$. Let $\bar{u}_\varepsilon(\cdot)=u_\varepsilon(\cdot+x_\varepsilon/\varepsilon)$ and $\bar{v}_\varepsilon(\cdot)=v_\varepsilon(\cdot+x_\varepsilon/\varepsilon)$ which, up to a subsequence, $(\bar{u}_\varepsilon,\bar{v}_\varepsilon)\rightarrow \bar{z}=(\bar{u},\bar{v})$ weakly in $E$, as $\varepsilon\rightarrow0$ and $\bar{u},\bar{v}\not\equiv0$. Moreover, $\Phi_{V_\infty}'(\bar{z})=0$. As in Lemma \ref{bj2} we get a contradiction. Therefore $\{x_\varepsilon\}$ is bounded for $\varepsilon>0$ small. Up to a subsequence, assume $x_\varepsilon\rightarrow x_0$, as $\varepsilon\rightarrow0$ and let $\hat{u}_\varepsilon(\cdot)=u_\varepsilon(\cdot+x_\varepsilon/\varepsilon)$, $\hat{v}_\varepsilon(\cdot)=v_\varepsilon(\cdot+x_\varepsilon/\varepsilon)$. Then, up to a subsequence, $\hat{z}_\varepsilon=(\hat{u}_\varepsilon,\hat{v}_\varepsilon)\rightarrow\hat{z}=(\hat{u},\hat{v})\not=0$ weakly in $E$, as $\varepsilon\rightarrow0$ and $\Phi_{V(x_0)}'(\hat{z})=0$, where
$$
\Phi_{V(x_0)}(z)=\int_{\mathbb R^2}\nabla u\nabla v+V(x_0) uv-I(z),\,\, z=(u,v)\in E.
$$
By $(H2)$ and Fatou's Lemma,
\begin{align*}
c_\varepsilon&=\Phi_\varepsilon(z_\varepsilon)-\frac{1}{2}\langle \Phi_\varepsilon'(z_\varepsilon),z_\varepsilon\rangle\\
&=\int_{\mathbb R^2}\frac{1}{2}f(\hat{u}_\varepsilon)\hat{u}_\varepsilon-F(\hat{u}_\varepsilon)+\int_{\mathbb R^2}\frac{1}{2}g(\hat{v}_\varepsilon)\hat{v}_\varepsilon-G(\hat{v}_\varepsilon)\\
&\ge\int_{\mathbb R^2}\frac{1}{2}f(\hat{u})\hat{u}-F(\hat{u})+\int_{\mathbb R^2}\frac{1}{2}g(\hat{v})\hat{v}-G(\hat{v})+o_\varepsilon(1)\\
&=\Phi_{V(x_0)}(\hat{z})-\frac{1}{2}\langle \Phi_{V(x_0)}'(\hat{z}),\hat{z}\rangle+o_\varepsilon(1)\ge c_{V(x_0)}+o_\varepsilon(1).
\end{align*}
Recalling that $\limsup_{\varepsilon\rightarrow0}c_\varepsilon\le c_\ast$, we get $c_{V(x_0)}=c_\ast$ and hence $(\hat{u},\hat{v})$ is a ground state solution of \re{luv}. Thanks to Lemma \ref{bj1}, $V(x_0)=V_0$, namely $x_0\in\mathcal{M}$ and $\lim_{\varepsilon\rightarrow 0}\mbox{dist}(x_\varepsilon,\mathcal{M})=0$. {Moreover, $(\hat{u}_\varepsilon,\hat{v}_\varepsilon)\rightarrow (\hat{u},\hat{v})$ strongly in $E$, as $\varepsilon\rightarrow0$. As in Proposition \ref{bo2}, $u_\varepsilon(x+x_\varepsilon/\varepsilon), v_\varepsilon(x+x_\varepsilon/\varepsilon)\rightarrow 0$, as $|x|\rightarrow\infty$, uniformly in $\varepsilon$.}
\end{proof}
{\begin{proposition}\label{boc3} Let $(\varphi_\varepsilon,\psi_\varepsilon)$ be a ground state solution to \re{q1} and let $x_\varepsilon^1, x_\varepsilon^2$ be any maximum points of $|\varphi_\varepsilon|$ and $|\psi_\varepsilon|$ respectively. Then,
$$
\hbox{$\lim_{\varepsilon\rightarrow 0}\mbox{dist}(x_\varepsilon^i,\mathcal{M})=0,\quad \lim_{\varepsilon\rightarrow0}|x_\varepsilon^i-x_\varepsilon|=0,\quad i=1,2$.}
$$
If in addition $f$ and $g$ are odd and $(H6)$ holds, then for $\varepsilon>0$ small enough, $\varphi_\varepsilon\psi_\varepsilon>0$ in $\mathbb R^2$ and
$$\lim_{\varepsilon\rightarrow0}|x_\varepsilon^1-x_\varepsilon^2|/\varepsilon=0.$$
Moreover, for some $c,C>0$, $$|\varphi_\varepsilon(x)|\le C\exp(-\frac{c}{\varepsilon}|x-x_\varepsilon^1|),\quad |\psi_\varepsilon(x)|\le C\exp(-\frac{c}{\varepsilon}|x-x_\varepsilon^2|), \,\, x\in\mathbb R^2.$$
\end{proposition}}
\begin{proof}
Note that $x_\varepsilon^1/\varepsilon,x_\varepsilon^2/\varepsilon$ are the maximum points of $u_\varepsilon,v_\varepsilon$ respectively. Thanks to the decay of $u_\varepsilon,v_\varepsilon$ and the following fact
$$
\liminf_{\varepsilon\rightarrow0}\min\{\|u_\varepsilon\|_\infty,\|v_\varepsilon\|_\infty\}>0,
$$
we get that $|x_\varepsilon^i/\varepsilon-x_\varepsilon/\varepsilon|$ is bounded for $i=1,2$ and $\varepsilon>0$ small enough. Then, $\lim_{\varepsilon\rightarrow 0}\mbox{dist}(x_\varepsilon^i,\mathcal{M})=0$, $i=1,2$, $\lim_{\varepsilon\rightarrow0}|x_\varepsilon^i-x_\varepsilon|=0$, $i=1,2$ and $\lim_{\varepsilon\rightarrow0}|x_\varepsilon^1-x_\varepsilon^2|=0$.
\noindent Next we assume that {$f$ and $g$ are odd, that $(H6)$ holds}, and also that, up to a subsequence, $(x_\varepsilon^1-x_\varepsilon^2)/\varepsilon\rightarrow y_0\in\mathbb R^2$, as $\varepsilon\rightarrow0$.
Let $\tilde{u}_\varepsilon(\cdot)=u_\varepsilon(\cdot+x_\varepsilon^1/\varepsilon)$ and $\tilde{v}_\varepsilon(\cdot)=v_\varepsilon(\cdot+x_\varepsilon^2/\varepsilon)$, then $(\tilde{u}_\varepsilon(\cdot),\tilde{v}_\varepsilon(\cdot+(x_\varepsilon^1-x_\varepsilon^2)/\varepsilon))\rightarrow (u,v)\not=0$ strongly in $E$ and
in $C_{loc}^1(\mathbb R^2)$, as $\varepsilon\rightarrow0$. Moreover, $(u,v)$ is a ground state solution of \re{q11}. Without loss of generality, we assume $u>0$, $v>0$
in $\mathbb R^2$. Since $0$ is a maximum point of $\tilde{u}_\varepsilon$, $0$ is a maximum point also for $u$. By virtue of Theorem \ref{sign}, $0$ is the unique maximum point of
$u$ and $v$. On the other hand, up to a subsequence, $(\tilde{u}_\varepsilon(\cdot+(x_\varepsilon^2-x_\varepsilon^1)/\varepsilon),\tilde{v}_\varepsilon(\cdot))\rightarrow (\tilde{u},\tilde{v})\not=0$ strongly in $E$ and
in $C_{loc}^1(\mathbb R^2)$, as $\varepsilon\rightarrow0$. Then $(\tilde{u}(\cdot),\tilde{v}(\cdot))=(u(\cdot-y_0),v(\cdot-y_0))$, which is a ground state solution of \re{q11}.
Since $0$ is a maximum point of $\tilde{v}_\varepsilon$, then $0$ is the unique maximum point of $\tilde{v}$. Therefore, $y_0=0$.
\noindent Finally, we prove that $u_\varepsilon,v_\varepsilon$ do not change sign for $\varepsilon>0$ sufficiently small. Let
$$
\bar{u}_\varepsilon=u_\varepsilon(\cdot+x_\varepsilon^1/\varepsilon),\,\,\, \bar{v}_\varepsilon=v_\varepsilon(\cdot+x_\varepsilon^1/\varepsilon),
$$
it is enough to prove $\bar{u}_\varepsilon\bar{v}_\varepsilon>0$ in $\mathbb R^2$. We assume $(\bar{u}_\varepsilon,\bar{v}_\varepsilon)\rightarrow(u,v)\in\mathcal{S}$ strongly in $E$ and uniformly in $C_{loc}^2(\mathbb R^2)$, as $\varepsilon\rightarrow0$ and $0$ is the unique maximum point of $u,v$. By Theorem \ref{sign}, $uv>0$ in $\mathbb R^2$. Without loss of generality, we assume $u>0$ and $v>0$ in $\mathbb R^2$. Then there exist $R>0$ and $\varepsilon_0>0$
such that $\bar{u}_\varepsilon,\bar{v}_\varepsilon>0$ in $B_R(0)$ for $\varepsilon<\varepsilon_0$. Define
$$
R_\varepsilon(\bar{u}_\varepsilon):=\sup\{r\,|\,\bar{u}_\varepsilon(x)>0,\,\, \forall\, x\in B_r(0)\},\,\,R_\varepsilon(\bar{v}_\varepsilon):=\sup\{r\,|\, \bar{v}_\varepsilon(x)>0,\,\,\forall\ x\in B_r(0)\}
$$
and $R_\varepsilon:=\min\{R_\varepsilon(\bar{u}_\varepsilon),R_\varepsilon(\bar{v}_\varepsilon)\}$, then $R_\varepsilon\ge R$ for any $\varepsilon<\varepsilon_0$. If $R_\varepsilon=\infty$ for any $\varepsilon<\varepsilon_0$, the proof is complete. Otherwise, there exists $\varepsilon_n>0$ such that $\varepsilon_n\rightarrow0$, as $n\rightarrow\infty$ and $R_n:=R_{\varepsilon_n}<\infty$ for any fixed $n$. Then, by the maximum principle, $R_{\varepsilon_n}(\bar{u}_{\varepsilon_n}), R_{\varepsilon_n}(\bar{v}_{\varepsilon_n})<\infty$ for any fixed $n\in\mathbb{N}$. Hence $\inf_{x\in\mathbb R^2}\bar{u}_{\varepsilon_n}(x)<0$ and $\inf_{x\in\mathbb R^2}\bar{v}_{\varepsilon_n}(x)<0$ for any $n\in\mathbb{N}$. Noting that $\bar{u}_{\varepsilon_n}(x),\bar{v}_{\varepsilon_n}(x)\rightarrow0$, as $|x|\rightarrow\infty$, there exist $y_n,z_n\in\mathbb R^2$ such that $\bar{u}_{\varepsilon_n}(y_n)=\min_{x\in\mathbb R^2}\bar{u}_{\varepsilon_n}(x)<0$ and $\bar{v}_{\varepsilon_n}(z_n)=\min_{x\in\mathbb R^2}\bar{v}_{\varepsilon_n}(x)<0$. Then we have
$$
g(\bar{v}_{\varepsilon_n}(y_n))\le V_0\bar{u}_{\varepsilon_n}(y_n),\,\,\,f(\bar{u}_{\varepsilon_n}(z_n))\le V_0\bar{v}_{\varepsilon_n}(z_n).
$$
By Remark \ref{remark1} we have
$$
V_0\bar{u}_{\varepsilon_n}(y_n)\ge g(\bar{v}_{\varepsilon_n}(y_n))\ge g(\bar{v}_{\varepsilon_n}(z_n))
\ge g\left(\frac{f(\bar{u}_{\varepsilon_n}(z_n))}{V_0}\right)\ge g\left(\frac{f(\bar{u}_{\varepsilon_n}(y_n))}{V_0}\right),
$$
which yields $\inf_n|\bar{u}_{\varepsilon_n}(y_n)|>0$ by $(H1)$. {Observe that $\bar{u}_{\varepsilon_n}(x)\rightarrow0$, as $|x|\rightarrow\infty$, uniformly in $\varepsilon$, and thus $\sup_n|y_n|<\infty$, namely $|y_n|<R_n$ for $n$ sufficiently large.} Hence $\bar{u}_{\varepsilon_n}(y_n)>0$, which is a contradiction. {Finally, since $u_\varepsilon,v_\varepsilon$ do not change sign, by the standard comparison principle, we get the uniform exponential decay at infinity.}
\end{proof}
In order to complete the proof of Theorem \ref{Th1} we need to prove the uniqueness of the maximum points of $\varphi_\varepsilon,\psi_\varepsilon$.
\begin{proposition}\label{boc4} Let $x_\varepsilon^1, y_\varepsilon^1$ be any maximum points of $\varphi_\varepsilon$. {Assume $f$ and $g$ are odd and $(H6)$ holds. Then $x_\varepsilon^1=y_\varepsilon^1$, for $\varepsilon>0$ sufficiently small.} Namely, the maximum point of $\varphi_\varepsilon$ is unique. The same holds for $\psi_\varepsilon$.
\end{proposition}
\begin{proof} Let
$$
\bar{u}_\varepsilon=u_\varepsilon(\cdot+x_\varepsilon^1/\varepsilon),\,\,\, \bar{v}_\varepsilon=v_\varepsilon(\cdot+x_\varepsilon^1/\varepsilon).
$$
Then $(\bar{u}_\varepsilon,\bar{v}_\varepsilon)\rightarrow (u,v)\in\mathcal{S}$ strongly in $E$ and uniformly in $C_{loc}^2(\mathbb R^2)$, as $\varepsilon\rightarrow0$. Moreover, there exist $c,C>0$ such that
$$
|\bar{u}_\varepsilon(x)|\le C\exp{(-c|x-x_\varepsilon^1/\varepsilon|)},\,\,\ x\in\mathbb R^2.
$$
Hence $\|\bar{u}_\varepsilon\|_\infty\le C\exp{(-c|y_\varepsilon^1-x_\varepsilon^1|/\varepsilon)}$. As a consequence we have $$\limsup_{\varepsilon\rightarrow0}|y_\varepsilon^1-x_\varepsilon^1|/\varepsilon<\infty.$$
Indeed, otherwise $\|\bar{u}_\varepsilon\|_\infty\rightarrow0$, as $\varepsilon\rightarrow0$, which yields
$$
\int_{\mathbb R^2}[|\nabla\bar{v}_\varepsilon|^2+V_\varepsilon(x+x_\varepsilon^1/\varepsilon)|\bar{v}_\varepsilon|^2]\,\mathrm{d} x=\int_{\mathbb R^2}f(\bar{u}_\varepsilon)\bar{v}_\varepsilon\,\mathrm{d} x\rightarrow0.
$$
Namely $\|v_\varepsilon\|_{1,\varepsilon}\rightarrow0$, as $\varepsilon\rightarrow0$, from which $\Phi_\varepsilon(u_\varepsilon,v_\varepsilon)\rightarrow0$, as $\varepsilon\rightarrow0$, thus a contradiction by Proposition \ref{co51}. Therefore $|y_\varepsilon^1-x_\varepsilon^1|/\varepsilon$ stays bounded for $\varepsilon>0$ small. As in Proposition \ref{boc3}, $|y_\varepsilon^1-x_\varepsilon^1|/\varepsilon\rightarrow0$, as $\varepsilon\rightarrow0$. Observe that $\nabla\bar{u}_\varepsilon(0)=\nabla\bar{u}_\varepsilon((y_\varepsilon^1-x_\varepsilon^1)/\varepsilon)=0$. By Theorem \ref{sign}, $\Delta u(0)<0$. Recalling that $u(x)=u(|x|)$, $u'(0)=0$ and $u''(r)<0$ for $r=|x|$ small. On the other hand, since $g\in C^1$, $\bar{u}_\varepsilon\in C^2$ and $\bar{u}_\varepsilon\rightarrow u$ in $C_{loc}^2(\mathbb R^2)$, as $\varepsilon\rightarrow0$, it follows from \cite[Lemma 4.2]{Ni-Takagi1} that $y_\varepsilon^1=x_\varepsilon^1$ for $\varepsilon>0$ sufficiently small.
\end{proof}
\begin{thebibliography}{10}
\bibitem{AY}
\newblock Adimurthi \& S.L.~Yadava,
\newblock \emph{ Multiplicity results for semilinear elliptic
equations in bounded domain of $\mathbb R^2$ involving critical exponent.}
\newblock Ann. Scuola. Norm. Sup. Pisa, \textbf{17} (1990), 481--504.
\bibitem{ASY} C.O.~Alves, S.H.M.~Soares and J.~Yang, On existence and concentration of solutions for a class of Hamiltonian systems in $\mathbb R^N$, {\it Adv. Nonlinear Stud.} {\bf 3} (2003), 161--180.
\bibitem{BF} V.~Benci and D.~Fortunato, Variational Methods in Nonlinear Field Equations, {\it Springer Monographs in Mathematics}, Springer, 2014.
\bibitem{dsr} D.~Bonheure, E.M.~Dos Santos, M.~Ramos, Ground state and non ground state solutions of some strongly coupled elliptic systems, {\it Trans. Amer. Math. Soc., } {\bf 364} (2012), 447--491.
\bibitem{bst} D.~Bonheure, E.~M.~Dos Santos and H.~Tavares, Hamiltonian elliptic systems: a guide to variational frameworks, {\it Port. Math.} {\bf 71} (2014), 301--395.
\bibitem{bsrt} D.~Bonheure, E.~M.~Dos Santos, M.~Ramos and H.~Tavares, Existence and symmetry of least energy nodal solutions for Hamiltonian elliptic systems, {\it J. Math. Pures Appl.} {\bf 104} (2015), 1075--1107.
\bibitem{Sirakov1} J.~Busca and B.~Sirakov, Symmetry results for semilinear elliptic systems in the whole space, {\it J. Differential Equations}, {\bf 163} (2000), 41--54.
\bibitem{dcassani2} D.~Cassani, Lorentz-Sobolev spaces and systems of Schr\"{o}dinger equations in $\mathbb{R}^N$, {\it Nonlinear Anal.} {\bf 70} (2009), 2846--2854.
\bibitem{CT} D.~Cassani and C.~Tarsi, Existence of solitary waves for supercritical Schr\"{o}dinger systems in dimension two, {\it Calc. Var. Partial Differential Equations} {\bf 54} (2015), 1673--1704.
\bibitem{CST} D.~Cassani, F.~Sani and C.~Tarsi, Equivalent Moser type inequalities in $\mathbb{R}^2$ and the zero mass case, {\it J. Funct. Anal.} {\bf 267} (2014), 4236--4263.
\bibitem{rd} R.~Dalmasso, Existence and uniqueness of positive radial solutions for the Lane-Emden system, {\it Nonlinear Anal.} {\bf 57} (2004), 341--348.
\bibitem{dgp} L.~Damascelli, F.~Gladiali and F.~Pacella, Symmetry results for cooperative elliptic systems in unbounded domains, {\it Indiana Univ. Math. J.} {\bf 63} (2014), 615--649.
\bibitem{Fi} D. G. de Figueiredo, J. M. do \'{O} and B. Ruf, Critical and subcritical elliptic systems in dimension two, {\it Indiana Univ. Math. J. } {\bf 53} (2004), 1037--1054.
\bibitem{DMR}
\newblock D.G. de Figueiredo, O.H. Miyagaki \& B. Ruf,
\newblock \emph{ Elliptic equations in
$\mathbb R^2$ with nonlinearities in the critical growth range.}
\newblock Calc.
Var. Partial Differential Equations, \textbf{3} (1995), 139--153.
\bibitem{DY} D. G. de Figueiredo and J. Yang, Decay, symmetry and existence of solutions of semilinear elliptic systems, {\it Nonlinear Analysis} {\bf 33} (1998), 211--234.
\bibitem{DJJ} D. G. de Figueiredo, J. M. do \'{O} and J. Zhang, Ground state solutions of Hamiltonian system in dimension two, {\it preprint} 2016.
\bibitem{dlz} Y.~Ding, C.~Lee and F.~Zhao, Semiclassical limits of ground state solutions to Schr\"odinger systems, {\it Calc. Var. Partial Differential Equations} {\bf 51} (2014), 725--760.
\bibitem{Souza} M. de Souza and J. M. do \'{O}, Hamiltonian elliptic systems in $\mathbb R^2$ with subcritical and critical exponential growth, {\it Annali di Matematica Pura e Applicata}, DOI: 10.1007/s10231-015-0498-7 (2015).
\bibitem{Lions} M. Esteban and P. L. Lions, Existence and non-existence results for semilinear elliptic problems in unbounded domains,
{\it Proc. Roy. Soc. Edinburgh}, {\bf 93A} (1982), 1--14.
\bibitem{E} I. Ekeland, On the variational principle, {\it J. Math. Anal. Appl.} {\bf 47} (1974), 324--353.
\bibitem{EvansZ} L.C.~Evans and M.~Zworski, Lectures on Semiclassical Analysis, UC Berkeley, 2003.
\bibitem{GT} { D. Gilbarg and N. S. Trudinger,} {Elliptic Partial Differential Equations of Second Order; second edition,} Grundlehren 224, Springer, Berlin, Heidelberg, New York and Tokyo, 1983.
\bibitem{lionslemma} P.-L. Lions, The concentration-compactness principle in the calculus of variations. The locally compact case. I. {\it Ann. Inst. H. Poincar\'e Anal. Non Lin\'eaire} {\bf 1} (1984), 109--145.
\bibitem{Ni-Takagi1} W. M. Ni and I. Takagi, On the shape of least-energy solutions to a semilinear Neumann problem,
{\it Comm. Pure Appl. Math., } {\bf 44} (1991), 819--851.
\bibitem{Pankov} A. Pankov, Periodic nonlinear Schr\"{o}dinger equation with application to photonic crystals, {\it Milan J. Math. } {\bf 73} (2005), 259--287.
\bibitem{Pucci} P. Pucci and J. Serrin, A general variational identity, {\it Indiana Univ. Math. J. }{\bf 35} (1986), 681--703.
\bibitem{Pisto} A. Pistoia, M. Ramos, Locating the peaks of the least energy solutions to an elliptic system with Dirichlet boundary
conditions, {\it NoDEA-Nonlinear Differential Equations Appl.}, {\bf 15} (2008), 1--23.
\bibitem{QS} P.~Quittner and P.~Souplet, Symmetry of components for semilinear elliptic systems, {\it SIAM J. Math. Anal.} {\bf 44} (2012), 2545--2559.
\bibitem{Ramos1} M. Ramos, S. Soares, On the concentration of solutions of singularly perturbed Hamiltonian systems in $\mathbb R^N$, {\it Port. Math. (N.S.)} {\bf 63} (2006), 157--171.
\bibitem{ruf} B.~Ruf, Superlinear elliptic equations and systems. Handbook of Differential Equations: Stationary Partial Differential Equations, vol. V, 211--276. Handb. Differ. Equ., Elsevier/North-Holland, Amsterdam (2008).
\bibitem{boyan} B.~Sirakov, On the existence of solutions of Hamiltonian elliptic systems in $\mathbb R^N$, {\it Adv. Differential Equations} {\bf 5} (2000), 1445--1464.
\bibitem{syso} B.~Sirakov and H.M.~Soares, Soliton solutions to systems of coupled Schr\"{o}dinger equations of Hamiltonian type, {\it Trans. Am. Math. Soc.} {\bf 362} (2010), 5729--5744.
\bibitem{Szulkin} A. Szulkin and T. Weth, Ground state solutions for some indefinite variational problems, {\it J. Funct. Analysis} {\bf 257} (2009), 3802--3822.
\bibitem{Weth} A. Szulkin and T. Weth, The method of Nehari manifold. In Handbook of nonconvex analysis and applications, 597--632 (Boston, MA: International Press, 2010).
\bibitem{van} R. C. A. M. van der Vorst, Variational identities and applications to differential systems, {\it Arch. Rat. Mech. Anal. } {\bf 116} (1991), 375--398.
\bibitem{yang} Y.~Yang, Solitons in Field Theory and Nonlinear Analysis, Springer, 2000.
\end{thebibliography}
\end{document}
\begin{document}
\title{Problem of optimal control for bilinear systems with endpoint constraint}
\section{Introduction and the problem statement}
Linear systems are usually preferable when approximating nonlinear dynamical processes for their simplicity. However, there are many other practical situations for which bilinear models are more appropriate (see \cite{bea11,Bradly,Alami,Khapalov,Mohler,Wei} and the references therein). In general, a problem of control aims to achieve a certain degree of performance for the system at hand using suitable control laws among available options. If this is indeed feasible, then one usually aims to achieve this performance while optimizing a certain criterion. A problem of optimal control is an optimization problem on a reasonable set described by dynamic constraints. As an interesting example, one may mention the question of describing the best control among those that allow one to reach a desired state with minimal cost or energy. Such problems arise in various applications, such as the optimization of hydrothermal systems and non-smooth modeling in mechanics and engineering, etc. (see e.g. \cite{bayon,bayon1,cesari,demyanov,lopes}).
The problem of optimal control for bilinear and semi-linear systems with unconstrained endpoint has been treated by many authors (see \cite{Bradly,cannarsa 92,Alami,Li and Young,linag,Boukhari R,Boukhari 2017}). The question of optimal control with endpoint constraint has been treated in the context of linear and semi-linear systems with additive controls (see \cite{Fattorini,Li and Young} and the references therein).
The approach is based on the Pontryagin's maximum principle. The main goal of this paper is to study a quadratic optimization problem with a restricted endpoint state.
In the case of a bounded set of admissible control, we will characterize the optimal control either for exactly or approximately attainable states. This problem can be formulated as an optimization problem with endpoint constraint, which can also be approximated by a set of unconstrained problems. Moreover, if the steering control is scalar valued, then the optimal control can be expressed as a time-varying feedback law.\\
Let us consider the following system\\
\begin{equation}\label{P2-sys prin1}
\left\{
\begin{array}{ll}
\dot{y}(t)= Ay(t) +{\cal B} (u(t),y(t)) \\
y(0)=y_0 \in X \\
\end{array}
\right.
\end{equation}
where
\begin{itemize}
\item{$A : D(A) \subset X \mapsto X$ is the infinitesimal generator of a linear $C_0$- semi-group $S(t)$ on a real Hilbert space $X$ whose inner product and corresponding norm are denoted respectively by $\langle.,.\rangle$ and $\Vert .\Vert$,}
\item $u\in L^2(0,T;U)$, where $U$ is a real Hilbert space equipped with inner product $\langle .,.\rangle_U$ and the corresponding norm $\Vert .\Vert_U$, and $y$ is the corresponding mild solution to the control $u$,
\item{$ {\cal B}: \ U\times X\rightarrow X $ is a bounded bilinear operator.}\end{itemize}
Let us now consider the following assumptions:\\
$(a)$ For all $y\in X$ the mapping $u\mapsto {\cal B}(u,y)$ is compact,\\
$(b)$ $A$ is the infinitesimal generator of a linear compact $C_0$- semigroup $S(t)$.\\
Note that assumption $(a)$ is systematically satisfied for $U=\mathbb{R}\cdot$\\
The quadratic cost function $J$ to be minimized is defined by
\begin{equation}\label{P2 cost obj}
J(u)= \int_0^T \Vert y(t)\Vert^2 dt +\frac{r}{2} \int_0^T\Vert u(t)\Vert_U^2 dt\cdot
\end{equation} Here, $r>0$ and $u$ belongs to the set of admissible controls
$$U_{ad}=\{ u\in V\ \ / \ \ y(T)=y_d\},$$
where $V$ is a closed convex subset of $L^2(0,T;U)$ and $y_d\in X$ is the desired state.\\
The optimal control problem may be stated as follows
\begin{equation*}\label{P2prob opt obj}
(P) \ \ \ \ \ \left\{
\begin{array}{ll}
min J(u) \\
u\in U_{ad} \\
\end{array}
\right.
\end{equation*}
In order to solve the problem $(P)$, let us introduce the following auxiliary cost function\\
\begin{equation*}\label{P2 cost inter}
J_\epsilon(u)= \Vert y(T)-y_d\Vert^2 + \epsilon J(u),
\end{equation*}
where $\epsilon>0$, and let us consider the following optimal control problem
\begin{equation*}\label{P2 prob opt inter}
(P_\epsilon )\ \ \ \ \ \ \left\{
\begin{array}{ll}
min J_\epsilon(u) \\
u\in V\\
\end{array}
\right.
\end{equation*}
This paper is organized as follows: In Section 2, we will first provide a solution to the auxiliary problem $(P_\epsilon)$. This result is then applied to build a solution of the problem $(P)$. We will further provide sufficient conditions on the operators $A$ and $B$ under which the solution of the problem $(P)$ can be expressed as a time-varying feedback law. Section 3 is devoted to examples and simulations.
\section{ Characterisation of the optimal control }
\subsection{Preliminary}
Let us recall the notion of attainability.
\begin{df}\
\begin{itemize}
\item A target state $y_d\in X$ is approximately attainable for the system (\ref{P2-sys prin1}), if for all $\varepsilon> 0$ there exists $u_\varepsilon\in V$ such that $\Vert y_{u_\varepsilon}(T)-y_d\Vert \leq \varepsilon\cdot$
\item A target state $y_d\in X$ is exactly attainable for the system (\ref{P2-sys prin1}), if there exists $u\in V$ such that $y_u(T)=y_d\cdot$
\end{itemize}
\end{df}
The following lemma provides a continuity property of the solution $y$ with respect to the control $u$.
\begin{lem} \label{2}
If one of the assumptions $(a)$ or $(b)$ holds, then for any sequence $(u_n)\subset L^2(0,T;U)$ such that $u_n\rightharpoonup u$ in $L^2(0,T;U)$,
we have
\begin{equation*}
\lim_{n\rightarrow +\infty}\sup_{0\leq t \leq T}\Vert y_n(t)-y(t)\Vert=0,
\end{equation*}
where $y_n$ and $y$ are the mild solutions of the system (\ref{P2-sys prin1}) respectively corresponding to $u_n$ and $u\cdot$
\end{lem}
\textbf{ Proof\\}
First, let us recall that for all $u\in L^2(0,T;U)$, the system (\ref{P2-sys prin1}) has a unique mild solution corresponding to $u$, which is given by the following variation of constants formula (see e.g. \cite{Li and Young}, p. 66):\\
\begin{equation*}
y(t)=S(t)y_0+\int_0^tS(t-s){\cal B}(u(s),y(s))ds\cdot
\end{equation*}
Thus, the solutions $y_n$ and $y$ of the system (\ref{P2-sys prin1}) respectively corresponding to $u_n$ and $ u$ satisfy the following formula for $t\in [0,T]$
\begin{equation*}
\begin{array}{r c l}
y_n(t)-y(t)&=&\int_0^tS(t-s)\bigg ({\cal B}(u_n(s),y_n(s))-{\cal B}(u(s),y(s))\bigg )ds\cdot
\end{array}
\end{equation*}
Then, for all $t\in [0,T]$ we have
\begin{equation*}
\Vert y_n(t)-y(t)\Vert \leq \Vert \int_0^t S(t-s){\cal B}(u_n(s)-u(s),y(s))ds\Vert + \Vert {\cal B}\Vert\int_0^t\Vert S(t-s)\Vert \Vert u_n(s)\Vert_U \Vert y_n(s)-y(s)\Vert ds
\end{equation*}
Applying the Gronwall lemma (see Theorem 1 in \cite{gronwall}) yields
\begin{equation}\label{P2 y_n ver y^* 1}
\Vert y_n(t)-y(t)\Vert \leq \sup_{t\in [0,T]}\bigg (\Vert \int_0^t S(t-s){\cal B}(u_n(s)-u(s),y(s))ds\Vert \bigg)\exp\big (\Vert {\cal B}\Vert\int_0^t\Vert S(t-s)\Vert \Vert u_n(s)\Vert_U ds\big )
\end{equation}
Using the weak convergence of $u_n$ in $L^2(0,T;U)$ and the fact that the semi-group $S(t)$ is bounded on the entire finite interval $[0,T]$, we have for some $M>0$
\begin{equation}\label{P2 y_n ver y^* 2}
\exp\big (\Vert {\cal B}\Vert \int_0^t\Vert S(t-s)\Vert \Vert u_n(s)\Vert_U ds\big )\leq M ,\ \ \ \forall t\in [0,T]\cdot
\end{equation}
\\
\textbf{$1^{st}$ case : Assume that $(a)$ holds.}\\
The weak convergence of $u_n\rightharpoonup u$ in $L^2(0,T;U)$ implies that ${\cal B}(u_n(.),y(.))$ strongly converge to $ {\cal B}(u(.),y(.))$ in $L^2(0,T;X)\cdot$\\
Then, we conclude that
\begin{equation}\label{P2 case1}
\lim_{n\rightarrow +\infty}\sup_{0\leq t\leq T}\Vert\int_0^tS(t-s){\cal B}(u_n(s)-u(s),y(s))ds\Vert=0\cdot
\end{equation}
It follows from (\ref{P2 y_n ver y^* 1}), (\ref{P2 y_n ver y^* 2}) and ( \ref{P2 case1}) that\\
\begin{equation*}
\lim_{n\rightarrow +\infty}\sup_{0\leq t \leq T}\Vert y_n(s)-y(s)\Vert=0\cdot
\end{equation*}
\textbf{$2^{nd}$ case : Assume that $(b)$ holds.}\\
According to Theorem 3.9 in \cite{Brezis}, the weak convergence $u_n\rightharpoonup u$ in $L^2(0,T;U)$ implies the following weak convergence: ${\cal B}(u_n(.),y(.))\rightharpoonup {\cal B}(u(.),y(.))$ in $L^2(0,T;X)\cdot$\\
Moreover, the weak convergence of ${\cal B}(u_n(.),y(.))\rightharpoonup {\cal B}(u(.),y(.))$ in $L^2(0,T;X)$ gives (see Corollary 3.3 of \cite{Li and Young}):
\begin{equation}\label{P2 y_n ver y^* 3}
\lim_{n\rightarrow +\infty}\sup_{0\leq t\leq T}\Vert\int_0^tS(t-s){\cal B}(u_n(s)-u(s),y(s))ds\Vert=0\cdot
\end{equation}
It follows from (\ref{P2 y_n ver y^* 1}), (\ref{P2 y_n ver y^* 2}) and ( \ref{P2 y_n ver y^* 3}) that\\
\begin{equation*}
\lim_{n\rightarrow +\infty}\sup_{0\leq t \leq T}\Vert y_n(s)-y(s)\Vert=0\cdot
\end{equation*}
\subsection{Optimal control for the problem $P_\epsilon$}
The following result discusses the existence of the optimal control related to the auxiliary problem $(P_\epsilon)$.
\begin{thm} \label{thm3} \ \\
Let one of the assumptions $(a)$ or $(b)$ hold.
\begin{itemize}
\item If $V=\{u\in L^2(0,T;U) / \Vert u\Vert_{L^2(0,T;U)} \leq M\} $ for some $M>0$, then there exists an optimal control for the problem $(P_\epsilon),$ which satisfies the following formula:
\end{itemize}
\begin{equation*}\label{P2 optimal control t;x born}
u^*(t)=-\left( \frac{\Vert \epsilon r u^*(t)+{\cal B}(.,y^*(t))^*\phi(t)\Vert_U}{M}+\epsilon r\right)^{-1}{\cal B}(., y^*(t))^*\phi(t),
\end{equation*}
where $\phi$ is the mild solution of the following adjoint system
\begin{equation}\label{P2 adjoint pontr}
\begin{cases}
\dot{\phi}(t)=-A^*\phi(t)-{\cal B}^*(u^*(t),\phi(t))-2\epsilon y(t)\\
\phi(T)=2(y(T)-y_d)
\end{cases}
\end{equation}
${\cal B}^*(u^*(t),.)$ being the adjoint of the operator ${\cal B}(u^*(t),.)\cdot$
\begin{itemize}
\item If $V=L^2(0,T;U)$, then the control defined by
\end{itemize}
\begin{equation*}
u^*(t)=-\frac{1}{\epsilon r} {\cal B}(. , y^*(t))^*\phi(t)
\end{equation*}
is a solution of the problem $(P_\epsilon)$, where $\phi$ is the mild solution of the adjoint system (\ref{P2 adjoint pontr}).\\
\end{thm}
\textbf{ Proof:}\\
First let us show the existence of a solution of the problem $(P_\epsilon)$. \\
Since the set $\{J_\epsilon(u)/ u\in V\}\subset \mathbb{R}^+$ is not empty and bounded from below, it admits an infimum $J^*$.
Let $ (u_n )_{n\in\mathbb{N}}$ be a minimizing sequence such that $J_\epsilon(u_n)\rightarrow J^*$. \\
Then the sequence $(u_n)$ is bounded, so it admits a sub-sequence still denoted by $(u_n)$, which weakly converges to $u^*\in V$ .\\
Let $y_n$ and $y^*$ be the solutions of (\ref{P2-sys prin1}) respectively corresponding to $u_n$ and $u^*$.\\
From Lemma \ref{2} we have
\begin{equation}\label{P2 limit ball}
\lim_{n\rightarrow +\infty}\Vert y_n(t)-y^*(t)\Vert=0, \ \forall t\in [0,T] \cdot
\end{equation}
Since the norm $\Vert . \Vert$ is lower semi-continuous, it follows from (\ref{P2 limit ball}) that for all $t\in [0,T]$\\
\begin{equation*}
\Vert y^*(t)\Vert^2= \liminf_{n\rightarrow+\infty}\Vert y_n(t)\Vert^2\cdot
\end{equation*}
Applying Fatou's lemma we get \\
\begin{equation}\label{P2 y inf Ja}
\int_0^T\Vert y^*(t)\Vert^2dt= \liminf_{n\rightarrow +\infty}\int_0^T\Vert y_n(t)\Vert^2dt \cdot
\end{equation}
Since $R: u\mapsto \int_0^T\Vert u(t)\Vert_U^2dt $ is convex and lower semi-continuous with respect to weak topology, we have (see Corollary III.8 of \cite{Brezis})
\begin{equation}\label{P2 u inf Ja}
R(u^*)\leq \lim_{n\rightarrow+\infty}\inf R(u_n)\cdot
\end{equation}Combining the formulas (\ref{P2 limit ball}) , (\ref{P2 y inf Ja}) and (\ref{P2 u inf Ja}) we deduce that
\begin{equation*}
\begin{aligned}
J_\epsilon(u^*) & = \Vert y^*(T)-y_d\Vert^2+\epsilon\int_0^T\Vert y(t)\Vert^2dt +\frac{\epsilon r}{2}\int_0^T\Vert u^*(t)\Vert_U^2dt\\
&\leq \liminf_{n\rightarrow+\infty}\Vert y_n(T)-y_d\Vert^2+\epsilon\liminf_{n\rightarrow +\infty} \int_0^T\Vert y_n(t)\Vert^2dt+\frac{\epsilon r}{2}\liminf_{n\rightarrow +\infty} \int_0^T\Vert u_n(t)\Vert_U^2dt\\\\
&\leq \liminf_{n\rightarrow +\infty}J_\epsilon(u_n)\\\\
&\leq J^*\cdot
\end{aligned}
\end{equation*}
We conclude that $J_\epsilon(u^*)=J^*$ and so $u^*$ is a solution of the problem $(P_\epsilon)$.\\
Let us proceed to the characterisation of the optimal control.
\begin{enumerate}
\item \textbf{ The case $V=\{ u\in L^2(0,T;U) \ \ / \ \Vert u\Vert _{L^2(0,T,U)}\leq M\}\cdot$ }\
\end{enumerate}
Let $f_0: X\times U \mapsto \mathbb{R}$ be defined by
\begin{equation*}
f_0(y,u)=\epsilon \bigg (\Vert y\Vert^2+ \frac{r}{2}\Vert u\Vert_U^2\bigg ), \ \forall (y,u)\in X \times U\cdot
\end{equation*}
Then, the cost function $J_\epsilon $ takes the form
\begin{equation*}
J_\epsilon(u)=\Vert y(T)-y_d\Vert^2 +\int_0^T f_0(y(t),u(t))dt\cdot
\end{equation*}
Since $V$ is bounded, by application of Pontryagin's maximum principle (see Theorem 5.2 p. 258 in \cite{Li and Young} and Theorem 6.1 p. 162 in \cite{cannarsa 92} ), we find that for any solution $u^*$ of the problem $(P_\epsilon)$ there exists a function $\phi$ solution of the following adjoint system
\begin{equation*}
\begin{cases}
\dot{\phi}(t)=-A^*\phi(t)-{\cal B}^*(u^*(t),\phi(t))-2\epsilon y^*(t)\\
\phi(T)=2(y^*(T)-y_d)
\end{cases}
\end{equation*}
and satisfies the following condition
\begin{equation}\label{P2 principe max}
H(t,u^*(t),y^*(t),\phi(t))=\min_{u\in V}H(t,u(t),y^*(t),\phi(t)),
\end{equation}
where
\begin{equation*}
H(t,u(t),y^*(t),\phi(t))=f_0(u(t),y^*(t))+\langle \phi(t),{\cal B}(u(t),y^*(t))\rangle\cdot
\end{equation*}
By differentiating the function $u\mapsto H(u)=H(t,u(t),y^*(t),\phi(t))$, we have
\begin{equation*}
H'(u)(t) = \epsilon r u(t)+{\cal B}(. ,y^*(t))^*\phi(t),
\end{equation*}
where ${\cal B}(. ,y^*(t))^*:X\rightarrow U$ is the adjoint of the operator ${\cal B}(. ,y^*(t))$.\\
If $\Vert u^*\Vert_{L^2(0,T;U)}<M$, then we conclude that
\begin{equation}\label{P2 H' 0}
u^*(t)=-\frac{1}{\epsilon r} {\cal B}(. , y^*(t))^*\phi(t)\cdot
\end{equation}
If $\Vert u^*\Vert_{L^2(0,T;U)}=M$, we can distinguish two cases, if $H'(u^*)=0$ then the control is given by (\ref{P2 H' 0}) and if $H'(u^*)\neq 0$, then we proceed as follows:\\
Let
$v_1(t)=\frac{1}{M}u^*(t)$ and $v_2(t)=-\frac{1}{\Vert H'(u^*)\Vert_{L^2(0,T;U)}}H'(u^*)(t)$. We will show that $v_1=v_2\cdot$\\
For all $u\in V$ we have
\begin{equation*}
\langle v_1,u \rangle_{L^2(0,T;U)} \leq \Vert v_1\Vert_{L^2(0,T;U)} \Vert u\Vert_{L^2(0,T;U)} \leq M\quad \text{and} \quad \langle v_1,u^*\rangle_{L^2(0,T;U)}=M\cdot
\end{equation*}
So we conclude that
\begin{equation*}
\forall u\in V, \ \ \ \ \langle v_1,u\rangle_{L^2(0,T;U)}\leq \langle v_1,u^*\rangle_{L^2(0,T;U)}\cdot
\end{equation*}
Moreover, the fact that $V$ is convex, implies
\begin{equation*}
\forall u\in V, \ \ \forall \lambda \in [0,1], \ \ u^* + \lambda(u-u^*) \in V\cdot
\end{equation*}
Then since $u^*$ is a solution of the problem $(P_\epsilon)$, we derive from
(\ref{P2 principe max})
\begin{equation}\label{P2 H' VN}
\begin{array}{ccc}
H(u^*)&\leq& H(u^*+\lambda (u-u^*)) \\
&\leq& H(u^*)+\langle H'(u^*),\lambda (u-u^*)\rangle_{L^2(0,T;U)} \\
&+ &\lambda \Vert u^*-u\Vert_{L^2(0,T;U)}\theta(\lambda \Vert u^*-u\Vert_{L^2(0,T;U)}), \ \forall \lambda\in [0,1], \ \forall u\in V
\end{array}
\end{equation}
where the function $\theta$ is such that
\begin{equation}\label{P2 H' teta}
\lim_{\lambda \rightarrow 0^+}\theta(\lambda \Vert u^*-u\Vert_{L^2(0,T;U)} )=0\cdot
\end{equation}
From (\ref{P2 H' VN}) and (\ref{P2 H' teta}) it comes
\begin{equation*}
\langle H'(u^*),u\rangle_{L^2(0,T;U)} \ge \langle H'(u^*), u^*\rangle_{L^2(0,T;U)}\cdot
\end{equation*}
So, we conclude that
\begin{equation*}
\forall u\in V_{ad}, \ \ \ \ \langle v_2, u\rangle_{L^2(0,T;U)}\leq \langle v_2,u^*\rangle_{L^2(0,T;U)}\cdot
\end{equation*}
Taking into account that $\sup_{u\in V}\langle v_2,u\rangle_{L^2(0,T;U)}=M$, we deduce that $\langle v_2,u^*\rangle_{L^2(0,T;U)}=M$ and that
\begin{equation*}
\langle \frac{1}{2}(v_1+v_2),u^*\rangle_{L^2(0,T;U)}=\frac{1}{2}\langle v_1,u^*\rangle_{L^2(0,T;U)} + \frac{1}{2}\langle v_2,u^*\rangle_{L^2(0,T;U)}=M,
\end{equation*}
then
\begin{equation*}
\Vert \frac{1}{2}(v_1+v_2)\Vert_{L^2(0,T;U)}\ge 1\cdot
\end{equation*}
It follows that
$$\Vert (v_1+v_2)\Vert_{L^2(0,T;U)}=\Vert v_1\Vert_{L^2(0,T;U)} + \Vert v_2\Vert_{L^2(0,T;U)} $$
and that $v_1=v_2$.\\
Furthermore, we have
\begin{equation}\label{P2 H' no}
\frac{1}{M}u^*(t)=-\frac{1}{\Vert H'(u^*)\Vert_{L^2(0,T;U)}}H'(u^*)(t)\cdot
\end{equation}
According to (\ref{P2 H' 0}) and (\ref{P2 H' no}) we have
\begin{equation*}
u^*(t)=\frac{-1}{\frac{\Vert H'(u^*)\Vert_{L^2(0,T;U)}}{M}+\epsilon r} {\cal B}(. , y^*(t))^*\phi(t),
\end{equation*}
where
\begin{equation*}
H'(u)(t) = \epsilon r u(t)+{\cal B}(. , y^*(t))^*\phi(t)\cdot
\end{equation*}
\textbf{ 2. The case $V=L^2(0,T;U)$}.\\\\
From the first part of the proof, there exists a solution $u^*$ of the problem $(P_\epsilon)$.\\
Let us consider the closed convex space $$V^*=\{ u\in L^2(0,T;U) \ / \ \Vert u\Vert_{L^2(0,T;U)} \leq \Vert u^*\Vert_{L^2(0,T;U)} +1\}\cdot$$
It is clear that $u^*\in \mathring{V}^*$, then from the first case, we have $H'(u^*)=0$, which leads to
\begin{equation*}
u^*(t)=-\frac{1}{\epsilon r} {\cal B}(.,y^*(t))^*\phi(t),
\end{equation*}
where $\phi$ is the mild solution of the adjoint system (\ref{P2 adjoint pontr}).\\
This achieves the proof of Theorem \ref{thm3}.
\subsection{Sequential characterization of the solution of the problem $(P)$}
In the sequel, we take a decreasing sequence $(\epsilon_n)$ such that $\epsilon_n\rightarrow 0$ with corresponding sequence of controls $(u_n^*)$ solutions of problems $(P_{\epsilon_n})$.
\begin{thm}\label{thm4}
Assume that $V$ is bounded and let $y_d$ be an approximately attainable state by a control from $V$. Then the problem $(P)$ possesses a solution. Moreover, any weak limit value of $(u_n^*)$ in $L^2(0,T;U)$ is a solution of $(P)$.
\end{thm}
\textbf{Proof:}\\
Since $V$ is bounded, we deduce that the sequence $(u_n^*)$ is bounded, so it admits a weakly converging subsequence, denoted by $(u_n^*)$ as well. Let $u^*$ be a weak limit value of $(u_n^*)$ in $V$.\\
The remainder of the proof is divided into three steps\\
\textbf{Step 1: $y_d$ is exactly attainable, i.e., $U_{ad}\ne \emptyset \cdot$}\\
Let us consider the following problem \\
\begin{equation}\label{P2 prob non vide}
\left\{
\begin{array}{ll}
\min \ \ \Vert y_u(T)-y_d\Vert^2 \\
u\in V\\
\end{array}
\right.
\end{equation}
The set $\{\Vert y_u(T)-y_d\Vert^2 / u\in V\}\subset \mathbb{R}^+$ is not empty and bounded from below, so it admits an infimum $J_d$.\\
Let $ (v_n )_{n\in\mathbb{N}}$ be a minimizing sequence such that $\Vert y_{v_n}(T)-y_d\Vert^2 \underset{n\rightarrow+\infty}{\longrightarrow} J_d$. \\
Since $V$ is bounded, we deduce that the sequence $(v_n)$ is bounded, so it admits a weakly converging subsequence to $v\in V$ still denoted by $(v_n)$.\\
By Lemma \ref{2}, we have for all $t\in [0,T]$
\begin{equation*}
\lim_{n\rightarrow +\infty}\Vert y_{v_n}(t)-y_v(t)\Vert = 0
\end{equation*}
then, we conclude that
\begin{equation}\label{P2 min bor}
\Vert y_v(T)-y_d\Vert^2=\lim_{n\rightarrow + \infty} \Vert y_{v_n}(T)-y_d\Vert^2 =J_d= \min_{u\in V}\Vert y_u(T)-y_d\Vert^2
\end{equation}
So the control $v$ is a solution of the problem (\ref{P2 prob non vide}).\\
Since the system (\ref{P2-sys prin1}) is approximately attainable, we have
\begin{equation}\label{P2 approxi def}
\forall \varepsilon>0 ,\ \ \exists v_{\varepsilon}\in V \ \ / \ \ \ \Vert y_{v_\epsilon}(T)-y_d\Vert\leq \varepsilon
\end{equation}
According to (\ref{P2 min bor}) and (\ref{P2 approxi def}), we get
\begin{equation*}
\forall \epsilon > 0, \ \ \exists v_{\varepsilon}\in V, \ \ \ \Vert y_v(T)-y_d\Vert\leq \Vert y_{v_\epsilon}(T)-y_d\Vert \leq \epsilon
\end{equation*} So we conclude that $\Vert y_v(T)-y_d\Vert =0$ and hence $v\in U_{ad}$. \\
\textbf{Step 2: $ \forall v\in U_{ad},\ \ J(u^*)\leq J(v)\cdot$}\\
Taking into account that $u_n^*$ is a solution of the problem $(P_{\epsilon_n})$ and $y_n^*$ is the corresponding solution of the system (\ref{P2-sys prin1}), we get for all $v\in U_{ad}$
\begin{equation*}
J_{\epsilon_n}(u_n^*)=\Vert y_n^*(T)-y_d\Vert^2 + \epsilon_nJ(u_n^*)\leq J_{\epsilon_n}(v)
\end{equation*}from which, it comes
\begin{equation*}
\begin{array}{r c l}
\epsilon_nJ(u_n^*) &\leq & J_{\epsilon_n}(v)-\Vert y_n^*(T)-y_d\Vert^2 \\\\
&\leq & \epsilon_nJ(v)
\end{array}
\end{equation*}So we find
\begin{equation}\label{P2 contro inf 2}
J(u_n^*)\leq J(v) \quad \text{for all} \ \ v\in U_{ad}\cdot
\end{equation}
Let $y^*$ be the solution of system (\ref{P2-sys prin1}) corresponding to $u^*$.\\
Since $u_n\rightharpoonup u^*$ in $L^2(0,T;U)$, we have by Lemma \ref{2}
\begin{equation}\label{P2 yn tend y*}
\lim_{n\rightarrow +\infty}\Vert y_n^*(t)-y^*(t)\Vert=0, \ \forall t\in [0,T] \cdot
\end{equation}The norm $\Vert . \Vert$ is lower semi-continuous, it follows that for all $t\ge 0$ we have\\
\begin{equation*}
\Vert y^*(t)\Vert^2=\liminf_{n\rightarrow+\infty}\Vert y^*_n(t)\Vert^2\cdot
\end{equation*}
Applying Fatou's lemma we get\\
\begin{equation}\label{P2 y inf Jco}
\int_0^T\Vert y^*(t)\Vert^2dt=\liminf_{n\rightarrow +\infty}\int_0^T\Vert y^*_n(t)\Vert^2dt \cdot
\end{equation}
The function $R$ is lower semi-continuous and convex, it follows from \cite{Brezis} that
\begin{equation}\label{P2 u inf Jco}
R(u^*)\leq \lim_{n\rightarrow+\infty}\inf R(u_n^*)\cdot
\end{equation}By the inequalities (\ref{P2 y inf Jco} ) and (\ref{P2 u inf Jco}) we deduce that
\begin{equation}\label{P2 limi inf born 2}
J(u^*)\leq \liminf_{n\rightarrow +\infty}J(u_n^*)\cdot
\end{equation}Combining ( \ref{P2 contro inf 2}) and (\ref{P2 limi inf born 2}) we deduce that
\begin{equation*}
J(u^*)\leq J(v)\cdot
\end{equation*}\textbf{Step 3 : $u^*\in U_{ad}\cdot$}\\
According to the inequality (\ref{P2 contro inf 2}), we deduce that $J(u_n^*)$ is bounded and
\begin{equation*}
\lim_{n\rightarrow +\infty} \Vert y^*_n(T)-y_d\Vert^2= \lim_{n\rightarrow +\infty} J_{\epsilon_n}(u_n^*)\leq \lim_{n\rightarrow +\infty}J_{\epsilon_n}(v)=\Vert y_v(T)-y_d\Vert^2=0\cdot
\end{equation*}
Then, taking into account the formula (\ref{P2 yn tend y*}), we derive via the continuity of the norm that
\begin{equation*}
\lim_{n\rightarrow +\infty} \Vert y_n^*(T)-y_d\Vert = \Vert y^*(T)-y_d\Vert \leq \Vert y_v(T)-y_d\Vert = 0 \cdot
\end{equation*}
Consequently, $y^*(T)=y_d$ and the control $u^*$ is a solution of problem $(P)$.\\
\begin{thm} \label{thm5}
If $U_{ad}\ne \emptyset$ , then there exists a solution $u^*$ of the problem $(P)$. Furthermore, any weak limit value of the solution $(u_n^*)$ of $(P_{\epsilon _n})$ in $L^2(0,T;U)$ is a solution of $(P)$.
\end{thm}
\textbf{Proof:}\\
Let $v\in U_{ad}$ . Then keeping in mind that $u_n^*$ is the solution of the problem $(P_{\epsilon_n})$ corresponding to $\epsilon_n$, we can see that
\begin{equation*}
J_{\epsilon_n}(u_n^*)\leq J_{\epsilon_n}(v)= \epsilon_nJ(v)
\end{equation*}
It follows that
\begin{equation*}
\epsilon_nJ(u_n^*)=J_{\epsilon_n}(u_n^*)-\Vert y_n^*(T)-y_d\Vert^2\leq J_{\epsilon_n}(u_n^*)\leq \epsilon_nJ(v)
\end{equation*}
Using the definition of the cost $J$ given by (\ref{P2 cost obj}), the last inequality gives
\begin{equation}\label{contro inf 2 }
r\int_0^T\Vert u_n^*(t)\Vert_U ^2dt\leq J(u_n^*)\leq J(v)\cdot
\end{equation}
We deduce that the sequence $(u_n^*)$ is bounded, so it admits a weakly converging subsequence in $V$, also denoted by $(u_n^*)$.
Let $u^*$ be a weak limit value of $(u_n^*)$ in $V$ and let $y^*$ be the solution of system (\ref{P2-sys prin1}) corresponding to $u^*$.\\
Since $u_n\rightharpoonup u^*$ in $L^2(0,T;U)$, we have by Lemma \ref{2}
\begin{equation*}\label{yn tend y*}
\lim_{n\rightarrow +\infty}\Vert y_n^*(t)-y^*(t)\Vert=0, \ \forall t\in [0,T] \cdot
\end{equation*}
Similarly to the proof of Theorem \ref{thm4} we can show that
\begin{equation*}
J(u^*)\leq J(v)\cdot
\end{equation*}
According to the inequality (\ref{contro inf 2 }), we deduce that $J(u_n^*)$ is bounded and
\begin{equation*}
\lim_{n\rightarrow +\infty}J_{\epsilon_n}(u_n^*)=\lim_{n\rightarrow +\infty} \Vert y_n^*(T)-y_d\Vert^2 \leq \Vert y_v(T)-y_d\Vert^2\cdot
\end{equation*}
Hence
\begin{equation*}
\lim_{n\rightarrow +\infty} \Vert y_n^*(T)-y_d\Vert = \Vert y^*(T)-y_d\Vert \leq \Vert y_v(T)-y_d\Vert = 0
\end{equation*}
We conclude that $u^*\in U_{ad}\cdot$\\
\subsection{Optimal feedback control }
In this part we will try to express the optimal control $u^*$ of the problem $(P)$ as a time-varying feedback law for the class of commutative bilinear systems with scalar control \cite{Alami,Wei}.\\
Assume that $U=\mathbb{R}$, then we can write the system (\ref{P2-sys prin1}) as follows
\begin{equation*}
\begin{cases}
\dot{y}(t)= Ay(t) + u(t)By(t) \\
y(0)=y_0 \in X \\
\end{cases}
\end{equation*}
where {$A : D(A) \subset X \mapsto X$ is the infinitesimal generator of a linear $C_0$- semi-group $S(t)$,} $B$ is a bounded linear operator and $u\in V:=L^2(0,T)$ .\\
\begin{thm} \label{thm6}
Assume that $A$ and $B$ commute with each other and that $ U_{ad}\ne \emptyset$. Let $v\in U_{ad} $ and let $y_0\in X$ be such that $S(T)y_0\not\in Ker(B)$. Then for any solution $u^*$ of the problem $(P)$, we have the following formula
\begin{equation*}
u^*(t)= \frac{1}{T}\int_0^Tv(s)ds+\frac{2}{Tr}\int_0^T\int_\alpha^T\langle y^*(s),By^*(s)\rangle ds\ d\alpha-\frac{2}{r}\int_t^T\langle y^*(s),By^*(s)\rangle ds
\end{equation*}
\end{thm}
\textbf{Proof:}\\
Let us consider the system (\ref{P2-sys prin1}) in the time horizon $[0,T]$, and let $A_k=kA(kI-A)^{-1}$ be the Yosida approximation of the operator $A$.
Let $y_k$ and $\phi_k$ be the respective solutions to (\ref{P2-sys prin1}) and (\ref{P2 adjoint pontr}) with $A_k$ instead of $A$. For $u\in L^2(0,T)$,
since $A_k$ is bounded, we have $ y_k, \phi_k \in
H^1(0, T)$ and
\begin{equation*}
\begin{array}{r c l}
\langle\dot{\phi}_k(t),By_k(t)\rangle+\langle \phi_k(t),B\dot{y}_k(t)\rangle &=&\langle -A_k^*\phi_k(t)-u(t)B^*\phi_k(t)-2\epsilon
y_k(t),By_k(t)\rangle \\\\ &+&\langle B^*\phi_k(t), A_ky_k(t)+u(t)By_k(t)\rangle\\\\
&=&\langle\phi_k(t),BA_ky_k(t)-A_kBy_k(t)\rangle-2\epsilon\langle y_k(t),By_k(t)\rangle\cdot\\
\end{array}
\end{equation*}
Thus
\begin{equation}\label{P2 differ phi commut}
\langle\dot{\phi}_k(t),By_k(t)\rangle+\langle \phi_k(t),B\dot{y}_k(t)\rangle=\langle\phi_k(t),[B,A_k]y_k(t)\rangle-2\epsilon\langle y_k(t),By_k(t)\rangle
\end{equation}
where $[B,A_k]:=B A_k- A_k B$.\\
Integrating (\ref{P2 differ phi commut}) over $[t, T],$ we get
\begin{equation*}\label{P1 phi By}
\langle\phi_k(t),By_k(t)\rangle=2\langle y_k(T)-y_d,By_k(T)\rangle -\int_t^T\bigg (\langle\phi_k(s),[B,A_k]y_k(s)\rangle-2\epsilon\langle y_k(s),By_k(s)\rangle\bigg ) ds
\end{equation*}
Since $\phi_k\longrightarrow \phi$ and $y_k\longrightarrow y$ strongly, we obtain by letting $k\rightarrow +\infty$
\begin{equation*}\label{P1 phi By lim}
\langle\phi(t),By(t)\rangle=2\langle y(T)-y_d,By(T)\rangle+2\epsilon\int_t^T\langle y(s),By(s)\rangle ds\cdot
\end{equation*}So, by Theorem \ref{thm3}, we conclude that the solution of the problem $(P_{\epsilon_n})$ corresponding to $\epsilon_n$, is given by
\begin{equation}\label{P2 u_n feedback}
u_n^*(t)=-\frac{1}{\epsilon_nr}\langle \phi_n(t),By_n^*(t)\rangle= -\frac{2}{\epsilon_nr}\langle y_n^*(T)-y_d,By_n^*(T)\rangle-\frac{2}{r}\int_t^T\langle y_n^*(s),By_n^*(s)\rangle ds\cdot
\end{equation}
Let $v\in U_{ad}$. By Theorem \ref{thm5}, any limit value $u^*$ of $u_n^*$ in $L^2(0,T)$ is a solution of the problem $(P)$.\\
Since $A$ and $B$ commute, we have the following formulas
\begin{equation*}
y_v(t)=S(t)\exp(B\int_0^tv(s)ds)y_0
\end{equation*}
and
\begin{equation*}
y^*(t)=S(t)\exp(B\int_0^tu^*(s)ds)y_0\cdot
\end{equation*}
Using the fact that $v,u^*\in U_{ad}$ and $\lim_{n\rightarrow +\infty } y_n^*(T)=y_d$ , we obtain
\begin{equation*}
\lim_{n\rightarrow + \infty} y_n^*(T) = y_{u^*}(T)=y_v(T)=y_d\cdot
\end{equation*}
Hence
\begin{equation*}
\lim_{n\rightarrow +\infty}S(T)\exp(B\int_0^Tu_n^*(t)dt)y_0=S(T)\exp(B\int_0^Tv(t)dt)y_0=S(T)\exp(B\int_0^Tu^*(t)dt)y_0\cdot
\end{equation*}
Since $S(T)y_0\not\in Ker (B)$, we deduce from the last equalities that
\begin{equation*}
\lim_{n\rightarrow +\infty}\int_0^Tu_n^*(t)dt=\int_0^Tv(t)dt=\int_0^Tu^*(t)dt\cdot
\end{equation*}
Moreover, we deduce from the formula (\ref{P2 u_n feedback}), that
\begin{equation*}
\begin{aligned}
\lim_{n\rightarrow +\infty}\ \ \int_0^Tu_n^*(t)dt &= \lim_{n\rightarrow +\infty} \int_0^T\bigg ( -\frac{2}{\epsilon_nr}\langle y_n^*(T)-y_d,By_n^*(T)\rangle-\frac{2}{r}\int_t^T\langle y_n^*(s),By_n^*(s)\rangle ds\bigg ) dt \\\\
&= \lim_{n\rightarrow +\infty} -\frac{2T}{\epsilon_nr}\langle y_n^*(T)-y_d,By_n^*(T)\rangle-\frac{2}{r}\int_0^T\int_t^T\langle y^*(s),By^*(s)\rangle ds dt
\end{aligned}
\end{equation*}
from which, we derive
\begin{equation}\label{P2 terme feedb}
\lim_{n\rightarrow +\infty} -\frac{2T}{\epsilon_nr}\langle y_n^*(T)-y_d,By_n^*(T)\rangle=\int_0^Tv(t)dt+ \frac{2}{r}\int_0^T\int_t^T\langle y^*(s),By^*(s)\rangle ds dt\cdot
\end{equation}
By (\ref{P2 u_n feedback}) and (\ref{P2 terme feedb}) we deduce that $u_n^*(t)\rightarrow u^*(t)$ for all $t\in [0,T]$ and
\begin{equation*}
\begin{aligned}
\lim_{n\rightarrow +\infty}u_n^*(t)&=\lim_{n\rightarrow +\infty} -\frac{2}{\epsilon_nr}\langle y_n^*(T)-y_d,By_n^*(T)\rangle-\frac{2}{r}\int_t^T\langle y_n^*(s),By_n^*(s)\rangle ds\\\\
&= \frac{1}{T}\int_0^Tv(s)ds+\frac{2}{Tr}\int_0^T\int_\alpha^T\langle y^*(s),By^*(s)\rangle ds d\alpha-\frac{2}{r}\int_t^T\langle y^*(s),By^*(s)\rangle ds\\\\ &= u^*(t)\cdot
\end{aligned}
\end{equation*}
We conclude that
\begin{equation*}
u^*(t)= \frac{1}{T}\int_0^Tv(s)ds+\frac{2}{Tr}\int_0^T\int_\alpha^T\langle y^*(s),By^*(s)\rangle ds d\alpha-\frac{2}{r}\int_t^T\langle y^*(s),By^*(s)\rangle ds\cdot
\end{equation*}
\begin{rem} In the case where $S(t_1)$ is one to one for some $t_1>0$ and $y_0\not\in Ker(B),$ the assumption $S(T)y_0\not\in Ker(B)$ in Theorem~\ref{thm6} is satisfied.
\end{rem}
\section{Examples}
\subsection{Wave equation}
Let us consider the following wave equation
\begin{equation*}
\left\{
\begin{array}{lllll}
\frac{\partial^2}{\partial t^2}z(t,x) &=& \Delta z(t,x) +u (t,x)z(t,x), & \ \ t\in[0,T] \ \text{and} \ \ & x\in \Omega=(0,1)\\
z(t,0)&=&z(t,1)=0, & t\in [0,T] &\\
z(0,x)&=&z_0(x),\ & & x\in \Omega
\end{array}
\right.
\end{equation*}
where
\begin{itemize}
\item $u\in L^2(0,T,L^2(\Omega))$,
\item $T>4\max_{x\in \Omega} \vert x-x_0\vert$ for some $ x_0\in \mathbb{R}\setminus [0,1]$,
\item the desired state $z_d \in H_0^1(\Omega)\cap H^2(\Omega)$ is such that $\frac{\Delta z_d}{z_d}\mathbb{1}_{(z_d\ne0)}\in L^\infty(\Omega)$, where $\mathbb{1}_{(z_d\ne0)} $ indicates the characteristic function of the set $(z_d\ne0):=\{ x\in \Omega \ /\ \ z_d(x)\ne 0\}.$
\end{itemize}
This system has the form of the system (\ref{P2-sys prin1}) if we take $y(t)=(z(t), \dot{z}(t))$, $X=H_0^1(\Omega)\times L^2(\Omega)$ with
$\langle (y_1,z_1),(y_2,z_2)\rangle_X=\langle y_1,y_2\rangle_{H_0^1(\Omega)} + \langle z_1,z_2\rangle_{L^2(\Omega)}$
and
\begin{equation*}
A=\begin{pmatrix}
0 & I\\
\Delta & 0
\end{pmatrix} \ \mbox{with} \ D(A)= H_0^1(\Omega)\cap H^2(\Omega)\times H_0^1(\Omega) \ \mbox{and}
\ \ B=\begin{pmatrix}
0 & 0\\
I & 0
\end{pmatrix}\cdot
\end{equation*}
Here $B$ is a compact linear bounded operator on $X$ and $A$ is the infinitesimal generator of a linear $C_0$- semi-group $S(t)$ of isometries (see \cite{ball 79}, p.176).\\
The quadratic cost function is given by
\begin{equation*}\label{100}
J(u)= \int_0^T (\Vert z(t)\Vert_{H_0^1(\Omega)}^2+\Vert \dot{z}(t)\Vert_{L^2(\Omega)}^2) dt + \frac{r}{2}\int_0^T\Vert u(t)\Vert_{L^2(\Omega)}^2 dt,
\end{equation*}
where $u(t):=u(t,\cdot)$ and $z(t):=z(t,\cdot).$ \\
According to \cite{ouzahra2019wave}, there exists a control $v\in L^2(0,T;L^2(\Omega))$ such that the corresponding solution $z_v$ of the system (\ref{P2-sys prin1}) verifies $z_v(T)=z_d$.
Then, according to Theorem \ref{thm5} there exists a control $u^*\in L^2(0,T,\mathbb{R})$, which guarantees the exact attainability of $z_d$ at time $T$, and is a solution of the problem $(P)$ with $U_{ad}=\{u\in L^2(0,T,L^2(\Omega))\ / z(T)=z_d\}$.\\
\begin{rem} The optimal control of the bilinear wave equation has been considered in \cite{linag,Boukhari 2017} in the context of unconstrained endpoint.
\end{rem}
\subsection{Heat equation}
In this part we study the optimal exact attainability for the reaction-diffusion equation.\\
Let us consider the following system
\begin{equation}\label{P2 chal}
\left\{
\begin{array}{ll}
\frac{\partial}{\partial t}y(t,x)=\Delta y(t,x) +u(t,x)y(t,x), \ \ \ \ & \text{in} \ Q=\Omega \times(0,T), \ T>0\\
y(t,0)=y(t,1)=0, \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ & \text{on} \ (0,T)\\
y(0)=y_0 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ & \text{in} \ \ \Omega
\end{array}
\right.
\end{equation}
where $\Omega=(0,1) $ and $u\in L^2(0,T,U)$ is a control function.\\
\textbf{Case 1: Distributed control $(U=L^2(\Omega))$}\\
Assume that $y_0, y_d \in L^2(\Omega)$ are such that
\begin{itemize}
\item for a.e. $x\in \Omega$ , $y_dy_0 \ge 0$, \\
\item for a.e. $x\in \Omega , \ y_0(x) = 0 \iff y_d(x) = 0,$\\
\item $a :=ln(\frac{y_d}{y_0}) {\bf 1}_{(y_0\ne0)} \in L^{\infty }(\Omega),$ where $\mathbb{1}_{(y_0\ne0)} $ indicates the characteristic function of the set $(y_0\ne0):=\{ x\in \Omega \ /\ \ y_0(x)\ne 0\}.$
\item $ \frac{\Delta y_d}{y_d} 1_{{(y_d\ne0)}} \in L^\infty(\Omega), $\\
\item $\vert y_d\vert >0$ a.e. on some nonempty open subset $O$ of $\Omega$.
\end{itemize}
According to Theorem 2 in \cite{contr 2016}, there is a time $T$ for which $y_d$ is exactly attainable for the system (\ref{P2 chal}) using a control $v\in L^2(0,T, L^2(\Omega))$, so $U_{ad}\ne \emptyset $.
Then, according to Theorem \ref{thm5}, there exists a control $u^*$ which guarantees the exact attainability of $y_d$ at time $T$, and is solution of the following problem
\begin{equation}\label{P2 Exemple}
\left\{
\begin{array}{ll}
\min J(u) \\
u\in U_{ad}=\{u\in L^2(0,T,L^2(\Omega))\ \ \ / \ \ y_u(T)=y_d\} \\
\end{array}
\right.
\end{equation}
More precisely, any weak limit of $u_n^*$ given by Theorem \ref{thm3} corresponding to the sequence $(\epsilon_n)$ gives an optimal control $u^*$ for (\ref{P2 Exemple}).\\
\textbf{Case 2: Scalar control $(U=\mathbb{R})$}\\
Here, we have $u(t,x)=u(t)\in \mathbb{R}$. \\
Assume that $y_0, y_d \in L^2(\Omega)$ are such that $y_d=\lambda y_0$ with $\lambda > 1$ and $y_0>0$, a.e in $\Omega$.
According to Theorem II 4 and Remark 4 in \cite{contr 2020}, there is a time $T$ for which $y_d$ is exactly attainable for the system (\ref{P2 chal}) using the control $v(t)=\frac{\lambda-1}{T+(\lambda-1)t}\in L^2(0,T, \mathbb{R})$, so $U_{ad}\ne \emptyset$. \\
By Theorem \ref{thm6}, there exists a feedback control $u^*\in L^2(0,T,\mathbb{R})$ which guarantees the exact attainability of $y_d$ at time $T$, and is solution of the problem $(P)$ with $U_{ad}=\{u\in L^2(0,T,\mathbb{R})\ \ \ / \ \ y^*(T)=y_d\}$, and satisfies the following formula
\begin{equation*}
u^*(t)=\frac{1}{T}\ln(\lambda)+\frac{2}{Tr}\int_0^T\int_\alpha^T\Vert y^*(s)\Vert^2 ds\ d\alpha-\frac{2}{r}\int_t^T\Vert y^*(s)\Vert^2 ds\cdot
\end{equation*}
\subsection{Transport equation}
Let us consider the following transport problem
\begin{equation}\label{P1 trans}
\left\{
\begin{array}{ll}
\frac{\partial}{\partial t}y(t,x)= -\frac{\partial }{\partial x}y(t,x) +u (t)y(t,x), & t\in (0,T),\; x\in \Omega=(0,+\infty)\\
y(t,0)=0, & t\in (0,T) \\
y(0,x)=y_0(x), & x\in \Omega
\end{array}
\right.
\end{equation}
where $u\in L^2(0,T)$. Here the operator $A=-\frac{\partial}{\partial x}$ with the domain $D(A)=H_0^1(\Omega)$ generates a $C_0-$semi-group of isometries $S(t)$ in $X=L^2(\Omega)$.
Below, we will develop numerical simulation for the example (\ref{P1 trans}). For this end, we take $r=2$ , $T=9$, $y_0=x\exp(-x)$ and
$$
y_d(x)=
\begin{cases}
0, & \text{if } x\leq 9\\
(x-9)\exp(9-x), & \text{if } x> 9
\end{cases}
$$
then the control $v=0\in U_{ad}=\{u\in L^2(0,T)\ \ / \ \ y_u(T)=y_d\}\cdot$
By Theorem \ref{thm6}, there exists a feedback control $u^*\in L^2(0,T)$ which guarantees the exact attainability of $y_d$ at time $T$. Moreover $u^*$ is the solution of the problem $(P)$ and satisfies the following formula
\begin{equation}\label{P2 transport 1}
u^*(t)=\frac{1}{T}\int_0^T\int_\alpha^T\Vert y^*(s)\Vert^2 ds\ d\alpha-\int_t^T\Vert y^*(s)\Vert^2 ds\cdot
\end{equation}
In Figure~1, we compare numerically the two controls $u^*$ and $v=0$ in terms of the state at the final time $T=9$. Moreover, we find $J(u^*)=1.2442$ and $J(v)=2.25\approx 2J(u^*)$.\\
\begin{figure}
\caption{The states $y_{u^*}(T)$ and $y_{v}(T)$ at the final time $T=9$.}
\end{figure}
We observe that the desired state is exactly attainable either by using the optimal control $u^*$ or the control $v=0$.
However, the control $ u^* $ leads to a lower cost than the zero control.\\
\begin{rem}Unlike the case of linear systems, the uniqueness of the optimal control of the quadratic cost (\ref{P2 cost obj}) is not guaranteed in general when dealing with bilinear systems, which is due to the lack of convexity of the state w.r.t control. For instance, if we assume that $r=0$ and that $A=B $ is a skew-adjoint matrix, we can see that the cost function is constant so we have an infinity of optimal controls. However,
in the case of the quadratic cost function $J(u)=\int_0^T u^2(t)dt$, the uniqueness of the optimal control is assured by the strict convexity of the cost $J$ (see \cite{Wei}). Moreover, in the case of a cost function $J$ of the form (\ref{P2 cost obj}), one can prove the uniqueness of the optimal bilinear control under some constraint relating $T$ and $y_0$ \cite{Bradly,Boukhari R,Boukhari 2017}.
\end{rem}
\section{Conclusion}
In this work, we studied the question of quadratic optimal control with endpoint constraint for bilinear systems. The optimal control is characterized via a set of unconstrained minimization problems, then it is expressed as a time varying feedback for commutative bilinear systems. The obtained results are applied to parabolic and hyperbolic PDE. As an interesting continuation of the present work, one can consider the same questions for unbounded control operators, such as the case of Fokker Planck equation \cite{fokker}.\\
{\bf Conflict of interest statement.}\\
On behalf of all authors, the corresponding author states that there is no conflict of interest.
\end{document} |
\begin{document}
\title[Whittaker-Hill] {Instability intervals of the Whittaker-Hill operator}
\author{Xu-Dan Luo}
\address{Academy of Mathematics and Systems Science, Chinese Academy of Sciences, Beijing 100190, China.}
\email{[email protected]}
\subjclass[2010]{Primary : 34L15; 41A60; 47E05.
}
\keywords{The Whittaker-Hill operator; Instability intervals; Asymptotics}
\begin{abstract}
The Hill operator admits a band gap structure. In the special case of the Mathieu operator, all instability intervals are open;
however, the instability intervals of the Whittaker-Hill operator may be open or closed. In 2007, P. Djakov and B. Mityagin gave the asymptotics of band gaps for a special Whittaker-Hill operator [P. Djakov and B. Mityagin, J. Funct. Anal., 242, 157-194 (2007).]. In this paper, a more general Whittaker-Hill operator is considered and the asymptotics of the instability intervals are studied.
\end{abstract}
\maketitle
\section{Introduction and main results}
The Floquet (Bloch) theory indicates that the spectrum of the Schr\"{o}dinger operator
\begin{equation}
\label{E:Schrodinger operator}
Lf:=-f''(x)+\nu(x)f(x), \ \ \ x\in\mathbb{R}
\end{equation}
with a smooth real-valued periodic potential $\nu(x)$ has a band gap structure. If we further assume that $\nu(x)$ is of period $\pi$ and set
\begin{equation*}
\nu(x)=-\sum_{n=1}^{\infty}\theta_{n}\cos(2nx)-\sum_{m=1}^{\infty}\phi_{m}\sin (2mx),
\end{equation*}
where $\theta_{n}$ and $\phi_{m}$ are real, then (\ref{E:Schrodinger operator}) can be written as:
\begin{equation}
Lf:=-f''(x)-\left[\sum_{n=1}^{\infty}\theta_{n}\cos(2nx)+\sum_{m=1}^{\infty}\phi_{m}\sin (2mx)\right]f(x).
\end{equation}
Moreover, there are \cite{Eastham} two monotonically increasing infinite sequences of real numbers
\begin{equation*}
\lambda_{0}^{+}, \ \lambda_{1}^{+}, \ \lambda_{2}^{+}, \cdots
\end{equation*}
and
\begin{equation*}
\lambda_{1}^{-}, \ \lambda_{2}^{-}, \ \lambda_{3}^{-}, \cdots
\end{equation*}
such that the Hill equation
\begin{equation}
Lf=\lambda f
\end{equation}
has a solution of period $\pi$ if and only if $\lambda=\lambda_{n}^{+}$, $n=0, 1, 2, \cdots$, and a solution of semi-period $\pi$ (i.e., $f(x+\pi)=-f(x)$) if and only if $\lambda=\lambda_{n}^{-}$, $n=1, 2, 3, \cdots$. The $\lambda_{n}^{+}$ and $\lambda_{n}^{-}$ satisfy the inequalities
\begin{equation*}
\lambda_{0}^{+}<\lambda_{1}^{-}\leq \lambda_{2}^{-}\leq \lambda_{1}^{+}\leq \lambda_{2}^{+}<\lambda_{3}^{-}\leq \lambda_{4}^{-}<\lambda_{3}^{+}\leq \lambda_{4}^{+}<\cdots
\end{equation*}
and the relations
\begin{equation*}
\lim_{n\rightarrow\infty}\lambda_{n}^{+}=\infty, \ \ \ \lim_{n\rightarrow\infty}\lambda_{n}^{-}=\infty.
\end{equation*}
Besides, $\gamma_{n}:=(\lambda_{n+1}^{-}-\lambda_{n}^{-})$ for odd $n$ and $\gamma_{n}:=(\lambda_{n}^{+}-\lambda_{n-1}^{+})$ for even $n$ are referred to as band gaps or instability intervals,
where $n\geq 1$.
It is well-known that there is an extensive theory for the Mathieu operator, where the potential $\nu(x)$ is a single trigonometric function, i.e.,
\begin{equation}
\nu(x)=-B \cos 2x.
\end{equation}
Ince \cite{Ince 3} proved that all instability intervals of the Mathieu operator are open, i.e., no closed gaps for the Mathieu operator. In 1963, Levy and Keller \cite{Levy} gave the asymptotics of $\gamma_{n}=\gamma_{n}(B)$, i.e., for fixed $n$ and real nonzero number $B$, when $B \to 0$,
\begin{equation}
\gamma_{n}=\frac{8}{[(n-1)!]^{2}}\cdot \left(\frac{B}{8}\right)^{n} (1+O(B)).
\end{equation}
18 years later, Harrell \cite{Harrell} gave the asymptotics of the band gaps of the Mathieu operator for fixed $B$ and $n\rightarrow\infty$, i.e.,
\begin{equation}
\gamma_{n}=\lambda_{n}^{+}-\lambda_{n}^{-}=\frac{8}{[(n-1)!]^{2}}\cdot \left(\frac{|B|}{8}\right)^{n}\left(1+O\left(\frac{1}{n^{2}}\right)\right).
\end{equation}
Compared with the Mathieu potential, the band gaps for the Whittaker-Hill potential
\begin{equation}
\label{E:Whittaker-Hill potential}
\nu(x)=-(B\cos 2x+C\cos 4x)
\end{equation}
may be open or closed.
Specifically, if $B=4\alpha t$ and $C=2\alpha^{2}$, for any real $\alpha$ and natural number $t$, it is already known that for odd $t=2m+1$, all the even gaps are closed except the first $m$, but no odd gap disappears; similarly, for even $t=2m$, except for the first $m$, all the odd gaps are closed, but even gaps remain open (see Theorem 11, \cite{Djakov 1} and Theorem 7.9, \cite{Maguns}).
In 2007, P. Djakov and B. Mityagin (see \cite{Djakov 2}) gave the asymptotics of the instability intervals for the above special Whittaker-Hill potential, namely, for real $B, C\neq 0$, $B=4 \alpha t$ and $C=2 \alpha^{2}$, they have the following results, where either both $\alpha$ and $t$ are real numbers if $C>0$ or both $\alpha$ and $t$ are pure imaginary numbers if $C<0$.
\begin{theorem}[\cite{Djakov 2}]
\label{T:Djakov 1}
Let $\gamma_{n}$ be the $n$-th band gap of the Whittaker-Hill operator
\begin{equation}
Lf=-f''-[4\alpha t \cos 2x+ 2\alpha^{2} \cos 4x]f,
\end{equation}
where either both $\alpha$ and $t$ are real, or both are pure imaginary numbers. If $t$ is fixed and $\alpha\rightarrow 0$, then for even $n$
\begin{equation}
\gamma_{n}=\left|\frac{8\alpha^{n}}{2^{n}[(n-1)!]^{2}}\prod_{k=1}^{n/2}(t^{2}-(2k-1)^{2})\right|(1+O(\alpha)),
\end{equation}
and for odd $n$
\begin{equation}
\gamma_{n}=\left|\frac{8\alpha^{n}t}{2^{n}[(n-1)!]^{2}}\prod_{k=1}^{(n-1)/2}(t^{2}-(2k)^{2})\right|(1+O(\alpha)).
\end{equation}
\end{theorem}
\begin{theorem}[\cite{Djakov 2}]
\label{T:Djakov 2}
Let $\gamma_{n}$ be the $n$-th band gap of the Whittaker-Hill operator
\begin{equation}
Lf=-f''-[4\alpha t \cos 2x+ 2\alpha^{2} \cos 4x]f,
\end{equation}
where either both $\alpha$ and $t\neq 0$ are real, or both are pure imaginary numbers. Then the following asymptotic formulae hold for fixed $\alpha$, $t$ and $n\rightarrow\infty$:
for even $n$
\begin{equation}
\gamma_{n}=\frac{8|\alpha|^{n}}{2^{n}[(n-2)!!]^{2}}\left|\cos\left(\frac{\pi}{2}t\right)\right|\left[1+O\left(\frac{\log n}{n}\right)\right],
\end{equation}
and for odd $n$
\begin{equation}
\gamma_{n}=\frac{8|\alpha|^{n}}{2^{n}[(n-2)!!]^{2}}\frac{2}{\pi}\left|\sin\left(\frac{\pi}{2}t\right)\right|\left[1+O\left(\frac{\log n}{n}\right)\right],
\end{equation}
where $(2m-1)!!=1\cdot3\cdots(2m-1)$, \ \ \ $(2m)!!=2\cdot4\cdots(2m)$.
\end{theorem}
In this paper, a more general Whittaker-Hill operator
\begin{equation}
L=-D^{2}+(bq^{m_{1}}\cos 2x+cq^{m_{2}}\cos 4x)
\end{equation}
is considered and the asymptotics of the instability intervals are derived, where $b$, $c$, $q$, $m_{1}$ and $m_{2}$ are real. Our theorems are stated as follows, in particular, we can deduce P. Djakov and B. Mityagin's results by choosing $m_{1}=1$, $m_{2}=2$, $b=-4\alpha t$ and $c=-2\alpha^{2}$.
\begin{theorem}
\label{T:1}
Let the Whittaker-Hill operator be
\begin{equation}
Ly=-y''+(bq^{m_{1}}\cos 2x+cq^{m_{2}}\cos 4x)y,
\end{equation}
where $b$, $c$ and $q$ are real. If $q\rightarrow 0$ and $m_{1}$, $m_{2}$ are positive real parameters, then one of the following results holds:
\begin{enumerate}
\item
when $m_{1}> \frac{m_{2}}{2}$,
\\
(i)
\begin{equation}
\gamma_{2m}=\left|\frac{32\cdot(\frac{c}{2})^{m}\cdot q^{m_{2}m}}{2^{4m}[(m-1)!]^{2}}\right|+O\Big(q^{m_{2}(m-\frac{1}{2})+m_{1}}\Big),
\end{equation}
\\
(ii)
\begin{equation}
\gamma_{1}= |bq^{m_{1}}|+O(q^{2m_{1}-\frac{m_{2}}{2}}), \ \ \ \gamma_{3}=\frac{|bcq^{m_{1}+m_{2}}|}{8}+O(q^{2m_{1}+\frac{m_{2}}{2}}),
\end{equation}
\begin{equation}
\begin{split}
&\gamma_{2m-1}= \Big|\left(\frac{c}{2}\right)^{m-1}\cdot b\cdot q^{m_{2}(m-1)+m_{1}}\cdot \frac{8}{2^{3m}}\cdot
\Big\{ \frac{1}{[(2m-3)!!]^{2}}\\
&\cdot \sum_{i=1}^{m-2} \frac{(2m-2i-3)!!\cdot (2i-1)!!}{i!\cdot (m-1-i)!} + \frac{2}{(2m-3)!!\cdot(m-1)!} \Big\}\Big|+O\Big(q^{m_{2}(m-\frac{3}{2})+2m_{1}}\Big)
\ \ \ \text{for} \ \ \ m\geq 3;
\end{split}
\end{equation}
\item
when $m_{1}< \frac{m_{2}}{2}$,
\begin{equation}
\gamma_{1}=\left|bq^{m_{1}}\right|+O(q^{m_{2}-m_{1}}), \ \ \ \gamma_{2}=\left|cq^{m_{2}}+\frac{b^{2}q^{2m_{1}}}{8}\right|+O(q^{m_{2}}),
\end{equation}
\begin{equation}
\gamma_{n}=\left|\frac{8\cdot b^{n}\cdot q^{m_{1}n}}{2^{3n}\cdot [(n-1)!]^{2}}\right|+O(q^{m_{1}n+m_{2}-2m_{1}})
\ \ \ \ \ \ \ \text{for} \ \ \ n\geq 3;
\end{equation}
\item
when $m_{1}= \frac{m_{2}}{2}$ and $c<0$,
\\
(i)
\begin{equation}
\gamma_{1}=\left|bq^{m_{1}}\right|+O(q^{3m_{1}}), \ \ \ \gamma_{2}=\left|cq^{m_{2}}+\frac{b^{2}q^{2m_{1}}}{8}\right|+O(q^{4m_{1}}),
\end{equation}
\\
(ii)
\begin{equation}
\gamma_{2m}=8\left|\frac{\prod_{k=1}^{m}\Big(\Big(\frac{b}{2}\Big)^{2}+8c\Big(k-\frac{1}{2}\Big)^{2}\Big)\cdot q^{2m_{1}\cdot m}}{2^{4m}\cdot[(2m-1)!]^{2}}\right|+O(q^{2m_{1}(m+1)}) \ \ \ \ \ \ \ \text{for} \ \ \ m\geq 2,
\end{equation}
\\
(iii)
\begin{equation}
\gamma_{2m-1}=32\left|\frac{\frac{b}{2}\prod_{k=1}^{m-1}\Big(\Big(\frac{b}{2}\Big)^{2}+8ck^{2}\Big)\cdot q^{m_{1}\cdot (2m-1)}}{2^{4m}\cdot[(2m-2)!]^{2}}\right|+O(q^{m_{1}(2m+1)}) \ \ \ \ \ \ \ \text{for} \ \ \ m\geq 2.
\end{equation}
\end{enumerate}
Here, $m$ is a positive integer and $\gamma_{n}$ is the $n$-th instability interval.
\end{theorem}
\begin{theorem}
\label{T:2}
Let the Whittaker-Hill operator be
\begin{equation}
Ly=-y''+(bq^{m_{1}}\cos 2x+cq^{2m_{1}}\cos 4x)y,
\end{equation}
where $b$, $q$ are real, $c<0$ and $m_{1}>0$. Then the following asymptotic formulae hold for fixed $b$, $c$, $q$ and $n\rightarrow\infty$.
\begin{equation}
\gamma_{2m}=\frac{q^{2m_{1}\cdot m}\cdot |c|^{m}}{2^{3m-3}\cdot[(2m-2)!!]^{2}}\cdot \left|\cos\left(\frac{b\pi}{4\sqrt{-2c}}\right)\right|\cdot\left[1+O\left(\frac{\log m}{m}\right)\right],
\end{equation}
\begin{equation}
\gamma_{2m-1}=\frac{q^{m_{1}(2m-1)}\cdot |c|^{m-1}\cdot\sqrt{-2c}}{2^{3m-5}\cdot[(2m-3)!!]^{2}\cdot \pi}
\cdot \left|\sin\left(\frac{b\pi}{4\sqrt{-2c}}\right)\right|\cdot\left[1+O\left(\frac{\log m}{m}\right)\right].
\end{equation}
Here, $m$ is a positive integer and $\gamma_{n}$ is the $n$-th instability interval.
\end{theorem}
\section{Some lemmas}
\begin{lemma}[\cite{Djakov 2}]
\label{L:1}
Let the Schr\"{o}dinger operator
\begin{equation}
Ly=-y''+v(x)y
\end{equation}
be defined on $\mathbb{R}$, with a real-valued periodic $L^{2}([0, \pi])$-potential $v(x)$, where $v(x)=\sum_{m\in \mathbb{Z}} V(m)\exp(imx)$, $V(m)=0$ for odd $m$, then $\|v\|^{2}=\sum|V(m)|^{2}$.
(a) If $\|v\|<\frac{1}{9}$, then for each $n=1,2,\cdots$, there exists $z=z_{n}$ such that
~\\$|z|\leq 4\|v\|$, and
\begin{equation}
\label{E:length estimation}
2|\beta_{n}(z)|(1-3\|v\|^{2}/n^{2})\leq \gamma_{n}\leq 2|\beta_{n}(z)|(1+3\|v\|^{2}/n^{2}),
\end{equation}
where
\begin{equation}
\beta_{n}(z)=V(2n)+\sum_{k=1}^{\infty}\sum_{j_{1},\cdots,j_{k}\neq \pm n}\frac{V(n-j_{1})V(j_{1}-j_{2})\cdots V(j_{k-1}-j_{k})V(j_{k}+n)}
{(n^{2}-j_{1}^{2}+z)\cdots(n^{2}-j_{k}^{2}+z)}
\end{equation}
converges absolutely and uniformly for $|z|\leq 1$, and $\gamma_{n}$ is the $n$-th instability interval.
(b) If $V(0)=\frac{1}{\pi}\int_{0}^{\pi}v(x)dx=0$, then there is $N_{0}=N_{0}(v)$ such that (\ref{E:length estimation}) holds for $n\geq N_{0}$ with $z=z_{n}$, $|z_{n}|<1$.
\end{lemma}
\begin{lemma}[\cite{Volkmer}]
\label{L:Volkmer}
The Ince equation
\begin{equation}
\label{E:general Ince}
(1+a\cos 2t)y''(t)+b(\sin 2t)y'(t)+(c+d\cos 2t)y(t)=0
\end{equation}
can be transformed into the Whittaker-Hill equation by assuming
\begin{equation}
a=0,\ \ b=-4q,\ \ c=\lambda+2q^{2}, \ \ d=4(m-1)q.
\end{equation}
Moreover,
\begin{equation}
\label{E:semifinite band gap 1}
\mathrm{sign} (\alpha_{2n}-\beta_{2n})=\mathrm{sign} \ q^{2}\cdot \mathrm{sign} \prod_{p=-n}^{n-1}(2p-m+1)
\end{equation}
and
\begin{equation}
\label{E:semifinite band gap 2}
\mathrm{sign} (\alpha_{2n+1}-\beta_{2n+1})=\mathrm{sign} \ q \cdot \mathrm{sign} \prod_{p=-n}^{n-1}(2p-m),
\end{equation}
where $a$, $b$ and $d$ are real; $\alpha_{2n}$ and $\beta_{2n+2}$ are defined by the eigenvalues corresponding to non-trivial even and odd solutions with period $\pi$, respectively; and $\alpha_{2n+1}$ and $\beta_{2n+1}$ are defined by the eigenvalues corresponding to non-trivial even and odd solutions with semi-period $\pi$, respectively.
\end{lemma}
\begin{lemma}[\cite{Maguns}]
\label{L:Maguns}
The Whittaker-Hill equation
\begin{equation}
f''+[\lambda+4mq\cos 2x+2q^{2}\cos 4x]f=0
\end{equation}
can have two linearly independent solutions of period $\pi$ or $2\pi$ if and only if $m$ is an integer. If $m=2l$ is even, then the odd intervals of instability on the $\lambda$ axis disappear, with at most $|l|+1$ exceptions, but no even interval of instability disappears. If $m=2l+1$ is odd, then at most $|l|+1$ even intervals of instability remain open, but no odd interval of instability disappears.
\end{lemma}
\begin{lemma}
\label{L:2}
The Whittaker-Hill operator
\begin{equation}
L=-D^{2}-(B\cos 2x+C\cos 4x)
\end{equation}
admits all even gaps closed except the first $n$ when $\pm\frac{B}{4\sqrt{2C}}=-n-\frac{1}{2}$, $n\in\mathbb{Z}_{\geq 0}$; and all odd gaps closed except the first $n+1$ when $\pm\frac{B}{4\sqrt{2C}}=-n-1$, $n\in\mathbb{Z}_{\geq 0}$.
\end{lemma}
\begin{proof}
By Lemma \ref{L:Maguns}, we obtain that the Whittaker-Hill equation
\begin{equation}
\label{E:W-H-real}
f''(x)+(A+B\cos 2x+C\cos 4x)f(x)=0
\end{equation}
has two linearly independent solutions of period $\pi$ or semi-period $\pi$ if and only if $\frac{B}{2\sqrt{2C}}\in \mathbb{Z}$. Moreover, we transform (\ref{E:W-H-real})
into the Ince equation
\begin{equation}
g''(x)+4\sqrt{\frac{C}{2}}\sin 2x\cdot g'(x)+\left[(A+C)+\left(B+4\sqrt{\frac{C}{2}}\right)\cos 2x\right]g(x)=0
\end{equation}
via $f(x)=e^{-\sqrt{\frac{C}{2}}\cos 2x}\cdot g(x)$. From Lemma \ref{L:Volkmer}, we can write the parameters $q$, $\lambda$ and $m$ of equation (\ref{E:general Ince}) in terms of $A$, $B$ and $C$, i.e.,
\begin{equation*}
q=-\sqrt{\frac{C}{2}}, \ \ \ \lambda=A, \ \ \ m=-\frac{B}{2\sqrt{2C}}.
\end{equation*}
(1) If $m=2n+1$, $n\in \mathbb{N}^{+}\cup\{0\}$, i.e., $\frac{B}{2\sqrt{2C}}=-2n-1$, and the solutions satisfy the periodic boundary conditions, then we deduce from Lemma \ref{L:Volkmer} that the first $2n+1$ eigenvalues are simple, and others are double.
(2) If $m=2n+2$, $n\in \mathbb{N}^{+}\cup\{0\}$, i.e., $\frac{B}{2\sqrt{2C}}=-2n-2$, and the solutions satisfy the semi-periodic boundary conditions, then we also deduce from Lemma \ref{L:Volkmer} that the first $2n+2$ eigenvalues are simple, and others are double.
Besides, we can also transform (\ref{E:W-H-real}) into the Ince equation
\begin{equation}
g''(x)-4\sqrt{\frac{C}{2}}\sin 2x\cdot g'(x)+\left[(A+C)+\left(B-4\sqrt{\frac{C}{2}}\right)\cos 2x\right]g(x)=0
\end{equation}
via $f(x)=e^{\sqrt{\frac{C}{2}}\cos 2x}\cdot g(x)$. Thus, we have similar conclusions.
\end{proof}
In order to prove our results, let us consider all possible walks from $-n$ to $n$. Each such walk is determined by the sequence of its steps
\begin{equation}
x=(x_{1}, \cdots, x_{\nu+1}),
\end{equation}
or by its vertices
\begin{equation}
j_{s}=-n+\sum_{k=1}^{s}x_{k}, \ \ \ s=1, \cdots, \nu.
\end{equation}
The relationship between steps and vertices is given by the formula
\begin{equation}
x_{1}=n+j_{1};\ \ \ x_{k}=j_{k}-j_{k-1}, \ k=2, \cdots, \nu; \ \ \ x_{\nu+1}=n-j_{\nu}.
\end{equation}
\begin{definition}
\label{D:1}
Let $X$ denote the set of all walks from $-n$ to $n$ with steps $\pm 2$ or $\pm 4$. For each $x=(x_{s})_{s=1}^{\nu+1}\in X$ and each $z\in \mathbb{R}$, we define
\begin{equation}
B_{n}(x,z)=\frac{V(x_{1})\cdots V(x_{\nu+1})}{(n^{2}-j_{1}^{2}+z)\cdots(n^{2}-j_{\nu}^{2}+z)}.
\end{equation}
\end{definition}
\begin{definition}
\label{D:2}
Let $X^{+}$ denote the set of all walks from $-n$ to $n$ with positive steps equal to $2$ or $4$. For each $\xi\in X^{+}$, let $X_{\xi}$ denote the set of all walks $x\in X\backslash X^{+}$ such that each vertex of $\xi$ is a vertex of $x$ also. For each $\xi\in X^{+}$ and $\mu\in\mathbb{N}$, let $X_{\xi,\mu}$ be the set of all $x\in X_{\xi}$ such that $x$ has $\mu$ more vertices than $\xi$. Moreover, for each $\mu$-tuple $(i_{1}, \cdots, i_{\mu})$ of integers in $I_{n}=(n+2\mathbb{Z})\setminus \{\pm n\}$, we define $X_{\xi}(i_{1}, \cdots, i_{\mu})$ as the set of all walks $x$ with $\nu+1+\mu$ steps such that $(i_{1}, \cdots, i_{\mu})$ and the sequence of the vertices of $\xi$ are complementary subsequences of the sequence of the vertices of $x$.
\end{definition}
From Definition \ref{D:2}, we deduce
\begin{equation}
X_{\xi,\mu}=\bigcup_{(i_{1}, \cdots, i_{\mu})\in (I_{n})^{\mu}}X_{\xi}(i_{1}, \cdots, i_{\mu}).
\end{equation}
\begin{lemma}[\cite{Djakov 2}]
\label{L:3}
If $\xi\in X^{+}$ and $n\geq 3$, then for $z\in[0,1)$
\begin{equation}
1-z\frac{\log n}{n}\leq \frac{B_{n}(\xi,z)}{B_{n}(\xi,0)}\leq 1-z\frac{\log n}{4n},
\end{equation}
and for $z\in(-1,0]$
\begin{equation}
1+|z|\frac{\log n}{2n}\leq \frac{B_{n}(\xi,z)}{B_{n}(\xi,0)}\leq 1+|z|\frac{2\log n}{n}.
\end{equation}
\end{lemma}
\begin{lemma}[\cite{Djakov 2}]
\label{L:4}
For each walk $\xi\in X^{+}$ and each $\mu$-tuple $(i_{1}, \cdots, i_{\mu})\in (I_{n})^{\mu}$,
\begin{equation}
\sharp X_{\xi}(i_{1}, \cdots, i_{\mu})\leq 5^{\mu}.
\end{equation}
\end{lemma}
\begin{lemma}
\label{L:5}
If $\xi\in X^{+}$ and $|z|\leq 1$, then there exists $n_{1}$ such that for $n\geq n_{1}$,
\begin{equation}
\sum_{x\in X_{\xi}}|B_{n}(x,z)|\leq |B_{n}(\xi,z)|\cdot \frac{K \log n}{n},
\end{equation}
where $K=40 \left(|\frac{b}{2}q^{m_{1}}|+|\frac{c}{2}q^{m_{2}}|+|\frac{b^{2}}{2c}q^{2m_{1}-m_{2}}|\right)$.
\end{lemma}
\begin{proof}
By Definition \ref{D:2}, we have
\begin{equation}
\sum_{x\in X_{\xi}}|B_{n}(x,z)|=\sum_{\mu=1}^{\infty}\sum_{x\in X_{\xi,\mu}}|B_{n}(x,z)|.
\end{equation}
Moreover,
\begin{equation}
\sum_{x\in X_{\xi,\mu}}|B_{n}(x,z)|\leq \sum_{(i_{1}, \cdots, i_{\mu})}\sum_{X_{\xi}(i_{1}, \cdots, i_{\mu})}|B_{n}(x,z)|,
\end{equation}
where the first sum on the right is taken over all $\mu$-tuples $(i_{1}, \cdots, i_{\mu})$ of integers $i_{s}\in n+2\mathbb{Z}$ such that $i_{s}\neq \pm n$.
Fix $(i_{1}, \cdots, i_{\mu})$, if $x\in X_{\xi}(i_{1}, \cdots, i_{\mu})$, then
\begin{equation}
\frac{B_{n}(x,z)}{B_{n}(\xi,z)}=\frac{\prod_{k}V(x_{k})}{\prod_{s}V(\xi_{s})}\cdot \frac{1}{(n^{2}-i_{1}^{2}+z)\cdots(n^{2}-i_{\mu}^{2}+z)}.
\end{equation}
Note that $V(\pm 2)=\frac{b}{2}q^{m_{1}}$ and $V(\pm4)=\frac{c}{2}q^{m_{2}}$.
If each step of $\xi$ is a step of $x$, then
\begin{equation}
\frac{\prod_{k}\left|V(x_{k})\right|}{\prod_{s}\left|V(\xi_{s})\right|}\leq C^{\mu},
\end{equation}
where $C:=|\frac{b}{2}q^{m_{1}}|+|\frac{c}{2}q^{m_{2}}|+|\frac{b^{2}}{2c}q^{2m_{1}-m_{2}}|$. For the general case, let $(j_{s})_{s=1}^{\nu}$ be the vertices of $\xi$, and let us put for convenience $j_{0}=-n$ and $j_{\nu+1}=n$. Since each vertex of $\xi$ is a vertex of $x$, for each $s$, $1\leq s\leq \nu+1$,
\begin{equation}
\xi_{s}=j_{s}-j_{s-1}=\sum_{k\in J_{s}}x_{k},
\end{equation}
where $x_{k}$, $k\in J_{s}$, are the steps of $x$ between the vertices $j_{s-1}$ and $j_{s}$. Fix an $s$, $1\leq s \leq \nu+1$. If $\xi_{s}=2$, then there is a step $x_{k^{*}}$, $k^{*}\in J_{s}$ such that $|x_{k^{*}}|=2$, otherwise, $\xi_{s}$ would be a multiple of $4$. Hence, $|V(\xi_{s})|=|V(x_{k^{*}})|$, which implies that
\begin{equation}
\frac{\prod_{J_{s}}|V(x_{k})|}{|V(\xi_{s})|}\leq C^{b_{s}-1},
\end{equation}
where $b_{s}$ is the cardinality of $J_{s}$.
If $\xi_{s}=4$, there are two possibilities. (1) If there is $k_{*}\in J_{s}$ with $|x_{k_{*}}|=4$, then $|V(\xi_{s})|=|V(x_{k_{*}})|$, so the above inequality holds. (2) There are $k', k''\in J_{s}$ such that $|x_{k'}|=|x_{k''}|=2$, hence,
\begin{equation}
\frac{|V(x_{k'})V(x_{k''})|}{|V(\xi_{s})|}=\left|\frac{b^{2}}{2c}q^{2m_{1}-m_{2}}\right|\leq C,
\end{equation}
so the above inequality also holds. Note that
\begin{equation}
\sum_{s}(b_{s}-1)=\mu,
\end{equation}
we get
\begin{equation}
\frac{\prod_{k}\left|V(x_{k})\right|}{\prod_{s}\left|V(\xi_{s})\right|}\leq C^{\mu}
\end{equation}
holds for the general case.
By
\begin{equation}
\frac{1}{|n^{2}-i^{2}+z|}\leq \frac{2}{|n^{2}-i^{2}|},
\end{equation}
where $i\neq \pm n$, $|z|\leq 1$, we have
\begin{equation}
\frac{|B_{n}(x,z)|}{|B_{n}(\xi,z)|}\leq \frac{(2C)^{\mu}}{|n^{2}-i_{1}^{2}|\cdots |n^{2}-i_{\mu}^{2}|}, \ \ \ x\in X_{\xi}(i_{1}, \cdots, i_{\mu}).
\end{equation}
By Lemma \ref{L:4}, we derive
\begin{equation}
\sum_{x\in X_{\xi}(i_{1}, \cdots, i_{\mu})}\frac{|B_{n}(x,z)|}{|B_{n}(\xi,z)|}\leq \frac{(10C)^{\mu}}{|n^{2}-i_{1}^{2}|\cdots |n^{2}-i_{\mu}^{2}|}.
\end{equation}
Combining Lemma \ref{L:3}, it yields
\begin{equation}
\begin{split}
\sum_{x\in X_{\xi,\mu}}\frac{|B_{n}(x,z)|}{|B_{n}(\xi,z)|}&\leq \sum_{(i_{1}, \cdots, i_{\mu})}\frac{(10C)^{\mu}}{|n^{2}-i_{1}^{2}|\cdots |n^{2}-i_{\mu}^{2}|}\leq\left(\sum_{i\in(n+2\mathbb{Z})\setminus \{\pm n\}} \frac{10C}{|n^{2}-i^{2}|}\right)^{\mu}\\
&\leq (10C)^{\mu}\left(\frac{1+\log n}{n}\right)^{\mu}\leq \left(\frac{20C \log n}{n}\right)^{\mu}.
\end{split}
\end{equation}
Thus,
\begin{equation}
\sum_{x\in X_{\xi,\mu}} |B_{n}(x,z)|\leq |B_{n}(\xi,z)|\cdot \left(\frac{20C \log n}{n}\right)^{\mu}.
\end{equation}
Hence,
\begin{equation}
\sum_{x\in X_{\xi}}\frac{|B_{n}(x,z)|}{|B_{n}(\xi,z)|}\leq \sum_{\mu=1}^{\infty}\left(\frac{20C \log n}{n}\right)^{\mu}.
\end{equation}
We can choose $n_{1}\in \mathbb{N}^{+}$ such that $\frac{20C \log n}{n}\leq \frac{1}{2}$ for $n\geq n_{1}$. Then
\begin{equation}
\sum_{x\in X_{\xi}}\frac{|B_{n}(x,z)|}{|B_{n}(\xi,z)|}\leq \frac{40C \log n}{n}.
\end{equation}
Therefore, there exists $n_{1}$ such that for $n\geq n_{1}$,
\begin{equation}
\sum_{x\in X_{\xi}}|B_{n}(x,z)|\leq |B_{n}(\xi,z)|\cdot \frac{K \log n}{n},
\end{equation}
where $K:=40C=40 \left(|\frac{b}{2}q^{m_{1}}|+|\frac{c}{2}q^{m_{2}}|+|\frac{b^{2}}{2c}q^{2m_{1}-m_{2}}|\right)$.
\end{proof}
\section{Proof of Theorem \ref{T:1}}
Note that
\begin{equation}
V(\pm 2)=\frac{bq^{m_{1}}}{2}, \ \ \ V(\pm 4)=\frac{cq^{m_{2}}}{2}
\end{equation}
and
\begin{equation}
\|v\|^{2}=\frac{1}{2}\Big(b^{2}q^{2m_{1}}+c^{2}q^{2m_{2}}\Big),
\end{equation}
by Lemma \ref{L:1}, we have
\begin{equation}
\gamma_{n}=\pm2\Big(V(2n)+\sum_{k=1}^{\infty} \beta_{k}(n,z)\Big)\Big(1+O(q^{2\cdot\min\{m_{1},m_{2}\}})\Big),
\end{equation}
where
\begin{equation}
\label{E:belta}
\begin{split}
\beta_{k}(n,z)&=\sum_{j_{1},\cdots,j_{k}\neq \pm n}\frac{V(n-j_{1})V(j_{1}-j_{2})\cdots V(j_{k-1}-j_{k})V(j_{k}+n)}
{(n^{2}-j_{1}^{2}+z)\cdots(n^{2}-j_{k}^{2}+z)}\\
&=\sum_{j_{1},\cdots,j_{k}\neq \pm n}\frac{V(n+j_{1})V(j_{2}-j_{1})\cdots V(j_{k}-j_{k-1})V(n-j_{k})}
{(n^{2}-j_{1}^{2}+z)\cdots(n^{2}-j_{k}^{2}+z)}
\end{split}
\end{equation}
and $z=O(q)$. Moreover, all series converge absolutely and uniformly for sufficiently small $q$.
Note that
\begin{equation}
(n+j_{1})+(j_{2}-j_{1})+\cdots+(j_{k}-j_{k-1})+(n-j_{k})=2n,
\end{equation}
and
\begin{equation}
\frac{V(n+j_{1})V(j_{2}-j_{1})\cdots V(j_{k}-j_{k-1})V(n-j_{k})}
{(n^{2}-j_{1}^{2}+z)\cdots(n^{2}-j_{k}^{2}+z)}\neq 0
\end{equation}
when
\begin{equation}
(n+j_{1}),(j_{2}-j_{1}),\cdots,(j_{k}-j_{k-1}),(n-j_{k})\in \{\pm2, \pm4\}.
\end{equation}
We distinguish three cases to discuss.
{\noindent\bf Case 1.} If $m_{1}> \frac{m_{2}}{2}$, then
\begin{equation}
V(n+j_{1})V(j_{2}-j_{1})\cdots V(j_{k}-j_{k-1})V(n-j_{k})
\end{equation}
is a monomial in $q$ of degree at least
\begin{equation}
\frac{m_{2}}{4}\cdot\Big[|n+j_{1}|+|j_{2}-j_{1}|+\cdots+|j_{k}-j_{k-1}|+|n-j_{k}|\Big].
\end{equation}
The minimum case occurs when
\begin{equation}
(n+j_{1}),(j_{2}-j_{1}),\cdots,(j_{k}-j_{k-1}),(n-j_{k})\in \{\pm4\},
\end{equation}
then
\begin{equation}
(n+j_{1})+(j_{2}-j_{1})+\cdots+(j_{k}-j_{k-1})+(n-j_{k})\in 4\mathbb{Z},
\end{equation}
while
\begin{equation}
(n+j_{1})+(j_{2}-j_{1})+\cdots+(j_{k}-j_{k-1})+(n-j_{k})=2n.
\end{equation}
If $n$ is even, i.e., $n=2m$, $m\in\mathbb{Z}_{>0}$, since
\begin{equation}
\begin{split}
&|n+j_{1}|+|j_{2}-j_{1}|+\cdots+|j_{k}-j_{k-1}|+|n-j_{k}|\\
&\geq (n+j_{1})+(j_{2}-j_{1})+\cdots+(j_{k}-j_{k-1})+(n-j_{k})\\
&=2n=4m,
\end{split}
\end{equation}
we obtain
\begin{equation}
V(n+j_{1})V(j_{2}-j_{1})\cdots V(j_{k}-j_{k-1})V(n-j_{k})
\end{equation}
is a monomial in $q$ of degree at least $m_{2}\cdot m$. Such monomial in $q$ of degree $m_{2}\cdot m$ corresponds to a walk from $-n$ to $n$ with vertices $j_{1}, j_{2}, \cdots, j_{k}\neq \pm n$ and positive steps of length $4$. Thus,
\begin{equation}
\gamma_{2m}=\pm P_{2m}(t)q^{m_{2}m}+O\Big(q^{m_{2}(m-\frac{1}{2})+m_{1}}\Big),
\end{equation}
where
\begin{equation}
P_{2m}(t)q^{m_{2}m}=2\Big(V(4m)+\sum_{k=1}^{\infty} \beta_{k}(2m,z)\Big).
\end{equation}
We have
\begin{equation}
P_{2}(t)q^{m_{2}}=2 V(4)=c q^{m_{2}}
\end{equation}
and
\begin{equation}
\begin{split}
&P_{2m}(t)q^{m_{2}m}=2 \sum_{k=1}^{\infty} \beta_{k}(2m,z)\\
&=2 \cdot \Big(\frac{c}{2}\Big)^{m} \cdot q^{m_{2}m}\cdot \prod_{j=1}^{m-1}\Big((4m^{2}-(-2m+4j)^{2})\Big)^{-1}\\
&=\frac{32\cdot(\frac{c}{2})^{m}\cdot q^{m_{2}m}}{2^{4m}[(m-1)!]^{2}}
\end{split}
\end{equation}
for $m\geq 2$. Therefore,
\begin{equation}
\gamma_{2m}=\left|\frac{32\cdot(\frac{c}{2})^{m}\cdot q^{m_{2}m}}{2^{4m}[(m-1)!]^{2}}\right|+O\Big(q^{m_{2}(m-\frac{1}{2})+m_{1}}\Big).
\end{equation}
If $n$ is odd, i.e., $n=2m-1$, $m\in\mathbb{Z}_{>0}$, since
\begin{equation}
(n+j_{1})+(j_{2}-j_{1})+\cdots+(j_{k}-j_{k-1})+(n-j_{k})=2n=4m-2,
\end{equation}
then
\begin{equation}
V(n+j_{1})V(j_{2}-j_{1})\cdots V(j_{k}-j_{k-1})V(n-j_{k})
\end{equation}
is a monomial in $q$ of degree at least
\begin{equation}
\begin{split}
&\frac{m_{2}}{4}\cdot\Big[|n+j_{1}|+|j_{2}-j_{1}|+\cdots+|j_{k}-j_{k-1}|+|n-j_{k}|\Big]+m_{1}-\frac{m_{2}}{2}\\
&\geq \frac{m_{2}}{4}\cdot(4m-2)+m_{1}-\frac{m_{2}}{2}\\
&=m_{2}(m-1)+m_{1}.
\end{split}
\end{equation}
Such monomial in $q$ of degree $m_{2}(m-1)+m_{1}$ corresponds to a walk from $-n$ to $n$ with vertices $j_{1}, j_{2}, \cdots, j_{k}\neq \pm n$ and positive steps. Specifically, except for one step with length $2$, the others are of length $4$. Thus,
\begin{equation}
\gamma_{2m-1}=\pm P_{2m-1}(t)q^{m_{2}(m-1)+m_{1}}+O\Big(q^{m_{2}(m-\frac{3}{2})+2m_{1}}\Big),
\end{equation}
where
\begin{equation}
P_{2m-1}(t)q^{m_{2}(m-1)+m_{1}}=2\Big(V(4m-2)+\sum_{k=1}^{\infty} \beta_{k}(2m-1,z)\Big).
\end{equation}
We obtain
\begin{equation}
P_{1}(t) q^{m_{1}}=2 V(2)=bq^{m_{1}},
\end{equation}
\begin{equation}
P_{3}(t) q^{m_{1}+m_{2}}=2 \sum_{k=1}^{\infty} \beta_{k}(3,z)=2 \left(\frac{bq^{m_{1}}}{2}\right)\left(\frac{cq^{m_{2}}}{2}\right)\left(\frac{1}{3^{2}-1^{2}}+\frac{1}{3^{2}-1^{2}}\right)
=\frac{bcq^{m_{1}+m_{2}}}{8},
\end{equation}
\begin{equation}
\begin{split}
&P_{5}(t) q^{m_{1}+2m_{2}}=2 \sum_{k=1}^{\infty} \beta_{k}(5,z)\\
&=2 \left(\frac{bq^{m_{1}}}{2}\right)\left(\frac{cq^{m_{2}}}{2}\right)^{2}
\left[\frac{1}{(5^{2}-3^{2})(5^{2}-1^{2})}+\frac{1}{(5^{2}-1^{2})(5^{2}-1^{2})}+\frac{1}{(5^{2}-3^{2})(5^{2}-1^{2})}\right]\\
&=\frac{bc^{2}q^{m_{1}+2m_{2}}}{3^{2}\cdot 2^{6}},
\end{split}
\end{equation}
\begin{equation}
\begin{split}
&P_{2m-1}(t)q^{m_{2}(m-1)+m_{1}}=2 \sum_{k=1}^{\infty} \beta_{k}(2m-1,z)\\
&=2 \left(\frac{c}{2}\right)^{m-1}\cdot \left(\frac{b}{2}\right)\cdot q^{m_{2}(m-1)+m_{1}}\cdot
\Big\{\sum_{i=1}^{m-2} \prod_{j=1}^{i}\Big[(2m-1)^{2}-(-2m+1+4j)^{2}\Big]^{-1}\\
&\cdot \prod_{j=i}^{m-2}\Big[(2m-1)^{2}-(-2m+3+4j)^{2}\Big]^{-1}+\prod_{j=0}^{m-2}\Big[(2m-1)^{2}-(-2m+3+4j)^{2}\Big]^{-1}\\
&+\prod_{j=1}^{m-1}\Big[(2m-1)^{2}-(-2m+1+4j)^{2}\Big]^{-1}\Big\}\\
&=2 \left(\frac{c}{2}\right)^{m-1}\cdot \left(\frac{b}{2}\right)\cdot q^{m_{2}(m-1)+m_{1}}\cdot \frac{8}{2^{3m}}\cdot
\Big\{ \frac{1}{[(2m-3)!!]^{2}}\cdot \sum_{i=1}^{m-2} \frac{(2m-2i-3)!!\cdot (2i-1)!!}{i!\cdot (m-1-i)!} \\
&+ \frac{2}{(2m-3)!!\cdot(m-1)!} \Big\}.
\end{split}
\end{equation}
Hence,
\begin{equation}
\gamma_{1}= |bq^{m_{1}}|+O(q^{2m_{1}-\frac{m_{2}}{2}}), \ \ \ \gamma_{3}=\frac{|bcq^{m_{1}+m_{2}}|}{8}+O(q^{2m_{1}+\frac{m_{2}}{2}}),
\end{equation}
\begin{equation}
\begin{split}
&\gamma_{2m-1}= \Big|\left(\frac{c}{2}\right)^{m-1}\cdot b\cdot q^{m_{2}(m-1)+m_{1}}\cdot \frac{8}{2^{3m}}\cdot
\Big\{ \frac{1}{[(2m-3)!!]^{2}}\\
& \cdot \sum_{i=1}^{m-2} \frac{(2m-2i-3)!!\cdot (2i-1)!!}{i!\cdot (m-1-i)!}
+ \frac{2}{(2m-3)!!\cdot(m-1)!} \Big\}\Big|+O\Big(q^{m_{2}(m-\frac{3}{2})+2m_{1}}\Big)
\end{split}
\end{equation}
for $m\geq 3$.
{\noindent\bf Case 2.} If $m_{1}< \frac{m_{2}}{2}$, then
\begin{equation}
V(n+j_{1})V(j_{2}-j_{1})\cdots V(j_{k}-j_{k-1})V(n-j_{k})
\end{equation}
is a monomial in $q$ of degree at least
\begin{equation}
\frac{m_{1}}{2}\cdot\Big[|n+j_{1}|+|j_{2}-j_{1}|+\cdots+|j_{k}-j_{k-1}|+|n-j_{k}|\Big].
\end{equation}
The minimum case occurs when
\begin{equation}
(n+j_{1}),(j_{2}-j_{1}),\cdots,(j_{k}-j_{k-1}),(n-j_{k})\in \{\pm2\},
\end{equation}
then
\begin{equation}
(n+j_{1})+(j_{2}-j_{1})+\cdots+(j_{k}-j_{k-1})+(n-j_{k})\in 2\mathbb{Z},
\end{equation}
while
\begin{equation}
(n+j_{1})+(j_{2}-j_{1})+\cdots+(j_{k}-j_{k-1})+(n-j_{k})=2n.
\end{equation}
Since
\begin{equation}
\begin{split}
&|n+j_{1}|+|j_{2}-j_{1}|+\cdots+|j_{k}-j_{k-1}|+|n-j_{k}|\\
&\geq (n+j_{1})+(j_{2}-j_{1})+\cdots+(j_{k}-j_{k-1})+(n-j_{k})\\
&=2n,
\end{split}
\end{equation}
we have
\begin{equation}
V(n+j_{1})V(j_{2}-j_{1})\cdots V(j_{k}-j_{k-1})V(n-j_{k})
\end{equation}
is a monomial in $q$ of degree at least $m_{1}\cdot n$. Such monomial in $q$ of degree $m_{1}\cdot n$ corresponds to a walk from $-n$ to $n$ with vertices $j_{1}, j_{2}, \cdots, j_{k}\neq \pm n$ and positive steps of length $2$. Thus,
\begin{equation}
\gamma_{n}=\pm P_{n}(t) q^{m_{1}n}+O(q^{m_{1}n+m_{2}-2m_{1}}),
\end{equation}
where
\begin{equation}
P_{n}(t) q^{m_{1}n}=2\Big(V(2n)+\sum_{k=1}^{\infty} \beta_{k}(n,z)\Big).
\end{equation}
We deduce
\begin{equation}
P_{1}(t)q^{m_{1}}=2V(2)=bq^{m_{1}}, \ \ \ P_{2}(t)q^{2m_{1}}=2 \left(V(4)+\frac{\Big(\frac{bq^{m_{1}}}{2}\Big)^{2}}{2^{2}}\right)=cq^{m_{2}}+\frac{b^{2}q^{2m_{1}}}{8},
\end{equation}
\begin{equation}
\begin{split}
&P_{n}(t) q^{m_{1}n}=2\sum_{k=1}^{\infty} \beta_{k}(n,z)\\
&=2\cdot\Big(\frac{b}{2}\Big)^{n}\cdot q^{m_{1}\cdot n}\cdot \prod_{j=1}^{n-1}(n^{2}-(-n+2j)^{2})^{-1}\\
&=2\cdot \frac{(\frac{b}{2})^{n}\cdot q^{m_{1}\cdot n}}{4^{n-1}\cdot[(n-1)!]^{2}}\\
&=\frac{8\cdot b^{n}\cdot q^{m_{1}n}}{2^{3n}\cdot [(n-1)!]^{2}}
\end{split}
\end{equation}
for $n\geq 3$. Therefore,
\begin{equation}
\gamma_{1}=\left|bq^{m_{1}}\right|+O(q^{m_{2}-m_{1}}), \ \ \ \gamma_{2}=\left|cq^{m_{2}}+\frac{b^{2}q^{2m_{1}}}{8}\right|+O(q^{m_{2}}),
\end{equation}
\begin{equation}
\gamma_{n}=\left|\frac{8\cdot b^{n}\cdot q^{m_{1}n}}{2^{3n}\cdot [(n-1)!]^{2}}\right|+O(q^{m_{1}n+m_{2}-2m_{1}})
\end{equation}
for $n\geq 3$.
{\noindent\bf Case 3.} If $m_{1}= \frac{m_{2}}{2}$, then
\begin{equation}
V(n+j_{1})V(j_{2}-j_{1})\cdots V(j_{k}-j_{k-1})V(n-j_{k})
\end{equation}
is a monomial in $q$ of degree
\begin{equation}
\frac{m_{1}}{2}\cdot\Big[|n+j_{1}|+|j_{2}-j_{1}|+\cdots+|j_{k}-j_{k-1}|+|n-j_{k}|\Big].
\end{equation}
Since
\begin{equation}
\begin{split}
&|n+j_{1}|+|j_{2}-j_{1}|+\cdots+|j_{k}-j_{k-1}|+|n-j_{k}|\\
&\geq (n+j_{1})+(j_{2}-j_{1})+\cdots+(j_{k}-j_{k-1})+(n-j_{k})\\
&=2n,
\end{split}
\end{equation}
we have
\begin{equation}
V(n+j_{1})V(j_{2}-j_{1})\cdots V(j_{k}-j_{k-1})V(n-j_{k})
\end{equation}
is a monomial in $q$ of degree at least $m_{1}\cdot n$, and each such monomial of degree $m_{1}\cdot n$ corresponds to a walk from $-n$ to $n$ with vertices $j_{1}, j_{2}, \cdots, j_{k}\neq \pm n$ and positive steps of length $2$ or $4$. The minimum case occurs when $n+j_{1}$, $j_{2}-j_{1}$, $\cdots$, $j_{k}-j_{k-1}$ and $n-j_{k}$ are of the same sign,
while the second smallest degree is for one step of length $2$ with opposite sign. Thus,
\begin{equation}
\gamma_{n}=\pm P_{n}(t) q^{m_{1}n}+O(q^{m_{1}(n+2)}),
\end{equation}
where
\begin{equation}
P_{n}(t) q^{m_{1}n}=2\Big(V(2n)+\sum_{k=1}^{\infty} \beta_{k}(n,z)\Big).
\end{equation}
We obtain
\begin{equation}
P_{1}(t)q^{m_{1}}=2V(2)=bq^{m_{1}}, \ \ \ P_{2}(t)q^{2m_{1}}=2 \left(V(4)+\frac{\Big(\frac{bq^{m_{1}}}{2}\Big)^{2}}{2^{2}}\right)=cq^{m_{2}}+\frac{b^{2}q^{2m_{1}}}{8},
\end{equation}
\begin{equation}
\begin{split}
&P_{n}(t) q^{m_{1}n}=2\sum_{k=1}^{\infty} \beta_{k}(n,z)\\
&=2\cdot P_{n}\Big(\frac{b}{2}\Big)\cdot q^{m_{1}\cdot n}\cdot \prod_{j=1}^{n-1}(n^{2}-(-n+2j)^{2})^{-1}\\
&=8\cdot \frac{P_{n}\big(\frac{b}{2}\big)\cdot q^{m_{1}\cdot n}}{2^{2n}\cdot[(n-1)!]^{2}}
\end{split}
\end{equation}
for $n\geq 3$. Therefore,
\begin{equation}
\gamma_{1}=\left|bq^{m_{1}}\right|+O(q^{3m_{1}}), \ \ \ \gamma_{2}=\left|cq^{m_{2}}+\frac{b^{2}q^{2m_{1}}}{8}\right|+O(q^{4m_{1}}),
\end{equation}
\begin{equation}
\gamma_{n}=\left|8\cdot \frac{P_{n}\big(\frac{b}{2}\big)\cdot q^{m_{1}\cdot n}}{2^{2n}\cdot[(n-1)!]^{2}}\right|+O(q^{m_{1}(n+2)})
\end{equation}
for $n\geq 3$, where $P_{n}\big(\frac{b}{2}\big)$ is a polynomial of $\frac{b}{2}$ with degree $n$ and leading coefficient $1$.
Specifically, if $n$ is even, i.e., $n=2m$, $m\in\mathbb{Z}_{>0}$, then
\begin{equation}
(n+j_{1})+(j_{2}-j_{1})+\cdots+(j_{k}-j_{k-1})+(n-j_{k})=4m,
\end{equation}
which implies that each walk from $-2m$ to $2m$ has an even number of steps of length $2$. We have
\begin{equation}
P_{2m}\Big(\frac{b}{2}\Big)=\prod_{k=1}^{m}\Big(\Big(\frac{b}{2}\Big)^{2}-x_{k}\Big),
\end{equation}
where $x_{k}$, $k=1,\cdots, m$, depend on $m$. By Lemma \ref{L:2}, we obtain all even gaps closed except the first $k$ if $\big(\frac{b}{2}\big)^{2}=-8c\big(k+\frac{1}{2}\big)^{2}$, which yields
\begin{equation}
P_{2m}\Big(\frac{b}{2}\Big)=\prod_{k=1}^{m}\Big(\Big(\frac{b}{2}\Big)^{2}+8c\Big(k-\frac{1}{2}\Big)^{2}\Big).
\end{equation}
Hence,
\begin{equation}
\gamma_{2m}=8\left|\frac{\prod_{k=1}^{m}\Big(\Big(\frac{b}{2}\Big)^{2}+8c\Big(k-\frac{1}{2}\Big)^{2}\Big)\cdot q^{2m_{1}\cdot m}}{2^{4m}\cdot[(2m-1)!]^{2}}\right|+O(q^{2m_{1}(m+1)})
\end{equation}
for $m\geq 2$. If $n$ is odd, i.e., $n=2m-1$, $m\in\mathbb{Z}_{>0}$, then
\begin{equation}
(n+j_{1})+(j_{2}-j_{1})+\cdots+(j_{k}-j_{k-1})+(n-j_{k})=2n=4m-2,
\end{equation}
which implies that each walk from $-(2m-1)$ to $2m-1$ has an odd number of steps of length $2$. We have
\begin{equation}
P_{2m-1}\Big(\frac{b}{2}\Big)=\frac{b}{2}\prod_{k=1}^{m-1}\Big(\Big(\frac{b}{2}\Big)^{2}-y_{k}\Big),
\end{equation}
where $y_{k}$, $k=1,\cdots, m-1$, depend on $m$. By Lemma \ref{L:2}, we deduce
\begin{equation}
P_{2m-1}\Big(\frac{b}{2}\Big)=\frac{b}{2}\prod_{k=1}^{m-1}\Big(\Big(\frac{b}{2}\Big)^{2}+8ck^{2}\Big).
\end{equation}
Hence,
\begin{equation}
\gamma_{2m-1}=32\left|\frac{\frac{b}{2}\prod_{k=1}^{m-1}\Big(\Big(\frac{b}{2}\Big)^{2}+8ck^{2}\Big)\cdot q^{m_{1}\cdot (2m-1)}}{2^{4m}\cdot[(2m-2)!]^{2}}\right|+O(q^{m_{1}(2m+1)})
\end{equation}
for $m\geq 2$.
\section{Proof of Theorem \ref{T:2}}
Since $V(\pm 2)=\frac{b}{2}q^{m_{1}}$ and $V(\pm 4)=\frac{c}{2}q^{2m_{1}}$, we have
\begin{equation}
\|v\|^{2}=\frac{1}{2}\Big(b^{2}q^{2m_{1}}+c^{2}q^{4m_{1}}\Big).
\end{equation}
By Lemma \ref{L:1}, we get
\begin{equation}
\gamma_{n}=2\left|\sum_{x\in X}B_{n}(x,z)\right|\left(1+O\left(\frac{1}{n^{2}}\right)\right),
\end{equation}
where $z=z_{n}$ depends on $n$, but $|z|<1$.
Set $\sigma_{n}:=\sum_{\xi\in X^{+}}B_{n}(\xi,0)=\sigma_{n}^{+}+\sigma_{n}^{-}$, where
$\sigma_{n}^{\pm}:=\sum_{\xi:B_{n}(\xi,0)\gtrless 0}B_{n}(\xi,0)$.
When $\xi\in X^{+}$,
\begin{equation}
B_{n}(\xi,0)=\frac{V(x_{1})\cdots V(x_{\nu+1})}{(n^{2}-j_{1}^{2})\cdots(n^{2}-j_{\nu}^{2})},
\end{equation}
where $x_{i}=2$ or $4$ for $i=1, \cdots, \nu+1$.
Note that $X\setminus X^{+}= \bigcup_{\xi\in X^{+}} X_{\xi}$; hence we can choose disjoint sets $X_{\xi}'\subset X_{\xi}$ so that
\begin{equation}
X\setminus X^{+}=\bigcup_{\xi\in X^{+}}X_{\xi}'.
\end{equation}
Then
\begin{equation}
\sum_{x\in X\setminus X^{+}}B_{n}(x,z)=\sum_{\xi\in X^{+}}\left(\sum_{x\in X_{\xi}'}B_{n}(x,z)\right),
\end{equation}
therefore, we have
\begin{equation}
\begin{split}
&\sum_{x\in X}B_{n}(x,z)=\sum_{\xi\in X^{+}}\left(B_{n}(\xi,z)+\sum_{x\in X_{\xi}'}B_{n}(x,z)\right)\\
&=\sum_{\xi:B_{n}(\xi,0)>0}\left(B_{n}(\xi,z)+\sum_{x\in X_{\xi}'}B_{n}(x,z)\right)
+\sum_{\xi:B_{n}(\xi,0)<0}\left(B_{n}(\xi,z)+\sum_{x\in X_{\xi}'}B_{n}(x,z)\right)\\
&=\Sigma^{+}+\Sigma^{-}=:\Sigma,
\end{split}
\end{equation}
where $\Sigma^{\pm}:=\sum_{\xi:B_{n}(\xi,0)\gtrless 0}\left(B_{n}(\xi,z)+\sum_{x\in X_{\xi}'}B_{n}(x,z)\right)$.
By Lemma \ref{L:3} and Lemma \ref{L:5}, there exists a constant $C_{1}>0$ such that
\begin{equation}
\left[1\mp C_{1}\frac{\log n}{n}\right]\sigma_{n}^{\pm}\leq\Sigma^{\pm}\leq \left[1\pm C_{1}\frac{\log n}{n}\right]\sigma_{n}^{\pm},
\end{equation}
which yields
\begin{equation}
\label{E:estimation}
\left|\frac{\Sigma}{\sigma_{n}}-1\right|\leq C_{1} \frac{|\sigma_{n}^{-}|+\sigma_{n}^{+}}{|\sigma_{n}|} \cdot \frac{\log n}{n}.
\end{equation}
If $\xi\in X^{+}$, then $V(x_{1})\cdots V(x_{\nu+1})$ is a monomial in $q$ of degree $\frac{m_{1}}{2}\cdot (x_{1}+\cdots+x_{\nu+1})=m_{1}\cdot n$. From Case 3 of Theorem \ref{T:1}, we have
\begin{equation}
\sigma_{2m}=\sum_{\xi\in X^{+}}B_{2m}(\xi,0)=\frac{q^{2m_{1}\cdot m}}{4^{2m-1}\cdot[(2m-1)!]^{2}}\cdot\prod_{k=1}^{m}\left(\left(\frac{b}{2}\right)^{2}+8c\left(k-\frac{1}{2}\right)^{2}\right)
\end{equation}
and
\begin{equation}
\sigma_{2m-1}=\sum_{\xi\in X^{+}}B_{2m-1}(\xi,0)=\frac{q^{m_{1}(2m-1)}}{4^{2m-2}\cdot[(2m-2)!]^{2}}\cdot\frac{b}{2}
\cdot\prod_{k=1}^{m-1}\left(\left(\frac{b}{2}\right)^{2}+8ck^{2}\right).
\end{equation}
Moreover, $\sigma_{2m}\neq 0$ when $\frac{b}{2}\neq 2\sqrt{-2c}\cdot (k-\frac{1}{2})$ and $\sigma_{2m-1}\neq 0$
when $\frac{b}{2}\neq 2\sqrt{-2c} \cdot k$, where $c<0$. So
\begin{equation}
\label{E:upper bound 1}
\frac{|\sigma_{2m}^{-}|+\sigma_{2m}^{+}}{|\sigma_{2m}|}
=\frac{\prod_{k=1}^{m}\left(1-\frac{b^{2}}{8c(2k-1)^{2}}\right)}{\prod_{k=1}^{m}\left|1+\frac{b^{2}}{8c(2k-1)^{2}}\right|}
\leq \frac{\prod_{k=1}^{\infty}\left(1-\frac{b^{2}}{8c(2k-1)^{2}}\right)}{\prod_{k=1}^{\infty}\left|1+\frac{b^{2}}{8c(2k-1)^{2}}\right|}
=\left|\frac{\cosh \left(\frac{b\pi}{4\sqrt{-2c}}\right)}{\cos \left(\frac{b\pi}{4\sqrt{-2c}}\right)}\right|.
\end{equation}
Similarly, we have
\begin{equation}
\label{E:upper bound 2}
\frac{|\sigma_{2m-1}^{-}|+\sigma_{2m-1}^{+}}{|\sigma_{2m-1}|}\leq \left|\frac{\sinh \left(\frac{b\pi}{4\sqrt{-2c}}\right)}{\sin \left(\frac{b\pi}{4\sqrt{-2c}}\right)}\right|.
\end{equation}
By (\ref{E:estimation}), we obtain
\begin{equation}
\sum_{x\in X}B_{2m}(x,z)=\sigma_{2m}\left[1+O\left(\frac{\log m}{m}\right)\right]=\left(\sum_{\xi\in X^{+}}B_{2m}(\xi,0)\right)\left[1+O\left(\frac{\log m}{m}\right)\right]
\end{equation}
and
\begin{equation}
\sum_{x\in X}B_{2m-1}(x,z)=\sigma_{2m-1}\left[1+O\left(\frac{\log m}{m}\right)\right]=\left(\sum_{\xi\in X^{+}}B_{2m-1}(\xi,0)\right)\left[1+O\left(\frac{\log m}{m}\right)\right].
\end{equation}
Notice that
\begin{equation}
\cos \left(\frac{b\pi}{4\sqrt{-2c}}\right)=\prod_{k=1}^{\infty}\left(1+\frac{b^{2}}{8c(2k-1)^{2}}\right)
\end{equation}
and
\begin{equation}
\sin \left(\frac{b\pi}{4\sqrt{-2c}}\right)=\frac{b\pi}{4\sqrt{-2c}}\prod_{k=1}^{\infty}\left(1+\frac{b^{2}}{8c(2k)^{2}}\right),
\end{equation}
then
\begin{equation}
\cos \left(\frac{b\pi}{4\sqrt{-2c}}\right)=\prod_{k=1}^{m}\left(1+\frac{b^{2}}{8c(2k-1)^{2}}\right)\left[1+O\left(\frac{1}{m}\right)\right]
\end{equation}
and
\begin{equation}
\sin \left(\frac{b\pi}{4\sqrt{-2c}}\right)=\frac{b\pi}{4\sqrt{-2c}}\prod_{k=1}^{m-1}\left(1+\frac{b^{2}}{8c(2k)^{2}}\right)\left[1+O\left(\frac{1}{m}\right)\right].
\end{equation}
Hence,
\begin{equation}
\sum_{\xi\in X^{+}}B_{2m}(\xi,0)=\frac{q^{2m_{1}\cdot m}\cdot(-1)^{m}\cdot c^{m}}{2^{3m-2}\cdot[(2m-2)!!]^{2}}\cdot \cos\left(\frac{b\pi}{4\sqrt{-2c}}\right)\cdot \left[1+O\left(\frac{1}{m}\right)\right]
\end{equation}
and
\begin{equation}
\sum_{\xi\in X^{+}}B_{2m-1}(\xi,0)=\frac{q^{m_{1}(2m-1)}\cdot(-1)^{m-1}\cdot c^{m-1}\cdot\sqrt{-2c}}{2^{3m-4}\cdot[(2m-3)!!]^{2}\cdot \pi}
\cdot \sin\left(\frac{b\pi}{4\sqrt{-2c}}\right)\cdot \left[1+O\left(\frac{1}{m}\right)\right].
\end{equation}
Combining (\ref{E:estimation}), (\ref{E:upper bound 1}) and (\ref{E:upper bound 2}), we deduce
\begin{equation}
\begin{split}
&\sum_{x\in X}B_{2m}(x,z)=\left(\sum_{\xi\in X^{+}}B_{2m}(\xi,0)\right)\left[1+O\left(\frac{\log m}{m}\right)\right]\\
&=\frac{q^{2m_{1}\cdot m}\cdot(-1)^{m}\cdot c^{m}}{2^{3m-2}\cdot[(2m-2)!!]^{2}}\cdot \cos\left(\frac{b\pi}{4\sqrt{-2c}}\right)\cdot\left[1+O\left(\frac{\log m}{m}\right)\right]
\end{split}
\end{equation}
and
\begin{equation}
\begin{split}
&\sum_{x\in X}B_{2m-1}(x,z)=\left(\sum_{\xi\in X^{+}}B_{2m-1}(\xi,0)\right)\left[1+O\left(\frac{\log m}{m}\right)\right]\\
&=\frac{q^{m_{1}(2m-1)}\cdot(-1)^{m-1}\cdot c^{m-1}\cdot\sqrt{-2c}}{2^{3m-4}\cdot[(2m-3)!!]^{2}\cdot \pi}
\cdot \sin\left(\frac{b\pi}{4\sqrt{-2c}}\right)\cdot\left[1+O\left(\frac{\log m}{m}\right)\right].
\end{split}
\end{equation}
Therefore,
\begin{equation}
\gamma_{2m}=\frac{q^{2m_{1}\cdot m}\cdot |c|^{m}}{2^{3m-3}\cdot[(2m-2)!!]^{2}}\cdot \left|\cos\left(\frac{b\pi}{4\sqrt{-2c}}\right)\right|\cdot\left[1+O\left(\frac{\log m}{m}\right)\right]
\end{equation}
and
\begin{equation}
\gamma_{2m-1}=\frac{q^{m_{1}(2m-1)}\cdot |c|^{m-1}\cdot\sqrt{-2c}}{2^{3m-5}\cdot[(2m-3)!!]^{2}\cdot \pi}
\cdot \left|\sin\left(\frac{b\pi}{4\sqrt{-2c}}\right)\right|\cdot\left[1+O\left(\frac{\log m}{m}\right)\right].
\end{equation}
\end{document} |
\begin{document}
\begin{center}
{\LARGE On the Stability of Symmetric Periodic Orbits of \\
the Elliptic Sitnikov Problem}\\
\vskip 0.3cm
Xiuli Cen\footnote{This author is supported by the National Natural Science Foundation of China (Grant No. 11801582).}\\
School of Mathematics (Zhuhai), Sun Yat-sen University, \\
Zhuhai, Guangdong 519082, China \\
E-mail: {\tt [email protected]} \\
\vskip 0.2cm
Xuhua Cheng\footnote{This author is supported by the National Natural Science Foundation of China (Grant No. 11601257).} \\
Department of Applied Mathematics, Hebei University of Technology, \\ Tianjin 300130, China\\
E-mail: {\tt [email protected]}
\vskip 0.2cm
Zaitang Huang\footnote{This author is supported by the Guangxi Natural Science Foundation (Grant No. 2018JJA110052).}\\
School of Mathematics and Statistics, Nanning Normal University,\\ Nanning 530023, China\\
E-mail: {\tt [email protected]}\\
\vskip 0.2cm
Meirong Zhang\footnote{Correspondence author. This author is supported by the National Natural Science Foundation of China (Grant No. 11790273).}\\
Department of Mathematical Sciences, Tsinghua University, \\ Beijing 100084, China\\
E-mail: {\tt [email protected]}
\end{center}
\vskip 0.2cm
\begin{abstract}
Motivated by the recent works on the stability of symmetric periodic orbits of the elliptic Sitnikov problem, for time-periodic Newtonian equations with symmetries, we will study symmetric periodic solutions which are emanated from nonconstant periodic solutions of autonomous equations. By using the theory of Hill's equations, we will first deduce in this paper a criterion for the linearized stability and instability of periodic solutions which are odd in time. Such a criterion is complementary to that for periodic solutions which are even in time, obtained recently by the present authors. Applying these criteria to the elliptic Sitnikov problem, we will prove in an analytical way that the odd $(2p,p)$-periodic solutions of the elliptic Sitnikov problem are hyperbolic and therefore are Lyapunov unstable when the eccentricity is small, while the corresponding even $(2p,p)$-periodic solutions are elliptic and linearized stable. These are the first analytical results on the stability of nonconstant periodic orbits of the elliptic Sitnikov problem.
\end{abstract}
{\bf Mathematics Subject Classification (2010):} 34D20; 34C25; 34C23
{\bf Keywords:} Elliptic Sitnikov problem; periodic solution; symmetric solution; linearized stability/instability; Hill's equation; hyperbolic periodic solution; elliptic periodic solution.
\section{Introduction} \setcounter{section}{1} \setcounter{equation}{0} \label{main-result}
The elliptic Sitnikov problem, denoted by $(S_e)$, is the simplest model in the restricted $3$-body problems \cite{S60}. By assuming that the two primaries with equal masses are moving in a circular or an elliptic orbit of the $2$-body problem of the eccentricity $e\in [0, 1)$, the Sitnikov problem describes the motion of the infinitesimal mass moving on the straight line orthogonal to the plane of motion of the primaries,
whose governing equation was given in \cite{BLO94, LO08} and will be stated as Eq. \x{se} in \S \ref{Sitnikov} of this paper. When $e=0$, $(S_0)$ is called the circular Sitnikov problem, whose equation, stated as Eq. \x{s0}, is an autonomous scalar Newtonian or Lagrangian equation. For $e\in(0,1)$, the equation for $(S_e)$ is a nonlinear scalar Newtonian equation which is $2\pi$-periodic in time.
There is a long history and a rigorous study on motions of problem $(S_e)$, covering the following topics.
$\bullet$\ {\bf Oscillation and expressions of motions:} The motions of the circular Sitnikov problem can be expressed using various elliptic functions in an implicit way \cite{BLO94, F03, LS90, S60}. It is also found that the elliptic Sitnikov problem admits oscillatory motions. See the bibliography of \cite{LO08} for some historic references on this topic.
$\bullet$\ {\bf Existence and construction of periodic orbits:} Due to the symmetries of the elliptic Sitnikov problem, many interesting periodic orbits have been obtained in \cite{BLO94, LO08, LS80, O16, OR10}, mainly by using the bifurcation method and global continuation.
$\bullet$\ {\bf Stability and linearized stability of motions:} This is a central topic in dynamical systems \cite{O17, SM71}. For example, $(S_e)$ has the origin as an equilibrium which can be considered as a $2\pi$-periodic solution. In case the equilibrium is elliptic, its Lyapunov stability can be studied using the third order approximation developed by Ortega \cite{O96} and extended in \cite{LLYZ03}. See \cite[\S 6]{LO08} for details. As for nonconstant, even (in time) periodic solutions of $(S_e)$ which are emanated from the corresponding solutions of $(S_0)$, the stability and linearized stability are studied in very recent papers \cite{GNR18, GNRR18, M18, ZCC18}. Most of these are based on the theory for Hill's equations. Though some analytical formulas have been derived, many results of these are numerical due to the difficulties caused by nonconstant periodic solutions.
In this paper we continue the study for the stability and linearized stability of nonconstant, symmetric (in time) periodic solutions of $(S_e)$. Our aim is to provide some analytical results. In order to make such an analytical approach be applicable to more general problems, we consider the following second-order nonlinear scalar Newtonian equation
\begin{equation} \label{xe}
\ddot x+ F(x,t,e)=0.
\end{equation}
Here $F(x,t,e)$ is a smooth function of $(x,t,e)\in {\mathbb R}^3$ fulfilling the following symmetries
\begin{equation} \label{Sy1}
\left\{\begin{array}{l} F(-x,t,e) \equiv -F(x,t,e), \\
F(x,-t,e) \equiv F(x,t,e),\\
F(x,t+2\pi,e) \equiv F(x,t,e),\\
F(x,t,0)\equiv f(x), \\
x f(x)> 0\mbox{ for }x\ne 0.
\end{array}\right.
\end{equation}
These symmetries are verified by the Sitnikov problem $(S_e)$. In particular, when $e=0$, the starting equation
\begin{equation} \label{x}
\ddot x+f(x)=0
\end{equation}
is autonomous and has the unique equilibrium $x=0$. Obviously, $f(x)$ is also odd in $x$.
Let $m, \ p\in \N$ be integers. We say that $x(t)$ is an $({m,p})$-periodic solution of Eq. \x{xe}, if $x(t)$ is a ${2m\pi}$-periodic solution of \x{xe} and has precisely $2p$ zeros in intervals $[t_0,t_0+{2m\pi})$, $t_0\in {\mathbb R}$.
Because of the autonomy and the complete integrability, all $({m,p})$-periodic solutions of Eq. \x{x} are clear. In particular, with suitable choice of $({m,p})$, Eq. \x{x} admits the $({m,p})$-periodic solutions $\vp_{m,p}(t)$ and $\phi_{m,p}(t)$, which are respectively even and odd in time $t$. These are the symmetric $({m,p})$-periodic solutions of Eq. \x{x} we are interested in. Due to the autonomy of Eq. \x{x}, both $\vp_{m,p}(t)$ and $\phi_{m,p}(t)$ have the minimal period ${2m\pi}/p$.
From bifurcation theory, it is known that, under some non-degeneracy conditions, Eq. \x{xe} admits families of $({m,p})$-periodic solutions $\vp_{m,p}(t,e)$ and $\phi_{m,p}(t,e)$, $0 \le e \ll 1$, such that
\[
\left\{ \begin{array}{l}
\vp_{m,p}(t,0)\equiv\vp_{m,p}(t), \mbox{ and } \phi_{m,p}(t,0)\equiv\phi_{m,p}(t),\\
\vp_{m,p}(t,e) \mbox{ is even in } t, \ \vp_{m,p}(0,e)>0, \mbox{ and } \vp_{m,p}(t+m\pi,e) \equiv - \vp_{m,p}(t,e), \\
\phi_{m,p}(t,e) \mbox{ is odd in } t, \ \dot\phi_{m,p}(0,e)>0, \mbox{ and } \phi_{m,p}(t+m\pi,e) \equiv - \phi_{m,p}(t,e).
\end{array}
\right.
\]
They are called the even and the odd $({m,p})$-periodic solutions of Eq. \x{xe}, respectively.
Generally speaking, when $e>0$, $\vp_{m,p}(t,e)$ and $\phi_{m,p}(t,e)$ have the minimal period ${2m\pi}$, not ${2m\pi}/p$. For more details, see Theorem \ref{M1}. For the elliptic Sitnikov problem $(S_e)$, such symmetric periodic solutions have been studied extensively in \cite{BLO94, LO08, O16}. Moreover, some interesting global continuations of these solutions are also obtained. See, for example, \cite[Theorem 3.1]{LO08} and \cite[Theorem 1]{O16}.
Since the linearization equations of \x{xe} are Hill's equations with parameter $e$ \cite{MW66}, the linearized stability/instability of these periodic solutions $\vp_{m,p}(t,e)$ and $\phi_{m,p}(t,e)$ are related with the traces $\tau_{m,p}(e)$ of the corresponding Poincar\'e matrixes. For $e=0$, one has $\tau_{m,p}(0)=2$ because Eq. \x{x} is autonomous and $\vp_{m,p}(t)$ and $\phi_{m,p}(t)$ are parabolic. Hence the signs of $\tau'_{m,p}(0)=\frac{\,{\rm d} \tau_{m,p}(e)}{\,{\rm d} e}|_{e=0}$, if they are nonzero, can yield the linearized stability or instability.
As for even $({m,p})$-periodic solutions $\vp_{m,p}(t,e)$, a formula of $\tau'_{m,p}(0)$ has been obtained in \cite{ZCC18} and will be restated as \x{tau-e1} of this paper.
One of the main results of this paper is to derive the corresponding formula of $\tau'_{m,p}(0)$ for odd $({m,p})$-periodic solutions $\phi_{m,p}(t,e)$. See formula \x{dtau0} in \S \ref{criteria}. Note that formulas \x{dtau0} and \x{tau-e1} for $\tau'_{m,p}(0)$ are involved of nonconstant periodic solutions $\vp_{m,p}(t)$ and $\phi_{m,p}(t)$ of the autonomous equation \x{x}, which are not known explicitly.
By applying these formulas to the elliptic Sitnikov problem $(S_e)$, we can obtain the following analytical results on the stability or instability for some families of symmetric periodic solutions.
\begin{Theorem} \label{M5-7}
For those frequencies $({m,p})=(2p,p)$ where $p\in \N$ is arbitrary, we have the following results.
{\rm (i)} For the odd $(2p,p)$-periodic solutions $\phi_{2p,p}(t,e)$, one has
\(
\tau'_{2p, p}(0)>0.
\)
Consequently, for $e>0$ small, $\phi_{2p,p}(t,e)$ is hyperbolic and Lyapunov unstable.
{\rm (ii)} For the even $(2p,p)$-periodic solutions $\vp_{2p,p}(t,e)$, one has
\(
\tau'_{2p, p}(0)<0.
\)
Consequently, for $e>0$ small, $\vp_{2p,p}(t,e)$ is elliptic and linearized stable.
\end{Theorem}
It seems to us that these are the first analytical results on the stability or instability for the nonconstant symmetric periodic solutions of the elliptic Sitnikov problem $(S_e)$.
The organization of the paper is as follows. In \S \ref{Hill}, we will introduce some notions for Hill's equations. The linearization equations of autonomous equation \x{x} along symmetric periodic solutions will be discussed with the emphasis on the relation between the fundamental solutions of linearization equations and the solutions of Eq. \x{x} themselves. See Lemma \ref{psi12}. Moreover, a relation between the Poincar\'e matrixes and the period function of the periodic solutions of Eq. \x{x} will be found in Lemma \ref{hbn}. These results may be of independent interests. In \S \ref{criteria}, we will first give the bifurcation result on odd $({m,p})$-periodic solutions $\phi_{m,p}(t,e)$ of Eq. \x{xe}. See Theorem \ref{M1}. Then we will derive the formula of $\tau'_{m,p}(0)$ in Theorem \ref{M2}. Finally, in \S \ref{Sitnikov}, we will use the formulas of $\tau'_{m,p}(0)$ to analyze the elliptic Sitnikov problem $(S_e)$. The results of Theorem \ref{M5-7} will be proved in \S \ref{odd} and \S \ref{even}.
Note from Theorem \ref{M5-7} that we have only obtained analytical results for some families of symmetric periodic solutions with very specific frequencies $({m,p})=(2p,p)$, because we are dealing with nonconstant periodic solutions. In fact, it is found numerically and analytically in \cite{GNRR18, ZCC18} that the stability/instability depend on frequencies in a delicate way. As for the elliptic Sitnikov problem, we will prove in Theorems \ref{M4} and \ref{M6} that $\tau'_{m,p}(0)$ are always $0$ for both odd and even $({m,p})$-periodic solutions when frequencies $({m,p})$ satisfy $m/(2p)\not\in \N$.
The remaining frequencies are $({m,p})=(2np, p)$, $n\ge 2$. For odd $(2np,p)$-periodic solutions, numerical simulation shows that $\tau'_{2np,p}(0)$ are always positive and $\phi_{2np,p}(t,e)$ will lead to instability. For even $(2np,p)$-periodic solutions, we will prove in Lemma \ref{rels} that the signs of $\tau'_{2np,p}(0)$ differ from that of the odd ones by a factor $(-1)^n$. Hence some even solutions are linearized stable, while the others are unstable. These observations will be stated as a conjecture at the end of the paper.
\iffalse
Let us mention some important ones, among a lot of existence results.
$\bullet$\ u In \cite[Theorem 3.1]{LO08}, Llibre and Ortega gave the following result. Let integers $p,\ m$ be in condition \x{mp}.
Then there exists $e_{m,p}\in(0,1]$ and a family of solutions of problem $(S_e)$, $\vp_{m,p}(t,e)$, $e\in [0,e_{m,p})$ such that $\vp_{m,p}(t,e)$ is even, ${2m\pi}$-periodic in $t$, $\vp_{m,p}(0,e)>0$, and has precisely $2p$ zeros in one period. Moreover,
a sharp estimate on the possible maximal eccentricity $e_{m,p}$ is also obtained there.
These are called the {\it even} periodic orbits of the $({m,p})$-type, because they have the shape like $\cos(p t/m)$. The original proof in \cite{LO08} is to use global continuation theory. However, if no estimate on $e_{m,p}$ is considered, these solutions can also obtained from the Implicit Function Theorem (IFT) with the starting periodic solutions $\vp_{m,p}(t) :=\vp_{m,p}(t,0)$ being nonconstant ${2m\pi}$-periodic solutions of the circular Sitnikov problem $(S_0)$ of the $({m,p})$-type.
$\bullet$\ u With the same choice of $({m,p})$ as in \x{mp}, an appropriate translation of $\vp_{m,p}(t)$ in $t$ can lead to an odd, ${2m\pi}$-periodic solution $\phi_{m,p}(t)$ of problem $(S_0)$ of the $({m,p})$-type. By the IFT again, one has then a smooth family of solutions of problem $(S_e)$, $\phi_{m,p}(t,e)$, $e\in [0,e_{m,p}^*)$ such that $\phi_{m,p}(t,e)$ is odd, ${2m\pi}$-periodic in $t$, $\dot\phi_{m,p}(0,e)>0$, and has precisely $2p$ zeros in one period. For details, see Theorem \ref{M1}. These are then called {\it odd} periodic solutions of the $({m,p})$-type.
$\bullet$\ u In \cite[Theorem 1]{O16}, Ortega proved a very interesting result, i.e. for any $m\in \N$, the above family $\phi_{1,m}(t,e)$ of odd periodic orbits of the $(1,m)$-type is {\it uniquely, globally} defined. That is, for any $e\in[0,1)$, such a ${2m\pi}$-periodic solution $\phi_{1,m}(t,e)$ is existent and unique. Such a uniqueness is obtained from some property on solutions of the linearized equations satisfying the Dirichlet boundary conditions.
\frac} \def\fa{\forall\,i
\section{Periodic Solutions and Linearization of Autonomous Equations} \setcounter{equation}{0} \label{Hill}
\subsection{Periodic solutions of autonomous equations} We consider the autonomous equation \x{x} with the symmetries as before. By introducing
\begin{equation} \label{Ex}
E(x):=\int_0^x f(u) \,{\rm d}u, \qquad x\in {\mathbb R},
\end{equation}
an even function such that $E(0)=0$ and $E(x)>0$ for $x\ne 0$, we know that solutions $x(t)$ of \x{x} satisfy
\begin{equation} \label{ener}
C_h: \qquad \frac{\footnotesize 1}{\footnotesize 2} \dot x^2(t) + E(x(t)) \equiv h,
\end{equation}
where $h\in [0, +\infty)$. For $h=0$, \x{ener} corresponds to the equilibrium $x(t)\equiv 0$. For
\[
0< h < E_{\max}:=\sup_{x\in {\mathbb R}} E(x),
\]
$C_h$ consists of a nonconstant periodic orbit in the phase plane, whose minimal period is denoted by $T=T(h)>0$. We will not write down $T$ explicitly and refer to \cite{L91} for details.
Because of the symmetries of $f(x)$, we are interested in the following two classes of periodic solutions of Eq. \x{x}.
{\bf Odd periodic solutions:} For
\begin{equation} \label{v}
\eta\in \left(0,\eta_{\max}\right),\qquad \eta_{\max}:=\sqrt{2 E_{\max}},
\end{equation}
let $x=S(t)=S(t,\eta)$ be the solution of \x{x} satisfying the initial value conditions
\begin{equation} \label{ini}
\left(x(0), \dot x(0)\right)=(0,\eta).
\end{equation}
Then $S(t)$ is a periodic solution of \x{x} of the minimal period
\begin{equation} \label{Tv}
T=T(h),\qquad \mbox{where } h=\eta^2/2,
\end{equation}
with the following symmetries
\begin{equation} \label{Os1}
S(-t) \equiv -S(t) \quad \mbox{ and } \quad S(t+T/2)\equiv - S(t).
\end{equation}
Moreover, $S(t)>0$ is strictly increasing on $(0,T/4)$.
{\bf Even periodic solutions:} For
\[
\xi\in \left(0,+\infty\right),
\]
let $x=C(t)=C(t,\xi)$ be the solution of \x{x} satisfying the initial value conditions
\begin{equation} \label{ini2}
\left(x(0), \dot x(0)\right)=(\xi,0).
\end{equation}
Then $C(t)$ is a periodic solution of \x{x} of the minimal period
\[
T=T(h),\qquad \mbox{where } h=E(\xi),
\]
with the following symmetries
\begin{equation} \label{Os2}
C(-t)\equiv C(t) \quad \mbox{ and } \quad C(t+T/2)\equiv - C(t).
\end{equation}
Moreover, $C(t)>0$ is strictly decreasing on $(0,T/4)$.
From \x{Os1} and \x{Os2}, one sees that
\begin{equation} \label{Os12}
S(T/2-t) \equiv S(t) \quad \mbox{ and } \quad C(T/2-t) \equiv -C(t).
\end{equation}
The solutions $S(t)$ and $C(t)$ are also called $T/2$-anti-periodic. Like the sine and cosine, these solutions are related in the following way.
\begin{Lemma} \label{SC}
Suppose that $\eta$ and $\xi$ satisfy
\begin{equation} \label{sc1}
\eta^2/2=E(\xi)=:h.
\end{equation}
By setting $T=T(h)$, the odd and the even periodic solutions $S(t)=S(t,\eta)$ and $C(t)=C(t,\xi)$ are related via
\begin{equation} \label{sc2}
S(t+T/4) \equiv C(t) \quad \mbox{ and } \quad C(t+T/4) \equiv -S(t).
\end{equation}
\end{Lemma}
\subsection{Traces of Hill's equations} We need some general results for Hill's equations \cite{MW66}. Let $q: {\mathbb R}\to {\mathbb R}$ be a $T$-periodic locally Lebesgue integrable function and consider the Hill's equation
\begin{equation} \label{he}
\ddot y + q(t) y=0,\qquad t\in {\mathbb R}.
\end{equation}
As usually, we use $y= \psi_i(t) =\psi_i(t,q)$, $i=1,2$ to denote the fundamental solutions of Eq. \x{he}, i.e. the solutions of \x{he} satisfying initial conditions $(\psi_1(0),\dot\psi_1(0)) = (1,0)$ and $(\psi_2(0),\dot\psi_2(0)) = (0,1)$ respectively.
The $T$-periodic Poincar\'e matrix of Eq. \x{he} is
\[
P=P_T=\matt{a}{b}{c}{d}:= \matt{\psi_1(T)}{\psi_2(T)}{\dot\psi_1(T)}{\dot\psi_2(T)}.
\]
The Liouville law for Eq. \x{he} asserts that
\begin{equation} \label{Ll}
\det P_T = a d - bc =+1.
\end{equation}
The trace of the $T$-Poincar\'e matrix $P_T$ is
\[
\tau=\tau_T:= {\rm tr}(P_T)= a+d= \psi_1(T)+\dot\psi_2(T).
\]
Because of \x{Ll}, we know that (i) in case $|\tau|<2$, \x{he} is elliptic and is stable, (ii) in case $|\tau|>2$, \x{he} is hyperbolic and is unstable, and (iii) the case $|\tau|=2$ corresponds to the parabolicity of Eq. \x{he} which can be either stable or unstable.
Being considered as functionals of potentials $q$, all of the above objects are Fr\'echet differentiable in $q\in L^1({\mathbb R}/T{\mathbb Z})$, the Lebesgue space endowed with the $L^1$ norm $\|\cdot\|_{L^1}$.
\begin{Lemma} \label{tau'0} {\rm (\cite[Lemma 2.2]{ZCC18})}
The Fr\'echet derivative of the trace $\tau: L^1({\mathbb R}/T{\mathbb Z})\to {\mathbb R}$ at $q$ is
\begin{equation} \label{dtau}
\frac{\pa \tau}{\pa q}(h)=\int_0^T K(s) h(s) \,{\rm d}s\qquad \forall\, h\in L^1({\mathbb R}/T{\mathbb Z}).
\end{equation}
Here, by using the fundamental solutions $\psi_i(s)=\psi_i(s,q)$,
\begin{equation} \label{ker0}
K(s):=-\psi_2(T) \psi^2_1(s) +\left(\psi_1(T)-\dot \psi_2(T)\right) \psi_1(s)\psi_2(s) +\dot \psi_1(T) \psi^2_2(s).
\end{equation}
\end{Lemma}
\subsection{Linearization of autonomous equations} We consider a nonconstant $T$-periodic solution $x=\phi(t)$ of the autonomous equation \x{x}. Here $T$ is not necessarily the minimal period of $\phi(t)$. Then the linearization equation of \x{x} along the solution $\phi(t)$ is the Hill's equation \x{he}, where
\begin{equation} \label{qt}
q(t):= f'(\phi(t))
\end{equation}
is a $T$-periodic potential.
In the sequel, we consider
\begin{equation} \label{pss}
\phi(t):=S(t,\eta)\quad \mbox{ and } \quad q(t):= f'(S(t,\eta)).
\end{equation}
Here $S(t,\eta)$ is an odd periodic solution of \x{x} of the minimal period $T$ as in \x{Tv}. Then one has the following important observations.
\begin{Lemma} \label{psi12}
Using the solutions $S(t,\eta)$ of initial value problems, the fundamental solutions $\psi_i(t)=\psi_i(t,q)$ of Eq. \x{he} are given by
\begin{eqnarray}\label{psi10}
\psi_1(t)\hh & = & \hh
\frac{1}{\eta}\pas{\frac{\pa S}{\pa t}}{(t,\eta)} \quad \mbox{ and } \quad
\dot\psi_1(t)
=-\frac{f\left(S(t,\eta)\right)}{\eta},\\
\label{psi20}
\psi_2(t)\hh & = & \hh \pas{\frac{\pa S}{\pa \eta}}{(t,\eta)}\quad \mbox{ and } \quad \dot\psi_2(t)= \pas{\frac{\pa^2 S}{\pa t\pa \eta}}{(t,\eta)}.
\end{eqnarray}
\end{Lemma}
\noindent{\bf Proof} \quad Recall that $S(t,\eta)$ satisfies
\begin{eqnarray} \label{phit}
\EM\ddot S(t,\eta) +f\left(S(t,\eta)\right) = 0,\\
\label{ini-v}
\EM \left(S(0,\eta),\dot S(0,\eta)\right)=(0,\eta).
\end{eqnarray}
Differentiating \x{phit} with respect to $t$, we know that $y(t):=\pas{\frac{\pa S}{\pa t}}{(t,\eta)}= \dot S(t,\eta)$ satisfies Eq. \x{he} and the initial values
$$
(y(0),\dot y(0))=\left(\dot S(0,\eta),\ddot S(0,\eta)\right)=\left(\dot S(0,\eta),-f\left(S(0,\eta)\right)\right)=\left(\eta,0\right) = \eta(1,0).
$$
$$
Hence we have
\[
\psi_1(t)\equiv{\dot S(t,\eta)}/\eta\quad \mbox{ and } \quad \dot\psi_1(t)\equiv{\ddot S(t,\eta)}/\eta=-f\left(S(t,\eta)\right)/\eta,
\]
the equalities in \x{psi10}.
On the other hand, by differentiating \x{phit} and \x{ini-v} with respect to $\eta$, we know that the variational equation for $y(t):= \pas{\frac{\pa S}{\pa \eta}}{(t,\eta)}$ is just Eq. \x{he} and the initial values are $(y(0),\dot y(0))=(0,1)$. Thus $\psi_2(t) \equiv \pas{\frac{\pa S}{\pa \eta}}{(t,\eta)}$. As a consequence,
\[
\dot\psi_2(t) \equiv \frac{\pa}{\pa t}\left(\pas{\frac{\pa S}{\pa \eta}}{(t,\eta)}\right)
=\pas{\frac{\pa^2 S}{\pa t\pa \eta}}{(t,\eta)}.
\]
Thus we have the equalities in \x{psi20}.\qed
Since $f'(x)$ is even in $x$, it follows from \x{Os1} and \x{pss} that the minimal period of $q(t)$ is actually $T/2$. Because of this, we consider the Poincar\'e matrixes of Eq. \x{he} with different periods
\[
\hat P:=P_{T/2}\quad \mbox{ and } \quad \hat{P}_n:=P_{n T/2},\quad n\in \N.
\]
Using the fundamental solutions $\psi_i(t)$, these are
\[
\hat P=\matt{\psi_1(T/2)}{\psi_2(T/2)}{\dot\psi_1(T/2)}{\dot\psi_2(T/2)}\quad \mbox{ and } \quad \hat{P}_n=\matt{\psi_1(n T/2)}{\psi_2(n T/2)}{\dot\psi_1(n T/2)}{\dot\psi_2(n T/2)}.
\]
\begin{Lemma} \label{PM}
By letting
\begin{equation} \label{B1}
\hat{b} :=\psi_2(T/2)\quad \mbox{ and } \quad \hat b_n:=\psi_2(n T/2),
\end{equation}
one has
\begin{equation} \label{Ptp}
\hat P =\matt{-1}{\hat{b}}{0}{-1}\quad \mbox{ and } \quad \hat P_n =\matt{(-1)^n}{\hat b_n}{0}{(-1)^n},
\end{equation}
and the constants $\hat{b}, \ \hat{b}_n$ are related via
\begin{equation} \label{B2}
\hat b_n=(-1)^{n+1}n\hat{b}.
\end{equation}
\end{Lemma}
\noindent{\bf Proof} \quad From \x{Os1} and their derivatives, one has
\[
\left(S(T/2),\dot S(T/2)\right)=\left(-S(0), -\dot S(0)\right)=\left(0, -\eta\right).
\]
By \x{psi10}, we have
$$
\left(\psi_1(T/2),\dot\psi_1(T/2)\right)=\left(\dot S(T/2),-f(S(T/2))\right)/\eta=\left(-1,0\right),
$$
i.e. the first column of $\hat P$ is $(-1,0)^\top$. Moreover, it follows from \x{Ll} that $\dot \psi_2(T/2)=-1$. This gives the first result of \x{Ptp}.
For general $n\in \N$, one has then
\[
\hat{P}_n = \hat P^n= \matt{-1}{\hat{b}} {0} {-1}^n = \matt{(-1)^n}{{(-1)^{n+1}n \hat{b}}} {0} {(-1)^n}.
\]
Hence we have all equalities of the lemma. \qed
Using the period function $T(h)$ of orbit $C_h$ of Eq. \x{x}, we have the following relation.
\begin{Lemma} \label{hbn}
Suppose that $T(h)$ is differentiable in $h$. Then
\begin{equation} \label{B3}
\hat{b}_n= (-1)^{n+1}n \frac{\eta^2}{2} \left.\frac{\,{\rm d} T(h)}{\,{\rm d} h}\right|_{\eta^2/2}= (-1)^{n+1}n h T'(h),
\end{equation}
where $h=\eta^2/2$ and $'=\frac{\,{\rm d}}{\,{\rm d} h}$.
\end{Lemma}
\noindent{\bf Proof} \quad Since we are considering odd periodic solutions $S(t,\eta)$, we know from the second equality of \x{Os1} that
\[
S{(T(h)/2,\eta)} \equiv 0
\]
for all $\eta$ as in \x{v}, where $h=\eta^2/2$ is as in \x{Tv}. Differentiating it with respect to $\eta$, we obtain
\[
\pas{\frac{\pa S}{\pa t}}{(T(h)/2,\eta)} T'(h) \frac{\eta}{2} + \pas{\frac{\pa S}{\pa \eta}}{(T(h)/2,\eta)}=0.
\]
By \x{psi10} and \x{psi20}, we have
\[
\pas{\frac{\pa S}{\pa t}}{(T(h)/2,\eta)} =\eta \psi_1(T/2)=-\eta\quad \mbox{ and } \quad \pas{\frac{\pa S}{\pa \eta}}{(T(h)/2,\eta)}= \psi_2(T/2) = \hat b.
\]
See the proof of Lemma \ref{PM}. Thus $\hat b=(\eta^2/2) T'(\eta^2/2)$. Combining with \x{B2}, we obtain result \x{B3} for general $n$.\qed
\begin{Remark}\label{nd20}
{\rm (i) From Lemmas \ref{PM} and \ref{hbn}, we have the following equivalence relations
\begin{equation} \label{nd11}
\hat{b}\ne 0 \Longleftrightarrow \hat{b}_n\ne 0 \Longleftrightarrow T'(h)\ne 0.
\end{equation}
One can notice that the former two conditions mean that $\phi(t)=S(t,\eta)$ is parabolic-unstable, while the last means that $\phi(t)$ is Lyapunov unstable because the periodic orbits inside a neighborhood of $C_h$ will have different periods.
(ii) For even periodic solutions $x=C(t)=C(t,\xi)$ of Eq. \x{x}, results analogous to those in Lemmas \ref{psi12}---\ref{hbn} have been deduced in \cite{ZCC18} in a similar way. }
\end{Remark}
\section{A Stability Criterion for Odd Periodic Solutions} \setcounter{equation}{0} \label{criteria}
\subsection{Bifurcations of odd periodic solutions} For $\eta>0$, we use $x=X(t,\eta,e)$ to denote the solution of problem \x{xe}-\x{ini}. In particular, when $e=0$, one has
\begin{equation}\label{xs}
X(t,\eta,0)\equiv S(t,\eta),
\end{equation}
the solution of problem \x{x}-\x{ini}.
Let $m\in \N$ and $p\in \N$. Suppose that there exists $h_{m,p}$ such that $C_{h_{m,p}}$ of \x{ener} is a periodic orbit of Eq. \x{x} of the minimal period ${2m\pi}/p$, i.e.
\begin{equation} \label{Tm}
T(h_{{m,p}})= {2m\pi}/p.
\end{equation}
Due to the autonomy and the symmetries of Eq. \x{x}, $C_{h_{m,p}}$ can be presented using either odd or even periodic solutions of Eq. \x{x}. In fact, by defining
\begin{equation} \label{vpm}
\phi_{m,p}(t):= S(t,\eta_\pp), \qquad \mbox{where }\eta_\pp: = \sqrt{2 h_{m,p}},
\end{equation}
$\phi_{m,p}(t)$ is then an odd periodic solution of Eq. \x{x} of the minimal period ${2m\pi}/p$. More symmetries of $\phi_{m,p}(t)$ can be found from \S 2.1. In particular, $\phi_{m,p}(t)$ is an $({m,p})$-periodic solution of \x{x} and satisfies
\begin{equation} \label{ps0}
\phi_{m,p}(t+m\pi/p)\equiv -\phi_{m,p}(t).
\end{equation}
This implies that $\phi_{m,p}(m \pi)=S(m\pi, \eta_\pp)=0$, i.e.
\begin{equation} \label{veq}
X(m\pi,\eta_\pp,0) =0.
\end{equation}
See \x{xs}. As for the dependence of these solutions on $({m,p})$, one has $\phi_{mn,pn}(t) \equiv \phi_{m,p}(t)$ for any $n\in \N$.
A bifurcation result for odd $({m,p})$-periodic solutions of \x{xe} emanating from $\phi_{m,p}(t)$ is as follows.
\begin{Theorem} \label{M1}
Let $m, \ p$ and $h_{m,p}, \ \eta_\pp$ be as above. Assume that
\begin{equation} \label{nd90}
T'(h_{m,p})\ne 0.
\end{equation}
Then there exist $e_{m,p}>0$ and a smooth function $E_{m,p}(e)$ of $e\in [0,e_{m,p})$ such that
\begin{equation} \label{Vs}
E_{m,p}(0)=\eta_\pp\quad \mbox{ and } \quad X(m\pi,E_{m,p}(e),e)=0 \mbox{ for } e\in [0,e_{m,p}).
\end{equation}
Hence, for any $e\in [0,e_{m,p})$,
\begin{equation} \label{psie}
\phi_{m,p}(t,e):= X(t,E_{m,p}(e),e)
\end{equation}
is an odd $({m,p})$-periodic solution of the non-autonomous equation \x{xe}, with the following symmetry
\begin{equation} \label{p01}
\phi_{m,p}(t+m\pi,e) \equiv -\phi_{m,p}(t,e).
\end{equation}
\end{Theorem}
\noindent{\bf Proof} \quad Let $\eta=\eta_\pp$ be in Lemmas \ref{psi12}---\ref{hbn}. Then $T={2m\pi}/p$ and $m\pi= p \cdot T/2$. Thus
\begin{eqnarray}\label{hbp}
\pas{\frac{\pa X}{\pa \eta}}{(m\pi,\eta_\pp,0)}\hh & = & \hh \pas{\frac{\pa S}{\pa \eta}}{(m\pi,\eta_\pp)}=\psi_2(m\pi)\qquad \mbox{(by \x{psi20})}\nonumber\\
\hh & = & \hh \hat b_p \qquad \mbox{(by \x{B1})} \nonumber\\
\hh & = & \hh (-1)^{p+1}p h_{m,p} T'(h_{m,p})\qquad \mbox{(by \x{B3})}\nonumber\\
\hh & \ne & \hh 0\qquad \mbox{(by \x{nd90})}.
\end{eqnarray}
Combining with \x{veq}, the existence of the function $E_{m,p}(e)$ as in \x{Vs} follows immediately from the Implicit Function Theorem (IFT).
Since $F(x,t,e)$ is odd in $x$, the solution $\phi_{m,p}(t,e)$ of \x{psie} is obviously odd in $t$. Moreover, $\phi_{m,p}(t,e)$ satisfies \x{p01} and is $({m,p})$-periodic. \qed
\begin{Remark} \label{M11}
{\rm (i) As seen from \x{nd11} of Remark \ref{nd20}, the non-degeneracy condition \x{nd90} is equivalent to the instability of the $({m,p})$-periodic solution $\phi_{m,p}(t)$ of Eq. \x{x}, in the linearized sense and/or in the Lyapunov sense.
(ii) Note that $\phi_{m,p}(t,0) \equiv \phi_{m,p}(t)$ is ${2m\pi}/p$-periodic. See \x{vpm}. Usually speaking, if $e>0$, the minimal period of $\phi_{m,p}(t,e)$ is ${2m\pi}$, not ${2m\pi}/p$.}
\end{Remark}
\subsection{A stability criterion for odd periodic solutions} We consider the family $\phi_{m,p}(t,e)$ of odd $({m,p})$-periodic solutions of Eq. \x{xe} as in Theorem \ref{M1}.
For $e\in[0,e_{m,p})$, the linearization equation of Eq. \x{xe} along $x=\phi_{m,p}(t,e)$ is the Hill's equation
\begin{equation} \label{He}
\ddot y + q(t,e)y=0, \qquad q(t,e):=\pas{\frac{\pa F}{\pa x}}{(\phi_{m,p}(t,e),t,e)}.
\end{equation}
Here the period is understood as $T={2m\pi}$. The corresponding trace is
\begin{equation} \label{Tre}
\tau_{m,p}(e):={\psi_1({2m\pi},e)}+{\dot\psi_2({2m\pi},e)}.
\end{equation}
Here $\psi_i(t,e)$ are fundamental solutions of Eq. \x{He}. When $e=0$, we have
\[
\phi_{m,p}(t,0)=\phi_{m,p}(t):= S(t,\eta_\pp)\quad \mbox{ and } \quad q(t,0)=q(t)= f'(S(t,\eta_\pp)).
\]
See \x{pss}.
\begin{Theorem}\label{M2}
Let $\phi_{m,p}(t)$ be the odd $({m,p})$-periodic solution of Eq. \x{x} verifying condition \x{nd90}. Denote
\begin{equation} \label{F23}
F_{23}(t):=\pas{\frac{\pa^2 F}{\pa t\pa e }}{(\phi_{m,p}(t),t,0)}.
\end{equation}
Then the derivative of the trace \x{Tre} at $e=0$ is
\begin{equation} \label{dtau0}
\tau'_{m,p}(0):= \pas{\frac{\,{\rm d}\tau_{m,p}(e)}{\,{\rm d} e}}{e=0} =- p T'(h_{m,p}) \int_0^\T F_{23}(t)\dot\phi_{m,p}(t)\,{\rm d}t.
\end{equation}
Here $h_{m,p}=\eta_\pp^2/2$ and $'=\frac{\,{\rm d}}{\,{\rm d} h}$.
\end{Theorem}
\noindent{\bf Proof} \quad In order to apply Lemma \ref{tau'0}, we need to consider the ${2m\pi}$-periodic Poincar\'e matrix $P$ of the linearization equation
\[
\ddot y+ q(t) y=0, \qquad \mbox{where } q(t):=f'(\phi_{m,p}(t)).
\]
Arguing as in the proof of \x{hbp}, by letting $T={2m\pi}/p$ in Lemmas \ref{psi12}---\ref{hbn} and noticing that ${2m\pi}=2p \cdot T/2$, we have
\[
\matt {\psi_1(2m\pi)} {\psi_2(2m\pi)} {\dot\psi_1(2m\pi)} {\dot\psi_2(2m\pi)} = \hat P_{2p}=\matt{1}{\hat b_{2p}}{0}{1},
\]
where
\begin{equation} \label{bmp}
\hat b_{2p}= \psi_2(2m\pi)= -2 p h_{m,p} T'(h_{m,p})=: b_{m,p}.
\end{equation}
See \x{B3} with $n=2p$. Thus the kernel of \x{ker0} is
\[
K(t) = -b_{m,p} \psi_1^2(t)= -\frac{b_{m,p}}{\eta_\pp^2} \dot\phi^2_{m,p}(t)\equiv p T'(h_{m,p})\dot\phi^2_{m,p}(t).
\]
Denote
\begin{equation} \label{phi}
\Phi(t):=\pas{\frac{\pa \phi_{m,p}(t,e)}{\pa e}}{(t,0)} \quad \mbox{ and } \quad F_{13}(t)
:=\pas{\frac{\pa^2 F}{\pa e\pa x}}{(\phi_{m,p}(t),t,0)}.
\end{equation}
From \x{He}, we have
\begin{eqnarray*}
h(t)\AND{:=}\pas{\frac{\pa q}{\pa e}}{(t,0)} = \left.\frac{\pa }{\pa e}\left(\pas{\frac{\pa F}{\pa x}}{(\phi_{m,p}(t,e),t,e)}\right)\right|_{(t,0)} \\
\hh & = & \hh\pas{\frac{\pa^2 F}{\pa x^2}}{(\phi_{m,p}(t),t,0)}\Phi(t) +F_{13}(t)\\
\AND{=:} f''(\phi(t)) \Phi(t)+F_{13}(t).
\end{eqnarray*}
Here, for simplicity, $\phi(t):=\phi_{m,p}(t)$. From \x{dtau}, we obtain
\begin{eqnarray} \label{tau0}
\tau'_{m,p}(0)\hh & = & \hh \int_0^\T K(t) h(t) dt
= p T'(h_{m,p})\int_0^\T\left(\Phi f''(\phi) + F_{13}\right)\dot\phi^2\,{\rm d}t.
\end{eqnarray}
Since $\phi_{m,p}(t,e)$ is ${2m\pi}$-periodic for any $e$, we know from the defining equality \x{phi} that $\Phi(t)$ is necessarily ${2m\pi}$-periodic. Moreover, $\Phi(t)$ satisfies the variational equation
\begin{equation} \label{phieq}
\ddot{\Phi} + q(t)\Phi + F_{3}(t) =0,
\end{equation}
where
\begin{eqnarray}\label{df3}
F_{3}(t)\AND{:=}\pas{\frac{\pa F}{\pa e}} {(\phi(t),t,0)},\nonumber\\
\dot F_{3}(t)\hh & = & \hh \frac{\,{\rm d}}{\,{\rm d}t}\left(\pas{\frac{\pa F}{\pa e}} {(\phi(t),t,0)}\right)=
\pas{\frac{\pa^2 F}{\pa x \pa e}}{(\phi(t),t,0)}\dot \phi(t)
+ \pas{\frac{\pa^2 F}{\pa t \pa e}}{(\phi(t),t,0)}\nonumber\\
\hh & = & \hh F_{13} (t) \dot \phi(t)+F_{23}(t).
\end{eqnarray}
Recall that we have Eq. \x{phit} for $\phi(t)$
and Eq. \x{phieq} for $\Phi(t)$. From these we can obtain the following equality
\begin{equation} \label{eqss}
\frac{\,{\rm d}}{\,{\rm d}t}\left(\dot \Phi \ddot \phi- \ddot \Phi \dot \phi\right) =
\left(\Phi f''(\phi) + F_{13}\right)\dot\phi^2 +F_{23} \dot \phi.
\end{equation}
In fact, by using Eq. \x{phit} and Eq. \x{phieq}, one has
\[
\dot \Phi \ddot \phi- \ddot \Phi \dot \phi= -\dot \Phi f(\phi) + \Phi q \dot \phi + F_3 \dot \phi.
\]
Thus the left-hand side of \x{eqss} is
\begin{eqnarray*}
\EM -\frac{\,{\rm d}}{\,{\rm d}t}\left(\dot \Phi f(\phi)\right) + \frac{\,{\rm d}}{\,{\rm d}t}\left(\Phi q \dot \phi\right) + \frac{\,{\rm d}}{\,{\rm d}t}\left(F_3 \dot \phi\right)\\
\hh & = & \hh -\ddot \Phi f(\phi) - \dot \Phi f'(\phi)\dot\phi + \dot\Phi q \dot \phi+\Phi \dot q \dot \phi +\Phi q \ddot \phi+F_3 \ddot \phi +\dot F_3 \dot \phi\\
\hh & = & \hh \left(\ddot \Phi+q \Phi +F_3\right) \ddot \phi +\left(-f'(\phi)+q \right) \dot \Phi \dot \phi +\Phi \dot q \dot \phi+\dot F_3 \dot \phi \qquad \mbox{(by \x{phit})}\\
\hh & = & \hh \Phi \dot q \dot \phi+\dot F_3 \dot \phi\qquad \mbox{(by \x{phieq} and \x{qt})}\\
\hh & = & \hh \Phi f''(\phi)\dot\phi^2 + F_{13}\dot\phi^2 +F_{23} \dot \phi\qquad \mbox{(by \x{qt} and \x{df3})}.
\end{eqnarray*}
Finally, as $\Phi(t)$ and $\phi(t)$ are ${2m\pi}$-periodic, by integrating \x{eqss} over $[0,{2m\pi}]$, we obtain
\[
\int_0^\T \left(\Phi f''(\phi) + F_{13}\right)\dot\phi^2\,{\rm d}t +\int_0^\T F_{23} \dot \phi\,{\rm d}t =0.
\]
Combining with \x{tau0}, we obtain the desired formula \x{dtau0}. \qed
Since $\tau_{m,p}(0)=2$, the role of formula \x{dtau0} is as follows.
\begin{Corollary} \label{M21}
{\rm (i)} If $\tau'_{m,p}(0)<0$, then $\phi_{m,p}(t,e)$ is elliptic and is linearized stable for $0<e\ll 1$.
{\rm (ii)} If $\tau'_{m,p}(0)>0$, then $\phi_{m,p}(t,e)$ is hyperbolic and is Lyapunov unstable for $0<e\ll 1$.
\end{Corollary}
\subsection{A stability criterion for even periodic solutions, revisited} The bifurcations and linearized stability of even $({m,p})$-periodic solutions of Eq. \x{xe} have been done in \cite{ZCC18}. In the present notations, we restate the results in \cite{ZCC18} as follows. For $\xi>0$, we use $x=\ul{X}(t,\xi,e)$ to denote the solution of problem \x{xe}-\x{ini2}. Let $m\in \N$ and $p\in \N$ and the energy $h_{m,p}$ be as in \x{Tm}. By taking $\xi_{m,p}>0$ such that
\[
E(\xi_{m,p})=h_{m,p},
\]
we know that
\[
\vp_{m,p}(t):= C(t,\xi_{m,p})= \ul{X}(t,\xi_{m,p},0)
\]
is an even $({m,p})$-periodic solution of \x{x} of the minimal period $T(h_{m,p})={2m\pi}/p$. From Lemmas 2.5 and 2.6 of \cite{ZCC18}, under the same non-degeneracy condition \x{nd90}, i.e. $T'(h_{m,p})\ne0$, one has from the IFT a smooth function $\Xi_{m,p}(e)$ of $e\in [0,\underline{e}_{m,p})$ such that $\Xi_{m,p}(0)=\xi_{m,p}$ and
\[
\dot \ul{X}(m\pi,\Xi_{m,p}(e),e)\equiv 0.
\]
Thus
\[
\vp_{m,p}(t,e) := \ul{X}(t,\Xi_{m,p}(e),e)
\]
defines a family of even $({m,p})$-periodic solutions of Eq. \x{xe} which are emanated from $\vp_{m,p}(t)$. Moreover, $\vp_{m,p}(t,e)$ is also $m\pi$-anti-periodic as in \x{p01}.
Let $\underline{\tau}_{m,p}(e)$ be the trace of the ${2m\pi}$-periodic Poincar\'e matrix of the linearization equation of \x{xe} along the solution $\vp_{m,p}(t,e)$. One has $\underline{\tau}_{m,p}(0)=2$ and the following formula.
\begin{Theorem} \label{M3} {\rm (\cite[Theorem 3.1]{ZCC18})}
With the notations above,
\begin{equation} \label{tau-e1}
\underline{\tau}'_{m,p}(0)= \left.\frac{\,{\rm d}\underline{\tau}_{m,p}(e)}{\,{\rm d} e}\right|_{e=0} = - p T'(h_{m,p}) \int_0^\T\underline{F}_{23}(t) \dot\vp_{m,p}(t)\,{\rm d}t,
\end{equation}
where
\begin{equation}
\label{F23e}
\underline{F}_{23}(t):=\pas{\frac{\pa^2 F}{\pa t\pa e }}{(\vp_{m,p}(t),t,0)}.
\end{equation}
\end{Theorem}
\begin{Remark} \label{M31}
{\rm For the case $m=1$, result \x{tau-e1} is proved in \cite{ZCC18}. See Formula (3.2) there. However, the coefficient there is expressed using $\dot {\underline{\psi}}_1(2\pi)$ and $f(\xi_{1,p})$, where $\underline{\psi}_1(t)$ is the first fundamental solution of the corresponding linearization equation. For general $m$, formula \x{tau-e1} can be deduced by a scaling of time. Moreover, arguing as in the deduction of \x{bmp}, the coefficient can be written in the present way. One can notice that the forms of formulas \x{dtau0} and \x{tau-e1} are the same.}
\end{Remark}
\section{Stability Results for the Elliptic Sitnikov Problem} \setcounter{equation}{0} \label{Sitnikov}
\subsection{Equations for the motions of the Sitnikov problems} After choosing the masses and the gravitational constant in an appropriate way, the governing equation for the motion of the infinitesimal mass in the elliptic Sitnikov problem $(S_e)$ is \cite{BLO94, LO08}
\begin{equation} \label{se}
\ddot x +F(x,t,e) =0,\qquad F(x,t,e):= \frac{x}{\left(x^2 + r^2(t,e) \right)^{3/2}}.
\end{equation}
Here $e\in [0, 1)$ is the eccentricity, and
\begin{equation}\label{r}
r(t,e) = r_0(1-e \cos u(t,e)), \qquad r_0:=1/2,
\end{equation}
where, after some translation of time, $u=u(t,e)$ is the solution of the Kepler's equation
\begin{equation} \label{Ke}
u - e\sin u =t.
\end{equation}
Note that the Kepler solution $u(t,e)$ is smooth in $(t,e)$ and satisfies
\[
u(-t,e) \equiv - u(t,e)\quad \mbox{ and } \quad u(t+2\pi,e) \equiv u(t,e) +2\pi.
\]
Consequently, $F(x,t,e)$ fulfills all requirements in \x{Sy1}. Moreover, when $e\in(0,1)$, the minimal period of $F(x,t,e)$ in $t$ is $2\pi$.
In particular, the circular Sitnikov problem $(S_0)$ is described by the autonomous equation
\begin{equation} \label{s0}
\ddot x + f(x)=0,\qquad f(x):= \frac{x}{\left(x^2 + r_0^2\right)^{3/2}}.
\end{equation}
For Eq. \x{s0}, the energy $E(x)$ in \x{Ex} is
\[
E(x)=\int_0^x f(u) \,{\rm d}u = 2- \frac{1}{ \sqrt{x^2+r_0^2}}.
\]
Solutions $x(t)$ of Eq. \x{s0} are on energy levels
\begin{equation} \label{H}
H(x,\dot x):=\frac{1}{2} \dot x^2 - \frac{1}{ \sqrt{x^2+r_0^2}}= h.
\end{equation}
Here the energy $h$ differs from that in \x{ener} by a constant $2$ and takes values from
\(
h\in[-2,+\infty).
\)
For $h=-2$, \x{H} corresponds to the origin which is the equilibrium of \x{s0}. For $h\in(-2,0)$, \x{H} corresponds to periodic orbits of \x{s0} whose minimal period is denoted by $T(h)$. It is not difficult to verify that
\[
\lim_{h\to -2+} T(h) = 2\pi/\sqrt{8} \quad \mbox{ and } \quad \lim_{h\to0-} T(h) =+\infty.
\]
Moreover, it is proved in \cite[Theorem C]{BLO94} that
\begin{equation} \label{Th1}
T'(h)=\frac{\,{\rm d} T(h)}{\,{\rm d} h} >0 \qquad \mbox{for } h\in (-2,0).
\end{equation}
Hence the origin is surrounded by a family of periodic orbits, whose minimal periods take values from $(2\pi/\sqrt{8},+\infty)$. For more facts on the dynamics of Eq. \x{s0}, see \cite{BLO94, LO08}.
To bifurcate the families $\phi_{m,p}(t,e)$ and $\vp_{m,p}(t,e)$ of $({m,p})$-periodic solutions of Eq. \x{se} which are respectively odd and even in $t$, the integers $m, \ p$ are required that ${2m\pi}/p\in (2\pi/\sqrt{8},+\infty)$, i.e.
\begin{equation} \label{mp1}
1\le p \le \nu_m := [\sqrt{8} m],\qquad m\in \N,
\end{equation}
because the non-degeneracy conditions \x{nd90} are ensured by \x{Th1}. Condition \x{mp1} is also used in \cite[\S 3]{LO08}. As before, we write $\phi_{m,p}(t,0)$ and $\vp_{m,p}(t,0)$ as $\phi_{m,p}(t)$ and $\vp_{m,p}(t)$ respectively. For these $({m,p})$-periodic solutions, it is convenient to call
\[
\varrho:={p}/{m}
\]
the rotation number. Condition \x{mp1} for $({m,p})$ is now equivalent to
\begin{equation} \label{mp}
\varrho\in (0,\sqrt{8}) \cap {\mathbb Q}.
\end{equation}
\subsection{Analytical results for stability of odd periodic orbits}\label{odd}
From the defining equalities \x{se}--\x{s0}, a direct computation can yield
\begin{equation} \label{f23}
\pas{\frac{\pa^2 F}{\pa t\pa e}}{(x,t,0)} =\frac{-3 x}{4\left(x^2+r^2_0\right)^{5/2}}\sin t.
\end{equation}
See also \cite[Formula (4.21)]{ZCC18}.
We first study the families $\phi_{m,p}(t,e)$ of odd $({m,p})$-periodic solutions of Eq. \x{se} for $m, \ p$ as in \x{mp1}. By \x{F23}, \x{dtau0} and \x{f23}, we have
\[
F_{23}(t)=\pas{\frac{\pa^2 F}{\pa t\pa e}}{(\phi_{m,p}(t),t,0)} = \frac{- 3 \phi_{m,p}(t)}{4\left(\phi_{m,p}^2(t)+r^2_0\right)^{5/2}}\sin t,
\]
and
\begin{eqnarray*}
\tau'_{m,p}(0)\hh & = & \hh - p T'(h_{m,p})\int_0^\T F_{23}(t)\dot \phi_{m,p}(t)\,{\rm d}t\\
\hh & = & \hh - \frac{\footnotesize 1}{\footnotesize 4} p T'(h_{m,p})\int_0^\T \frac{-3 \phi_{m,p}(t)\dot \phi_{m,p}(t)}{\left(\phi^2_{m,p}(t)+r^2_0\right)^{5/2}}\sin t\,{\rm d}t.
\end{eqnarray*}
Define
\begin{equation} \label{G}
G_{m,p}(t):={1}/{\left(\phi^2_{m,p}(t)+r^2_0\right)^{3/2}}.
\end{equation}
One has
\[
\dot G_{m,p}(t)= -{3 \phi_{m,p}(t)\dot \phi_{m,p}(t)}/{\left(\phi^2_{m,p}(t)+r^2_0\right)^{5/2}}.
\]
Integrating by parts, we know that $\tau'_{m,p}(0)$ can be written as
\begin{equation} \label{t11}
\tau'_{m,p}(0) =\frac{\footnotesize 1}{\footnotesize 4} p T'(h_{m,p})\int_0^\T G_{m,p}(t)\cos t\,{\rm d}t.
\end{equation}
Such an observation was also used in \cite{ZCC18} for the study of even periodic solutions.
\begin{Theorem} \label{M4}
One has $\tau'_{m,p}(0)=0$ if $({m,p})$ satisfies \x{mp1} and
\begin{equation} \label{pm0}
\varrho=\frac{p}{m}\ne \frac{1}{2}, \ \frac{1}{4}, \ \frac{1}{6},\ \cdots
\end{equation}
In particular, $\tau'_{m,p}(0)=0$ if $m$ is odd and $1\le p\le \nu_m$, or $m$ is even and $m/2+1 \le p \le \nu_m$.
\end{Theorem}
\noindent{\bf Proof} \quad Let us notice from \x{ps0} and \x{G} that the minimal period of $G_{m,p}(t)$ is $m\pi/p$. Moreover, $G_{m,p}(t)$ is even in $t$. Hence one has the $m\pi/p$-periodic Fourier expansion
\[
G_{m,p}(t) \equiv \sum_{n=0}^\infty a_n \cos\left(n \frac{2p t}{m}\right)= \sum_{n=0}^\infty a_n \cos\left(2n p \frac{t}{m}\right).
\]
Let us write $\cos t$ as $\cos \left( m \frac{t}{m}\right)$. By using the orthogonality of $\{\cos \left(n \frac{t}{m}\right): n \in {\mathbb Z}^+\}$ in the space $L^2[0,{2m\pi}]$, we know from \x{t11} that $\tau'_{m,p}(0)=0$ if $({m,p})$ satisfies $m\ne 2 n p$ for all $n\in \N$, i.e. if $\varrho$ satisfies \x{pm0}.
\qed
\iffalse
As a corollary, one has the following results on the odd ${2m\pi}$-periodic solutions.
\begin{Corollary}\label{M41}
One has $\tau'_{m,p}(0)=0$ if
$\bullet$\ $m$ is odd and $1\le p\le \nu_m$, or
$\bullet$\ $m$ is even and $m/2+1 \le p \le \nu_m$.
\end{Corollary}
\fi
\begin{Remark} \label{M42}
{\rm From Theorem \ref{M4}, the signs of $\tau'_{m,p}(0)$ depend on the frequencies $({m,p})$ in a delicate way. For example, we have no information on the stability of odd $({m,p})$-periodic orbits $\phi_{m,p}(t,e)$ for any odd number $m$. This phenomenon was also observed for the families $\vp_{m,p}(t,e)$ of even periodic solutions of Eq. \x{xe} and Eq. \x{se}. See \cite{ZCC18} and \cite{GNRR18}.}
\end{Remark}
In contrast to case \x{pm0}, we have $m/(2p)=n \in \N$, i.e. $m=2p n$, or equivalently,
\begin{equation} \label{pm09}
\varrho=\frac{1}{2n},\qquad n\in \N.
\end{equation}
In this case,
\begin{equation} \label{phint}
\phi_{2pn,p}(t)\equiv\phi_{2n,1}(t)=:\phi_n(t),
\end{equation}
which are the odd periodic solutions used by Ortega \cite{O16}. Note that $\phi_n(t)$ has the minimal period $T={2m\pi}/p=4n\pi$. More symmetries on $\phi_n(t)$ include
\begin{equation} \label{sys}
\left\{ \begin{array}{l} \phi_n(-t) \equiv -\phi_n(t), \\
\phi_n(t+2n\pi)\equiv -\phi_n(t),\\
\phi_n(2n\pi-t) \equiv \phi_n(t), \\
\phi_n(t)> 0 \quad \mbox{for } t\in(0,2n\pi),\\
\phi_n(t) \mbox{ is strictly increasing on $[0,n\pi]$.}
\end{array}\right.
\end{equation}
Here the third equality of \x{sys} is deduced from \x{Os12}. Passing to the function
\begin{equation} \label{Gnt}
G_n(t):=1/\left(\phi_n^2(t)+r^2_0\right)^{3/2},
\end{equation}
one has
\begin{equation} \label{sysn}
\left\{ \begin{array}{l} \mbox{$G_n(t)>0$ is even and has the minimal period $2n\pi$,} \\
G_n(2n\pi-t) \equiv G_n(t), \\
\mbox{$G_n(t)$ is strictly decreasing on $[0,n\pi]$.}
\end{array}\right.
\end{equation}
For the solution $\phi_n(t)$ as in \x{phint}, we can use the symmetries in \x{sysn} to obtain
\begin{eqnarray*}
\int_0^{{2m\pi}} G_n(t) \cos t\,{\rm d}t\hh & = & \hh\int_0^{2p\cdot 2n\pi} G_n(t) \cos t\,{\rm d}t\\
\hh & = & \hh 2p \int_0^{2n\pi}G_n(t) \cos t\,{\rm d}t\nonumber\\
\hh & = & \hh 2 p \left(\int_0^{n\pi} G_n(t) \cos t\,{\rm d}t +\int_{n\pi}^{2n\pi} G_n(t) \cos t\,{\rm d}t\right)\\
\hh & = & \hh 4 p \int_0^{n\pi} G_n(t) \cos t\,{\rm d}t,
\end{eqnarray*}
because both $G_n(t)$ and $\cos t$ are symmetric with respect to $t=n\pi$. Combining with \x{Th1} and \x{t11}, we have the following results.
\begin{Lemma}\label{same}
For any $p, \ n\in \N$, we have
\begin{equation} \label{t12}
\tau'_{2pn, p}(0) = p^2 T'(h_{2n,1})A_n,
\end{equation}
where
\begin{equation}\label{An}
A_n:=\int_0^{n\pi} G_n(t) \cos t\,{\rm d}t= \frac{\footnotesize 1}{\footnotesize 2} \int_0^{2n\pi} G_n(t) \cos t\,{\rm d}t.
\end{equation}
In particular, $\tau'_{2pn, p}(0)$ and $A_n$ have the same sign for any $p\in \N$.
\end{Lemma}
\iffalse
\begin{Theorem} \label{M5}
For any $p\in \N$ and $m=2p$, i.e. $n=1$ in \x{pm09}, one has
\[
\tau'_{2p, p}(0)>0.
\]
Consequently, for $e>0$ small, $\phi_{2p,p}(t,e)$ is hyperbolic and therefore is Lyapunov unstable.
\end{Theorem}
\fi
Now we can complete the proof of Theorem \ref{M5-7} (i) for odd $(2p,p)$-periodic solutions $\phi_{2p,p}(t,e)$. The frequencies $({m,p})=(2p,p)$ correspond to the rotation number $\varrho=\frac{\footnotesize 1}{\footnotesize 2}$. See \x{pm09}. Due to Lemma \ref{same}, we need only to prove that $A_1>0$. By \x{An}, one has $n=1$ and
\begin{eqnarray} \label{A1}
A_1\hh & = & \hh \int_0^{\pi/2} G_1(t) \cos t\,{\rm d}t +\int_{\pi/2}^{\pi} G_1(t) \cos t\,{\rm d}t \nonumber\\
\hh & = & \hh \int_0^{\pi/2} G_1(t) \cos t\,{\rm d}t +\int_{\pi/2}^0 G_1(\pi-t) \cos (\pi-t)\,{\rm d}(\pi-t)\nonumber\\
\hh & = & \hh \int_0^{\pi/2} \left(G_1(t)-G_1(\pi-t) \right) \cos t \,{\rm d}t.
\end{eqnarray}
From the last property of \x{sysn}, $G_1(t)$ is strictly decreasing on $[0,\pi]$. Hence \x{A1} implies that $A_1 >0$. \qed
\subsection{Analytical results for stability of even periodic orbits} \label{even}
Let $m, \ p$ be as in \x{mp1}. We are now studying the family $\vp_{m,p}(t,e)$ of even $({m,p})$-periodic solutions of Eq. \x{se}. By \x{tau-e1}, \x{F23e} and \x{f23}, we have
\begin{eqnarray} \label{t22}
\underline{F}_{23}(t)\hh & = & \hh \pas{\frac{\pa^2 F}{\pa t\pa e}}{(\vp_{m,p}(t),t,0)} = \frac{-3\vp_{m,p}(t)}{4\left(\vp_{m,p}^2(t)+r^2_0\right)^{5/2}}\sin t,\nonumber \\
\underline{\tau}'_{m,p}(0)\hh & = & \hh -p T'(h_{m,p})\int_0^\T \underline{F}_{23}(t)\dot \vp_{m,p}(t)\,{\rm d}t\nonumber\\
\hh & = & \hh -\frac{\footnotesize 1}{\footnotesize 4} p T'(h_{m,p})\int_0^\T \frac{-3 \vp_{m,p}(t)\dot \vp_{m,p}(t)}{\left(\vp^2_{m,p}(t)+r^2_0\right)^{5/2}}\sin t\,{\rm d}t\nonumber\\
\hh & = & \hh \frac{\footnotesize 1}{\footnotesize 4} p T'(h_{m,p})\int_0^\T \underline{G}_{m,p}(t)\cos t\,{\rm d}t,
\end{eqnarray}
where
\begin{equation} \label{Ge}
\underline{G}_{m,p}(t):={1}/{\left(\vp^2_{m,p}(t)+r^2_0\right)^{3/2}}.
\end{equation}
Note that $\underline{G}_{m,p}(t)$ is even in $t$ and has the minimal period $m\pi/p$. The similar proof as in Theorem \ref{M4} can yield the following result.
\begin{Theorem} \label{M6}
One has $\underline{\tau}'_{m,p}(0)=0$ if $({m,p})$ satisfies \x{mp1} and \x{pm0}.
\end{Theorem}
For the cases as in \x{pm09}, we have the following relation.
\begin{Lemma} \label{rels}
For any $p, \ n\in \N$, there holds
\begin{equation} \label{rels1}
\underline{\tau}'_{2pn,p}(0) = (-1)^n \tau'_{2pn,p}(0).
\end{equation}
\end{Lemma}
\noindent{\bf Proof} \quad We go back to formulas \x{t11} and \x{t22}, where $m=2pn$. Note that $\phi_{m,p}(t)$ and $\vp_{m,p}(t)$ have the same energy $h_{2pn,p}=h_{2n,1}$ and the same minimal period $T={2m\pi}/p=4n\pi$. Hence \x{sc1} is verified and the factors in \x{t11} and \x{t22} are the same. By \x{sc2}, one has
\[
\vp_{m,p}(t)\equiv \phi_{m,p}(t+n\pi).
\]
By \x{Gnt} and \x{Ge}, we obtain the relation
\[
\underline{G}_{m,p}(t)\equiv G_{m,p}(t+n\pi).
\]
Hence
\begin{eqnarray*}
\int_0^\T \underline{G}_{m,p}(t) \cos t \,{\rm d}t \hh & = & \hh \int_0^{4pn \pi}G_{m,p}(t+n\pi)\cos t\,{\rm d}t\\
\hh & = & \hh \int_{n\pi}^{n\pi+4pn \pi}G_{m,p}(t)\cos (t-n\pi)\,{\rm d}t\\
\hh & = & \hh (-1)^n \int_{n\pi}^{n\pi+4pn \pi}G_{m,p}(t)\cos t\,{\rm d}t\\
\hh & = & \hh (-1)^n \int_{0}^{4pn \pi}G_{m,p}(t)\cos t\,{\rm d}t,
\end{eqnarray*}
because $G_{m,p}(t)$ and $\cos t$ are $2n\pi$-periodic. Thus we have relation \x{rels1}. \qed
The stability result of Theorem \ref{M5-7} (ii) for even $(2p,p)$-periodic solutions $\vp_{2p,p}(t,e)$ follows immediately from Theorem \ref{M5-7} (i) and Lemma \ref{rels}. Hence the proof of Theorem \ref{M5-7} is complete.
\iffalse
\begin{Theorem} \label{M7}
For any $p\in \N$ and $m=2p$, i.e. $n=1$ in \x{pm09}, one has
\begin{equation} \label{ell2}
\underline{\tau}'_{2p, p}(0)<0.
\end{equation}
Consequently, for $e>0$ small, $\vp_{2p,p}(t,e)$ is elliptic and therefore is linearized stable.
\end{Theorem}
In fact, result \x{ell2} can be proved in a direct way. Arguing as in the deduction of \x{t12} and \x{A1}, we have
\begin{eqnarray*}
\underline{\tau}'_{2p, p}(0)\hh & = & \hh p^2 T'(h_{2,1})\int_0^{\pi} \underline{G}_1(t) \cos t\,{\rm d}t\\
\hh & = & \hh p^2 T'(h_{2,1})\int_0^{\pi/2} \left(\underline{G}_1(t)-\underline{G}_1(\pi-t) \right) \cos t \,{\rm d}t\\
\hh & < & \hh 0,
\end{eqnarray*}
because, for the present case, the function
\(
\underline{G}_1(t) = {1}/{\left(\vp^2_{2,1}(t)+r^2_0\right)^{3/2}}
\)
is strictly increasing on $[0,\pi]$.
\fi
\subsection{The numerical result and a conjecture} \label{s44} For conservative systems like Hamiltonian systems, the stability of periodic orbits is an important and a difficult problem \cite{SM71}. For the $N$-body problems and the related systems, one can refer to \cite{C10, C08, HLS14} for some different approaches to the stability of periodic orbits.
Going back to the Sitnikov problem, we know from Lemmas \ref{same} and \ref{rels} that, for any $n\ge 2$ and any $p\in \N$, the linearized stability/instability of $\phi_{2pn,p}(t)$ and $\vp_{2pn,p}(t)$ are determined by the sign of $A_n$. By \x{Gnt} and \x{An}, $A_n$ is only involved of the odd $(2n,1)$-periodic solution $\phi_n(t):=\phi_{2n,1}(t)$ of Eq. \x{s0}. It is easy to do the numerical simulation. With the choice of $1\le n \le 10$, we have the numerical results listed in Table \ref{tab1}.
\begin{table}
\centering
\caption{Numerical results for $\eta_n:=\eta_{2n,1}$, $h_n:=h_{2n,1}$ and $A_n$.} \label{tab1}
\begin{tabular}{rrrrrrr}
\toprule[1pt]
$n$ & & $\eta_n$ & & $h_n$ & & $A_n$ \\
\midrule
1 & & $ 1.7192 $ & & $-0.5221$ & & $2.3179$ \\
2 & & $ 1.8319 $ & & $-0.3221$ & & $2.2194$ \\
3 & & $ 1.8735 $ & & $-0.2449$ & & $2.1843$ \\
4 & & $ 1.8965 $ & & $-0.2017$ & & $2.1615$ \\
5 & & $ 1.9112 $ & & $-0.1736$ & & $2.1479$ \\
6 & & $ 1.9216 $ & & $-0.1537$ & & $2.1380$ \\
7 & & $ 1.9294 $ & & $-0.1387$ & & $2.1293$ \\
8 & & $ 1.9355 $ & & $-0.1269$ & & $2.1227$ \\
9 & & $ 1.9404 $ & & $-0.1174$ & & $2.1174$ \\
10 & & $ 1.9445 $ & & $-0.1095$ & & $2.1131$ \\
\bottomrule[1pt]
\end{tabular}
\end{table}
\iffalse
From \x{t11}, the signs of $\tau'_{m,p}(0)$ are only involved of the odd periodic solutions $\phi_{m,p}(t)$ of the circular Sitnikov problem \x{s0} we are considering. Thus it is easy to evaluate numerically. For $m=2,\ 4, \ 6, \ 8,\ 10, \ 12$, we list the numerical results in the table.
$\bullet$\ the numerical result is consistent with the analytical results \x{pm0} and \x{A1}, and
$\bullet$\ for all of those $(p,m)$ other than that in \x{pm0}, $\tau'_{m,p}(0)$ are always positive. It is then a very interesting question whether this is really true.
$\bullet$\ The problem is to prove the positiveness of
\begin{equation}\label{An9}
A_n=\int_0^{n\pi} \frac{\cos t}{\left(\phi_n^2(t) +r_0^2\right)^{3/2}}\,{\rm d}t,\qquad n=2,3,\cdots
\end{equation}
where $\phi_n(t):=\phi_{2n,1}(t)$ are odd $(2n,1)$-periodic solutions considered in \cite{O16}, i.e. $\phi_n(t)$ is the unique odd $4n\pi$-periodic solution of the circular Sitnikov problem \x{s0} such that $\dot \phi_n(0)>0$ and $\phi_n(t)$ has the unique zero $t=2n\pi$ in the interval $(0,4n\pi)$.
From \cite{BLO94}, $\phi_n(t)$ can be expressed as elliptic functions of different kinds in an implicit way. Is this useful?
\fi
Note that the positiveness of $A_1$ in Table \ref{tab1} has already been proved in an analytical way. It is surprising that numerically, all of $A_n$, $n\ge 2$ are positive. Hence we have the following interesting problem.
\noindent
{\bf Conjecture} One has $A_n>0$ for all $n\ge 2$.
We end the paper with two remarks.
1. Once the conjecture is proved, we could conclude that (i) odd $(2np,p)$-periodic solutions $\phi_{2np,p}(t,e)$ are hyperbolic and Lyapunov unstable for $e>0$ small, (ii) even $(4np,p)$-periodic solutions $\vp_{4np,p}(t,e)$ are also hyperbolic and Lyapunov unstable for $e>0$ small, and (iii) even $((4n-2)p,p)$-periodic solutions $\vp_{(4n-2)p,p}(t,e)$ are elliptic and linearized stable for $e>0$ small.
2. For the case $n=2$, arguing as in \x{A1}, we have from \x{An}
\[
A_2
=\int_0^{\pi/2} \left(G_2(t)-G_2(\pi-t)+G_2(2\pi-t) -G_2(\pi+t) \right) \cos t \,{\rm d}t.
\]
The sign of $A_2$ is related with a certain kind of `convexity' of $G_2(t)$ on the interval $[0,2\pi]$. This is also true for general case $n\ge 3$.
\iffalse
For general case $n\ge 2$, arguing as in \x{A1}, we have from \x{An}
\begin{eqnarray}\label{An1}
A_n\hh & = & \hh \sum_{i=1}^n \int_{(i-1)\pi}^{i\pi} G_n(t) \cos t\,{\rm d}t\nonumber\\
\hh & = & \hh\sum_{i=1}^n \int_{0}^{\pi} G_n(t+(i-1)\pi) \cos (t+(i-1)\pi)\,{\rm d}t\nonumber\\
\hh & = & \hh \sum_{i=1}^n \int_{0}^{\pi} (-1)^{i-1} G_n(t+(i-1)\pi) \cos t\,{\rm d}t\nonumber\\
\hh & = & \hh \sum_{i=1}^n \int_{0}^{\pi/2} (-1)^{i-1} \left(G_n(t+(i-1)\pi) -G_n(i\pi-t)\right) \cos t\,{\rm d}t\nonumber\\
\hh & = & \hh \int_{0}^{\pi/2}\left( \sum_{i=1}^n (-1)^{i-1}\left(G_n(t+(i-1)\pi) -G_n(i\pi-t)\right)\right) \cos t\,{\rm d}t.
\end{eqnarray}
It is crucial to prove that $A_2>0$. This is related with some `convexity' of $G_2(t)$. However, $G_2(t)$ cannot be convex in the whole interval $[0,2\pi]$.
\fi
\begin{thebibliography}{333}
\itemsep-2pt
{\small
\bibitem{BLO94} E. Belbruno, J. Llibre, and M. Oll\'e,
`On the families of periodic orbits which bifurcate from the circular Sitnikov motions',
{\it Celest. Mech. Dyn. Astr.}, {\bf 60} (1994), 99--129.
\bibitem{C10} K.-C. Chen,
`Variational constructions for some satellite orbits in periodic gravitational force fields',
{\it Amer. J. Math.}, {\bf 132} (2010), 681--709.
\bibitem{C08} A. Chenciner,
`Four lectures on the $N$-body problem',
{\it in} ``Hamiltonian Dynamical Systems and Applications", pp. 21--52,
NATO Sci. Peace Secur. Ser. B Phys. Biophys., Springer, Dordrecht, 2008.
\bibitem{F03} S. B. Faruque,
`Solution of the Sitnikov problem',
{\it Celest. Mech. Dyn. Astr.}, {\bf 87} (2003), 353--369.
\bibitem{GNR18} J. Gal\'an, D. N\'u\~nez, and A. Rivera,
`Quantitative stability of certain families of periodic solutions in the Sitnikov problem',
{\it SIAM J. Appl. Dyn. Syst.}, {\bf 17} (2018), 52--77.
\bibitem{GNRR18} J. Gal\'an-Vioque, D. Nu\~nez, A. Rivera, and C. Riccio,
`Stability and bifurcations of even periodic orbits in the Sitnikov problem',
{\it Celest. Mech. Dyn. Astr.}, {\bf 130}:82 (2018), 20 pp.
\bibitem{HLS14} X. Hu, Y. Long, and S. Sun,
`Linear stability of elliptic Lagrangian solutions of the planar three-body problem via index theory',
{\it Arch. Ration. Mech. Anal.}, {\bf 213} (2014), 993--1045.
\bibitem{LLYZ03} J. Lei, X. Li, P. Yan, and M. Zhang,
`Twist character of the least amplitude periodic solution of the forced pendulum',
{\it SIAM J. Math. Anal.}, {\bf 35} (2003), 844--867.
\bibitem{L91} M. Levi,
`Quasiperiodic motions in superquadratic time-periodic potentials',
{\it Comm. Math. Phys.}, {\bf 143} (1991), 43--83.
\bibitem{LS90} J. Liu and Y.-S. Sun,
`On the Sitnikov problem',
{\it Cel. Mech. Dynam. Syst.}, {\bf 49} (1990), 285--302.
\bibitem{LO08} J. Llibre and R. Ortega,
`On the families of periodic orbits of the Sitnikov problem',
{\it SIAM J. Appl. Dyn. Syst.}, {\bf 7} (2008), 561--576.
\bibitem{LS80} J. Llibre and C. Sim\'o,
`Estudio cualitativo del problema de Sitnikov',
{\it Publ. Mat. U.A.B.}, {\bf 18} (1980), 49--71.
\bibitem{MW66} W. Magnus and S. Winkler,
`Hill's Equations',
John Wiley, New York, 1966.
\bibitem{M18} M. Misquero,
`Resonance tongues in the linear Sitnikov equation',
{\it Celest. Mech. Dyn. Astr.}, {\bf 130}:30 (2018), 25 pp.
\iffalse
\bibitem{M83} J. Moser,
`Integrable Hamiltonian Systems and Spectral Theory',
Lezioni Fermiane, Accademia Nazionale dei Lincei, Rome, 1983.
\fi
\bibitem{O96} R. Ortega,
`Periodic solutions of a Newtonian equation: Stability by the third approximation',
{\it J. Differential Equations}, {\bf 128} (1996), 491--518.
\bibitem{O16} R. Ortega,
`Symmetric periodic solutions in the Sitnikov problem',
{\it Arch. Math.}, {\bf 107} (2016), 405--412.
\bibitem{O17} R. Ortega,
`Stability of periodic solutions of Hamiltonian systems with low dimension',
{\it Rend. Sem. Mat. Univ. Politec. Torino}, {\bf 75} (2017), 53--78.
\bibitem{OR10} R. Ortega and A. Rivera,
`Global bifurcations from the center of mass in the Sitnikov problem',
{\it Discrete Contin. Dyn. Syst. Ser. B}, {\bf 14} (2010), 719--732.
\bibitem{SM71} C. L. Siegel and J. K. Moser,
`Lectures on Celestial Mechanics',
Springer-Verlag, Berlin, 1971.
\bibitem{S60} K. A. Sitnikov,
`Existence of oscillating motion for the three-body problem',
{\it Dokl. Akad. Nauk}, {\bf 133} (1960), 303--306.
\bibitem{ZCC18} M. Zhang, X. Cen, and X. Cheng,
`Linearized stability and instability of nonconstant periodic solutions of Lagrangian equations',
{\it Math. Meth. Appl. Sci.}, {\bf 41} (2018), 4853--4866.
}
\end{thebibliography}
\fbox{\small Ver. 1, 2019-04-26}
\end{document}
\begin{document}
\title[Fractional Schr\"odinger-Poisson equations]
{Fractional Schr\"odinger-Poisson equations with general nonlinearities}
\author[Ronaldo C. Duarte]{Ronaldo C. Duarte}
\email{[email protected]}
\email{[email protected]}
\author[Marco A. S. Souto]{Marco A. S. Souto}
\keywords{positive solutions, ground state solutions, periodic potential}
\subjclass{Primary 35J60; Secondary 35J10}
\begin{abstract}
In this paper we investigate the existence of positive solutions and ground state solutions for a class of fractional Schr\"odinger-Poisson equations
in $\mathbb R^3$ with general nonlinearity.
\end{abstract}
\maketitle
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{definition}[theorem]{Definition}
\renewcommand{\thesection.\arabic{equation}}{\thesection.\arabic{equation}}
\section{Introduction}
In this article we consider the Schr\"odinger-Poisson system
$$
\left\{\begin{array}{lcl}
\left(-\Delta\right)^{s} u +V(x)u+\phi u= f(u), &\mbox{ in }& \mathbb R^3, \\ \left(-\Delta\right)^{t} \phi=u^2, &\mbox{ in }& \mathbb R^3,
\end{array}\right. \leqno (P)
$$
where $\left(-\Delta\right)^\alpha$ is the fractional Laplacian for $\alpha = s,t$. This paper was motivated by \cite{amsss}. In \cite{amsss} the authors show the existence of positive solutions for the system
$$
\left\{\begin{array}{lcl}
- \Delta u +V(x)u+\phi u= f(u), &\mbox{ in }& \mathbb R^3, \\ -\Delta \phi=u^2, &\mbox{ in }& \mathbb R^3,
\end{array}\right.
$$
where $V:\mathbb{R}^3 \to \mathbb R$ is a continuous, periodic and positive potential. Our purpose is to show that when we consider this system with the fractional Laplacian operator instead of the Laplacian, then we get a positive solution and a ground state solution for the system. We emphasize that we prove the existence of a weak solution to the system and, without using regularity results, we show that the weak solution is positive almost everywhere in $\mathbb{R}^{3}$. To prove this, we present another version of the Logarithmic lemma and we deduce a weak comparison principle for the solution of the system (See Theorem \ref{lm41}).
We will admit that the potential $V$ satisfies,
\begin{enumerate}
\item[$(V_o)$\ ] $V(x) \geq \alpha >0$, $\forall x \in \mathbb R^3$, for some constant $\alpha >0$,
\item[$(V_1)$\ ] $V(x)=V(x+y)$, for all $x \in \mathbb R^3$, $y\in \mathbb Z^3$.\newline
\end{enumerate}
Also, we will assume that
$f\in C(\mathbb R,\mathbb R)$ is a function satisfying:
\begin{enumerate}
\item[$(f_1)$\ ]
$f(u)u>0$, $u \neq 0$;
\item[$(f_2)$\ ]
$\displaystyle\lim_{u\rightarrow 0} \frac{f(u)}{u} = 0$;
\item[$(f_3)$\ ] there exists $p \in (4,2^{*}_{s})$ and $C>0$, such that
$$|f(u)|\leq C(|u|+|u|^{p-1}),$$ for all $u \in \mathbb{R}$, where $2^{*}_{s}=\frac{6}{3-2s}$.
\item[$(f_4)$\ ]
$\displaystyle\lim_{u\rightarrow +\infty} \frac{F(u)}{u^4} =+\infty$, where $F(u)=\int_0^u f(z)dz$;
\item[$(f_5)$\ ]The function
$u \longmapsto \frac{f(u)}{|u|^{3}}$ is increasing
in $|u|\neq 0$.
\end{enumerate}
We will denote $g(u):=f(u^{+})$ and $G(t)=\int_{0}^{t}g(s)ds$.
The System $(P)$ was studied in \cite{bf}. The author studied the following one dimensional system
$$
\left\{\begin{array}{lcl}
- \Delta u +\phi u= a|u|^{p-1}u, &\mbox{ in }& \mathbb R, \\ \left(-\Delta\right)^{t} \phi=u^2, &\mbox{ in }& \mathbb R,
\end{array}\right.
$$
for $p\in(1,5)$ and $t \in (0,1)$. In \cite{zjs2}, the authors show the existence of positive solutions for the system
$$
\left\{\begin{array}{lcl}
- \Delta u + u + \lambda \phi u= f(u), &\mbox{ in }& \mathbb{R}^{3}, \\ -\Delta \phi= \lambda u^2, &\mbox{ in }& \mathbb{R}^{3},
\end{array}\right.
$$
for $\lambda>0$ and general critical nonlinearity, $f$.
In \cite{zjs}, the authors have proved the existence of radial ground state solutions of $(P)$ when $V=0$. In \cite{zhang}, the system was studied, although the sign of the solutions is not considered. In this paper, we prove the existence of positive solutions for system $(P)$. Moreover, by the method of the Nehari manifold, we ensure the existence of a ground state solution for the problem.
Our result is:
\begin{theorem}{\label{fth}}
Suppose that $s \in (\frac{3}{4},1)$, $t \in (0,1)$, $V$ satisfies $(V_o)$ and $(V_1)$, and $f$ satisfies $(f_1)- (f_5)$. Then the system ($P$) has a positive solution and a ground state solution.
\end{theorem}
The hypothesis $s \in (\frac{3}{4},1)$ is required to ensure that the interval $(4,2^{\ast}_{s})$ is nondegenerate.
\begin{remark}{\label{rm1}} The condition $(f_5)$ implies that
$H(u)=uf(u)-4F(u)$ is a non-negative function.
\end{remark}
In the paper \cite{gsd}, Lemma 2.3, the authors proved another version of the Lions lemma. We will need this lemma to prove our result. It states that:
\begin{lemma}\label{l1.3}
If $\left\{u_{n}\right\}_{n \in \mathbb{N}}$ is a bounded sequence in $H^{s}(\mathbb{R}^{3})$ such that for some $R>0$ and $2\leq q< 2^{\ast}_{s}$ we have
$$
\sup_{x \in \mathbb{R}^{3}}\int_{B_{R}(x)}|u_{n}|^{q} \longrightarrow 0
$$
as $n \rightarrow \infty$, then $u_{n}\rightarrow 0$ in $L^{r}(\mathbb{R}^{3})$ for all $r \in (2,2^{\ast}_{s})$.
\end{lemma}
\section{Some preliminary results}
Let $s \in (0,1)$, we denote by $\dot{H}^{s}(\mathbb{R}^{3})$ the homogeneous fractional space. It is defined as the completion of $C_{0}^{\infty}(\mathbb{R}^{3})$ under the norm
$$
||u||_{\dot{H}^{s}}=\left(\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}\frac{(u(x)-u(y))^{2}}{|x-y|^{3+2s}}dxdy\right)^{\frac{1}{2}}
$$
and we define
$$
H^{s}(\mathbb{R}^{3}):=\left\{u \in L^{2}(\mathbb{R}^{3});\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}\frac{(u(x)-u(y))^{2}}{|x-y|^{3+2s}}dxdy<\infty \right\}.
$$
The space $H^{s}(\mathbb{R}^{3})$ is a Hilbert space with the norm
$$
||u||_{H^{s}}=\left(\int_{\mathbb{R}^{3}}|u|^{2}dx+\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}\frac{(u(x)-u(y))^{2}}{|x-y|^{3+2s}}dxdy\right)^{\frac{1}{2}}
$$
We define the fractional Laplacian operator
$\left(-\Delta\right)^{s}:\dot{H}^{s}(\mathbb{R}^{3})\longrightarrow (\dot{H}^{s}(\mathbb{R}^{3}))'$ by $(\left(-\Delta\right)^{s}u,v)=\frac{\zeta}{2}(u,v)_{\dot{H}^{s}}$, where
$
\zeta=\zeta(s)=\left(\int_{\mathbb{R}^{3}}\frac{1-\cos(\xi_{1})}{|\xi|^{3+2s}}d \xi \right)^{-1}
$ and $(\cdot,\cdot)_{\dot{H}^{s}}$ is the inner product of $\dot{H}^{s}(\mathbb{R}^{3})$. The constant $\zeta$ satisfies
$$
\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}\frac{(u(x)-u(y))(v(x)-v(y))}{|x-y|^{3+2s}}dxdy=2\zeta^{-1}\int_{\mathbb{R}^{3}}|\xi|^{2s}\mathcal{F}u(\xi) \overline{\mathcal{F}v(\xi)}d \xi,
$$
where $\mathcal{F}u$ is the Fourier transform of $u$ (see Proposition 3.4 of \cite{dpv}). The fractional Laplacian operator is a bounded linear operator.
A pair $(u, \phi_{u})$ is a solution of $(P)$ if
$$
\frac{\zeta(t)}{2}\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}\frac{(\phi_{u}(x)-\phi_{u}(y))(w(x)-w(y))}{|x-y|^{3+2t}}dxdy = \int_{\mathbb{R}^{3}}u^{2}wdx.
$$
for all $w \in \dot{H}^{t}(\mathbb{R}^{3})$, and
$$
(\left(-\Delta\right)^{s}u,v)+\int_{\mathbb{R}^{3}}V(x)uvdx+\int_{\mathbb{R}^{3}}\phi_{u}uvdx=\int_{\mathbb{R}^{3}}f(u)vdx
$$
for all $v\in H^{s}(\mathbb{R}^{3})$.
Let us recall some facts about the Schr\"odinger-Poisson equations (see \cite{Ruiz,ap,zz,G} for instance). We can transform $(P)$ into a fractional Schr\"odinger problem with a nonlocal term. For all $u\in H^{s}(\mathbb R^3)$, there exists a unique $\phi=\phi_u \in \dot{H}^{t}(\mathbb R^3)$ such that
$$
\left(-\Delta\right)^{t} \phi=u^2.
$$
In fact, since $H^{s}(\mathbb R^3)\hookrightarrow L^{\frac{2\cdot 2^{\ast}_{t}}{2^{\ast}_{t}-1}}(\mathbb R^3)$ (continuously), a simple application of the Lax-Milgram theorem shows that $\phi_u$ is well defined and
$$
||\phi_{u}||_{\dot{H}^{t}}^{2}\leq S^2 ||u||^4_{\frac{2\cdot 2^{\ast}_{t}}{2^{\ast}_{t}-1}},
$$
where $||.||_p$ denotes the $L^p(\mathbb R^3)$ norm and $S$ is the best constant of the Sobolev embedding $\dot{H}^{t}(\mathbb R^3) \hookrightarrow L^{2^{\ast}_{t}}(\mathbb R^3)$, that is
$$
S= \inf_{u \in \dot{H}^{t}(\mathbb{R}^{3})\setminus \left\{0\right\}}\frac{||u||_{\dot{H}^{t}}^{2}}{||u||_{2^{\ast}_{t}}^{2}}.
$$
\begin{lemma}{\label{lm1}}
We have:
$i)$ there exists $C>0$ such that $||\phi_u||_{\dot{H}^{t}}\leq C||u||_{H^{s}}^2$ and
$$
\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}\frac{(\phi_{u}(x)-\phi_{u}(y))^{2}}{|x-y|^{3+2t}}dxdy \leq C||u||_{{H}^{s}}^{4}
$$ for all $u\in H^s(\mathbb R^3)$;
$ii)$ $\phi_u\geq 0$, $\forall u\in H^s(\mathbb R^3)$;
$iii)$ $\phi_{tu}=t^2\phi_u$, $\forall t>0, u\in H^s(\mathbb R^3)$.
$iv)$ If $\tilde{u}(x):=u(x+z)$ then $\phi_{\tilde{u}}(x) = \phi_{u}(x+z)$ and
$$
\int_{\mathbb{R}^{3}}\phi_{u}u^{2}dx = \int_{\mathbb{R}^{3}}\phi_{\tilde{u}}\tilde{u}^{2}dx.
$$ for all $z \in \mathbb{R}^{3}$ and $u \in H^{s}(\mathbb{R}^{3})$.
$v)$
If $\left\{u_{n}\right\}$ converges weakly to $u$ in $H^{s}(\mathbb{R}^{3})$, then $\left\{\phi_{u_{n}}\right\}$ converges weakly to $\phi_{u}$ in $\dot{H}^{t}(\mathbb{R}^{3})$.
\end{lemma}
The proof is analogous to the case of the Poisson equation in $\mathcal{D}^{1,2}(\mathbb{R}^{3})$ (see \cite{amsss, Ruiz, zz}).
At first, we are interested in showing the existence of a positive solution for $(P)$. We will consider the following Euler-Lagrange functional
$$
\begin{array}{cccl}
I:&H^{s}(\mathbb{R}^{3})&\longrightarrow&\mathbb{R}\\
&u& \longmapsto &
\begin{array}{ll}\frac{\zeta(s)}{4}\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}&\frac{(u(x)-u(y))^{2}}{|x-y|^{3+2s}}dxdy+\frac{1}{2}\int_{\mathbb{R}^{3}}V(x)u^{2}dx\\
&+\frac{1}{4}\int_{\mathbb{R}^{3}}\phi_{u}u^{2}dx-\int_{\mathbb{R}^{3}}G(u)dx,
\end{array}
\end{array}
$$
whose derivative is
$$
\begin{array}{ll}
I^{'}(u)(v)&=\frac{\zeta}{2}\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}\frac{(u(x)-u(y))(v(x)-v(y))}{|x-y|^{3+2s}}dxdy\\
& +\int_{\mathbb{R}^{3}}V(x)uvdx
+\int_{\mathbb{R}^{3}}\phi_{u}u vdx-\int_{\mathbb{R}^{3}}g(u)vdx \\
&= (\left(-\Delta\right)^{s}u,v)+\int_{\mathbb{R}^{3}}V(x)uvdx
+\int_{\mathbb{R}^{3}}\phi_{u}u vdx-\int_{\mathbb{R}^{3}}g(u)vdx.
\end{array}
$$
Remark that critical points of $I$ determine solutions for $(P)$.
\begin{lemma}
The function
$$
u \longmapsto ||u||:=\left(\frac{\zeta(s)}{2}\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}\frac{(u(x)-u(y))^{2}}{|x-y|^{3+2s}}dxdy + \int_{\mathbb{R}^{3}}V(x)u^{2}dx\right)^{\frac{1}{2}}
$$
defines a norm in $H^{s}(\mathbb{R}^{3})$ which is equivalent to the standard norm.
\end{lemma}
The proof of the previous lemma is trivial and therefore we will omit it in this paper.
\section{Existence of the Solution}
\begin{theorem}\label{th31}
Suppose that $1>s> \frac{3}{4}$, $t \in (0,1)$, $V$ satisfies $(V_{0}), (V_{1})$ and $f$ satisfies $(f_{1})-(f_{4})$. Then, the problem $(P)$ has a nontrivial solution.
\end{theorem}
\begin{proof}
By usual arguments, we prove that the functional $I$ has the mountain pass geometry. By the Mountain Pass Theorem, there is a Cerami sequence for $I$ at the mountain pass level $c$. That is, there is $\left\{u_{n}\right\}_{n \in \mathbb{N}} \subset H^{s}(\mathbb{R}^{3})$ such that
$$
I(u_{n})\rightarrow c
$$
and
$$
(1+||u_{n}||)I^{'}(u_{n}) \rightarrow 0,
$$
where
$$
c= \inf_{\gamma \in \Gamma}\sup_{t \in [0,1]}I(\gamma(t))
$$
and
$$
\Gamma=\left\{\gamma \in C([0,1], H^{s}(\mathbb{R}^{3}));\gamma(0)=0, \gamma(1)=e \right\},
$$
where $e \in H^{s}(\mathbb{R}^{3})$, and $e$ satisfies $I(e)<0$. By Remark \ref{rm1}
$$
\begin{array}{ll}
4I(u_{n})-I'(u_{n})u_{n}
&=||u_{n}||^{2}+\int_{\mathbb{R}^{3}}[f(u_{n})u_{n}-4F(u_{n})]dx \\
& \geq ||u_{n}||^{2}
\end{array}
$$
Therefore $\left\{u_{n}\right\}$ is bounded in $H^{s}(\mathbb{R}^{3})$. So, there is $u \in H^{s}(\mathbb{R}^{3})$ such that $\left\{u_{n}\right\}$ converges weakly to $u$. Lemma \ref{lm1}, $(f_{2})$, and $(f_{3})$ imply that $u$ is a critical point for $I$. If $u \neq 0$ then $u$ is a nontrivial solution for
$(P)$. Suppose that $u = 0$.
We claim that $\{u_{n}\}$ does not converge to $0$ in $L^{r}(\mathbb{R}^{3})$ for all $r \in (2,2^{\ast}_{s})$. Indeed, otherwise, by $(f_{2})$, $(f_{3})$ and the boundedness of $\{u_{n}\}$ in $L^{2}(\mathbb{R}^{3})$
we have
$$
\int_{\mathbb{R}^{3}}g(u_{n})u_{n}dx \rightarrow 0;
$$
By Lemma \ref{lm1}
$$
\begin{array}{ll}
||u_{n}||^{2}& \leq ||u_{n}||^{2}+\int_{\mathbb{R}^{3}}\phi_{u_{n}}u_{n}^{2}dx \\
& = \int_{\mathbb{R}^{3}}g(u_{n})u_{n}dx + I'(u_{n})u_{n}.
\end{array}
$$
The right side of this last inequality converges to $0$. In this case, $u_{n}\rightarrow 0$ in $H^{s}(\mathbb{R}^{3})$. Consequently
$$
c=\lim I(u_{n})=0.
$$
This last equality cannot occur. Hence, we can assume that there are $R>0$ and $\delta>0$ such that, passing to a subsequence if necessary,
$$
\int_{B_{R}(y_{n})}u_{n}^{2}dx\geq \delta,
$$
for some sequence $\{y_{n}\} \subset \mathbb{Z}^{3}$ (See Lemma \ref{l1.3}).
For each $n \in \mathbb{N}$, we define
$$
w_{n}(x):=u_{n}(x+y_{n}).
$$
Note that $w_{n} \in H^{s}(\mathbb{R}^{3})$. Moreover, changing the variables in the integral below, we have
$$
\begin{array}{ll}
I(w_{n})
&= \begin{array}{ll}\frac{\zeta}{4}\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}&\frac{(u_{n}(x+y_{n})-u_{n}(y+y_{n}))^{2}}{|(x+y_{n})-(y+y_{n})|^{3+2s}}dxdy+\frac{1}{2}\int_{\mathbb{R}^{3}}V(x)u_{n}(x+y_{n})^{2}dx\\
&+\frac{1}{4}\int_{\mathbb{R}^{3}}\phi_{w_{n}}w_{n}^{2}dx-\int_{\mathbb{R}^{3}}G(u_{n}(x+y_{n}))dx
\end{array} \\
& = \begin{array}{ll}\frac{\zeta}{4}\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}&\frac{(u_{n}(z)-u_{n}(w))^{2}}{|z-w|^{3+2s}}dzdw+\frac{1}{2}\int_{\mathbb{R}^{3}}V(z)u_{n}(z)^{2}dz\\
&+\frac{1}{4}\int_{\mathbb{R}^{3}}\phi_{u_{n}}u_{n}^{2}dx-\int_{\mathbb{R}^{3}}G(u_{n}(z))dz.
\end{array}\\
& = I(u_{n}).
\end{array}
$$
Analogously, for every $\phi \in H^{s}(\mathbb{R}^{3})$
$$
\begin{array}{ll}
I'(w_{n})\phi&=\begin{array}{ll}\frac{\zeta}{2}\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}&\frac{(w_{n}(x)-w_{n}(y))(\phi(x)-\phi(y))}{|x-y|^{3+2s}}dxdy+\int_{\mathbb{R}^{3}}V(x)w_{n}\phi dx\\
&+\int_{\mathbb{R}^{3}}\phi_{w_{n}}w_{n}\phi dx-\int_{\mathbb{R}^{3}}g(w_{n})\phi dx
\end{array} \\
& = \begin{array}{ll}\frac{\zeta}{2}\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}&\frac{(u_{n}(x+y_{n})-u_{n}(y+y_{n}))(\phi(x)-\phi(y))}{|(x+y_{n})-(y+y_{n})|^{3+2s}}dxdy\\&+\int_{\mathbb{R}^{3}}V(x+y_{n})u_{n}(x+y_{n})\phi(x) dx\\
&+\int_{\mathbb{R}^{3}}\phi_{u_{n}}(x+y_{n})u_{n}(x+y_{n})\phi dx\\
&-\int_{\mathbb{R}^{3}}g(u_{n}(x+y_{n}))\phi(x) dx
\end{array} \\
& = \begin{array}{ll}\frac{\zeta}{2}\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}&\frac{(u_{n}(z)-u_{n}(w))(\phi(z-y_{n})-\phi(w-y_{n}))}{|z-w|^{3+2s}}dzdw\\
&+\int_{\mathbb{R}^{3}}V(z)u_{n}(z)\phi(z-y_{n}) dz\\
&+\int_{\mathbb{R}^{3}}\phi_{u_{n}}(z)u_{n}(z)\phi(z-y_{n}) dz\\
&-\int_{\mathbb{R}^{3}}g(u_{n}(z))\phi(z-y_{n}) dz
\end{array} \\
& = I'(u_{n})\overline{\phi}
\end{array}
$$
where $\overline{\phi}(x)=\phi(x-y_{n})$. This implies that $\{w_{n}\}$ is a Cerami sequence for $I$ at the level $c$. Analogously, we can show that $\{w_{n}\}$ is bounded, $\{w_{n}\}$ converges weakly to some $w_{0}\in H^{s}(\mathbb{R}^{3})$ and that $I'(w_{0})=0$. Passing to a subsequence, if necessary, we can assume that $\{w_{n}\}$ converges in $L^{2}_{loc}(\mathbb{R}^{3})$ to $w_{0}$. Then
$$
\begin{array}{ll}
\int_{B_{R}(0)}w_{0}^{2}dx & = \lim\limits_{n \rightarrow \infty}\int_{B_{R}(0)}w_{n}^{2}dx \\
& = \lim\limits_{n \rightarrow \infty}\int_{B_{R}(0)}u_{n}(x+y_{n})^{2}dx \\
& = \lim\limits_{n \rightarrow \infty}\int_{B_{R}(y_{n})}u_{n}(z)^{2}dz \geq \delta.
\end{array}
$$
Therefore, $w_{0}$ is a nontrivial solution for $(P)$. Thus, if $u=0$ we prove that there is a critical point for $I$, that is nontrivial.
\end{proof}
\section{Positivity of the solution}
In this section, we will prove that the solution of Theorem \ref{th31} is positive. Initially, we will prove a version of the Logarithmic lemma, which was presented by Di Castro, Kuusi and Palatucci (Lemma 1.3 of \cite{dkp}). In the Logarithmic lemma, the authors give an estimate for weak solutions of the equation
$$
\left\{\begin{array}{rcccc}
\left(-\Delta_{p}\right)^{s}u&=&0&\mbox{ in }& \Omega \\
u&=&g&\mbox{ in }& \mathbb{R}^{n}\setminus\Omega
\end{array}
\right.
$$
in $B_{r}(x_{0})\subset B_{\frac{R}{2}}(x_{0}) \subset \Omega$, for $x_{0}\in \Omega$ and $u \geq 0$ in $B_{R}(x_{0})$. Following the ideas from Di Castro, Kuusi and Palatucci, we will show a similar estimate for a supersolution of the problem
$$
\begin{array}{lll}
\left(-\Delta\right)^{s}u+a(x)u=0&\mbox{ in } &\mathbb{R}^{n}
\end{array}
$$
(See Lemma \ref{lm41} below). Supersolutions are defined in the following way
$$
\int_{\mathbb{R}^{n}}\int_{\mathbb{R}^{n}}\frac{(u(x)-u(y))\left(v(x)-v(y)\right)}{|x-y|^{n+2s}}dxdy+ \int_{\mathbb{R}^{n}}a(x)u(x)v(x)dx\geq 0,
$$
for all $v \in H^{s}(\mathbb{R}^{n})$ with $v \geq 0$ almost everywhere. Also, in this situation, we do not need to assume that $u\geq0$ in some subset of $\mathbb{R}^{n}$.
With this estimate, we conclude that the supersolution satisfies $u > 0$ almost everywhere in $\mathbb{R}^{3}$ or $u=0$ almost everywhere in $\mathbb{R}^{3}$.
\begin{lemma}\label{lm41}
Suppose that $a:\mathbb{R}^{n}\rightarrow \mathbb{R}$ is a nonnegative function and $u \in H^{s}(\mathbb{R}^{n})$. If
$$
\int_{\mathbb{R}^{n}}\int_{\mathbb{R}^{n}}\frac{(u(x)-u(y))\left(v(x)-v(y)\right)}{|x-y|^{n+2s}}dxdy+ \int_{\mathbb{R}^{n}}a(x)u(x)v(x)dx\geq 0.
$$
for all $v \in H^{s}(\mathbb{R}^{n})$ with $v \geq 0$ almost everywhere, then $u \geq 0$ almost everywhere. In other words, if $\left(-\Delta\right)^{s}u+a(x)u\geq0$ then $u \geq 0$ almost everywhere.
\end{lemma}
\begin{proof}
Define $v=u^{-} = \max\{0,-u\}$. By hypothesis
$$
\int_{\mathbb{R}^{n}}\int_{\mathbb{R}^{n}}\frac{(u(x)-u(y))\left(u^{-}(x)-u^{-}(y)\right)}{|x-y|^{n+2s}}dxdy+ \int_{\mathbb{R}^{n}}a(x)u(x)u^{-}(x)dx\geq 0.
$$
But,
\begin{itemize}
\item if $u(x)>0$ and $u(y)>0$ then $(u(x)-u(y))\left(u^{-}(x)-u^{-}(y)\right) =0$.
\item if $u(x)<0$ and $u(y)<0$ then $(u(x)-u(y))\left(u^{-}(x)-u^{-}(y)\right) = - (u(x)-u(y))^{2} \leq 0$.
\item if $u(x)>0$ and $u(y)<0$ then $(u(x)-u(y))\left(u^{-}(x)-u^{-}(y)\right) = (u(x)-u(y))u(y) \leq 0$.
\item if $u(x)<0$ and $u(y)>0$ then $(u(x)-u(y))\left(u^{-}(x)-u^{-}(y)\right) = (u(x)-u(y))(-u(x)) \leq 0$.
\item if $u(x) < 0$, then $a(x)u(x)u^{-}(x) = -a(x)u^{2}(x)<0$, and $a(x)u(x)u^{-}(x)$ $= 0$ in the case $u(x)\geq0$.
\end{itemize}
We conclude that each of the integrals above is equal to zero. Hence
$$
\frac{(u(x)-u(y))\left(u^{-}(x)-u^{-}(y)\right)}{|x-y|^{n+2s}}= 0.
$$
Therefore $u^{-}$ is constant in $H^{s}(\mathbb{R}^{n})$, that is, $u^{-}=0$.
\end{proof}
\begin{lemma}\label{lm42}
Suppose that $\epsilon \in (0,1]$ and $a,b \in \mathbb{R}^{n}$. Then
$$
|a|^{2}\leq |b|^{2}+2\epsilon|b|^{2}+\frac{1+\epsilon}{\epsilon}|a-b|^{2}
$$
\end{lemma}
\begin{proof}
$$
\begin{array}{ll}
|a|^{2} & \leq \left(|b|+|a-b|\right)^{2} \\
& = |b|^{2}+2|b||a-b|+|a-b|^{2} \\
\end{array}
$$
By Cauchy inequality with $\epsilon$
$$
|b||a-b|\leq \epsilon|b|^{2}+\frac{|a-b|^{2}}{4\epsilon} \leq
\epsilon|b|^{2}+\frac{|a-b|^{2}}{2\epsilon}
$$
Replacing in the inequality above
$$
\begin{array}{ll}
|a|^{2} & \leq |b|^{2}+2\epsilon|b|^{2}+\frac{|a-b|^{2}}{\epsilon}+|a-b|^{2} \\
& = |b|^{2}+2\epsilon|b|^{2}+\frac{1+\epsilon}{\epsilon}|a-b|^{2}.
\end{array}
$$
\end{proof}
\begin{lemma}\label{lm43}
With the same assumptions of Lemma \ref{lm41} and $a \in L^{1}_{loc}(\mathbb{R}^{n})$, we have for all $r,d>0$ and $x_{0} \in \mathbb{R}^{n}$
\begin{equation}
\int_{B_{r}}\int_{B_{r}}\left|\log\left(\frac{d+u(x)}{d+u(y)}\right)\right|^{2}\frac{1}{|x-y|^{n+2s}}dxdy
\leq Cr^{n-2s} + \int_{B_{2r}}a(x)dx,
\end{equation}
where $B_{r}=B_{r}(x_{0})$ and $C=C(n,s)>0$ is a constant.
\end{lemma}
\begin{proof}
Consider $\phi \in C_{0}^{\infty}(B_{\frac{3r}{2}})$, $0\leq \phi \leq 1$, $\phi = 1$ in $B_{r}$ and $K>0$ such that $||D\phi||_{\infty} \leq Kr^{-1}$. The function
$$
\eta=\frac{\phi^{2}}{u+d}
$$
is in $H^{s}(\mathbb{R}^{n})$ and $\eta\geq0$ (see Lemma 5.3 in \cite{dpv}). By hypothesis
$$
\begin{array}{ll}
0&\leq \int_{\mathbb{R}^{n}}\int_{\mathbb{R}^{n}}\frac{(u(x)-u(y))(\eta(x)-\eta(y))}{|x-y|^{n+2s}}dxdy + \int_{\mathbb{R}^{n}}a(x)u(x)\eta(x)dx \\
&= \int_{B_{2r}}\int_{B_{2r}}\frac{(u(x)-u(y))(\eta(x)-\eta(y))}{|x-y|^{n+2s}}dxdy \\
& + \int_{\mathbb{R}^{n}-B_{2r}}\int_{B_{2r}}\frac{(u(x)-u(y))(\eta(x)-\eta(y))}{|x-y|^{n+2s}}dxdy \\
& +
\int_{B_{2r}}\int_{\mathbb{R}^{n}-B_{2r}}\frac{(u(x)-u(y))(\eta(x)-\eta(y))}{|x-y|^{n+2s}}dxdy \\
& +
\int_{\mathbb{R}^{n}-B_{2r}}\int_{\mathbb{R}^{n}-B_{2r}}\frac{(u(x)-u(y))(\eta(x)-\eta(y))}{|x-y|^{n+2s}}dxdy \\
& + \int_{\mathbb{R}^{n}}a(x)u(x)\eta(x)dx.
\end{array}
$$
We will prove some statements about the five integrals of the last inequality.
\begin{itemize}
\item $A.1)$ There are constants $C_{2},C_{3}>0$, such that, they depend only on $n$ and $s$ and
$$
\begin{array}{ll}
&\int_{B_{2r}}\int_{B_{2r}}\frac{(u(x)-u(y))(\eta(x)-\eta(y))}{|x-y|^{n+2s}}dxdy \\
& \leq -C_{2}\int_{B_{2r}}\int_{B_{2r}}\left|\log\left(\frac{d+u(x)}{d+u(y)}\right)\right|^{2}\frac{1}{|x-y|^{n+2s}}\min\left\{\phi(y)^{2}, \phi(x)^{2}\right\}dxdy \\
&+C_{3}\int_{B_{2r}}\int_{B_{2r}}\frac{|\phi(x)-\phi(y)|^{2}}{|x-y|^{n+2s}}dxdy,
\end{array}
$$
where $\min\left\{a,b\right\} = a$ if $a \leq b$ and $\min\left\{a,b\right\} = b$ if $a \geq b$, for all $a,b \in \mathbb{R}$.
\end{itemize}
Fix $x,y \in B_{2r}$ and suppose that $u(x)>u(y)$. Define
$$
\epsilon = \delta\frac{u(x)-u(y)}{u(x)+d}
$$
where $\delta \in (0,1)$ is chosen small enough such that $\epsilon \in (0,1)$. Taking $a= \phi(x)$ and $b=\phi(y)$ in the Lemma \ref{lm42}, we get
$$
|\phi(x)|^{2}\leq |\phi(y)|^{2}+2 \delta\frac{u(x)-u(y)}{u(x)+d}|\phi(y)|^{2}+\left(\delta^{-1}\frac{u(x)+d}{u(x)-u(y)}+1\right)|\phi(x)-\phi(y)|^{2}
$$
Replacing
$$
\begin{array}{ll}
&\frac{(u(x)-u(y))(\eta(x)-\eta(y))}{|x-y|^{n+2s}}\\
&=(u(x)-u(y))\left(\frac{\phi^{2}(x)}{u(x)+d}-\frac{\phi^{2}(y)}{u(y)+d}\right)\frac{1}{|x-y|^{n+2s}}\\
&
\begin{array}{ll}
&\leq (u(x)-u(y))\left(\frac{ |\phi(y)|^{2}+2 \delta\frac{u(x)-u(y)}{u(x)+d}|\phi(y)|^{2}+\left(\delta^{-1}\frac{u(x)+d}{u(x)-u(y)}+1\right)|\phi(x)-\phi(y)|^{2}}{u(x)+d}\right.\\&
\left.-\frac{\phi^{2}(y)}{u(y)+d}\right)\frac{1}{|x-y|^{n+2s}}
\end{array}
\\ &
\begin{array}{ll}
&=(u(x)-u(y))\frac{|\phi(y)|^{2}}{u(x)+d}\left[ 1+ 2\delta\frac{u(x)-u(y)}{u(x)+d}+\left(\delta^{-1}\frac{u(x)+d}{u(x)-u(y)}+1\right)\frac{|\phi(x)-\phi(y)|^{2}}{|\phi(y)|^{2}}\right.\\
&\left.-\frac{u(x)+d}{u(y)+d}\right]\frac{1}{|x-y|^{n+2s}}
\end{array}
\\
&
\begin{array}{ll} &=(u(x)-u(y))\frac{|\phi(y)|^{2}}{u(x)+d}\frac{1}{|x-y|^{n+2s}}\left( 1+ 2\delta\frac{u(x)-u(y)}{u(x)+d}-\frac{u(x)+d}{u(y)+d}\right)\\
& + \left(\delta^{-1} + \frac{(u(x)-u(y))}{u(x)+d}\right)|\phi(x)-\phi(y)|^{2}\frac{1}{|x-y|^{n+2s}}
\end{array}
\\
&
\begin{array}{ll}
&\leq (u(x)-u(y))\frac{|\phi(y)|^{2}}{u(x)+d}\frac{1}{|x-y|^{n+2s}}\left( 1+ 2\delta\frac{u(x)-u(y)}{u(x)+d}-\frac{u(x)+d}{u(y)+d}\right) \\
&+ 2\delta^{-1}|\phi(x)-\phi(y)|^{2}\frac{1}{|x-y|^{n+2s}}.
\end{array}
\\
\end{array}
$$
We will rewrite the first part of the sum appearing on the right side of the last inequality
$$
\begin{array}{ll}
&(u(x)-u(y))\frac{|\phi(y)|^{2}}{u(x)+d}\frac{1}{|x-y|^{n+2s}}\left( 1+ 2\delta\frac{u(x)-u(y)}{u(x)+d}-\frac{u(x)+d}{u(y)+d}\right)\\
& = \left(\frac{u(x)-u(y)}{u(x)+d}\right)^{2}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}\left[ \frac{u(x)+d}{u(x)-u(y)}+ 2\delta-\frac{u(x)+d}{u(y)+d}\cdot\frac{u(x)+d}{u(x)-u(y)} \right]\\
& = \left(\frac{u(x)-u(y)}{u(x)+d}\right)^{2}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}\left[\frac{1-\frac{u(x)+d}{u(y)+d}}{1-\frac{u(y)+d}{u(x)+d}} + 2\delta \right].\\
\end{array}
$$
Define the function $g:(0,1) \rightarrow \mathbb{R}$ by
$$
g(t)= \frac{1-t^{-1}}{1-t}.
$$
It satisfies $g(t) \leq -\frac{1}{4}\frac{t^{-1}}{1-t}$ if $t \in \left(0,\frac{1}{2}\right]$ and $g(t)\leq -1$ for all $t \in (0,1)$. We have two cases. If $\frac{u(y)+d}{u(x)+d} \leq \frac{1}{2}$ then we conclude that
$$
\begin{array}{ll}
&\left(\frac{u(x)-u(y)}{u(x)+d}\right)^{2}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}\left[\frac{1-\frac{u(x)+d}{u(y)+d}}{1-\frac{u(y)+d}{u(x)+d}} + 2\delta \right]\\
& \leq \left(\frac{u(x)-u(y)}{u(x)+d}\right)^{2}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}\left[-\frac{1}{4}\frac{\frac{u(x)+d}{u(y)+d}}{\frac{u(x)-u(y)}{u(x)+d}} + 2\delta \right]\\
& = \frac{u(x)-u(y)}{u(x)+d}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}\left[-\frac{1}{4}\frac{u(x)+d}{u(y)+d} + 2\delta\frac{u(x)-u(y)}{u(x)+d} \right] \\
& = \frac{u(x)-u(y)}{u(y)+d}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}\left[-\frac{1}{4} + 2\delta\frac{(u(x)-u(y))(u(y)+d)}{(u(x)+d)^{2}} \right] \\
&\leq \frac{u(x)-u(y)}{u(y)+d}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}\left[-\frac{1}{4} + 2\delta \right].
\end{array}
$$
In the last inequality, we use that
$$
\frac{(u(x)-u(y))(u(y)+d)}{(u(x)+d)^{2}} \leq 1.
$$
Choosing $\delta=\frac{1}{16}$
we have
$$
\begin{array}{ll}
&\left(\frac{u(x)-u(y)}{u(x)+d}\right)^{2}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}\left[\frac{1-\frac{u(x)+d}{u(y)+d}}{1-\frac{u(y)+d}{u(x)+d}} + 2\delta \right]\\
& \leq -\frac{1}{8}\frac{u(x)-u(y)}{u(y)+d}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}\\
& \leq -\frac{1}{8}\left[\log\left(\frac{u(x)+d}{u(y)+d}\right)\right]^{2}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}.
\end{array}
$$
Above, we have used that
$
(\log(t))^{2}\leq t-1
$
for all $t\geq2$, and that $\frac{u(x)+d}{u(y)+d}\geq 2$.
But, if $ \frac{u(y)+d}{u(x)+d} > \frac{1}{2}$, then using that $g(t) \leq -1$ and that $\delta= \frac{1}{16}$
$$
\begin{array}{ll}
&\left(\frac{u(x)-u(y)}{u(x)+d}\right)^{2}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}\left[\frac{1-\frac{u(x)+d}{u(y)+d}}{1-\frac{u(y)+d}{u(x)+d}} + 2\delta \right]\\ &\leq \left(\frac{u(x)-u(y)}{u(x)+d}\right)^{2}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}\left[-1 + 2\delta \right]\\
& \leq-\frac{7}{8}\left(\frac{u(x)-u(y)}{u(x)+d}\right)^{2}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}} \\
&\leq-\frac{7}{32}\left[\log\left(\frac{u(x)+d}{u(y)+d}\right)\right]^{2}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}.
\end{array}
$$
Here, we have used that
$$
\begin{array}{ll}
\left[\log\left(\frac{u(x)+d}{u(y)+d}\right)\right]^{2} & = \left[\log\left(1+ \frac{u(x)-u(y)}{u(y)+d}\right)\right]^{2} \\
& \leq 4\left(\frac{u(x)-u(y)}{u(x)+d}\right)^{2}.
\end{array}
$$
This is a consequence of
$$
\log(1+t)\leq t
$$
for all $t>0$, and that
$$
t=\frac{u(x)-u(y)}{u(y)+d}=\frac{u(x)-u(y)}{u(x)+d}\cdot \frac{u(x)+d}{u(y)+d}\leq 2\frac{u(x)-u(y)}{u(x)+d}.
$$
In short
$$
\begin{array}{ll}
&(u(x)-u(y))\frac{|\phi(y)|^{2}}{u(x)+d}\frac{1}{|x-y|^{n+2s}}\left( 1+ 2\delta\frac{u(x)-u(y)}{u(x)+d}-\frac{u(x)+d}{u(y)+d}\right)\\
&\leq -\frac{1}{8}\left[\log\left(\frac{u(x)+d}{u(y)+d}\right)\right]^{2}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}.
\end{array}
$$
We have proved that, if $u(x)>u(y)$ then
$$
\begin{array}{ll}
&\frac{(u(x)-u(y))(\eta(x)-\eta(y))}{|x-y|^{n+2s}}\\
&\leq -\frac{1}{8}\left[\log\left(\frac{u(x)+d}{u(y)+d}\right)\right]^{2}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}} + 32|\phi(x)-\phi(y)|^{2}\frac{1}{|x-y|^{n+2s}}.
\end{array}
$$
Integrating in $B_{2r}$ the last inequality, we get
$$
\begin{array}{ll}
&\int_{B_{2r}}\int_{B_{2r}}\frac{(u(x)-u(y))(\eta(x)-\eta(y))}{|x-y|^{n+2s}}dxdy\\
&=\int_{B_{2r}}\int_{\left\{x;u(x)>u(y)\right\}}\frac{(u(x)-u(y))(\eta(x)-\eta(y))}{|x-y|^{n+2s}}dxdy \\
&+\int_{B_{2r}}\int_{\left\{x;u(x)<u(y)\right\}}\frac{(u(x)-u(y))(\eta(x)-\eta(y))}{|x-y|^{n+2s}}dxdy \\
& \leq -\frac{1}{8}\int_{B_{2r}}\int_{\left\{x;u(x)>u(y)\right\}}\left[\log\left(\frac{u(x)+d}{u(y)+d}\right)\right]^{2}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}dxdy \\
& -\frac{1}{8}\int_{B_{2r}}\int_{\left\{x;u(x)<u(y)\right\}}\left[\log\left(\frac{u(y)+d}{u(x)+d}\right)\right]^{2}\phi(x)^{2}\frac{1}{|x-y|^{n+2s}}dxdy \\
& + 32\int_{B_{2r}}\int_{B_{2r}}|\phi(x)-\phi(y)|^{2}\frac{1}{|x-y|^{n+2s}}dxdy. \\
\end{array}
$$
But, using that $\left|\log(x)\right| = \left|\log \left(\frac{1}{x}\right)\right|$ for all $x> 0$, we obtain that
$$
\left[\log\left(\frac{u(y)+d}{u(x)+d}\right)\right]^{2} = \left[\log\left(\frac{u(x)+d}{u(y)+d}\right)\right]^{2}.
$$
Replacing
$$
\begin{array}{ll}
&\int_{B_{2r}}\int_{B_{2r}}\frac{(u(x)-u(y))(\eta(x)-\eta(y))}{|x-y|^{n+2s}}dxdy\\
& \leq -\frac{1}{8}\int_{B_{2r}}\int_{\left\{x; u(x)>u(y)\right\}}\left[\log\left(\frac{u(x)+d}{u(y)+d}\right)\right]^{2}\phi(y)^{2}\frac{1}{|x-y|^{n+2s}}dxdy \\
& - \frac{1}{8}\int_{B_{2r}}\int_{\left\{x;u(x)<u(y)\right\}}\left[\log\left(\frac{u(x)+d}{u(y)+d}\right)\right]^{2}\phi(x)^{2}\frac{1}{|x-y|^{n+2s}}dxdy\\
& + 32\int_{B_{2r}}\int_{B_{2r}}|\phi(x)-\phi(y)|^{2}\frac{1}{|x-y|^{n+2s}}dxdy \\
& \leq -\frac{1}{8}\int_{B_{2r}}\int_{\left\{x; u(x)>u(y)\right\}}\left[\log\left(\frac{u(x)+d}{u(y)+d}\right)\right]^{2}\min{\left\{\phi(y)^{2},\phi(x)^{2}\right\}}\frac{1}{|x-y|^{n+2s}}dxdy \\
& - \frac{1}{8}\int_{B_{2r}}\int_{\left\{x;u(x)<u(y)\right\}}\left[\log\left(\frac{u(x)+d}{u(y)+d}\right)\right]^{2}\min{\left\{\phi(y)^{2},\phi(x)^{2}\right\}}\frac{1}{|x-y|^{n+2s}}dxdy\\
& + 32\int_{B_{2r}}\int_{B_{2r}}|\phi(x)-\phi(y)|^{2}\frac{1}{|x-y|^{n+2s}}dxdy \\
& = -\frac{1}{8}\int_{B_{2r}}\int_{B_{2r}}\left[\log\left(\frac{u(x)+d}{u(y)+d}\right)\right]^{2}\min{\left\{\phi(y)^{2},\phi(x)^{2}\right\}}\frac{1}{|x-y|^{n+2s}}dxdy\\
&+32\int_{B_{2r}}\int_{B_{2r}}|\phi(x)-\phi(y)|^{2}\frac{1}{|x-y|^{n+2s}}dxdy,
\end{array}
$$
Thus, we have proved claim $A.1)$.
\begin{itemize}
\item $A.2)$ There is a constant $C_{4}>0$, depending only on $s$ and $n$, such that
$$
\int_{\mathbb{R}^{n}-B_{2r}}\int_{B_{2r}}\frac{(u(x)-u(y))(\eta(x)-\eta(y))}{|x-y|^{n+2s}}dxdy \leq C_{4}r^{n-2s}.
$$
\end{itemize}
Indeed,
$$
\begin{array}{ll}
&\int_{\mathbb{R}^{n}-B_{2r}}\int_{B_{2r}}\frac{(u(x)-u(y))(\eta(x)-\eta(y))}{|x-y|^{n+2s}}dxdy\\
&=\int_{\mathbb{R}^{n}-B_{2r}}\int_{\mathbb{R}^{n}}(u(x)-u(y))\left(\frac{\phi^{2}(x)}{u(x)+d}-\frac{\phi^{2}(y)}{u(y)+d}\right)\frac{1}{|x-y|^{n+2s}}dxdy \\
& =\int_{\mathbb{R}^{n}-B_{2r}}\int_{\mathbb{R}^{n}}|\phi(x)|^{2}\frac{u(x)-u(y)}{u(x)+d}\frac{1}{|x-y|^{n+2s}}dxdy \\
& \leq \int_{\mathbb{R}^{n}-B_{2r}}\int_{\mathbb{R}^{n}}|\phi(x)|^{2}\frac{1}{|x-y|^{n+2s}}dxdy
\end{array}
$$
In the last inequality, we have used that $u(y)\geq 0$ and therefore
$$
\frac{u(x)-u(y)}{u(x)+d} \leq 1.
$$
A simple calculation shows that
$$
\begin{array}{ll}
&\int_{\mathbb{R}^{n}-B_{2r}}\int_{\mathbb{R}^{n}}|\phi(x)|^{2}\frac{1}{|x-y|^{n+2s}}dxdy
\leq C_{4}r^{n-2s}\\
\end{array}
$$
and $C_{4}$ depends only on $n$ and $s$. Therefore, we get assertion $A.2)$.
\begin{itemize}
\item $A.3)$ We claim that $$\int_{\mathbb{R}^{n}}a(x)u(x)\eta(x)dx \leq \int_{B_{2r}}a(x)dx.$$
\end{itemize}
Indeed,
$$
\begin{array}{ll}
\int_{\mathbb{R}^{n}}a(x)u(x)\eta(x)dx& = \int_{\mathbb{R}^{n}}a(x)u(x)\frac{\phi^{2}(x)}{u(x)+d}dx \\
& =\int_{B_{2r}}a(x)u(x)\frac{\phi^{2}(x)}{u(x)+d}dx \\
& =\int_{B_{2r}}a(x)\frac{u(x)}{u(x)+d} \phi^{2}(x)dx\\
& \leq \int_{B_{2r}}a(x)dx
\end{array}
$$
We have used that $\mathrm{supp}(\eta)\subset B_{2r}$, that $\phi(x)\in [0,1]$ and that $\frac{u(x)}{u(x)+d} \leq 1$.
The statements $A.1)$, $A.2)$ and $A.3)$ imply that
$$
\begin{array}{ll}
&\int_{B_{2r}}\int_{B_{2r}}\left[\log\left(\frac{u(x)+d}{u(y)+d}\right)\right]^{2}\min\left\{\phi(y)^{2}, \phi(x)^{2}\right\}\frac{1}{|x-y|^{n+2s}}dxdy \\
&\leq C_{5}\int_{B_{2r}}\int_{B_{2r}}\frac{|\phi(x)-\phi(y)|^{2}}{|x-y|^{n+2s}}dxdy +C_{6}r^{n-2s}+\int_{B_{2r}}a(x)dx.
\end{array}
$$
for constants $C_{5},C_{6}$. The constants $C_{5},C_{6}$ depend only on $n$ and $s$. But $\phi=1$ in $B_{r}$ implies that
\begin{equation}\label{eq1}
\begin{array}{ll}
&\int_{B_{r}}\int_{B_{r}}\left|\log\left(\frac{d+u(x)}{d+u(y)}\right)\right|^{2}\frac{1}{|x-y|^{n+2s}}dxdy \\
&\leq C_{5}\int_{B_{2r}}\int_{B_{2r}}\frac{|\phi(x)-\phi(y)|^{2}}{|x-y|^{n+2s}}dxdy +C_{6}r^{n-2s}+\int_{B_{2r}}a(x)dx
\end{array}
\end{equation}
Finally, we will show that
$$
\int_{B_{2r}}\int_{B_{2r}}\frac{|\phi(x)-\phi(y)|^{2}}{|x-y|^{n+2s}}dxdy \leq C_{7}r^{n-2s}
$$
By hypothesis
$$
\begin{array}{ll}
\int_{B_{2r}}\int_{B_{2r}}\frac{|\phi(x)-\phi(y)|^{2}}{|x-y|^{n+2s}}dxdy &\leq Kr^{-2}\int_{B_{2r}}\int_{B_{2r}}\frac{|x-y|^{2}}{|x-y|^{n+2s}}dxdy \\
&=Kr^{-2}\int_{B_{2r}}\int_{B_{2r}}\frac{1}{|x-y|^{n+2(s-1)}}dxdy \\
&\leq Kr^{-2}\frac{r^{2(1-s)}}{2(1-s)}|B_{2r}|=C_{7}r^{n-2s}
\end{array}
$$
where $C_{7}$ depends only on $n$ and $s$. Replacing this last estimate in (\ref{eq1}) we obtain the Lemma \ref{lm43}.
\end{proof}
Following the same ideas of Theorem A.1 in \cite{bf2}, we will prove the theorem stated at the beginning of the section.
\begin{theorem}\label{th44}
Suppose that $u \in H^{s}(\mathbb{R}^{n})$ and $a\geq 0$ with $a \in L^{1}_{loc}(\mathbb{R}^{n})$. We will assume that
$$
\int_{\mathbb{R}^{n}}\int_{\mathbb{R}^{n}}\frac{(u(x)-u(y))\left(v(x)-v(y)\right)}{|x-y|^{n+2s}}dxdy+ \int_{\mathbb{R}^{n}}a(x)u(x)v(x)dx\geq 0,
$$
for all $v \in H^{s}(\mathbb{R}^{n})$ with $v \geq 0$ almost everywhere.
Then $u > 0$ almost everywhere in $\mathbb{R}^{n}$ or $u=0$ almost everywhere in $\mathbb{R}^{n}$.
\end{theorem}
\begin{proof}
By Lemma \ref{lm41}, $u \geq 0$. Suppose that $x_{0}\in \mathbb{R}^{n}$ and $r>0$. Define
$$
Z:=\{x \in B_{r}(x_{0}); u(x)=0\}.
$$
If $|Z|>0$, then we define
$$
\begin{array}{cccl}
F_{\delta}:&B_{r}(x_{0})& \longrightarrow & \mathbb{R} \\
&x& \longmapsto & \log\left(1+\frac{u(x)}{\delta}\right)
\end{array}
$$
for all $\delta>0$.
We have $F_{\delta}(y)=0$ for all $y\in Z$. Therefore, if $x \in B_{r}(x_{0})$ and $y \in Z$
$$
|F_{\delta}(x)|^{2} = \frac{|F_{\delta}(x)-F_{\delta}(y)|^{2}}{|x-y|^{n+2s}}|x-y|^{n+2s}
$$
Integrating with respect to $ y \in Z $, and using that $|x-y|\leq 2r$ for $x,y \in B_{r}(x_{0})$, we get
$$
\begin{array}{ll}
|Z||F_{\delta}(x)|^{2} &= \int_{Z}\frac{|F_{\delta}(x)-F_{\delta}(y)|^{2}}{|x-y|^{n+2s}}|x-y|^{n+2s}dy \\
&\leq (2r)^{n+2s}\int_{Z}\frac{|F_{\delta}(x)-F_{\delta}(y)|^{2}}{|x-y|^{n+2s}}dy
\end{array}
$$
Now, integrating with respect to $x \in B_{r}$ we get
$$
\begin{array}{ll}
\int_{B_{r}(x_{0})}|F_{\delta}(x)|^{2}dx & \leq \frac{1}{|Z|} (2r)^{n+2s}\int_{B_{r}(x_{0})}\int_{Z}\frac{|F_{\delta}(x)-F_{\delta}(y)|^{2}}{|x-y|^{n+2s}}dydx
\\ &\leq \frac{1}{|Z|} (2r)^{n+2s}\int_{B_{r}(x_{0})}\int_{B_{r}(x_{0})}\frac{|F_{\delta}(x)-F_{\delta}(y)|^{2}}{|x-y|^{n+2s}}dydx \\
& = \frac{1}{|Z|} (2r)^{n+2s}\int_{B_{r}(x_{0})}\int_{B_{r}(x_{0})}\left|\log \left(\frac{\delta+u(x)}{\delta+u(y)}\right)\right|^{2}\frac{1}{|x-y|^{n+2s}}dxdy \\
& \leq \frac{1}{|Z|} (2r)^{n+2s}\left(Cr^{n-2s}+\int_{B_{2r}}a(x)dx\right) \\
& = \frac{2^{n+2s}C}{|Z|} r^{2n}+ \frac{(2r)^{n+2s}}{|Z|} \int_{B_{2r}}a(x)dx:=L.
\end{array}
$$
The number $L$ does not depend on $\delta$. In short, we have proved that
$$
\int_{B_{r}(x_{0})}\left|\log\left(1+\frac{u(x)}{\delta}\right)\right|^{2}dx \leq C
$$
for some constant $C>0$ which does not depend on $\delta$. If $u(x) \neq 0$ then $F_{\delta}(x) \rightarrow \infty$ as $\delta \rightarrow 0$. By Fatou's lemma, if $|B_{r} \cap Z^{c}|>0$,
$$
+\infty \leq \liminf_{\delta \rightarrow 0}\int_{B_{r}\cap Z^{c}}|F_{\delta}(x)|^{2}dx \leq C.
$$
Therefore $|Z|=|B_{r}|$ and $u=0$ almost everywhere in $B_{r}(x_{0})$. Now, we define
$$
A=\left\{B_{r}(x); r>0,\ x \in \mathbb{R}^{n},\ u>0 \mbox{ in } B_{r}(x)\right\}
$$
$$
B=\left\{B_{r}(x); r>0,\ x \in \mathbb{R}^{n},\ u=0 \mbox{ in } B_{r}(x)\right\}
$$
$$
S=\bigcup_{V\in A}V
$$
and
$$
W=\bigcup_{V\in B}V
$$
$S$ and $W$ are open sets. Consider $x \in \mathbb{R}^{n}$ and $r>0$. We have two possibilities, either $u \neq 0$ in $B_{r}(x)$ or $u=0$ in $B_{r}(x)$. If $u \neq 0$ in $B_{r}$ then $u>0$ in $B_{r}$. In this case, $x \in S$. If $u=0$ in $B_{r}(x)$, then $x \in W$. Consequently
$$
\mathbb{R}^{n}= S \cup W.
$$
By connectedness, we should have $S=\emptyset$ or $W=\emptyset$. If $\mathbb{R}^{n}=S$ then $u>0$ almost everywhere in $\mathbb{R}^{n}$. If $\mathbb{R}^{n}=W$ then $u=0$ almost everywhere in $\mathbb{R}^{n}$.
\end{proof}
\begin{corollary}\label{cl45}
The solution $u$ found in Theorem \ref{th31} is positive in the following sense: $u>0$ almost everywhere in $\mathbb{R}^{3}$.
\end{corollary}
\begin{proof}
For every $v\in H^{s}(\mathbb{R}^{3})$ with $v \geq0$ almost everywhere, we have
$$
\begin{array}{ll}
\frac{\zeta}{2}\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}\frac{(u(x)-u(y))(v(x)-v(y))}{|x-y|^{3+2s}}dxdy&+ \int_{\mathbb{R}^{3}}V(x)uvdx \\&+\int_{\mathbb{R}^{3}}\phi_{u}uvdx=\int_{\mathbb{R}^{3}}g(u)vdx \geq 0.
\end{array}
$$
If we define $a(x)=\frac{2}{\zeta}(V(x)+\phi_{u}(x))$, we have that $a \in L^{1}_{loc}(\mathbb{R}^{3})$, because $L^{2^{\ast}_{t}}(\mathbb{R}^{3})\subset L^{1}_{loc}(\mathbb{R}^{3})$ and $V$ is continuous. By $(V_{0})$ and Lemma \ref{lm1} we have $a(x)>0$ in $\mathbb{R}^{3}$.
Thereby,
$$
\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}\frac{(u(x)-u(y))(v(x)-v(y))}{|x-y|^{3+2s}}dxdy + \int_{\mathbb{R}^{3}}a(x)uvdx \geq 0.
$$
for all $v\in H^{s}(\mathbb{R}^{3})$ with $v\geq0$.
But $u \neq 0$. Then, Theorem \ref{th44} implies that $u>0$ almost everywhere in $\mathbb{R}^{3}$.
\end{proof}
\begin{remark}\label{rm46}
Define $\mathcal{N}=\left\{u \in H^{s}(\mathbb{R}^{3})\setminus\left\{0\right\};I'(u)u=0\right\}$, where
$$
\begin{array}{ll}
\begin{array}{ll}I(u)=\frac{\zeta(s)}{4}\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}&\frac{(u(x)-u(y))^{2}}{|x-y|^{3+2s}}dxdy+\frac{1}{2}\int_{\mathbb{R}^{3}}V(x)u^{2}dx\\
&+\frac{1}{4}\int_{\mathbb{R}^{3}}\phi_{u}u^{2}dx-\int_{\mathbb{R}^{3}}F(u)dx.
\end{array}
\end{array}
$$
If $f$ satisfies $(f_{1})-(f_{5})$ then
$$
I_{\infty}=\inf_{u\in \mathcal{N}} I(u)
$$
coincides with the mountain pass level associated with $I$.
\end{remark}
\begin{theorem}
If $f$ satisfies $(f_{1})-(f_{5})$ and $V$ satisfies $(V_{0})$ and $(V_{1})$, then the problem $(P)$ has a ground state solution.
\end{theorem}
\begin{proof}
Taking the following Euler-Lagrange functional
$$
\begin{array}{cccl}
I:&H^{s}(\mathbb{R}^{3})&\longrightarrow&\mathbb{R}\\
&u& \longmapsto &
\begin{array}{ll}\frac{\zeta(s)}{4}\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}&\frac{(u(x)-u(y))^{2}}{|x-y|^{3+2s}}dxdy+\frac{1}{2}\int_{\mathbb{R}^{3}}V(x)u^{2}dx\\
&+\frac{1}{4}\int_{\mathbb{R}^{3}}\phi_{u}u^{2}dx-\int_{\mathbb{R}^{3}}F(u)dx,
\end{array}
\end{array}
$$
and following with the same ideas of Theorem \ref{th31}, we prove that there is a nonzero solution $u$ to the system $(P)$. Also, we prove that there is a Cerami's sequence $\left\{w_{n}\right\}$ in the mountain pass level associated with $I$ converging to $u$. By Remark \ref{rm1} and Fatou's lemma
$$
\begin{array}{ll}
4c&= \liminf_{n \rightarrow \infty}\left(4I(w_{n})-I'(w_{n})w_{n}\right) \\
& = \liminf_{n \rightarrow \infty}\left(||w_{n}||^{2}+ \int_{\mathbb{R}^{3}}H(w_{n})dx\right) \\
& \geq \liminf_{n \rightarrow \infty}||w_{n}||^{2}+ \liminf_{n \rightarrow \infty}\int_{\mathbb{R}^{3}}H(w_{n})dx \\
& \geq ||u||^{2}+\int_{\mathbb{R}^{3}}H(u)dx \\
& = 4I(u)-I'(u)u \\
& = 4I(u),
\end{array}
$$
where $H(u)=uf(u)-4F(u)$.
By definition $u \in \mathcal{N}$. Then $I(u) \leq \inf_{u\in \mathcal{N}}I(u)$. By Remark \ref{rm46}
$$
I(u)=\inf_{u\in \mathcal{N}}I(u).
$$
\end{proof}
\section{Asymptotically Periodic Potential}
In this section, we study the problem $(P)$, when we consider $V$ satisfying the condition $(V_{0})$ and
\begin{itemize}
\item[$(V_3)$\ ] There is a function $V_{p}$ satisfying $(V_{1})$ such that
$$
\lim_{|x|\rightarrow \infty}|V(x)-V_{p}(x)|=0;
$$
\item[$(V_4)$\ ] $V(x)\leq V_{p}(x)$ and there is an open set $\Omega \subset \mathbb{R}^{3}$ with $|\Omega|>0$ and $V(x)< V_{p}(x)$ in $\Omega$.
\end{itemize}
Here $V_{p}$ is a periodic continuous potential. This case follows the same ideas already studied in Schr\"odinger-Poisson systems with asymptotically periodic potential in \cite{amsss}. We include this case to make the work more complete for the reader.
\begin{theorem}
Suppose that $V$ satisfies $(V_{0})$, $(V_{3})$, $(V_{4})$ and $f$ satisfies $(f_{1})-(f_{5})$. Then, the problem $(P)$ has a ground state solution.
\end{theorem}
\begin{proof}
We can define in $H^{s}(\mathbb{R}^{3})$ the norm,
$$
||u||_{p}=\left(\frac{\zeta}{2}\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}\frac{(u(x)-u(y))^{2}}{|x-y|^{3+2s}}dxdy+\int_{\mathbb{R}^{3}}V_{p}(x)u^{2}dx\right)^{\frac{1}{2}}.
$$
Consider the functional $I_{p}$
$$
I_{p}(u)=\frac{1}{2}||u||_{p}^{2}+\frac{1}{4}\int_{\mathbb{R}^{3}}\phi_{u}u^{2}dx - \int_{\mathbb{R}^{3}}F(u)dx.
$$
We claim that there is $w_{p} \in H^{s}(\mathbb{R}^{3})$ such that $I_{p}'(w_{p})=0$ and $I_{p}(w_{p})=c_{p}$, where $c_{p}$ is the mountain pass level associated with $I_{p}$.
We will consider another norm in $H^{s}(\mathbb{R}^{3})$.
$$
||u||=\left(\frac{\zeta}{2}\int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}\frac{(u(x)-u(y))^{2}}{|x-y|^{3+2s}}dxdy+\int_{\mathbb{R}^{3}}V(x)u^{2}dx\right)^{\frac{1}{2}}.
$$
Then, we define
$$
I(u)=\frac{1}{2}||u||^{2}+\frac{1}{4}\int_{\mathbb{R}^{3}}\phi_{u}u^{2}dx - \int_{\mathbb{R}^{3}}F(u)dx.
$$
The functional $I$ has a mountain pass geometry. If $c$ is the mountain pass level associated with $I$ then $c<c_{p}$. Indeed, there is a $t_{\ast}$ such that $t_{\ast}w_{p} \in \mathcal{N}$ (see remark \ref{rm46}) and it is the unique with this property. Then
$$
\begin{array}{ll}
c &\leq I(t_{\ast}w_{p})\\
&<I_{p}(t_{\ast}w_{p}) \\
& \leq \max_{t\geq 0}I_{p}(tw_{p}) \\
& = I_{p}(w_{p})=c_{p}.
\end{array}
$$ Consider $\{u_{n}\}_{n \in \mathbb{N}}$ a Cerami's sequence at the mountain pass level $c$ associated with $I$. Similarly to the periodic case, we prove that the sequence $\{u_{n}\}$ is bounded and therefore, converges weakly to $u \in H^{s}(\mathbb{R}^{3})$. Additionally $I'(u)=0$. Now we will prove that $u \neq 0$. Suppose that $u=0$.
Regarding the sequence $\left\{u_{n}\right\}$, the following equalities are true
$\newline$
\begin{enumerate}
\item $\lim\limits_{n \rightarrow \infty}\int_{\mathbb{R}^{3}}|V(x)-V_{p}(x)|u_{n}^{2}dx= 0$
\item $\lim\limits_{n \rightarrow \infty}|||u_{n}||-||u_{n}||_{p}|=0$.
\item $\lim\limits_{n \rightarrow \infty}|I_{p}(u_{n})-I(u_{n})|=0$
\item $\lim\limits_{n \rightarrow \infty}|I_{p}'(u_{n})u_{n}-I'(u_{n})u_{n}|=0$
\end{enumerate}
We will prove (1). The limits (2), (3) and (4) are immediate consequences of (1). Consider $\epsilon>0$ and $A>0$ such that $||u_{n}||_{2}^{2}< A$ for all $n \in \mathbb{N}$. By $(V_{3})$, there is $R>0$ such that, for all $|x|>R$ we have
$$
|V(x)-V_{p}(x)|< \frac{\epsilon}{2A}.
$$
But $\{u_{n}\}$ converges weakly to $u=0$. Then $u_{n}\rightarrow 0$ in $L^{2}(B_{R}(0))$. This convergence implies that there is $n_{0} \in \mathbb{N}$ such that
$$
\int_{B_{R}(0)}|V(x)-V_{p}(x)|u_{n}^{2}dx< \frac{\epsilon}{2}
$$
for all $n \geq n_{0}$. Then, if $n\geq n_{0}$
$$
\begin{array}{ll}
&\int_{\mathbb{R}^{3}}|V(x)-V_{p}(x)|u_{n}^{2}dx\\
&=\int_{B_{R}(0)}|V(x)-V_{p}(x)|u_{n}^{2}dx+\int_{(B_{R}(0))^{c}}|V(x)-V_{p}(x)|u_{n}^{2}dx \\
&<\frac{\epsilon}{2}+\frac{\epsilon}{2}=\epsilon.
\end{array}
$$
\newline
Consider $s_{n}>0$ such that
$$
s_{n}u_{n} \in \mathcal{N}_{p}
$$
for every $n \in \mathbb{N}$, where $\mathcal{N}_{p}=\left\{u \in H^{s}(\mathbb{R}^{3})\setminus\left\{0\right\};I_{p}'(u)u=0\right\}$. We claim that $\limsup_{n \rightarrow \infty}s_{n}\leq 1$. Indeed, otherwise, there is $\delta>0$ such that, passing to a subsequence if necessary, we can assume that $s_{n}\geq 1+\delta$ for all $n \in \mathbb{N}$. By $(4)$ we have $I_{p}'(u_{n})u_{n}\rightarrow 0$, that is,
$$
\begin{array}{ll}
||u_{n}||_{p}^{2}+\int_{\mathbb{R}^{3}}\phi_{u_{n}}u_{n}^{2}dx = \int_{\mathbb{R}^{3}}f(u_{n})u_{n}dx+o_{n}(1)
\end{array}
$$
From $s_{n}u_{n}\in \mathcal{N}_{p}$ we have $I_{p}'(s_{n}u_{n})u_{n}=0.$ Equivalently
$$
s_{n}||u_{n}||_{p}^{2}+s_{n}^{3}\int_{\mathbb{R}^{3}}\phi_{u_{n}}u_{n}^{2}dx = \int_{\mathbb{R}^{3}}f(s_{n}u_{n})u_{n}dx
$$
Therefore
\begin{equation}\label{eq53}
\int_{\mathbb{R}^{3}}\left[\frac{f(s_{n}u_{n})}{(s_{n}u_{n})^{3}}-\frac{f(u_{n})}{(u_{n})^{3}}\right]u_{n}^{4}dx = \left(\frac{1}{s_{n}^{2}}-1\right)||u_{n}||_{p}^{2}+o_{n}(1) \leq o_{n}(1).
\end{equation}
If $\{u_{n}\}_{n \in \mathbb{N}}$ converges to $0$ in $L^{q}(\mathbb{R}^{3})$ for all $q \in (2,2^{\ast}_{s})$, then by Lemma \ref{lm1}
$$
||u_{n}||^{2}\leq||u_{n}||^{2}+\int_{\mathbb{R}^{3}}\phi_{u_{n}}u_{n}^{2}dx = \int_{\mathbb{R}^{3}}f(u_{n})u_{n}dx+I'(u_{n})u_{n}
$$
consequently $\{u_{n}\}$ would have limit $0$ in $H^{s}(\mathbb{R}^{3})$ and this would contradict the fact that $c>0$. Therefore, there is a sequence $\{y_{n}\} \subset \mathbb{Z}^{3}$, $R>0$ and $\beta>0$ such that
$$
\int_{B_{R}(y_{n})}u_{n}^{2}dx \geq \beta>0
$$
Taking $v_{n}(x):=u_{n}(x+y_{n})$ we have $||v_{n}||=||u_{n}||$ and therefore we can assume that $\{v_{n}\}_{n \in \mathbb{N}}$ converges weakly to some $v \in H^{s}(\mathbb{R}^{3})$. Note that
$$
\int_{B_{R}(0)}v^{2}dx \geq \beta>0
$$
The inequality $(\ref{eq53})$, Remark \ref{rm1} and Fatou's lemma imply that
$$
\begin{array}{ll}
0<&\int_{\mathbb{R}^{3}}\left[\frac{f((1+\delta)v)}{((1+\delta)v)^{3}}-\frac{f(v)}{(v)^{3}}\right]v^{4}dx \\
& \leq \liminf_{n \rightarrow \infty}\int_{\mathbb{R}^{3}}\left[\frac{f((1+\delta)v_{n})}{((1+\delta)v_{n})^{3}}-\frac{f(v_{n})}{(v_{n})^{3}}\right]v_{n}^{4}dx \\
& \leq \liminf_{n \rightarrow \infty}\int_{\mathbb{R}^{3}}\left[\frac{f(s_{n}v_{n})}{(s_{n}v_{n})^{3}}-\frac{f(v_{n})}{(v_{n})^{3}}\right]v_{n}^{4}dx \\
& = \liminf_{n \rightarrow \infty}\int_{\mathbb{R}^{3}}\left[\frac{f(s_{n}u_{n})}{(s_{n}u_{n})^{3}}-\frac{f(u_{n})}{(u_{n})^{3}}\right]u_{n}^{4}dx \\
& \leq \liminf_{n \rightarrow \infty}o_{n}(1)=0.
\end{array}
$$
The last inequality is a contradiction. Therefore $\limsup_{n \rightarrow \infty}s_{n}\leq 1$. Now, we will prove that for $n$ large enough, $s_{n}> 1$. Suppose that the statement is false. In this case, passing to a subsequence if necessary, we can assume that $s_{n}\leq 1$ for all $n \in \mathbb{N}$. Note that by $(f_{5})$, the function $H(u):=uf(u)-4F(u)$ is increasing in $|u| \neq 0$. Then
$$
\begin{array}{ll}
4c_{p}&=4\inf_{u \in N_{p}}I_{p}(u) \\
& \leq 4I_{p}(s_{n}u_{n}) \\
& = 4I_{p}(s_{n}u_{n}) - I_{p}'(s_{n}u_{n})(s_{n}u_{n})\\
& = s_{n}^{2}||u_{n}||_{p}^{2}+\int_{\mathbb{R}^{3}}f(s_{n}u_{n})(s_{n}u_{n})-4F(s_{n}u_{n})dx \\
& \leq ||u_{n}||_{p}^{2}+\int_{\mathbb{R}^{3}}f(u_{n})(u_{n})-4F(u_{n})dx \\
& \leq 4I(u_{n})-I'(u_{n})u_{n}+\int_{\mathbb{R}^{3}}|V(x)-V_{p}(x)|u_{n}^{2}dx.
\end{array}
$$
This implies that
$$
4c_{p} \leq 4c.
$$
But, this last inequality is false, because we have proved that $c<c_{p}$. Therefore, we have that $s_{n}> 1$ for $n$ large enough. Then, about the sequence $\left\{s_{n}\right\}$ we have proved that
$$
1 \leq \liminf_{n \rightarrow \infty}s_{n} \leq \limsup_{n \rightarrow \infty}s_{n} \leq 1
$$
and therefore
\begin{equation}\label{eq54}
\lim\limits_{n \rightarrow \infty}s_{n}=1.
\end{equation}
The Fundamental Theorem of Calculus implies that
\begin{equation}\label{eq55}
\begin{array}{ll}
\int_{\mathbb{R}^{3}}F(s_{n}u_{n})dx-\int_{\mathbb{R}^{3}}F(u_{n})dx = \int_{1}^{s_{n}}\left[\int_{\mathbb{R}^{3}}f(\tau u_{n})u_{n}dx \right] d \tau.
\end{array}
\end{equation}
Also, by $(f_{3})$ we obtain $C>0$ such that
\begin{equation}\label{eq56}
\int_{\mathbb{R}^{3}}f(\tau u_{n})u_{n}dx \leq C(s_{n}||u_{n}||^{2}+s_{n}^{p-1}||u_{n}||^{p}).
\end{equation}
for all $\tau \in (1,s_{n})$.
We have that the sequence $\{u_{n}\}$ is bounded. Then, by (\ref{eq54}), (\ref{eq55}) and (\ref{eq56})
$$
\int_{\mathbb{R}^{3}}F(s_{n}u_{n})dx-\int_{\mathbb{R}^{3}}F(u_{n})dx = o_{n}(1).
$$
Then
$$
\begin{array}{ll}
&I_{p}(s_{n}u_{n})-I_{p}(u_{n})\\
& = \frac{(s_{n}^{2}-1)}{2}||u_{n}||^{2}+\frac{(s_{n}^{4}-1)}{4}\int_{\mathbb{R}^{3}}\phi_{u_{n}}u_{n}^{2}dx - \int_{\mathbb{R}^{3}}F(s_{n}u_{n})dx+\int_{\mathbb{R}^{3}}F(u_{n})dx \\
& = o_{n}(1)
\end{array}
$$
because $\{u_{n}\}$ is bounded and
$\int_{\mathbb{R}^{3}}\phi_{u_{n}}u_{n}^{2}dx = ||\phi_{u_{n}}||_{\dot{H}^{t}(\mathbb{R}^{3})}^{2} \leq C||u_{n}||^{4}$.
By $(3)$
$$
\begin{array}{ll}
c_{p}&\leq I_{p}(s_{n}u_{n}) \\
& = I_{p}(u_{n})+o_{n}(1)\\
&= I(u_{n})+o_{n}(1).
\end{array}
$$
Taking $n \rightarrow \infty$ we obtain
$$
c_{p} \leq c
$$
But, this last inequality is false, because we have proved that $c<c_{p}$. This contradiction was generated because we assumed that $u = 0$. It follows that $u$ is nontrivial.
Particularly
$$
I(u) \geq \inf_{u \in \mathcal{N}}I(u).
$$
As in the periodic case
$$
I(u)\leq c=\inf_{u \in \mathcal{N}}I(u).
$$
Therefore $u$ is a ground state solution for the system $(P)$.
\end{proof}
\begin{bibdiv}
\begin{biblist}
\bib{as}{article}{
author={Alves, C.},
author={Souto, M.},
title={On existence of solution for a class of semilinear elliptic equations with nonlinearities that lie between two different powers },
journal={Abst. and Appl. Analysis},
volume={ID 578417},
date={2008},
pages={1--7},
review={ }
}
\bib{amsss}{article}{
author={Alves, C.},
author={Souto, M.},
author={Soares, S.},
title={Schr\"odinger-Poisson equations without Ambrosetti-Rabinowitz condition},
journal={J. Math. Anal Appl},
volume={377},
date={2011},
pages={584--592},
review={ }
}
\bib{ap}{article}{
author={Azzollini, A.},
author={Pomponio, A.},
title={Ground state solutions for the nonlinear Schr\"odinger-Maxwell equations },
journal={J. Math. Appl.},
volume={14},
date={2008},
pages={--},
review={doi:10.1016/jmaa.2008.03.057 }
}
\bib{bf2}{article}{
author={Brasco, L.},
author={Franzina, G.},
title={Convexity Properties of Dirichlet Integrals and Picone-Type Inequalities},
journal={Kodai Math. J.},
volume={37},
date={2014},
pages={769-799},
review={}
}
\bib{dkp}{article}{
author={Di Castro, A.},
author={Kuusi, T.},
author={Palatucci, G.},
title={Local behavior of fractional p-minimizers},
journal={Annales de l'Institut Henri Poincare (C) Non Linear Analysis},
volume={},
date={2015},
pages={},
review={}
}
\bib{dpv}{article}{
author={Di Nezza, E.},
author={Palatucci, G.},
author={Valdinoci, E.},
title={Hitchhiker's guide to the fractional Sobolev spaces},
journal={Bull. Sci. Math.},
volume={136},
date={2012},
pages={512-573},
review={ }
}
\bib{Ek}{article}{
author={Ekeland, I.},
title={Convexity Methods in Hamilton Mechanics},
journal={Springer Verlag},
volume={},
date={1990},
pages={},
review={}
}
\bib{W}{article}{
author={Evans, L. C.},
title={Partial Differential Equations},
journal={American Mathematical Society},
date={2010}
}
\bib{G}{article}{
author={Gaetano, S.},
title={Multiple positive solutions for a Schr\"odinger-Poisson-Slater system },
journal={J. Math. Analysis and Appl., Issue 1},
volume={365},
date={2010},
pages={288--299},
review={doi:10.1016/j.jmaa.2009.10.061 }
}
\bib{gsd}{article}{
author={Gaetano, S.},
author={Squassina, M.},
author={D'avenia, P.},
title={On Fractional Choquard Equations },
journal={Math. Models Methods Appl. Sci.},
volume={25},
date={2015},
pages={1447-1476},
review={}
}
\bib{bf}{article}{
author={Giammetta, A. R.},
title={Fractional Schr\"odinger-Poisson-Slater system in one dimension},
journal={arXiv:1405.2796v1.},
volume={},
date={},
pages={},
review={}
}
\bib{jj}{article}{
author={Jeanjean, L.},
title={On the existence of bounded Palais-Smale sequences and application to a Landesman-Lazer type problem set on $\mathbb R^3$},
journal={Proc. Roy. Soc. Edinburgh, A},
volume={129},
date={1999},
pages={787--809},
review={}
}
\bib{L1}{article}{
author={Lions, P. L.},
title={The concentration-compactness principle in the calculus of variations. The locally compact case, part 2},
journal={Analyse Nonlinéaire},
volume={I},
date={1984},
pages={223--283},
review={}
}
\bib{Ruiz}{article}{
author={Ruiz, D.},
title={The Schr\"odinger-Poisson equation under the effect of a nonlinear local term },
journal={J. Funct. Analysis},
volume={237},
date={2006},
pages={655--674},
review={doi:10.1016/j.jfa.2006.04.005 }
}
\bib{W}{article}{
author={Willem, M.},
title={Minimax Theorems},
journal={Birkhauser},
date={1986}}
\bib{zhang}{article}{
author={Zhang, J.},
title={Existence and Multiplicity results for the Fractional Schr\"odinger-Poisson Systems},
journal={arXiv:1507.01205v1.},
volume={},
date={},
pages={},
review={ }
}
\bib{zjs}{article}{
author={Zhang, J.},
author={Do Ó, J. M.},
author={Squassina, M.},
title={Fractional Schr\"odinger-Poisson Systems with a general subcritical or critical nonlinearity},
journal={Adv. Nonlinear Stud.},
volume={16},
date={2016},
pages={15-30},
review={}
}
\bib{zjs2}{article}{
author={Zhang, J.},
author={Do Ó, J. M.},
author={Squassina, M.},
title={Schr\"odinger-Poisson systems with a general critical nonlinearity},
journal={Communications in Contemporary Mathematics},
volume={},
date={2015},
pages={},
review={}
}
\bib{zz}{article}{
author={Zhao, F.},
author={Zhao, L.},
title={Positive solutions for the nonlinear Schr\"odinger-Poisson equations with the critical exponent },
journal={Nonlinear Analysis, Theory, Meth. and Appl.},
volume={},
date={2008},
pages={--},
review={doi:10.1016/na.2008.02.116 }
}
\end{biblist}
\end{bibdiv}
\end{document} |
\begin{document}
\title{Direct measurement of large-scale quantum states}
\textbf{In quantum mechanics, predictions are made by way of calculating expectation values of observables, which take the form of Hermitian operators. It is far less common to exploit non-Hermitian operators to perform measurements. Here, we show that the expectation values of a particular set of non-Hermitian matrices, which we call column operators, directly yield the complex coefficients of a quantum state vector. We provide a definition of the state vector in terms of measurable quantities by decomposing the column operators into observables. The technique we propose renders very-large-scale quantum states significantly more accessible in the laboratory, as we demonstrate by experimentally characterising a 100 000-dimensional entangled state. This represents an improvement of two orders of magnitude with respect to previous characterisations of discrete entangled states. Furthermore, in numerical studies, we consider mixed quantum states and show that for purities greater than 0.81, we can reliably extract the most significant eigenvector of the density matrix with a probability greater than 99\%. We anticipate that our method will prove to be a useful asset in the quest for understanding and manipulating large-scale quantum systems.}
\paragraph{} One of the current challenges in the field of computing is harnessing the potential processing power provided by quantum devices that exploit entanglement. Experimental research aimed at overcoming this challenge is driven by the production, control and detection of larger and larger entangled quantum states \cite{Monz:2011,Wong:2012,Yokoyama:2013,Krenn:2014}. However, the task of characterising these entangled states quickly becomes intractable as the number of parameters that define a many-body system scales exponentially with the system size. To keep up with the ever-growing quantum state dimensionality, much effort is put into developing efficient characterisation methods \cite{Smith:2005,Banaszek:2013,Flammia:2005,Cramer:2010,Bogdanov:2010,Toth:2010,Gross:2010,Mahler:2013,Schwemmer:2014,Shabani:2011,Teo:2013,Tonolini:2014,Lloyd:2014,Ferrie:2014,Lundeen:2011}.
\paragraph{} Quantum state tomography is the process of retrieving the values that define a quantum system. The process typically involves two steps: i) gathering an informationally complete set of data and ii) finding the quantum state most consistent with the data set using post-measurement processing such as the maximum likelihood estimation algorithm \cite{Banaszek:1999}. Many efficient tomographic methods capitalize on the first step by making simplifying assumptions about the state\cite{Flammia:2005,Cramer:2010,Toth:2010,Schwemmer:2014,Gross:2010,Shabani:2011,Tonolini:2014,Lloyd:2014,Lundeen:2011}, thus reducing the number of measurements required to uniquely identify it. In particular, tomography via compressed sensing allows one to efficiently reconstruct quantum states based on the fact that low-rank density matrices, i.e.~quasi-pure states, are sparse in a particular basis \cite{Gross:2010,Liu:2012,Schwemmer:2014,Tonolini:2014}. Compared to assumption-free tomography, compressive sensing provides a square-root improvement on the required number of measurements \cite{Banaszek:2013}. This improvement enabled the reconstruction of the density matrices of a 6-qubit state \cite{Schwemmer:2014} and a (17$\times$17)-dimensional state \cite{Tonolini:2014}, the largest phase-and-amplitude measurement of an entangled state reported to date. Although compressed sensing does not make use of maximum likelihood estimation, it does require non-trivial post-measurement processing.
\paragraph{} Recently, Lundeen \textit{et al.}~reported on the direct measurement of a wavefunction using a method that, for the first time, required no involved post-measurement processing \cite{Lundeen:2011}. Their method is based on weak measurements, whereby one weakly couples a quantum system to a pointer state and subsequently performs a few standard strong measurements on the pointer state. The outcome of a weak measurement is known as the ``weak value'', and in the conditions exposed in Ref. \cite{Lundeen:2011} the weak value is proportional to a given state vector coefficient. The method of Lundeen \textit{et al.}~can be used in combination with the assumption that the quantum state at hand is pure, providing the same square-root improvement as compressed sensing. Variations on the original scheme allow measurements of mixed states and increased detection efficiency \cite{Bamber:2014,Salvail:2013,Wu:2013gb}.
\paragraph{} An important contribution of the work by Lundeen \textit{et al.}~was to link the state vector elements to the expectation value of weak measurements. We take a different approach, and point out that the enabling feature that allows access to the complex state vector is not weak measurement but the use of particular non-Hermitian operators. Although weak measurements provide a way to decompose these non-Hermitian operators, it is not the only suitable approach. Moreover, the introduction of weak values in the measurement procedure adds complexity to the experiment and the formalism that links weak values to measurement outcomes involves an approximation that breaks down in a variety of circumstances \cite{Duck:1989,Salvail:2013,Malik:2013}.
\paragraph{} In this paper, we propose an alternative approach to the direct measurement of quantum states that is exact in the case of pure states, proves to be reliable in the presence of noise, and is consistent with results obtained with well-established tomographic techniques. The key principle of our formalism is to decompose the particular non-Hermitian matrices that yield the complex state vector coefficients using only observables. Our method therefore only requires strong measurements, as in standard tomography, while maintaining the directness of weak-value-assisted tomography. The simplicity in both the experimental procedure and post-measurement processing renders our method ideally suited for the characterisation of large-scale systems, which can be high-dimensional, many-body or both. We begin by developing the theory on which our method is based and then demonstrate the potential of this scheme by experimentally retrieving the complex coefficients of a (341$\times$341)-dimensional entangled state.
\paragraph{} Consider a quantum system in a $d$-dimensional Hilbert space, whose state vector
\begin{equation}\label{eq:psi}
\ket{\Psi}=\sum_{j=0}^{d-1} c_j \ket{j}
\end{equation}
is expanded in the basis $\{ \ket{j}\}$ and where $c_j$ are unknown complex expansion coefficients. In order to retrieve these coefficients, we introduce the column operators $\widehat{C}_j=\ket{a}\bra{j}$, where $\ket{a}$ is an arbitrary reference vector. Each column operator has an expectation value
\begin{equation}\label{eq:column}
\langle{\widehat{C}_j}\rangle=\braket{\Psi}{a}{c_j},
\end{equation}
which is proportional to a complex state vector expansion coefficient. Since the value of ${\braket{\Psi}{a}}$ is independent of $j$, we can express the state vector in terms of the column operators up to a phase factor:
\begin{equation}\label{eq:master}
\ket{\Psi}=\frac{\text{e}^{i\phi}}{\nu}\sum_{j=0}^{d-1} \langle\widehat{C}_j\rangle\ket{j},
\end{equation}
where $\nu = |{\braket{\Psi}{a}}|$ is a normalization constant. We can ignore the phase factor $\text{e}^{i\phi}$ since it bears no physical significance.
\paragraph{} Most column operators $\widehat{C}_j$ are not Hermitian matrices and are thus not observables. To overcome this apparent constraint, we recognize that any non-Hermitian matrix can be constructed from a complex-weighted sum of Hermitian matrices. Hence, the crucial step to our method is to construct the column operators in terms of measurable quantities: $\widehat{C}_j= \sum_q {w}_{jq} {\widehat{\mathcal O}_{jq}}$, where ${w}_{jq}$ are complex weights and ${\widehat{\mathcal O}_{jq}}$ are observables. As a result, this allows us to retrieve any state vector element with a complex-weighted sum of measurement outcomes:
\begin{equation}\label{eq:coef}
c_j=\frac{1}{\nu}\sum_{q} {w}_{jq} \langle\widehat{\mathcal{O}}_{jq}\rangle.
\end{equation}
Equation \ref{eq:coef} is an exact definition of the pure state vector that is provided in terms of measurable quantities. The above formalism readily applies to a general class of quantum states, including high-dimensional and many-body systems.
\paragraph{}
As an example, consider the case of a qubit $\ket{\Psi}=c_0\ket{0}+c_1\ket{1}$ with $\ket{a}=\ket{0}$ as the reference vector. The first column operator $\widehat{C}_0$ is Hermitian and given by the projector $\ket{0}\bra{0}$. The second column operator $\widehat{C}_1=\ket{0}\bra{1}$ is not Hermitian but can be constructed a number of ways. The first construction -- which, as pointed out earlier, is a key part of the weak value formalism -- is the complex-weighted sum of Pauli matrices: $\widehat{C}_1=(\hat\sigma_x+i\hat\sigma_y)/2$, a decomposition that requires two observables, each of which is made of two projectors or eigenvectors. A second decomposition requiring only three projectors is given by
\begin{equation}\label{eq:tetrarec}
\widehat{C}_1= \sum_{q=0}^2 \frac{2}{3}\text{e}^{i 2\pi q /3} \ket{s_{q}}\bra{s_{q}},
\end{equation}
where $\ket{s_{q}}=(\ket{0}+\text{e}^{i 4 \pi q/3 } \ket{1})/\sqrt{2}$ are the states onto which the observables $\widehat{\mathcal{O}}_{1q}$ project. In both cases, the qubit state vector is exactly given by $\ket{\Psi}=(\langle\widehat{C}_0\rangle \ket{0} + \langle\widehat{C}_1\rangle\ket{1})/\langle\widehat{C}_0\rangle^\frac{1}{2}$.
\paragraph{}
To demonstrate the power and scalability of our scheme, we apply it to the measurement of a state entangled in greater than 100 000 dimensions. We provide a complete characterisation of the spatially entangled two-photon field produced through spontaneous parametric downconversion (SPDC). In general, SPDC can give rise to spatial and frequency correlations between two photons \cite{Miatto:2011,Dada:2011,Agnew:2011,Leach:2012,Salakhutdinov:2012,Tasca:2012,Geelen:2013,Krenn:2014,Osorio:2008,Mosley:2008,Osorio:2013}. The purity of the spatial part of the full state can only be guaranteed if the two types of correlations are completely decoupled, which can be achieved in the collinear regime \cite{Osorio:2008} -- see Supplementary Information section A for a theoretical estimation of our system purity. The consequences of applying our scheme to a quantum state with non-unit purity, which is always the case in the presence of noise, will be discussed below.
\paragraph{}
We express the spatial part of the entangled state in a discrete cylindrical basis of transverse spatial modes. The azimuthal part of the modes is given by $\text{e}^{i\ell\phi}$, where $\ell$ is an integer between $-\infty$ and $\infty$ and $\phi$ is the azimuthal angle. This type of phase profile is known to carry $\ell$ units of orbital angular momentum (OAM). We decompose the radial part of the field with the recently introduced Walsh modes, labelled by the integer $k$ ranging from 0 to $\infty$ \cite{Geelen:2013}. The Walsh modes all have the same Gaussian amplitude envelope, but different $\pi$-steps radial phase profiles. Combining the OAM modes with the Walsh modes yields a complete basis for coherent two-dimensional images. To perform the characterisation of the two-photon spatial field, we consider 31 OAM modes and 11 Walsh modes for each photon. The state vector thus takes the form
\begin{equation}\label{eq:c}
\ket{\Phi} = \sum_{\ell_1=-15}^{15}\sum_{k_1=0}^{10} \sum_{\ell_2=-15}^{15}\sum_{k_2=0}^{10} c_{\ell_1,k_1}^{\ell_2,k_2} \ket{\ell_1,k_1}\ket{\ell_2,k_2}.
\end{equation}
Using the column-operator decomposition described in the Methods section, we sequentially measure all 116 281 coefficients $c_{\ell_1,k_1}^{\ell_2,k_2}$, which are shown in figure \ref{fig:Walsh}a and \ref{fig:Walsh}b. The total Hilbert space dimensionality of this measured state is more than two orders of magnitude larger than any previously reported amplitude-and-phase-characterised discrete entangled state \cite{Tonolini:2014}. As a simple verification of the accuracy of our method, we calculate the probabilities associated with each joint mode via the Born rule, $|c_{\ell_1,k_1}^{\ell_2,k_2}|^2$, as shown in figure \ref{fig:Walsh}c. This result is consistent with the directly measured correlation matrix shown in figure \ref{fig:Walsh}e, showing that we retrieve the correct magnitude of the amplitudes.
\paragraph{} To rigorously assess the validity of the directly measured complex quantum state $\ket{\psi}$, i.e.~both the amplitudes and the phases, we compare it to the results obtained through full tomography (i.e. assumption-free tomography). As full tomography cannot be performed on a (341$\times$341)-dimensional entangled state in a reasonable time, we characterise a ($5\times 5$)-dimensional subset of the SPDC two-photon state. We perform the comparison in a basis of various OAM modes ($\ell_1 \in \{1,-1,2,-2,3 \}$, $\ell_2 \in \{1,-1,2,-2,-3 \}$ ) and a fixed radial Walsh mode ($k_1=k_2=0$). The total number of unknown parameters in the corresponding density matrix is equal to $624$. After performing the direct measurement procedure in this basis, we record 8000 random projective measurements that we break into 8 sets of 1000. For each set, we recover a density matrix $\rho_{\textrm{exp}}$ and calculate its purity and the fidelity with the directly measured state $\ket{\psi}$; fidelity is defined as $\sqrt{\bra{\psi}\rho_{\textrm{exp}}\ket{\psi}}$. On average, the purity calculation yields ($0.96\pm0.02$), and the fidelity gives ($0.985\pm0.004$), where the uncertainties correspond to one standard deviation. After reconstruction of a density matrix, we find that the average error between the measured count rates and the count rates predicted by the density matrix is 5.5\%. This can be explained by shot noise, the pixelated nature of the SLM, and the finite aperture of the optical elements. While we expect unit purity, the 5\% noise level accounts for the discrepancy with the measured value.
\paragraph{} The extremely high fidelity between the tomography results $\rho_{\textrm{exp}}$ and the directly measured state $\ket{\psi}$ indicates the validity of our approach for quantum state measurements applied to near pure states. To evaluate our method in the context of mixed states, we perform a series of numerical simulations where we vary the rank, purity, and dimension of an unknown state $\rho_{\textrm{sim}}$, where no sources of noise are added to the simulated measurement outcomes. We apply our direct measurement procedure to these states and calculate the fidelity $|\braket{\psi}{\psi_{\textrm{sim}}}|$, where $\ket{\psi_{\textrm{sim}}}$ is the eigenvector of $\rho_{\textrm{sim}}$ with the largest eigenvalue. For initial states $\rho_{\textrm{sim}}$ with purity greater than $0.81$, we measure a fidelity greater than $0.99$ in at least 99\% of the cases. The dependency of this result on the dimensionality of the state is negligible. This result indicates that our direct method is able to extract the primary eigenvector of a density matrix, even for a partially mixed state. Full details of this analysis and the density matrix reconstruction are presented in the Supplementary Information.
\paragraph{} Knowledge of the amplitude and phase of the state vector elements allows us to perform otherwise inaccessible calculations. As an example, we perform a calculation of the Schmidt decomposition \cite{Ekert:1995}. This is equivalent to the singular value decomposition for the case of optical transfer matrices. The Schmidt decomposition yields a new joint basis in which the photons are perfectly correlated and where the joint modes have equal phases, as shown in figure \ref{fig:Walsh}d. When the Schmidt decomposition is applied to the entire state, we calculate a number of Schmidt modes equal to 142; this represents the effective number of independent joint modes contained within the state (the maximum for a (341$\times$341)-dimensional state being 341). The Schmidt decomposed two-photon field is a good candidate for the violation of very-high-dimensional Bell inequalities \cite{Dada:2011}. Further details on the Schmidt decomposition can be found in the Supplementary Information.
\paragraph{} There are a number of approaches to reducing the necessary cost and effort for measuring large-scale quantum states. These include, but are not limited to, developing technologies for mode sorting \cite{Berkhout:2010} and arbitrary unitary transformations \cite{Morizur:2010,Miller:2013}, reducing the required number of measurement settings, and circumventing the requirement for reconstruction procedures. It is clear that there is significant interplay between each of these approaches. The theoretical implementation of an approach that combines the principles of our work with generalised measurements, such as POVMs (positive operator value measures), is considered in the Supplementary Information. The ability to use POVMs in the laboratory relies on the aforementioned technologies. Access to these types of technologies would reduce the overall number of measurement settings to uniquely recover a quantum state. However, such a system requires arbitrary unitary transformations for spatial states, which is in itself an active area of research \cite{Berkhout:2010,Morizur:2010,Miller:2013}. Given the limitations of mode sorters for very large dimensions, and the practical nature of projective measurements, our scheme provides a simple and elegant method for the characterisation of large-scale quantum states.
\paragraph{} Our scheme allows direct access to the complex coefficients that define large-scale quantum states. The main result of our work is a novel method for retrieving a state vector coefficient with a complex-weighted sum of strong measurement outcomes. One challenge in reconstructing a quantum state from measurement outcomes lies in data processing; our scheme trades the difficulty of data processing for theoretical analysis prior to the experiment, that is, finding the measurements one has to perform. We anticipate that our work will have an impact on a number of disciplines, for example, quantum parameter estimation, measurement in quantum computing, quantum information and metrology. \\
\noindent \textbf{Methods}\\
\textbf{Experiment}
The two-photon field is generated via SPDC with a 405-nm laser diode pumping a 1-mm-long periodically-poled KTP (PPKTP) crystal with 50 mW of power. The experimental setup is shown in figure \ref{fig:setup}. We separate the two photons with a right angle prism and image the plane of the crystal to a Holoeye spatial light modulator (SLM) with a magnification of -10. We simultaneously display two holograms, one on each side of the SLM, to control the amplitude and phase profiles of the two photons independently. In order to make projective measurements of superposition modes, we make use of intensity masking \cite{Bolduc:2013}. We image the plane of the SLM with a magnification of $-1/2500$ to two single mode fibers. The combination of the SLM and single mode fibers allows us to make arbitrary projective measurements. All measurements are performed in coincidence with two single photon avalanche detectors, with a timing window of 25 ns, an integration time of 1 s for modes outside the diagonal and 20 s for the diagonal elements ($\ell_1=-\ell_2$ and $k_1=k_2$). We start an automatic alignment procedure with the SLM every four hours to compensate for drift. Including the time it takes to calculate and display a hologram (about one second), the entire experiment takes two weeks; assumption-free tomography would take more than four centuries at the same acquisition rate. We perform no background subtraction and use the fundamental mode ($\ell_1=-\ell_2=k_1=k_2=0$) as the reference vector $\ket{a}$. The count rate of the fundamental mode is approximately 900 coincidences per second and varies by 10\% over 24-hour periods. To correct for long term drift, we normalise each outcome to the count rate of the fundamental mode, which we measure before the measurement of each column operator. In standard tomography, the calculation of error bounds on the measured state is not a straightforward task \cite{Christandl:2012}.
Here, we can calculate the error bound on a given coefficient with a weighted sum of the detector counts used to retrieve it. For a given state vector coefficient, the errors on the amplitude $|c_j|$ and phase arg$(c_j)$ are both inversely proportional to the overlap $\nu$ of the reference vector with the quantum state. In order to minimize the errors, it is important to choose a reference vector that has a high probability of occurrence within the state -- the fundamental mode is the most probable one in our case. \\
\noindent \textbf{Two-body column-operator decomposition} In order to decompose a given state vector coefficient $c_{\ell_1,k_1}^{\ell_2,k_2}$ into a set of measurement outcomes, we need to find a projector decomposition of the corresponding column operator $\widehat{C}_{\ell_1,k_1}^{\ell_2,k_2}=\ket{0,0}\bra{\ell_1,k_1}\otimes\ket{0,0}\bra{\ell_2,k_2}$, as in equation \ref{eq:coef}. We numerically find this column-operator decomposition, i.e. the complex weights ${w}_q$ and the observables $\widehat{\mathcal{O}}_q$, using the differential evolution algorithm (see Supplementary Information part D). By inspection, we find that the corresponding analytical form of the state vector coefficients is given by
\begin{equation}\label{eq:coef5}
\widehat{C}_{\ell_1,k_1}^{\ell_2,k_2}=\frac{1}{\nu} \sum_{q=0}^{4} \frac{4}{5} \text{e}^{i 2\pi q /5}\ket{s_{1,q}}\bra{s_{1,q}}\otimes\ket{s_{2,q}}\bra{s_{2,q}},
\end{equation}
where $\sqrt{2}\ket{s_{m,q}}=\ket{0,0}+\text{e}^{i 4 \pi q/5} \ket{\ell_m,k_m} $ with $m=\{1,2 \}$, and $\nu=|\braket{\Psi}{0,0}|$ is a normalisation constant. This decomposition is only valid when the state of any photon is different from the reference vector, i.e. $\ket{\ell_m,k_m}\neq\ket{0,0}$. Each coefficient measured with the above column-operator decomposition requires five projective measurements, thus explaining the $5D^2$ scaling, where $D$ is the Hilbert space dimensionality of a single particle. The protocol scales much more favorably than assumption-free tomography, which requires $D^4$ projections.
\paragraph{} Here, we briefly explain our protocol for measuring the entire SPDC state vector. We measure more than 99\% of the coefficients using the decomposition of equation \ref{eq:coef5}. The remaining column operators are the special cases $\ket{0,0}\bra{\ell_1,k_1}\otimes\ket{0,0}\bra{0,0}$ and $\ket{0,0}\bra{0,0}\otimes\ket{0,0}\bra{\ell_2,k_2}$, which respectively correspond to a row and a column of the result shown in figure \ref{fig:Walsh}a. These column operators can be decomposed into only three joint local measurements using the projector $\ket{0,0}\bra{0,0}$ on one system and a column-operator decomposition similar to that of equation \ref{eq:tetrarec} on the other system. Finally, the column operator $\ket{0,0}\bra{0,0}\otimes\ket{0,0}\bra{0,0}$ is a projector, and its expectation value can be measured in a single experimental configuration. \\
\noindent \textbf{Full quantum tomography} We perform full tomography with high count rates in order to achieve high accuracy. We set the magnification between the plane of the SLM and that of the single mode fibers to 1/400. In this condition, we obtain a count rate of approximately 18,000 counts per second for the fundamental mode and integrate over 1 second for each individual projective measurement. The increase in the count rate of the fundamental mode comes at the price of lower count rates for high order modes. Regarding the full tomography measurements, we take an overcomplete set of 1000 random projective measurements in a $(5\times5)$-dimensional space. To minimize high-frequency components on the SLM, we limit the random superpositions to two-dimensional subsets of the state space.
\begin{thebibliography}{10}
\expandafter\ifx\csname url\endcsname\relax
\def\url#1{\texttt{#1}}\fi
\expandafter\ifx\csname urlprefix\endcsname\relax\def\urlprefix{URL }\fi
\providecommand{\bibinfo}[2]{#2}
\providecommand{\eprint}[2][]{\url{#2}}
\bibitem{Monz:2011}
\bibinfo{author}{Monz, T.} \emph{et~al.}
\newblock \bibinfo{title}{{14-Qubit Entanglement: Creation and Coherence}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{106}}, \bibinfo{pages}{130506}
(\bibinfo{year}{2011}).
\bibitem{Wong:2012}
\bibinfo{author}{Yao, X.~C.} \emph{et~al.}
\newblock \bibinfo{title}{{Observation of eight-photon entanglement}}.
\newblock \emph{\bibinfo{journal}{Nature Photon.}}
\textbf{\bibinfo{volume}{6}}, \bibinfo{pages}{225--228}
(\bibinfo{year}{2012}).
\bibitem{Yokoyama:2013}
\bibinfo{author}{Yokoyama, S.} \emph{et~al.}
\newblock \bibinfo{title}{{Ultra-large-scale continuous-variable cluster states
multiplexed in the time domain}}.
\newblock \emph{\bibinfo{journal}{Nature Photon.}}
\textbf{\bibinfo{volume}{7}}, \bibinfo{pages}{982--986}
(\bibinfo{year}{2013}).
\bibitem{Krenn:2014}
\bibinfo{author}{Krenn, M.} \emph{et~al.}
\newblock \bibinfo{title}{{Generation and confirmation of a
(100x100)-dimensional entangled quantum system}}.
\newblock \emph{\bibinfo{journal}{Proc. Natl Acad. Sci.}}
\textbf{\bibinfo{volume}{111}}, \bibinfo{pages}{6243--6247}
(\bibinfo{year}{2014}).
\bibitem{Smith:2005}
\bibinfo{author}{Smith, B.~J.}, \bibinfo{author}{Killett, B.},
\bibinfo{author}{Raymer, M.~G.}, \bibinfo{author}{Walmsley, I.~A.} \&
\bibinfo{author}{Banaszek, K.}
\newblock \bibinfo{title}{{Measurement of the transverse spatial quantum state
of light at the single-photon level}}.
\newblock \bibinfo{type}{Tech. Rep.} (\bibinfo{year}{2005}).
\bibitem{Bogdanov:2010}
\bibinfo{author}{Bogdanov, Y.~I.} \emph{et~al.}
\newblock \bibinfo{title}{{Statistical Estimation of the Efficiency of Quantum
State Tomography Protocols}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{105}}, \bibinfo{pages}{010404}
(\bibinfo{year}{2010}).
\bibitem{Mahler:2013}
\bibinfo{author}{Mahler, D.~H.} \emph{et~al.}
\newblock \bibinfo{title}{{Adaptive Quantum State Tomography Improves Accuracy
Quadratically}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{111}}, \bibinfo{pages}{183601}
(\bibinfo{year}{2013}).
\bibitem{Teo:2013}
\bibinfo{author}{Teo, Y.~S.}, \bibinfo{author}{{\v{R}}eh{\'a}{\v c}ek, J.} \&
\bibinfo{author}{Hradil, Z.}
\newblock \bibinfo{title}{{Informationally incomplete quantum tomography}}.
\newblock \emph{\bibinfo{journal}{Quantum Measurements and Quantum Metrology}}
\textbf{\bibinfo{volume}{1}}, \bibinfo{pages}{57--83} (\bibinfo{year}{2013}).
\bibitem{Ferrie:2014}
\bibinfo{author}{Ferrie, C.}
\newblock \bibinfo{title}{{Self-Guided Quantum Tomography}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{113}}, \bibinfo{pages}{190404}
(\bibinfo{year}{2014}).
\bibitem{Banaszek:2013}
\bibinfo{author}{Banaszek, K.}, \bibinfo{author}{Cramer, M.} \&
\bibinfo{author}{Gross, D.}
\newblock \bibinfo{title}{{Focus on quantum tomography}}.
\newblock \emph{\bibinfo{journal}{New J. Phys.}} \textbf{\bibinfo{volume}{15}},
\bibinfo{pages}{125020} (\bibinfo{year}{2013}).
\bibitem{Shabani:2011}
\bibinfo{author}{Shabani, A.} \emph{et~al.}
\newblock \bibinfo{title}{{Efficient Measurement of Quantum Dynamics via
Compressive Sensing}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{106}}, \bibinfo{pages}{100401}
(\bibinfo{year}{2011}).
\bibitem{Flammia:2005}
\bibinfo{author}{Flammia, S.~T.}, \bibinfo{author}{Silberfarb, A.} \&
\bibinfo{author}{Caves, C.~M.}
\newblock \bibinfo{title}{{Minimal Informationally Complete Measurements for
Pure States}}.
\newblock \emph{\bibinfo{journal}{Found Phys}} \textbf{\bibinfo{volume}{35}},
\bibinfo{pages}{1985--2006} (\bibinfo{year}{2005}).
\bibitem{Cramer:2010}
\bibinfo{author}{Cramer, M.} \emph{et~al.}
\newblock \bibinfo{title}{{Efficient quantum state tomography}}.
\newblock \emph{\bibinfo{journal}{Nat. Commun.}} \textbf{\bibinfo{volume}{1}},
\bibinfo{pages}{149--7} (\bibinfo{year}{2010}).
\bibitem{Toth:2010}
\bibinfo{author}{T{\'o}th, G.} \emph{et~al.}
\newblock \bibinfo{title}{{Permutationally Invariant Quantum Tomography}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{105}}, \bibinfo{pages}{250403}
(\bibinfo{year}{2010}).
\bibitem{Gross:2010}
\bibinfo{author}{Gross, D.}, \bibinfo{author}{Liu, Y.-K.},
\bibinfo{author}{Flammia, S.~T.}, \bibinfo{author}{Becker, S.} \&
\bibinfo{author}{Eisert, J.}
\newblock \bibinfo{title}{{Quantum State Tomography via Compressed Sensing}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{105}}, \bibinfo{pages}{150401}
(\bibinfo{year}{2010}).
\bibitem{Schwemmer:2014}
\bibinfo{author}{Schwemmer, C.} \emph{et~al.}
\newblock \bibinfo{title}{{Experimental Comparison of Efficient Tomography
Schemes for a Six-Qubit State}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{113}}, \bibinfo{pages}{040503}
(\bibinfo{year}{2014}).
\bibitem{Tonolini:2014}
\bibinfo{author}{Tonolini, F.}, \bibinfo{author}{Chan, S.},
\bibinfo{author}{Agnew, M.}, \bibinfo{author}{Lindsay, A.} \&
\bibinfo{author}{Leach, J.}
\newblock \bibinfo{title}{{Reconstructing high-dimensional two-photon entangled
states via compressive sensing}}.
\newblock \emph{\bibinfo{journal}{Sci. Rep.}} \textbf{\bibinfo{volume}{4}},
\bibinfo{pages}{6542} (\bibinfo{year}{2014}).
\bibitem{Lloyd:2014}
\bibinfo{author}{Lloyd, S.}, \bibinfo{author}{Mohseni, M.} \&
\bibinfo{author}{Rebentrost, P.}
\newblock \bibinfo{title}{{Quantum principal component analysis}}.
\newblock \emph{\bibinfo{journal}{Nature Phys.}} \textbf{\bibinfo{volume}{10}},
\bibinfo{pages}{631--633} (\bibinfo{year}{2014}).
\bibitem{Lundeen:2011}
\bibinfo{author}{Lundeen, J.~S.}, \bibinfo{author}{Sutherland, B.},
\bibinfo{author}{Patel, A.}, \bibinfo{author}{Stewart, C.} \&
\bibinfo{author}{Bamber, C.}
\newblock \bibinfo{title}{{Direct measurement of the quantum wavefunction}}.
\newblock \emph{\bibinfo{journal}{Nature}} \textbf{\bibinfo{volume}{474}},
\bibinfo{pages}{188--191} (\bibinfo{year}{2011}).
\bibitem{Banaszek:1999}
\bibinfo{author}{Banaszek, K.}, \bibinfo{author}{D'Ariano, G.~M.},
\bibinfo{author}{Paris, M. G.~A.} \& \bibinfo{author}{Sacchi, M.~F.}
\newblock \bibinfo{title}{{Maximum-likelihood estimation of the density
matrix}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. A}} \textbf{\bibinfo{volume}{61}},
\bibinfo{pages}{10304} (\bibinfo{year}{2000}).
\bibitem{Liu:2012}
\bibinfo{author}{Liu, W.-T.}, \bibinfo{author}{Zhang, T.},
\bibinfo{author}{Liu, J.-Y.}, \bibinfo{author}{Chen, P.-X.} \&
\bibinfo{author}{Yuan, J.-M.}
\newblock \bibinfo{title}{{Experimental Quantum State Tomography via Compressed
Sampling}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{108}}, \bibinfo{pages}{170403}
(\bibinfo{year}{2012}).
\bibitem{Bamber:2014}
\bibinfo{author}{Bamber, C.} \& \bibinfo{author}{Lundeen, J.~S.}
\newblock \bibinfo{title}{{Observing Dirac{\textquoteright}s Classical Phase
Space Analog to the Quantum State}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{112}}, \bibinfo{pages}{070405}
(\bibinfo{year}{2014}).
\bibitem{Wu:2013gb}
\bibinfo{author}{Wu, S.}
\newblock \bibinfo{title}{{State tomography via weak measurements}}.
\newblock \emph{\bibinfo{journal}{Sci. Rep.}} \textbf{\bibinfo{volume}{3}}
(\bibinfo{year}{2013}).
\bibitem{Salvail:2013}
\bibinfo{author}{Salvail, J.~Z.} \emph{et~al.}
\newblock \bibinfo{title}{{Full characterization of polarization states of
light via direct measurement}}.
\newblock \emph{\bibinfo{journal}{Nature Photon.}}
\textbf{\bibinfo{volume}{7}}, \bibinfo{pages}{1--6} (\bibinfo{year}{2013}).
\bibitem{Duck:1989}
\bibinfo{author}{Duck, I.~M.}, \bibinfo{author}{Stevenson, P.~M.} \&
\bibinfo{author}{Sudarshan, E. C.~G.}
\newblock \bibinfo{title}{{The Sense in Which a 'Weak Measurement' of a Spin
1/2 Particle's Spin Component Yields a Value 100}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. A}} \textbf{\bibinfo{volume}{40}},
\bibinfo{pages}{2112--2117} (\bibinfo{year}{1989}).
\bibitem{Malik:2013}
\bibinfo{author}{Malik, M.} \emph{et~al.}
\newblock \bibinfo{title}{{Direct measurement of a 27-dimensional
orbital-angular-momentum state vector}}.
\newblock \emph{\bibinfo{journal}{Nat. Commun.}} \textbf{\bibinfo{volume}{5}},
\bibinfo{pages}{3115} (\bibinfo{year}{2014}).
\bibitem{Osorio:2008}
\bibinfo{author}{Osorio, C.~I.}, \bibinfo{author}{Valencia, A.} \&
\bibinfo{author}{Torres, J.~P.}
\newblock \bibinfo{title}{{Spatiotemporal correlations in entangled photons
generated by spontaneous parametric down conversion}}.
\newblock \emph{\bibinfo{journal}{New J. Phys.}} \textbf{\bibinfo{volume}{10}},
\bibinfo{pages}{113012} (\bibinfo{year}{2008}).
\bibitem{Miatto:2011}
\bibinfo{author}{Miatto, F.~M.}, \bibinfo{author}{Yao, A.~M.} \&
\bibinfo{author}{Barnett, S.~M.}
\newblock \bibinfo{title}{{Full characterization of the quantum spiral
bandwidth of entangled biphotons}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. A}} \textbf{\bibinfo{volume}{83}},
\bibinfo{pages}{033816} (\bibinfo{year}{2011}).
\bibitem{Dada:2011}
\bibinfo{author}{Dada, A.~C.}, \bibinfo{author}{Leach, J.},
\bibinfo{author}{Buller, G.~S.}, \bibinfo{author}{Padgett, M.~J.} \&
\bibinfo{author}{Andersson, E.}
\newblock \bibinfo{title}{{Experimental high-dimensional two-photon
entanglement and violations of generalized Bell inequalities}}.
\newblock \emph{\bibinfo{journal}{Nature Phys.}} \textbf{\bibinfo{volume}{7}},
\bibinfo{pages}{1--4} (\bibinfo{year}{2011}).
\bibitem{Agnew:2011}
\bibinfo{author}{Agnew, M.}, \bibinfo{author}{Leach, J.},
\bibinfo{author}{McLaren, M.}, \bibinfo{author}{Roux, F.~S.} \&
\bibinfo{author}{Boyd, R.~W.}
\newblock \bibinfo{title}{{Tomography of the quantum state of photons entangled
in high dimensions}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. A}} \textbf{\bibinfo{volume}{84}},
\bibinfo{pages}{062101} (\bibinfo{year}{2011}).
\bibitem{Leach:2012}
\bibinfo{author}{Leach, J.}, \bibinfo{author}{Bolduc, E.},
\bibinfo{author}{Gauthier, D.~J.} \& \bibinfo{author}{Boyd, R.~W.}
\newblock \bibinfo{title}{{Secure information capacity of photons entangled in
many dimensions}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. A}} \textbf{\bibinfo{volume}{85}},
\bibinfo{pages}{060304} (\bibinfo{year}{2012}).
\bibitem{Salakhutdinov:2012}
\bibinfo{author}{Salakhutdinov, V.~D.}, \bibinfo{author}{Eliel, E.~R.} \&
\bibinfo{author}{L{\"o}ffler, W.}
\newblock \bibinfo{title}{{Full-Field Quantum Correlations of Spatially
Entangled Photons}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{108}}, \bibinfo{pages}{173604}
(\bibinfo{year}{2012}).
\bibitem{Tasca:2012}
\bibinfo{author}{Tasca, D.~S.} \emph{et~al.}
\newblock \bibinfo{title}{{Imaging high-dimensional spatial entanglement with a
camera}}.
\newblock \emph{\bibinfo{journal}{Nat. Commun.}} \textbf{\bibinfo{volume}{3}},
\bibinfo{pages}{3:984} (\bibinfo{year}{2012}).
\bibitem{Geelen:2013}
\bibinfo{author}{Geelen, D.} \& \bibinfo{author}{L{\"o}ffler, W.}
\newblock \bibinfo{title}{{Walsh modes and radial quantum correlations of
spatially entangled photons}}.
\newblock \emph{\bibinfo{journal}{Opt. Lett.}} \textbf{\bibinfo{volume}{38}},
\bibinfo{pages}{4108--4111} (\bibinfo{year}{2013}).
\bibitem{Mosley:2008}
\bibinfo{author}{Mosley, P.~J.}, \bibinfo{author}{Lundeen, J.~S.},
\bibinfo{author}{Smith, B.~J.} \& \bibinfo{author}{Walmsley, I.~A.}
\newblock \bibinfo{title}{{Conditional preparation of single photons using
parametric downconversion: a recipe for purity}}.
\newblock \emph{\bibinfo{journal}{New J. Phys.}} \textbf{\bibinfo{volume}{10}},
\bibinfo{pages}{093011} (\bibinfo{year}{2008}).
\bibitem{Osorio:2013}
\bibinfo{author}{Osorio, C.~I.}, \bibinfo{author}{Sangouard, N.} \&
\bibinfo{author}{Thew, R.~T.}
\newblock \bibinfo{title}{{On the purity and indistinguishability of
down-converted photons}}.
\newblock \emph{\bibinfo{journal}{J. Phys. B: At. Mol. Opt. Phys.}}
\textbf{\bibinfo{volume}{46}}, \bibinfo{pages}{055501}
(\bibinfo{year}{2013}).
\bibitem{James:2001}
\bibinfo{author}{James, D. F.~V.}, \bibinfo{author}{Kwiat, P.~G.},
\bibinfo{author}{Munro, W.~J.} \& \bibinfo{author}{White, A.~G.}
\newblock \bibinfo{title}{{Measurement of qubits}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. A}} \textbf{\bibinfo{volume}{64}},
\bibinfo{pages}{052312} (\bibinfo{year}{2001}).
\bibitem{Ekert:1995}
\bibinfo{author}{Ekert, A.} \& \bibinfo{author}{Knight, P.~L.}
\newblock \bibinfo{title}{{Entangled quantum systems and the Schmidt
decomposition}}.
\newblock \emph{\bibinfo{journal}{Am. J. Phys.}} \textbf{\bibinfo{volume}{63}}
(\bibinfo{year}{1995}).
\bibitem{Miller:2013}
\bibinfo{author}{Miller, D. A.~B.}
\newblock \bibinfo{title}{{Self-configuring universal linear optical component
[Invited]}}.
\newblock \emph{\bibinfo{journal}{Photon. Res.}} \textbf{\bibinfo{volume}{1}},
\bibinfo{pages}{1} (\bibinfo{year}{2013}).
\bibitem{Bolduc:2013}
\bibinfo{author}{Bolduc, E.}, \bibinfo{author}{Bent, N.},
\bibinfo{author}{Santamato, E.}, \bibinfo{author}{Karimi, E.} \&
\bibinfo{author}{Boyd, R.~W.}
\newblock \bibinfo{title}{{Exact solution to simultaneous intensity and phase
encryption with a single phase-only hologram}}.
\newblock \emph{\bibinfo{journal}{Opt. Lett.}} \textbf{\bibinfo{volume}{38}},
\bibinfo{pages}{3546--3549} (\bibinfo{year}{2013}).
\bibitem{Christandl:2012}
\bibinfo{author}{Christandl, M.} \& \bibinfo{author}{Renner, R.}
\newblock \bibinfo{title}{{Reliable Quantum State Tomography}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{109}}, \bibinfo{pages}{120403}
(\bibinfo{year}{2012}).
\bibitem{Berkhout:2010}
\bibinfo{author}{Berkhout, G.~C.G.}, \bibinfo{author}{ Lavery, Martin P.J.}, \bibinfo{author}{Courtial, J.}, \bibinfo{author}{Beijersbergen, M.~W.} \& \bibinfo{author}{Padgett, M. J.}
\newblock \bibinfo{title}{{Efficient sorting of orbital angular momentum states of light}}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{105}}, \bibinfo{pages}{153601}
(\bibinfo{year}{2010}).
\bibitem{Morizur:2010}
\bibinfo{author}{Morizur, J.-F.}, \bibinfo{author}{ Nicholls, L.}, \bibinfo{author}{Jian, P.}, \bibinfo{author}{Armstrong, S.}, \bibinfo{author}{ Treps, N.}, \bibinfo{author}{ Hage, B.}, \bibinfo{author}{ Hsu, M.}, \bibinfo{author}{ Bowen, W.}, \bibinfo{author}{ Janousek, J.}, \& \bibinfo{author} {Bachor, H.-A.}
\newblock \bibinfo{title}{{Programmable unitary spatial mode manipulation}}.
\newblock \emph{\bibinfo{journal}{JOSA A}}
\textbf{\bibinfo{volume}{27}}, \bibinfo{pages}{2524--2531}
(\bibinfo{year}{2010}).
\bibitem{Miller:2013b}
\bibinfo{author}{Miller, D.}
\newblock \bibinfo{title}{{Reconfigurable add-drop multiplexer for spatial modes}}.
\newblock \emph{\bibinfo{journal}{Opt. Expr.}}
\textbf{\bibinfo{volume}{21}}, \bibinfo{pages}{20220--20229}
(\bibinfo{year}{2013}).
\bibitem{Boyd79}
\bibinfo{author}{Boyd, R.~W.}
\newblock \bibinfo{title}{Intuitive explanation of the phase anomaly of focused
light beams}.
\newblock \emph{\bibinfo{journal}{J. Opt. Soc. Am.}}
\textbf{\bibinfo{volume}{70}}, \bibinfo{pages}{877--880}
(\bibinfo{year}{1980}).
\end{thebibliography}
\makeatletter
\setlength{\@fpbot}{1cm}
\makeatother
\begin{figure}
\caption{\small{\textbf{Measured and calculated properties of the two-photon state.}}}
\label{fig:Walsh}
\end{figure}
\makeatletter
\setlength{\@fpbot}{1cm}
\makeatother
\begin{figure}
\caption{\textbf{Generation and characterisation of a two-photon field.}}
\label{fig:setup}
\end{figure}
\makeatletter
\setlength{\@fpbot}{1cm}
\makeatother
\end{document}
\begin{document}
\title{A path-following inexact Newton method for PDE-constrained optimal control in BV (extended version)
\thanks{\textit{This is an extended version of the corresponding journal article \cite{HafeMan_reg}. It contains some proofs that are omitted in the journal's version.}}
}
\titlerunning{A path-following inexact Newton method for optimal control in BV}
\author{D. Hafemeyer \and
F. Mannel}
\institute{Dominik Hafemeyer \at
TU M\"unchen\\
Lehrstuhl f\"ur Optimalsteuerung, Department of Mathematics\\
Boltzmannstr.~3, 85748 Garching b. M\"unchen, Germany\\
\email{[email protected]}
\and
Florian Mannel \at
University of Graz\\
Institute of Mathematics and Scientific Computing\\
Heinrichstraße 36, 8010 Graz, Austria\\
\email{[email protected]}
}
\date{Received: date / Accepted: date}
\maketitle
\begin{abstract}
We study a PDE-constrained optimal control problem that involves functions of bounded variation as controls and includes the TV seminorm of the control in the objective.
We apply a path-following inexact Newton method to the problems that arise from smoothing the TV seminorm and adding an $H^1$ regularization.
We prove in an infinite-dimensional setting that, first, the solutions of these auxiliary problems converge to the solution of the original problem and, second, that an inexact Newton method enjoys fast local convergence when applied to a reformulation of the auxiliary optimality systems in which the control appears as implicit function of the adjoint state.
We show convergence of a Finite Element approximation, provide a globalized preconditioned inexact Newton method as solver for the discretized auxiliary problems, and embed it into an inexact path-following scheme.
We construct a two-dimensional test problem with fully explicit solution and present numerical results to illustrate the accuracy and robustness of the approach.
\keywords{optimal control \and partial differential equations \and TV seminorm \and functions of bounded variation \and path-following Newton method}
\subclass{35J70 \and 49-04 \and 49M05 \and 49M15 \and 49M25 \and 49J20 \and 49K20 \and 49N60}
\end{abstract}
\section*{Problem setting and introduction}
\addcontentsline{toc}{section}{Problem setting and introduction}
This work is concerned with the optimal control problem
\begin{equation} \tag{OC} \label{eq:ocintro}
\min_{(y,u)\in H_0^1(\Omega)\times \ensuremath {\text{BV}(\Omega)} } \; \underbrace{\frac{1}{2}\mathds{N}orm[L^2(\Omega)]{y-y_\Omega}^2 + \beta |u|_{\ensuremath {\text{BV}(\Omega)} }}_{=:J(y,u)}
\qquad\ensuremath{\text{s.t.}}\qquad
Ay = u,
\end{equation}
where throughout $\Omega\subset \mathds{R}^N$ is a bounded $C^{1,1}$ domain and $N\in\{1,2,3\}$.
The control $u$ belongs to the space of functions of bounded variation $\ensuremath {\text{BV}(\Omega)} $, the state $y$ lives in $Y:=H^1_0(\Omega)$, the parameter $\beta$ is positive, and $Ay=u$ is a partial differential equation of the form
\begin{equation*}
\left\{
\begin{aligned}
\mathcal{A} y + c_0 y & = u && \text{ in }\Omega,\\
y & = 0 &&\text{ on }\partial\Omega
\end{aligned}
\right.
\end{equation*}
with a non-negative function $c_0\in L^\infty(\Omega)$ and a linear and uniformly elliptic operator of second order in divergence form $ \ensuremath{{\cal{A}}} : H^1_0(\Omega) \rightarrow H^{-1}(\Omega)$,
$ \ensuremath{{\cal{A}}} y (\varphi) = \int_{\Omega} \sum_{i,j=1}^N a_{ij}\partial_{i} y\partial_{j} \varphi \ensuremath {\,\mathrm{d}x}$ whose coefficients satisfy $a_{ij}=a_{ji}\in C^{0,1}(\Omega)$ for all $i,j\in\{1,\ldots,N\}$.
The specific feature of \eqref{eq:ocintro} is the appearance of the BV seminorm $|u|_{\ensuremath {\text{BV}(\Omega)} }$ in the cost functional, which favors piecewise constant controls and has recently attracted considerable interest in PDE-constrained optimal control, cf. \cite{BergouniouxBonnefondHaberkornPrivat,CasasKogutLeugering,kruse,Casas2019,Clason2011,EngelKunischBVWaveSemismooth,EngelVexlerTrautmann,Hafemeyer15,HafemeyerMaster,Hafemeyer2019}
and the earlier works \cite{CasasKunischPola,CasasKunischPola2}.
The majority of these contributions focuses on deriving optimality conditions and studying Finite Element approximations. In contrast, the main focus of this work is on a path-following method. Specifically,
\begin{itemize}
\item we propose to smooth the TV seminorm in $J$ and add an $H^1$ regularization, and show in an infinite-dimensional setting that the solutions of the resulting family of auxiliary problems converge to the solution of \eqref{eq:ocintro};
\item for any given auxiliary problem we prove that an infinite-dimensional inexact Newton method converges locally;
\item we derive a practical path-following method that yields accurate solutions for \eqref{eq:ocintro} and illustrate its capabilities in numerical examples for $\Omega\subset\mathds{R}^2$.
\end{itemize}
To the best of our knowledge, these aspects have only been investigated partially for optimal control problems that involve the TV seminorm in the objective.
In particular, there are few works that address the numerical solution when the measure $\nabla u$ is supported in a \emph{two-dimensional} set. In fact, we are only aware of \cite{Clason2011}, where a doubly-regularized version of the Fenchel predual of \eqref{eq:ocintro} is solved
for fixed regularization parameters, but path-following is not applied. We stress that in our numerical experience the two-dimensional case is significantly more challenging than the one-dimensional case.
A FEniCS implementation of our path-following method is available at \url{https://arxiv.org/abs/2010.11628}.
It includes all the features that we discuss in section~\ref{sec:numericalsolution}, e.g.,
a preconditioner for the Newton systems, a non-monotone line search globalization,
and inexact path-following.
A further contribution of this work is that
\begin{itemize}
\item we provide an example of \eqref{eq:ocintro} for $N=2$ with fully explicit solution.
\end{itemize}
For the case that $\nabla u$ is defined in an interval ($N=1$) such examples are available, e.g. \cite{kruse,Hafemeyer2019}, but for $N=2$ this is new.
Let us briefly address three difficulties associated with \eqref{eq:ocintro}.
First, the fact that \eqref{eq:ocintro} is posed in the non-reflexive space $\ensuremath {\text{BV}(\Omega)} $ complicates the proof of existence of optimal solutions. By now it is, however, well understood how to deal with this issue also in more complicated situations, cf. e.g. \cite{kruse,Casas2019}.
Second, we notice that $u\mapsto\lvert u \rvert_{\ensuremath {\text{BV}(\Omega)} }$ is not differentiable.
We will cope with this by replacing $\lvert u \rvert_{\ensuremath {\text{BV}(\Omega)} }$ with the smooth functional $\psi_\delta(u)=\int_\Omega \sqrt{\lvert\nabla u\rvert^2+\delta^2}$, $\delta\geq 0$, that satisfies $\psi_0(\cdot)=\lvert \cdot \rvert_{\ensuremath {\text{BV}(\Omega)} }$.
The functional $\psi_\delta$ is well-known, particularly in the imaging community, e.g. \cite{acar,chan}. However, in most of the existing works the smoothing parameter $\delta>0$ is fixed, whereas we are interested in driving $\delta$ to zero.
We will also add the regularizer $\gamma\lVert u\rVert_{H^1(\Omega)}^2$, $\gamma\geq 0$, to $J$ and drive $\gamma$ to zero.
This allows us to prove that for $\ensuremath {{\gamma,\delta}} >0$ the optimal control $\bar u_{\ensuremath {{\gamma,\delta}} }$ of the smoothed and regularized auxiliary problem is $C^{1,\alpha}$,
which is crucial to show, for instance, that the adjoint-to-control mapping is differentiable; cf. Theorem~\ref{thm_PtoUfrechet}. In contrast, for $\gamma=0$ only $\bar u_{0,\delta}\in\ensuremath {\text{BV}(\Omega)} $ can be expected.
Third, our numerical experience with PDE-constrained optimal control involving the TV seminorm \cite{kruse,Clason2018,Hafemeyer15,HafemeyerMaster,Hafemeyer2019} suggests that path-following Newton methods work significantly better if the optimality systems of the auxiliary problems do not contain the control as independent variable.
Therefore, we express the auxiliary optimality conditions in terms of state and adjoint state by regarding the control as an implicit function of the adjoint state.
Let us set our work in perspective with the available literature.
As one of the main contributions we show that the solutions of the auxiliary problems
converge to the solution of \eqref{eq:ocintro}, cf. section~\ref{sec:regconvergence}.
The asymptotic convergence for vanishing $H^1$ seminorm regularization is analyzed in \cite[Section~6]{Casas2019} for a more general problem than \eqref{eq:ocintro}, but the fact that our setting is less general allows us to prove convergence in stronger norms than the corresponding \cite[Theorem~10]{Casas2019}.
The asymptotic convergence for a doubly-regularized version of the predual of \eqref{eq:ocintro} is established in \cite[Appendix~A]{Clason2011}, but one of the regularizations is left untouched, so convergence is towards the solution of a regularized problem, not towards the solution of \eqref{eq:ocintro}.
Next, we demonstrate local convergence of an infinite-dimensional inexact Newton method applied to the optimality system of the auxiliary problem.
Because the control and the adjoint state are coupled by a quasilinear PDE, this convergence analysis is non-trivial; among others, it relies on
Hölder estimates for the gradient of the control that are rather technical to derive.
A related result is \cite[Theorem~3.5]{Clason2011}, where local q-superlinear convergence of a semismooth Newton method is shown for the doubly-regularized Fenchel predual for fixed regularization parameters.
Yet, since we work with a different optimality system, the overlap is small.
Nonetheless, \cite{Clason2011} is closely related to the present paper, and it would be interesting to embed the semismooth Newton method
\cite[Algorithm~2]{Clason2011} in a path-following scheme and compare it to our algorithm.
The concept to view the control as an implicit function of the adjoint state or to eliminate it, is well-known in optimal control, cf., e.g., \cite{kruse,Clason2011,HintermKunisch,HintermStadler,Hinze05,HinzeTroe,NeitzelPruefertSlawig,PieperDiss,Schiela_IPMefficient,WeiserGaenzlerSchiela}.
Turning to the discrete level we provide a Finite Element approximation and demonstrate that the Finite Element solutions of the auxiliary problems converge to their non-discrete counterparts.
Finite Element approximations for optimal control in BV involving the TV seminorm have also been studied in \cite{BergouniouxBonnefondHaberkornPrivat,CasasKogutLeugering,kruse,Casas2019,EngelKunischBVWaveSemismooth,EngelVexlerTrautmann,Hafemeyer15,HafemeyerMaster,Hafemeyer2019}, but in our assessment the regularization of \eqref{eq:ocintro} that we propose is not covered by these studies.
The papers \cite{ElvetunNielsen,PreconditioningTVRegularization} study the linear systems that arise in split Bregman methods when applied to a discretization of \eqref{eq:ocintro} with homogeneous Neumann boundary conditions.
The BV-term in \eqref{eq:ocintro} favors sparsity in the gradient of the control. Other sparsity promoting control terms that have recently been studied are measure norms and $L^1$--type functionals,
e.g., \cite{AllendesFuicaOtarola,CCK2012,CCK2013,CasasKunisch2014,CasasRyllTroeltzsch,CasasVexlerZuazua,HSW,LiStadler,PieperDiss,Stadler}.
TV-regularization is also of significant importance in imaging problems and its usefulness for, e.g., noise removal has long been known \cite{Rudin1992}.
However, we take the point of view that imaging problems are different in nature from optimal control problems, for instance because their forward operator is usually cheap to evaluate and non-compact.
This paper is organized as follows. After preliminaries in section~\ref{sec:prelim},
we consider existence, optimality conditions and convergence of solutions in section~\ref{sec:origandregulproblems}.
In section~\ref{sec_regularity} we study differentiability of the adjoint-to-control mapping, which paves the way
for proving local convergence of an inexact Newton method in section~\ref{sec:newton}.
Section~\ref{sec:FEapproximation} addresses the Finite Element approximation and its convergence, while section~\ref{sec:numericalsolution} provides the
path-following method.
Numerical experiments are presented in section~\ref{sec:numericmainchapter}, including results for the test problem with an explicit solution.
In section~\ref{sec_sum} we summarize.
Several technical results such as H\"older continuity of solutions to quasilinear PDEs are deferred to the appendix.
\section{Preliminaries}\label{sec:prelim}
We recall facts about the space $\ensuremath {\text{BV}(\Omega)} $, introduce the smoothed BV seminorm that we use in this work, and collect properties of the solution operator associated to the PDE in \eqref{eq:ocintro}.
\subsection{Functions of bounded variation}
The following statements about $\ensuremath {\text{BV}(\Omega)} $ can be found in \cite[Chapter~3]{ambrosio} unless stated otherwise.
For $u\in L^1(\Omega)$ we let
\begin{equation*}
|u|_{\ensuremath {\text{BV}(\Omega)} } := \sup_{v\in C^1_0(\Omega)^N, \lVert \lvert v\rvert \rVert_\infty \leq 1} \int_\Omega u \operatorname{div} v\ensuremath {\,\mathrm{d}x},
\end{equation*}
where here and throughout, $|\cdot|$ denotes the Euclidean norm.
The space of functions of bounded variation is defined as
\begin{equation*}
\ensuremath {\text{BV}(\Omega)} := \Bigl\{u\in L^1(\Omega): \; \lvert u \rvert_{\ensuremath {\text{BV}(\Omega)} } < \infty
\Bigr\},
\end{equation*}
and $\lvert u\rvert_{\ensuremath {\text{BV}(\Omega)} }$ is called the BV seminorm (also TV seminorm) of $u\in\ensuremath {\text{BV}(\Omega)} $.
We endow $\ensuremath {\text{BV}(\Omega)} $ with the norm $\lVert \cdot \rVert_{\ensuremath {\text{BV}(\Omega)} } := \lVert \cdot \rVert_{L^1(\Omega)} + |\cdot|_{\ensuremath {\text{BV}(\Omega)} }$
and recall from \cite[Thm.~10.1.1]{Attouch} that this makes $\ensuremath {\text{BV}(\Omega)} $ a Banach space.
It can be shown that $u\in\ensuremath {\text{BV}(\Omega)} $ iff there exists a vector measure $(\partial_{x_1} u, \dots, \partial_{x_N} u )^T = \nabla u \in\ensuremath {{\cal M}(\Omega)} ^N$ such that for all $i\in\{1,\ldots,N\}$ there holds
\begin{equation*}
\int_\Omega \partial_{x_i} u v\ensuremath {\,\mathrm{d}x} = - \int_\Omega u \partial_{x_i} v\ensuremath {\,\mathrm{d}x} \qquad \forall v\in C_0^\infty(\Omega),
\end{equation*}
where $\ensuremath {{\cal M}(\Omega)} $ denotes the linear space of regular Borel measures, e.g. \cite[Chapter~2]{Rudin1987}.
Also, for $u\in\ensuremath {\text{BV}(\Omega)} $ we have $|u|_{\ensuremath {\text{BV}(\Omega)} }=\lVert \lvert\nabla u\rvert \rVert_{\ensuremath {{\cal M}(\Omega)} }$, i.e., $|u|_{\ensuremath {\text{BV}(\Omega)} }$ is the total variation of the vector-measure $\nabla u$.
The space $\ensuremath {\text{BV}(\Omega)} $ embeds continuously (compactly) into $L^r(\Omega)$ for $r\in[1,\frac{N}{N-1}]$ ($r \in [1, \frac{N}{N-1})$), see, e.g., \cite[Cor.~3.49 and Prop.~3.21]{ambrosio}. We use the convention that $\frac{N}{N-1}=\infty$ for $N=1$.
Also important is the notion of strict convergence, e.g. \cite{ambrosio,Attouch}.
\begin{definition} \label{def:strictconv}
For $r \in [1, \frac{N}{N-1} ]$ the metric $\ensuremath { d_{\operatorname{BV},r} }$ is given by
\begin{align*}
\ensuremath { d_{\operatorname{BV},r} }\colon & \ensuremath {\text{BV}(\Omega)} \times \ensuremath {\text{BV}(\Omega)} \rightarrow \mathds{R},\\
& (u,v) \mapsto \lVert u-v \rVert_{L^r(\Omega)} + \left| |u|_{\ensuremath {\text{BV}(\Omega)} } - |v|_{\ensuremath {\text{BV}(\Omega)} } \right|.
\end{align*}
Convergence with respect to $d_{\operatorname{BV},1}$ is called \emph{strict convergence}.
\end{definition}
\begin{remark}
The embedding $\ensuremath {\text{BV}(\Omega)} \hookrightarrow L^r(\Omega)$, for $r \in [1, \frac{N}{N-1} ]$, implies that $d_{BV, r}$ is well-defined and continuous with respect to $\lVert \cdot \rVert_{\ensuremath {\text{BV}(\Omega)} }$.
\end{remark}
We will also use the following density property.
\begin{lemma} \label{lem:bvdensity}
$C^\infty(\bar \Omega)$ is dense in $(\ensuremath {\text{BV}(\Omega)} \cap L^r(\Omega),\,\ensuremath { d_{\operatorname{BV},r} })$ for $r \in [1, \frac{N}{N-1} ]$.
\end{lemma}
\begin{proof}
By straightforward modifications the proof for the special case $r=1$ in \cite[Thm.~10.1.2]{Attouch} can be extended,
using that the sequence of mollifiers constructed in the proof converges in $L^r$, see \cite[Prop.~2.2.4]{Attouch}. \qed
\end{proof}
For the remainder of this work we fix a number $s=s(N) \in (1,\frac{N}{N-1})$ with
\begin{center}
\fbox{$\ensuremath {\text{BV}(\Omega)} \hookrightarrow\hookrightarrow L^s(\Omega)\hookrightarrow H^{-1}(\Omega)$,}
\end{center}
where the first embedding is compact and the second is continuous. For $N=1$ we interpret $\frac{N}{N-1}$ as $\infty$.
\begin{remark}
Consider, for instance, $N=2$ and any $r\in (1,2)$. Then we have $\ensuremath {\text{BV}(\Omega)} \hookrightarrow\hookrightarrow L^r(\Omega)$ and $H^1(\Omega)\hookrightarrow L^\frac{r}{r-1}(\Omega)$ so that any $s \in(1,2)$ can be used.
\end{remark}
\subsection{The smoothed BV seminorm}
We will replace the BV seminorm in \eqref{eq:ocintro} by the function $\psi_\delta \colon \ensuremath {\text{BV}(\Omega)} \rightarrow \mathds{R}$,
\begin{equation*}
\psi_\delta(u) := \sup \, \Biggl\lbrace \int_\Omega u \operatorname{div} v + \sqrt{ \delta ( 1- |v|^2 ) } \ensuremath {\,\mathrm{d}x} : \; v \in C_0^1(\Omega)^N, \, \lVert\lvert v \rvert \rVert_{L^\infty(\Omega)} \leq 1 \Biggr\rbrace,
\end{equation*}
where $\delta\geq 0$. We stress that $\psi_\delta$ is frequently employed in imaging problems for the same purpose, for instance in \cite{acar,chan}. It has the following properties.
\begin{lemma} \label{lem:psidelta}
The following statements are true for all $\delta\geq 0$.
\begin{enumerate}
\item For any $u\in \ensuremath {\text{BV}(\Omega)} $ there holds
\begin{equation*}
|u|_{\ensuremath {\text{BV}(\Omega)} } = \psi_0 (u) \leq \psi_\delta(u) \leq |u|_{\ensuremath {\text{BV}(\Omega)} } + \sqrt{\delta} |\Omega|.
\end{equation*}
\item $\psi_\delta$ is lower semi-continuous with respect to the $L^1(\Omega)$ norm.
\item $\psi_\delta$ is convex.
\item For all $u\in W^{1,1}(\Omega)$ we have \label{thm:psideltaitem4}
\begin{equation*}
\psi_\delta (u) = \int_\Omega \sqrt{ \delta + |\nabla u|^2 } \ensuremath {\,\mathrm{d}x}.
\end{equation*}
\item The function $\psi_\delta |_{H^1(\Omega)}$ is Lipschitz with respect to $\lVert\cdot\rVert_{\ensuremath {H^1(\Omega)} }$.
\end{enumerate}
\end{lemma}
\begin{proof}
The first four statements are from \cite[Section~2]{acar} and the last one follows
from $H^1(\Omega)\hookrightarrow W^{1,1}(\Omega)$, 4. and the Lipschitz continuity of $r\mapsto \sqrt{\delta+r^2}$. \qed
\end{proof}
\subsection{The solution operator of the state equation}
\begin{lemma} \label{lem:solutionoperator}
For every $u\in H^{-1}(\Omega)$ the operator equation $Ay=u$ in
\eqref{eq:ocintro} has a unique solution $y=y(u)\in\VO$. The solution operator
\begin{equation*}
S \colon H^{-1}(\Omega) \rightarrow \VO, \quad
u \mapsto y(u)
\end{equation*}
is linear, continuous, and bijective. In particular, $S$ is $L^s$-$L^2$ continuous.
Moreover, for given $q\in (1,\infty)$ there is a constant $C>0$ such that
\begin{equation*}
\lVert Su \rVert_{W^{2,q}(\Omega)} \leq C \lVert u \rVert_{L^q(\Omega)}
\end{equation*}
is satisfied for all $u\in L^q(\Omega)$.
\end{lemma}
\begin{proof}
Except for the estimate all statements follow from the Lax-Milgram theorem.
The estimate is a consequence of \cite[Lemma 2.4.2.1, Theorem 2.4.2.5]{grisvard}.
\qed
\end{proof}
\begin{remark}\label{rem_feasiblesetocnonempty}
From $\ensuremath {\text{BV}(\Omega)} \hookrightarrow L^s(\Omega)\hookrightarrow H^{-1}(\Omega)$ and Lemma~\ref{lem:solutionoperator} we obtain that \eqref{eq:ocintro} has a nonempty feasible set.
\end{remark}
\section{The solutions of original and regularized problems} \label{sec:origandregulproblems}
In this section we prove the existence of solutions for \eqref{eq:ocintro} and the associated regularized problems,
characterize the solutions by optimality conditions, and show their convergence in appropriate function spaces.
\subsection{The original problem: Existence of solutions and optimality conditions} \label{sec:reducedproblem}
To establish the existence of a solution for \eqref{eq:ocintro} we use the \emph{reduced problem}
\begin{equation*} \tag{ROC} \label{eq:redProblem}
\min_{u\in\ensuremath {\text{BV}(\Omega)} } \; \underbrace{\frac{1}{2} \lVert Su-y_\Omega \rVert^2_{L^2(\Omega)} + \beta \lvert u \rvert_{\ensuremath {\text{BV}(\Omega)} }}_{=:j(u)}.
\end{equation*}
\begin{lemma} \label{lem:uregjconvex}\label{lem:uregjcont}
The function $j:\ensuremath {\text{BV}(\Omega)} \rightarrow\mathds{R}$ is well-defined, strictly convex, continuous with respect to $d_{BV,s}$, and coercive with respect to
$\norm[\ensuremath {\text{BV}(\Omega)} ]{\cdot}$.
\end{lemma}
\begin{proof}
The term $\frac{1}{2} \lVert Su-y_\Omega \rVert^2_{L^2(\Omega)}$ is well-defined by Remark~\ref{rem_feasiblesetocnonempty} and strictly convex in $u$ due to the injectivity of $S$.
Since $\lvert \cdot \rvert_{\ensuremath {\text{BV}(\Omega)} }$ is convex, the strict convexity of $j$ follows.
The continuity holds because $S$ is $L^s$-$L^2$ continuous.
The coercivity follows by virtue of \cite[Lemma~4.1]{acar}
using again that $S$ is injective and $L^s$-$L^2$ continuous.
\qed
\end{proof}
The strict convexity implies that $j$ has at most one (local=global) minimizer.
\begin{theorem}\label{thm_originalproblemhasuniqueglobalsolution}
The problem \eqref{eq:redProblem} has a unique solution $\bar u\in\ensuremath {\text{BV}(\Omega)} $.
\end{theorem}
\begin{proof}
The proof is included in the proof of Theorem~\ref{thm:solutions}. \qed
\end{proof}
As usual, the \emph{optimal state} $\bar y$ and the \emph{optimal adjoint state} $\bar p$ are given by
\begin{equation*}
\bar y:=S\bar u\in\VO\cap W^{2,r_N}(\Omega) \qquad\text{ and }\qquad \bar p := S^\ast(\bar y - y_\Omega),
\end{equation*}
where, due to $\ensuremath {\text{BV}(\Omega)} \hookrightarrow L^{\frac{N}{N-1}}(\Omega)$ and Lemma~\ref{lem:solutionoperator},
we have $r_N=\frac{N}{N-1}$ for $N\in\{2,3\}$, respectively, $r_N \geq 1$ arbitrarily large for $N=1$. Moreover, $S^\ast\colon H^{-1}(\Omega) \rightarrow \VO$ is the dual operator of $S$, where we have identified
the dual space of $H^{-1}(\Omega)$ with $\VO$ using reflexivity. Since $S^\ast=S$ and $\bar y-y_\Omega\in L^2(\Omega)$, Lemma~\ref{lem:solutionoperator} yields $\bar p\in P$ for
\begin{equation*}
P:=H^2(\Omega) \cap H_0^1(\Omega).
\end{equation*}
It is standard to show that $\bar p$ is the unique weak solution of
\begin{equation*}
\left\{
\begin{aligned}
\mathcal{A} p + c_0 p &= \bar y - y_\Omega && \text{ in }\Omega,\\
p &= 0 && \text{ on }\partial\Omega.
\end{aligned}\right.
\end{equation*}
\begin{remark}
The optimality conditions of \eqref{eq:redProblem} are only needed for the construction of the test problem with explicit solution in appendix~\ref{sec_examplewithexplicitsolution}, but not for the following analysis. They are deferred to appendix~\ref{sec_optcondorigprob}. We stress, however, that they allow to discuss the sparsity of $\nabla \bar u$, cf. Remark~\ref{rem_sparsityg}.
\end{remark}
\subsection{The regularized problems: Existence of solutions and optimality conditions} \label{sec:regprob}
Smoothing the BV seminorm and adding an $H^1$ regularization to $j$ yields
\begin{equation*} \tag{\mbox{ROC$_{\gamma,\delta}$}} \label{eq:regProblem}
\min_{u\in\ensuremath {\text{BV}(\Omega)} } \; \underbrace{\frac{1}{2} \norm[\ensuremath {L^2(\Omega)} ]{Su-y_\Omega}^2 + \beta \psi_\delta(u) + \frac{\gamma}{2}\norm[H^1(\Omega)]{u}^2}_{=:j_\ensuremath {{\gamma,\delta}} (u)},
\end{equation*}
where we set $j_\ensuremath {{\gamma,\delta}} (u):=+\infty$ for $u\in\ensuremath {\text{BV}(\Omega)} \setminus\ensuremath {H^1(\Omega)} $ if $\gamma>0$.
\begin{lemma} \label{thm:jconvex}
For any $\ensuremath {{\gamma,\delta}} \geq 0$ the function $j_\ensuremath {{\gamma,\delta}} :\ensuremath {\text{BV}(\Omega)} \rightarrow\mathds{R}\cup\{+\infty\}$ is well-defined, strictly convex, and coercive with respect to $\norm[\ensuremath {\text{BV}(\Omega)} ]{\cdot}$.
Moreover, the function $j_\ensuremath {{\gamma,\delta}} \vert_{\ensuremath {H^1(\Omega)} }$ is $\ensuremath {H^1(\Omega)} $ continuous for any $\ensuremath {{\gamma,\delta}} \geq 0$.
\end{lemma}
\begin{proof}
The well-definedness and strict convexity of $j_\ensuremath {{\gamma,\delta}} $ follow similarly as for $j$ in Lemma~\ref{lem:uregjconvex}.
Using Lemma~\ref{lem:psidelta}~1. we find $j\leq j_\ensuremath {{\gamma,\delta}} $, so $j_\ensuremath {{\gamma,\delta}} $ inherits the coercivity from $j$.
The continuity follows term by term.
For the first term it is enough to recall from Lemma~\ref{lem:solutionoperator} the $L^2$-$L^2$ continuity of $S$.
The second term is Lipschitz in $H^1$ by Lemma~\ref{lem:psidelta}. The continuity of the third term is obvious. \qed
\end{proof}
We obtain existence of unique and global solutions.
\begin{theorem} \label{thm:solutions}
For any $\ensuremath {{\gamma,\delta}} \geq 0$, \eqref{eq:regProblem} has a unique solution $\bar u_\ensuremath {{\gamma,\delta}} \in \ensuremath {\text{BV}(\Omega)} $.
For $\gamma>0$ we have $\bar u_\ensuremath {{\gamma,\delta}} \in H^1(\Omega)$.
\end{theorem}
\begin{proof}
Since $j_\ensuremath {{\gamma,\delta}} $ is strictly convex by Lemma~\ref{thm:jconvex}, there is at most one minimizer.
For $\gamma>0$ the existence of $\bar u_{\ensuremath {{\gamma,\delta}} }\in \ensuremath {H^1(\Omega)} $ follows from standard arguments since $j_\ensuremath {{\gamma,\delta}} \vert_{H^1(\Omega)}$ is strongly convex
and $H^1$ continuous by Lemma~\ref{thm:jconvex}.
For $\gamma=0$ and any $\delta\geq 0$, the existence of a minimizer follows from \cite[Theorem~4.1]{acar} by use of the injectivity and boundedness of $S:H^{-1}(\Omega)\rightarrow Y\hookrightarrow L^2(\Omega)$. While $\Omega$ is convex in \cite{acar}, \cite[Theorem~4.1]{acar} remains true without this assumption.
\qed
\end{proof}
\emph{Optimal state} $\bar y_\ensuremath {{\gamma,\delta}} $ and \emph{optimal adjoint state} $\bar p_\ensuremath {{\gamma,\delta}} $
for \eqref{eq:regProblem} are given by
\begin{equation*}
\bar y_\ensuremath {{\gamma,\delta}} := S \bar u_\ensuremath {{\gamma,\delta}} \in \VO\cap W^{2,r_N}(\Omega) \qquad\text{ and }\qquad
\bar p_\ensuremath {{\gamma,\delta}} := S^\ast \bigl( \bar y_\ensuremath {{\gamma,\delta}} - y_\Omega \bigr)\in P,
\end{equation*}
where $r_N=\frac{N}{N-1}$ for $N\in\{2,3\}$, respectively, $r_N\geq 1$ arbitrarily large for $N=1$.
In particular, $\bar p_\ensuremath {{\gamma,\delta}} $ is the unique weak solution of
\begin{equation*}
\left\{
\begin{aligned}
\mathcal{A} p + c_0 p &= \bar y_\ensuremath {{\gamma,\delta}} - y_\Omega & &\text{ in }\Omega,\\
p &= 0 & &\text{ on }\partial\Omega.
\end{aligned}
\right.
\end{equation*}
The optimality conditions of \eqref{eq:regProblem} are based on differentiability of $j_\ensuremath {{\gamma,\delta}} $.
\begin{lemma} \label{lem:jderivatives}
For $\ensuremath {{\gamma,\delta}} >0$ the functional $j_{\ensuremath {{\gamma,\delta}} }:\ensuremath{ \ensuremath {H^1(\Omega)} }\rightarrow\mathds{R}$ is
\text{Fr{\'e}chet } differentiable.
Its first derivative is given by
\begin{equation*}
j^\prime_\ensuremath {{\gamma,\delta}} (u)v = \left( S^\ast(Su-y_\Omega), v \right)_{L^2(\Omega)} + \beta \psi_\delta^\prime(u)v + \gamma (u, v)_{H^1(\Omega)} \qquad\forall v\in \ensuremath{ \ensuremath {H^1(\Omega)} },
\end{equation*}
where
\begin{equation*}
\psi_\delta^\prime(u) v = \int_\Omega \frac{(\nabla u, \nabla v)}{\sqrt{ \delta + |\nabla u|^2} } \ensuremath {\,\mathrm{d}x} \qquad\forall v\in \ensuremath{ \ensuremath {H^1(\Omega)} }.
\end{equation*}
\end{lemma}
\begin{proof}
It suffices to argue for $\psi_\delta$, which we do in Lemma~\ref{thm:psiderivativeNEU} in appendix~\ref{sec_differentiabilityofpsidelta}. The other terms are standard. \qed
\end{proof}
For differentiable convex functions a vanishing derivative is necessary and sufficient for a global minimizer.
This yields the following optimality conditions.
\begin{theorem} \label{thm:optcond}
For $\ensuremath {{\gamma,\delta}} > 0$ the control $\bar u_\ensuremath {{\gamma,\delta}} \in \ensuremath{ \ensuremath {H^1(\Omega)} }$ is the solution of \eqref{eq:regProblem} iff
\begin{equation*}
j_\ensuremath {{\gamma,\delta}} ^\prime(\bar u_\ensuremath {{\gamma,\delta}} )v = 0 \qquad \forall v \in \ensuremath{ \ensuremath {H^1(\Omega)} },
\end{equation*}
which is the nonlinear Neumann problem
\begin{equation} \label{eq:optpde}
\gamma (\bar u_\ensuremath {{\gamma,\delta}} , v)_{H^1(\Omega)} + \beta \int_\Omega \frac{\left( \nabla \bar u_\ensuremath {{\gamma,\delta}} , \nabla v \right) }{ \sqrt{ \delta + | \nabla \bar u_\ensuremath {{\gamma,\delta}} |^2 } }\ensuremath {\,\mathrm{d}x} = -(\bar p_\ensuremath {{\gamma,\delta}} , v)_{L^2(\Omega)} \qquad \forall v\in \ensuremath{ \ensuremath {H^1(\Omega)} },
\end{equation}
where $\bar p_\ensuremath {{\gamma,\delta}} = S^\ast ( S \bar u_\ensuremath {{\gamma,\delta}} - y_\Omega)$.
\end{theorem}
\subsection{Convergence of the path of solutions} \label{sec:regconvergence}
We prove that $(\bar u_\ensuremath {{\gamma,\delta}} , \bar y_\ensuremath {{\gamma,\delta}} , \bar p_\ensuremath {{\gamma,\delta}} )$ converges to
$(\bar u,\bar y,\bar p)$ for $\ensuremath {{\gamma,\delta}} \to 0$. As a first step we show convergence of the objective values.
\begin{lemma} \label{lem:vcont00}
We have
\begin{equation*}
j_\ensuremath {{\gamma,\delta}} (\bar u_\ensuremath {{\gamma,\delta}} ) \xrightarrow{ \mathds{R}_{\geq 0}^2 \ni (\gamma,\delta)\rightarrow (0,0)} j(\bar u).
\end{equation*}
\end{lemma}
\begin{proof}
Let $\epsilon>0$ and let $((\gamma_k,\delta_k))_{k\in\mathds{N}} \subset \mathds{R}_{\geq 0}^2$ converge to $(0,0)$. There holds
\begin{equation*}
0 \leq j_{\gamma_k,\delta_k}(\bar u_{\gamma_k,\delta_k}) - j(\bar u) = \bigl[ j_{\gamma_k,\delta_k}(\bar u_{\gamma_k,\delta_k}) - j_{\gamma_k,0}(\bar u_{\gamma_k,0}) \bigr] + \bigl[ j_{\gamma_k,0}(\bar u_{\gamma_k,0}) - j(\bar u) \bigr],
\end{equation*}
where we used $j(\bar u) \leq j(\bar u_{\gamma_k,\delta_k}) \leq j_{\gamma_k,\delta_k}(\bar u_{\gamma_k,\delta_k})$. The first term in brackets satisfies
\begin{align*}
j_{\gamma_k,\delta_k}(\bar u_{\gamma_k,\delta_k}) - j_{\gamma_k, 0}(\bar u_{\gamma_k,0}) & \leq j_{\gamma_k, \delta_k}(\bar u_{\gamma_k, 0}) - j_{\gamma_k, 0}(\bar u_{\gamma_k,0})\\
& = \beta \psi_{\delta_k}(\bar u_{\gamma_k, 0}) - \beta |\bar u_{\gamma_k, 0}|_{\ensuremath {\text{BV}(\Omega)} } \leq \beta \sqrt{\delta_k} |\Omega|,
\end{align*}
where the last inequality follows from Lemma~\ref{lem:psidelta}.
For the second term in brackets we deduce from Lemma~\ref{lem:bvdensity} and the $d_{BV,s}$ continuity of $j$ established in Lemma~\ref{lem:uregjcont} that there is $u_\epsilon \in C^\infty(\bar\Omega)$ such that $|j(\bar u)-j(u_\epsilon)| < \epsilon$. This yields
\begin{equation*}
\begin{split}
j_{\gamma_k,0}(\bar u_{\gamma_k, 0}) - j(\bar u)
& \leq j_{\gamma_k,0}(u_\epsilon) - j(\bar u)\\
& = j(u_\epsilon) + \frac{\gamma_k}{2} ||u_\epsilon||^2_{H^1(\Omega)} - j(\bar u) \leq \epsilon + \frac{\gamma_k}{2} ||u_\epsilon||^2_{H^1(\Omega)}.
\end{split}
\end{equation*}
Putting the estimates for the two terms together shows
\begin{equation*}
|j_{\gamma_k,\delta_k}(\bar u_{\gamma_k,\delta_k}) - j(\bar u)| \leq \beta \sqrt{\delta_k} |\Omega| + \epsilon + \frac{\gamma_k}{2} \lVert u_\epsilon \rVert^2_{H^1(\Omega)}.
\end{equation*}
For $k\rightarrow\infty$ this implies the claim since $\epsilon>0$ was arbitrary and
\begin{equation*}
0 \leq \liminf_{k\rightarrow\infty} |j_{\gamma_k,\delta_k}(\bar u_{\gamma_k,\delta_k}) - j(\bar u)| \leq \limsup_{k\rightarrow\infty} |j_{\gamma_k,\delta_k}(\bar u_{\gamma_k,\delta_k}) - j(\bar u)| \leq \epsilon.
\end{equation*}
\qed
\end{proof}
We infer that the optimal controls $\bar u_{\ensuremath {{\gamma,\delta}} }$ converge to $\bar u$ in $L^r$ for suitable $r$.
\begin{lemma}\label{lem_Lsconvofregulcontrols}
For any $r\in[1,\frac{N}{N-1})$ we have $\lVert \bar u_\ensuremath {{\gamma,\delta}} - \bar u\rVert_{L^r(\Omega)} \xrightarrow{(\gamma,\delta) \rightarrow (0,0)} 0$.
\end{lemma}
\begin{proof}
Let $((\gamma_k,\delta_k))_{k\in\mathds{N}} \subset \mathds{R}_{\geq 0}^2$ converge to $(0,0)$. Let $C > 0$ be so large that $\gamma_k,\delta_k\leq C$ for all $k$. The optimality of $\bar u_{\gamma_k,\delta_k}$ and Lemma~\ref{lem:psidelta} yield for each $k\in\mathds{N}$
\begin{equation*}
j(\bar u_{\gamma_k,\delta_k}) \leq j_{\gamma_k,\delta_k}(\bar u_{\gamma_k,\delta_k}) \leq j_{\gamma_k,\delta_k}(0) \leq j_{C,C}(0).
\end{equation*}
As $(j(\bar u_{\gamma_k,\delta_k}))_{k\in\mathds{N}}$ is bounded, we obtain from Lemma~\ref{lem:uregjcont} that $(\norm[\ensuremath {\text{BV}(\Omega)} ]{\bar u_{\gamma_k,\delta_k}})_{k\in\mathds{N}}$ is bounded, too.
The compact embedding of $\ensuremath {\text{BV}(\Omega)} $ into $L^{r}(\Omega)$, $r\in[1,\frac{N}{N-1})$ thus implies that there exists $\tilde u\in L^r(\Omega)$ such that a subsequence of $\left( \bar u_{\gamma_k,\delta_k} \right)_{k\in\mathds{N}}$, denoted in the same way, converges to $\tilde u$ in $L^r(\Omega)$. It is therefore enough to show $\tilde u=\bar u$.
Since $j$ is lower semi-continuous in the $L^s$ topology, cf. Lemma~\ref{lem:psidelta} and continuity and convexity of the other terms,
we have
\begin{equation*}
j(\tilde u) \leq \liminf_{k\rightarrow\infty} j(\bar u_{\gamma_k,\delta_k}) \leq \liminf_{k\rightarrow\infty} j_{\gamma_k,\delta_k}(\bar u_{\gamma_k,\delta_k})
= j(\bar u),
\end{equation*}
where we used Lemma~\ref{lem:vcont00} to derive the equality.
This shows $\tilde u\in\ensuremath {\text{BV}(\Omega)} $, hence Theorem~\ref{thm_originalproblemhasuniqueglobalsolution} implies $\tilde u = \bar u$. \qed
\end{proof}
In fact, the convergence of $\bar u_{\ensuremath {{\gamma,\delta}} }$ to $\bar u$ is stronger.
\begin{theorem}\label{thm_convbaru}
For any $r\in[1,\frac{N}{N-1})$ we have $d_{BV,r}(\bar u_{\gamma,\delta}, \bar u) \xrightarrow{(\gamma,\delta)\rightarrow (0,0)} 0.$
\end{theorem}
\begin{proof}
For any $\ensuremath {{\gamma,\delta}} \geq 0$ we have $j(\bar u) \leq j(\bar u_\ensuremath {{\gamma,\delta}} ) \leq j_\ensuremath {{\gamma,\delta}} (\bar u_\ensuremath {{\gamma,\delta}} )$, so Lemma~\ref{lem:vcont00} yields $\lim_{(\gamma,\delta)\rightarrow (0,0)} j(\bar u_\ensuremath {{\gamma,\delta}} ) = j(\bar u)$. Furthermore, there holds
\begin{equation*}
\begin{split}
\beta \Bigl| |\bar u|_{\ensuremath {\text{BV}(\Omega)} } - |\bar u_\ensuremath {{\gamma,\delta}} |_{\ensuremath {\text{BV}(\Omega)} } \Bigr| & \leq \bigl| j(\bar u) - j(\bar u_\ensuremath {{\gamma,\delta}} ) \bigr|\\
& \quad + \frac12 \left| \lVert S\bar u-y_\Omega \rVert^2_{L^2(\Omega)} - \lVert S\bar u_\ensuremath {{\gamma,\delta}} - y_\Omega \rVert^2_{L^2(\Omega)} \right|.
\end{split}
\end{equation*}
By Lemma~\ref{lem_Lsconvofregulcontrols} and the continuity of $S$ from $L^s(\Omega)$ to $L^2(\Omega)$ we thus find
\begin{equation*}
|\bar u_\ensuremath {{\gamma,\delta}} |_{\ensuremath {\text{BV}(\Omega)} } \xrightarrow{(\gamma,\delta)\rightarrow (0,0)} |\bar u|_{\ensuremath {\text{BV}(\Omega)} }.
\end{equation*}
Together with Lemma~\ref{lem_Lsconvofregulcontrols} this proves the claim. \qed
\end{proof}
We conclude this section with the convergence of $(\bar y_{\ensuremath {{\gamma,\delta}} }, \bar p_\ensuremath {{\gamma,\delta}} )$ to $(\bar y,\bar p)$.
\begin{theorem} \label{thm:barybarpconv}
For any $r\in [1,\frac{N}{N-1})$ and any $r^\prime\in[1,\infty)$ we have
\begin{equation*}
\lim_{(\gamma,\delta)\rightarrow (0,0)} \lVert \bar y_\ensuremath {{\gamma,\delta}} - \bar y \rVert_{W^{2,r}(\Omega)} = \lim_{(\gamma,\delta)\rightarrow (0,0)} \lVert \bar p_\ensuremath {{\gamma,\delta}} - \bar p \rVert_{W^{2,r^\prime}(\Omega)} = 0.
\end{equation*}
\end{theorem}
\begin{proof}
The continuity of $S$ from $L^q$ to $W^{2,q}$ for any $q\in(1,\infty)$,
see Lemma~\ref{lem:solutionoperator},
implies with Lemma~\ref{lem_Lsconvofregulcontrols} that $\lim_{(\gamma,\delta)\rightarrow (0,0)} \lVert \bar y_\ensuremath {{\gamma,\delta}} - \bar y \rVert_{W^{2,r}(\Omega)} = 0$ for any $r\in[1,\frac{N}{N-1})$.
Since for any $r^\prime\in(1,\infty)$ there is $r\in [1,\frac{N}{N-1})$ such that $W^{2,r}(\Omega)\hookrightarrow L^{r^\prime}(\Omega)$ is satisfied, we can use the $L^{r^\prime}$-$W^{2,r^\prime}$ continuity of $S^\ast = S$ to find
$\lim_{(\gamma,\delta)\rightarrow (0,0)} \lVert \bar p_\ensuremath {{\gamma,\delta}} - \bar p \rVert_{W^{2,r^\prime}(\Omega)}=\lim_{(\gamma,\delta)\rightarrow (0,0)} \lVert S^\ast(\bar y_\ensuremath {{\gamma,\delta}} -\bar y) \rVert_{W^{2,r^\prime}(\Omega)}=0$.
\qed
\end{proof}
\begin{remark}
The results of section~\ref{sec:origandregulproblems} can also be established for nonsmooth domains $\Omega$, but
$\bar y,\bar p,\bar y_\ensuremath {{\gamma,\delta}} ,\bar p_\ensuremath {{\gamma,\delta}} $ may be less regular since $S$ may not provide the regularity stated in Lemma~\ref{lem:solutionoperator}.
A careful inspection reveals that only Theorem~\ref{thm:barybarpconv} has to be modified.
If, for instance, $\Omega\subset\mathds{R}^N$, $N\in\{2,3\}$, is
a bounded Lipschitz domain, then \cite[Theorem~3]{Savare98} implies
that Theorem~\ref{thm:barybarpconv} holds if $W^{2,r}$ and $W^{2,r^\prime}$ are both replaced by $H^r$, where $r\in[1,\frac32)$ is arbitrary.
If $\Omega$ is convex, then \cite[Theorem~3.2.1.2]{grisvard} further yields that $W^{2,r^\prime}$ can be replaced by $H^2$.
\end{remark}
\section{Differentiability of the adjoint-to-control mapping}\label{sec_regularity}
The main goal of this section is to show that the PDE
\begin{equation} \label{eq:quasilinearpde}
\left\{
\begin{aligned}
-\divg\left(\left[\gamma+\frac{\beta}{\sqrt{\delta+|\nabla u|^2}}\right]\nabla u\right)+\gamma u & = p &&\text{in }\Omega,\\
\left( \left[ \gamma + \frac{\beta}{\sqrt{\delta + |\nabla u|^2}} \right] \nabla u, \nu \right) & = 0 &&\text{on } \partial\Omega
\end{aligned}
\right.
\end{equation}
has a unique weak solution $u=u(p)\in C^{1,\alpha}(\Omega)$
for every right-hand side $p\in L^\infty(\Omega)$, and that $p\mapsto u(p)$ is Lipschitz continuously \text{Fr{\'e}chet } differentiable in any open ball, where the Lipschitz constant is independent of $\gamma$ and $\delta$ provided $\gamma>0$ and $\delta>0$ are bounded away from zero. This is accomplished in Theorem~\ref{thm:ullfd}.
Note that we suppress the dependency on $\ensuremath {{\gamma,\delta}} $ in $u=u(p;\ensuremath {{\gamma,\delta}} )$.
\begin{assumption}\label{ass6}
We are given constants $0 < \gamma_0 \leq \gamma^0$, $0 < \delta_0 \leq \delta^0$ and $b^0 > 0$.
We denote $I:=[\gamma_0,\gamma^0]\times [\delta_0, \delta^0]$ and write
$\ensuremath { \mathds{B} } \subset\ensuremath {L^\infty(\Omega)} $ for the open ball of radius $b^0>0$ centered at the origin in $\ensuremath {L^\infty(\Omega)} $.
\end{assumption}
We first show that $p\mapsto u(p)$ is well-defined and satisfies a Lipschitz estimate.
\begin{lemma}\label{lem_Hoeldercontinuityofregmeancurvatureequation}
Let Assumption~\ref{ass6} hold.
Then there exist $L>0$ and $\alpha\in (0,1)$ such that for each $(\gamma,\delta)\in I$ and all $p_1, p_2 \in \ensuremath { \mathds{B} } $ the PDE \eqref{eq:quasilinearpde} has
unique weak solutions $u_1=u_1(p_1)\in C^{1,\alpha}(\Omega)$
and $u_2 = u_2(p_2)\in C^{1,\alpha}(\Omega)$ that satisfy
\begin{equation*}
\norm[C^{1,\alpha}(\Omega)]{u_1-u_2} \leq L \norm[\ensuremath {L^\infty(\Omega)} ]{p_1-p_2}.
\end{equation*}
In particular, we have the stability estimate
\begin{equation*}
\norm[C^{1,\alpha}(\Omega)]{u_1} \leq L\norm[\ensuremath {L^\infty(\Omega)} ]{p_1}.
\end{equation*}
\end{lemma}
\begin{proof}
Unique existence and the first estimate are established in Theorem~\ref{thm:quasilinearls} in appendix~\ref{sec_HoeldercontinuityforquasilinPDEs}.
The second estimate follows from the first for $p_2=0$. \qed
\end{proof}
We introduce the function
\begin{equation*}
f:\mathds{R}^N \rightarrow\mathds{R}^N, \qquad f(v) := \beta \frac{v}{\sqrt{\delta+|v|^2}},
\end{equation*}
so that \eqref{eq:quasilinearpde} becomes
\begin{equation} \label{eq:quasilinearpdewithf}
- \divg \Bigl( \gamma \nabla u + f(\nabla u) \Bigr) + \gamma u = p \quad\text{ in } H^1(\Omega)^\ast.
\end{equation}
The following two results prove that the adjoint-to-control mapping is differentiable and has a locally Lipschitz continuous derivative whose Lipschitz constant is bounded on bounded sets uniformly in $I$.
\begin{theorem} \label{thm_PtoUfrechet}
Let Assumption~\ref{ass6} hold and let $\alpha \in (0,1)$ be the constant from Lemma~\ref{lem_Hoeldercontinuityofregmeancurvatureequation}.
For each $(\gamma,\delta)\in I$ the mapping $\ensuremath { \mathds{B} } \ni p\mapsto u(p) \in C^{1,\alpha}(\Omega)$ is Fr\'echet differentiable. Its derivative $z=u'(p)d\in C^{1,\alpha}(\Omega)$ in direction $d \in L^\infty(\Omega)$ is the unique weak solution of the linear PDE
\begin{equation} \label{eq:uprimedef}
\left\{
\begin{aligned}
- \divg \Biggl( \Bigl[\gamma I+ f^{\prime}\bigl(\nabla u(p)\bigr)\Bigr] \nabla z \Biggr) + \gamma z &= d && \text{ in }\Omega, \\
\Biggl( \Bigl[ \gamma I + f^\prime\bigl(\nabla u(p)\bigr) \Bigr] \nabla z, \nu \Biggr) & = 0 && \text{ on } \partial\Omega,
\end{aligned}\right.
\end{equation}
and there exists $C>0$ such that for all $(\gamma,\delta)\in I$, all $p\in\ensuremath { \mathds{B} } $, and all $d\in L^\infty(\Omega)$ we have
\begin{equation*}
\norm[C^{1,\alpha}(\Omega)]{z} \leq C\norm[L^\infty(\Omega)]{d}.
\end{equation*}
\end{theorem}
\begin{proof}
Let $p\in\ensuremath { \mathds{B} } $ and $d\in \ensuremath {L^\infty(\Omega)} $ be such that $p+d \in\ensuremath { \mathds{B} } $.
From Lemma~\ref{lem_Hoeldercontinuityofregmeancurvatureequation} we obtain
$u(p) \in C^{1,\alpha}(\Omega)$ and $\norm[C^{1,\alpha}(\Omega)]{u(p)}\leq C\norm[L^\infty(\Omega)]{p}$, where $C$ is independent of $\gamma,\delta,p$. Combining this with Lemma~\ref{lem_hoeldercompositions} implies
\begin{equation} \label{eq:proof:hoeldercontinuitybeforeAestimate}
f^\prime(\nabla u(p)) = \frac{\beta I}{\sqrt{\delta + |\nabla u(p)|^2}} - \frac{\beta \, \nabla u(p) \nabla u(p)^T}{\bigl(\delta+|\nabla u(p)|^2\bigr)^\frac{3}{2}} \in C^{0,\alpha}(\Omega, \mathds{R}^{N\times N})
\end{equation}
and the estimate
$\norm[C^{0,\alpha}(\Omega)]{A}\leq a^0$ for $A:=\gamma I+f^\prime(\nabla u(p))$ with a constant $a^0$ that does not depend on $\gamma,\delta,p$.
Since $f'(v)$, $v\in\mathds{R}^N$, is the Hessian of the convex function $v\mapsto\beta\sqrt{\delta+\lvert v\rvert^2}$, it is positive semi-definite.
It follows that Theorem~\ref{thm:linearregularity} is applicable. Thus, the PDE \eqref{eq:uprimedef} has a unique weak solution $z \in C^{1,\alpha}(\Omega)$ that satisfies the claimed estimate.
Concerning the \text{Fr{\'e}chet } differentiability we obtain for $r := u(p+d)-u(p)-z \in C^{1,\alpha}(\Omega)$
\begin{equation*}
\begin{split}
- & \divg \Biggl( \Bigl[\gamma I + f^\prime\bigl( \nabla u(p) \bigr)\Bigr] \nabla r \Biggr) + \gamma r \\
& = - \divg \Bigl( \gamma\nabla u(p+d) \Bigr) + \gamma u(p+d) + \divg \Bigl( \gamma \nabla u(p) \Bigr) - \gamma u(p) \\
& \enspace\;\; + \divg \Biggl( \Bigl[\gamma I + f^\prime\bigl(\nabla u(p)\bigr) \Bigr]\nabla z \Biggr) - \gamma z - \divg \Bigl( f^\prime\bigl(\nabla u(p)\bigr) w \Bigr) \\
& = \divg \Bigl( f\bigl(\nabla u(p+d) \bigr) - f\bigl(\nabla u(p)\bigr) - f^\prime\bigl(\nabla u(p)\bigr) w \Bigr),
\end{split}
\end{equation*}
where we set $w:=w(p,d):=\nabla u(p+d)-\nabla u(p)$. Theorem~\ref{thm:linearregularity} implies that there is $C>0$, independent of $d$,
such that
\begin{equation*}
\norm[C^{1,\alpha}(\Omega)]{r} \leq C \norm[C^{0,\alpha}(\Omega)]{f\bigl(\nabla u(p+d) \bigr) - f\bigl(\nabla u(p)\bigr) - f^\prime\bigl(\nabla u(p)\bigr) w}.
\end{equation*}
The expression in the norm on the right-hand side satisfies the following identity pointwise in $\Omega$
\begin{equation*}
\begin{split}
f & \bigl( \nabla u(p+d) \bigr) - f\bigl(\nabla u(p)\bigr) - f^\prime\bigl(\nabla u(p)\bigr) w \\
& = \left( \int_0^1 f^\prime\bigl( \nabla u(p) + t w \bigr) - f^\prime\bigl(\nabla u(p)\bigr) \ensuremath {\,\mathrm{d}t} \right) w \\
& = \left( \int_0^1 \int_0^1 f^{\prime\prime}\bigl( \nabla u(p) + \tau t w\bigr) \ensuremath {\,\mathrm{d}\tau} \, t \ensuremath {\,\mathrm{d}t}\right) [w,w].
\end{split}
\end{equation*}
Lemma~\ref{lem_hoeldercompositions} yields
\begin{equation*}
\norm[C^{1,\alpha}(\Omega)]{r}\leq C \int_0^1\int_0^1 \norm[C^{0,\alpha}(\Omega)]{f^{\prime\prime}\bigl( \nabla u(p) + \tau t w \bigr)} \ensuremath {\,\mathrm{d}\tau} \ensuremath {\,\mathrm{d}t} \,
\norm[C^{1,\alpha}(\Omega)]{u(p+d)-u(p)}^2.
\end{equation*}
As $f \in C^3(\mathds{R}^N,\mathds{R}^N)$ with bounded derivatives we have that $f^{\prime\prime}$ is Lipschitz continuous and bounded.
We infer from Lemma~\ref{lem_hoeldercompositions} and Lemma~\ref{lem_Hoeldercontinuityofregmeancurvatureequation} that
\begin{equation*}
\norm[C^{1,\alpha}(\Omega)]{r}\leq C \norm[L^\infty(\Omega)]{d}^2,
\end{equation*}
which shows $\norm[C^{1,\alpha}(\Omega)]{r} = o(\norm[L^\infty(\Omega)]{d})$ since $C$ is independent of $d$.
\qed
\end{proof}
\begin{theorem} \label{thm:ullfd}
Let Assumption~\ref{ass6} hold and let $\alpha \in (0,1)$ be the constant from Lemma~\ref{lem_Hoeldercontinuityofregmeancurvatureequation}.
Then the mapping $u^\prime: \ensuremath { \mathds{B} } \rightarrow \ensuremath {{\cal L}} (L^\infty(\Omega), C^{1,\alpha}(\Omega))$ is Lipschitz continuous and the Lipschitz constant does not depend on $(\gamma,\delta)$, but only on $\Omega$, $N$, $\gamma_0$, $\gamma^0$, $\delta_0$, $\delta^0$ and $b^0$.
\end{theorem}
\begin{proof}
Let $p, q \in \ensuremath { \mathds{B} } $ and $d \in L^\infty(\Omega)$. Set
$z_p:=\nabla \bigl(u'(p)d\bigr)$ and $z_q:=\nabla \bigl(u'(q)d\bigr)$. Then
\begin{equation*}
- \divg \Bigl( \gamma \bigl[z_p - z_q \bigr] + f^\prime\bigl( \nabla u(p) \bigr) z_p - f^\prime\bigl(\nabla u(q)\bigr) z_q \Bigr) + \gamma \bigl[u^\prime(p)d - u^\prime(q) d\bigr] = 0
\end{equation*}
holds in $H^1(\Omega)^\ast$.
Thus, the difference $r := u^\prime(p)d - u^\prime(q) d$ satisfies
\begin{align*}
- \divg \bigl( \gamma \nabla r \bigr) + \gamma r & = \divg \Bigl( f^\prime\bigl( \nabla u(p) \bigr) z_p - f^\prime\bigl(\nabla u(q)\bigr) z_q \Bigr) \\
& = \divg \Bigl( f^\prime\bigl( \nabla u(p) \bigr) \nabla r \Bigr) + \divg \Bigl( \bigl[ f^\prime\bigl(\nabla u(p)\bigr) - f^\prime\bigl(\nabla u(q)\bigr) \bigr] z_q \Bigr),
\end{align*}
from which we infer that
\begin{equation*}
- \divg \Bigl( \bigl[\gamma I + f^\prime\bigl( \nabla u(p) \bigr) \bigr] \nabla r \Bigr) + \gamma r = \divg \Bigl( \bigl[ f^\prime\bigl(\nabla u(p)\bigr) - f^\prime\bigl(\nabla u(q)\bigr) \bigr] z_q \Bigr)
\end{equation*}
in $H^1(\Omega)^\ast$. By the same arguments as below \eqref{eq:proof:hoeldercontinuitybeforeAestimate},
$A := \gamma I + f^\prime\bigl( \nabla u(p) \bigr)$ satisfies $\norm[C^{0,\alpha}(\Omega)]{A} \leq a^0$ with a constant $a^0$ that does not depend on $\gamma,\delta,p,q$. Moreover, $A$ is elliptic with constant $\gamma_0$.
By Theorem~\ref{thm:linearregularity} this yields
\begin{equation*}
\norm[C^{1,\alpha}(\Omega)]{r}
\leq C \norm[C^{0,\alpha}(\Omega)]{\bigl[ f^\prime\bigl(\nabla u(p)\bigr) - f^\prime\bigl(\nabla u(q)\bigr) \bigr] z_q}.
\end{equation*}
Here, $C>0$ does not depend on $p,q$, but only on the desired quantities.
From Lemma~\ref{lem_hoeldercompositions} and Theorem~\ref{thm_PtoUfrechet} we infer that
\begin{equation*}
\norm[C^{1,\alpha}(\Omega)]{r}
\leq C \norm[C^{0,\alpha}(\Omega)]{f^\prime\bigl(\nabla u(p)\bigr) - f^\prime\bigl(\nabla u(q)\bigr)} \norm[L^\infty(\Omega)]{d}.
\end{equation*}
Lemma~\ref{lem_hoeldercompositions} and Lemma~\ref{lem_Hoeldercontinuityofregmeancurvatureequation} therefore imply
\begin{equation*}
\begin{split}
& \norm[\ensuremath {{\cal L}} ( L^\infty(\Omega), C^{1,\alpha}(\Omega) )]{u^\prime(p) - u^\prime(q)}\\
& \leq C \norm[C^{0,\alpha}(\Omega)]{\int_0^1 f^{\prime\prime}\Bigl( \nabla u(q) + t \bigl[ \nabla u(p) - \nabla u(q) \bigr]\Bigr) \ensuremath {\,\mathrm{d}t}}\norm[C^{0,\alpha}(\Omega)]{\nabla u(p) - \nabla u(q)}\\
& \leq C \int_0^1 \norm[C^{0,\alpha}(\Omega)]{f^{\prime\prime}\Bigl( \nabla u(q) + t \bigl[ \nabla u(p) - \nabla u(q) \bigr] \Bigr)}\ensuremath {\,\mathrm{d}t} \,
\norm[L^\infty(\Omega)]{p-q}.
\end{split}
\end{equation*}
The first factor is bounded since $f''$ is bounded and Lipschitz. This demonstrates the asserted Lipschitz continuity. \qed
\end{proof}
\begin{remark}
Theorem~\ref{thm:ullfd} stays valid, for some different $\alpha$, if $\Omega$ is of class $C^{1,\alpha^\prime}$ for some $\alpha^\prime>0$.
\end{remark}
\section{An inexact Newton method for the regularized problems} \label{sec:newton}
In this section we introduce the formulation of the optimality system of \eqref{eq:regProblem} on which our numerical method is based, and we show that the application of an inexact Newton method to this formulation is globally well-defined and locally convergent.
We use the following assumption.
\begin{assumption}\label{ass8}
We are given constants $0 < \gamma_0 \leq \gamma^0$, $0 < \delta_0 \leq \delta^0$ and $b^0 \geq 0$.
We denote $I:=[\gamma_0,\gamma^0]\times [\delta_0, \delta^0]$ and fix $(\gamma,\delta)\in I$.
\end{assumption}
The optimality conditions from Theorem~\ref{thm:optcond} can be cast as $F(\bar y_\ensuremath {{\gamma,\delta}} ,\bar p_\ensuremath {{\gamma,\delta}} )=0$ for
\begin{equation} \label{eq:Fdefinition}
F: \VO\times P \rightarrow \VO^\ast \times L^2(\Omega), \qquad
F(y,p):=\begin{pmatrix}
Ay - u(-p) \\
y - y_\Omega - A^\ast p
\end{pmatrix},
\end{equation}
and the pair $(\bar y_\ensuremath {{\gamma,\delta}} ,\bar p_\ensuremath {{\gamma,\delta}} )$ is the unique root of $F$.
Note that we suppress the dependency of
$u=u(p;\ensuremath {{\gamma,\delta}} )$ and $F=F(y,p; \ensuremath {{\gamma,\delta}} )$ on $\ensuremath {{\gamma,\delta}} $.
By standard Sobolev embeddings we have $P\subset H^2(\Omega)\hookrightarrow L^\infty(\Omega)$,
hence $u(-p) \in C^{1,\alpha}(\Omega)$ for some $\alpha>0$ by Lemma~\ref{lem_Hoeldercontinuityofregmeancurvatureequation}, so
$F$ is well-defined. A Newton system with a somewhat similar structure is considered in \cite{Schiela_IPMefficient}.
To find the root of $F$ we apply the inexact Newton method Algorithm~\ref{alg:newton}.
\begin{algorithm2e}
\DontPrintSemicolon
\caption{An inexact Newton method for \eqref{eq:regProblem}}\label{alg:newton}
\KwIn{ $(y_0, p_0) \in \VO\times P$, $(\gamma,\delta) \in\mathds{R}_{>0}^2$, $\eta\in[0,\infty)$ }
\For(){$k=0,1,2,\ldots$}
{
\lIf{$F(y_k,p_k)=0$}{set $(y^\ast,p^\ast):=(y_k,p_k)$; \textbf{stop}}
Compute $(\delta y_k, \delta p_k)$ such that $\norm{F(y_k, p_k)+F^\prime( y_k, p_k ) (\delta y_k, \delta p_k)}\leq \eta_k\norm{F(y_k, p_k)}$, where $\eta_k\in[0,\eta]$\\
Set $(y_{k+1}, p_{k+1}) = (y_k,p_k) + (\delta y_k, \delta p_k)$
}
\KwOut{ $(y^\ast,p^\ast)$}
\end{algorithm2e}
The remainder of this section is devoted to the convergence analysis for Algorithm~\ref{alg:newton}. A similar analysis can be carried out if the optimality system of \eqref{eq:regProblem} and the inexact Newton method are based on $u$ instead of $(y,p)$. However, in our numerical experiments the
path-following method based on \eqref{eq:Fdefinition}, cf. section~\ref{sec:numericalsolution},
was clearly superior to its counterpart based on $u$: The former could reduce $\ensuremath {{\gamma,\delta}} $ to much smaller values than the latter
and was also significantly more robust. Both observations are well in line with our previous experience \cite{kruse,Clason2018,Hafemeyer15,HafemeyerMaster,Hafemeyer2019} on PDE-constrained optimal control problems involving the TV seminorm.
Since the homotopy path $(\gamma,\delta)\mapsto(\bar u_{\gamma,\delta},\bar y_{\gamma,\delta},\bar p_{\gamma,\delta})$ is not affected by the formulation of the optimality system,
we conjecture that the superior performance of path-following based on \eqref{eq:Fdefinition} is related to the fact that
$(\bar y_\ensuremath {{\gamma,\delta}} ,\bar p_\ensuremath {{\gamma,\delta}} )$ converges to $(\bar y,\bar p)$ in stronger norms than $\bar u_\ensuremath {{\gamma,\delta}} $ to $\bar u$, cf. Theorem~\ref{thm:barybarpconv}, respectively, Theorem~\ref{thm_convbaru}.
There are many works in which the control is considered as an implicit variable of some sort or avoided altogether, e.g., \cite{kruse,Clason2011,HintermKunisch,HintermStadler,Hinze05,HinzeTroe,NeitzelPruefertSlawig,PieperDiss,Schiela_IPMefficient,WeiserGaenzlerSchiela}.
Concerning the optimal triple $(\bar y,\bar p,\bar u)$ for \eqref{eq:ocintro}
we share with those works the idea to base the optimality system on the smoother variables $(y,p)$. In contrast to those works, however, \eqref{eq:Fdefinition} does neither improve the regularity of the controls that appear as iterates (in comparison to a formulation based on $u$) nor does it circumvent their use.
The next two lemmas yield convergence of Algorithm~\ref{alg:newton}.
\begin{lemma} \label{lem:LipschitzcontOfF}
Let Assumption~\ref{ass8} hold.
Then $F$ defined in \eqref{eq:Fdefinition} is locally Lipschitz continuously \text{Fr{\'e}chet } differentiable. Its derivative at $(y,p)\in Y\times P$ is given by
\begin{equation*}
F^\prime(y,p) \colon \VO\times P \rightarrow \VO^\ast\times L^2(\Omega), \qquad
(\delta y,\delta p) \mapsto \begin{pmatrix}
A & u^\prime(-p) \\
I & -A^\ast
\end{pmatrix} \begin{pmatrix}
\delta y\\ \delta p
\end{pmatrix}.
\end{equation*}
\end{lemma}
\begin{proof}
Only $p\mapsto u(-p)$ is nonlinear, so the claims follow from Theorem~\ref{thm:ullfd}. \qed
\end{proof}
\begin{lemma} \label{lem:Fprimeinvertible}
Let Assumption~\ref{ass8} hold. Then $F^\prime(y,p)$ is invertible for all $(y,p) \in \VO\times P$.
\end{lemma}
\begin{proof}
The proof consists of two parts. First we show that $F^\prime(y, p)$ is injective and second that it is a Fredholm operator of index $0$, see \cite[Chapter~IV, Section~5]{Kato1966}. These two facts imply the bijectivity of $F^\prime(y, p)$.
For the injectivity let $(\delta y, \delta p) \in \VO\times P$ with $F^\prime(y,p)(\delta y, \delta p) = 0 \in \VO^\ast \times L^2(\Omega)$, i.e.
\begin{align} \label{eq:proof:Fprimeinverse1}
0 = A\delta y+ u^\prime(-p)\delta p \in \VO^\ast \qquad\text{ and }\qquad 0 = \delta y - A^\ast \delta p \in L^2(\Omega),
\end{align}
and therefore
\begin{equation*}
\norm[L^2(\Omega)]{\delta y}^2 = (A^\ast \delta p, \delta y)_{L^2(\Omega)} = - (u^\prime(-p) \delta p, \delta p)_{L^2(\Omega)}.
\end{equation*}
The representation of $z:=u^\prime(-p) \delta p$ from Theorem~\ref{thm_PtoUfrechet} yields
\begin{equation} \label{eq:proof:ellipofuprime}
\begin{split}
-\norm[L^2(\Omega)]{\delta y}^2
& = \Biggl(\Bigl[ \gamma I + f^\prime\bigl( \nabla u(-p) \bigr) \Bigr] \nabla z, \nabla z \Biggr)_{L^2(\Omega)}
+ \gamma \bigl( z, z \bigr)_{L^2(\Omega)}\\
& \geq \Bigl( f^\prime\bigl( \nabla u(-p) \bigr) \nabla z, \nabla z \Bigr)_{L^2(\Omega)}.
\end{split}
\end{equation}
Since $f^{\prime}$ is positive semi-definite, we find $\norm[L^2(\Omega)]{\delta y}^2 \leq 0$. This shows $\delta y = 0$.
By \eqref{eq:proof:Fprimeinverse1} this yields $A^\ast \delta p = 0$ in $L^2(\Omega)$, hence $\delta p = 0$, which proves the injectivity.\\
To apply Fredholm theory we decompose $F^\prime(y,p)$ into the two operators
\begin{equation*}
F^\prime(y,p) = \begin{pmatrix}
A & 0 \\ 0 & - A^\ast
\end{pmatrix}
+
\begin{pmatrix}
0 & u^\prime(-p) \\ I & 0
\end{pmatrix}.
\end{equation*}
We want to use \cite[Chapter~IV, Theorem~5.26]{Kato1966}, which states: If the first operator is a Fredholm operator of index $0$ and the second operator is compact with respect to the first operator (see \cite[Chapter~IV, Introduction to Section~3]{Kato1966}), then their sum $F^\prime(y,p)$ is also a Fredholm operator of index $0$. By the injectivity of $F^\prime(y,p)$ this implies its bijectivity.
The operators $A:\VO\rightarrow \VO^\ast$ and $A^\ast: P \rightarrow L^2(\Omega)$ are invertible
by Lemma~\ref{lem:solutionoperator}, and thus
\begin{equation*}
\VO\times P\rightarrow \VO^\ast \times L^2(\Omega), \qquad
(\delta y, \delta p) \mapsto \begin{pmatrix} A & 0 \\ 0 & - A^\ast
\end{pmatrix} \begin{pmatrix}
\delta y\\ \delta p
\end{pmatrix}
\end{equation*}
is invertible and in particular a Fredholm operator of index $0$.
It remains to show that
\begin{equation*}
\VO\times P\rightarrow \VO^\ast \times L^2(\Omega), \qquad
(\delta y, \delta p) \mapsto \begin{pmatrix}
0 & u^\prime(-p) \\ I & 0
\end{pmatrix} \begin{pmatrix}
\delta y\\ \delta p
\end{pmatrix}
\end{equation*}
is compact with respect to the first operator.
Thus, we have to establish that for any sequence $( (\delta y_n, \delta p_n) )_{n\in\mathds{N}} \subset\VO\times P$ such that there exists a $C>0$ with
\begin{align} \label{eq:proof:Fprimeinverse2}
\Bigl( \norm[\VO]{\delta y_n} + \norm[P]{\delta p_n} \Bigr) + \left( \norm[\VO^\ast]{A \delta y_n} + \norm[L^2(\Omega)]{A^\ast \delta p_n} \right) \leq C \qquad \forall n\in\mathds{N},
\end{align}
the sequence $\bigl( ( u^\prime(-p) \delta p_n, \delta y_n ) \bigr)_{n\in\mathds{N}} \subset \VO^\ast \times L^2(\Omega)$ contains a convergent subsequence. By \eqref{eq:proof:Fprimeinverse2} we have that $( \norm[\VO]{\delta y_n} )_{n\in\mathds{N}}$ is bounded.
The compact embedding $\VO \hookrightarrow\hookrightarrow L^2(\Omega)$ therefore implies the existence of a point $\hat y \in L^2(\Omega)$ and a subsequence, denoted in the same way, such that $\norm[L^2(\Omega)]{\delta y_n - \hat y}\to 0$.
We also have that $( \norm[P]{\delta p_n} )_{n\in\mathds{N}}$ is bounded. In particular $\norm[L^\infty(\Omega)]{\delta p_n} \leq b^0$ for all $n\in\mathds{N}$ for some $b^0>0$. By Theorem~\ref{thm_PtoUfrechet} this implies that $( u^\prime(-p) \delta p_n )_{n\in\mathds{N}}$ is bounded in $C^{1,\alpha}(\Omega)$.
Since $C^{1,\alpha}(\Omega)\hookrightarrow\hookrightarrow \VO^\ast$,
the proof is complete. \qed
\end{proof}
\begin{remark}
Lemma~\ref{lem:Fprimeinvertible} implies that Algorithm~\ref{alg:newton} is globally well-defined.
\end{remark}
It is well-known that the properties established in Lemma~\ref{lem:LipschitzcontOfF} and Lemma~\ref{lem:Fprimeinvertible} are sufficient for local linear/q-superlinear/q-quadratic convergence of the inexact Newton method if the residual in iteration $k$ is of appropriate order, e.g. \cite[Theorem~6.1.4]{Kelley_Itmethodseq}. Thus, we obtain the following result.
\begin{theorem}\label{thm_convinexNewtoninfdim}
Let Assumption~\ref{ass8} hold.
If $(y_0,p_0)\in\VO\times P$ is sufficiently close to $(\bar y_{\ensuremath {{\gamma,\delta}} },\bar p_{\ensuremath {{\gamma,\delta}} })$, then Algorithm~\ref{alg:newton} either terminates after finitely many iterations with output $(y^\ast,p^\ast)=(\bar y_{\ensuremath {{\gamma,\delta}} },\bar p_{\ensuremath {{\gamma,\delta}} })$ or it generates a sequence $((y_k,p_k))_{k\in\mathds{N}}$ that converges
to $(\bar y_{\ensuremath {{\gamma,\delta}} },\bar p_{\ensuremath {{\gamma,\delta}} })$. The convergence rate is
r-linear if $\eta<1$, q-linear if $\eta$ is sufficiently small,
q-superlinear if $\eta_k\to 0$, and of q-order $1+\omega$ if $\eta_k=O(\norm{F(y_k,p_k)}^\omega)$. Here, $\omega\in(0,1]$ is arbitrary; for $\omega=1$ this means q-quadratic convergence.
\end{theorem}
\begin{remark}
Lemma~\ref{lem_Hoeldercontinuityofregmeancurvatureequation} shows that
convergence of $(p_k)_{k\in\mathds{N}}$ (with a certain rate) implies convergence of $(u(p_k))_{k\in\mathds{N}}$ in $C^{1,\alpha}(\Omega)$ (with a related rate).
\end{remark}
\section{Finite Element approximation} \label{sec:FEapproximation}
In this section we provide a discretization scheme for \eqref{eq:regProblem}
and prove its convergence. Throughout, we work with a fixed pair $(\gamma,\delta)\in\mathds{R}_{>0}^2$.
\subsection{Discretization} \label{sec:femdiscr}
We use Finite Elements for the discretization of \eqref{eq:regProblem}. Control, state and adjoint state are discretized by piecewise linear and globally continuous elements on a triangular grid. We point out that discretizing the control by piecewise constant Finite Elements
will not ensure convergence to the optimal control $\bar u_{\ensuremath {{\gamma,\delta}} }$, in general; cf. \cite[Section~4]{Bartels2012}.
For all $h\in(0,h_0]$ and a suitable $h_0>0$ let $\mathcal{T}_h$ denote a collection of open triangular cells $T \subset\Omega$ with $h = \max_{T\in\mathcal{T}_h} \operatorname{diam}(T)$. We write $\Omega_h := \operatorname{int}( \cup_{T\in\mathcal{T}_h} \bar T )$.
We assume that there are constants $C>0$ and $c > \frac{1}{2}$ such that
\begin{equation} \label{eq:boundarydist}
\max_{x\in\partial\Omega_h}\operatorname{dist}( x, \partial\Omega ) \leq Ch^c, \qquad |\Omega\setminus\Omega_h| \xrightarrow{h\rightarrow 0} 0, \qquad |\partial\Omega_h| \leq C
\end{equation}
for all $h\in(0,h_0]$.
We further assume $(\mathcal{T}_h)_{h\in(0,h_0]}$ to be quasi-uniform and $\Omega_h \subset \Omega_{h^\prime}$ for $h^\prime \leq h$. The assumptions in \eqref{eq:boundarydist} are rather mild and in part implied if, for example, $\Omega$ and $(\Omega_h)_{h>0}$ are a family of uniform Lipschitz domains, cf. \cite[Sections 4.1.2 \& 4.1.3]{Hafemeyer2020}.
We utilize the function spaces
\begin{equation*}
V_h := \left\lbrace v_h\in C(\bar\Omega_h): v_h|_T \text{ is affine linear } \forall \,T\in\mathcal{T}_h \right\rbrace, \quad \ensuremath { Y_h } := V_h \cap H^1_0(\Omega_h).
\end{equation*}
Because $V_h\hookrightarrow H^1(\Omega_h)$ it follows that $\ensuremath { Y_h } $ contains precisely those functions of $V_h$ that vanish on $\partial\Omega_h$.
We use the standard nodal basis $\varphi_1, \varphi_2, \dots, \varphi_{\dim(\ensuremath { V_h } )}$ in $\ensuremath { V_h } $ and assume that it is ordered in such a way that $\varphi_1, \varphi_2, \dots, \varphi_{\dim( \ensuremath { Y_h } )}$ is a basis of $\ensuremath { Y_h } $.
For every $u\in L^2(\Omega_h)$ there is a unique $y_h\in\ensuremath { Y_h } $ that satisfies
\begin{equation*}
\int_{\Omega_h} \left(\sum_{i,j=1}^N a_{ij}\partial_i y_h \partial_{j}\varphi_h\right) + c_0 y_h \varphi_h \ensuremath {\,\mathrm{d}x}
= \int_{\Omega_h} u \varphi_h \ensuremath {\,\mathrm{d}x} \qquad\forall\varphi_h\in\ensuremath { Y_h }
\end{equation*}
and by defining $S_h u:=y_h$ we obtain the discrete solution operator
$S_h: L^2(\Omega_h) \rightarrow \ensuremath { Y_h } $ to the PDE in \eqref{eq:ocintro}.
The discretized version of \eqref{eq:regProblem} is given by
\begin{equation*} \tag{\mbox{ROC$_{\gamma,\delta,h}$}} \label{eq:regProblemdisc}
\min_{u\in \ensuremath { V_h } } \; \underbrace{\frac{1}{2} \norm[L^2(\Omega_h)]{S_h u-y_{\Omega}}^2 + \beta \psi_{\delta,h}(u) + \frac{\gamma}{2}\norm[H^1(\Omega_h)]{u}^2}_{=:j_{\gamma,\delta,h}(u)},
\end{equation*}
where $\psi_{\delta,h}:H^1(\Omega_h)\rightarrow\mathds{R}$ is defined in the same way as $\psi_\delta$, but with $\Omega$ replaced by $\Omega_h$.
By standard arguments this problem has a unique optimal solution $\bar u_\ensuremath {{\gamma,\delta}} h$.
Based on $\bar u_\ensuremath {{\gamma,\delta}} h$ we define
$\bar y_\ensuremath {{\gamma,\delta}} h:=S_h\bar u_\ensuremath {{\gamma,\delta}} h$ and $\bar p_\ensuremath {{\gamma,\delta}} h:=S_h^\ast(S_h\bar u_\ensuremath {{\gamma,\delta}} h-{y_{\Omega}})$.
For $h\to 0$ the triple $(\bar u_\ensuremath {{\gamma,\delta}} h,\bar y_\ensuremath {{\gamma,\delta}} h, \bar p_\ensuremath {{\gamma,\delta}} h)$ converges to the continuous optimal triple $(\bar u_\ensuremath {{\gamma,\delta}} ,\bar y_\ensuremath {{\gamma,\delta}} ,\bar p_\ensuremath {{\gamma,\delta}} )$ in an appropriate sense, as we show next.
\subsection{Convergence} \label{sec:disconv}
In this section we prove convergence of the Finite Element approximation.
We will tacitly use that extension-by-zero yields for each $v\in\ensuremath { Y_h } \subset H^1_0(\Omega_h)$ a function in $H^1_0(\Omega)$.
Also, we need the following density result.
\begin{lemma} \label{lem:pseudointerpol}
Let \eqref{eq:boundarydist} hold.
For each $\varphi\in C_0^\infty(\Omega)$ there exists a sequence $(\varphi_h)_{h > 0}$ with $\varphi_h\in Y_h$ for all $h$ such that $\lim_{h\to 0^+} \norm[H^1(\Omega_h)]{\varphi_h-\varphi} = 0$.
\end{lemma}
\begin{proof}
Let $\varphi\in C_0^\infty(\Omega)$.
Due to \eqref{eq:boundarydist} we have $\operatorname{supp}(\varphi)\subset \Omega_h$ for all sufficiently small $h$.
The claim then follows by choosing for $\varphi_h$ the nodal interpolant of $\varphi$ since $\lim_{h\to 0^+}\norm[H^1(\Omega_h)]{\varphi_h-\varphi}=0$ for this choice, see \cite[Theorem 1.103]{Ern2004}.
\qed
\end{proof}
\begin{theorem}\label{thm_FEMconvergenceinLtwo}
Let \eqref{eq:boundarydist} hold.
We have
\begin{equation*}
\lim_{h\to 0^+}\norm[L^2(\Omega)^3]{\left(\bar u_{\gamma,\delta,h},\bar y_{\gamma,\delta,h},\bar p_{\gamma,\delta,h}\right) - \left(\bar u_{\gamma,\delta},\bar y_{\gamma,\delta},\bar p_{\gamma,\delta}\right)} = 0,
\end{equation*}
where $\bar u_\ensuremath {{\gamma,\delta}} h$, $\bar y_\ensuremath {{\gamma,\delta}} h$ and $\bar p_\ensuremath {{\gamma,\delta}} h$ are extended by zero to $\Omega$.
\end{theorem}
\begin{proof}
For ease of notation we do not change indices in this proof when passing to subsequences.
Let $(h_n)_{n\in\mathds{N}}$ be a zero sequence, without loss of generality monotonically decreasing.
From $j_\ensuremath {{\gamma,\delta}} n(\bar u_\ensuremath {{\gamma,\delta}} n)\leq j_\ensuremath {{\gamma,\delta}} n(0)\leq j_\ensuremath {{\gamma,\delta}} (0)$ it follows that there is a constant $C>0$, independent of $n$, such that $\lVert \bar u_\ensuremath {{\gamma,\delta}} n \rVert_{H^1(\Omega_{h_n})} \leq C$. This implies
$\lVert \bar y_\ensuremath {{\gamma,\delta}} n \rVert_{H^1(\Omega_{h_n})} \leq C$.
Using extension by zero we find that $\lVert \bar y_\ensuremath {{\gamma,\delta}} n \rVert_{H^1(\Omega)} \leq C$ for some $C$ that is still independent of $n$.
From the compact embedding of $H^1_0(\Omega)$ into $L^2(\Omega)$ and the reflexivity of $H^1_0(\Omega)$ we obtain a subsequence and a $\hat y \in H^1_0(\Omega)$ such that $\bar y_\ensuremath {{\gamma,\delta}} n \xrightarrow{n\rightarrow\infty} \hat y$ strongly in $L^2(\Omega)$ and weakly in $H_0^1(\Omega)$.
Extending $\bar u_\ensuremath {{\gamma,\delta}} n$ by $0$ to $\Omega$ and using the reflexivity of $L^2(\Omega)$ we obtain on a subsequence that $\bar u_\ensuremath {{\gamma,\delta}} n
\xrightarrow{n\rightarrow\infty} \hat u$ weakly in $L^2(\Omega)$ for some $\hat u\in L^2(\Omega)$.
Let $\varphi\in C_0^\infty(\Omega)$ and $(\varphi_{h_n})_{n\in\mathbb{N}}$ be defined as in Lemma~\ref{lem:pseudointerpol}. Extending $\varphi_{h_n}$ by zero we have
\begin{equation*}
0 = A( \bar y_\ensuremath {{\gamma,\delta}} n) \varphi_{h_n} - (\bar u_\ensuremath {{\gamma,\delta}} n, \varphi_{h_n} )_{L^2(\Omega_{h_n})} \xrightarrow{n\rightarrow\infty} A( \hat y ) \varphi - (\hat u, \varphi )_{L^2(\Omega)}.
\end{equation*}
Thus $\hat y = S\hat u$ by the density of $C_0^\infty(\Omega)$ in $H^1_0(\Omega)$. Analogous arguments show that the adjoints $\bar p_\ensuremath {{\gamma,\delta}} n$ converge in the same way to some $\hat p\in H_0^1(\Omega)$ with $\hat p = S^\ast(\hat y-y_\Omega)$.
It therefore suffices to prove that $\hat u=\bar u_\ensuremath {{\gamma,\delta}} $,
i.e., that $\hat u$ minimizes $j_\ensuremath {{\gamma,\delta}} $, and that $\bar u_\ensuremath {{\gamma,\delta}} n \xrightarrow{n\rightarrow\infty} \hat u$ strongly in $L^2(\Omega)$.
Let $u\in H^1(\Omega) \cap C^\infty(\Omega)$ and denote by $I_h u \in H^1(\Omega_h)$ the usual nodal interpolant. Then it is well-known, e.g. \cite[Theorem 1.103]{Ern2004}, that $\lVert u-I_{h_n}u \rVert_{H^1(\Omega_{h_n})} \xrightarrow{n\rightarrow\infty} 0$.
Let $\hat n\in\mathds{N}$ and $n\geq \hat n$. Using $\Omega_{h_{\hat n}} \subset \Omega_{h_n}$ and the optimality of $\bar u_\ensuremath {{\gamma,\delta}} n$ we find
\begin{equation*}
J_{\gamma,\delta,\hat n}(\bar y_{\gamma,\delta,n},\bar u_{\gamma,\delta,n})\leq j_{\gamma,\delta,n}(\bar u_{\gamma,\delta,n}) \leq j_{\gamma,\delta,n} ( I_{h_n} u ),
\end{equation*}
where $J_{\gamma,\delta,\hat n}:L^2(\Omega_{h_{\hat n}})\times H^1(\Omega_{h_{\hat n}})\rightarrow\mathds{R}$ is given by
\begin{equation*}
J_{\gamma,\delta,\hat n}(v,w) := \frac{1}{2} \norm[L^2(\Omega_{h_{\hat n}})]{v-y_\Omega}^2 + \beta \psi_{\delta,h_{\hat n}}(w) + \frac{\gamma}{2}\norm[H^1(\Omega_{h_{\hat n}})]{w}^2.
\end{equation*}
By $\lVert u-I_{h_n}u \rVert_{H^1(\Omega_{h_n})} \xrightarrow{n\rightarrow\infty} 0$
we obtain
\begin{equation*}
\limsup_{n\rightarrow\infty} \, J_{\gamma,\delta,\hat n}(\bar y_{\gamma,\delta,n},\bar u_{\gamma,\delta,n}) \leq j_{\gamma,\delta}(u).
\end{equation*}
From $\lVert \bar u_\ensuremath {{\gamma,\delta}} n \rVert_{H^1(\Omega_{h_{\hat n}})}
\leq \lVert \bar u_\ensuremath {{\gamma,\delta}} n \rVert_{H^1(\Omega_{h_n})} \leq C$ we infer that there exists $\tilde u\in H^1(\Omega_{h_{\hat n}})$ such that $\bar u_\ensuremath {{\gamma,\delta}} n \xrightarrow{n\rightarrow\infty} \tilde u$ weakly in $H^1(\Omega_{h_{\hat n}})$ and strongly in $L^2(\Omega_{h_{\hat n}})$.
On the other hand, we have $\bar u_\ensuremath {{\gamma,\delta}} n \xrightarrow{n\rightarrow\infty} \hat u$ weakly in $L^2(\Omega_{h_{\hat n}})$, so $\hat u\in H^1(\Omega_{h_{\hat n}})$ and the convergence $\bar u_\ensuremath {{\gamma,\delta}} n \xrightarrow{n\rightarrow\infty} \hat u$ holds strongly in $L^2(\Omega_{h_{\hat n}})$ and weakly in
$H^1(\Omega_{h_{\hat n}})$.
The semi-continuity properties of $\psi_{\delta,h_{\hat n}}$ and $\norm[H^1(\Omega_{h_{\hat n}})]{\cdot}$ together with
the fact that $\bar y_\ensuremath {{\gamma,\delta}} n \xrightarrow{n\rightarrow\infty} \hat y$ strongly in $L^2(\Omega_{h_{\hat n}})$
imply that
\begin{equation*}
J_{\gamma,\delta,\hat n}(\hat y,\hat u)
\leq \liminf_{n\rightarrow\infty} \, J_{\gamma,\delta,\hat n}(\bar y_{\gamma,\delta,n},\bar u_{\gamma,\delta,n})
\leq j_{\gamma,\delta}(u).
\end{equation*}
Because of $|\Omega\setminus\Omega_{h_{\hat n}}| \xrightarrow{\hat n\rightarrow\infty} 0$ we can infer by dominated convergence for $\hat n\rightarrow\infty$ that
$\bar u_\ensuremath {{\gamma,\delta}} n \xrightarrow{n\rightarrow\infty} \hat u$ strongly in $L^2(\Omega)$
and that $j_\ensuremath {{\gamma,\delta}} (\hat u)\leq j_\ensuremath {{\gamma,\delta}} (u)$ for all $u\in H^1(\Omega) \cap C^\infty(\Omega)$.
By density, this implies that $\hat u$ is the minimizer of $j_\ensuremath {{\gamma,\delta}} $, thereby concluding the proof. \qed
\end{proof}
\begin{corollary}
Let \eqref{eq:boundarydist} hold. We have
\begin{equation*}
\lim_{h\to 0^+}\norm[H^1(\Omega)^2]{\left(\bar y_{\gamma,\delta,h},\bar p_{\gamma,\delta,h}\right) - \left(\bar y_{\gamma,\delta},\bar p_{\gamma,\delta}\right)} = 0,
\end{equation*}
where $\bar y_\ensuremath {{\gamma,\delta}} h$ and $\bar p_\ensuremath {{\gamma,\delta}} h$ are extended by zero to $\Omega$.
\end{corollary}
\begin{proof}
Let $R_h \bar y_\ensuremath {{\gamma,\delta}} \in Y_h$ denote the Ritz projection with respect to $A$. Extending $\bar y_\ensuremath {{\gamma,\delta}} h\in Y_h$ and $R_h \bar y_\ensuremath {{\gamma,\delta}} $ by zero to $\Omega$ we clearly have
\begin{equation*}
\lVert \bar y_\ensuremath {{\gamma,\delta}} h - \bar y_\ensuremath {{\gamma,\delta}} \rVert_{H^1(\Omega)} \leq \lVert \bar y_\ensuremath {{\gamma,\delta}} h - R_h \bar y_\ensuremath {{\gamma,\delta}} \rVert_{H^1(\Omega_h)} + \lVert R_h \bar y_\ensuremath {{\gamma,\delta}} - \bar y_\ensuremath {{\gamma,\delta}} \rVert_{H^1(\Omega)}.
\end{equation*}
By definition, $\bar y_\ensuremath {{\gamma,\delta}} h-R_h \bar y_\ensuremath {{\gamma,\delta}} $ satisfies
\begin{equation*}
A(\bar y_\ensuremath {{\gamma,\delta}} h-R_h \bar y_\ensuremath {{\gamma,\delta}} ) (\varphi_h) = (\bar u_\ensuremath {{\gamma,\delta}} h-\bar u_\ensuremath {{\gamma,\delta}} , \varphi_h)_{L^2(\Omega_h)} \qquad \forall\varphi_h\in \ensuremath { Y_h } .
\end{equation*}
Thus, choosing $\varphi_h = \bar y_\ensuremath {{\gamma,\delta}} h-R_h \bar y_\ensuremath {{\gamma,\delta}} $ and using the ellipticity of $ \ensuremath{{\cal{A}}} $ and $c_0\geq 0$ in $\Omega$ together with the Poincar\'e inequality in $\Omega$ yields a constant $C>0$, independent of $h$, such that
$\lVert \bar y_\ensuremath {{\gamma,\delta}} h-R_h \bar y_\ensuremath {{\gamma,\delta}} \rVert_{H^1(\Omega)} \leq C \lVert \bar u_\ensuremath {{\gamma,\delta}} h-\bar u_\ensuremath {{\gamma,\delta}} \rVert_{L^2(\Omega)} \xrightarrow{h\rightarrow 0^+} 0$, where we also used extension by zero and Theorem~\ref{thm_FEMconvergenceinLtwo}. Since $R_h \bar y_\ensuremath {{\gamma,\delta}} \xrightarrow{h\rightarrow 0^+} \bar y_\ensuremath {{\gamma,\delta}} $ in $\ensuremath { Y } $, the $H^1(\Omega)$ convergence $\bar y_\ensuremath {{\gamma,\delta}} h\xrightarrow{h\rightarrow 0^+}\bar y_\ensuremath {{\gamma,\delta}} $ follows. The proof for $\bar p_\ensuremath {{\gamma,\delta}} h-\bar p_\ensuremath {{\gamma,\delta}} $ is analogous. \qed
\end{proof}
\section{Numerical solution} \label{sec:numericalsolution}
Based on the Finite Element approximation from section~\ref{sec:FEapproximation} we now study an inexact Newton method to compute the discrete solution $(\bar y_{\ensuremath {{\gamma,\delta}} h},\bar p_{\ensuremath {{\gamma,\delta}} h},\bar u_{\ensuremath {{\gamma,\delta}} h})$ and we embed it into a path-following method.
\subsection{A preconditioned inexact Newton method for the discrete problems}
We prove local convergence of an inexact Newton method when applied to a discretized version of \eqref{eq:Fdefinition} for fixed $(\gamma,\delta)\in\mathbb{R}_{>0}^2$. To this end, let us introduce the discrete adjoint-to-control mapping $u_h$.
We recall that the constant $h_0>0$ is introduced at the beginning of section~\ref{sec:femdiscr}.
The proof of the following result is similar to the continuous case in Theorems~\ref{thm_PtoUfrechet}, \ref{thm:ullfd} and \ref{thm:quasilinearls}, so we omit it.
\begin{lemma}\label{lem_Lipdiffofuh}
Let $h\in(0,h_0]$.
For every $p\in L^2(\Omega_h)$ there exists a unique $u_h=u_h(p)\in\ensuremath { V_h } $ that satisfies the following discrete version of \eqref{eq:quasilinearpde}
\begin{equation} \label{eq:discretemeancurve}
\begin{split}
\Bigl( \gamma \nabla u_h + f\bigl(\nabla u_h\bigr), \nabla \varphi_h \Bigr)_{L^2(\Omega_h)} + \gamma\bigl( u_h, \varphi_h \bigr)_{L^2(\Omega_h)} = (p, \varphi_h)_{L^2(\Omega_h)}
\quad \forall \varphi_h\in\ensuremath { V_h } .
\end{split}
\end{equation}
The associated solution operator $u_h: L^2(\Omega_h) \rightarrow \ensuremath { V_h } $
is Lipschitz continuously \text{Fr{\'e}chet } differentiable. Its derivative $u_h^\prime(p)\in\ensuremath {{\cal L}} (L^2(\Omega_h),\ensuremath { V_h } )$ at $p\in L^2(\Omega_h)$ in direction $d\in L^2(\Omega_h)$ is given by $z_h = u_h^\prime(p) d \in \ensuremath { V_h } $, where $z_h$ is the unique solution to
\begin{equation}\label{eq:discretemeancurveder}
\begin{split}
\Biggl( \Bigl[\gamma I + f^\prime\bigl(\nabla u_h(p)\bigr)\Bigr] \nabla z_h, \nabla \varphi_h \Biggr)_{L^2(\Omega_h)} + \gamma \bigl(z_h, \varphi_h\bigr)_{L^2(\Omega_h)} & = (d, \varphi_h)_{L^2(\Omega_h)} \\ &\qquad \forall \varphi_h\in\ensuremath { V_h } .
\end{split}
\end{equation}
\end{lemma}
With $u_h$ at hand we can discretize \eqref{eq:Fdefinition} by
\begin{equation*}
F_h : \ensuremath { Y_h } \times \ensuremath { Y_h } \rightarrow Y_h^\ast \times Y_h^\ast, \qquad F_h(y, p) := \begin{pmatrix}
A y - u_h(-p) \\
y-y_\Omega - A^\ast p
\end{pmatrix}.
\end{equation*}
The same $F_h$ is obtained if we consider the optimality conditions of \eqref{eq:regProblemdisc} and express them in terms of $(y,p)$. Moreover, $(\bar y_\ensuremath {{\gamma,\delta}} h,\bar p_\ensuremath {{\gamma,\delta}} h)$ is the unique root of $F_h$ and
the properties of $F$ from Lemma~\ref{lem:LipschitzcontOfF} and Lemma~\ref{lem:Fprimeinvertible} carry over to $F_h$.
\begin{lemma} \label{lem:discpropofFprime}
Let $h\in(0,h_0]$.
The map $F_h: \ensuremath { Y_h } \times \ensuremath { Y_h } \rightarrow Y_h^\ast \times Y_h^\ast$ is Lipschitz continuously \text{Fr{\'e}chet } differentiable. Its derivative at $(y,p) \in \ensuremath { Y_h } \times \ensuremath { Y_h } $ is given by
\begin{equation*}
F_h^\prime(y,p) \colon \ensuremath { Y_h } \times \ensuremath { Y_h } \rightarrow Y_h^\ast \times Y_h^\ast, \qquad
(\delta y,\delta p) \mapsto \begin{pmatrix}
A & u_h^\prime(-p) \\
I & -A^\ast
\end{pmatrix} \begin{pmatrix}
\delta y\\ \delta p
\end{pmatrix}.
\end{equation*}
Moreover, $F_h^\prime(y, p)$ is invertible for every $(y,p)\in\ensuremath { Y_h } \times\ensuremath { Y_h } $.
\end{lemma}
\begin{proof}
The regularity follows from Lemma~\ref{lem_Lipdiffofuh}.
Since $\dim( \ensuremath { Y_h } \times \ensuremath { Y_h } ) = \dim( Y_h^\ast \times Y_h^\ast )$, it is sufficient to show that $F_h^\prime(y,p)$ is injective. This can be done exactly as in Lemma~\ref{lem:Fprimeinvertible}. \qed
\end{proof}
Similar to Theorem~\ref{thm_convinexNewtoninfdim} we have the following result.
\begin{theorem} \label{thm:discnewton}
Let $h\in(0,h_0]$ and $\eta\in[0,\infty)$.
Then there is a neighborhood $N\subset \ensuremath { Y_h } \times\ensuremath { Y_h } $ of $(\bar y_{\ensuremath {{\gamma,\delta}} h},\bar p_{\ensuremath {{\gamma,\delta}} h})$
such that for any $(y_0,p_0)\in N$ any sequence $((y_k,p_k))_{k\in\mathds{N}}$ that is generated according to
$(y_{k+1},p_{k+1})=(y_k,p_k) + (\delta y_k,\delta p_k)$, where
$(\delta y_k,\delta p_k)\in\ensuremath { Y_h } \times \ensuremath { Y_h } $ satisfies for all $k\geq 0$
\begin{equation*}
\norm{F_h (y_k,p_k) + F_h^\prime(y_k,p_k)(\delta y_k,\delta p_k) }\leq \eta_k \norm{F_h (y_k,p_k)}
\end{equation*}
with $(\eta_k)\subset[0,\eta]$, converges to $(\bar y_{\ensuremath {{\gamma,\delta}} h},\bar p_{\ensuremath {{\gamma,\delta}} h})$. The convergence is
r-linear if $\eta<1$, q-linear if $\eta$ is sufficiently small,
q-superlinear if $\eta_k\to 0$, and of q-order $1+\omega$ if
$\eta_k=O(\norm{F_h(y_k,p_k)}^\omega)$.
Here, $\omega\in(0,1]$ is arbitrary.
\end{theorem}
As a preconditioner for the fully discrete Newton system based on
\begin{equation}\label{eq_defPrecond}
F_h^\prime(y,p)
= \begin{pmatrix}
\mathbf{A} & \mathbf{u_h^\prime(-p)} \\ \mathbf{M} & -\mathbf{A}^T
\end{pmatrix} \quad\text{ we use }\quad
\ensuremath{{\cal{P}}} ^{-1} :=
\begin{pmatrix}
\mathbf{B}&\mathbf{0}\\ \mathbf{B}^T \mathbf{M} \mathbf{B} & -\mathbf{B}^T
\end{pmatrix},
\end{equation}
where $\mathbf{B}=\mathbf{A}^{-1}$ is the inverse of the stiffness matrix $\mathbf{A}$, and $\mathbf{M}$ is the mass matrix.
The preconditioner would agree with $F_h^\prime(y,p)^{-1}$ if $\mathbf{u_h^\prime(-p)}$ were zero.
\subsection{A practical path-following method} \label{sec:fullydiscalg}
The following Algorithm~\ref{alg:pfdisc} is a practical path-following inexact Newton method to solve \eqref{eq:regProblemdisc}.
We expect that its global convergence can be shown if $\rho(\gamma_i,\delta_i)$ and $\eta_k$ are chosen sufficiently small,
but this topic is left for future research. For the choices detailed below, global convergence holds in practice.
\begin{algorithm2e}
\DontPrintSemicolon
\caption{Inexact path-following inexact Newton method \label{alg:pfdisc}}
\KwIn{
$(\hat y_0, \hat p_0) \in \ensuremath { Y_h } \times\ensuremath { Y_h } $, $(\gamma_0,\delta_0) \in \mathbb{R}_{>0}^2$, $\kappa>0$ }
\For(){$i=0,1,2,\ldots$}
{
set $(y_0, p_0) := (\hat y_i, \hat p_i)$ \\
\For(\label{line_forloopNewton}){$k=0,1,2,\ldots$}
{
\If(\label{line_Newtontermination}){$\norm{F_h(y_k,p_k)}\leq \rho(\gamma_i,\delta_i)$}
{
set $(\hat y_{i+1}, \hat p_{i+1}):=(y_k,p_k)$\\
\textbf{go to }line~\ref{line_endofl} }\label{line_endoft}
choose $\eta_k>0$ and use preconditioned \textsc{gmres} to determine $(\delta y_k, \delta p_k)$ such that
$\norm{F_h(y_k, p_k) + F_h^\prime( y_k, p_k ) (\delta y_k, \delta p_k)}\leq \eta_k\norm{F_h(y_k,p_k)}$
\label{line_inexacttermination}
\\
\textbf{call}\, Algorithm~\ref{alg:ls}, input $w_k:=(y_k, p_k)$, $\delta w_k:=(\delta y_k,\delta p_k)$;
output: $\lambda_k$\\
set $(y_{k+1}, p_{k+1}) := (y_k,p_k) + \lambda_k(\delta y_k, \delta p_k)$
}\label{line_endofk}
select $\sigma_i\in(0,1)$\label{line_endofl}\\
\lIf{$\norm[H^1]{(\hat y_{j+1},\beta^{-1}\hat p_{j+1})-(\hat y_j,\beta^{-1}\hat p_j)}\leq (1-\sigma_j)\kappa\norm[H^1]{(\hat y_{i+1},\beta^{-1}\hat p_{i+1})}$ \rm{for} $j=i$ and $j=i-1$}{terminate with output $(y^\ast,p^\ast):=(\hat y_{i+1},\hat p_{i+1})$ \label{line_pftermination}}
\lElse{set $(\gamma_{i+1},\delta_{i+1}):= (\sigma_i\gamma_i,\sigma_i \delta_i)$\label{line_paramupdate}}
}
\KwOut{$(y^\ast,p^\ast)$}
\end{algorithm2e}
The inner loop in lines~\ref{line_forloopNewton}--\ref{line_endofk} of Algorithm~\ref{alg:pfdisc}
uses an inexact Newton method to compute an approximation $(\hat y_{i+1}, \hat p_{i+1})$ of the root of $F_h$ for fixed $(\gamma_i,\delta_i)$
satisfying $\norm{F_h(\hat y_{i+1},\hat p_{i+1})}\leq \rho(\gamma_i,\delta_i)$, where $\rho:\mathbb{R}_{>0}^2\rightarrow\mathbb{R}_{>0}$.
In the implementation we use $\rho(\gamma,\delta) = \max\{10^{-6},\gamma\}$, which may be viewed as inexact path-following. For the forcing term $\eta_k$ we use the two choices $\eta_k=\bar\eta_k:=10^{-6}$ and $\eta_k=\hat\eta_k:=\max\{10^{-6},\min\{10^{-k-1},\sqrt{\delta_i}\}\}$, where $k=k(i)$. For $\bar\eta_k$ we have $\bar\eta_k\leq \norm{F_h(y_k,p_k)}$ since we terminate the inner loop if $\norm{F_h(y_k,p_k)} < 10^{-6}$.
The choice $\eta_k=\bar\eta_k$ is related to quadratic convergence, while $\eta_k=\hat \eta_k$ corresponds to superlinear convergence, cf. Theorem~\ref{thm:discnewton}.
We also terminate \textsc{gmres } if the Euclidean norm of $F_h(y_k, p_k) + F_h^\prime( y_k, p_k ) (\delta y_k, \delta p_k)$ drops below $\eta_k$
since this seemed beneficial in the numerical experiments.
To compute the control $u_h(-p_k)$ that satisfies \eqref{eq:discretemeancurve} we use a globalized Newton method
that can be shown to converge q-quadratically for arbitrary starting points $u^0\in\ensuremath { V_h } $.
In fact, since \eqref{eq:discretemeancurve} is equivalent to $u_h(p)$ being the minimizer of the smooth and strongly convex problem
\begin{equation*}
\min_{v_h\in V_h} \, \beta\psi_{\delta,h}(v_h)+\frac{\gamma}{2}\lVert v_h\rVert_{H^1(\Omega_h)}^2 - \left(p,v_h\right)_{L^2(\Omega_h)},
\end{equation*}
standard globalization techniques for Newton's method will ensure these convergence properties (e.g., Newton's method combined with an Armijo line search \cite[Section~9.5.3]{BoydVandenberghe}).
The method terminates when the Newton residual falls below a threshold that decreases with $(\gamma_i,\delta_i)$. The linear systems are solved using SciPy's sparse direct solver \texttt{spsolve}. As an alternative we tested a preconditioned conjugate gradients method (PCG). The results were mixed: The use of PCG diminished the total runtime of Algorithm~\ref{alg:pfdisc} if all went well, but broke down on several occasions for smaller values of $(\gamma_i,\delta_i)$.
In lines~\ref{line_endofl}--\ref{line_paramupdate} it is decided whether to accept $(\hat y_{i+1},\hat p_{i+1})$ as a solution and terminate the algorithm; if not, then we continue the path-following by updating $(\gamma_i,\delta_i)$ with the factor $\sigma_i$.
We select $\sigma_i$ based on the number of Newton steps that are needed to compute the implicit controls $\{u_h(-p_k)\}_k$ in outer iteration $i$. If this number surpasses a predefined $m\in\mathds{N}$, then we choose $\sigma_i>\sigma_{i-1}$. If it belongs to $[0,0.75 m]$, then we choose $\sigma_i<\sigma_{i-1}$. Otherwise, we let $\sigma_i=\sigma_{i-1}$. In addition, we enforce $\sigma_i\geq 0.25$ for all $i$ since we found in the numerical experiments
that choosing $\sigma_i$ too small can slow down or prevent convergence in some cases once $(\gamma_i,\delta_i)$ is very small, cf. Table~\ref{tab_ex1_sigma} below.
The weighing $1/\beta$ in the termination criterion is made since the amplitude of the adjoint state is roughly of order $\beta$ in comparison to the state.
In all experiments we use $\kappa=10^{-3}$.
Algorithm~\ref{alg:ls} augments the inexact Newton method in lines~\ref{line_forloopNewton}--\ref{line_endofk} of Algorithm~\ref{alg:pfdisc} by a non-monotone line search globalization introduced in \cite{LiFukushima2000} for Broyden's method. The non-monotonicity allows to always accept the inexact Newton step and yields potentially larger step sizes than descent-based strategies. The intention is to keep the number of trial step sizes low since every trial step size requires the evaluation of $F_h$ and hence a recomputation of $u_h(-p_k)$. In the numerical experiments we use $\tau=10^{-4}$ and we observe that in the vast majority of iterations full steps are taken, i.e., $\lambda_k=1$.
To briefly discuss convergence properties of the globalized inexact Newton method, let us assume for simplicity that $u_h(-p_k)$ is determined exactly for each $k$.
By following the arguments of \cite{LiFukushima2000} we can show that for sufficiently small $\eta_k$ the sequence $((y_k,p_k))_{k\in\mathds{N}}$ obtained by ignoring lines~\ref{line_Newtontermination}--\ref{line_endoft} must either be unbounded or converge to the unique root of $F_h$; a key ingredient in the corresponding proof is that $F_h'(y,p)$ is invertible for all $(y,p)$, which we have demonstrated in Lemma~\ref{lem:discpropofFprime}. In particular, if $((y_k,p_k))_{k\in\mathds{N}}$ is bounded, then the globalized inexact Newton method in lines~\ref{line_forloopNewton}--\ref{line_endofk} terminates finitely.
While we always observed this termination in practice, the question whether $((y_k,p_k))_{k\in\mathds{N}}$ can be unbounded remains open.
Furthermore, as in \cite{LiFukushima2000} it follows that if $((y_k,p_k))_{k\in\mathds{N}}$ converges, then eventually step size 1 is always accepted, in turn ensuring that the convergence rates of Theorem~\ref{thm:discnewton} apply.
All norms without index in Algorithm~\ref{alg:pfdisc} and \ref{alg:ls} are $L^2(\Omega_h)$ norms.
\begin{algorithm2e}
\DontPrintSemicolon
\caption{Computation of step size}\label{alg:ls}
\KwInofupalg{ $(w_k,\delta w_k)$,
\, $\tau>0$ }\\
\For{ $l=0,1,2,\ldots$ }
{
\lIf{$\norm{F_h(w_k+2^{-l} \delta w_k)}\leq \Bigl(1+\frac{1}{(l+1)^2}\Bigr)\norm{F_h(w_k)} - \tau\norm{2^{-l} \delta w_k}^2$}{set $\lambda_k:=2^{-l}$; \textbf{stop}}\label{alg2_line_residualcheck2}
}
\KwOut{ $\lambda_k$ }
\end{algorithm2e}
\section{Numerical results} \label{sec:numericmainchapter}
We provide numerical results for two examples. Our main goal is to illustrate that Algorithm~\ref{alg:pfdisc}
can robustly compute accurate solutions of \eqref{eq:ocintro}.
The results are obtained from a Python implementation of Algorithm~\ref{alg:pfdisc} using DOLFIN \cite{LoggWells2010a,LoggWellsEtAl2012a},
which is part of FEniCS \cite{AlnaesBlechta2015a,LoggMardalEtAl2012a}.
The code for the second example is available at \url{https://arxiv.org/abs/2010.11628}.
\subsection{Example~1: An example with explicit solution}
The first example has an explicit solution and satisfies the assumptions used in this work.
We consider \eqref{eq:ocintro} for an arbitrary $\beta>0$ with non-convex $C^\infty$ domain $\Omega = B_{4\pi}(0) \setminus \overline{B_{2\pi}(0)}$ in $\mathds{R}^2$,
$ \ensuremath{{\cal{A}}} =-\Delta$ and $c_0\equiv 0$.
The desired state is
\begin{equation*}
y_\Omega(r) = \frac{\beta}{2r^3}\Bigl((1+r)\sin(r) - 1 - (2r^2-1)\cos(r)\Bigr) + \bar y
\end{equation*}
where $r(x,y)=\sqrt{x^2+y^2}$, and the optimal state $\bar y$ is
\begin{equation*}
\bar y(r) = \begin{cases}
- \frac{r^2}{4} + A\ln(r/(4\pi)) + B & \text{ if } r\in (2\pi,3\pi),\\
C \ln (r/(4\pi)) & \text{ if } r\in (3\pi,4\pi)
\end{cases}
\end{equation*}
with constants $A,B,C$ whose values are contained in appendix~\ref{sec_examplewithexplicitsolution}.
The optimal control is
\begin{equation*}
\bar u(r)= 1_{(2\pi,3\pi)}(r),
\end{equation*}
i.e., $\bar u$ has value $1$ on the disc $B_{3\pi}(0) \setminus \overline{B_{2\pi}(0)}$ and value $0$ on the disc $B_{4\pi}(0) \setminus {B_{3\pi}(0)}$.
The optimal value is $j(\bar u)\approx 24.85 \beta^2 + 59.22 \beta$.
In appendix~\ref{sec_examplewithexplicitsolution} we provide details on the construction of this example and verify that
$(\bar y,\bar u)$ is indeed the optimal solution of \eqref{eq:ocintro}.
If not stated otherwise, we use $\beta=10^{-3}$.
We use unstructured triangulations that approximate $\partial\Omega$ increasingly better as the meshes become finer, cf. \eqref{eq:boundarydist}.
Figure~\ref{fig_ex1} depicts the optimal control $\bar u_h$, optimal state $\bar y_h$ and negative optimal adjoint state $-\bar p_h$, which were computed by Algorithm~\ref{alg:pfdisc} on a grid with 1553207 degrees of freedom (DOF).
\begin{figure}
\caption{$\bar u_h$}
\caption{$\bar u_h$ with circles of radii $j\pi$, $j\in\{2,3,4\}$}
\caption{$\bar y_h$}
\caption{$-\bar p_h$}
\caption{Numerically computed optimal solutions for Example~1}
\label{fig_ex1}
\end{figure}
We begin by studying convergence on several grids. We use the fixed ratio $(\gamma_i/\delta_i)\equiv 10^2$ and apply Algorithm~\ref{alg:pfdisc} with $(\gamma_0,\delta_0)=(1,0.01)$ and $(\hat y_0,\hat p_0)=(0,0)$.
Table~\ref{tab_ex1_meshind} shows \#it, which represents the total number of inexact Newton steps for $(y,p)$, and \#it$_u$, which is the total number of Newton steps used to compute the implicit function $u$.
Table~\ref{tab_ex1_meshind} also contains the errors
\begin{equation*}
\ensuremath{{\cal{E}}} _j := \bigl\lvert j_{\gamma_\mathrm{final},\delta_\mathrm{final},h}-\bar j \bigr\rvert, \qquad
\ensuremath{{\cal{E}}} _u := \norm[L^1(\Omega_\ast)]{\hat u_\mathrm{final}-\bar u},
\end{equation*}
as well as
\begin{equation*}
\ensuremath{{\cal{E}}} _y := \norm[H^1(\Omega_\ast)]{\hat y_\mathrm{final}-\bar y}, \qquad
\ensuremath{{\cal{E}}} _p := \norm[H^1(\Omega_\ast)]{\hat p_\mathrm{final}-\bar p},
\end{equation*}
where $\Omega_\ast$ represents a reference grid with $\mathrm{DOF}=1553207$. To evaluate the errors, $\hat u_\mathrm{final}$, $\hat y_\mathrm{final}$ and $\hat p_\mathrm{final}$ are extended to $\Omega_\ast$ using extrapolation.
Table~\ref{tab_ex1_courseofalg} provides details for the run from Table~\ref{tab_ex1_meshind} with $\mathrm{DOF}=97643$ and $\eta_k=\hat\eta_k$.
Table~\ref{tab_ex1_courseofalg} includes
$\tau^i:=\norm[H^1(\Omega_h)]{(\hat y_{i+1},\beta^{-1}\hat p_{i+1})-(\hat y_i,\beta^{-1}\hat p_i)}$, which appears in the termination criterion of Algorithm~\ref{alg:pfdisc}, and $\tau_u^i:=\norm[L^2(\Omega_h)]{u_h(\hat p_{i+1})-u_h(\hat p_i)}$.
\begin{table}
\caption{Example~1: Number of Newton steps and errors for several meshes;
the first value is for the forcing term $\bar\eta_k$, the second for $\hat\eta_k$ (only shown if different)}
\label{tab_ex1_meshind}
\scalebox{0.95}{
\begin{tabular}{@{}llllllll@{}}
\toprule
DOF&$\gamma_{\mathrm{final}}$&\#it&\#it$_u$&$ \ensuremath{{\cal{E}}} _j$&$ \ensuremath{{\cal{E}}} _u$&$ \ensuremath{{\cal{E}}} _y$&$ \ensuremath{{\cal{E}}} _p$\\\midrule
$1588$&$1.2\times 10^{-8}/9.3\times 10^{-9}$&58/72
&390/428
&$3.4\times 10^{-2}$&$18.7$&$2.3$&$6.0\times 10^{-2}$\\%time: 383/384; 262:220<0.84
$6251$&$1.7/1.6\times 10^{-10}$&78/91
&597/608
&$4.0\times 10^{-3}$&$7.3$&$1.1$&$3.3\times 10^{-2}$\\%time: 2033/1756; :1331<0.825
$24443$&$2.1/1.5\times 10^{-11}$&55/64
&454/491
&$9.4\times 10^{-4}$&$5.1$&$0.50$&$1.3\times 10^{-2}$\\%time: 5980/5192; 3503:3710<0.945
$97643$&$5.4/6.2\times 10^{-11}$&46/48
&407/380
&$3.3\times 10^{-4}$&$3.7$&$0.22$&$5.6\times 10^{-3}$\\%time: 15750/11970 ; 1197:1575<0.88
$389027$&$4.6/4.3\times 10^{-10}$&32/34
&367/358
&$1.2\times 10^{-4}$&$2.8$&$0.09$&$2.9\times 10^{-3}$\\%time: 57170/45770; 4577:5717<0.
\bottomrule
\end{tabular}
}
\end{table}
\begin{table}
\begin{center}
\caption{Example~1: Course of Algorithm~\ref{alg:pfdisc}}
\label{tab_ex1_courseofalg}
\scalebox{0.76}{
\begin{tabular}{@{}llllllllll@{}}
\toprule
$i$&$\gamma_i$&$\sigma_i$&(\#it$^i$,\#it$^i_u$)&$ \ensuremath{{\cal{E}}} _j^i$&$ \ensuremath{{\cal{E}}} _u^i$&$ \ensuremath{{\cal{E}}} _y^i$&$ \ensuremath{{\cal{E}}} _p^i$&$\tau^i$&$\tau_u^i$\\\midrule
0/1/2&$1.0/0.45/0.18$&$0.45/0.41/0.37$&$(0,0)$&$575$&$155$&$38.5$&$9.8\times 10^{-3}$&$0$&$0$\\
3&$6.8\times 10^{-2}$&$0.33$&$(1,1)$&$5.7$&$44$&$1.8$&$1.4$&$1440$&$11.3$\\
4&$2.2\times 10^{-2}$&$0.30$&$(1,2)$&$2.0$&$38$&$1.0$&$0.48$&$959$&$0.83$\\
\vdots&\vdots&\vdots&\vdots&\vdots&\vdots&\vdots&\vdots&\vdots&\vdots\\
12&$8.7\times 10^{-6}$&$0.32$&$(3,18)$&$1.8\times 10^{-3}$&$5.1$&$0.23$&$8.8\times 10^{-3}$&$3.5$&$0.22$\\
13&$2.8\times 10^{-6}$&$0.31$&$(3,20)$&$8.7\times 10^{-4}$&$4.3$&$0.23$&$6.8\times 10^{-3}$&$2.0$&$0.15$\\
14&$8.7\times 10^{-7}$&$0.28$&$(3,18)$&$5.1\times 10^{-4}$&$3.9$&$0.22$&$6.0\times 10^{-3}$&$0.84$&$0.073$\\
15&$2.4\times 10^{-7}$&$0.26$&$(5,20)$&$3.8\times 10^{-4}$&$3.7$&$0.22$&$5.7\times 10^{-3}$&$0.32$&$0.027$\\
16&$6.4\times 10^{-8}$&$0.25$&$(3,15)$&$3.5\times 10^{-4}$&$3.7$&$0.22$&$5.6\times 10^{-3}$&$0.13$&$8.2\times 10^{-3}$\\
17&$1.6\times 10^{-8}$&$0.25$&$(3,16)$&$3.3\times 10^{-4}$&$3.7$&$0.22$&$5.6\times 10^{-3}$&$0.081$&$2.2\times 10^{-3}$\\
18&$4.0\times 10^{-9}$&$0.25$&$(3,15)$&$3.3\times 10^{-4}$&$3.7$&$0.22$&$5.6\times 10^{-3}$&$0.060$&$7.0\times 10^{-4}$\\
19&$9.9\times 10^{-10}$&$0.25$&$(3,20)$&$3.3\times 10^{-4}$&$3.7$&$0.22$&$5.6\times 10^{-3}$&$0.045$&$3.6\times 10^{-4}$\\
20&$2.5\times 10^{-10}$&$0.25$&$(3,22)$&$3.3\times 10^{-4}$&$3.7$&$0.22$&$5.6\times 10^{-3}$&$0.028$&$2.2\times 10^{-4}$\\
21&$6.2\times 10^{-11}$&---&$(3,33)$&$3.3\times 10^{-4}$&$3.7$&$0.22$&$5.6\times 10^{-3}$&$0.017$&$1.4\times 10^{-4}$\\
\bottomrule
\end{tabular}
}
\end{center}
\end{table}
Table~\ref{tab_ex1_meshind} indicates convergence of the computed solutions
$(\hat u_\mathrm{final},\hat y_\mathrm{final},\hat p_\mathrm{final})$ to $(\bar u,\bar y,\bar p)$
and of the objective value $j_{\gamma_\mathrm{final},\delta_\mathrm{final},h}$ to $\bar j$.
It also suggests that convergence takes place at certain rates with respect to $h$.
Moreover, the total number of Newton steps both for $(y,p)$ and for $u$ stays bounded as DOF increases, which suggests mesh independence. The choice $\eta_k=\bar\eta_k$ frequently yields lower numbers of Newton steps for $(y,p)$ and for $u$, yet the runtime (not depicted) is consistently higher than for $\eta_k=\hat\eta_k$ since more iterations of \textsc{gmres } are required to compute the step for $(y,p)$. Specifically, using $\hat\eta_k$ saves between 5\% and 36\% of runtime,
with $36\%$ being the saving on the finest grid.
(Since the runtime depends on many factors, these numbers are intended as reference points rather than exact values.)
In the vast majority of iterations, step size $1$ is accepted for $(y_k,p_k)$.
For instance, all of the 52 iterations required for $\mathrm{DOF}=97643$ and $\eta_k=\hat\eta_k$ use full steps;
for $\mathrm{DOF}=6251$ and $\eta_k=\bar\eta_k$, 86 of the 87 iterations use step size 1.
Table~\ref{tab_ex1_sigma} displays the effect of fixing $(\sigma_i)\equiv \sigma$ in Algorithm~\ref{alg:pfdisc}. The mesh uses $\mathrm{DOF}=24443$ and is the same as in Table~\ref{tab_ex1_meshind}.
\begin{table}
\begin{center}
\caption{Example~1: Results for fixed values $(\sigma_i)\equiv\sigma$; the first value is for the forcing term $\bar\eta_k$, the second for $\hat\eta_k$ (only shown if different)}
\label{tab_ex1_sigma}
\scalebox{0.99}{
\begin{tabular}{@{}llllllll@{}}
\toprule
$\sigma$&$\gamma_\mathrm{final}$&\#it&\#it$_u$&$ \ensuremath{{\cal{E}}} _j$&$ \ensuremath{{\cal{E}}} _u$&$ \ensuremath{{\cal{E}}} _y$&$ \ensuremath{{\cal{E}}} _p$\\\midrule
0.2&$6.6\times 10^{-12}$&48/51&505/512&$9.4\times 10^{-4}$&$5.1$&$0.50$&$1.3\times 10^{-2}$\\%gmres: 3683/2686 time: 5707/4932
0.3&$3.5\times 10^{-11}$&51/57&408/405&$9.4\times 10^{-4}$&$5.1$&$0.50$&$1.3\times 10^{-2}$\\%gmres: 3603/2606 time: 5581/4517
0.5&$2.3\times 10^{-10}$&70/86&454/466&$9.4\times 10^{-4}$&$5.1$&$0.50$&$1.3\times 10^{-2}$\\%gmres: 4893/2927 time: 6999/5106
0.7&$5.1\times 10^{-10}$&97/130&522/552&$9.4\times 10^{-4}$&$5.1$&$0.50$&$1.3\times 10^{-2}$\\%gmres: 6710/3584 time: 8894/6085
0.9&$1.1/8.0\times 10^{-9}$&276/474&1261/1329&$9.4/9.5\times 10^{-4}$&$5.1$&$0.50$&$1.3\times 10^{-2}$\\%gmres: 15114/7200 time: 22620/13290
\bottomrule
\end{tabular}
}
\end{center}
\end{table}
For $\sigma=0.1$ the iterates failed to converge for both forcing terms once $\gamma_{12}=10^{-12}$ is reached because $u_h(-p_k)$ could not be computed to sufficient accuracy within the 200 iterations that we allow for this process.
Together with the case $\sigma=0.2$ in Table~\ref{tab_ex1_sigma} this shows that small values of $\sigma_i$ can increase the number of steps required for $u$ and even prevent convergence. We therefore enforce $\sigma_i\geq 0.25$ for all $i$ in all other experiments, although this diminishes the efficacy of Algorithm~\ref{alg:pfdisc} in some cases.
We now turn to the robustness of Algorithm~\ref{alg:pfdisc}. We emphasize that in our numerical experience the robustness of algorithms for optimal control problems involving the TV seminorm in the objective is a delicate issue.
Table~\ref{tab_ex1_differentbeta} displays the iteration numbers required
by Algorithm~\ref{alg:pfdisc} for different values of $\beta$ on the mesh with $\mathrm{DOF}=24443$ along with the error $ \ensuremath{{\cal{E}}} _u$ for $\eta_k=\hat\eta_k$ for the two choices $(\gamma_i/\delta_i) \equiv 10^2$ and $(\gamma_i/\delta_i)\equiv 1$.
The omitted values for $\beta=10^{-3}$ and
$(\gamma_i/\delta_i) \equiv 10^2$ are identical to those from Table~\ref{tab_ex1_meshind} for $\mathrm{DOF}=24443$ and $\eta_k=\hat\eta_k$.
Table~\ref{tab_ex1_differentkappa} provides iteration numbers and errors for various fixed choices of $(\gamma_i/\delta_i)$ on the mesh with $\mathrm{DOF}=24443$ for $\beta=10^{-3}$, $\eta_k=\bar\eta_k$ and $(\sigma_i)\equiv 0.5$. For the ratios $10^{-1}$ and $10^{-2}$ we increased $\kappa$ from $10^{-3}$ to $5\cdot 10^{-3}$ to obtain convergence. Since our goal is to demonstrate robustness, no further changes are made although this would lower the iteration numbers.
\begin{table}
\begin{center}
\caption{Example~1: Results for various values of $\beta$; the first line is for the choice $(\gamma_i/\delta_i)\equiv 10^2$, the second for $(\gamma_i/\delta_i)\equiv 1$}
\label{tab_ex1_differentbeta}
\begin{tabular}{@{}lllll@{}}
\toprule
$\beta$&$10^{-1}$&$10^{-2}$&
$10^{-4}$&$10^{-5}$
\\\midrule
$(\#$it,$\#$it$_u)/ \ensuremath{{\cal{E}}} _u$
&$(28,373)/21$
&$(37,217)/7.2$
&$(100,914)/4.3$
&$(153,1230)/4.1$
\\
$(\#$it,$\#$it$_u)/ \ensuremath{{\cal{E}}} _u$
&$(40,435)/21$
&$(78,795)/7.3$
&$(126,996)/4.3$
&$(137,1147)/4.1$
\\
\bottomrule
\end{tabular}
\end{center}
\end{table}
\begin{table}
\begin{center}
\caption{Example~1: Iteration numbers and errors for several ratios $\gamma_i/\delta_i$; the computations for $\gamma_i/\delta_i\in \{10^{-1},10^{-2}\}$ use a lower accuracy}
\label{tab_ex1_differentkappa}
\begin{tabular}{@{}llllllll@{}}
\toprule
$\frac{\gamma_i}{\delta_i}$&$\delta_\mathrm{final}$&\#it&\#it$_u$&$ \ensuremath{{\cal{E}}} _j$&$ \ensuremath{{\cal{E}}} _u$&$ \ensuremath{{\cal{E}}} _y$&$ \ensuremath{{\cal{E}}} _p$\\\midrule
$10^{-2}$&$3.0\times 10^{-8}$
&71&288&$9.9\times 10^{-4}$&$5.1$&$0.50$&$1.3\times 10^{-2}$\\%gmres: 5510. time: 7123.
$10^{-1}$&$3.0\times 10^{-8}$
&58
&246
&$9.9\times 10^{-4}$
&$5.1$&$0.50$&$1.3\times 10^{-2}$\\%gmres: 4617/4097. time: 5962/4707. $rtol=5\times 10^{-3}$ for first, 4\times 10^{-3}$ for second.
$1$&$1.8\times 10^{-12}$
&102
&469
&$9.4\times 10^{-4}$&$5.1$&$0.50$&$1.3\times 10^{-2}$\\%gmres: 7300/5248. time: 9246/89470
$10^1$&$2.9\times 10^{-12}$
&80
&396
&$9.4\times 10^{-4}$&$5.1$&$0.50$&$1.3\times 10^{-2}$\\%gmres: 5544/3897. time: 6702/5652.
$10^{2}$&$2.3\times 10^{-12}$&70&454&$9.4\times 10^{-4}$&$5.1$&$0.50$&$1.3\times 10^{-2}$\\%gmres: 4893/2927 time: 6999/5106
$10^3$&$1.9\times 10^{-12}$
&59
&475
&$9.4\times 10^{-4}$&$5.1$&$0.50$&$1.3\times 10^{-2}$\\%gmres: 4177/3217. time: 5598/5313.
\bottomrule
\end{tabular}
\end{center}
\end{table}
Table~\ref{tab_ex1_differentbeta} and \ref{tab_ex1_differentkappa} suggest that Algorithm~\ref{alg:pfdisc} is able to handle a range of parameter values without modification of its internal parameters.
\subsection{Example~2}
From section~\ref{sec_regularity} onward we have used that $\Omega$ is of class $C^{1,1}$.
To show that Algorithm~\ref{alg:pfdisc} can still solve \eqref{eq:ocintro} if $\Omega$ is only Lipschitz,
we consider an example from \cite[section~4.2]{Clason2011} on the square $\Omega = [-1,1]^2$.
We have $ \ensuremath{{\cal{A}}} =-\Delta$, $c_0\equiv 0$, $\beta=10^{-4}$ and $y_\Omega = 1_D$, where $1_D\colon\Omega\rightarrow\{0,1\}$ is the characteristic function of the square $D=(-0.5,0.5)^2$.
We use uniform triangulations and denote by $n+1$ the number of nodes in coordinate direction.
Figure~\ref{fig_ex2} depicts the optimal control $\bar u_h$, optimal state $\bar y_h$ and negative optimal adjoint state $-\bar p_h$, which were computed with $n=1024$. Apparently, $\bar u_h$ is piecewise constant.
\begin{figure}
\caption{$\bar u_h$}
\caption{$\bar u_h$ (top view)}
\caption{$\bar y_h$}
\caption{$-\bar p_h$}
\caption{Numerically computed optimal solutions for Example~2}
\label{fig_ex2}
\end{figure}
Throughout, we use the fixed ratio $(\gamma_i/\delta_i)\equiv 10^{-2}$ and apply Algorithm~\ref{alg:pfdisc} with $(\gamma_0,\delta_0)=(0.01,1)$ and $(\hat y_0,\hat p_0)=(0,0)$. As in example~1,
cf. Table~\ref{tab_ex1_differentkappa}, other ratios for $\gamma_i/\delta_i$ can be employed as well.
We only provide results for $\bar\eta_k$ since the forcing term $\hat\eta_k$ does not yield lower runtimes in this example; both forcing terms produce the same errors, though.
Table~\ref{tab_ex2_meshind} displays iteration numbers and errors for different grids, while Table~\ref{tab_ex2_courseofalg} shows details for $n=256$.
\begin{table}
\begin{center}
\caption{Example~2: Number of Newton steps and errors for several meshes}
\label{tab_ex2_meshind}
\begin{tabular}{@{}llllllll@{}}
\toprule
$n$&$\gamma_{\mathrm{final}}$&\#it&\#it$_u$&$ \ensuremath{{\cal{E}}} _j$&$ \ensuremath{{\cal{E}}} _u$&$ \ensuremath{{\cal{E}}} _y$&$ \ensuremath{{\cal{E}}} _p$\\\midrule
32&$1.7\times 10^{-11}$&43
&321
&$1.8\times 10^{-2}$&$8.6$&$0.75$&$9.0\times 10^{-3}$\\%time: 76/79
64&$9.8\times 10^{-12}$
&48
&551
&$9.9\times 10^{-3}$&$4.3$&$0.37$&$4.9\times 10^{-3}$\\%time: 493/487
128&$3.2\times 10^{-11}$
&46
&902
&$4.9\times 10^{-3}$&$2.3$&$0.19$&$2.4\times 10^{-3}$\\%time: 3401/3770
256&$3.3\times 10^{-11}$
&50
&1212
&$2.2\times 10^{-3}$&$1.1$&$0.081$&$1.1\times 10^{-3}$\\%time: 17470/20110
512&$5.6\times 10^{-11}$
&58
&2868
&$7.3\times 10^{-4}$
&$0.42$
&$0.031$&$4.2\times 10^{-4}$\\%time: 169000/182200
\bottomrule
\end{tabular}
\end{center}
\end{table}
\begin{table}
\begin{center}
\caption{Example~2: Course of Algorithm~\ref{alg:pfdisc}}
\label{tab_ex2_courseofalg}
\scalebox{0.86}{
\begin{tabular}{@{}llllllllll@{}}
\toprule
$i$&$\gamma_i$&$\sigma_i$&(\#it$^i$,\#it$^i_u$)&$ \ensuremath{{\cal{E}}} _j^i$&$ \ensuremath{{\cal{E}}} _u^i$&$ \ensuremath{{\cal{E}}} _y^i$&$ \ensuremath{{\cal{E}}} _p^i$&$\tau^i$&$\tau_u^i$\\\midrule
0--4&$0.01/\ldots$&$0.45/\ldots$&$(0,0)$&$0.42$&$34$&$3.4$&$1.7\times 10^{-2}$&$0$&$0$\\
5&$6.7\times 10^{-5}$&$0.27$&$(2,7)$&$6.0\times 10^{-2}$&$27$&$1.9$&$4.2\times 10^{-2}$&$498$&$7.3$\\
6&$1.8\times 10^{-5}$&$0.26$&$(2,37)$&$3.4\times 10^{-2}$&$23$&$1.6$&$2.4\times 10^{-2}$&$212$&$3.4$\\
\vdots&\vdots&\vdots&\vdots&\vdots&\vdots&\vdots&\vdots&\vdots&\vdots\\
11&$2.9\times 10^{-7}$&$0.55$&$(2,59)$&$3.1\times 10^{-3}$&$12$&$0.50$&$4.6\times 10^{-3}$&$16.4$&$1.6$\\
12&$1.6\times 10^{-7}$&$0.55$&$(2,59)$&$1.6\times 10^{-3}$&$9.6$&$0.39$&$3.4\times 10^{-3}$&$12.6$&$1.6$\\
13&$9.0\times 10^{-8}$&$0.53$&$(2,56)$&$4.4\times 10^{-4}$&$7.4$&$0.29$&$2.5\times 10^{-3}$&$9.5$&$1.4$\\
14&$4.7\times 10^{-8}$&$0.53$&$(3,70)$&$4.5\times 10^{-4}$&$5.3$&$0.21$&$1.9\times 10^{-3}$&$7.6$&$1.4$\\
\vdots&\vdots&\vdots&\vdots&\vdots&\vdots&\vdots&\vdots&\vdots&\vdots\\
21&$9.5\times 10^{-10}$&$0.52$&$(3,64)$&$2.1\times 10^{-3}$&$1.3$&$0.084$&$1.1\times 10^{-3}$&$0.62$&$0.32$\\
22&$5.0\times 10^{-10}$&$0.47$&$(2,23)$&$2.1\times 10^{-3}$&$1.1$&$0.083$&$1.1\times 10^{-3}$&$0.38$&$0.24$\\
23&$2.4\times 10^{-10}$&$0.45$&$(2,56)$&$2.1\times 10^{-3}$&$1.1$&$0.081$&$1.1\times 10^{-3}$&$0.28$&$0.27$\\
24&$1.1\times 10^{-10}$&$0.59$&$(2,80)$&$2.1\times 10^{-3}$&$1.1$&$0.081$&$1.1\times 10^{-3}$&$0.15$&$0.17$\\
25&$6.2\times 10^{-11}$&$0.53$&$(2,15)$&$2.2\times 10^{-3}$&$1.1$&$0.081$&$1.1\times 10^{-3}$&$0.062$&$0.052$\\
26&$3.3\times 10^{-11}$&---&$(2,17)$&$2.2\times 10^{-3}$&$1.1$&$0.081$&$1.1\times 10^{-3}$&$0.042$&$0.037$\\
\bottomrule
\end{tabular}
}
\end{center}
\end{table}
Table~\ref{tab_ex2_meshind} hints at possible mesh independence for $(y,p)$, but indicates that the number of Newton steps for $u$ increases with $n$. The depicted errors are computed by use of a reference solution that is obtained by Algorithm~\ref{alg:pfdisc} with $\eta_k=\bar\eta_k$ on the mesh with $n=1024$.
As in the first example it seems that convergence with respect to $h$ takes place at certain rates.
The majority of iterations use full Newton steps for $(y,p)$. For instance, all but one of the 50 iterations for $n=256$ use step length one.
We also repeated the experiments from Table~\ref{tab_ex2_meshind} with $y_\Omega$ rotated by $30$ and $45$ degrees, respectively.
The omitted results are similar to those in Table~\ref{tab_ex2_meshind}, which further illustrates the robustness of our approach.
Table~\ref{tab_ex2_nestedgrids} shows the outcome of Algorithm~\ref{alg:pfdisc} if a sequence of nested grids is used, where the grids are refined once $\gamma_i<10^{-4}$, $\gamma_i < 10^{-6}$ and $\gamma_i<10^{-8}$, respectively. In this example, nesting reduces the runtime by about $57\%$
while providing the same accuracy as a run for $n=512$, cf. the last line of Table~\ref{tab_ex2_meshind}.
\begin{table}
\begin{center}
\caption{Example~2: Results for a sequence of nested grids}
\label{tab_ex2_nestedgrids}
\begin{tabular}{@{}llllllll@{}}
\toprule
$n$&$\gamma_\mathrm{final}$&\#it&\#it$_u$&$ \ensuremath{{\cal{E}}} _j$&$ \ensuremath{{\cal{E}}} _u$&$ \ensuremath{{\cal{E}}} _y$&$ \ensuremath{{\cal{E}}} _p$\\\midrule
64&$4.0\times 10^{-5}$&5&26
&$3.9\times 10^{-2}$&$25$&$1.8$&$3.4\times 10^{-2}$\\%time (on this grid): 19
128&$4.8\times 10^{-7}$&12&260
&$1.7\times 10^{-3}$&$14$&$0.64$&$6.3\times 10^{-3}$\\%time (on this grid): 767
256&$6.3\times 10^{-9}$&19&481
&$1.7\times 10^{-3}$&$2.1$&$0.10$&$1.2\times 10^{-3}$\\%time (on this grid): 6765
512&$5.6\times 10^{-11}$&20&792
&$7.3\times 10^{-4}$&$0.42$&$0.031$&$4.2\times 10^{-4}$\\%time (on this grid): 63720
\bottomrule
\end{tabular}
\end{center}
\end{table}
Table~\ref{tab_ex2_differentbeta} addresses the robustness of Algorithm~\ref{alg:pfdisc} with respect to $\beta$. The computations are carried out on nested grids and the displayed iteration numbers are those for the finest grid, which has $n=128$. The reference solution is computed for $n=256$. The final grid change happens once $\gamma_i<10^{-8}$.
\begin{table}
\begin{center}
\caption{Example~2: Results for various values of $\beta$.
A sequence of nested grids is used and the displayed iteration numbers are for the finest grid only}
\label{tab_ex2_differentbeta}
\scalebox{0.8}{
\begin{tabular}{@{}lllll@{}}
\toprule
$\beta$
&$10^{-3}$&$10^{-4}$&$10^{-5}$&$5\times 10^{-6}$\\\midrule
$(\#$it,$\#$it$_u)/ \ensuremath{{\cal{E}}} _j$
&$(12,117)/3.3\times 10^{-3}$
&$(22,355)/2.8\times 10^{-3}$
&$(70,958)/2.4\times 10^{-3}$
&$(104,1569)/2.2\times 10^{-3}$
\\
\bottomrule
\end{tabular}
}
\end{center}
\end{table}
Table~\ref{tab_ex2_differentbeta} indicates that Algorithm~\ref{alg:pfdisc} is robust with respect to $\beta$.
As in Example~1 it is possible to achieve lower iteration numbers through manipulation of the algorithmic parameters. For instance, if the final grid change for $\beta=10^{-5}$ happens once $\gamma_i<10^{-9}$ instead of $\gamma_i<10^{-8}$, then only $(41,638)$ iterations are needed on the final grid instead of $(70,958)$.
\section{Summary}\label{sec_sum}
We have studied an optimal control problem
with controls from BV in which the control costs are given by the TV seminorm, favoring piecewise constant controls.
By smoothing the TV seminorm and adding the $H^1$ norm we obtained a family of auxiliary problems whose solutions converge to the optimal solution of the original problem in appropriate function spaces. For fixed smoothing and regularization parameter we showed local convergence of an infinite-dimensional inexact Newton method applied to a reformulation of the optimality system that involves the control as an implicit function of the adjoint state.
Based on a convergent Finite Element approximation a practical path-following algorithm was derived, and it was demonstrated that the algorithm is able to robustly compute the optimal solution of the control problem with considerable accuracy. To verify this, a two-dimensional test problem with known solution was constructed.
\appendix
\section{Differentiability of \texorpdfstring{$\mathbf{\psi_\delta}$}{the smoothed TV seminorm}}\label{sec_differentiabilityofpsidelta}
The following result follows by standard arguments, but we provide its proof for convenience.
\begin{lemma}\label{thm:psiderivativeNEU}
Let $\delta>0$, $N\in\mathds{N}$ and let $\Omega\subset\mathds{R}^N$ be open. The functional
\begin{equation*}
\psi_\delta: H^1(\Omega)\rightarrow\mathds{R}, \qquad
u\mapsto\int_\Omega \sqrt{\delta+\lvert\nabla u\rvert^2}\,\ensuremath {\,\mathrm{d}x}
\end{equation*}
is Lipschitz continuously \text{Fr{\'e}chet } differentiable and twice \text{G{\^a}teaux } differentiable. Its first derivative at $u$ in direction $v$ and its second derivative at $u$ in directions $v,w$ are given by
\begin{equation*}
\psi_\delta^\prime(u)v = \int_\Omega \frac{ \left(\nabla u, \nabla v\right) }{\sqrt{\delta + \lvert\nabla u\rvert^2 } } \ensuremath {\,\mathrm{d}x}
\quad\text{and}\quad
\psi_\delta^{\prime\prime}(u)[v,w] = \int_\Omega \frac{ \left( \nabla v, \nabla w \right)}{ \sqrt{ \delta + \lvert\nabla u\rvert^2 } } - \frac{ \left(\nabla u, \nabla v \right) \left(\nabla u, \nabla w \right) }{ \left( \delta + \lvert\nabla u\rvert^2 \right)^{\frac{3}{2}} } \ensuremath {\,\mathrm{d}x}.
\end{equation*}
\end{lemma}
\begin{proof}
\textbf{First \text{G{\^a}teaux } derivative} \\
Let $u, v\in H^1(\Omega)$.
As $s \mapsto \sqrt{\delta + s}$ is Lipschitz on $[0,\infty)$ with constant $\frac{1}{2\sqrt{\delta}}$, we obtain for all $t\in[-1,1]$, $t\neq 0$,
\begin{equation}\label{eq_domconvprereq}
\left\lvert \frac{\sqrt{ \delta + |\nabla u + t\nabla v|^2} - \sqrt{\delta + \lvert\nabla u\rvert^2 } }{t}\right\rvert
\leq \frac{\left\lvert \nabla v\right\rvert\cdot\left(2\left\lvert \nabla u\right\rvert + \left\lvert\nabla v\right\rvert\right)}{2\sqrt{\delta}} \qquad\text{ a.e. in }\Omega.
\end{equation}
Thus, we can apply the theorem of dominated convergence, which yields
\begin{equation*}
\lim_{t\to 0} \frac{\psi_\delta(u + tv) - \psi_\delta(u)}{t}
= \int_\Omega \lim_{t\to 0} \frac{\sqrt{ \delta + |\nabla u + t\nabla v|^2} - \sqrt{\delta + |\nabla u|^2 } }{t} \ensuremath {\,\mathrm{d}x}
= \int_\Omega \frac{ \left(\nabla u, \nabla v\right) }{\sqrt{\delta + |\nabla u|^2 } } \ensuremath {\,\mathrm{d}x}.
\end{equation*}
From
\begin{equation*}
\left\lvert\int_\Omega \frac{ \left(\nabla u, \nabla v\right) }{\sqrt{\delta + \lvert\nabla u\rvert^2 } } \ensuremath {\,\mathrm{d}x}\right\rvert
\leq \int_\Omega \left\lvert\frac{ \left(\nabla u, \nabla v\right) }{\sqrt{\delta + \lvert\nabla u\rvert^2 } } \right\rvert\ensuremath {\,\mathrm{d}x}
\leq \frac{\lVert \nabla u\rVert_{L^2(\Omega)}\lVert \nabla v\rVert_{L^2(\Omega)}}{\sqrt{\delta}}
\leq \frac{\lVert u\rVert_{H^1(\Omega)}\lVert v\rVert_{H^1(\Omega)}}{\sqrt{\delta}}
\end{equation*}
we see that the functional $v\mapsto\psi_\delta^\prime(u)v$ is linear and continuous.\\
\textbf{Second \text{G{\^a}teaux } derivative}\\
Let $u, v, w \in H^1(\Omega)$. Since
$g:\mathds{R}^N\rightarrow\mathds{R}$, $g(y):=\frac{(y,z)}{\sqrt{\delta + \lvert y\rvert^2}}$, with $z\in\mathds{R}^N$ fixed,
is Lipschitz continuous on $\mathds{R}^N$ with constant $\frac{2}{\sqrt{\delta}}\lvert z\rvert$, we obtain for all $t\in\mathds{R}$, $t\neq 0$,
\begin{equation}\label{eq_domconvprereq2}
\left\lvert\frac{1}{t}\right\rvert\left\lvert\frac{ \left( \nabla u + t \nabla w, \nabla v \right) }{ \sqrt{ \delta + \lvert\nabla u + t\nabla w\rvert^2 } } - \frac{ \left(\nabla u, \nabla v\right) }{\sqrt{\delta + \lvert\nabla u\rvert^2 } } \right\rvert
\leq \frac{2}{\sqrt{\delta}}\lvert\nabla v\rvert \lvert\nabla w\rvert \qquad\text{ a.e. in }\Omega.
\end{equation}
Dominated convergence yields
\begin{equation*}
\begin{split}
\lim_{t\to 0} \frac{\psi_\delta^\prime(u + tw)v - \psi_\delta^\prime(u)v}{t}
& = \int_{\Omega}\lim_{t\to 0}\frac{1}{t}\left(\frac{ \left( \nabla u + t \nabla w, \nabla v \right) }{ \sqrt{ \delta + \lvert\nabla u + t\nabla w\rvert^2 } } - \frac{ \left(\nabla u, \nabla v\right) }{\sqrt{\delta + \lvert\nabla u\rvert^2 } } \right)\ensuremath {\,\mathrm{d}x}\\
& = \int_\Omega \frac{ \left( \nabla v, \nabla w \right)}{ \sqrt{ \delta + \lvert\nabla u\rvert^2 } } - \frac{ \left(\nabla u, \nabla v \right) \left(\nabla u, \nabla w \right) }{ \left( \delta + \lvert\nabla u\rvert^2 \right)^{\frac{3}{2}} } \ensuremath {\,\mathrm{d}x},
\end{split}
\end{equation*}
where we used the directional derivative of $g$ to derive the last equality.
From \eqref{eq_domconvprereq2} we deduce the boundedness of the bilinear mapping $(v,w)\mapsto\psi_\delta^{\prime\prime}(u)[v,w]$ by
\begin{equation}\label{eq_lipcontoffirstderivativeofpsidelta}
\left\lvert\int_\Omega \frac{ \left( \nabla v, \nabla w \right)}{ \sqrt{ \delta + \lvert\nabla u\rvert^2 } } - \frac{ \left(\nabla u, \nabla v \right) \left(\nabla u, \nabla w \right) }{ \left( \delta + \lvert\nabla u\rvert^2 \right)^{\frac{3}{2}} } \ensuremath {\,\mathrm{d}x}\right\rvert
\leq \frac{2}{\sqrt{\delta}}\lVert v\rVert_{H^1(\Omega)}\lVert w\rVert_{H^1(\Omega)}.
\end{equation}
\textbf{Lipschitz continuous \text{Fr{\'e}chet } differentiability}\\
Denoting by
$\norm[{\cal B}]{\cdot}$ the standard norm for bounded bilinear forms on $H^1(\Omega)\times H^1(\Omega)$, we infer from \eqref{eq_lipcontoffirstderivativeofpsidelta} that
$\sup_{u\in H^1(\Omega)}\norm[{\cal B}]{\psi_\delta^{\prime\prime}(u)}\leq \frac{2}{\sqrt{\delta}}$.
This implies that $u\mapsto\psi_\delta^{\prime}(u)$ is Lipschitz with constant
$\frac{2}{\sqrt{\delta}}$, hence
$u\mapsto\psi_\delta(u)$ is \text{Fr{\'e}chet } differentiable. \qed
\end{proof}
\section{H\"older continuity for quasilinear partial differential equations}\label{sec_HoeldercontinuityforquasilinPDEs}
To prove results on the H\"older continuity of solutions to quasilinear elliptic PDEs, we first discuss linear elliptic PDEs.
\begin{theorem}\label{thm:linearregularity}
Let $\alpha \in (0,1)$ and let $\Omega$ be a bounded $C^{1,\alpha}$ domain. Let $\gamma_0, \mu>0$ be given.
Let $A\in C^{0,\alpha}(\Omega,\mathds{R}^{N\times N})$ be a uniformly elliptic matrix with ellipticity constant $\mu$ and let $\gamma \geq \gamma_0$. Let $a^0>0$ be such that $\gamma, \lVert A \rVert_{C^{0,\alpha}(\Omega)} \leq a^0$. Then there is a constant $C>0$ depending only on $\alpha$, $\Omega$, $N$, $\mu$, $a^0$ and $\gamma_0$ such that for any $p\in L^\infty(\Omega)$ and any $f\in C^{0,\alpha}(\Omega,\mathds{R}^N)$ the unique weak solution $u$ to
\begin{equation} \label{eq:linearpde2}
\left\{
\begin{aligned}
-\divg (A\nabla u) +\gamma u & = p - \divg(f) && \text{ in }\Omega,\\
\partial_{A_\nu} u & = 0 && \text{ on }\Gamma,
\end{aligned}
\right.
\end{equation}
satisfies $u\in C^{1,\alpha}(\Omega)$ and
\begin{equation*}
\lVert u \rVert_{C^{1,\alpha}(\Omega)} \leq C \left( \lVert p \rVert_{L^\infty(\Omega)} + \lVert f \rVert_{C^{0,\alpha}(\Omega)} \right).
\end{equation*}
\end{theorem}
\begin{proof}
We did not find a proof of Theorem~\ref{thm:linearregularity} in the literature, so we provide it here.\\
A standard ellipticity argument delivers unique existence and $\lVert u \rVert_{H^1(\Omega)} \leq C \lVert p \rVert_{L^\infty(\Omega)}$, where $C$ only depends on the claimed quantities. Moreover, by \cite[Theorem 3.16(iii)]{troianello}
\begin{equation*}
\lVert u \rVert_{\mathcal{L}^{2,N+2\alpha}(\Omega)} + \lVert \nabla u \rVert_{\mathcal{L}^{2,N+2\alpha}(\Omega)} \leq C \Bigl( \lVert p \rVert_{\mathcal{L}^{2,(N+2\alpha-2)^+}(\Omega)} + \lVert f \rVert_{\mathcal{L}^{2,N+2\alpha}(\Omega)} + \lVert u \rVert_{H^1(\Omega)} \Bigr).
\end{equation*}
Here, $C$ depends on all of the claimed quantities except $\gamma_0$, and $\mathcal{L}^{2,\lambda}(\Omega)$ denotes a Campanato space; for details see \cite[Chapter~1.4]{troianello}. The definition of Campanato spaces implies $\lVert p \rVert_{\mathcal{L}^{2,(N+2\alpha-2)^+}(\Omega)}\leq C \lVert p \rVert_{L^\infty(\Omega)}$. Using the isomorphism between $\mathcal{L}^{2,N+2\alpha}(\Omega)$ and $C^{0,\alpha}(\Omega)$ from \cite[Theorem 1.17~(ii)]{troianello} we obtain
\begin{equation*}
\lVert u \rVert_{C^{1,\alpha}(\Omega)} \leq C \left( \lVert p \rVert_{L^\infty(\Omega)} + \lVert f \rVert_{C^{0,\alpha}(\Omega)} + \lVert u \rVert_{H^1(\Omega)} \right).
\end{equation*}
The earlier ellipticity estimate concludes the proof. \qed
\end{proof}
The next result concerns quasilinear PDEs. It follows directly from \cite[Theorem 2]{Liebermann1988}.
\begin{theorem}\label{thm:lieberalpha}
Let $\Omega$ be a bounded $C^{1,\alpha^\prime}$ domain for some $\alpha^\prime \in (0,1]$. Let $A: \Omega\times\mathds{R}\times\mathds{R}^N\rightarrow \mathds{R}^{N}$, $B: \Omega\times\mathds{R}\times\mathds{R}^N\rightarrow\mathds{R}$, $M>0$ and $0 < \lambda \leq \Lambda$. Let $\kappa, m \geq 0$ and suppose that
\begin{align}
& \sum_{i,j=1}^N \partial_{\eta_i} A_j(x,u,\eta) \xi_i\xi_j \geq \lambda \bigl(\kappa + |\eta|_2^2 \bigr)^m |\xi|_2^2, &&\text{(ellipticity)} \label{eq:Lk1}\\
& \sum_{i,j=1}^N |\partial_{\eta_i} A_j(x,u,\eta)| \leq \Lambda \bigl(\kappa + |\eta|_2\bigr)^m, &&\text{(boundedness of $A$)} \label{eq:Lk2}\\
& |B(x,u,\eta)| \leq \Lambda \bigl(1+|\eta|_2\bigr)^{m+2}, &&\text{(boundedness of $B$)} \label{eq:Lk4}
\end{align}
as well as the H\"older continuity property
\begin{equation}\label{eq:Lk3}
|A(x_1,u_1,\eta) - A(x_2,u_2,\eta)| \leq \Lambda\bigl(1+|\eta|_2\bigr)^{m+1} \bigl( |x_1-x_2|^{\alpha^\prime} + |u_1-u_2|^{\alpha^\prime} \bigr)
\end{equation}
are satisfied for all $x,x_1,x_2\in\Omega$, $u,u_1,u_2 \in [-M, M]$ and $\eta,\xi\in\mathds{R}^N$. Then there exist constants $\alpha \in (0,1)$ and $C>0$ such that each solution $u\in H^1(\Omega)$ of
\begin{equation*}
\int_\Omega A(x,u,\nabla u)^T \nabla \varphi \ensuremath {\,\mathrm{d}x} = \int_\Omega B(x,u,\nabla u)\varphi\ensuremath {\,\mathrm{d}x} \qquad \forall\varphi\in H^1(\Omega)
\end{equation*}
satisfies
\begin{equation*}
\lVert u \rVert_{C^{1,\alpha}(\Omega)} \leq C.
\end{equation*}
Here, $C>0$ only depends on $\alpha^\prime$, $\Omega$, $N$, $\Lambda/\lambda$, $m$, and $M$, while $\alpha \in (0,1)$ only depends on $\alpha^\prime$, $N$, $\Lambda/\lambda$ and $m$.
\end{theorem}
We collect elementary estimates for H\"older continuous functions.
\begin{lemma} \label{lem_hoeldercompositions}
Let $\Omega\subset\mathds{R}^N$ be nonempty, let $\alpha>0$, and let $f,g\in C^{0,\alpha}(\Omega)$. Then:
\begin{itemize}
\item[$\bullet$] $\lVert fg \rVert_{C^{0,\alpha}(\Omega)} \leq \lVert f \rVert_{C^{0,\alpha}(\Omega)} \lVert g \rVert_{C^{0,\alpha}(\Omega)}$.
\item[$\bullet$] $\lVert \sqrt{\epsilon+f^2}\rVert_{C^{0,\alpha}(\Omega)} \leq
\sqrt{\epsilon}+\lVert f \rVert_{C^{0,\alpha}(\Omega)}$ for all $\epsilon>0$.
\item[$\bullet$] If $\lvert f\rvert \geq \epsilon>0$ on $\Omega$ for some constant $\epsilon>0$, then there holds
\begin{equation*}
\left\lVert 1/f \right\rVert_{C^{0,\alpha}(\Omega)} \leq \epsilon^{-2} \lVert f \rVert_{C^{0,\alpha}(\Omega)} + \epsilon^{-1}.
\end{equation*}
\item[$\bullet$] $\lVert \,\lvert h \rvert \, \rVert_{C^{0,\alpha}(\Omega)}
\leq \lVert h \rVert_{C^{0,\alpha}(\Omega,\mathds{R}^N)}$
for all $h\in C^{0,\alpha}(\Omega,\mathds{R}^N)$.
\item[$\bullet$] Let $N_i\in\mathds{N}$ and let $U_i\subset\mathds{R}^{N_i}$ be nonempty, $1\leq i\leq 4$.
For $\phi\in C^{0,1}(U_2,U_3)$, $h\in C^{0,\alpha}(U_1,U_2)$ and $H\in C^{0,\alpha}(U_3,U_4)$ there hold
\begin{equation*}
\lVert\phi\circ h\rVert_{C^{0,\alpha}(U_1,U_3)}\leq
\lvert\phi\rvert_{C^{0,1}(U_2,U_3)} \lvert h\rvert_{C^{0,\alpha}(U_1,U_2)} +
\lVert\phi\rVert_{L^\infty(U_2,U_3)}
\end{equation*}
and
\begin{equation*}
\lVert H\circ\phi\rVert_{C^{0,\alpha}(U_2,U_4)}\leq \lvert\phi\rvert_{C^{0,1}(U_2,U_3)}^\alpha\lvert H\rvert_{C^{0,\alpha}(U_3,U_4)}+
\lVert H\rVert_{L^\infty(U_3,U_4)}.
\end{equation*}
\end{itemize}
\end{lemma}
\begin{proof}
\textbf{First claim:} Because of
$\lvert f(x)g(x) - f(y)g(y)\rvert
\leq \lvert f(x)\rvert\,\lvert g(x)-g(y)\rvert + \lvert g(y)\rvert\,\lvert f(x)-f(y)\rvert
\leq ( \,
\lVert f \rVert_{L^\infty(\Omega)} \lvert g \rvert_{C^{0,\alpha}(\Omega)} + \lVert g \rVert_{L^\infty(\Omega)} \lvert f \rvert_{C^{0,\alpha}(\Omega)} \, ) |x-y|^\alpha$ for all $x,y\in\Omega$, we infer
$\lvert f g\rvert_{C^{0,\alpha}(\Omega)} \leq
\lVert f \rVert_{L^\infty(\Omega)} \lvert g \rvert_{C^{0,\alpha}(\Omega)} + \lVert g \rVert_{L^\infty(\Omega)} \lvert f \rvert_{C^{0,\alpha}(\Omega)}$.
Together with $\lVert fg\rVert_{L^\infty(\Omega)} \leq
\lVert f \rVert_{L^\infty(\Omega)} \lVert g \rVert_{L^\infty(\Omega)}$
this implies the first claim.\\
\textbf{Second claim:}
Since $\phi(t):=\sqrt{\epsilon + t^2}$ is Lipschitz continuous with constant $1$ in $\mathds{R}$, the assertion follows from the fifth claim
by use of
$\lVert\sqrt{\epsilon+f^2}\rVert_{L^\infty(\Omega)}\leq \sqrt{\epsilon}+\lVert f\rVert_{L^\infty(\Omega)}$.\\
\textbf{Third claim:}
Since $\phi(t):=|t|^{-1}$ is Lipschitz continuous with constant $\epsilon^{-2}$ in $\mathds{R}\setminus(-\epsilon,\epsilon)$, the assertion follows from the fifth claim, applied with $U_2:=\{f(x):\,x\in\Omega\}$, by use of
$\lVert\phi\rVert_{L^\infty(U_2,U_3)}=\lVert \, \lvert f\rvert^{-1}\, \rVert_{L^\infty(\Omega)}\leq\epsilon^{-1}$.\\
\textbf{Fourth claim:}
The assertion follows from the fifth claim.
\\
\textbf{Fifth claim:}
For $x,y\in U_1$ we have
\begin{equation*}
\lvert\phi(h(x))-\phi(h(y))\rvert
\leq \lvert\phi\rvert_{C^{0,1}(U_2,U_3)} \lvert h(x)-h(y)\rvert
\leq \lvert\phi\rvert_{C^{0,1}(U_2,U_3)} \lvert h\rvert_{C^{0,\alpha}(U_1,U_2)}\lvert x-y\rvert^\alpha.
\end{equation*}
Together with $\lvert\phi(h(x))\rvert \leq \sup_{y\in U_2} \lvert\phi(y)\rvert = \lVert\phi\rVert_{L^\infty(U_2,U_3)}$ for all $x\in U_1$ we obtain the assertion for $\phi\circ h$. The assertion for $H\circ\phi$ can be established analogously.
\qed
\end{proof}
We can now establish the desired regularity and continuity result for \eqref{eq:quasilinearpde}.
\begin{theorem} \label{thm:quasilinearls}
Let $\Omega\subset\mathds{R}^N$ be a bounded $C^{1,\alpha^\prime}$ domain for some $\alpha^\prime\in (0,1]$. Let $\beta>0$ and $\gamma^0 \geq \gamma \geq\gamma_0 > 0$ and $\delta^0\geq\delta\geq \delta_0>0$.
By $u(p) \in H^1(\Omega)$ we denote for each $p\in L^\infty(\Omega)$ the unique weak solution of
\begin{equation} \label{eq:thm:nonlinearpde}
\left\{
\begin{aligned}
-\divg\Biggl( \Biggl[ \gamma +\frac{\beta}{\sqrt{\delta + \lvert\nabla u\rvert^2}} \Biggr] \nabla u \Biggr) + \gamma u & = p &&\text{ in }\Omega,\\
\Biggl( \left[ \gamma+\frac{\beta}{\sqrt{\delta + \lvert\nabla u\rvert^2}} \right] \nabla u, \nu \Biggr) & = 0 &&\text{ on }\Gamma.
\end{aligned}
\right.
\end{equation}
Let $b^0>0$ be arbitrary and let $\ensuremath { \mathds{B} } \subset L^\infty(\Omega)$ denote the open ball with radius $b^0$. There exists $\alpha\in(0,1)$ such that $u:\ensuremath { \mathds{B} } \rightarrow C^{1,\alpha}(\Omega)$ is well-defined and Lipschitz continuous, i.e. $\lVert u(p_1)-u(p_2)\rVert_{C^{1,\alpha}(\Omega)}\leq L\lVert p_1-p_2\rVert_{L^\infty(\Omega)}$ for all $p_1,p_2\in \ensuremath { \mathds{B} } \subset L^\infty(\Omega)$ and some $L>0$. The constants $L$ and $\alpha$ are independent of $\gamma$ and $\delta$, but may depend on $\alpha^\prime$, $\Omega$, $N$, $\beta$, $b^0$, $\gamma_0,\gamma^0,\delta_0$ and $\delta^0$.
\end{theorem}
\begin{proof}
We did not find a proof of Theorem~\ref{thm:quasilinearls} in the literature, so we provide it here. To this end, let $b^0>0$ and let $p_1,p_2\in L^\infty(\Omega)$ with $\lVert p_1 \rVert_{L^\infty(\Omega)}, \lVert p_2 \rVert_{L^\infty(\Omega)} < b^0$.\\
\textbf{Part 1: Showing existence of $\mathbf{u_1,u_2\in H^1(\Omega)}$.}\\
For $i=1,2$ we define
\begin{equation*}
F_i \colon \ensuremath {H^1(\Omega)} \rightarrow \mathds{R}, \qquad
F_i(v):=\gamma \norm[\ensuremath {H^1(\Omega)} ]{v}^2 + \beta \int_\Omega \sqrt{ \delta + |\nabla v|^2 } \ensuremath {\,\mathrm{d}x} - (p_i,v)_{L^2(\Omega)}.
\end{equation*}
As $F_i$ is strongly convex,
it has a unique minimizer $u_i \in H^1(\Omega)$.
Since $F_i$ is \text{Fr{\'e}chet } differentiable by Lemma~\ref{thm:psiderivativeNEU},
we have $F_i^\prime(u_i) = 0$ in $H^1(\Omega)^\ast$,
which is equivalent to \eqref{eq:thm:nonlinearpde}.\\
\textbf{Part 2: Showing $\mathbf{u_1,u_2\in L^\infty(\Omega)}$ and an estimate for $\mathbf{\lVert u_1\rVert_{L^\infty(\Omega)}}$ and $\mathbf{\lVert u_2\rVert_{L^\infty(\Omega)}}$}\\
Fix $M>\gamma_0^{-1} b^0$ and let $u_{i,M} := \min(M, \max(-M, u_i) )$, $i=1,2$.
For any $\mathds{N} \ni q\geq 1$ we have
\begin{equation*}
\nabla \bigl( u_{i,M}^{2q-1} \bigr) = (2q-1) \, u_{i}^{2q-2} \, \nabla u_i \cdot 1_{\{ -M < u_i < M \}} \in L^2(\Omega).
\end{equation*}
Testing \eqref{eq:thm:nonlinearpde} with $u_{i,M}^{2q-1}$ yields
\begin{equation*}
\begin{split}
\gamma & (u_i, u_{i,M}^{2q-1})_{L^2(\Omega)}\\ & = (p_i, u_{i,M}^{2q-1})_{L^2(\Omega)} - (2q-1) \int_{\{-M< u_i < M\}} \gamma u_{i}^{2q-2} |\nabla u_i|^2 + \beta u_{i}^{2q-2} \frac{|\nabla u_i|^2}{\sqrt{\delta+|\nabla u_i|^2}}\ensuremath {\,\mathrm{d}x} \\
& \leq \lVert p_i \rVert_{L^\infty(\Omega)} \lVert u_{i,M}^{2q-1} \rVert_{L^1(\Omega)}
\leq b^0 \lVert 1 \rVert_{L^{2q}(\Omega)} \lVert u_{i,M}^{2q-1} \rVert_{L^{\frac{2q}{2q-1}}(\Omega)}
= b^0 |\Omega|^{\frac{1}{2q}} \lVert u_{i,M} \rVert_{L^{2q}(\Omega)}^{2q-1}.
\end{split}
\end{equation*}
In combination with
\begin{equation*}
\begin{split}
& \gamma (u_i, u_{i,M}^{2q-1})_{L^2(\Omega)}\\ & = \gamma \left( \int_{\{ -M < u_i < M \}} u_{i}^{2q} \ensuremath {\,\mathrm{d}x} + \int_{\{ u_i \leq -M \}} u_i (-M)^{2q-1} \ensuremath {\,\mathrm{d}x} + \int_{\{ M \leq u_i \}} u_i M^{2q-1} \ensuremath {\,\mathrm{d}x} \right) \\
& \geq \gamma \left( \lVert u_i \rVert_{L^{2q}(\{ -M < u_i < M \})}^{2q} + \int_{ \{ u_i \leq -M \} } (-M)^{2q} \ensuremath {\,\mathrm{d}x} + \int_{\{ M \leq u_i \}} M^{2q} \ensuremath {\,\mathrm{d}x} \right) \geq \gamma_0 \lVert u_{i,M} \rVert_{L^{2q}(\Omega)}^{2q}
\end{split}
\end{equation*}
this yields
$\gamma_0 \lVert u_{i,M} \rVert_{L^{2q}(\Omega)} \leq b^0 |\Omega|^{\frac{1}{2q}}$. Sending $q\to\infty$ gives $\lVert u_{i,M} \rVert_{L^\infty(\Omega)} \leq \gamma_0^{-1} b^0$. As $M>\gamma_0^{-1} b^0$ by assumption we conclude that
\begin{equation} \label{eq:proof:uiLinfty}
\lVert u_i \rVert_{L^\infty(\Omega)} \leq \gamma_0^{-1} b^0 \quad\text{ for }i=1,2.
\end{equation}
\textbf{Part 3: Obtaining $\mathbf{C^{1,\alpha}}$ regularity of $\mathbf{u_1,u_2}$}\\
We apply Theorem~\ref{thm:lieberalpha} with $m = 0$, $A(x,u,\eta) = \gamma\eta + {\beta\eta}/{\sqrt{\delta+\lvert\eta\rvert^2}}$, $B(x,u,\eta)=p_i(x)$ for $i=1,2$, $\kappa=0$, identical values for $\alpha^\prime$, $\lambda =\gamma_0$, $\Lambda =\max\{b^0,\gamma^0 N + \delta_0^{-1/2}\beta (N+N^2)\}$ and $M = \gamma_0^{-1} b^0$, cf. \eqref{eq:proof:uiLinfty}. Since $A$ is independent of $(x,u)$ and continuously differentiable, it is easy to see that the requirements of Theorem~\ref{thm:lieberalpha} are met.
This shows $u_1,u_2\in C^{1,\alpha}({\Omega})$ for some $\alpha>0$ and yields
\begin{align} \label{eq:proof:uiC1alpha}
\lVert u_i \rVert_{C^{1,\alpha}(\Omega)} \leq C,
\end{align}
where $C>0$ and $\alpha\in (0,1)$ depend only on $\alpha^\prime$, $\Omega$, $N$, $\Lambda / \lambda = \gamma_0^{-1} \Lambda$ and $M = \gamma_0^{-1} b^0$.\\
\textbf{Part 4: Lipschitz continuity of $\mathbf{p\mapsto u(p)}$}\\
Taking the difference of the weak formulations supplies
\begin{equation} \label{eq:proof:localLSofT0}
\int_\Omega \nabla \varphi^T \Biggl( \gamma \nabla \tilde u + \beta \frac{\nabla u_1}{\sqrt{\delta + \lvert\nabla u_1\rvert^2}} - \beta \frac{\nabla u_2}{\sqrt{\delta + \lvert\nabla u_2\rvert^2}} \Biggr) + \gamma \varphi \tilde u \ensuremath {\,\mathrm{d}x} = \int_{\Omega}\varphi\tilde p\ensuremath {\,\mathrm{d}x}\;\quad\forall\varphi\in H^1(\Omega),
\end{equation}
where we abbreviated $\tilde u:= u_1-u_2$ and $\tilde p:= p_1-p_2$.
The function $H: \mathds{R}^N \rightarrow \mathds{R}$ given by $H(v) := \sqrt{\delta + \lvert v\rvert^2}$ is convex.
Let $\tau\in [0,1]$ and denote by $u^\tau:\Omega\rightarrow\mathds{R}$ the $C^{1,\alpha}(\Omega)$ function $u^\tau(x) := u_2(x) + \tau \tilde u(x)$.
For every $x\in\Omega$ it holds that
\begin{equation*}
\begin{split}
\frac{\nabla u_1(x)}{\sqrt{\delta + \lvert\nabla u_1(x)\rvert^2}} - \frac{\nabla u_2(x)}{\sqrt{\delta + \lvert\nabla u_2(x)\rvert^2}}
& = \nabla H\bigl(\nabla u_1(x)\bigr) - \nabla H\bigl(\nabla u_2(x)\bigr)\\
& = \int_0^1 \nabla^2 H(\nabla u^\tau(x)) \ensuremath {\,\mathrm{d}\tau}\;\nabla \tilde u(x),
\end{split}
\end{equation*}
where the integral is understood componentwise.
Together with \eqref{eq:proof:localLSofT0} we infer that $\tilde u$ satisfies
\begin{equation*}
\left\{
\begin{aligned}
-\divg\Bigl(\tilde A \nabla \tilde u \Bigr) +\gamma \tilde u & = \tilde p &&\text{ in }\Omega,\\
\partial_{\nu_{\tilde A}} \tilde u & = 0 &&\text{ on }\Gamma,
\end{aligned}
\right.
\end{equation*}
where $\tilde A:\Omega\rightarrow\mathds{R}^{N\times N}$ is given by
\begin{equation*}
\tilde A(x):=\gamma I+ \beta \int_0^1 \nabla^2 H(\nabla u^\tau(x)) \ensuremath {\,\mathrm{d}\tau}.
\end{equation*}
In order to apply Theorem~\ref{thm:linearregularity} to this PDE, we show $\tilde A \in C^{0,\alpha}(\Omega,\mathds{R}^{N\times N})$.
The convexity of $H$ implies that $\nabla^2 H$ is positive semi-definite.
Thus we find for any $v\in \mathds{R}^N$ and any $x\in\Omega$
\begin{equation*}
v^T \tilde A(x) v \geq \gamma |v|^2 \geq \gamma_0 |v|^2.
\end{equation*}
For $x\in\Omega$ and $1\leq i,j\leq N$ it holds that
\begin{equation*}
\lvert \tilde A_{ij}(x)\rvert\leq \gamma + \beta \int_0^1 \left\lvert \bigl[\nabla^2 H(\nabla u^\tau(x))\bigr]_{ij}\right\rvert\ensuremath {\,\mathrm{d}\tau}
\leq \gamma^0 + \beta \sup_{\tau\in[0,1]} \left\lVert\bigl[\nabla^2 H(\nabla u^\tau)\bigr]_{ij}\right\rVert_{L^\infty(\Omega)}.
\end{equation*}
We also have for all $x,y\in\Omega$
\begin{equation*}
\begin{split}
\left\lvert \tilde A_{ij}(x)-\tilde A_{ij}(y)\right\rvert
& \leq \beta \int_0^1 \left\lvert \Bigl[\nabla^2 H(\nabla u^\tau(x))-\nabla^2 H(\nabla u^\tau(y))\Bigr]_{ij}\right\rvert\ensuremath {\,\mathrm{d}\tau}\\
& \leq \beta\sup_{\tau\in[0,1]} \left\lvert \Bigl[\nabla^2 H(\nabla u^\tau(x))-\nabla^2 H(\nabla u^\tau(y))\Bigr]_{ij}\right\rvert\\
& \leq \beta \sup_{\tau\in[0,1]} \left\lvert \Bigl[\nabla^2 H(\nabla u^\tau)\Bigr]_{ij} \right\rvert_{C^{0,\alpha}(\Omega)} \left\lvert x-y\right\rvert^\alpha,
\end{split}
\end{equation*}
which shows $\lvert \tilde A_{ij}\rvert_{C^{0,\alpha}(\Omega)}\leq
\beta \sup_{\tau\in[0,1]} \lvert [\nabla^2 H(\nabla u^\tau)]_{ij} \rvert_{C^{0,\alpha}(\Omega)}$. Together, we infer that
\begin{equation} \label{eq:proof:LScontinuityC1alpha}
\bigl\lVert \tilde A_{ij}\bigr\rVert_{C^{0,\alpha}(\Omega)}
\leq
\gamma^0 + 2\beta\sup_{\tau\in[0,1]}\left\lVert \bigl[\nabla^2 H(\nabla u^\tau)\bigr]_{ij} \right\rVert_{C^{0,\alpha}(\Omega)}
\end{equation}
for all $1\leq i,j\leq N$. From Lemma~\ref{lem_hoeldercompositions} we obtain for every fixed $1\leq i,j\leq N$
\begin{equation*}
\begin{split}
& \left\lVert \bigl[\nabla^2 H(\nabla u^\tau)\bigr]_{ij} \right\rVert_{C^{0,\alpha}(\Omega)}
\leq \left\lVert \frac{1}{\sqrt{\delta + \lvert \nabla u^\tau\rvert^2}} \right\rVert_{C^{0,\alpha}(\Omega)} + \enspace \left\lVert \frac{\partial_{x_i} u^\tau \partial_{x_j} u^\tau}{\sqrt{ \delta + \lvert \nabla u^\tau\rvert^2 }^3} \right\rVert_{C^{0,\alpha}(\Omega)}\\
& \hspace{1cm} \leq C \left( 1 + \Bigl\lVert \sqrt{ \delta + \lvert \nabla u^\tau\rvert^2 } \,\Bigr\rVert_{C^{0,\alpha}(\Omega)} + \bigl\lVert \nabla u^\tau \bigr\rVert_{C^{0,\alpha}(\Omega)}^2 \Bigl\lVert \left(\delta + \lvert \nabla u^\tau\rvert^2\right)^{-\frac32} \Bigr\rVert_{C^{0,\alpha}(\Omega)} \right),
\end{split}
\end{equation*}
where $C$ only depends on $\delta_0$.
Since $\lVert \nabla u_1 \rVert_{C^{0,\alpha}(\Omega)}, \lVert \nabla u_2 \rVert_{C^{0,\alpha}(\Omega)} \leq C$ by \eqref{eq:proof:uiC1alpha}, there holds $\lVert \nabla u^\tau \rVert_{C^{0,\alpha}(\Omega)}\leq C$ with the same $C>0$. This $C$ only depends on $\alpha^\prime$, $\Omega$, $N$, $\beta$, $b^0$, $\gamma_0$, $\gamma^0$ and $\delta_0$.
This and Lemma~\ref{lem_hoeldercompositions} show
\begin{equation*}
\begin{split}
\Bigl\lVert \bigl[\nabla^2 H(\nabla u^\tau)\bigr]_{ij} \Bigr\rVert_{C^{0,\alpha}(\Omega)}
& \leq C \Bigl( 1 + \Bigl\lVert \sqrt{ \delta + \lvert \nabla u^\tau\rvert^2 } \,\Bigr\rVert_{C^{0,\alpha}(\Omega)} + \Bigl\lVert \sqrt{ \delta + \lvert \nabla u^\tau\rvert^2 } \,\Bigr\rVert_{C^{0,\alpha}(\Omega)}^3 \Bigr)\\
& \leq C \Bigl( 1 + \Bigl(\sqrt{\delta}+\left\lVert \nabla u^\tau \right\rVert_{C^{0,\alpha}(\Omega)} \Bigr)^3\Bigr)
\leq C,
\end{split}
\end{equation*}
where $C>0$ is independent of $\tau$ and only depends on the quantities stated in the theorem. Hence, with the same $C$ there holds
\begin{equation*}
\sup_{\tau\in[0,1]}
\left\lVert\bigl[\nabla^2 H(\nabla u^\tau)\bigr]_{ij} \right\rVert_{C^{0,\alpha}(\Omega)} \leq C
\qquad\forall \, 1\leq i,j\leq N.
\end{equation*}
Inserting this into
\eqref{eq:proof:LScontinuityC1alpha} yields $\tilde A \in C^{0,\alpha}(\Omega,\mathds{R}^{N\times N})$ with $\lVert \tilde A \rVert_{C^{0,\alpha}(\Omega)} \leq \gamma^0+2\beta C$.
This implies that Theorem~\ref{thm:linearregularity} is applicable,
which yields $\lVert \tilde u \rVert_{C^{1,\alpha}(\Omega)} \leq C \lVert \tilde p \rVert_{L^\infty(\Omega)}$
with a constant $C$ that only depends on the claimed quantities. This proves the asserted Lipschitz continuity. \qed
\end{proof}
\section{The original problem: Optimality conditions}\label{sec_optcondorigprob}
The first order optimality conditions of \eqref{eq:redProblem} can be obtained by use of \cite{holler}.
The space $W_0^q(\divg;\Omega)$, $q\in[1,\infty)$, that appears in the following is defined in \cite[Definition~10]{holler}.
\begin{theorem}\label{thm:firstorderoptcondoriginalproblem}
Let $\Omega\subset\mathds{R}^N$, $N\in\{1,2,3\}$, be a bounded Lipschitz domain and let $r_N=\frac{N}{N-1}$ if $N>1$, respectively, $r_N\in[1,\infty)$ if $N=1$. Then we have:
The function $\bar u\in \ensuremath {\text{BV}(\Omega)} $ is the solution of \eqref{eq:redProblem} iff there is
\begin{equation*}
\bar h \in L^\infty(\Omega,\mathds{R}^N)\cap W_0^{r_N}(\divg;\Omega)
\end{equation*}
that satisfies $\lVert\lvert\bar h\rvert\rVert_{L^\infty(\Omega)} \leq \beta$ and $\operatorname{div} \bar h= \bar p$, where $\bar p$ is defined as in section~\ref{sec:reducedproblem}, as well as
\begin{equation*}
\begin{aligned}
\bar h & = \beta \frac{\nabla \bar u_a}{|\nabla \bar u_a|} && ~~~~\mathcal{L}^N \text{-a.e. in } \Omega \setminus \left\{ x: \nabla \bar u_a(x) = 0 \right\}, \\
T\bar h & = \beta \frac{\bar u^+(x) - \bar u^-(x)}{|\bar u^+(x) - \bar u^-(x)|} \nu_{\bar u} && ~~~~ \mathcal{H}^1 \text{-a.e. in } J_{\bar u},\\
T\bar h & = \beta \sigma_{C_{\bar u}} && ~~~~ |\nabla \bar u_c| \text{-a.e.}
\end{aligned}
\end{equation*}
Here, the first, second and third equation correspond to the absolutely continuous part, the jump part, respectively, the
Cantor part of the vector measure $\nabla \bar u$.
Also, $\sigma_{C_{\bar u}}$ is the Radon-Nikodym density of $\nabla \bar u_c$ with respect to $|\nabla \bar u_c|$,
cf. e.g. \cite[Theorem 9.1]{brokatemassengl}. Moreover, $\nu_{\bar u}$ is the jump direction of $\bar u$ and $J_{\bar u}$ denotes the discontinuity set of $\bar u$ in the sense of \cite[Definition 3.63]{ambrosio}. Further, $\mathcal{H}^1$ is the Hausdorff measure of $J_{\bar u}$, cf. e.g. \cite[Chapter~4]{Attouch}.
The operator $T \colon \operatorname{dom}(T) \subset W^{\operatorname{div}, q}(\Omega) \cap L^\infty(\Omega,\mathds{R}^N) \rightarrow L^1(\Omega,\mathds{R}^N, |\nabla u|)$ is called the \emph{full trace operator} and is introduced in \cite[Definition 12]{holler}. We emphasize that $\bar h\in \operatorname{dom}(T)$.
\end{theorem}
\begin{proof}
The well-known optimality condition $0\in\partial j(\bar u)$ from convex analysis can be expressed as
$-\frac{\bar p}{\beta} \in \partial\lvert \bar u\rvert_{\ensuremath {\text{BV}(\Omega)} }$, so the claim follows from \cite[Proposition~8]{holler}. \qed
\end{proof}
\begin{remark} \label{rem_sparsityg}
Theorem~\ref{thm:firstorderoptcondoriginalproblem}
implies the sparsity relation
$\{x: \nabla \bar u_a(x)\neq 0\}\subset \{x: |\bar h(x)|=\beta\}$.
Since $\{x: |\bar h(x)|=\beta\}$ typically has small Lebesgue measure (often: measure $0$), $\bar u$ is usually constant a.e. in large parts (often: all) of $\Omega$; cf. also the example in section~\ref{sec_examplewithexplicitsolution}.
\end{remark}
\section{An example with explicit solution}\label{sec_examplewithexplicitsolution}
Using rotational symmetry we construct an example for \eqref{eq:ocintro} for $N=2$ with an explicit solution.
We let $ \ensuremath{{\cal{A}}} =-\Delta$ and $c_0\equiv 0$ in the governing PDE.
We define
$\hat h: [0,\infty) \rightarrow \mathds{R}$, $\hat h(r):=\frac{\beta}{2} (\cos(\frac{2\pi}{R}r)-1)$ and $\Omega := B_{2R}(0) \setminus \overline{B_R(0)}$, where the parameters $R>0$ and $\beta>0$ are arbitrary.
We introduce the functions
\begin{equation*}
r(x,y):=\sqrt{x^2+y^2},\qquad
\bar h(x,y):=\hat h(r(x,y))\nabla r(x,y)
\quad\text{ and }\quad
\bar u(x,y):= 1_{(R,\frac{3R}{2})}(r(x,y)),
\end{equation*}
all of which are defined on $\Omega$. The problem data is given by
\begin{equation*}
\bar p := \operatorname{div} \bar h,
\qquad
\bar y := S\bar u
\qquad\text{ and }\qquad
y_\Omega := \Delta\bar p + \bar y.
\end{equation*}
We now show that these quantities satisfy the properties of Theorem~\ref{thm:firstorderoptcondoriginalproblem}.
By construction $\bar y$ and $\bar p$ are the state and adjoint state associated to $\bar u$ and we have $\bar p = \operatorname{div} \bar h$. We check the properties of $\bar h$. Since $\lvert\nabla r\rvert = 1$ for $(x,y)\in\Omega$,
we obtain $|\bar h(x,y)| = |\hat h(r(x,y))| \leq \frac{\beta}{2} 2 = \beta$. We also see that $\bar h$ is $C^1$ in $\bar\Omega$ and satisfies $\bar h = 0$ on $\partial\Omega$ so that $\bar h \in L^\infty(\Omega,\mathds{R}^N)\cap W_0^q(\divg;\Omega)$ for any $q\in[1,\infty)$.
As $\nabla \bar u(x,y) = -\nabla r(x,y) \mathcal{H}^1_{\partial B_{\frac{3R}{2}}(0)}(x,y)$, we find that $\nabla \bar u$ has no Cantor part and no parts that are absolutely continuous with respect to the Lebesgue measure. Thus, the first and third condition on $\bar h$ in Theorem~\ref{thm:firstorderoptcondoriginalproblem} are trivially satisfied. For $(x,y) \in \partial B_{\frac{3R}{2}}(0)=J_{\bar u}$ we have $\bar h(x,y) =-\beta\nabla r(x,y)=-\beta \nu_{\bar u}$ and $\bar u^+(x)=0$, $\bar u^-(x)=1$ for $x\in J_{\bar u}$, hence the second condition on $\bar h$ in Theorem~\ref{thm:firstorderoptcondoriginalproblem} holds.
Let us confirm that $\bar p$ satisfies the homogeneous Dirichlet boundary conditions. From $\Delta r = r^{-1}$ and $|\nabla r|^2 = 1$ we obtain
\begin{equation*}
\bar p = \operatorname{div} \bar h = \nabla \hat h(r)^T\nabla r + \hat h(r) \Delta r = \hat h^\prime(r) \lvert\nabla r\rvert^2 + r^{-1} \hat h(r) = \hat h^\prime(r) + r^{-1} \hat h(r).
\end{equation*}
Thus, $\bar p$ satisfies the boundary conditions.
Let us confirm that $\bar y$ satisfies the boundary conditions.
The ansatz $\bar y(x,y) = \hat y(r(x,y))$,
with $\hat y:(R,2R)\rightarrow\mathds{R}$ to be determined,
yields
\begin{equation*}
- 1_{(R, 3R/2)}(r) = - \bar u(x,y) = \Delta \bar y(x,y) = \operatorname{div} (\hat y^\prime(r) \nabla r) = \hat y^{\prime\prime}(r) + r^{-1} \hat y^\prime(r).
\end{equation*}
This leads to
\begin{equation*}
\hat y(r) = \begin{cases}
- \frac{r^2}{4} + A\ln(r/(2R)) + B & \text{ if } r\in (R,3R/2),\\
C \ln (r/(2R)) & \text{ if } r\in (3R/2,2R),
\end{cases}
\end{equation*}
and it is straightforward to check that
$\bar y$ satisfies the boundary conditions and is continuously differentiable for the parameters
\begin{equation*}
A
= \frac{R^2}{8} \cdot \frac{18\ln(3/4)-5}{\ln(1/4)},
\qquad
B
= \frac{9R^2}{8} \left( \frac12 - \ln(3/4) \right)
\qquad\text{and}\qquad
C = \frac{R^2}{8} \cdot\frac{18\ln(3/2) - 5}{\ln(1/4)}.
\end{equation*}
All in all, the optimality conditions of Theorem~\ref{thm:firstorderoptcondoriginalproblem} are satisfied.
Moreover, the optimal value in this example is given by
\begin{equation*}
j(\bar u)=\frac12\lVert \bar y - y_\Omega\rVert_{\ensuremath {L^2(\Omega)} }^2 + \beta\lvert\bar u\rvert_{\ensuremath {\text{BV}(\Omega)} }
= \frac12\lVert \Delta\bar p \rVert_{\ensuremath {L^2(\Omega)} }^2 + \beta\lvert\bar u\rvert_{\ensuremath {\text{BV}(\Omega)} },
\end{equation*}
which for $R=2\pi$ results in
\begin{equation*}
j(\bar u) = \frac{\beta^2 \pi}{4} \left( 3 \pi^2 + \ln(8) + \frac{15}{4} \ensuremath{ \operatorname{Ci} } (2\pi) - \frac{27}{4} \ensuremath{ \operatorname{Ci} } (4\pi) + 3 \ensuremath{ \operatorname{Ci} } (8\pi) \right) + 6 \pi^2 \beta
\approx 24.85 \beta^2 + 59.22 \beta
\end{equation*}
with $\ensuremath{ \operatorname{Ci} } (t):=-\int_t^\infty \frac{\cos\tau}{\tau}\ensuremath {\,\mathrm{d}\tau}$.
\end{document} |
\begin{document}
\author{Michel J.\,G. WEBER}
\address{IRMA, UMR 7501, Universit\'e
Louis-Pasteur et C.N.R.S., 7 rue Ren\'e Descartes, 67084
Strasbourg Cedex, France.
E-mail: {\tt [email protected]}}
\keywords{Local limit theorem, asymptotic uniform distribution, Rozanov's Theorem, divisors, Bernoulli random variables, i.i.d. sums, Theta functions. \vskip 1pt 2010 \emph{Mathematics Subject Classification}: {Primary: 60F15, 60G50 ;
Secondary: 60F05}.}
\begin{abstract} For sums $S_n=\sum_{k=1}^n X_k$, $n\ge 1$ of independent random variables $ X_k $ taking values in
${\mathbb Z}$
we prove, as a consequence of a more general result, that
if (i) For some function $1\le \phi(t)\uparrow \infty $ as $t\to \infty$, and some constant $C$, we have for all $n$ and $\nu\in {\mathbb Z}$,
\begin{equation*}\label{abstract1}
\big|B_n{\mathbb P}\big\{ S_n=\nu\big\}- {1\over \sqrt{ 2\pi } }\ e^{-
{(\nu-M_n)^2\over 2 B_n^2} }\big|\,\le \, {C\over \,\phi(B_n)},
\end{equation*}
then (ii) There exists a numerical constant $C_1$, such that for all $n $ such that $B_n\ge 6$, all $h\ge 2$,
and $\m=0,1,\ldots, h-1$,
\begin{align*}\label{abstract1}
\Big|{\mathbb P}\big\{ S_n\equiv\, \m\ \hbox{\rm{ (mod $h$)}}\big\}- \frac{1}{h}\Big|
\le {1\over \sqrt{2\pi}\, B_n }+\frac{1+ 2 {C}/{h}
}{ \phi(B_n)^{2/3} }
+ C_1 \,e^{-(1/ 16 )\phi(B_n)^{2/3}}.
\end{align*}
Assumption (i) holds if a local limit theorem in the usual form is applicable, and (ii) yields a strengthening of Rozanov's necessary condition.
Assume in place of (i) that $\t_j =\sum_{k\in {\mathbb Z}}{\mathbb P}\{X_j= k\}\wedge{\mathbb P}\{X_j= k+1 \} >0$, for each $j$ and that
$\nu_n =\sum_{j=1}^n \t_j\uparrow \infty$.
We prove strengthened forms of the asymptotic uniform distribution property. (iii) Let $\a\!>\!\a'\!>\!0$, $0\!<\!\e\!<\!1$. Then for each $n$ such that
$$|x|\le\frac12 \big( \frac{ 2\a\log (1-\epsilon)\nu_n}{ (1-\epsilon)\nu_n }\big)^{1/2}\qq \Rightarrow \qq{\sin x\over
x}\ge (\a^\prime/\a)^{1/2},$$
we have
\begin{eqnarray*} \sup_{u\ge 0}\,\sup_{d< \pi
( {(1-\epsilon)\nu_n \over 2\a\log (1-\epsilon)\nu_n})^{1/2}
} \ \big| {\mathbb P} \{d|S_n+u \} - {1\over d} \big|
\,\le \,2 \,e^{- \frac{\epsilon^2 }{2}\nu_n}+
\,\big( (1-\epsilon)\nu_n\big)^{-\a'} .
\end{eqnarray*}
(iv) Let $0<\rho<1 $ and $0<\e<1$. The sharper uniform bound $2 e^{- \frac{\epsilon^2 }{2}\nu_n}+e^{- ( (1-\epsilon)\nu_n)^\rho}$ is also proved (for a corresponding $d$-region of divisors), for each $n$ such that $$|x|\le\frac12 \,\big( \frac{ 2 }{ ((1-\epsilon)\nu_n)^{1-\rho} }\big)^{1/2}\qq \Rightarrow \qq{\sin x\over
x}\ge \sqrt{1-\e}.$$ \end{abstract}
\maketitle
\section{\bf Local limit theorem and asymptotic uniform distribution.}\label{s1}
Let $X=\{X_i , i\ge 1\}$ be a sequence of independent variables taking values in ${\mathbb Z}$,
and let $S_n=\sum_{k=1}^n X_k$, for each $n$.
\vskip 3 pt The sequence $X$
is said to be {asymptotically uniformly distributed with respect
to lattices of span $d$}, in short a.u.d.($d$), if for $m = 0,1,\ldots,d-1$, we
have
\begin{equation} \label{aud1}\lim_{n\to \infty}\ {\mathbb P}\{S_n \equiv m \ {\rm (mod}\ d{\rm )}\}=\frac1d.
\end{equation}
Equivalently for $m = 0,1,\ldots,d-1$, we
have
\begin{equation}\label{uad.lim1}\lim_{n\to \infty}\ {\mathbb P}\{d|S_n-m\}=\frac1d.
\end{equation} The sequence $X$ is {asymptotically uniformly distributed}, in short a.u.d., if \eqref{aud1} holds true for any $d\ge 2$ and $m = 0,1,\ldots,d-1$.
\vskip 5 pt
\vskip 3 pt
Dvoretzky and Wolfowitz \cite{DW} proved the following characterization. Assume that $X$ is composed of independent random variables taking only the values
$$ 0, 1,\ldots, h-1.$$
In order that the partial sums $\{S_n, n\ge 1\}$ be a.u.d.($h$), it is necessary and sufficient that
\begin{equation} \label{aud.dw.ns} \prod_{k=1}^\infty\bigg( \sum_{m=0}^{h-1}{\mathbb P}\{X_k=m\}\,e^{\frac{2i\pi }{h}rm}\bigg) \,=\, 0, \qq \quad (r=1,\ldots, h-1).
\end{equation}
Equivalently,
\begin{equation} \label{aud.dw.ns.} \prod_{k=1}^\infty\big({\mathbb E \,} e^{\frac{2i\pi }{h}rX_k}\big) \,=\, \lim_{N\to \infty} \big({\mathbb E \,} e^{\frac{2i\pi }{h}rS_N}\big) \,=\, 0, \qq \quad (r=1,\ldots, h-1).
\end{equation}
This notion plays an important role in the study of the local limit theorem. Let us assume that the random variables $X_k$ take values in a common lattice $\mathcal L(v_{0},D )$, namely defined by the
sequence $v_{ k}=v_{ 0}+D k$, $k\in {\mathbb Z}$, $v_{0} $ and $D >0$ being reals, and are square integrable,
and let
\begin{equation} \label{MnBn}M_n= {\mathbb E\,} S_n , \qq B_n^2={\rm Var}(S_n)\to \infty.
\end{equation}
\vskip 20pt
We say that the local limit theorem (in the usual form) is applicable to $X$ if
\begin{equation}\label{def.llt.indep} \sup_{N=v_0n+Dk }\Big|B_n\, {\mathbb P}\{S_n=N\}-{D\over \sqrt{ 2\pi } }e^{-
{(N-M_n)^2\over 2 B_n^2} }\Big| = o(1), \qq \quad n\to\infty.
\end{equation}
When the random variables $X_i$ are identically distributed, \eqref{def.llt.indep} reduces to \begin{equation}\label{llt.iid} \sup_{N=v_0n+Dk }\Big| \s \sqrt{n}\, {\mathbb P}\{S_n=N\}-{D\over \sqrt{ 2\pi } }e^{-
{(N-n\m)^2\over 2 n\s^2} }\Big| = o(1),
\end{equation}
where $\m={\mathbb E\,} X_1$, $\s^2={\rm Var}(
X_1)$. By Gnedenko's Theorem \cite{G}, see also \cite{P}, p.\,187, \cite{SW}, Th.\,1.4, \eqref{llt.iid} holds
if and only if the span $D$ is maximal (there are no other real numbers
$v'_{0}
$ and
$D' >D$ for which
${\mathbb P}\{X
\in\mathcal L(v'_0,D')\}=1$).
Note that the transformation
\begin{equation}\label{llt.transf.}
X'_j= \frac{X_j-v_0}{D},
\end{equation}
allows one to reduce to the case $v_0=0$, $D=1$.
\begin{remark}Note that the series (in $k$)
\begin{equation}\label{def.llt.indep.sum} \sum_{N=v_0n+Dk } \Big( {\mathbb P}\{S_n=N\}-{D\over \sqrt{ 2\pi } B_n}e^{-
{(N-M_n)^2\over 2 B_n^2} } \Big),
\end{equation}
is obviously convergent, whereas
nothing can be deduced concerning its order from the very definition of the local limit theorem. Further by using Poisson summation formula
the series associated to the second summand verifies
\begin{equation}
\label{def.llt.indep.poisson}
\sum_{N=v_0n+Dk } {D\over \sqrt{ 2\pi } B_n}e^{-
{(N-M_n)^2\over 2 B_n^2} }\,=\,\sum_{\ell \in{\mathbb Z}} e^{2i\pi \ell \{\frac{v_0n-M_n}{D}\}-\frac{2\pi^2\ell^2 B_n^2}{D^2}},
\end{equation}
and so is
$ 1+\mathcal O(D/B_n)$, whereas the one associated to the first is 1. Therefore
\begin{equation}\label{def.llt.indep.sum.} \sum_{N=v_0n+Dk }
\Big( {\mathbb P}\{S_n=N\}-{D\over \sqrt{ 2\pi } B_n}e^{-
{(N-M_n)^2\over 2 B_n^2} }\Big)\,=\, \mathcal O(D/B_n).
\end{equation}
\end{remark}
When a strong local limit theorem with convergence in variation holds we have the more informative result
\begin{equation} \label{sllt1}
\lim_{n\to\infty}
\sum_{N=v_0n+Dk }\Big| {\mathbb P}\{S_n=N\}-{D\over \sqrt{ 2\pi }B_n }e^{-
{(N-M_n)^2\over 2 B_n^2} }\Big| =0.
\end{equation}
\vskip 20pt The following result is well-known.
\begin{theorem}[Rozanov] \label{l1} Let $X=\{X_i , i\ge 1\}$ be a sequence of independent variables taking values in ${\mathbb Z}$,
and let $S_n=\sum_{k=1}^n X_k$, for each $n$. The local limit theorem is applicable to $X$ only if $X$ satisfies the a.u.d. property.
\end{theorem}
\begin{remark} In Petrov \cite{P}, Lemma 1,\,p.\,194, also in Rozanov's \cite{Ro} Lemma 1,\,p.\,261, Theorem \ref{l1} is stated under the assumption that a local limit theorem in the strong form holds, which is not necessary.
\end{remark}
We will in fact prove the following stronger result providing an explicit link between the local limit theorem and the a.u.d. property, through a quantitative estimate of the difference ${\mathbb P} \{ S_n\equiv\, m\! \hbox{\rm{ (mod $h$)}} \}- {1}/{h}$.
\begin{theorem} \label{l1a} Let $X=\{X_i , i\ge 1\}$ be a sequence of independent variables taking values in ${\mathbb Z}$,
and let $S_n=\sum_{k=1}^n X_k$, for each $n$. Assume that
for some function $1\le \phi(t)\uparrow \infty $ as $t\to \infty$, and some constant $C$, we have for all $n$
\begin{equation}\label{phi.cond}
\sup_{m\in {\mathbb Z}}\Big|B_n{\mathbb P}\big\{ S_n=m\big\}- {1\over \sqrt{ 2\pi } }\ e^{-
{(m-M_n)^2\over 2 B_n^2} }\Big|\,\le \, {C\over \,\phi(B_n)}.
\end{equation}
\vskip 3 pt \noindent Then there exists a numerical constant $C_1$, such that for all $0<\e \le 1$, all $n $ such that $B_n\ge 6$, and all $h\ge 2$,
\begin{align*}
\sup_{\m=0,1,\ldots, h-1} \,&\Big|{\mathbb P}\big\{ S_n\equiv\, \m\ \hbox{\rm{ (mod $h$)}}\big\}- \frac{1}{h}\Big|
\cr &\le {1\over \sqrt{2\pi}\, B_n }+\frac{2C}{h\,\sqrt{\e}\,\phi(B_n)}
+ {\mathbb P}\Big\{ \frac{|S_n -M_n |}{B_n}> \frac{1}{\sqrt \e}\Big\}+C_1 \,e^{-1/(16\e)}.
\end{align*}
\end{theorem}
\vskip 8 pt
\begin{remark} \label{rem.thl1a} It follows from the proof that $C_1=2e\sqrt{\pi}$ is suitable.
\end{remark}
Choosing $\e= \phi(B_n)^{-2/3}$ and using Tchebycheff's inequality, we get the following
\begin{corollary}\label{cor}For all $n $ such that $B_n\ge 6$, and all $h\ge 2$,
we have
\begin{align}\label{eps.phi}
\sup_{\m=0,1,\ldots, h-1} \, \Big|{\mathbb P}\big\{ S_n\equiv\, \m\ \hbox{\rm{ (mod $h$)}}\big\}- \frac{1}{h}\Big|
\le H_n ,
\end{align}
with
\begin{align}\label{eps.phi.Hn}
H_n= {1\over \sqrt{2\pi}\, B_n }+\frac{1+ 2 {C}/{h}
}{ \phi(B_n)^{2/3} }
+ C_1 \,e^{-(1/ 16 )\phi(B_n)^{2/3}}.
\end{align}
\end{corollary}
Theorem \ref{l1a} contains Theorem \ref{l1}, since by definition such a function $\phi$ exists if the local limit theorem is applicable to $X$. Further condition \eqref{phi.cond} implies that the local limit theorem is applicable to $X$.
\begin{remark} Examples of LLT's with speed of convergence are given in Appendix.
\end{remark}
\begin{proof}
By assumption,
\begin{equation*}
\Big|B_n{\mathbb P}\big\{ S_n=m\big\}- {1\over \sqrt{ 2\pi } }\ e^{-
{(m-M_n)^2\over 2 B_n^2} }\Big|\,\le \, {C\over \phi(B_n) },
\end{equation*}
for all $m$ and $n$. Let $\e>0$.
We have
\begin{eqnarray*}
\Big|{\mathbb P}\big\{ S_n\equiv\, m\ \hbox{\rm (mod $h$)}\big\}- \sum_{|k-M_n|\le B_n/\sqrt \e
\atop k\equiv m\, (h)} {\mathbb P}\big\{S_n=k\}\Big|&\le &
{\mathbb P}\Big\{ \frac{|S_n -M_n |}{B_n}> \frac{1}{\sqrt \e}\Big\}
,
\end{eqnarray*}
\begin{align*}
\Big| \sum_{|k-M_n|\le B_n/\sqrt \e
\atop k\equiv m\, (h)} {\mathbb P}\big\{S_n=k\}- & {1\over \sqrt{ 2\pi }B_n } \sum_{|k-M_n|\le B_n/\sqrt \e
\atop k \equiv m\, (h)} e^{-
{(k-M_n)^2\over 2 B^2_n} } \Big| \cr &\le \, {C\over B_n\phi(B_n) }\,\sum_{|k-M_n|\le B_n/\sqrt \e
\atop k\equiv m\, (h)}1
\ \le \, \frac{2C}{h\,\sqrt{\e}\,\phi(B_n)}
.
\end{align*}
Letting $z_n= \lfloor M_n\rfloor$, we have
\begin{eqnarray*} \sum_{k\in{\mathbb Z}\atop|k-M_n|> B_n/\sqrt \e } e^{-
{(k-M_n)^2\over 2 B^2_n} }
&\le & \sum_{Z\in {\mathbb Z} \atop |Z-z_n |> B_n/ \sqrt \e } e^{-
{(Z-z_n)^2\over 2 B^2_n} }.
\end{eqnarray*}
Now using the elementary inequality $(a+b)^2\le 2(a^2+b^2)$ for reals $a$, $b$, we have $|Z-z_n |\le\sqrt 2( |Z |+|z_n|) $ and $|Z-z_n |^2\ge |Z |^2/2-z_n^2$. We can thus continue as follows
\begin{eqnarray*}\,\le\, \sum_{Z\in {\mathbb Z} \atop \sqrt 2( |Z |+|z_n|) > B_n/ \sqrt \e } e^{-
{(Z-z_n)^2\over 2 B^2_n} }
&\le& e^{
{1\over 2 B^2_n} }\,\sum_{ Z\in {\mathbb Z} \atop |Z | > (B_n/ \sqrt{2 \e}) -1} e^{-
{ Z ^2\over 4 B^2_n} }
.\qq \end{eqnarray*}
Assume that $B_n\ge \max( 1/\sqrt 2,4\sqrt{2 \e})$, then ${B_n\over \sqrt{2 \e}}-2\ge {B_n\over 2\sqrt{2 \e}}$.
In particular $|Z|\ge 1$ in the previous series, and so we have the estimates
\begin{eqnarray*}\ \le\ 2\,e^{
{1\over 2 B^2_n} }\,\sum_{ Z > (B_n/ 2\sqrt{2 \e}) +1} e^{-
{ Z ^2\over 4 B^2_n} }&\le & 2\,e
\sum_{ Z > (B_n/ 2\sqrt{2 \e}) +1} \int_{Z-1}^Z e^{-{t^2\over 4 B^2_n}} {\rm d} t
\cr &\le & 2\,e
\int_{B_n/ 2\sqrt{2 \e} }^\infty e^{-{t^2\over 4 B^2_n}} {\rm d} t
\cr(
t= \sqrt 2B_n u)\quad&=& 2 \sqrt{2 }e
B_n\int_{1/4\sqrt{ \e}}^\infty e^{-{u^2\over 2 }} {\rm d} u
\cr &\le & 2\sqrt{2 }e
B_n\sqrt{{\pi\over 2}}\, e^{-1/(16\e)}
\cr &= &
2e\sqrt{\pi}
B_n \, e^{-1/(16\e)},\end{eqnarray*}
since $ e^{x^2/2}\int_x^\infty e^{-t^2/2}{\rm d}t \le \sqrt{{\pi\over 2}}$, for any $x\ge 0$.
Therefore
\begin{eqnarray}\label{est.1}
& &\Big|{\mathbb P}\big\{ S_n\equiv\, m\ \hbox{\rm (mod $h$)}\big\}
- {1\over \sqrt{ 2\pi }B_n } \sum_{ k \equiv m\, (h)} e^{-
{(k-M_n)^2\over 2 B^2_n} }\Big|\cr &\le & {\mathbb P}\Big\{ \frac{|S_n -M_n |}{B_n}> \frac{1}{\sqrt \e}\Big\} + \frac{2C}{h\,\sqrt{\e}\,\phi(B_n)} +C_1 \, e^{-1/(16\e)} ,
\end{eqnarray}
with $C_1=2e\sqrt{\pi}$.
\vskip 5 pt
Recall Poisson summation formula: for $x\in {\mathbb R},\ 0\le
\d\le 1
$,
\begin{equation}\label{poisson}\sum_{\ell\in {\mathbb Z}} e^{-(\ell+\d)^2\pi x^{-1}}=x^{1/2} \sum_{\ell\in {\mathbb Z}} e^{2i\pi \ell\d -\ell^2\pi x}.
\end{equation}
Write $k=m+l h$, $M'_n=M_n-m$,
\begin{equation}{(k-M_n)^2\over 2 B^2_n}={( l h-M'_n)^2\over 2 B^2_n}={( l -\lceil M'_n/h\rceil+\{ M'_n/h\})^2\over 2 B^2_n/h^2}={( \ell +\{ M'_n/h\})^2\over 2 B^2_n/h^2},
\end{equation}
letting $ \ell=l -\lceil M'_n/h\rceil$.
\vskip 3 pt
By applying it with $x=2 B^2_n\pi /h^2$, $\d=\{ M'_n/h\}$, we get
\begin{equation} \sum_{ k \equiv m\, (h)} e^{-
{(k-M_n)^2\over 2 B^2_n} }\,=\, \sum_{\ell \in {\mathbb Z}}e^{-{( \ell -\{ M'_n/h\})^2\over 2 B^2_n/h^2}}\,=\, {\sqrt{2 \pi}B_n\over h}\,\sum_{\ell \in {\mathbb Z}}e^{ -2i\pi \ell \{ M'_n/h\} -2\pi^2B_n^2\ell^2/h^2}.
\end{equation}
Whence
\begin{equation} \Big|{ h\over \sqrt{2 \pi}B_n} \sum_{ k \equiv m\, (h)} e^{-
{(k-M_n)^2\over 2 B^2_n} } -1\Big|\le \sum_{|\ell |\ge 1}e^{ -2\pi^2B_n^2\ell^2/h^2}.
\end{equation}
But for any positive real $a$,
\begin{equation}\label{aux.est1}\sum_{H=1}^\infty e^{-aH^2}\le {\sqrt \pi\over 2 }\min({1\over \sqrt a}, {1\over a}).
\end{equation}
Therefore with $a= 2\pi^2B_n^2/h^2$,
\begin{equation*} \Big|{ h\over \sqrt{2 \pi}B_n} \sum_{ k \equiv m\, (h)}
e^{-{(k-M_n)^2\over 2 B^2_n} } -1\Big|\le {\sqrt \pi }\min({h\over \sqrt{2}\pi B_n }, {h^2\over 2\pi^2B_n^2 })\le {h\over \sqrt{2\pi}\, B_n }.
\end{equation*}
We have thus obtained the explicit bound
\begin{equation}\label{est2} \Big|{ 1\over \sqrt{2 \pi}B_n} \sum_{ k \equiv m\, (h)} e^{-
{(k-M_n)^2\over 2 B^2_n} } -\frac1h\Big| \le {1\over \sqrt{2\pi}\, B_n }.
\end{equation}
By carrying it back to \eqref{est.1}, we get for any $\e>0$, all $n $ such that $B_n\ge \max( 1/\sqrt 2,4\sqrt{2 \e})$, and all $h\ge 2$,
\begin{align}\label{est.3}\sup_{\m=0,1,\ldots, h-1}
\Big|{\mathbb P}\big\{ S_n\equiv\, \m\ \hbox{\rm{ (mod $h$)}}\big\}-\frac{1}{h}\Big|
&\le {1\over \sqrt{2\pi}\, B_n }+ \frac{2C}{h\,\sqrt{\e}\,\phi(B_n)}
\cr &\quad+ {\mathbb P}\Big\{ |S_n -M_n |> {B_n\over \sqrt \e}\Big\}+ C_1\, e^{-1/(16\e)}.
\end{align}
This is fulfilled if we choose $0<\e \le 1$, and $n$ such that $B_n\ge 6$, whence the claimed estimate.
\end{proof}
\section{Local limit theorem in the strong form}\label{s2}
There are easy examples of sequences $X$ for which the fulfilment of the local limit theorem depends on the behavior of the first members of $X$.
Hence it is reasonable to introduce the following definition due to Prohorov \cite{Pr}. A local limit theorem in the {\it strong form}
(or {\it in a strengthened form})
is said to be
applicable to $ X$, if a local limit theorem in the usual form is applicable to any sequence which differs from
$ X$ only in a finite number of members.
\vskip 3 pt This definition can be made a bit more convenient, see Gamkrelidze \cite{Gam3}.
Let
\begin{equation}S_{k,n}=X_{k+1}+ \ldots +X_{k+n},\qq A_{k,n}={\mathbb E \,} S_{k,n}, \qq B^2_{k,n}={\rm Var} (S_{k,n}).
\end{equation}
The local limit theorem in the strong form holds if and only if
\begin{equation}\label{lltsf.ref}
{\mathbb P}\big\{ S_{k,n}=m\big\}= {D\over B_{k,n} \sqrt{ 2\pi } }\ e^{-
{(m-A_{k,n})^2\over 2 B_{k,n}^2} }+o\Big({1\over B_{k,n} }\Big),
\end{equation}
uniformly in $m$ and every finite $k$, $k=0,1,2, \ldots$, as $n\to \infty$ and $B_{k,n}\to \infty$.
\vskip 8 pt
Rozanov's necessary condition states as follows.
\begin{theorem}[\cite{Ro},\,Th.\,I]\label{rozanov.I} Let $ X= \{ X_j , j\ge 1\}$ be a sequence of independent, square integrable random variables taking values in ${\mathbb Z}$. Let $b_k^2= {\rm Var}(X_k)$, $B_n^2 =b_1^2+\ldots+ b_n^2$. Assume that
\begin{equation}\label{Ro.A}B_n \to \infty\qq\qq {\rm as} \ n\to \infty.
\end{equation}
The following condition is necessary for the applicability of a local limit theorem in the strong form to the sequence $X$,
\begin{equation}\label{rozn}\prod_{k=1}^\infty
\big[ \max_{0\le m< h} {\mathbb P}\big\{X_k\equiv m\, {\rm (mod {\it \, h })} \big\}\big]=0\qq {\it for\ any\ } h\ge 2 .
\end{equation}
\end{theorem}
\vskip 2 pt
Condition \eqref{rozn} is also sufficient in some important examples, in particular if the $X_j$ have a stable limit distribution, see Mitalauskas \cite{Mit}.
We briefly indicate how Theorem \ref{rozanov.I} is proved.
If the local limit theorem in the strong form is applicable to the sequence $X$, then
\begin{equation} \label{roz.cond}
\sum_{k=1}^\infty \ {\mathbb P}\big\{ X_k\not\equiv 0\, ({\rm mod} \ h) \big\}= \infty, \qq {\it for\ any} \ h\ge 2.
\end{equation}
Indeed, otherwise given $h\ge 2$, by the Borel--Cantelli lemma, on a set of measure greater than $3/4$, $X_k\equiv 0\, ({\rm mod} \ h)$ for all $k\ge k_0$, say. The new sequence $X'$ defined by $X'_k=0$ if $k< k_0$, $X'_k=X_k$ otherwise, with partial sums $S'_n$, verifies ${\mathbb P}\{ S'_n\equiv 0\, ({\rm mod} \ h) \}>3/4$ for all $n$ large enough, and this can be used to derive a contradiction with the fact
that ${\mathbb P}\{ S'_n\equiv 0\, ({\rm mod} \ h) \}$ should converge to $1/h$.
\vskip 2 pt
\vskip 3 pt
The arithmetical quantity
$$\max_{0\le m< h} {\mathbb P}\big\{X_k\equiv m\ {\rm (mod\,{\it h})} \big\}$$
also appears in the study of
local limit theorems with arithmetical sufficient conditions. The approaches used (Freiman, Moskvin and Yudin \cite{FMY}, Mitalauskas \cite{Mit1}, Raudelyunas \cite{Rau} and later Fomin \cite{Fo}, for instance) require that the random variables do not concentrate too much in a particular residue class $m$ (mod $h$) of ${\mathbb Z}$, and impose
arithmetical conditions of type: For all $h\ge 2$
\begin{equation}\label{llt.arithm.cond.}
\max_{0\le m<h}{\mathbb P}\{X_k\equiv m \ {\rm (mod\, {\it h})}\}\le 1-\a_k,
\end{equation}
for all $k$, where $\a_k$ is some specific sequence of reals decreasing to $0$. In addition, one generally has that $\sum _k \a_k = \infty$. Although only the simple form of the local limit theorem is considered here, for obvious reasons, condition \eqref{rozn} brings nothing more in this context.
\vskip 3 pt
\vskip 3 pt As a consequence of the quantitative formulation of the a.u.d. property obtained in Theorem \ref{l1a}, we have the following result.
\begin{theorem} Under the assumptions of Theorem \ref{rozanov.I}, assume further that the local limit theorem is applicable to a sequence $X$. Then
\vskip 3 pt
{\rm(i)}
\begin{eqnarray*} \limsup_{h\to \infty}\ \prod_{k=1}^\infty \max_{0\le m< h}{\mathbb P}\{X_k\equiv\, m\ \hbox{\rm (mod $h$)}\}\,=\ 0.
\end{eqnarray*}
{\rm(ii)} There exists a function $1\le \phi(t)\uparrow \infty $ as $t\to \infty$, such that
\begin{eqnarray*} \sum_{k=1}^n\frac{ \max_{0\le m< h}{\mathbb P}\{X_k\equiv\, m\ \hbox{\rm (mod $h$)}\}}{1- \max_{0\le m< h}{\mathbb P}\{X_k\equiv\, m\ \hbox{\rm (mod $h$)}\}}
&\ge & -\log\big(\frac{1}{h}+H_n\big),
\end{eqnarray*}
where
$H_n={ 1\over \sqrt{2\pi}\, B_n }+\frac{1+ 2 {C}/{h}}{ \phi(B_n)^{2/3} }
+ C_1\,e^{-(1/ 16 )\phi(B_n)^{2/3}} $, and $C,C_1$ are absolute constants.\end{theorem}
\begin{proof} We propose a direct argument.
Consider a
sequence $Y$ where $Y_k=X_k-m_k$, $m_k$ are integers, for all $k\ge 1$. Let $h\ge 2$ be fixed. Choose $m_k$ so that
$$\max_{0\le m< h} {\mathbb P}\big\{X_k\equiv m\, {\rm mod({\it h})} \big\}= {\mathbb P}\big\{X_k\equiv m_k\, {\rm mod({\it h})} \big\}
= {\mathbb P}\big\{Y_k\equiv 0\, {\rm mod({\it h})} \big\}
, $$
and let $\m_n=\sum_{k=1}^nm_k$. Note that $\sum_{k=1}^n Y_k=S_n -\m_n$, ${\rm Var}(\sum_{k=1}^nY_k)={\rm Var}(S_n)=B_n^2$.
\vskip 2 pt \vskip 2 pt As the local limit theorem is applicable to the sequence $X$,
condition \eqref{phi.cond} is satisfied for some function $1\le \phi(t)\uparrow \infty $ as $t\to \infty$, namely we have for all $n$,
\begin{equation*}
\sup_{\nu\in {\mathbb Z}}\Big|B_n{\mathbb P}\big\{ S_n=\nu\big\}- {1\over \sqrt{ 2\pi } }\ e^{-
{(\nu-M_n)^2\over 2 B_n^2} }\Big|\,\le \, {C\over \,\phi(B_n)}.
\end{equation*}
Given $n$, letting $\nu=m+\m_n$ and observing that ${\mathbb P} \{ \sum_{k=1}^n Y_k =m\}={\mathbb P} \{ S_n -\m_n =m\}$, we get for $m\in {\mathbb Z}$, $n\ge 1$,
\begin{equation*}
\Big|B_n{\mathbb P}\Big\{ \sum_{k=1}^n Y_k =m \Big\}- {1\over \sqrt{ 2\pi } }\ e^{-
{(m+\m_n-M_n)^2\over 2 B_n^2} }\Big|\,\le \, {C\over \,\phi(B_n)}.
\end{equation*}
Thus $Y$ satisfies condition \eqref{phi.cond} with the same function $\phi(n) $.
\vskip 2 pt \vskip 2 pt Applying Remark \ref{rem.thl1a} to the sequence $Y$, it follows that,
\begin{eqnarray}\label{kb} \prod_{k=1}^n \max_{0\le m< h}{\mathbb P}\{X_k\equiv\, m\ \hbox{\rm (mod $h$)}\}&=&\prod_{k=1}^n {\mathbb P}\{Y_k\equiv\, 0\ \hbox{\rm (mod $h$)}\}
\cr &\le &
{\mathbb P}\big\{ \sum_{k=1}^n Y_k\equiv\, 0\ \hbox{\rm (mod $h$)}\big\}\le \frac{1}{h}+H_n,
\end{eqnarray}
where $H_n$ has the form given in the statement, and $H_n\to 0$ as $n\to \infty$.
\vskip 2 pt Letting $n$ tend to infinity in \eqref{kb} implies,
\begin{eqnarray}\label{kbh} \prod_{k=1}^\infty \max_{0\le m< h}{\mathbb P}\{X_k\equiv\, m\ \hbox{\rm (mod $h$)}\}&\le& \frac{1}{h}.
\end{eqnarray}
This being true for each $h$, $h\ge 2$, letting now $h$ tend to infinity in \eqref{kbh} yields,
\begin{eqnarray} \limsup_{h\to \infty}\ \prod_{k=1}^\infty \max_{0\le m< h}{\mathbb P}\{X_k\equiv\, m\ \hbox{\rm (mod $h$)}\}&=& 0.
\end{eqnarray}
\vskip 2 pt \vskip 2 pt
We also have by using the elementary inequality $\log( 1-x)\ge -x/(1-x)$, $0\le x<1$,
\begin{eqnarray*} \prod_{k=1}^n {\mathbb P}\{Y_k\equiv\, m\ \hbox{\rm (mod $h$)}\}
&=&\prod_{k=1}^n\big(1- {\mathbb P}\{Y_k\not\equiv\, m\ \hbox{\rm (mod $h$)}\}\big) \cr&=&e^{\sum_{k=1}^n \log(1-{\mathbb P}\{Y_k\not\equiv\, m\ \hbox{\rm (mod $h$)}\})}\cr&\ge &e^{-\sum_{k=1}^n {\mathbb P}\{Y_k\not\equiv\, m\ \hbox{\rm (mod $h$)}\}/(1-{\mathbb P}\{Y_k\not\equiv\, m\ \hbox{\rm (mod $h$)}\})}
.
\end{eqnarray*}
Thus by Remark \ref{rem.thl1a},
\begin{eqnarray*} \sum_{k=1}^n\frac{ \max_{0\le m< h}{\mathbb P}\{X_k\equiv\, m\ \hbox{\rm (mod $h$)}\}}{1- \max_{0\le m< h}{\mathbb P}\{X_k\equiv\, m\ \hbox{\rm (mod $h$)}\}}
&\,=\,&\sum_{k=1}^n\frac{ {\mathbb P}\{Y_k\not\equiv\, m\ \hbox{\rm (mod $h$)}\}}{1-{\mathbb P}\{Y_k\not\equiv\, m\ \hbox{\rm (mod $h$)}\}}
\cr&\ge & -\log\big(\frac{1}{h}+H_n\big).
\end{eqnarray*}
\end{proof}
\begin{remark}\label{kb.rem} (i) Note that the bound used in \eqref{kb} is very weak since
\begin{eqnarray*} \prod_{k=1}^n {\mathbb P}\{Y_k\equiv\, m\ \hbox{\rm (mod $h$)}\}
\ =\
{\mathbb P}\big\{ \forall J\subset [1,n],\ \sum_{k\in J} Y_k\equiv\, m\ \hbox{\rm (mod $h$)}\big\} .
\end{eqnarray*}
One can replace individuals $Y_k$ by sums over blocks according to any partition of $\{1,\ldots,n\}$.
\vskip 2 pt \noindent (ii) Sets of multiples serve as good test sets for the applicability of the local limit theorem because addition is a closed operation. What can be derived when testing the applicability of the local limit theorem with other remarkable sets of integers (squarefree numbers, prime numbers, perfect powers, geometrically growing sequences, \ldots) is unknown.
Concerning the squarefree integers, namely
having no squared prime factors, we note the bound \begin{equation}\label{squarefree} \Big|2^{-n}
\sum_{j\, { \rm squarefree}} C_n^j - \frac{6}{\pi^2}\Big|\le C_1e^{-C_2 (\log
n)^{3/5}/(\log\log n) ^{1/5}}.
\end{equation}
We refer to \cite{DS}.
\end{remark}
\section{Random sequences satisfying the a.u.d. property}\label{s3}
It has some interest to relate the a.u.d. property for Bernoulli sums to the one of sets having Euler density, in this particular case here, arithmetic progressions. A subset $A$ of $ {\mathbb N}$ is said to have Euler density $\l$
with parameter
$\varrho $ (in short ${ E}_\varrho$ density $\l$) if
\begin{equation*}
\lim_{n\to \infty}\sum_{j\in A} C_n^j \varrho^j (1-\varrho)^{n-j}= \l.
\end{equation*}
By a result due to Diaconis and Stein, we have the following characterization.
\begin{theorem}[\cite{DS},\,Th.\,1]\label{ds}For any $A\subset {\mathbb N}$, and $\varrho\in ]0,1[$ the following assertions are equivalent:
\begin{eqnarray*} \label{dschar} ({\rm i}) & &\qq \hbox{\it $A$ has ${E}_\varrho$ density $\l$},
\cr ({\rm ii}) & &\qq\lim_{t\to \infty} e^{-t}\sum_{j\in A} \frac{t^j}{j!}=\l,
\cr ({\rm iii}) & &\qq \hbox{\it for all $\e>0$}, \quad \lim_{n\to \infty} \frac{\#\{j\in A : n\le j< n+\e\sqrt n\} }{\e\sqrt n}
=\l .
\end{eqnarray*}
\end{theorem}
Applying (iii) with $\varrho=\frac12$, to
\begin{equation}\label{setA}A= \{ u+kd,\ k\ge 1\},
\end{equation}
straightforwardly implies
\begin{lemma}\label{ber.uad}
Let $\B_n=\b_1+\ldots+\b_n$, where $ \b_i $ are i.i.d. Bernoulli random variables. Then $\{\B_n, n\ge 1\}$ is a.u.d.($d$) for any $d \ge 2$.
\end{lemma}
\vskip 10 pt
Now consider the independent case and introduce the following characteristic.
Let $Y$ be
a random variable with values in ${\mathbb Z}$. Put
\begin{eqnarray}\label{vartheta} \t_Y =\sum_{k\in {\mathbb Z}}{\mathbb P}\{Y= k\}\wedge{\mathbb P}\{Y= k+1 \} ,
\end{eqnarray}
where $a\wedge b=\min(a,b)$. Note that $0\le \t_Y<1$.
\begin{theorem} \label{t3} Let $ X= \{ X_j , j\ge 1\}$ be a sequence of independent random variables taking values in ${\mathbb Z}$. Assume that $\t_{X_j}>0$ for each $j$. Further assume that the series
$\sum_{j=1}^\infty \t_{X_j}$ diverges. Then $X$ is a.u.d.;
the conclusion holds in particular if the $X_j$ are i.i.d. and $\t_{X_1}>0$.
\end{theorem}
Note that no integrability condition is required, whereas square integrability is required in order that the local limit theorem be applicable.
We prove in the next section that if the series
$\sum_{j=1}^\infty \t_{X_j}$ diverges, much more is in fact true.
Under the assumption made, each $X_j$ admits a Bernoulli component. This is the principle of a coupling method (the Bernoulli part extraction) introduced by McDonald \cite{M}, Davis and McDonald \cite{MD} in the study of the local limit theorem. See Weber \cite{W} for an application of this method to almost sure local limit theorem, and Giuliano and Weber \cite{GW3} where this method is used to obtain approximate local limit theorems with effective rate.
\vskip 3 pt Before passing to the proof, we briefly recall some facts and state an auxiliary Lemma. Let $\mathcal L(v_0,D)$ be a lattice defined by the
sequence $v_{ k}=v_{ 0}+D k$, $k\in {\mathbb Z}$,
$v_{0} $ and $D >0$ being real numbers. Let $X$ be
a random variable such that ${\mathbb P}\{X
\in\mathcal L(v_0,D)\}=1$, and assume that
$\t_X>0$.
Let $ f(k)= {\mathbb P}\{X= v_k\}$, $k\in {\mathbb Z}$. Let also $0<\t\le\t_X$. Associate to $\t$ and $X$ a
sequence $ \{ \tau_k, k\in {\mathbb Z}\}$ of non-negative reals such that
\begin{equation}\label{basber0} \tau_{k-1}+\tau_k\le 2f(k), \qq \qq\sum_{k\in {\mathbb Z}} \tau_k =\t.
\end{equation}
For instance $\tau_k= \frac{\t}{\t_X} \, (f(k)\wedge f(k+1)) $ is suitable.
Next define a pair of random variables $(V,\e)$ as follows:
\begin{eqnarray}\label{ve} \qq\qq\begin{cases} {\mathbb P}\{ (V,\e)=( v_k,1)\}=\tau_k, \cr
{\mathbb P}\{ (V,\e)=( v_k,0)\}=f(k) -{\tau_{k-1}+\tau_k\over
2} . \end{cases}\qq (\forall k\in {\mathbb Z})
\end{eqnarray}
\begin{lemma} \label{lemd} Let $L$
be a Bernoulli random variable which is independent of $(V,\e)$, and let $Z= V+ \e DL$. Then $Z\buildrel{\mathcal D}\over{ =}X$.
\end{lemma}
\begin{proof}[Proof of Theorem \ref{t3}]
We apply Lemma \ref{lemd} with $D=1$ to each $X_j$, and choose $0<\t_j\le\t_{X_j}$ so that the series $\sum_{j=1}^\infty \t_j$ diverges.
One can associate to them a
sequence of independent vectors $ (V_j,\e_j, L_j) $, $j=1,\ldots,n$ such that
\begin{eqnarray}\label{dec0} \big\{V_j+\e_j L_j,j=1,\ldots,n\big\}&\buildrel{\mathcal D}\over{ =}&\big\{X_j, j=1,\ldots,n\big\} .
\end{eqnarray}
Further the sequences $\{(V_j,\e_j),j=1,\ldots,n\}
$ and $\{L_j, j=1,\ldots,n\}$ are independent.
For each $j=1,\ldots,n$, the law of $(V_j,\e_j)$ is defined according to (\ref{ve}) with $\t=\t_j$. And $\{L_j, j=1,\ldots,n\}$ is a sequence of
independent Bernoulli random variables. Set
\begin{equation}\label{dec}
W_n =\sum_{j=1}^n V_j,\qq M_n=\sum_{j=1}^n \e_jL_j, \quad B_n=\sum_{j=1}^n
\e_j .
\end{equation}
Denoting again $X_j= V_j+ \e_jL_j$,
$j\ge 1$, we have
\begin{eqnarray}\label{dep} {\mathbb P} \{d|S_n +u\} &=& {\mathbb E \,}_{(V,\e)} \, {\mathbb P}_{\!L}
\Big\{d|\big( \sum_{j= 1}^n \e_jL_j+W_n \big)+u
\Big\}
. \end{eqnarray}
As $\sum_{j= 1}^n \e_jL_j\buildrel{\mathcal D}\over{ =}\sum_{j=1}^{B_n } L_j$, we have
\begin{eqnarray*} {\mathbb P}_{\!L}
\Big\{d|\big( \sum_{j= 1}^n \e_jL_j+W_n \big)+u
\Big\} &=& {\mathbb P}_{\!L}
\Big\{d| \, \sum_{j=1}^{B_n } L_j+\big(W_n +u\big)
\Big\}.
\end{eqnarray*}
In view of the dominated convergence theorem, it suffices to prove that for each $d\ge 2$,
\begin{eqnarray*} {\mathbb P}_{\!L}
\Big\{d| \, \sum_{j=1}^{B_n } L_j+ (W_n +u) \Big\}\ \to \frac{1}{d},
\end{eqnarray*}
as $n\to \infty$, ${\mathbb P}_{(V,\e)}$ almost surely.
But the set (compare with \eqref{setA})
\begin{equation*} A= \{ (W_n +u)+kd,\ k\ge 1\},
\end{equation*}
now depends on $W_n$, and thus on $n$, which complicates matters.
However we can write
\begin{equation*}
\chi\Big({d\, \big|\,\sum_{j=1}^{B_n } L_j+ (W_n +u)}\Big)=\frac1d\,\sum_{j=0}^{d-1} e^{2i\pi {j \over d} (W_n +u)}e^{2i\pi {j \over d}\sum_{k=1}^{B_n } L_k}.
\end{equation*}
By integrating with respect to $ {\mathbb P}_{\!L}$ we get,
$$ {\mathbb P}_{\!L}
\Big\{d| \, \sum_{j=1}^{B_n } L_j+\big(W_n +u\big)
\Big\}={1\over d}+{1\over d}\sum_{j=1}^{d-1} e^{2i\pi {j \over d} (W_n +u)}\big(\cos { \pi j\over d}\big)^{B_n}
.$$
By the assumption made,
$B_n$ tends to infinity ${\mathbb P}_{(V,\e)}$ almost surely (see (8.3.5) in \cite{W2}, for instance). Thus the latter sum tends to 0 as $n\to \infty$, ${\mathbb P}_{(V,\e)}$ almost surely.
Therefore by the convergence argument invoked before, $ {\mathbb P} \{d|S_n +u\}$ tends to ${1\over d}$ as $n$ tends to infinity, for any $d\ge 2$ and $u\in {\mathbb N}$. Whence it follows that the sequence $\{S_n, n\ge 1\}$ is {\rm a.u.d.}\,.
\end{proof}
\section{Random sequences satisfying a strengthened a.u.d. property.}\label{s4}
For Bernoulli sums, the a.u.d. property is only a rough aspect of the value distribution of divisors of $\B_{ n}+u$, $u\ge 0$ integer. Much more is known.
\begin{theorem}[\cite{W3},\, Th.\,2.1]\label{estPdlBn.u}
We have the uniform estimate
\begin{equation*}
\sup_{u\ge 0}\,\sup_{2\le d\le n}\Big|{\mathbb P}\big\{ d| \B_{ n}+u \big\}- {1\over d}\sum_{ 0\le |j|< d }
e^{i\pi (2u+n){j\over d}}\ e^{ -n
{\pi^2j^2\over 2d^2}}\Big|= {\mathcal O}\big((\log n)^{5/2}n^{-3/2}\big).
\end{equation*}
\end{theorem}
The special case $u=0$ was proved in \cite[Th.\,II]{W1}. Introduce the Theta function
\begin{equation}\label{theta.u.}
\Theta_u(d,n) = \sum_{\ell\in {\mathbb Z}} e^{i\pi (2u+n){\ell\over d}}\ e^{ -n
{\pi^2\ell^2\over 2d^2}}.
\end{equation}
By Poisson summation formula
\begin{equation}\label{theta.u..}
\Theta_u(d,n) = \Big(d\sqrt{\frac{2}{\pi n}}\Big)\ \sum_{\ell \in {\mathbb Z}} e^{-(\ell+\{\frac{u+n/2}{d}\})^2\frac{2d^2}{n}}.
\end{equation}
As a consequence of Theorem \ref{estPdlBn.u}, we get\begin{corollary}\label{cor.estPdlBn.u}
We have the uniform estimate
\begin{equation*} \sup_{u\ge 0}\, \sup_{2\le d\le n}\Big|{\mathbb P}\big\{ d| \B_{ n}+u \big\}-{\Theta_u(d,n)\over d} \Big| \le C \,(\log
n)^{5/2}n^{-3/2} .
\end{equation*}
\end{corollary}
Apart from this important but specific case, it seems that the speed of convergence in the limit \eqref{aud1} was not investigated, in particular when $d$ and $n$ are varying simultaneously.
\vskip 3 pt
Consider the independent case and assume as in Theorem \ref{t3}, that $\nu_n =\sum_{j=1}^n \t_j\uparrow \infty $. The speed of uniform convergence over regions (in $d$ and $n$) presents a singularity when $d$ is getting too close to $\sqrt {\nu_n}$. That quantity already appears in Davis and McDonald \cite{MD}. On the other hand when $d$ is not close to $\sqrt {\nu_n}$, in a sense that we shall make precise, we show that an explicit speed of convergence can be assigned, this under the {\it sole} divergence assumption of the series $\sum_{j=1}^\infty \t_j$. So, for this important class of independent sequences, the well-known a.u.d. necessary condition turns out to be a particularly weak requirement. Further one can show by using Poisson summation formula that in the Bernoulli case, the local limit theorem implies a weaker speed of convergence than the one obtained in Theorem \ref{estPdlBn.u}.
\vskip 3 pt The speed of uniform convergence problem for {\it all} $d$ and $n$, $n\ge d\ge 2$, $n\to\infty$, is more complicated and one must restrict to the i.i.d. case. In place of the limiting term ${1}/{d}$ appears a more complicated Theta elliptic function. See \cite{W3}. For the independent case, the approach used becomes inoperative, due to the appearance of integral products with interlaced integrands.
In fact, what makes it possible to handle the independent case is not just that $d$ and $\sqrt {\nu_n}$ are not too close, but also that, in the background, symmetry properties of the Bernoulli model allowed one to carry out the necessary calculations
in the first quadrant and {\it not} in the half-circle. This point is crucial for getting the uniform speed of convergence in Theorem \ref{estPdlBn.u}. This is explained in \cite{W3}, see reduction Lemma 2.3. In short, when the Bernoulli extraction part applies, these symmetry properties allow one to get a speed of convergence. The proof in the Bernoulli case is transposable to
other systems of random variables when such symmetries exist. This is not the case for the Hwang and Tsai model of the Dickman function \cite{HT}, \cite{GSW}, neither for the Cram\'er model of primes \cite{W4}.
\vskip 3 pt We prove the following result.
\begin{theorem}\label{saud1} Assume that $D=1$, $\t_{X_j}>0$ for each $j$, and that the series
$\sum_{j=1}^\infty \t_{X_j}$ diverges.
Let $\a\!>\!\a'\!>\!0$, $0\!<\!\e\!<\!1$. Then for each $n$ such that
$$|x|\le\frac12 \sqrt{ \frac{ 2\a\log (1-\epsilon)\nu_n}{ (1-\epsilon)\nu_n }}\qq \Rightarrow \qq{\sin x\over
x}\ge (\a^\prime/\a)^{1/2},$$
recalling that $\nu_n =\sum_{j=1}^n \t_j$, we have
\begin{eqnarray*} \sup_{u\ge 0}\,\sup_{d< \pi \sqrt{ (1-\epsilon)\nu_n \over 2\a\log (1-\epsilon)\nu_n}} \ \Big| {\mathbb P} \{d|S_n+u \} - {1\over d} \Big|
&\le &2 \,e^{- \frac{\epsilon^2 }{2}\nu_n}+
\,\big( (1-\epsilon)\nu_n\big)^{-\a'} .
\end{eqnarray*}
\end{theorem}
\
For the proof we use the following Lemma.
\begin{lemma}[\cite{di}, Theorem 2.3] \label{di.1}
Let $X_1, \dots, X_n$ be independent random variables, with $0 \le X_k \le 1$ for each $k$.
Let $S_n = \sum_{k=1}^n X_k$ and $\mu = {\mathbb E \,} S_n$. Then for any $\epsilon >0$,
\begin{eqnarray*}
{\rm (a)} &&
{\mathbb P}\big\{S_n \ge (1+\epsilon)\mu\big\}
\le e^{- \frac{\epsilon^2\mu}{2(1+ \epsilon/3) } } .
\cr {\rm (b)} & &{\mathbb P}\big\{S_n \le (1-\epsilon)\mu\big\}\le e^{- \frac{\epsilon^2\mu}{2}}.
\end{eqnarray*}
\end{lemma}
We also need the following result.
\begin{proposition}[\cite{W3}, Corollary 2.4]
\label{special.cases}{\rm (i)} For each $\a\!>\!\a'\!>\!0$ and $n$ such that $ \tau_n\ge (\a^\prime/\a)^{1/2}$, where
\begin{equation*}
\tau_n= {\sin\p_n/2\over
\p_n /2}, \qquad\qquad \p_n= \big( {2\a\log n \over n}\big)^{1/2},
\end{equation*}
we have \begin{equation*}
\sup_{u\ge 0}\,\sup_{d< \pi \sqrt{ n \over 2\a\log n}}\Big|{\mathbb P}\big\{ d| \mathcal B_{ n} +u\big\}-{1\over d}
\Big|\,\le\, n^{-\a'}.
\end{equation*}
{\rm (ii)}
Let $0<\rho<1 $. Let also $0<\eta<1$, and suppose $n$ sufficiently large so that $\widetilde\tau_n\ge \sqrt{1-\eta}$, where
$$ \widetilde\tau_n= {\sin\psi_n/2\over
\psi_n /2}\qq \qq \psi_n= \big({2n^\rho \over n}\big)^{1/2}.$$
Then,
\begin{equation*} \sup_{u\ge 0}\,\sup_{d< (\pi/\sqrt 2) n^{(1-\rho)/2} }\Big|{\mathbb P}\big\{ d| \mathcal B_{ n} +u\big\}-{1\over d}
\Big|\,\le\, e^{-(1-\eta)\, n^\rho}.
\end{equation*}
\end{proposition}
\begin{proof}[Proof of Theorem \ref{saud1}]
We use the Bernoulli part extraction displayed at Lemma \ref{lemd}, \eqref{dec0}, \eqref{dec} as well as the notation introduced. Let
\begin{eqnarray}\label{dep0}A_n=\big\{B_n\le (1-\e)\nu_n
\big\} .
\end{eqnarray}
We deduce from Lemma \ref{di.1} that ${\mathbb P}\{A_n \} \, \le e^{- \frac{\epsilon^2\nu_n}{2}}$ for all positive $n$. We write
\begin{equation}\label{dep..} {\mathbb P} \{d|S_n \} -{1\over d} \,=\, {\mathbb E \,}_{(V,\e)}
\, \big( \chi(A_n)+\chi(A_n^c)\big)
\, \, \Big({\mathbb P}_{\!L}
\big\{d|\big( \sum_{j= 1}^n \e_jL_j+W_n \big)
\big\}-{1\over d}\Big)
. \end{equation}
On the one hand,
\begin{eqnarray}\label{proof.th.saud1}& & {\mathbb E \,}_{(V,\e)}
\chi(A_n)
\, \Big| {\mathbb P}_{\!L}
\big\{d|\big( \sum_{j= 1}^n \e_jL_j+W_n \big)
\big\}
-{1\over d}
\Big|\ \le \,2 {\mathbb P}\{A_n \} \, \le 2 e^{- \frac{\epsilon^2 }{2}\nu_n}.
\end{eqnarray}
So that
\begin{equation}\label{dep1} \big|{\mathbb P} \{d|S_n \} -{1\over d} \big| \,\le \,2 e^{- \frac{\epsilon^2 }{2}\nu_n}+ {\mathbb E \,}_{(V,\e)}
\,
\chi(A_n^c) \, \cdot \, \Big|{\mathbb P}_{\!L}
\big\{d|\big( \sum_{j= 1}^n \e_jL_j+W_n \big)
\big\}-{1\over d}\Big|
. \end{equation}
Now on $A_n^c$, $B_n\ge (1-\epsilon)\nu_n $, and since $ \sqrt{ x / \log x}$ is increasing on $[e,\infty)$, we have
\begin{equation}\label{phintaun1}
\sqrt{{ (1-\epsilon)\nu_n \over 2\a\log (1-\epsilon)\nu_n}}\le \sqrt{{ B_n \over 2\a\log B_n}}.
\end{equation}
Also
\begin{equation}\label{phintaun2} \p_n=\sqrt{\frac {2\a\log B_n}{ B_n}}\le \sqrt{ \frac{ 2\a\log (1-\epsilon)\nu_n}{ (1-\epsilon)\nu_n }} \quad \hbox{\rm and \ thus} \quad {\sin\p_n/2\over
\p_n /2}\ge (\a^\prime/\a)^{1/2},
\end{equation}
by the assumption made.
\vskip 2 pt By applying Proposition \ref{special.cases}, we have ${\mathbb P}_{(V,\e)}$ almost surely,
\begin{equation*}
\sup_{u\ge 0}\,\sup_{d< \pi \sqrt{ B_n \over 2\a\log B_n}}\Big|{\mathbb P}_{\!L}
\Big\{d\,\big|\Big( \sum_{j=1}^{B_n } L_j+W_n +u \Big)
\Big\}-{1\over d}
\Big|\,\le\, B_n^{-\a'}.\end{equation*}
Whence on $A_n^c$,
\begin{eqnarray}\label{proof.th.saud2}& & \sup_{u\ge 0}\,\sup_{d< \pi \sqrt{ (1-\epsilon)\nu_n \over 2\a\log (1-\epsilon)\nu_n}}\Big|{\mathbb P}_{\!L}
\Big\{d\,\big|\Big( \sum_{j=1}^{B_n } L_j+W_n +u \Big)
\Big\}-{1\over d}
\Big|
\cr &\le& \sup_{u\ge 0}\,\sup_{d< \pi \sqrt{ B_n \over 2\a\log B_n}}\ \Big|{\mathbb P}_{\!L}
\Big\{d\,\big|\Big( \sum_{j=1}^{B_n } L_j+W_n +u \Big)
\Big\}-{1\over d}
\Big|\cr &\le& B_n^{-\a'}\, \le \,\big( (1-\epsilon)\nu_n\big)^{-\a'} .
\end{eqnarray}
In view of \eqref{dep1} and \eqref{proof.th.saud2}, we get for all $u\ge 0$ and $d< \pi \sqrt{{ (1-\epsilon)\nu_n \over 2\a\log (1-\epsilon)\nu_n}}$,
\begin{eqnarray}
\big| {\mathbb P} \{d|S_n+u \} - {1\over d} \big|
&\le & 2 e^{- \frac{\epsilon^2 }{2}\nu_n}+ \,\big( (1-\epsilon)\nu_n\big)^{-\a'} {\mathbb E \,}_{(V,\e)}
\, \chi(A_n^c)
\cr &\le &2 e^{- \frac{\epsilon^2 }{2}\nu_n}+
\,\big( (1-\epsilon)\nu_n\big)^{-\a'} .
\end{eqnarray}
\end{proof}
\vskip 3 pt
\vskip 3 pt
The next result shows a considerable variation of the speed of convergence when $d$ is less close to $\sqrt{\nu_n}$.
\begin{theorem}\label{saud2} Let $0<\rho<1 $ and $0<\e<1$. Then for each $n$ such that
$$|x|\le\frac12 \,\sqrt{ \frac{ 2 }{ ((1-\epsilon)\nu_n)^{1-\rho} }}\qq \Rightarrow \qq{\sin x\over
x}\ge \sqrt{1-\e}$$
we have\begin{eqnarray*}
\sup_{u\ge 0}\,\sup_{d< (\pi/\sqrt 2) ((1-\e)\nu_n)^{(1-\rho)/2} }\ \big| {\mathbb P} \{d|S_n+u \} - {1\over d} \big|
&\le & 2 e^{- \frac{\epsilon^2 }{2}\nu_n}+e^{- ( (1-\epsilon)\nu_n)^\rho}
.
\end{eqnarray*}
\end{theorem}
\begin{proof} The proof is similar. We operate with the same set $A_n$ as in \eqref{dep0}, and use the decomposition \eqref{dep}.
Let $0<\rho<1 $ and $0<\e<1$.
By applying Proposition \ref{special.cases} with $\eta=\e$, we have ${\mathbb P}_{(V,\e)}$ almost surely, for
$n$ such that $\widetilde\tau_n\ge \sqrt{1-\e}$,
where here
$$ \widetilde\tau_n= {\sin\psi_n/2\over
\psi_n /2}\qq {\rm with}\qq \psi_n= \big({2B_n^\rho \over B_n}\big)^{1/2},$$
\begin{equation*} \sup_{u\ge 0}\,\sup_{d< (\pi/\sqrt 2) B_n^{(1-\rho)/2} }\Big|{\mathbb P}_{\!L}
\Big\{d\,\big|\Big( \sum_{j=1}^{B_n } L_j+W_n +u \Big)
\Big\}-{1\over d}
\Big|\,\le\, e^{-(1-\e) B_n^\rho}.
\end{equation*}
By using corresponding estimates to \eqref{phintaun1}, \eqref{phintaun2}, namely that on $A_n^c$,
$$\psi_n=\Big(\frac{2}{B_n^{1-\rho}}\Big)^{1/2}\le \Big(\frac{2}{((1-\e)\nu_n)^{1-\rho}}\Big)^{1/2}, $$
so that $\widetilde\tau_n\ge \sqrt{1-\e}$, we deduce that on
$A_n^c$,$$ \sup_{u\ge 0}\,\sup_{d< (\pi/\sqrt 2) ((1-\e)\nu_n)^{(1-\rho)/2} }\ \Big|{\mathbb P}_{\!L}
\Big\{d\,\big|\Big( \sum_{j=1}^{B_n } L_j+W_n +u \Big)
\Big\}-{1\over d}
\Big|
$$ $$\,\le\, \sup_{u\ge 0}\,\,\sup_{d< (\pi/\sqrt 2) B_n^{(1-\rho)/2} }\ \Big|{\mathbb P}_{\!L}
\Big\{d\,\big|\Big( \sum_{j=1}^{B_n } L_j+W_n +u \Big)
\Big\}-{1\over d}
\Big| \,\le\, e^{-(1-\e) B_n^\rho}.$$
Therefore
\begin{eqnarray} && \sup_{u\ge 0}\,\sup_{d< (\pi/\sqrt 2) ((1-\e)\nu_n)^{(1-\rho)/2} }\ \big| {\mathbb P} \{d|S_n+u \} - {1\over d} \big| \cr &\le & 2 e^{- \frac{\epsilon^2 }{2}\nu_n}+ {\mathbb E \,}_{(V,\e)}
\, \chi(A_n^c)
\,e^{-(1-\e) B_n^\rho}
\,\le \, 2 e^{- \frac{\epsilon^2 }{2}\nu_n}+e^{-(1-\e)^{1+\rho} \nu_n ^\rho}
.
\end{eqnarray}
\end{proof}
\begin{remark} So far we only have considered necessary conditions for the validity of the local limit theorem, which are formulated in terms of a.u.d. property, as well as strengthenings of this property yielding effective speed of convergence bounds. It is important to mention in that context, that in 1984, Mukhin found a remarkable necessary and sufficient condition for the validity of the local limit theorem.
Let $\{S_n,n\ge 1\}$ be a sequence of ${\mathbb Z}$--valued random variables such that an integral limit theorem holds: there exist $a_n\in {\mathbb R}$ and real $b_n\to \infty$ such that the sequence of distributions of $(S_n-a_n)/b_n$ converges weakly to an absolutely continuous distribution $G$ with density $g(x)$, which is uniformly continuous in ${\mathbb R}$.
The local limit theorem is valid if
\begin{equation}\label{ilt.llt}
{\mathbb P}\{S_n=m\}=b_n^{-1} g\Big(\frac{m-a_n}{b_n}\Big) + o(b_n^{-1}),
\end{equation}
uniformly in $m\in {\mathbb Z}$. Mukhin showed that the validity of the local limit theorem is equivalent to the existence
of a sequence of integers $v_n=o(b_n)$ such that
\begin{equation}\label{ilt.llt.diff} \sup_{m}\Big|{\mathbb P}\big\{S_n=m+v_n\big\}-{\mathbb P}\big\{S_n=m \big\}\Big|\,=\,\,o\Big(\frac{1}{b_n}\Big).
\end{equation}
Revisiting the succinct proof given in \cite{Mu2}, we however could only prove rigorously a weaker necessary and sufficient condition, with a significantly different formulation, namely that a necessary and sufficient condition for the local limit theorem in the usual form to hold is
\begin{equation} \sup_{m, k\in{\mathbb Z}\atop |m-k|\le \max\{1, [\sqrt \e_n b_n]\}}\Big|{\mathbb P}\big\{S_n=m\big\}-{\mathbb P}\big\{S_n=k \big\}\Big|\,=\,\,o\Big(\frac{1}{b_n}\Big),
\end{equation}
where \begin{equation} \label{en}\e_n:=\sup_{x\in {\mathbb R}}\Big|{\mathbb P}\Big\{\frac{S_n-a_n}{b_n}<x\Big\}-G(x)\Big|\ \to \ 0,
\end{equation}
by the integral limit theorem.
This is the object of the Note \cite{W5}, with remarks and references on general relations of type \eqref{ilt.llt.diff} therein. Mukhin wrote at this regard in \cite{Mu2}: \lq\lq ... getting from here more general sufficient conditions turns out to be difficult in view of the lack of good criteria. Working with asymptotic equidistribution properties are more convenient in this respect\,\rq\rq.
\end{remark}
\appendix
\vskip 6pt
\section{LLT's with speed of convergence.} Let $S_n=X_1+\ldots +X_n$, $n\ge 1$, where
$X_j$ are independent random variables such that
${\mathbb P}\{X_j
\in\mathcal L(v_{ 0},D )\}=1$.
\vskip 3 pt
Assume first that the random variables $X_j$ are identically distributed. Then we have the following characterization result.
\begin{theorem} \label{r} Let
$F$ denote the distribution function of
$X_1$.
{\rm (i) (\cite{IBLIN}, Theorem 4.5.3)} In order that the property
\begin{equation} \label{alfa}
\sup_{N=an+Dk}\Big|
{ \frac{\s \sqrt n}{D} }{\mathbb P}\{S_n=N\}-{1 \over \sqrt{ 2\pi}\s}e^{-
{(N-n\m )^2\over 2 n \s^2} }\Big| ={\mathcal O}\big(n^{-\alpha{/2}} \big) ,
\end{equation}
{ where $0<\a<1$},
it is necessary and sufficient that the following conditions be satisfied:
\begin{eqnarray*} (1) \ D \ \hbox{is maximal}, \ \qq\qq
(2) \ \ \int_{|x|\ge u} x^2 F(dx) = \mathcal O(u^{-\a})\quad \hbox{as $u\to \infty$.}
\end{eqnarray*}
{\rm (ii) (\cite{P} Theorem 6 p.\,197)} If ${\mathbb E\,} |X_1|^3<\infty$, then \eqref{alfa} holds with $\a =1/2$.
\end{theorem}
\vskip 6 pt
Now consider the non-identically distributed case. Assume that (see \eqref{vartheta})
\begin{equation}\label{basber.pos} \t_{X_j}>0, \qq \quad j=1,\ldots, n.
\end{equation}
Let $\nu_n =\sum_{j=1}^n \t_j$. Let $\psi:{\mathbb R}\to {\mathbb R}^+$ be even, convex and such that $\frac
{\psi(x)}{x^2}$ and $\frac{x^3}{\psi(x)}$ are non-decreasing on ${\mathbb R}^+$. We further assume that
\begin{equation}\label{did} {\mathbb E \,} \psi( X_j )<\infty .
\end{equation} Put $$L_n=\frac{ \sum_{j=1}^n{\mathbb E \,} \psi (X_j) }
{ \psi (\sqrt
{ {\rm Var}(S_n )})} .$$
The following result is Corollary 1.7 in Giuliano-Weber in \cite{GW3}.
\begin{theorem}\label{ger3} Assume that $\frac{ \log \nu_n }{\nu_n}\le {1}/{14} $. Then, for all $\k\in \mathcal L( v_{
0}n,D )$ such that
$$\frac{(\k- {\mathbb E \,}
S_n)^2}{ {\rm Var}(S_n) } \le \sqrt{\frac{7 \log \nu_n} {2\nu_n}},$$ we have
\begin{eqnarray*} \Big| {\mathbb P} \{S_n =\kappa \} -{ D e^{- \frac{(\k- {\mathbb E \,}
S_n)^2}{ 2 {\rm Var}(S_n) } } \over \sqrt{2\pi {\rm Var}(S_n) }} \Big| & \le & C_3\Big\{
D\big({ { \log \nu_n } \over
{ {\rm Var}(S_n) \nu_n} } \big)^{1/2} + { L_n
+ \nu_n^{-1}
\over \sqrt{ \nu_n} } \Big\} .
\end{eqnarray*}
And $C_3=\max (C_2, 2^{ 3/2}C_{{\rm E}}) $, $C_{{\rm E}}$ being an absolute constant arising from Berry-Esseen's inequality.
\end{theorem}
\vskip 8 pt We pass to another speed of convergence result due to Mukhin. Consider the structural characteristic of a random variable $X$, introduced and studied by Mukhin in \cite{Mu1} and \cite{Mu} for instance,
$$ H(X ,d) = {\mathbb E \,} \langle X^*d\rangle^2,$$ where $\langle \a \rangle$ denotes the distance from $\a$ to the nearest integer, and $X^*$
is a symmetrization of
$X$. Let $\p_X$ be the characteristic function of $X$.
The two-sided inequality
\begin{eqnarray}\label{fih} 1-2\pi^2 H(X ,\frac{t }{2\pi}) \le |\p_X(t)|\le 1-4 H(X ,\frac{t }{2\pi}) ,
\end{eqnarray}
is established in the above references. See also Szewczak and Weber \cite{SW} for more.
The following is the one-dimensional version of Theorem 5 in \cite{Mu}, see also \cite{SW} and is stated without proof, however.
\begin{theorem}[Mukhin]\label{Mukhin.th.Hn} Let $X_1,\ldots, X_n$ have zero mean and finite third moments. Let
$$ B_n^2= \sum_{j=1}^n{\mathbb E\,} |X_j|^2 ,\qq H_n= \inf_{1/4\le d\le 1/2}\sum_{j=1}^n H(X_j
,d), \qq L_n= \frac{\sum_{j=1}^n{\mathbb E\,} |X_j|^3}{(B_n)^{3/2}} .$$ Then
\begin{equation}\label{llt} \sup_{N=v_0n+Dk }\Big|B_n {\mathbb P}\{S_n=N\}-{D\over \sqrt{ 2\pi } }e^{-
{(N-M_n)^2\over 2 B_n^2} }\Big|\,\le CL_n\, \big( {B_n }/{ H_n}\big) .
\end{equation} \end{theorem}
\vskip 8 pt
\end{document} |
\begin{document}
\preprint{APS/123-QED}
\title{Cavity-enhanced superradiant Rayleigh scattering with ultra-cold and Bose-Einstein condensed atoms}
\author{Sebastian Slama}
\author{Gordon Krenz}
\author{Simone Bux}
\author{Claus Zimmermann}
\author{Philippe W. Courteille}
\affiliation{Physikalisches Institut, Eberhard-Karls-Universit\"at T\"ubingen,\\
Auf der Morgenstelle 14, D-72076 T\"ubingen, Germany}
\date{\today}
\begin{abstract}
We report on the observation of collective atomic recoil lasing and
superradiant Rayleigh scattering with ultracold and Bose-Einstein
condensed atoms in an optical ring cavity. Both phenomena are based
on instabilities evoked by the collective interaction of light with
cold atomic gases. This publication clarifies the link between the
two effects. The observation of superradiant behavior with thermal
clouds as hot as several tens of $\mu\textrm{K}$ proves that the
phenomena are driven by the cooperative dynamics of the atoms, which
is strongly enhanced by the presence of the ring cavity.
\end{abstract}
\pacs{42.50.Gy, 03.75.-b, 42.60.Lh, 34.50.-s}
\maketitle
\section{Introduction}
The interaction of light with atomic gases takes place in most cases as a local process: Light shone into an atomic cloud is scattered by individual atoms. In principle, every atom having scattered a photon can be detected through the momentum imparted to it by photonic recoil, and in general, the scattering process is ignored by all other atoms. This holds even for Bose-Einstein condensates (BEC), which are pure quantum states consisting of an ensemble of delocalized atoms. There are however prominent exceptions: Dicke superradiance \cite{Dicke54} is a well-known synchronization phenomenon in spontaneous emission. It is observed, for instance, as a collective deexcitation of an ensemble of inverted atoms with an accelerated rate, which scales with the square of the number of inverted atoms \cite{Skribanowitz73}. Another example is the collective absorption of photonic recoil by an ensemble of atoms tied together by strong forces known as M\"o\ss bauer effect \cite{Moessbauer58,Steane97}.
Collective effects in light scattering arise when the scatterers are mutually coupled by interactions or display long-range order. Often the collective coupling involves mechanical forces, for example photonic recoil or the electrostrictive force arising from dipole-dipole interactions. In both cases, the interatomic force originates from a radiative interaction, or using fully quantized terms, the transfer of phonons is mediated by an exchange of photons. Compared to short-ranged binary collisions radiation-based interaction extends much further in space. Under some circumstances it can be completely delocalized. In some cases, collective coupling can trigger instabilities. Well-known examples for instabilities in the field of nonlinear optics are stimulated Raman scattering, stimulated Brillouin scattering or the collective atomic recoil laser (CARL) \cite{Bonifacio94,Bonifacio95,Kruse03b,Cube04,Slama07}.
Collective instabilities have recently been observed in clouds of cold and ultra-cold atoms driven by light \cite{Inouye99,Kozuma99,Fallani05,Kruse03b,Elsaesser03b,Black03,Labeyrie06}. In the present paper, we focus on two types of experiments,
dealing with the superradiant Rayleigh scattering (SRyS) phenomenon on one hand \cite{Inouye99,Kozuma99,Fallani05} and the collective atomic recoil laser \cite{Kruse03b} on the other.
CARL is observed, when a strong pump field is shone onto an atomic gas. This leads to the exponential growth of an unpumped probe light field and to the formation of an atomic density grating \cite{Bonifacio94,Bonifacio95}. If pump and probe light field are counter-propagating modes of a high-finesse ring cavity, the interaction time of the light fields with the atoms can be enhanced by several orders of magnitude, which supports the amplification. Consequently, all CARL experiments carried out up to date employed ring cavities \cite{Kruse03b,Cube04,Slama07}. Therefore, in this paper we will use the term CARL in the tight sense of a cavity-assisted collective instability, although the CARL has originally been postulated without cavity \cite{Bonifacio94}.
SRyS has first been observed in Bose-Einstein condensed atomic
clouds. A short laser pulse shone onto the cloud is scattered from
atoms of the BEC, which then by photonic recoil form motional
sidemodes. Matter-wave interference between the recoiling atoms and
the BEC at rest leads to the formation of an atomic density grating
thereby exponentially enhancing the scattering. SRyS was originally
attributed to four-wave mixing between optical and matter waves,
bosonically stimulated by the macroscopic occupation of the final
momentum state. Already in the pioneering work \cite{Inouye99} it
was recognized that SRyS does not require quantum degeneracy and
would in principle also work in a thermal cloud. Nevertheless the
terminology of bosonic stimulation and the fact that SRyS could at
first not be observed with thermal clouds led to some obscurity and
discussions about the role of quantum statistical effects. Theoretic
work \cite{Moore01,Ketterle01} showed that the gain mechanism is
independent of the quantum statistics and should in principle also
be observable with fermionic and thermal atoms. The experimental
proof was given by the observation of CARL \cite{Kruse03b} and SRyS
with thermal gases \cite{Yoshikawa05}. The important feature is not
the quantum state of the atoms but the cooperative behavior.
CARL has a close analogy with SRyS, since they both share the same gain mechanism \cite{Piovella01}. However in contrast to SRyS, CARL activity has been observed with thermal atoms as hot as a few $100~\mu$K \cite{Kruse03b}. This fact raises the question, what distinguishes both collective effects. In both experiments there must be a coherent mechanism correlating the individual scattering events. Coherence can be transferred between scattering events either via de Broglie waves interference or optical interference.
SRyS is difficult to observe with thermal atomic ensembles, because
the coherence is stored in the momentum states of the atoms. Thermal
motion of atoms therefore Doppler-limits the coherence time of the
system \cite{Yoshikawa05}. CARL is much less sensitive to the
thermal motion of the atoms, because the coherence is stored in the
light field of the cavity. The density-of-states in the cavity
restricts the frequency of the scattered light to values close to
one of its eigenfrequencies. In the case of a so-called good-cavity
this is equivalent to the fact that the atomic momentum states which
can be populated by photonic momentum transfer are limited to a few
low-lying states. This effect counteracts momentum diffusion which
can occur due to a thermal motion of the atoms, but is also
intrinsically connected with the collective gain process itself.
We organized this paper as follows: In section~\ref{SecTheory} we expose the problem of motion-induced collective effects in light scattering. In particular, we will discuss the intricate relationship between CARL and SRyS, pointing out the common features and the differences. We will then briefly introduce the mathematical models we use to reproduce our observations in simulations. Ideally, in a perfectly homogeneous cloud the collective instability would start from quantum fluctuations in the reverse mode, thermal excitations of this mode being completely frozen out at room temperature. However, thermal fluctuations in the atomic density distribution and, even more
important, spurious light scattering at the surfaces of the cavity mirrors scatter a certain amount of light into the reverse mode, which is sufficient to seed the instability. It is thus important to incorporate mirror backscattering in realistic theoretical models, as we will show in section~\ref{SecTheoryMirrors}. Section~\ref{SecExperiment} is devoted to presenting our experimental apparatus, the temporal sequence of an experimental run and several measurements. In particular, we will show the measured dependences of the collectively scattered light power on various parameters, such as atom number, pump power, and mirror backscattering. We will demonstrate that both regimes, the good- as well as the bad-cavity regime, can be realized and exhibit characteristic signatures. In section~\ref{SecExperimentTof}, we present and discuss time-of-flight absorption images taken on thermal and Bose-condensed atomic clouds. We conclude this paper with a discussion and a brief outlook.
\section{Theoretical background}
\label{SecTheory}
CARL and SRyS have been observed under very different experimental
circumstances and in different parameter regimes. In the case of
CARL the atoms are stored in a ring cavity, for SRyS they are held
in free space. CARL can be observed with $100~\mu$K cold atoms
\cite{Kruse03b}, while SRyS requires temperatures lower than
$1~\mu$K and is hardly seen with thermal clouds. Finally, CARL is
seen with pump laser detunings, which are 3 or 4 orders of magnitude
larger than for SRyS.
Nevertheless, both phenomena have an important feature in common.
They share the same gain mechanism based on collective light
scattering and leading to an exponential instability in the atomic
density distribution and to the emission of coherent light pulses.
In this section, we will summarize and combine the main theoretical
results published in \cite{Inouye99, Piovella01b, Robb05} in order
to clarify the connection between CARL and SRyS in a consistent
picture supporting the understanding of our measurements. Later we
derive equations of motion valid in both regimes of CARL and SRyS.
\subsection{Self-amplification in CARL and SRyS}
\label{SecTheoryCarlSrys}
In the CARL experiments \cite{Kruse03b,Cube04,Slama07}, a cold or ultracold atomic cloud is brought into the mode volume of a unidirectionally pumped ring cavity. The pump light is very far detuned by more than $1~$nm. It is irrelevant whether the cloud is condensed or thermal. The atoms scatter light from the pumped into the reverse mode. Tiny fluctuations in the nearly homogeneous atomic density distribution are exponentially amplified. The atoms self-organize into a one-dimensional optical lattice and a red-detuned coherent probe light is emitted by the reverse mode.
The Rabi frequency generated by a single photon in the ring cavity of round-trip length $L$ and waist $w_0$ is $\Omega_1=\sqrt{3\Gamma c/k^2w_0^2L}$ \cite{Gangl00,Cube06}. The single-photon light-shift far from resonance, $U_0=\Omega_1^2/\Delta$, can also be interpreted as the Rabi frequency for the coupling between the pump and the probe mode, i.e.~the rate at which photons are exchanged between the modes.
The small signal gain can be derived from a linearization of the CARL equations \cite{Piovella01b,Robb05},
\begin{equation}\label{Eq01}
G_c = \frac{2g^2N}{\kappa_{\mathrm{c}}}~,
\end{equation}
where $N$ is the atom number and
$\kappa_{\mathrm{c}}=\pi\delta_{\text{fsr}}/F$ the decay rate of the
light field in the cavity. $\delta_{\text{fsr}}$ is the free
spectral range of the cavity and $F$ its finesse. The quantity $g$
is given by
\begin{equation}\label{Eq02}
g = \frac{\Omega_+\Omega_-}{2\Delta}~,
\end{equation}
where the Rabi frequency generated by the pump mode scales with the root of the pump photon numbers, $\Omega_+=\Omega_1\sqrt{n_+}$. The coupling strength in the probe mode is $\Omega_-=\Omega_1$.
From the above equations, we get
\begin{equation}\label{Eq03}
G_c = \frac{\Omega_+^2}{2\Delta}\frac{N}{\kappa_{\mathrm{c}}}\frac{\Omega_-^2}{\Delta}~.
\end{equation}
In the SRyS experiments performed up to date \cite{Inouye99,Kozuma99,Schneble03,Fallani05,Yoshikawa05}, an ultracold, in general Bose-condensed atomic cloud with ellipsoidal shape is irradiated by a short pump laser pulse modestly detuned from an atomic resonance by about $1~$GHz. The pulsed pump light drives a transient dynamics simultaneously forming a matter wave grating and emitting an optical mode into the BEC's long axis, which exponentially amplify each other.
Following Ref.~\cite{Inouye99}, one may associate the part of the BEC that corresponds to atoms which have scattered a photon with an atom number $N_r$. The remaining part consists of $N$ atoms. The density is modulated by interference between the two parts of the wave function, and the number of atoms that form the density modulation is $N_{\text{mod}}\propto\sqrt{2N N_r}$. As for usual Bragg scattering or Dicke superradiance the number of photons $n$ scattered at the density modulation is $n\propto N_{\text{mod}}^2\propto N_r$. Since every scattered photon generates a recoiling atom, the number of recoiling atoms increases like $\dot{N_r}\propto n$, and we get $\dot{N_r}=G_{\text{sr}}N_r$, i.e.~an exponential increase of recoiling atoms with a gain factor $G_{\text{sr}}$. This increase is mirrored by an identical rise of the number of scattered photons, which results in a gain mechanism for the scattered light mode. The incident and the scattered light mode are coherently coupled, just like in the case of CARL, so that in principle the scattered photons can be scattered back into the incident mode.
The superradiant gain can be expressed as
\begin{equation}\label{Eq04}
G_{\text{sr}} = RN_0\frac{\Phi_s}{8\pi/3}~,
\end{equation}
where $R=\Gamma\Omega_+^2/(4\Delta^2+2\Omega_+^2+\Gamma^2)$ is the single-atom Rayleigh scattering rate, with $\Gamma$ being the linewidth of the atomic resonance, $\Delta$ the detuning, and $\Omega_+$ the Rabi frequency generated by the incident laser beam. $\Phi_s\simeq\lambda^2/\bigl(\tfrac{\pi}{4}w^2\bigr)$ is the scattering solid angle, with $w$ being the waist of the condensate. Hence, far from resonance,
\begin{equation}\label{Eq05}
G_{\text{sr}} = \frac{\Omega_+^2}{\Delta^2}N_0\frac{3\Gamma}{2k^2w^2}~.
\end{equation}
This result can be brought into the same form as the CARL
gain~(\ref{Eq03}), if we interpret the condensate, whose length
along the long axis is $L$, as a cavity with free spectral range
$\delta_{\text{fsr}}=c/L$ and finesse $F_{\text{sr}}=\pi$. With this
interpretation the decay rate of the light mode scattered by the
condensate is given by the residence time of the light within the
BEC \cite{Stamper-Kurn00},
$\kappa_{\text{sr}}=\pi\delta_{\text{fsr}}/F_{\text{sr}}=c/L$. Hence,
\begin{equation}\label{Eq06}
G_{\text{sr}} = \frac{\Omega_+^2}{2\Delta^2}\frac{N_0}{\kappa_{\text{sr}}}\frac{3\Gamma\delta_{\text{fsr}}}{k^2w^2}
= \frac{\Omega_+^2}{2\Delta}\frac{N_0}{\kappa_{\text{sr}}}\frac{\Omega_1^2}{\Delta}~.
\end{equation}
This result shows the equivalence of the superradiant gain and the gain occurring in CARL in equation (\ref{Eq03}).
The formal identity of the small signal gain of CARL and SRyS points
to the same roots of both phenomena. Nevertheless, their respective
experimental circumstances are quite different. The differences
become most apparent in the simultaneous build-up of the atomic
density grating and optical standing wave, occurring as well in CARL
as in SRyS. The difference lies in the storage of the coherence,
which is crucial in order to sustain the build-up process. In
principle the coherence can either be stored as a matter wave
coherence between different atomic momentum states or as a phase
coherence between the two involved light fields. In SRyS the optical
coherence time alone would be very small, as can be estimated from
the decay rates of the optical modes, which are on the order of
$\kappa_{\text{sr}}\simeq10^{12}~\text{s}^{-1}$. The coherence must
therefore be maintained in the atomic momentum states which then
form a matter wave grating. This is the reason why SRyS is very
sensitive to the temperature of the atomic cloud. The thermal energy
of the atoms must be smaller than the recoil energy
$k_BT<\hbar\omega_{\textrm{r}}=2\hbar^2k^2/m$. Otherwise, the
Doppler broadening leads to decoherence of the momentum states and
destroys the matter wave coherence and the resulting density
grating.
For CARL the situation is reversed. CARL has been observed with
temperatures much higher than the recoil temperature, i.e.~in a
regime where interferences between atoms in Raman superpositions of
momentum states are quickly smeared out by Doppler broadening. Here,
the optical cavity plays the crucial role, because it
phase-coherently stores the participating light fields for times on
the order of several $\mu\text{s}$, given by the cavity decay rate
$\kappa_{\mathrm{c}}/2\pi=20~\text{kHz}\ll\kappa_{\text{sr}}$ which
is 7 orders of magnitude smaller than in the case of SRyS without
cavity.
\subsection{Collective gain in various regimes}
\label{SecTheoryRegimes}
The important point is now that the broad range in which the collective gain can be varied in our experiment allows us to study CARL and SRyS dynamics as two opposite regimes of one system, called the good-cavity and the bad-cavity regime. Both regimes can be further divided into a semiclassical and a quantum domain and are characterized by two parameters, the CARL parameter $\rho$ and the scaled decay rate $\kappa$ \cite{Piovella01}. The CARL parameter is given by the product of the small signal gain and the decay rate of light, both in units of the recoil frequency $\omega_{\textrm{r}}=2\hbar k^2/m$
\begin{equation}\label{Eqrho}
\rho^3=\frac{G_{\textrm{c,sr}}}{\omega_{\textrm{r}}}\cdot\frac{\kappa_{\textrm{c,sr}}}{\omega_{\textrm{r}}}~.
\end{equation}
The scaled decay rate $\kappa=\kappa_{\textrm{c,sr}}/(\omega_{\textrm{r}}\rho)$ depends via $\rho$ on the gain, too. The good-cavity regime is given by $\kappa<1$, the bad-cavity regime by $\kappa>1$.\\
For the interpretation it is helpful to link the gain $G_{\textrm{c,sr}}$ to the gain bandwidth $\Delta\omega_G$, which is defined as the width of spectral range where the light scattering is exponentially amplified \cite{Piovella01}.
Let us first consider the semiclassical regime. The good-cavity limit is reached for strong saturation of the transition between the coupled cavity modes. This means that the gain, which can be interpreted as Rabi frequency, overwhelms the cavity decay width, $G_{\textrm{c,sr}}\gg\kappa_{\textrm{c,sr}}$. In this regime the transition is power-broadened by an amount $\Delta\omega_G\sim\omega_{\textrm{r}}\rho$ (see Fig. \ref{fig:Scheme}). This refers to the CARL experiments performed so far, where the gain bandwidth is proportional to the CARL parameter. In contrast, the bad-cavity regime is reached for small gain, $G_{\textrm{c,sr}}\ll\kappa_{\textrm{c,sr}}$. In this case, the gain bandwidth is given by the cavity decay rate, $\Delta\omega_G\sim\kappa_{\textrm{c,sr}}$. Obviously, the resolution of the gain profile cannot be better than $\kappa_{\textrm{c,sr}}$. This is the typical situation of SRyS.
\begin{figure}
\caption{
Representation of the two limiting cases of a long and short cavity lifetime. Shown are the cavity transmission profile
(dark shaded areas) and the gain profile (bright shaded areas). (a) When the cavity linewidth is smaller than the gain,
the good-cavity limit is realized. (b) When the gain is smaller than the cavity linewidth, the superradiant limit is realized.}
\label{fig:Scheme}
\end{figure}
The distinction between semiclassical and quantum regime is based on
the characteristic scale set by the recoil frequency
$\omega_{\textrm{r}}$. In the semiclassical regime the gain
bandwidth is large enough to amplify many adjacent momentum states
of the quantized motion $\Delta\omega_G>\omega_{\textrm{r}}$,
whereas in the quantum regime only one momentum state can be
amplified at a time $\Delta\omega_G<\omega_{\textrm{r}}$. Both the
semiclassical as well as the quantum regime have been studied in
Ref.~\cite{Schneble03} in the bad-cavity limit by varying the gain
bandwidth. Strictly the CARL gain (\ref{Eq01})-(\ref{Eq03}) is only
valid in the quantum regime. In this regime the equivalence to the
SRyS gain (\ref{Eq04})-(\ref{Eq06}) appears in its clearest way. In
the semiclassical regime valid for our experiment the CARL gain is
reduced \cite{Robb05}, as has also been observed in SRyS
\cite{Schneble03}. In our experiment, the quantum limit could be
reached by reducing atom number and pump power. This would however
generate signals which are below the detection limit of our current
setup. Nevertheless, small deviations due to the quantum nature of
the atomic motion are expected, as will be briefly discussed in the
next sections.
\subsection{Equations of motion for atoms in a ring cavity}
\label{SecTheoryEquations}% renamed: SecTheoryRegimes is already used as the label of the previous subsection
The system under consideration consists of ultra-cold or Bose-condensed atoms interacting with two counter-propagating modes of an optical cavity. The most general approach would treat all modes as quantized, in particular the atomic cloud would be described by a second-quantized matter wave field \cite{MooreMG99b,Piovella03}. Such an approach is necessary whenever mean field interactions or quantum statistical effects, like non-local interparticle correlations, particle fluctuations or entanglement, play a role. In the circumstances of our experiments, however, several simplifications can be made.
1.~All electronically excited states may be adiabatically eliminated \cite{Bonifacio95,Gangl00}. The detuning of the pump laser beam from the nearest resonance frequencies of the rubidium atom is so large that the internal dynamics is continuously at a steady state keeping the population of the excited states at a negligible level. 2.~Propagation effects of light inside the atomic cloud \cite{Bonifacio97b,Zobay06} do not need to be considered. In comparison with the SRyS experiments, where the pump light is generally detuned by amounts on the order of $1~$GHz, our experiment uses $1000$ times larger detunings. Hence, the optical density of our atomic clouds at these detunings is negligibly small. 3.~Quantum statistical effects, such as entanglement, are predicted to occur naturally as a result of CARL dynamics \cite{Piovella03}. However, our experiment is not sensitive to signatures arising from quantum statistics. 4.~We treat all light fields classically. The mode volume of our cavity is of a size such that the atom-field coupling constant is larger than the cavity decay width, but it is much smaller than the spontaneous emission decay width of the atomic transition. Hence we are far from the cavity QED regime. Even in situations where shot noise could play a role, e.g.~in seeding the instability, perturbations arising from experimental imperfections (mirror backscattering) dominate. 5.~We treat the problem in one dimension, i.e.~along the optical axis of the cavity. Transversal oscillations of the atomic cloud, which may result from the collective dynamics \cite{Elsaesser03b} are not considered here. 6.~We neglect the backaction of the atoms on the pump light field (undepleted pump approximation). This is possible because the probe light is typically three orders of magnitude weaker than the pump field. In the experiment, the pump laser is tightly phase-locked to a cavity eigenfrequency.
Consequently, as pointed out in Ref.~\cite{Kruse03b}, we can suppose a fixed phase relation between the incident pump laser field (labeled by the electric field amplitude normalized to the field generated by a single photon), $\alpha_{in}$, and the pumped cavity mode, $\alpha_+=\alpha_{in}\sqrt{\delta_{fsr}/\kappa_c}$. The phase can be arbitrarily chosen, e.g.~$\alpha_+$ can be taken as real.\\
Even though quantum statistical effects do not emerge from our measurements at temperatures close to or below the recoil limit the quantized nature of the atoms' motion influences their dynamics, as described by a model derived by Piovella and coworkers \cite{Piovella01}. Within this model and in the approximations specified above, the CARL Hamiltonian for an ensemble of $N$ atoms reads
\begin{align}\label{Eq11}
H & = \frac{1}{2m}\sum_{j=1}^N\hat{p}_j^2+\hbar\Delta_c\left(|\alpha_-|^2+|\alpha_+|^2\right)\\
& + \hbar U_0\alpha_+\sum_{j=1}^N\left(\alpha_-^*e^{-2ik\hat{z}_j}+h.c.\right)~\notag,
\end{align}
where $U_0$ is the single-photon light shift, and $\Delta_c$ the detuning between pump and probe. The motional degrees of freedom, i.e.~the position $\hat{z}_j$ and the momentum $\hat{p}_j$ of every atom, satisfy the following commutation relation $[\hat{z}_j,\hat{p}_{j^{\prime}}] =i\hbar \delta_{jj^{\prime}}$. From the Heisenberg equations $i\hbar\dot{\hat{z}}=[\hat{z},H]$ and $i\hbar\dot{\hat{p}}=[\hat{p},H]$ we derive the equations of motion for the coupled system,
\begin{align}\label{Eq12}
\frac{d\hat{z}_j}{dt} & =\frac{\hat{p}_j}{m}~,\\
\frac{d\hat{p}_j}{dt} & =-2i\hbar kU_0\alpha_+\left(\alpha_-^*e^{2ik\hat{z}_j}-\alpha_-e^{-2ik\hat{z}_j}\right)~,\notag\\
\frac{d\alpha_-}{dt} & =-(\kappa_c+i\Delta_c)\alpha_--iU_0\alpha_+\sum_{j=1}^Ne^{-2ik\hat{z}_j}~.\notag
\end{align}
In the last equation cavity damping has been introduced phenomenologically. $\hat{b}\equiv N^{-1}\sum\nolimits_je^{-2ik\hat{z}_j}$ measures the degree of atomic bunching. Starting from these equations, we treat the motion either classically or as quantized \cite{Piovella01,Slama-07}. In the first case, we simply replace the position and momentum operators by their classical expectation values. These are the basic equations used to model most of the curves shown in this paper \cite{Perrin02}.
In order to check whether quantum effects of the motion have an impact on the collective dynamics, we have derived from (\ref{Eq11}) a master equation for the density operator defining a momentum basis $\left|n\right\rangle_j$ such that $\hat{p}_j\left|n\right\rangle_j=2\hbar kn\left|n\right\rangle_j$ and $\left|\psi(\theta_j)\right\rangle =\sum_nc_j(n)\left|n\right\rangle_j$. The calculations, which are analogous to those presented in Ref.~\cite{Piovella01}, are not reproduced here. They basically show that, for the parameters used in our experiments, quantum effects of the atomic motion are small. That is, using the terminology of Ref.~\cite{Piovella01}, we are in the semiclassical regime.
\subsection{Modeling mirror backscattering and radiation pressure}
\label{SecTheoryMirrors}
Perturbative effects resulting from backscattering from the mirror
surfaces and from radiation pressure have been neglected so far.
Unfortunately, we found both effects to influence the experimental
observations, so that this idealization has to be given up. Let us
first discuss mirror backscattering. Dust particles or
irregularities on the mirror surfaces can scatter light from a
cavity mode into the counterpropagating mode. This effect is
well-known in laser gyroscopes, where it leads to phase-locking.
Interestingly, the effect is the more pronounced the better the
reflectivity of the mirrors and hence the finesse of the cavity
\cite{Cube06}. In principle, to describe mirror backscattering, one
has to know the precise locations of the scatterers on the mirrors.
As we explain in another paper \cite{Krenz07}, we can describe their
influence by a single scatterer localized at position
$z_{\textrm{s}}$ with a wavelength-dependent scattering rate
$U_{\textrm{s}}$. The scattering can be modeled in the very same way
as backscattering from atoms, except for the fact that the
scatterers are now fixed in space. Hence, we may just replace the
Hamiltonian~(\ref{Eq11}) by
\begin{equation}\label{Eq15}
H' = H+\hbar U_{\textrm{s}}\alpha_+\left(\alpha_-^*e^{-2ikz_{\textrm{s}}}+h.c.\right)~.
\end{equation}
The resulting modified equations of motion are only changed by an additional term for the evolution of the field amplitude. I.e.~the third of the equations~(\ref{Eq12}) is supplemented with a gain rate $iU_{\textrm{s}}\alpha_+$ for the probe mode resulting from photons scattered out of the pump mode by mirror backscattering. In the experiment, we determine the amount of mirror backscattering $U_{\textrm{s}}$ from independent measurements.
Radiation pressure is due to spurious population of electronically
excited states under the influence of the pump laser beam. Although,
far from resonance the effect is weak, it still leads to a
noticeable acceleration of the atoms. Gangl and Ritsch
\cite{Gangl00} have shown that the adiabatic elimination of
electronically excited states introduces additional contributions in
the classical CARL equations scaling with the Rayleigh scattering
rate $\gamma_0$. This describes the effect of recoil heating due to
radiation pressure
\begin{align}\label{Eq16}
m\frac{d^2z_j}{dt^2} & = -\hbar k\gamma_0\left(|\alpha_+|^2-|\alpha_-|^2\right)\\
& - 2i\hbar kU_0\alpha_+\left(\alpha_-e^{2ikz_j}-\alpha_-^*e^{-2ikz_j}\right)~,\notag\\
\frac{d\alpha_-}{dt} & = -(\kappa_c+N\gamma_0)\alpha_--N(\gamma_0+iU_0)\alpha_+b-iU_{\textrm{s}}\alpha_+~.\notag
\end{align}
The additional contributions not only lead to losses for the light
mode, but also exert an accelerating force onto the atoms.
Experimentally, we observe a broadening of the momentum distribution
by recoil heating which slightly impairs the collective dynamics for
measuring times longer than $100~\mu$s.
\section{Measurements}
\label{SecExperiment}
\begin{figure}
\caption{Technical drawing of the setup in the main chamber including coils for magnetic and magneto-optical trapping,
wires for a Joffe-Pritchard type trap and the ring cavity. All pieces are held together by massive copper parts
omitted in this figure for clarity.}
\label{fig:setup}
\end{figure}
We describe our experimental setup tracking the temporal sequence of an experimental run. The whole setup (shown in Fig.~\ref{fig:setup}) consisting of magnetic coils, wires and the ring cavity is placed inside a ultra-high vacuum chamber pumped by a cryogenic titanium sublimation pump and a $20$\,l/s ion getter pump to a pressure of about $10^{-11}$\,mbar. Heat produced in coils and wires inside the vacuum is dissipated via a temperature-stabilized cooling rod to a liquid nitrogen reservoir. A second vacuum chamber is connected with this main chamber via a differential pumping hole and contains a Rb partial pressure of several $10^{-7}$\,mbar. The second chamber accommodates a two-dimensional magneto-optical trap (2D-MOT) producing a cold atomic beam directed into the main chamber. From this atomic beam about $10^8$ atoms/s are recaptured in a standard magneto-optical trap (MOT) in the main chamber. After the MOT has been loaded for 15~s, the atoms are transferred into a magnetic trap produced by the same coils as the MOT. On a typical day, we load about $2\times10^8$ atoms at a temperature of $T=100~\mu\textrm{K}$ into the magnetic trap. The atoms are then magnetically transferred via a second into a third pair of coils, whereby the atoms are compressed adiabatically. The magnetic quadrupole field gradient between the third pair of coils is $160$\,G/cm in the horizontal and $320$\,G/cm in the vertical direction. With two pairs of wires separated by $1$\,mm and running parallel to the symmetry axis of the coils a Joffe-Pritchard type potential is created \cite{Silber05}. Typical values of the oscillation frequencies in this trap are $\omega_{\mathrm{r}}/2\pi=200$\,Hz and $\omega_z/2\pi=50$\,Hz at a magnetic offset field of $B_0=2$\,G with the $z$-direction pointing along the cavity mode through the gap between the wires. The vertical position of the wire trap can easily be shifted by the currents in the quadrupole coils. 
Inside the wire trap the atoms are cooled by forced evaporation: a microwave frequency is tuned resonantly to the ground state hyperfine structure and couples the trapped Zeeman state $|2,2\rangle$ and the untrapped state $|1,1\rangle$. We ramp down the frequency for 15~s starting from a detuning of $210$\,MHz and reach quantum degeneracy at a detuning of about $4$\,MHz with about $N=5\times10^5$ atoms at $T_c=800$\,nK. Almost pure condensates of $N=2\times10^5$ atoms can be achieved by ramping down to even lower frequencies. When the evaporative cooling stage is completed, the cold atoms are vertically transferred into the mode volume of the ring cavity. The ring cavity consists of one plane (IC) and two curved (HR) mirrors with a curvature radius of $R_c=10$\,cm. The round-trip length of the cavity is $8.5$\,cm, corresponding to a free spectral range of $\delta_{\textrm{fsr}}=3.5$\,GHz. One of the two counterpropagating modes is continuously pumped by a titanium-sapphire laser. The laser can be stabilized to this mode using the Pound-Drever-Hall (PDH) method. The quality factor of the cavity depends on the polarization of the incoupled light. For p-polarized light, a finesse of $F=87000$ is determined from a measured intensity decay time of $\tau=3.8~\mu\textrm{s}$. For s-polarized light the finesse is 6400.
\subsection{Experimental procedure}
\label{SecExperimentProcedure}
The measurements are performed in the following way. A cloud of cold atoms is magnetically transferred into the cavity. During this time the cavity is not pumped with light in order to prevent losses of atoms due to Rayleigh scattering.
\begin{figure}
\caption{(a) Typical measured time signal (solid line) of the probe-light power. Experimental parameters are $N=1.5\cdot10^6$, $P_+=4$\,W,
$\lambda=797.3$\,nm and $F=87000$. For visibility the pump-light power (dashed line) is scaled down by a factor of $0.001$.
(b) Simulation of the CARL equations. The measured rise of the pump-light power is used in the simulation and
the experimental parameters are fitted in order to agree with the measured time curve. The fitted
parameters ($P_+$, $N$) are in reasonable agreement with
the measured parameters.}
\label{fig:timesignal}
\end{figure}
This implies that the frequency of the laser cannot be stabilized to
a mode of the cavity during the transfer. As soon as the atoms are
inside the cavity, we switch on the pump light again and ramp its
frequency across the cavity resonance. This is done by means of a
piezo-electric transducer normally controlled by the slow branch of
the Pound-Drever-Hall (PDH) servo, which is interrupted for this
reason. As soon as the frequency is close to the cavity resonance,
the fast branch of the PDH servo acting on an acousto-optic
modulator (AOM) quickly pulls the laser frequency to the center of
the resonance and tightly locks its phase, thus compensating for the
frequency ramp. After a time of about $50~\mu$s the pump light is
turned off. The build-up time for the ring cavity pump mode is
limited by the bandwidth of the locking servo to about
$\tau_{bw}=20~\mu$s, which is longer than the cavity decay time.
As soon as the pump mode power builds up in the ring cavity, the
collective dynamics results in light scattering into the cavity
probe mode. The limited build-up time of the pump power leads to a
delayed and slightly weaker dynamics as compared to a rapid
switch-on. We study this dynamics mainly via the evolution of the
recorded probe light power $P_-$. The time signal of the probe light
shows characteristic maxima and minima like the ones presented in
Fig.~\ref{fig:timesignal}. This behavior can be explained most
easily in the case where the atoms occupy an initial momentum
eigenstate and are coupled by the coherent dynamics to a final
momentum state. The temporal evolution is a Rabi oscillation-like
change of occupation from the initial to the final state. This
causes the build-up of an atomic density grating which reaches its
maximum with half of the atoms in each state and zero contrast when
all atoms are in the initial or the final state. The scattered light
is proportional to this density grating contrast. Maxima in the
probe light power therefore occur with each change of the momentum
state. In the situation depicted in Fig.~\ref{fig:timesignal} the
dynamics leads to the simultaneous occupation of an increasing
number of momentum states. The maximum atomic density grating washes
out with time and we observe a decrease of the light power maxima.
In the following we analyze the probe light power reached at the
first maximum $P_{-,1}$, because it shows a clear dependence on atom
number $N$, pump light power $P_+$, laser wavelength $\lambda$,
finesse of the cavity $F$, and on the atomic cloud's temperature
$T$. In contrast, it is quite robust against perturbative effects
such as mirror backscattering. Simulations of the CARL dynamics like
shown in Fig.~\ref{fig:timesignal}(b) are performed by numeric
integration of (\ref{Eq16}) with the explicit Euler method. We
simulate the trajectories of $N_s=100$ atoms, each representing
$N/N_s$ real atoms. At the beginning of the simulation the atoms are
spread in position over half a wavelength with equal spacings. For
simulations of clouds with temperature $T=0$ the start momentum of
all atoms is set to $p_j=0$. For simulations of clouds with nonzero
temperature the momenta at the beginning are normally distributed
with $\langle p_j^2\rangle=mk_BT$.
\subsection{Mirror backscattering}
\label{SecExperimentMirrors}
Scattering from the mirror surfaces leads to the presence of light
in the probe mode even in the absence of atoms in the cavity. In the
presence of atoms, this light influences the atomic collective
dynamics. Fig.~\ref{fig:spr} shows the impact of mirror
backscattering on the height of the first maximum $P_{-,1}$ and on
the time delay $\Delta t$ from switching on the pump until the
maximum is reached.
\begin{figure}
\caption{Influence of mirror backscattering (a) on the peak probe power $P_{-,1}
\label{fig:spr}
\end{figure}
The backscattering rate strongly depends on the wavelength of the
pump laser, when it is resonant to an eigenfrequency of the cavity
\cite{Krenz07}. This phenomenon can be understood as interference of
the waves backscattered from all three cavity mirrors. From the
experimental point of view, the most interesting feature is that
backscattering can be avoided by a proper choice of the resonant
cavity mode. The mirror-induced probe light power varies between
almost 0 and 0.6\% of the pump power.
Backscattered light in the probe mode represents an artificial
instability, which seeds the collective dynamics. Consequently,
increased mirror backscattering reduces the time delay $\Delta t$.
On the other hand, the maximum probe light power $P_{-,1}$ decreases
with $\Delta t$, because the finite switch-on time limits the pump
light available at this stage. This behavior is verified by the
measurements shown in Fig.~\ref{fig:spr}. For these measurements we
vary the mirror backscattering by choosing different longitudinal
cavity modes \cite{Krenz07}.
In the simulations shown in the same figure, the finite switch-on time
is taken into account. The atom number and pump power are fitted in
order to reach good agreement with the experimental data, but the
general behavior can be reproduced without free parameters. For a
hypothetic sudden switch-on, we would expect a much weaker
dependence of $P_{-,1}$ on mirror backscattering.
The observation that increased mirror backscattering leads to a
faster rise of the collective dynamics only applies, when the amount
of mirror backscattering is smaller than the atomic coupling
strength $U_{\textrm{s}}<NU_0$, which is true for the above given
values. For a reduced atom number of about $N\sim 10^5$ though,
mirror backscattering is on the same order of magnitude as the
atomic coupling. In this case, it is able to suppress the collective
dynamics, which we do observe experimentally. When we use
Bose-Einstein condensed clouds, atom numbers are precisely on the
order of $10^5$. It is therefore necessary to resort to cavity modes
with ultra-low mirror backscattering. To control and cancel the
amount of mirror backscattering, we have developed a method
described in \cite{Krenz07} based on the injection of an additional
light field into the probe mode of the cavity.
\subsection{Pump power}
\label{SecExperimentPump}
The dynamics of the collective instability depends on the pump light power. A reduction of pump power leads to a decrease of the contrast of the optical standing wave resulting from the interference of the pump and probe modes.
\begin{figure}
\caption{Influence of the pump light power on (a) the peak probe power $P_{-,1}
\label{fig:pdep}
\end{figure}
This weakens the collective dynamics. In previous experiments \cite{Cube04}, where the CARL has been exposed to the dissipative and diffusive forces of an optical molasses, we observed a threshold behavior in the pump power. In contrast, the present setup lacks a strongly dissipative reservoir, so that it is unclear whether CARL with BECs can show a threshold behavior. The only channel available to dissipation in this setup is transmission through the cavity mirrors. This provides a coupling of the cavity modes to the electromagnetic field of the surroundings, which to good approximation can be regarded as a zero-temperature reservoir of photons. One therefore would expect dissipation without diffusion.
We observed that temperature effects can lead to a threshold-like behavior, if the atoms are not Bose-condensed. Fig.~\ref{fig:pdep}(a) shows measurements of the maximum probe light power $P_{-,1}$ as a function of the pump power $P_+$. The data agree very well with simulations (solid line) using the parameters specified in the captions of Fig.~\ref{fig:pdep} and a temperature of the atoms of $T=800$\,nK. The dotted line is a simulation with the same parameters, but at temperature $T=0$. Down to a pump power of about $P_+\approx0.1$\,W, both curves coincide. Below this value the probe power is considerably reduced if the temperature of the atoms is finite. This demonstrates that thermal motion of the atoms can suppress the collective dynamics if the gain is not strong enough \cite{Note01}.
\begin{figure}
\caption{Simulations (solid lines) of the transition from bad-cavity to good-cavity regime with respect to (a) atom number and
(b) pump power. Each dependency is plotted for two values of the finesse $F$. The experimental parameters are $P_+=1$\,W in
(a) and $N=10^6$ in (b). For the $F=87000$ simulations the wavelength is $\lambda=796.1$\,nm for $F=6400$ it is
$\lambda=795.3$\,nm. The vertical lines in each figure show the value of the parameter held fixed in the other part of the
figure. They characterize the region where our experiments take place. The dotted (dashed) lines show the asymptotic behavior
typical for the good-cavity (bad-cavity) regime. The deviation of the simulations from the asymptotic behavior [solid curves in
Fig.~(a) below $N=10^4$] stems from mirror backscattering, which plays a major role for small atom numbers and suppresses the
collective dynamics. The simulations are performed at $T=0$ in
order to show the underlying physics without being
influenced by temperature effects.}
\label{fig:fdep}
\end{figure}
Another observable which depends on the pump power is the time
difference $\Delta t_{1,2}$ between the first and the second
superradiant light pulse. This time difference corresponds to the
typical time-scale, on which the atomic momentum distribution is
shuffled between different momentum states. The stronger the pump
power is, the faster the momentum distribution changes. This
connection is shown in Fig.~\ref{fig:pdep} (b), where the data agree
very well with a simulation with the above given parameters and an
atomic temperature of $T=0$. A simulation with the realistic atomic
temperature of $T=800$\,nK hardly differs from the $T=0$ curve and
is omitted in Fig.~\ref{fig:pdep} (b) for clarity. This shows that
the time difference $\Delta t_{1,2}$ is quite insensitive to the
momentum spread of the atoms.
\subsection{Finesse}
\label{SecExperimentFinesse}
The CARL model comprises different regimes, which are denoted as good-cavity and bad-cavity regime. While former work in our group was performed in the good-cavity regime \cite{Kruse03,Cube04}, the SRyS experiments are very far in the bad-cavity regime \cite{Inouye99,Schneble03}. With our new apparatus we are able to reach both regimes by varying the finesse of the cavity and to find characteristic signatures of the regimes in the dependence on certain experimental parameters. The maximum probe light power scales in the good-cavity regime with $P_{-,1}\propto N^{4/3}\cdot P_+^{1/3}$ and in the bad-cavity regime with $P_{-,1}\propto N^2\cdot P_+$ \cite{Bonifacio94}. Which regime is reached depends not only on the finesse $F$, but also on the atom number and the pump power themselves.
\begin{figure}
\caption{Measured dependency of the maximum probe light power as a function of atom number for values of the finesse of
(a) $F=87000$ and $F=6400$. The parameters are a) $\lambda=796.1$\,nm, $P_+=1.43$\,W and b) $\lambda=795.3$\,nm, $P_+=66$\,mW.
Comparing the data points to the asymptotic behavior shown in the dotted and dashed lines the situation in a) can be
identified as good-cavity regime and the situation in (b) as bad-cavity regime. The behavior is confirmed by simulations
with no free parameters (solid lines).
The values of the data points are scaled by (a) 0.75 and (b) 2.8 in order to
improve agreement with the simulation. This systematic error of the data points is due to uncertainties in the
calibration of the probe light power. This calibration depends on the polarisation of the light, and for this reason
we have to apply different scalings for low and high finesse. Nevertheless, the dependency on atom number,
which in the logarithmic plot shows up as a different slope, is not changed by this pure
multiplication. The stochastic error lies within the size of the markers.}
\label{fig:ndep}
\end{figure}
As discussed in Sec.~\ref{SecTheoryRegimes}, the regime is determined by the relative size of the cavity decay rate, $\kappa_{\mathrm{c}}\sim F^{-1}$, and the gain bandwidth which depends on the collective gain $G\sim nNU_0^2/\kappa_{\mathrm{c}}$. Hence, the good-cavity regime is characterized by large atom numbers and large pump powers, and the bad-cavity regime by small atom numbers and small pump powers. This feature is shown in Fig. \ref{fig:fdep}, where the dependence is simulated for the two values of the finesse accessible to our experiment. As can be seen, the transition between the two regimes is not sudden, but spreads across a wide range of atom number and pump power.
Measurements of the dependence of the maximum probe light power on atom number are shown in Fig.~\ref{fig:ndep}. The finesse of the ring cavity can be set to either $F=87000$ in Fig.~(a) or $F=6400$ in Fig.~(b) by simply rotating the polarization of the pump light with respect to the symmetry plane of the cavity. This enables us to probe both the good-cavity and the bad-cavity regime. The asymptotic dependency in the good-cavity regime is shown by dotted lines, the dependency in the bad-cavity regime by dashed lines. The solid line represents a simulation with no free parameters. By varying the atom number in (a) between $N_1=3\cdot10^5$ and $N_2=2\cdot10^6$ the corresponding CARL parameters [Eq.~(\ref{Eqrho})] \cite{Bonifacio94} are $\rho_1=4.7$ and $\rho_2=7.0$, the corresponding scaled decay rates are $\kappa_1=\kappa_c/\omega_{\mathrm{r}}\rho_1=0.3$ and $\kappa_2=0.2$. The conditions $\kappa_{1,2}<1$ and $\rho_{1,2}>1$ are typical for the semi-classical good-cavity regime. Indeed, the data points are lying close to the good-cavity theoretical lines. In Fig.~\ref{fig:ndep}(b) the measured atom numbers between $N_3=1.1\cdot10^6$ and $N_4=2.5\cdot10^6$ correspond to CARL parameters between $\rho_3=5.1$ and $\rho_4=6.7$ and scaled decay rates between $\kappa_3=3.7$ and $\kappa_4=2.8$. The conditions $\kappa_{3,4}>1$ and $\rho_{3,4}>\kappa_{3,4}$ are typical for the semi-classical bad-cavity regime. This is confirmed by the data points which seem to be approximated by the bad-cavity asymptotic line for high atom numbers. The discrepancy for low atom numbers is due to mirror backscattering. This effect is also visible in the simulation.
\subsection{Temperature}
With our apparatus the atomic temperature can be varied within a range from below one $\mu\textrm{K}$ to several tens of $\mu\textrm{K}$.
\begin{figure}
\caption{(a) Measured time signal of the probe-light for different atomic temperatures. For clarity the curves are shifted by
$0.35$\,mW from each other. The experimental parameters are for all curves $N=10^6$, $\lambda=796.1$\,nm and $F=87000$.
The signal decreases and the contrast is washed out for rising temperature.
(b) Maximum probe-light power as a function of temperature extracted from (a).}
\label{fig:tdep}
\end{figure}
This allows us to systematically examine the influence of the temperature on the collective dynamics and identify the role of quantum statistics in the dynamics of CARL and SRyS. Fig.~\ref{fig:tdep}(a) presents recorded time signals of the probe light for different temperatures.
The curves show characteristic trains of superradiant pulses. With rising temperature the maximum probe-light power decreases and subsequent pulses are washed out. The bottom curve, which corresponds to a temperature of $T=40~\mu\textrm{K}$, shows no modulation of the light power and resembles the time evolution of pure mirror backscattering. The decrease of the maximum probe light power is separately plotted in (b). Obviously, a rising temperature leads to a suppression of the collective dynamics. This can be explained by the fact that the self-amplified optical standing wave has to arrange the atoms into an atomic grating. This is only possible if the depth of the optical lattice is larger than the thermal energy of the atoms. For that reason a rising temperature leads to fewer atoms participating in the gain mechanism. This is the reason why we cannot see CARL activity in the present experiment with atom numbers of $N=10^6$ at a temperature of $T=40~\mu\textrm{K}$, while we observed CARL in recent experiments with atom numbers of $N=10^7$ at temperatures well above $T=100~\mu\textrm{K}$ \cite{Kruse03b,Cube04}. The fact that CARL is observable at all with thermal clouds of atoms, is the proof that quantum statistical phenomena do not play a role for the dynamics of CARL.
\subsection{Evaluation of absorption images}
\label{SecExperimentTof}
\begin{figure}
\caption{(a) Typical absorption image of a thermal atomic cloud after the CARL dynamics and $10$\,ms ballistic expansion.
(b) Vertically integrated optical density of the cloud. (c) Measured mean momentum as a function of pump power
compared to a simulation with no free parameter.
(d) Simulated time evolution of the mean momentum. The momentum is given in units of the recoil momentum
$p_{\mathrm{r}
\label{fig:carlpic}
\end{figure}
After a time period where the atoms are exposed to collective
dynamics, the atoms are released from the magnetic trap. The atomic
cloud expands ballistically, and after a time-of-flight of typically
$t_{\textrm{TOF}}=10$\,ms an absorption image is recorded, revealing
the momentum distribution of the atoms in the trap.
Fig.~\ref{fig:carlpic}(a) shows a typical image of a thermal atomic
cloud with (b) the vertically integrated optical density. The
momentum can be calculated from the horizontal displacement of the
atoms. Individual momentum states cannot be resolved, because the
momentum distribution appears broadened by the thermal motion.
Nevertheless, interesting information like the mean momentum
$\langle p\rangle$ can be extracted from such images. Therefore, we
calculate the center-of-mass of the vertically integrated optical
density. This mean momentum can be examined as a function of the
experimental parameters. Fig.~\ref{fig:carlpic}(c) shows this
dependency on the pump power. The measurements are very well
reproduced by simulations of the CARL equations. The simulations in
Fig.~\ref{fig:carlpic}(d) show that the mean momentum increases
rapidly during the first $T=50~\mu\textrm{s}$ and then starts to
saturate. The saturation is due to the presence of the optical
cavity restricting the range of accessible momentum states. In the
simulations, we assume a realistic temperature of
$T=1.2~\mu\textrm{K}$. The strong spatial modulation of the atomic
density in Fig.~\ref{fig:carlpic}(a) depicts the momentum
distribution generated by the collective dynamics. This behavior is
qualitatively supported by simulations.
If as shown in Fig.~\ref{fig:beccarl} a Bose-Einstein condensate is
used, we are able to resolve individual momentum states for (a) no
pump light-field and (b) a pump light-power of
$P_+^{\textrm{max}}\approx1$\,W. Due to the short interaction time
of the BEC with the light-field of $t_{\textrm{ia}}\approx
40~\mu\textrm{s}$ only two superradiant maxima are observed in (c).
The measured atomic momentum distribution after the interaction in
(d) shows a depopulation of the $|p\rangle=|0\rangle$ state and a
shift towards momentum states with positive momentum. The
substantial population of the momentum state with negative momentum
$|p\rangle=|-1\rangle$ is due to the semiclassical behavior of the
system and is equivalent to the observation of momentum spread in
\cite{Schneble03}.
\begin{figure}
\caption{Absorption images of Bose-Einstein condensates after $10$\,ms ballistic expansion (a) without CARL and (b) with CARL
activity. (c) Simultaneously recorded pump (dashed line) and probe power (solid line). Pump power is scaled down by $10^{-4}
\label{fig:beccarl}
\end{figure}
\section{Conclusion}
We conclude this paper with the statement that the collective
atomic recoil laser and superradiant Rayleigh scattering are two
faces of the same medal. Previous theoretical work
\cite{Bonifacio97} has shown that the characteristic quantity
distinguishing both effects is the collective gain bandwidth
compared to the cavity decay rate. Our experiment is designed to
give access to both regimes, the superradiant (or bad-cavity) regime
and the good-cavity regime. The observed characteristic dependence
of the instability amplitude on the atom number allows us to clearly
identify the regimes, and to experimentally demonstrate the
intrinsic link between both phenomena.
Another important result is the presence of collective instabilities
at high temperatures. In earlier experiments, CARL dynamics have
been observed with atomic clouds as hot as several $100~\mu$K
\cite{Kruse03b}. This proves that the gain process underlying both
SRyS and CARL is not based on quantum statistics, but on
cooperativity \cite{Moore01}. This results in a better
understanding of the intricate relationship between CARL and
superradiance.
This experiment represents the first study of Bose-Einstein
condensates in macroscopic cavities. For the experiments described
within this publication though, the quantum degeneracy of the atoms
is unimportant. However, in future experiments, we want to study the
role of quantum statistics in a regime, where photonic and
matter-wave modes are coherently coupled \cite{Horak00}. In this new
regime the CARL dynamics may generate entangled states between atoms
and scattered photons \cite{MooreMG99,Piovella03}.
Another challenge would be to reach the so-called quantum limit.
This limit is distinguished from the semiclassical limit by the fact
that the gain bandwidth is so small,
$\Delta\omega_G\ll\omega_{\mathrm{r}}$, that only adjacent momentum
states of the atomic motion are coupled. This case (provided the
temperature is very low) results in a train of self-similar
superradiant pulses \cite{Piovella01b}. In our experiment this
regime could be reached by enhancing the finesse of the ring cavity
or by reducing $\omega_{\mathrm{r}}$, e.g.~by tuning the pump laser
to an atomic resonance at a much higher frequency. To treat this
regime the use of quantized atomic motion in the CARL equations is
compulsory \cite{Piovella01b}.
This work has been supported by the Deutsche Forschungsgemeinschaft
(DFG) under Contract No.~Co~229/3-1. We would like to thank W.~Ketterle
for helpful discussions.
\end{document} |
\begin{document}
\title{Wavelet Design in a Learning Framework}
\author{Dhruv~Jawali,~Abhishek~Kumar~and~Chandra~Sekhar~Seelamantula,~\IEEEmembership{Senior Member,~IEEE}
\IEEEcompsocitemizethanks{
\IEEEcompsocthanksitem This work has been submitted to the IEEE Transactions on Pattern Analysis and Machine Intelligence for possible publication. Copyright may be transferred without notice, after which this version may no longer be accessible. \protect\\
\IEEEcompsocthanksitem D. Jawali is with the National Mathematics Initiative, Indian Institute of Science, Bangalore - 560012, India (Email: [email protected]).
\IEEEcompsocthanksitem A. Kumar is presently with the Electrical and Computer Engineering Department, Rice University, Texas 77005, USA (Email: [email protected]).
\IEEEcompsocthanksitem C. S. Seelamantula is with the Department of Electrical Engineering, Indian Institute of Science, Bangalore - 560012, India (Email: [email protected], Tel.: +91 80 22932695, Fax: +91 80 23600444). \protect\\
\IEEEcompsocthanksitem \indent Sections~\ref{sec:fb_ae}, \ref{sec:vm} and \ref{sec:perf_measures} appeared in the Proceedings of IEEE International Conference on Acoustics, Speech, and Signal Processing 2019 \cite{icassp2019}.}}
\markboth{}{Wavelet Design in a Learning Framework}
\IEEEtitleabstractindextext{
\begin{abstract}
Wavelets have proven to be highly successful in several signal and image processing applications. Wavelet design has been an active field of research for over two decades, with the problem often being approached from an analytical perspective. In this paper, we introduce a learning based approach to wavelet design. We draw a parallel between convolutional autoencoders and wavelet multiresolution approximation, and show how the learning angle provides a coherent computational framework for addressing the design problem. We aim at designing data-independent wavelets by training filterbank autoencoders, which precludes the need for customized datasets. In fact, we use high-dimensional Gaussian vectors for training filterbank autoencoders, and show that a near-zero training loss implies that the learnt filters satisfy the perfect reconstruction property with very high probability. Properties of a wavelet such as orthogonality, compact support, smoothness, symmetry, and vanishing moments can be incorporated by designing the autoencoder architecture appropriately and with a suitable regularization term added to the mean-squared error cost used in the learning process. Our approach not only recovers the well known Daubechies family of orthogonal wavelets and the Cohen-Daubechies-Feauveau family of symmetric biorthogonal wavelets, but also learns wavelets outside these families.
\end{abstract}
\begin{IEEEkeywords}
Wavelet design, convolutional autoencoders, perfect reconstruction filterbanks, multiresolution approximation, vanishing moments constraint.
\end{IEEEkeywords}}
\maketitle
\IEEEdisplaynontitleabstractindextext
\IEEEpeerreviewmaketitle
\IEEEraisesectionheading{\section{Introduction}\label{sec:introduction}}
\label{sec:intro}
\IEEEPARstart{W}{avelet} representations are at the heart of several successful signal and image processing applications developed over the past three decades. Wavelets endowed with important properties such as {\it vanishing moments} are ideal for detecting singularities and edges in signals \cite{mallat1992singularity, mallat1992characterization}. The ability of wavelets to produce sparse representations of natural and medical images has led to several successes in solving inverse problems such as image denoising \cite{donoho1995adapting, luisier2010sure, luisier2012cure, seelamantula2015image}, deconvolution/deblurring \cite{figueiredo2003algorithm, bioucas2007new, li2017pure}, reconstruction \cite{chan2003wavelet}, image compression \cite{taubman2002jpeg2000} etc. Wavelets have also given rise to multidimensional generalizations such as curvelets \cite{candes2004new}, contourlets \cite{do2005contourlet}, surfacelets \cite{lu2007multidimensional}, directionlets \cite{velisavljevic2006directionlets}, etc. A review of these representations can be found in \cite{jacques2011panorama}.\\
\indent Wavelet filterbanks implementing a multiresolution approximation have been incorporated as fixed/non-trainable blocks within trainable convolutional neural networks \cite{kang2017deep, liu2018multi}, achieving state-of-the-art performance for inverse problems in image restoration. Scattering convolutional networks incorporating wavelets have been developed by Bruna and Mallat \cite{bruna2013invariant}. The resulting representations are translation-invariant, stable to deformations, and preserve high-frequency content, all of which are important requirements for achieving high-accuracy image classification performance. Hybrid architectures that use the wavelet scattering network in conjunction with trainable neural network blocks have been shown to provide competitive image classification performance, and have also been used to represent images within a generative adversarial framework \cite{oyallon2018scattering}. Such phenomenal successes have been possible with wavelets because of their ability to provide parsimonious and invariant representations.\\
\indent Wavelet design lies at the interface between signal processing and mathematics, and has benefited greatly by a fruitful exchange of ideas between the two communities. Wavelets can be designed to constitute a frame \cite{kovacevic2007lifea, kovacevic2007lifeb}, Riesz basis, or orthonormal basis for the space of square-integrable functions $L^2(\mathbb{R})$ \cite{primer1998introduction, mallat2008wavelet, chui2016introduction}. Of particular interest is the orthonormal flavor in a multiresolution approximation (MRA) setting, which comprises a nested subspace structure that allows one to move seamlessly across various resolutions. In particular, considering the dyadic MRA and orthonormal wavelet bases satisfying the two-scale equation, efficient algorithms have been developed to compute the projections of a function $f \in L^2(\mathbb{R})$ at different resolutions --- this property also establishes a close connection between wavelet decomposition and filterbank analysis \cite{Vetterli86, mallat1989theory}. Thanks to this connection, the problem of designing orthonormal wavelet bases that satisfy chosen properties becomes equivalent to one of optimizing discrete filters that obey certain design conditions. One could, therefore, start with a discrete filter and determine the corresponding wavelet or vice versa \cite{primer1998introduction, mallat2008wavelet}.
\subsection{Motivation for This Paper}
The design of wavelets has largely been carried out analytically. For instance, consider the construction of Daubechies wavelets of a certain order and vanishing moments \cite{daubechies1988orthonormal, daubechies1992ten}. The vanishing moments property is incorporated explicitly following the Strang-Fix condition \cite{strang2011fourier}, which partly determines the wavelet filter. The remaining part of the filter is determined by enforcing the conjugate-mirror filter condition and solving for the filter coefficients based on Bezout's theorem and Kolmogorov spectral factorization. Filterbank design, on the other hand, employs sophisticated optimization machinery to enforce the perfect reconstruction conditions \cite{vetterli1995wavelets, strang1996wavelets, vaidyanathan2006multirate}.\\
\indent In this paper, we ask if one could adopt a {\it learning} strategy to revisit the problem of designing perfect reconstruction filterbanks (PRFBs) and wavelets. Given the enormous attention that machine learning approaches have been receiving of late due to their phenomenal successes, the quest is but pertinent. Our objective is to leverage the state-of-the-art machine learning techniques and specialized tools for solving the problem of wavelet design. We draw a parallel between convolutional autoencoders and perfect reconstruction filterbanks, which casts the design problem within a learning framework. Recently, Pfister and Bresler introduced an undecimated filterbank design approach to learning \emph{data-adaptive} sparsifying transforms \cite{pfister2018learning}. They employed techniques such as stochastic gradient descent and automatic differentiation to optimize the filters. The problem of learning data-adaptive wavelet frames for solving inverse problems has also been explored by Tai and E \cite{jmlrwaveletlearning}. In this paper, our objective is to design \emph{data-independent} two-channel critically sampled PRFBs satisfying certain properties, and thereafter, to determine the corresponding wavelet bases using the recently developed tools in machine learning.
\subsection{Our Contributions}
We begin by reviewing the crucial connections between a dyadic wavelet transform and PRFBs (Section~\ref{sec:overview}), which are well established in the literature and also form the foundation for the viewpoint adopted in this paper. The starting point for our developments is to interpret a PRFB as a convolutional autoencoder but without the max-pool and activation nonlinearities in the encoder/decoder blocks --- essentially, we have a \emph{filterbank autoencoder}. Once this analogy is established, a plethora of techniques and tools used in state-of-the-art deep learning approaches become readily applicable. We train the filterbank autoencoder by minimizing the mean-square error (MSE) loss with the training data being high-dimensional Gaussian vectors (Section~\ref{sec:wv_lf}). The use of Gaussian vectors for training serves as a scaffolding to steer the optimization objective and renders the autoencoder data-independent. Properties such as sparsity for piecewise-regular signals have to be incorporated via other means, for instance, using vanishing moments. We then proceed with designing 1-D orthonormal wavelets with a specified number of vanishing moments (Section~\ref{sec:1d_wv}). The formalism is adapted to handle the biorthogonal case as well. The parameters of the training procedure are the filter lengths and the number of vanishing moments. By an appropriate choice of the parameters, we obtain several well-known wavelets, for instance the Daubechies wavelets and Symmlets in the orthonormal case and members of the Cohen-Daubechies-Feauveau (CDF) family \cite{cohen1992biorthogonal} in the biorthogonal case. We do impose necessary checks to ensure that the learnt filters indeed generate stable Riesz bases. The proposed framework enables one to learn wavelets that are outside of these classes as well. 
For designs that have conflicting constraints and for which no solution exists, for instance, perfect symmetry in the case of orthonormal wavelets with more than one vanishing moment, our framework indicates that as well. We also show that one can design asymmetric biorthogonal wavelets using our approach. Concluding remarks and potential directions for future work are presented in Section~\ref{sec:conclusions}.
\subsection{Notation}
Scalars are denoted by lowercase letters (e.g. $m$), vectors by boldface lowercase letters (e.g. $\boldsymbol{h}$) and matrices by boldface uppercase letters (e.g. $\boldsymbol{H}$). The transpose of $\boldsymbol{H}$ is denoted as $\boldsymbol{H}^{\mathrm{T}}$. The $n \times n$ identity and zero matrices are denoted by $\boldsymbol{I}_n$ and $\boldsymbol{0}_n$, respectively. The zero vector is denoted by $\boldsymbol{0}$. Discrete signals are represented as $h[n]$. In this paper, we focus only on compactly supported filters. Therefore, interpreting a 1-D filter $h[n]$ as a finite-dimensional vector $\boldsymbol{h}$ is natural. Fourier transforms are denoted by a caret or hat notation. The context would disambiguate whether the Fourier transform is in the continuous-time domain or the discrete-time domain. For instance, given a sequence $h[n]$, the discrete-time Fourier transform (DTFT) is denoted by $\hat{h}(\omega)$, which is $2\pi$-periodic. Similarly, for a function $\phi(t)$, the continuous-time Fourier transform (CTFT) is denoted by $\hat{\phi}(\omega)$, which is in general not periodic. The space of square-integrable functions is denoted as $L^2(\mathbb{R})$, and the corresponding inner-product is defined as $\langle x, y \rangle = \int_{-\infty}^{+\infty} x(t) y^*(t)\,\mathrm{d}t$, where $y^*(t)$ is the complex conjugate of $y(t)$. The space of square-summable sequences is denoted as $\ell^2(\mathbb{Z})$, and the corresponding inner-product is denoted by $\langle x, y \rangle = \displaystyle\sum_{n \in \mathbb{Z}} x[n] y^*[n]$.
\section{A Review of 1-D Wavelet Design}
\label{sec:overview}
\indent A wavelet function $\psi(t) \in L^2(\mathbb{R})$ has zero average, i.e., $\int \psi(t)\,\mathrm{d}t = 0$, and is localized in the time-frequency plane. The family of functions $$\left\{\psi_{a,b}(t) = \frac{1}{\sqrt{a}}\psi\left(\frac{t-b}{a}\right)\right\}_{a \in \mathbb{R}^+, b \in \mathbb{R}},$$
obtained by time-shifting and scaling the prototype function $\psi(t)$, could be designed to constitute either overcomplete or orthonormal bases for functions in $L^2(\mathbb{R})$.
\subsection{Multiresolution Approximation}
\indent Central to wavelet analysis is the multiresolution approximation developed by Mallat \cite{mallat1989theory}.
A multiresolution approximation (cf. Definition 7.1, \cite{mallat2008wavelet}) is a sequence of closed nested subspaces $\{ V_j \}_{j \in \mathbb{Z}}$ of $L^2(\mathbb{R})$ --- called the {\it approximation subspaces} --- which can be used to represent signals at varying resolutions. The space $V_j$ corresponding to resolution $2^{-j}$ is spanned by the orthonormal bases $\{ \phi_{j,n}(t) := \phi_j(t - n) \}_{n \in \mathbb{Z}}$, where $\phi_j(t)$ is obtained by time-scaling the prototype function $\phi(t)$ as follows:
$$
\phi_j(t) = \frac{1}{\sqrt{2^{j}}} \phi \left( \frac{t}{2^{j}} \right).
$$
The function $\phi(t)$ is the \emph{scaling function} and constitutes the generator kernel for the space $V_0$. The nesting property means that a lower-resolution subspace $V_{j + 1}$ is contained within a higher-resolution space $V_j$, i.e., $V_{j+1} \subset V_j, \forall j \in \mathbb{Z}$. Let $W_{j+1}$ denote the orthogonal complement of $V_{j+1}$ in $V_j$, which leads to the direct-sum decomposition: $V_{j} = V_{j + 1} \oplus W_{j + 1}$. $W_j$ is the {\it detail subspace} and is spanned by $\{\psi_{j,n}(t) := \psi_j(t - n)\}_{n \in \mathbb{Z}}$, where $\psi_{j,n}(t)$ is the time-scaled and translated version of the \emph{wavelet function} $\psi(t)$. The generators for $V_{j + 1}$ and $W_{j + 1}$ satisfy the \emph{two-scale equations}:
\begin{flalign}
\label{eqn:2s_phi}
\phi_{j + 1} \left( t \right) &= \sum\limits_{n \in \mathbb{Z}} h[n] \phi_j(t - n), \text{and}\\
\label{eqn:2s_psi}
\psi_{j + 1} \left( t \right) &= \sum\limits_{n \in \mathbb{Z}} g[n] \phi_j(t - n),
\end{flalign}
respectively, where the filters $h[n]$ and $g[n]$ are referred to as the {\it scaling} and {\it wavelet} filters, respectively. Given a signal $f \in L^2(\mathbb{R})$, the {\it approximation} and {\it detail coefficients} at resolution $2^{-j}$, denoted by $a_j[n]$ and $d_j[n]$, respectively, are given by the orthogonal projection of $f$ onto the spaces $V_j$ and $W_j$, respectively:
\begin{flalign}
a_j[n] = \langle f, \phi_{j, n} \rangle; \quad d_j[n] = \langle f, \psi_{j, n} \rangle.
\end{flalign}
The representation coefficients at various scales are computed efficiently by Mallat's algorithm \cite{mallat1989theory, mallat2008wavelet}:
\begin{flalign}
\label{eqn:fb_1}
a_{j + 1}[n] = \sum\limits_{m \in \mathbb{Z}} h[m - 2n] a_j[m] = (a_j * \bar{h})[2n], \\
\label{eqn:fb_2}
d_{j + 1}[n] = \sum\limits_{m \in \mathbb{Z}} g[m - 2n] a_j[m] = (a_j * \bar{g})[2n],
\end{flalign}
where $\bar{h}[n] := h[-n]$ and $\bar{g}[n] := g[-n]$ are the corresponding time-reversed sequences. Equations~(\ref{eqn:fb_1}) and (\ref{eqn:fb_2}) correspond to the lowpass and highpass outputs of the analysis filterbank illustrated in \figurename{~\ref{fig:prfb}}, with input $x[n] = a_j[n]$. The higher-resolution approximation coefficients are computed using the lower-resolution approximation and detail coefficients as follows:
\begin{align}
\label{eqn:fb_syn}
a_{j}[n] = \sum\limits_{m \in \mathbb{Z}} \tilde{h}[n - 2m] a_{j+1}[m] + \sum\limits_{m \in \mathbb{Z}} \tilde{g}[n - 2m] d_{j+1}[m],
\end{align}
which corresponds to the output of the synthesis filterbank illustrated in \figurename{~\ref{fig:prfb}}, where the filters are related as $\tilde{h}[n] = h[n]$, $\tilde{g}[n] = g[n]$, and $g[n] = (-1)^{1-n} h[1 - n]$.
So far, we have assumed that $\{ \phi_{j, n}(t) \}_{j, n \in \mathbb{Z}}$ and $\{ \psi_{j, n}(t) \}_{j, n \in \mathbb{Z}}$ form orthonormal bases for $V_j$ and $W_j$, respectively. Instead of orthonormal bases, one could also consider Riesz bases for the spaces $V_j$ and $W_j$. A Riesz basis $\{ \phi_{j, n}(t) \}_{j, n \in \mathbb{Z}}$ of the space $V_j$ is characterized by two constants $0 < \sigma_{\text{min}} \leq \sigma_{\text{max}} < \infty$ such that for all $x(t) \in V_j$,
$$
\sigma_{\text{min}} \|x\|_2^2 \leq \sum_{j, n \in \mathbb{Z}} |\langle x, \phi_{j, n} \rangle|^2 \leq \sigma_{\text{max}} \|x\|_2^2.
$$
The dual spaces $\tilde{V}_j$ and $\tilde{W}_j$ corresponding to the Riesz bases $V_j$ and $W_j$, respectively, would be spanned by their biorthogonal counterparts $\{ \tilde{\phi}_{j^{\prime}, n^{\prime}}(t) \}_{j^{\prime}, n^{\prime} \in \mathbb{Z}}$ and $\{ \tilde{\psi}_{j^{\prime}, n^{\prime}}(t) \}_{j^{\prime}, n^{\prime} \in \mathbb{Z}}$, such that:
\begin{flalign*}
\langle \phi_{j, n}, \tilde{\phi}_{j^{\prime}, n^{\prime}} \rangle =
\langle \psi_{j, n}, \tilde{\psi}_{j^{\prime}, n^{\prime}} \rangle = \delta[j - j^{\prime}] \, \delta[n - n^{\prime}],
\end{flalign*}
where $\delta[\cdot]$ denotes the Kronecker delta. The biorthogonal bases also satisfy two-scale equations, with corresponding scaling and wavelet filters $\tilde{h}[n]$ and $\tilde{g}[n]$, respectively. Just as $h[n]$ and $g[n]$ constitute the lowpass and highpass filters on the analysis side, the filters $\tilde{h}[n]$ and $\tilde{g}[n]$ constitute their counterparts on the synthesis side, resulting in a perfect reconstruction filterbank.
\subsection{Perfect Reconstruction Filterbanks}
\begin{figure}
\caption{A 1-D two-channel filterbank, with analysis filters $\bar{h}[n]$ and $\bar{g}[n]$, and synthesis filters $\tilde{h}[n]$ and $\tilde{g}[n]$.}
\label{fig:prfb}
\end{figure}
Consider the two-channel filterbank shown in \figurename{~\ref{fig:prfb}}. Enforcing ${\hat x}[n] = x[n]$ gives rise to the following perfect reconstruction (PR) conditions \cite{Vetterli86}:
\begin{alignat}{3}
\label{eqn:pr}
& \text{PR-1}: \quad &\hat{h}^*(\omega) \hat{\tilde{h}}(\omega) &+ \hat{g}^*(\omega) \hat{\tilde{g}}(\omega) = 2, \\
\label{eqn:ac}
\text{and}\quad&\text{PR-2}: \quad &\hat{h}^*(\omega + \pi) \hat{\tilde{h}}(\omega) &+ \hat{g}^*(\omega + \pi) \hat{\tilde{g}}(\omega) = 0.
\end{alignat}
Constraining the filters $g[n]$ and $\tilde{g}[n]$ in terms of $\tilde{h}[n]$ and $h[n]$ as follows:
\begin{flalign}
\label{eqn:ac1}
g[n] &= a\,(-1)^{1 - n} \tilde{h}[1 - n], \text{and}\\
\label{eqn:ac2}
\tilde{g}[n] &= a^{-1} (-1)^{1 - n} h[1 - n],
\end{flalign}
ensures that the PR-2 condition in Equation~(\ref{eqn:ac}) is automatically satisfied. The PR-1 condition then takes the form:
\begin{flalign}
\label{eqn:pr1}
\hat{h}^*(\omega) \hat{\tilde{h}}(\omega) &+ \hat{h}^*(\omega + \pi) \hat{\tilde{h}}(\omega + \pi) = 2,
\end{flalign}
which is both necessary and sufficient for the filterbank to be PR. Hence, the perfect reconstruction filterbank is completely specified by the two filters $h[n]$ and $\tilde{h}[n]$, and is referred to as a \emph{biorthogonal} filterbank \cite{cvetkovic1998oversampled}. Further, setting $\tilde{h}[n] = h[n]$ reduces it to an \emph{orthogonal} filterbank, which satisfies the conjugate mirror filter (CMF) condition:
\begin{flalign}
\label{eqn:qmf}
|\hat{h}(\omega)|^2 + |\hat{h}(\omega + \pi)|^2 = 2.
\end{flalign}
Thus, the multiresolution approximation and PRFBs are very closely related to each other.
\subsection{Wavelet Construction From Orthogonal Filterbanks}
\begin{figure}
\caption{An infinitely cascaded two-channel filterbank, obtained by iterating the analysis filterbank over the approximation coefficients at each level. If the filters $\bar{h}[n]$ and $\bar{g}[n]$ satisfy the conjugate mirror filter condition, the infinite cascade gives rise to a wavelet multiresolution approximation.}
\label{fig:cascade}
\end{figure}
A key result by Mallat and Meyer \cite{meyer1986ondelettes, mallat1989theory} states that an orthogonal filterbank gives rise to a wavelet multiresolution approximation if, and only if, the filter $h[n]$ satisfying (\ref{eqn:qmf}) also satisfies $\hat{h}(0) = \sqrt{2}$. Such a filter $h[n]$ and the filter $g[n]$ specified by (\ref{eqn:ac1}) constitute the scaling and wavelet filters of an MRA, respectively. The corresponding scaling and wavelet {\it functions} are obtained by iterating over the resolution parameter $j$ in the two-scale equation, which corresponds to an infinite cascade of the filterbank as illustrated in \figurename{~\ref{fig:cascade}}. More precisely, in the Fourier domain, the infinite cascade takes the form:
\begin{flalign}
\label{eqn:filttowave}
\hat{\phi}(\omega) = \prod\limits_{j=1}^{\infty}\frac{\hat{h}\left(2^{-j}\omega\right)}{\sqrt{2}}\text{;} \quad
\hat{\psi}(\omega) = \frac{1}{\sqrt{2}} \hat{g}\left(\frac{\omega}{2}\right) \hat{\phi}\left(\frac{\omega}{2}\right).
\end{flalign}
\indent Substituting $\hat{h}(0) = \sqrt{2}$ into (\ref{eqn:qmf}) yields $\hat{h}(\pi) = 0$, implying that $h[n]$ is lowpass, and has at least one root at $\omega = \pi$. When $\hat{h}(\omega)$ has $p$ roots at $\omega = \pi$, $\phi(t)$ obtained from $h[n]$ using (\ref{eqn:filttowave}) satisfies the Strang-Fix conditions \cite{strang2011fourier}, $\hat{g}(\omega)$ has $p$ roots at $\omega = 0$, and $\psi(t)$ has $p$ vanishing moments (cf. Theorem 7.4, \cite{mallat2008wavelet}), i.e.,
$$ \int\limits_{-\infty}^{\infty} t^k \psi(t)\, \mathrm{d}t = 0, \text{ for } 0 \leq k < p.$$
The vanishing moments property empowers wavelets to annihilate polynomials of order $p-1$ or less, effectively resulting in parsimonious representations for smooth signals, i.e., signals that exhibit a high degree of regularity. In fact, this property lies at the heart of wavelet-based sparse representations for most naturally occurring signals and images.
\subsection{From Biorthogonal Filterbanks to Wavelets}
\indent A biorthogonal filterbank yields an MRA when the filters $h[n]$ and $\tilde{h}[n]$ satisfy the PR-1 condition (\ref{eqn:pr1}) as well as the following necessary conditions:
\begin{flalign}
\label{eqn:necessary}
\hat{h}(0) = \sqrt{2}, \text{ and } \hat{\tilde{h}}(0) = \sqrt{2}.
\end{flalign}
The synthesis filters $\tilde{h}[n]$ and $\tilde{g}[n]$ give rise to a dual of the MRA generated by the analysis filters. The synthesis scaling and wavelet functions obtained by the infinite cascade are specified as
\begin{flalign}
\label{eqn:filttowave_syn}
\hat{\tilde{\phi}}(\omega) = \prod\limits_{j=1}^{\infty}\frac{\hat{\tilde{h}}\left(2^{-j}\omega\right)}{\sqrt{2}}\text{,} \quad
\hat{\tilde{\psi}}(\omega) = \frac{1}{\sqrt{2}} \hat{\tilde{g}}\left(\frac{\omega}{2}\right) \hat{\tilde{\phi}}\left(\frac{\omega}{2}\right),
\end{flalign}
respectively. The number of vanishing moments of the wavelet $\tilde{\psi}(t)$ is controlled by the number of zeros of $\hat{\tilde{h}}(\omega)$ at $\omega = \pi$.\\
\indent Unlike the orthogonal filterbank case, the infinite cascades in the biorthogonal case may not converge in $L^2(\mathbb{R})$, even though $h[n]$ and $\tilde{h}[n]$ satisfy the required conditions. Cohen and Daubechies give a sufficient condition on the filters $h[n]$ and $\tilde{h}[n]$ \cite{cohen1992stability} that allows one to check whether the infinite cascade indeed converges to a finite-energy function. The check involves constructing Lawton matrices for $h[n]$ and $\tilde{h}[n]$. The Lawton matrix
$\boldsymbol{\Lambda}_{h} \in \mathbb{R}^{(2l - 1) \times (2l - 1)}$ for a filter $h[n]$ such that $\hat{h}(0) = \sqrt{2}$ is constructed as follows:
\begin{flalign}
\label{eqn:lawton}
\boldsymbol{\Lambda}_{h} = \begin{bmatrix}
r_{hh}[l-1] & 0 & \ldots & 0\\
r_{hh}[l-3] & r_{hh}[l-2] & \ldots & 0\\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \ldots & r_{hh}[l-1]
\end{bmatrix},
\end{flalign}
where $r_{hh}$ is the autocorrelation sequence of $h[n]$; similarly for ${\tilde h}[n]$. Cohen and Daubechies showed that if the Lawton matrices corresponding to $h[n]$ and $\tilde{h}[n]$ have a non-degenerate eigenspace corresponding to the eigenvalue $1$, with all other eigenvalues being strictly smaller than $1$ in magnitude, then the infinite cascade will converge to scaling and wavelet functions that form stable biorthogonal bases.\\
\indent In summary, wavelet bases are constructed through the design of appropriately constrained PRFBs. While orthogonal wavelet bases originate from the design of a single filter $h[n]$, the biorthogonal ones require two filters, $h[n]$ and $\tilde{h}[n]$. In the following, we rely on these connections to optimize autoencoders that generate wavelets possessing the desired properties.
\section{The Wavelet Learning Framework}
\label{sec:wv_lf}
The starting point for the proposed wavelet learning approach is an autoencoder. In the standard scenario, an autoencoder is a nonlinear transformation achieved by means of a neural network. It can be viewed as a nonlinear generalization of principal component analysis \cite{hinton2006reducing}. Autoencoders are central objects in {\it representation learning} and are used for discovering latent features in the input data. An autoencoder comprises an encoder and a decoder. The encoder performs dimensionality reduction, which is typically accomplished using a bottleneck layer, or a sparse hidden layer \cite{ranzato2008sparse}. The decoder learns to map the compressed representation to the encoder's input. The learnt representations are useful in several practical applications --- compression \cite{theis2017lossy}, denoising \cite{vincent2010stacked}, anomaly detection \cite{ribeiro2018study}, etc.\\
\indent We view the two-channel PRFB as a specific type of a {\it convolutional autoencoder}, henceforth referred to as the {\it filterbank autoencoder} in this paper. For the filterbank autoencoder illustrated in \figurename{~\ref{fig:fbae}}, the filtering operations are implemented as convolutional layers. The non-linear subsampling methods such as max-pooling and average-pooling typically used in deep neural networks are replaced by a linear, shift-varying downsampling operation. There is a key difference though. While an autoencoder is trained to minimize the reconstruction loss on a given dataset and rarely offers perfect reconstruction, we train a filterbank autoencoder to yield perfect reconstruction no matter what the input is, including random noise. We leverage this property to learn wavelet generating PRFBs using Gaussian vectors by imposing appropriate constraints on the filters.
\begin{figure}
\caption{(Color online) A two-channel filterbank interpreted and implemented as a convolutional autoencoder. Constraints are imposed on the filters of each convolution block so as to learn wavelets with the desired properties.}
\label{fig:fbae}
\end{figure}
\subsection{The Filterbank Autoencoder Model}
\label{sec:fb_ae}
We consider finite-length filters in the filterbank autoencoder. An unconstrained filterbank autoencoder has a set of four learnable filters
$\Theta_u = \left\{ \boldsymbol{h}, \boldsymbol{\tilde{g}} \in \mathbb{R}^{l}, \boldsymbol{\tilde{h}}, \boldsymbol{g} \in \mathbb{R}^{\tilde{l}} \right\}$
for chosen lengths $l$ and $\tilde{l}$. One could constrain the filters to learn a {\it biorthogonal filterbank autoencoder}, in which only the filters $\boldsymbol{h}$ and $\boldsymbol{\tilde{h}}$ are learnt, whereas $\boldsymbol{g}$ and $\boldsymbol{\tilde{g}}$ are fixed according to (\ref{eqn:ac1}) and (\ref{eqn:ac2}). The filters $h[n]$ and $\tilde{g}[n]$ then have the same support size, as do the filters $\tilde{h}[n]$ and $g[n]$. The dimensionality of the search space for the filters is reduced by a factor of two in the biorthogonal setting: $ \Theta_b = \left\{ \boldsymbol{h} \in \mathbb{R}^{l}, \boldsymbol{\tilde{h}} \in \mathbb{R}^{\tilde{l}} \right\}.$
Proceeding further, one could design an {\it orthogonal filterbank autoencoder} by setting $\boldsymbol{\tilde{h}} = \boldsymbol{h}$, with $\boldsymbol{g}$ and $\boldsymbol{\tilde{g}}$ specified by (\ref{eqn:ac1}) and (\ref{eqn:ac2}), respectively, which requires learning only a single filter $ \Theta_o = \left\{ \boldsymbol{h} \in \mathbb{R}^{l} \right\}.$
\subsection{Incorporating Vanishing Moments}
\label{sec:vm}
Not all PRFBs generate a valid multiresolution approximation. The filters $\boldsymbol{h}$ and $\boldsymbol{\tilde{h}}$ must additionally have at least one zero at $\omega = \pi$, from Equations~\eqref{eqn:qmf} and \eqref{eqn:necessary}. In the case of a wavelet with $p$ vanishing moments, the filter must have $p$ zeros at $\omega=\pi$, which leads to the following factorization of the filter $\hat {h}(\omega)$:
\begin{equation}
\label{eqn:h_form_fourier}
\hat {h}(\omega)=\left(\frac{1+e^{-\mathrm{j}\omega}}{2}\right)^p \cdot \hat{\ell}(\omega).
\end{equation}
A similar factorization holds for $\hat{\tilde{h}}(\omega)$. Daubechies' construction of compactly supported orthonormal wavelets is based on this factorization. The $\hat{\ell}(\omega)$ part of the refinement filter is determined subject to the PR conditions. Unser and Blu \cite{unser2003wavelet} showed that the continuous-domain scaling function may also be expressed as the convolution of a spline component and a distributional component, and that the spline is completely responsible for several key properties of the corresponding wavelet, including vanishing moments, order of approximation, annihilation of polynomials, regularity etc. They argued that $\left(\frac{1+e^{-\mathrm{j}\omega}}{2}\right)^p$ contributes to the B-spline part and $\hat{\ell}(\omega)$ to the distributional part. For a comprehensive review on B-splines, the reader is referred to \cite{unser1993ba, unser1993bb, unser1999splines}.\\
\indent In the learning framework that we are proposing, what this means is that the factor $\hat{\beta}_p (\omega) := \left(\frac{1+e^{-\mathrm{j}\omega}}{2}\right)^p$ is predetermined and fixed if one desires a wavelet with $p$ vanishing moments, and the {\it learnable component} is the distributional part $\hat{\ell}(\omega)$. In fact, the fixed factor is essentially a $p^\text{th}$-order discrete B-spline. Based on these considerations, we express the filters as follows:
\begin{flalign}
\label{eqn:h_form}
\boldsymbol{h} = \boldsymbol{\beta}_p * \boldsymbol{\ell}, \quad \text{and} \quad \boldsymbol{\tilde{h}} = \boldsymbol{\beta}_{\tilde{p}} * \boldsymbol{\tilde{\ell}},
\end{flalign}
where $*$ denotes the 1-D convolution operation, the vector $\boldsymbol{\beta}_p \in \mathbb{R}^{p + 1}$ is the discrete $p^{\text{th}}$-order B-spline, and the vectors $\boldsymbol{\ell} \in \mathbb{R}^{l - p}$ and $\boldsymbol{\tilde{\ell}} \in \mathbb{R}^{\tilde{l} - \tilde{p}}$ are learnt. The filterbank autoencoder that incorporates the {\it vanishing moments constraint} is shown in \figurename{~\ref{fig:vm}}. The parameters $p$ and $\tilde{p}$ are the number of vanishing moments of the wavelets $\psi(t)$ and $\tilde{\psi}(t)$, respectively, generated by these filters. In summary, the parameter sets for learning orthogonal and biorthogonal autoencoders with vanishing moments are
\begin{flalign}
\label{eqn:vc}
\nonumber
\Theta_o^{p} &= \{ \boldsymbol{h} \in \mathbb{R}^{l} \mid \boldsymbol{h} = \boldsymbol{\beta}_p * \boldsymbol{\ell}\} \text{ and }\\ \Theta_b^{p, \tilde{p}} &= \{ \boldsymbol{h} \in \mathbb{R}^{l}, \boldsymbol{\tilde{h}} \in \mathbb{R}^{\tilde{l}} \mid \boldsymbol{h} = \boldsymbol{\beta}_p * \boldsymbol{\ell}, \boldsymbol{\tilde{h}} = \boldsymbol{\beta}_{\tilde{p}} * \boldsymbol{\tilde{\ell}} \},
\end{flalign}
respectively.
\subsection{The Optimization Problem}
We use the mean-squared error loss for training the filterbank autoencoder. Given data $\mathcal{X} = \{ \boldsymbol{x}_j \in \mathbb{R}^{s} \}_{j = 1}^{m}$, the loss is defined as
\begin{align}
\label{eqn:loss}
\mathcal{L}(\mathcal{X}; \Theta) = \frac{1}{m}\sum_{j = 1}^{m} \|\boldsymbol{x}_j - \boldsymbol{\hat{x}}_j(\Theta)\|_2^2,
\end{align}
where $\Theta$ denotes the parameter set to optimize (the filter coefficients) and $\boldsymbol{\hat{x}}_j(\Theta)$ is the output of the autoencoder corresponding to the input $\boldsymbol{x}_j$. In the orthogonal wavelet case with $p$ vanishing moments, $\Theta = \Theta_o^{p}$, and in the biorthogonal case with $(p,\tilde{p})$ vanishing moments, $\Theta = \Theta_b^{p, \tilde{p}}$. The objective is to minimize
$\mathcal{L}(\mathcal{X}; \Theta)$ with respect to $\Theta$.
The Adam optimization algorithm \cite{kingma2014adam} is used for carrying out the minimization. Adam adapts the learning rate for each model parameter to expedite convergence.\\
\indent Since the filterbank autoencoder is linear, one could write $\boldsymbol{x}_j - \boldsymbol{\hat{x}}_j(\Theta) = \boldsymbol{B}_{\Theta} \boldsymbol{x}_j$. When $\boldsymbol{B}_{\Theta}$ equals $\boldsymbol{0}_s$, the filters in $\Theta$ form a perfect reconstruction filterbank. This result is captured in the form of the following proposition. The quantity $\boldsymbol{B}_{\Theta}$ is determined by the filters in the parameter set $\Theta$. For the sake of brevity of notation, in the following analysis, we drop the subscript $\Theta$ in $\boldsymbol{B}_{\Theta}$.
\begin{figure}
\caption{(Color online) The filterbank autoencoder with the vanishing moments constraint incorporated. The analysis filter $\boldsymbol{h}$ is expressed as the convolution of a fixed B-spline filter $\boldsymbol{\beta}_p$ and a learnable component $\boldsymbol{\ell}$, and similarly for the synthesis filter $\boldsymbol{\tilde{h}}$.}
\label{fig:vm}
\end{figure}
\begin{prop}
\label{prop:loss_form}
For a set of filters $\Theta= \left\{ \boldsymbol{h}, \boldsymbol{\tilde{g}} \in \mathbb{R}^{l}, \boldsymbol{\tilde{h}}, \boldsymbol{g} \in \mathbb{R}^{\tilde{l}} \right\}$, and a dataset $\mathcal{X}= \{ \boldsymbol{x}_j \in \mathbb{R}^{s} \}_{j = 1}^{m}$, there exists a matrix $\boldsymbol{B} \in \mathbb{R}^{s \times s}$ such that the loss function defined in \eqref{eqn:loss} can be expressed as
\begin{align}
\label{eqn:loss_form}
\mathcal{L}(\mathcal{X}; \Theta) = \frac{1}{m}\sum_{j = 1}^{m} \|\boldsymbol{B} \boldsymbol{x}_j\|_2^2.
\end{align}
If the filters form a PRFB, then $\boldsymbol{B} = \boldsymbol{0}_s$ and vice versa.
\end{prop}
\begin{IEEEproof}
The proof is given in Appendix A.
\end{IEEEproof}
\indent When the filters form a PRFB, the loss $\mathcal{L}(\mathcal{X}; \Theta)$ becomes zero and vice versa. Hence, the loss as defined in \eqref{eqn:loss} is indeed appropriate to learn a PRFB. The PRFB property must hold regardless of the data used for training the filterbank autoencoder. This brings us to the question about the choice of the data. We show that choosing random Gaussian vectors suffices for the purpose of training. The consequent statistical guarantees are reassuring.
\subsection{Training Data}
In our experiments, we choose a dataset containing $m = s$ Gaussian vectors, i.e., $ \mathcal{X} = \{\boldsymbol{x}_j \in \mathbb{R}^s \mid \boldsymbol{x}_j \sim \mathcal{N}(\boldsymbol{0}, \boldsymbol{I}_s)\}_{j = 1}^{s}, $ where each entry $x_i$ of $\boldsymbol{x} \in \mathcal{X}$ is sampled independently and identically from a zero-mean, unit-variance Gaussian distribution. A training dataset comprising Gaussian vectors has two benefits: first, $s$ randomly sampled Gaussian vectors are nearly orthogonal with high probability; and second, the squared-length of linearly transformed Gaussian vectors, i.e., $\|\boldsymbol{B x}\|_2^2$ concentrates around its mean value with high probability.\\
\indent The expected squared-length of $\boldsymbol{x} \in \mathcal{X}$ is given by $\mathbb{E}[\|\boldsymbol{x}\|_2^2] = \mathbb{E}\left[\sum_{i = 1}^{s} x_i^2\right] = s$, using the independence and unit variance properties of $x_i$. We recall the {\it Gaussian annulus theorem} (cf. Theorem 2.9, \cite{blum2020foundations}) below.
\begin{theorem} \cite{blum2020foundations}
\label{thm:annulus}
For an $s$-dimensional spherical Gaussian centered on the origin and with unit variance in each direction, for any $k \leq \sqrt{s}$, all but at most $3 e^{-\frac{ k^2}{96}}$ of the probability mass lies within the annulus $\sqrt{s} - k \leq \|\boldsymbol{x}\|_2 \leq \sqrt{s} + k$.
\end{theorem}
The theorem states that, with high probability, the length of the Gaussian vector $\boldsymbol{x}$ is $\sqrt{s}$. The expected squared Euclidean distance between two independent Gaussian vectors with zero-mean and unit-variance entries $\boldsymbol{x}$ and $\boldsymbol{y}$ is given by
\begin{align}
\mathbb{E}[\|\boldsymbol{x-y}\|_2^2] &= \mathbb{E}\left[\displaystyle\sum_{i = 1}^{s} (x_i - y_i)^2\right] \nonumber \\
&= \displaystyle\sum_{i = 1}^{s} \left( \mathbb{E}\left[x_i^2\right] + \mathbb{E}\left[y_i^2\right] - 2 \mathbb{E}[x_i] \mathbb{E}[y_i] \right) = 2s.\nonumber
\end{align}
Since the squared-length of each vector $\boldsymbol{x}$ and $\boldsymbol{y}$ is close to $s$ with high probability, and the expected squared distance between them is close to $2s$, by the Pythagorean theorem, we can conclude that the vectors are orthogonal with high probability. We recall another important result (Theorem 2.8 from \cite{blum2020foundations}), which proves that $s$ vectors drawn at random from the unit ball are orthogonal with high probability. Owing to spherical symmetry of the standard Gaussian distribution, $\mathcal{N}(\boldsymbol{0}, \boldsymbol{I}_s)$, vectors on the unit ball in $s$-dimensions can be obtained by drawing $s$-dimensional Gaussian vectors and rescaling them to possess unit norm.
\begin{theorem} \cite{blum2020foundations}
Consider drawing $m$ points $\boldsymbol{x}_1, \ldots, \boldsymbol{x}_m \in \mathbb{R}^s$ at random from the unit ball. With probability $1 - \mathcal{O}(\frac{1}{m})$,
\begin{align*}
\|\boldsymbol{x}_j\|_2 \geq 1 - \frac{2 \ln m}{s}, &\text{ for all } j, \text{ and } \\
|\langle \boldsymbol{x}_i, \boldsymbol{x}_j \rangle| \leq \frac{\sqrt{6 \ln m}}{\sqrt{s - 1}}, &\text{ for all } i \neq j.
\end{align*}
\end{theorem}
\indent Based on the orthogonality property of the vectors in $\mathcal{X}$, the loss function in \eqref{eqn:loss_form} could be interpreted as the square of the Hilbert-Schmidt norm of the finite-dimensional operator $\boldsymbol{B}$. We show that $\mathcal{L}(\mathcal{X}; \Theta)$ is concentrated about its expected value with high probability. The expectation of the loss is $\| \boldsymbol{B} \|_{\textsc{F}}^2$, where $\|\cdot\|_\textsc{F}$ denotes the Frobenius norm, as justified below:
\begin{align*}
\mathbb{E}\{\mathcal{L}(\mathcal{X}; \Theta)\} &= \mathbb{E}\left\{\frac{1}{s}\sum_{j = 1}^{s} \|\boldsymbol{B} \boldsymbol{x}_j\|_2^2\right\}, \\
&= \mathbb{E}\{\|\boldsymbol{B} \boldsymbol{x}\|_2^2\} = \mathbb{E}\{\boldsymbol{x}^\textsc{T}\boldsymbol{B}^\textsc{T}\boldsymbol{B}\boldsymbol{x}\}\\
&= \sum_i (\boldsymbol{B}^\textsc{T}\boldsymbol{B})_{i,i} = \|\boldsymbol{B}\|_{\textsc{F}}^2,
\end{align*}
where we have used the property that the entries of $\boldsymbol{x}$ are i.i.d.
The random vector $\boldsymbol{B} \boldsymbol{x}$ is Gaussian, with mean $\boldsymbol{0}$ and covariance matrix $\boldsymbol{B} \boldsymbol{B}^{\mathrm{T}}$. The entries of $\boldsymbol{B} \boldsymbol{x}$ are independent if and only if $\boldsymbol{B}\boldsymbol{B}^{\mathrm{T}}$ is diagonal, and they have unit variance in addition only when the diagonal entries are all equal to one. In the special case where $\boldsymbol{B}$ is an orthogonal matrix, $\boldsymbol{B} \boldsymbol{x}$ follows the standard Gaussian distribution, more precisely, $\boldsymbol{B} \boldsymbol{x} \sim \mathcal{N}(\boldsymbol{0}, \boldsymbol{I}_s)$, which is due to the \emph{rotational invariance} of the standard Gaussian. The exact nature of the concentration of the loss about its mean is specified next.
\begin{theorem}
\label{thm:conc_loss}
For the loss function $\mathcal{L}$ defined in \eqref{eqn:loss_form}, where $\mathcal{X}$ comprises standard Gaussian vectors, the deviation of the loss from its expected value is bounded in probability as follows:
\begin{align*}
\mathbb{P}(\big| \mathcal{L} - \mathbb{E} \{ \mathcal{L} \} \big| \geq k) \leq \begin{cases}
2 e^{\frac{-k^2 s}{8 \|\boldsymbol{B}\|_{2}^{2} \|\boldsymbol{B}\|_{\textsc{F}}^{2}}}, \, &k \leq \|\boldsymbol{B}\|_{\textsc{F}}^2, \\
2 e^{\frac{-k s}{8 \|\boldsymbol{B}\|_{2}^{2}}}, \, &k > \|\boldsymbol{B}\|_{\textsc{F}}^2.
\end{cases}
\end{align*}
\end{theorem}
\begin{IEEEproof}
The proof is given in Appendix B.
\end{IEEEproof}
A more general version of Theorem \ref{thm:conc_loss} is the Hanson-Wright inequality \cite{rudelson2013hanson}, which extends the argument to linear transformations of sub-Gaussian vectors. For small deviations about the mean ($k \leq \|\boldsymbol{B}\|_{\textsc{F}}^2$), the concentration behavior of $\|\boldsymbol{B} \boldsymbol{x}\|_2^2$ is similar to that of a Gaussian, while for large deviations, the tail probability is heavier than that for a Gaussian. More specifically, $\|\boldsymbol{B} \boldsymbol{x}\|_2^2$ is a \emph{sub-exponential} random variable (cf. Theorem 2.13, \cite{wainwright2019high}).
The preceding theorem states that, for standard Gaussian data $\mathcal{X}$ and $k \leq \|\boldsymbol{B}\|_{\textsc{F}}^2$, all but at most $2 e^{\frac{-k^2 s}{8 \|\boldsymbol{B}\|_{2}^{2} \|\boldsymbol{B}\|_{\text{F}}^{2}}}$ of the probability mass of the loss lies within the annulus $\|\boldsymbol{B}\|_{\textsc{F}}^2 - k \leq \mathcal{L}(\mathcal{X}; \Theta) \leq \|\boldsymbol{B}\|_{\textsc{F}}^2 + k$. Proposition \ref{prop:loss_form} indicates that $\|\boldsymbol{B}\|_{\textsc{F}}^2$ is a direct measure of how close the filters are to achieving perfect reconstruction. The high concentration of the loss about its mean for Gaussian data goes to show that minimizing an instance of the loss is, with high probability, equivalent to minimizing its expectation, which is an assurance that the filters are being steered toward achieving PR.
\begin{figure*}
\caption{(Color online) Learning filterbank autoencoders: The results of learning unconstrained (red), biorthogonal constrained (green), and orthogonal constrained (blue) filterbank autoencoders are shown. The frequency responses of the learnt filters are displayed in the top two rows ($\omega \in [0, \pi]$; the $y$-axis limits are $[0, 3]$). The loss as a function of the epochs and the learning rate (right-hand side $y$-axis) for Adam on a log-scale are shown. Imposing biorthogonality/orthogonality constraints leads to faster convergence compared with the unconstrained case. Note that the $x$-axis limits in the loss vs. epochs plots are different in the three cases.}
\label{fig:fb_learn}
\end{figure*}
\subsection{Learning Rate Schedule}
While using optimizers such as Adam \cite{kingma2014adam}, a monotonically decreasing loss is not guaranteed, and tuning the learning rate parameter $\eta$ during training becomes necessary. A high learning rate may accelerate the optimization in the initial phase, but would ultimately lead to a large deviation away from the minimum loss value. The back-tracking line-search method (cf. Section 9.2 of \cite{boyd2004convex}) finds an appropriate learning rate $\eta^{(k)}$ at iteration $k$ by starting with $\eta^{(k)} = 1$ and iteratively dividing $\eta^{(k)}$ by a factor $\alpha > 1$ as long as the gradient-descent step does not result in a reduction of the loss. However, this method requires one to compute the output of the autoencoder several times at each iteration $k$ before the next update is computed. We use a heuristic that requires computing the forward pass only once at iteration $k$, by tolerating the deviation from the minimum loss value from iterations $1$ to $k$, up to a factor $\tau$.\\
\indent Let the loss at iteration $k$ be denoted by $\mathcal{L}^{(k)}(\mathcal{X}; \Theta)$. We keep track of the minimum value of the loss as well as the corresponding model state. Let the minimum value of the loss function encountered up to the current iteration be $\mathcal{L}^{(k_{\text{min}})}(\mathcal{X}; \Theta)$, occurring at iteration index $k_{\text{min}}$. At iteration $k > k_{\text{min}}$, if $\log_{10}\left( \frac{\mathcal{L}^{(k)}(\mathcal{X}; \Theta)}{\mathcal{L}^{(k_{\text{min}})}(\mathcal{X}; \Theta)} \right) > \tau$, $\eta$ is reduced by a certain factor $\alpha$, i.e., $\eta \leftarrow \frac{\eta}{\alpha}$, and the training is resumed after resetting the model state to that of iteration $k_{\text{min}}$. This procedure effectively reduces the learning-rate every time a large deviation from the minimum occurs, and the reduced learning-rate is used until either another deviation occurs, or until the stopping criterion is satisfied. In our experiments, we used a threshold of $\tau = 6$ and a factor $\alpha = 10$.
\subsection{Stopping Criteria}
We use two stopping criteria and terminate the training when either of them is met. The first criterion measures the relative change in the loss value for 100 consecutive iterations. The training is stopped at iteration $k$ if the percentage change in the loss value falls below a certain threshold $\delta$ for $100$ consecutive iterations preceding $k$, i.e., if for all $i$ such that $ 0 \leq i \leq 99$, the following happens:
$$
\frac{|\mathcal{L}^{(k-i)}(\mathcal{X}; \Theta) - \mathcal{L}^{(k-i-1)}(\mathcal{X}; \Theta)|}{\mathcal{L}^{(k-i-1)}(\mathcal{X}; \Theta)} < \delta.
$$
\indent The second criterion is an absolute one and checks for the loss falling below a preset threshold $\epsilon$, i.e., $\mathcal{L}^{(k)}(\mathcal{X}; \Theta) < \epsilon$. In our experiments, we set $\delta = 10^{-5}$ and $\epsilon = 10^{-15}$.
\subsection{Performance Measures}
\label{sec:perf_measures}
We employ two performance measures to determine how close to PR the learnt filterbank autoencoders are. The first one is the signal-to-reconstruction error ratio (SRER) defined as $$\text{SRER} = 20 \log_{10}\left( \frac{1}{s} \sum\limits_{i = 1}^{s} \frac{\|\boldsymbol{x}_i\|_2}{\|\boldsymbol{x}_i - \tilde{\boldsymbol{x}}_i\|_2}\right)\,\text{dB}.$$
A test set of white Gaussian vectors $\left\{\boldsymbol{x}_i \sim \mathcal{N}(\boldsymbol{0}_{1000}, \boldsymbol{I}_{1000})\right\}_{1 \leq i \leq 1000}$ is used to compute the SRER. An SRER greater than $144.50$ dB for single-precision floats and an SRER greater than $319.09$ dB for double-precision floats indicates machine epsilon, or maximum relative error due to rounding, according to the IEEE 754 standard \cite{ieee754}.\\
\indent An ideal filterbank autoencoder must satisfy the PR-1 condition for all $\omega \in [0, 2\pi]$. The second performance measure checks the PR-1 condition considering samples of $\omega \in [0, 2\pi]$:
$$
\Delta_{\text{pr}} = \frac{1}{N}\sum\limits_{k = 0}^{N-1} \left(\hat{h}^*(\omega_k) \hat{\tilde{h}}(\omega_k) + \hat{h}^*(\omega_k + \pi) \hat{\tilde{h}}(\omega_k + \pi) - 2\right)^2,
$$
where $h$ and $\tilde{h}$ are the learnt analysis and synthesis lowpass filters, respectively, and $\omega_k = \frac{2 \pi k}{N}, 0 \leq k \leq N-1, N = l + \tilde{l} - 1 $.
\begin{figure*}
\caption{The learnt orthogonal scaling (left) and wavelet (right) functions, with filter length $l = 8$ and vanishing moments $p$ varying from $1$ to $4$. Observe that the smoothness of both the scaling and wavelet functions increases as the number of vanishing moments imposed increases.}
\label{fig:vm_increase}
\end{figure*}
\subsection{Experimental Validation}
We present the results of learning filterbank autoencoders corresponding to the three flavors: unconstrained (parameter set $\Theta_u$), biorthogonal ($\Theta_b$), and orthogonal ($\Theta_o$). The models were implemented using the Tensorflow Python library \cite{tensorflow2015-whitepaper} and run on a computer with an i7 processor and 8 GB RAM. The stopping criterion threshold $\epsilon$ was set to $10^{-30}$. For all three filterbanks, the SRER was greater than $200$ dB, and $\Delta_{\text{pr}}$ was of the order of $10^{-18}$.\\
\indent The frequency responses of the filters, the training loss, and learning rate as a function of epochs are shown in \figurename{~\ref{fig:fb_learn}}. The unconstrained and biorthogonal filterbanks were initialized with the same filters $\boldsymbol{h}$ and $\boldsymbol{\tilde{h}}$, having length $8$ each, with their entries drawn from the standard normal distribution. The filter $\boldsymbol{h}$ of the orthogonal autoencoder was also initialized with the same values as the filter $\boldsymbol{h}$ for the other cases. Additionally, the filters $\boldsymbol{g}$ and $\boldsymbol{\tilde{g}}$ were initialized randomly for the unconstrained variant, while they were computed using the biorthogonal relations (\ref{eqn:ac1}) and (\ref{eqn:ac2}) for the other two models. The same dataset of $128$ random vectors was used to train all the models.
We observe that each model converges to a PRFB, as indicated by the loss function going below the threshold of $\epsilon = 10^{-30}$. For the unconstrained case, the filters $\boldsymbol{g}$ and $\boldsymbol{\tilde{g}}$ were found to obey the biorthogonal relations (\ref{eqn:ac1}) and (\ref{eqn:ac2}) automatically although they were not specifically constrained to do so, indicating that training under the MSE loss and our choice of the dataset indeed enforces the perfect reconstruction property. The unconstrained model takes longer to train than the constrained versions. Although the three models were given the same initialization, they converged to different solutions, demonstrating the non-uniqueness of the solution space. Also, the filters learnt do not have any zeros, neither at $\omega = 0$ nor at $\omega = \pi$, indicating that the learnt filterbank autoencoders, although satisfying the PR property, do not generate a {\it bona fide} multiresolution approximation.
\section{From Filterbank Autoencoders to Wavelets}
\label{sec:1d_wv}
In this section, we explain how wavelet properties such as vanishing moments, orthogonality, symmetry, and biorthogonality could be incorporated into the filterbank autoencoder learning process. The learning paradigm, choice of the loss function and dataset are as discussed in the previous section. Once a filterbank autoencoder with the desired properties is learnt, the corresponding scaling and wavelet functions are obtained using the infinite cascade relation specified in \eqref{eqn:filttowave}. The wavelet learnt by an autoencoder for a particular setting of vanishing moments $p$ and $\tilde{p}$ need not be unique, and additional constraints may have to be imposed to arrive at wavelets with the desired properties. These nuances are discussed in the following, in which we consider learning both orthogonal (corresponding to $\Theta_o^{p}$) and biorthogonal wavelet bases (corresponding to $\Theta_b^{p, \tilde{p}}$).
\subsection{Orthogonal Wavelets with $p$ Vanishing Moments}
Orthogonal wavelets are learnt using the autoencoder model $\Theta_o^{p}$ defined in \eqref{eqn:vc}. The parameters to be specified are the length of the filter ($l$) and the number of vanishing moments ($p$). The length of $\boldsymbol{h}$ must be greater than twice the number of vanishing moments $p$ \cite{daubechies1988orthonormal}, and for a given choice of $l \geq 2 p$ and $p \geq 1$, one of several possible wavelet generating filterbanks may be learnt. The number of vanishing moments of a wavelet is related to its smoothness, as established by Tchamitchian (Theorem 7.6, \cite{mallat2008wavelet}). We demonstrate this relationship in \figurename{~\ref{fig:vm_increase}} by learning length-$8$ filters $\boldsymbol{h}$, with $p$ varying from $1$ to $4$. As expected, the smoothness of the learnt wavelet and scaling functions increases with an increase in the number of vanishing moments imposed. For $p = 4$, the learnt wavelet closely resembles the db$4$ wavelet of the Daubechies family. This is the maximally smooth wavelet for $l = 8$ and $p = 4$. In fact, Daubechies proved that the maximally smooth wavelet for a fixed, even length $l$ of $\boldsymbol{h}$ has $p = \frac{l}{2}$ vanishing moments \cite{daubechies1988orthonormal}. We next discuss how to learn members of the Daubechies family.
\begin{figure*}
\caption{(Color online) Scaling (top row) and wavelet (bottom row) function pairs $(\phi_i(t), \psi_i(t)), i = 1, 2, \ldots, 4$, obtained by learning an orthogonal filterbank autoencoder (filter length $l = 8$, vanishing moments $p = 4$). Each column shows one pair obtained by a different training dataset and a different initialization of $\boldsymbol{h}$.}
\label{fig:db4_repeat}
\end{figure*}
\subsection{Daubechies Family of Orthogonal Wavelets}
The Daubechies wavelet with $p$ vanishing moments, denoted as the db$p$ wavelet, is generated by the minimum-phase filter of length $l = 2p$ satisfying the CMF condition \eqref{eqn:qmf}. The minimum-phase criterion makes the filter real-valued and unique among the $2^{p - 1}$ possible length-$2p$ filters, complex-valued in general, having $p$ vanishing moments satisfying (\ref{eqn:qmf}) (cf. Section 4 of \cite{daubechies1988orthonormal}). We show that a filterbank autoencoder with $l = 8$ and $p = 4$ converges to one of four real-valued wavelet generating filterbanks. Over $10$ repetitions of the experiment, with different random initializations of $\boldsymbol{h}$ and different datasets, the four wavelets and scaling functions learnt are shown in \figurename{~\ref{fig:db4_repeat}}. The wavelet $\psi_1(t)$ corresponds to the minimum-phase (Daubechies) wavelet and $\psi_2(t)$ corresponds to the length-$8$ Symmlet. We also observe from \figurename{~\ref{fig:db4_repeat}} that $\phi_3(t) = \phi_2(-t)$, $\phi_4(t) = \phi_1(-t)$, $\psi_3(t) = -\psi_2(-t)$ and $\psi_4(t) = -\psi_1(-t)$. Understanding this finding further requires one to go into the details of Daubechies' construction.
Considering the factorization of $\boldsymbol{h}$ specified in (\ref{eqn:h_form_fourier}), we may rewrite (\ref{eqn:pr1}) as follows:
\begin{flalign}
\label{eqn:daub_form}
P(y)Q(1 - y) + P(1 - y)Q(y) = 2,
\end{flalign}
where $y = \frac{(2 + e^{\mathrm{j} \omega} + e^{-\mathrm{j} \omega})}{4}$, $P(y) = 4^p y^p$ and $Q(y) = |\hat{\ell}(\omega)|^2$. The polynomial $Q(y)$ that satisfies \eqref{eqn:daub_form} is specified by Bezout's theorem (cf. Theorem 7.8, \cite{mallat2008wavelet}).
\begin{theorem} \label{thm:bezout} \cite{mallat2008wavelet} Let $P_1(y)$ and $P_2(y)$ be two polynomials of degrees $p_1$ and $p_2$, respectively, and having no common zeros. Then, there exist two unique polynomials $Q_1(y)$ and $Q_2(y)$ of degrees $p_2 - 1$ and $p_1 - 1$, respectively, such that
\begin{align*}
P_1(y) Q_1(y) + P_2(y) Q_2(y) = 1.
\end{align*}
\end{theorem}
Since $P(y)$ is an order-$p$ polynomial in $y$ having no common zeros with the order-$p$ polynomial $P(1 - y)$, Theorem~\ref{thm:bezout} implies that $Q(y)$ and $Q(1 - y)$ are order-$(p - 1)$ polynomials. The roots of $Q(y)$, once found, must be factorized to determine $\hat{\ell}(\omega)$. However, the factorization is not unique. The trigonometric polynomial $\hat{\ell}(\omega)$ is expressed in terms of its roots, $\{ r_k \}_{k = 1}^{p - 1}$, as follows:
\begin{flalign}
\hat{\ell}(\omega) = \ell[0] \prod\limits_{k = 1}^{p - 1} (1 - r_k e^{-\mathrm{j} \omega}).
\end{flalign}
Since the complex conjugate $\hat{\ell}^*(\omega)$ has roots $\{ \frac{1}{r_k} \}_{k = 1}^{p - 1}$, $Q(y)$ is written in terms of its roots as follows:
\begin{flalign}
\label{eqn:q_y}
Q(y) = |\hat{\ell}(\omega)|^2 = \left(\ell[0]\right)^2 \prod\limits_{k = 1}^{p - 1} \left\{(1 + r_k)^2 - 4 r_k y\right\}.
\end{flalign}
Observe that every root $r_k$ of $\hat{\ell}(\omega)$ corresponds to the root $\frac{(1 + r_k)^2}{4 r_k}$ of the polynomial $Q(y)$. Given a root for $Q(y)$, one could obtain $r_k$ by solving a quadratic equation. One could check that both $r_k$ and $\frac{1}{r_k}$ yield the same root of $Q(y)$, implying that if we choose $r_k$ as a root of $\hat{\ell}(\omega)$, then $\frac{1}{r_k}$ is automatically a root of $\hat{\ell}^*(\omega)$. In the construction of the db$p$ wavelet, the roots $r_k$ assigned to $\hat{\ell}(\omega)$ are chosen so that $|r_k| < 1$, making $h[n]$ a minimum-phase filter.\\
\indent For the case $p = 4$, both $Q(y)$ and $\hat{\ell}(\omega)$ have three roots, two of which are complex. Since we only consider real filters $\ell[n]$, there are four possible factorizations of $Q(y)$, which yield the four solutions learnt. Let the roots of $\hat{\ell}_1(\omega)$, which is the learnable part of the filter $\hat{h}_1(\omega)$ that generates the scaling and wavelet functions $\phi_1(t)$ and $\psi_1(t)$, respectively, be $(r_1, r_2, r_3)$, where $r_1$ is real and $r_2$ and $r_3$ form a complex-conjugate pair. We found experimentally that the roots of the learnable filters $\hat{\ell}_i(\omega)$ corresponding to $\phi_i(t)$ and $\psi_i(t), i = 2, 3, 4$, are $(\frac{1}{r_1}, r_2, r_3), (r_1, \frac{1}{r_2}, \frac{1}{r_3})$, and $(\frac{1}{r_1}, \frac{1}{r_2}, \frac{1}{r_3})$, respectively. The relationship between the roots of $\hat{\ell}_i(\omega)$ explains the relationship between the learnt scaling and wavelet functions.\\
\indent In general, setting $l = 2p$ and learning wavelets would yield one of the possible real factorizations of the filter $\hat{\ell}(\omega)$ from the roots of $Q(y)$. Hence, in order to obtain a minimum-phase db$p$ wavelet, we train an orthogonal filterbank autoencoder with $l = 2p$ and factorize the learnt filter $\boldsymbol{h}$ to obtain its roots $\{ r_k \}_{k = 1}^{p - 1}$. The db$p$ wavelet filter is then constructed by replacing each root $r_k$ having magnitude greater than unity with $\displaystyle\frac{1}{r_k}$.
\subsection{Symmetric Orthogonal Wavelets?}
\label{subsec:symm_ortho}
\begin{figure*}
\caption{(Color online) Results pertaining to learning symmetric orthogonal wavelets for length-$8$ filters with different number of vanishing moments ($p = 1, 2, 3$). In all three cases, the learnt scaling filters are symmetric, but the training loss values clearly show that only the $p = 1$ case generates a valid PRFB. The training loss for $p = 2, 3$ saturated at a high value, indicating that PR is compromised when symmetry is introduced. The $p = 1$ case results in the dilated Haar filter, because the length is greater than the minimum support required, which is in perfect agreement with Equation~\eqref{eqn:sym_orth_prfb}.}
\label{fig:hsym_combined}
\end{figure*}
We now consider incorporating symmetry into the wavelet.
Symmetry results in a linear-phase response, which is important in several audio and image processing applications. If $h[n]$ is a symmetric filter, the wavelet and scaling functions obtained using the infinite cascade given in (\ref{eqn:filttowave}) would also be symmetric. Hence, it suffices to impose symmetry on the filter $\boldsymbol{h}$ to obtain a symmetric wavelet. In addition, one could also incorporate $p$ vanishing moments. Considering the factorization in \eqref{eqn:h_form_fourier} and \eqref{eqn:h_form}, and the fact that the spline component $\boldsymbol{\beta}_p$ is symmetric, it is clear that it is sufficient to impose symmetry on the trainable component $\boldsymbol{\ell}$. \\
\indent Consider the expansion of $\boldsymbol{\ell}$ in a symmetric basis $\boldsymbol{S}$: $\boldsymbol{\ell} = \boldsymbol{S \ell^{\prime}}$, where $\boldsymbol{\ell^{\prime}}$ is the filter to be learnt. For a length-$l$ filter $\boldsymbol{h}$, and $p$ vanishing moments, $\boldsymbol{{\ell}^{\prime}} \in \mathbb{R}^{\left\lceil \frac{l - p}{2} \right\rceil}$ is the trainable component of $\boldsymbol{h}$, $\boldsymbol{\beta}_p \in \mathbb{R}^{p + 1}$ is the spline component and $\boldsymbol{S} \in \mathbb{R}^{(l - p) \times \left\lceil \frac{l - p}{2} \right\rceil}$ contains the basis vectors for length-$(l - p)$ symmetric filters. For example, for $l = 8$ and $p = 4$, $\boldsymbol{\ell^{\prime}} \in \mathbb{R}^{2}$, and
$$
\boldsymbol{S} = \begin{bmatrix}
1 & 0 \\
0 & 1 \\
0 & 1 \\
1 & 0
\end{bmatrix}.
$$
The symmetric basis expansion of $\boldsymbol{\ell}$ is a sure-shot way of ensuring that the learnt scaling filter will always be symmetric. It remains to be verified whether perfect reconstruction is achieved for a chosen value of $p$.\\
\indent The results of training symmetry-constrained filterbank autoencoders with $l = 8$ and $p$ varying from $1$ to $3$ are presented in \figurename{~\ref{fig:hsym_combined}}. The training loss vs. epochs for each case is shown on the left-most panel of \figurename{~\ref{fig:hsym_combined}}. For $p = 1$, the loss saturates at a value of $2.12 \times 10^{-26}$, with an SRER of $214.79$ dB and $\Delta_{\text{pr}} = 1.36 \times 10^{-18}$, which corresponds to a PRFB for all practical purposes. The learnt filter in this case is a circularly shifted Haar filter. The $p = 2$ and $p = 3$ cases are interesting, since the corresponding training loss values saturated at $3.73 \times 10^{-6}$ and $0.27$, respectively, with SRER $= 53.90$ dB, and $\Delta_{\text{pr}} = 0.017$ for $p = 2$, and SRER $= 5.73$ dB, and $\Delta_{\text{pr}} = 1095.06$ for $p = 3$. These results indicate that the filterbanks learnt are not PR. Therefore, the optimization has compromised PR in favour of symmetry. Despite repeating the experiments several times with different initialization, training data, various values of $p$ and $l$, we found that the learnt filterbank was not PR. These observations are completely in agreement with known results in filterbank and wavelet theory. For instance, Vaidyanathan showed that symmetric filters that satisfy the CMF condition \eqref{eqn:qmf} must have the following form (cf. Chapter 7, \cite{vaidyanathan2006multirate}):
\begin{align}
\label{eqn:sym_orth_prfb}
h[n] = a\,\delta[n - s_1] + b\,\delta[n - s_2],
\end{align}
where $a, b \in \mathbb{R}$ and satisfy $a b = \frac{1}{2}$, and the shifts $s_1, s_2 \in \mathbb{Z}$ satisfy $s_1 + s_2 = l - 1$. The learnt filter in the $p = 1$ case satisfies this requirement. Daubechies proved that the only symmetric orthogonal wavelet is the Haar wavelet \cite{daubechies1988orthonormal}. Our finding that the learnt filterbank was far from PR for $p \neq 1$ is perfectly consistent with these results.
\subsection{Symmlets}
\begin{figure*}
\caption{Learning a Symmlet: Incorporating regularization in learning a filterbank autoencoder with $\Theta_o^p$, where $l = 8$ and $p = 4$. We started with the dataset and initialization that yielded the db$4$ wavelet in \figurename{~\ref{fig:db4_repeat}}.}
\label{fig:sym4_combined}
\end{figure*}
\indent If one is interested in achieving PR and also have the filters be as close to symmetric as possible, it can be done by means of a regularization penalty added to the loss function $\mathcal{L}$ instead of the symmetric basis expansion considered in Section~\ref{subsec:symm_ortho}. The regularized cost function is given by
\begin{flalign}
\label{eqn:sym_loss}
\mathcal{L}^{\text{reg}}(\mathcal{X}; \Theta_o^p) = \mathcal{L}(\mathcal{X}; \Theta_o^p) + \lambda \|\boldsymbol{h} - \boldsymbol{h}^{\text{flip}}\|_2^2,
\end{flalign}
where $\boldsymbol{h}^{\text{flip}}$ is the flipped version of the vector $\boldsymbol{h}$. The regularizer penalizes asymmetric filters, with the point of symmetry being the midpoint of the filter (e.g., $3.5$ for $l = 8$). We used the same dataset and initialization that yielded the db$4$ wavelet shown in \figurename{~\ref{fig:db4_repeat}} to gauge the effect of the regularizer. The regularization parameter $\lambda$ was reduced in a staircase fashion with a decay factor of $10$ for the first $5000$ iterations, after which it was set to $0$. Progressively decreasing the regularization parameter (cf. \figurename{~\ref{fig:sym4_combined}}(c)) has the effect of favouring nearly symmetric filters in the initial stages of learning, and PR in the latter stages. We found that the regularized loss $\mathcal{L}^{\text{reg}}(\mathcal{X}; \Theta_o^p)$ saturated at a value of $7.20 \times 10^{-26}$, with SRER $= 229.89$ dB, and $\Delta_{\text{pr}} = 4.19 \times 10^{-20}$, and the learnt filters generate the sym$4$ wavelet and scaling functions as shown in \figurename{~\ref{fig:sym4_combined}}(a). A similar approach can be used with choices of $l$ and $p$ other than $8$ and $4$ shown here, to obtain approximately symmetric orthogonal wavelets. Thus, the proposed wavelet learning framework allows one to generate Symmlets as well.
\subsection{Learning Biorthogonal Wavelets}
\begin{figure}
\caption{(Color online) A PRFB with vanishing moments that does not generate a wavelet. The scaling filters and frequency responses of a learnt filterbank autoencoder with $l = 5, \tilde{l} = 3$.}
\label{fig:bior2_1}
\end{figure}
Biorthogonal wavelets are learnt using the autoencoder model $\Theta_b^{p, \tilde{p}}$ defined in \eqref{eqn:vc}. Unlike the orthogonal case, $\boldsymbol{\tilde{h}}$ is no longer constrained to be equal to $\boldsymbol{h}$, and therefore filter-lengths, $l$ and $\tilde{l}$, and vanishing moments, $p$ and $\tilde{p}$, must be specified. Consequently, the added flexibility allows for the construction of symmetric biorthogonal wavelets as well as wavelets with more vanishing moments than $\frac{l}{2}$. As in the orthogonal case, not all choices of the parameters yield valid wavelet generating filterbanks.\\
\indent Invoking the factorization of $h[n]$ and $\tilde{h}[n]$ into the spline and distributional components as given in \eqref{eqn:h_form_fourier}, the PR condition \eqref{eqn:pr1} is expressed as follows:
\begin{flalign}
\label{eqn:bior_form}
(1 + e^{\mathrm{j} \omega})^{p + \tilde{p}} R(\omega) + (1 - e^{\mathrm{j} \omega})^{p + \tilde{p}} R(\omega + \pi) = 2,
\end{flalign}
where $R(\omega) = 2^{-(p + \tilde{p})} e^{-\mathrm{j} p \omega} \hat{\ell}^*(\omega) \hat{\tilde{\ell}}(\omega)$.
\begin{figure*}
\caption{(Color online) Learning spline biorthogonal wavelets: The scaling and wavelet functions shown in this figure were learnt by fixing the filter $\boldsymbol{\tilde{h}}$.}
\label{fig:bior2_2}
\end{figure*}
\begin{figure*}
\caption{(Color online) Learnt scaling and wavelet functions for a biorthogonal autoencoder ($\Theta_b^{p, \tilde{p}}$).}
\label{fig:bior_4_4}
\end{figure*}
Once $R(\omega)$ is determined, its factorization into $\boldsymbol{\ell}$ and $\boldsymbol{\tilde{\ell}}$ determines the filters $\boldsymbol{h}$ and $\boldsymbol{\tilde{h}}$. However, not all filters that satisfy \eqref{eqn:bior_form} generate a stable wavelet basis, since the convergence of the infinite cascade is not guaranteed. One must additionally verify that the Lawton matrices \eqref{eqn:lawton} constructed using the filters $\boldsymbol{h}$ and $\boldsymbol{\tilde{h}}$ have a non-degenerate eigenspace corresponding to the eigenvalue $1$, and that the other eigenvalues are less than unity, which is a sufficient condition for convergence \cite{cohen1992stability}. An example of a learnt filterbank autoencoder that does not generate a valid wavelet biorthogonal basis is demonstrated in \figurename{~\ref{fig:bior2_1}}. The learnt filterbank had SRER of $230.69$ dB and $\Delta_{\text{pr}} = 3.49 \times 10^{-20}$, indicating that the PR condition is satisfied. However, the Lawton matrices corresponding to $\boldsymbol{h}$ and $\boldsymbol{\tilde{h}}$ were found to have three eigenvalues each with magnitudes greater than unity, indicating that the infinite cascade does not converge for either filter.\\
\indent We now consider parameter choices corresponding to members of the Cohen-Daubechies-Feauveau (CDF) family of symmetric biorthogonal wavelets \cite{cohen1992biorthogonal}. Cohen et al. showed that, for symmetric filters $\boldsymbol{h}$ and $\boldsymbol{\tilde{h}}$ satisfying (\ref{eqn:bior_form}), the pair of vanishing moments $p$ and $\tilde{p}$, and the pair of lengths $l$ and $\tilde{l}$ must have the same parity (cf. Proposition 6.1 of \cite{cohen1992biorthogonal}), i.e., if $p$ is even, then $\tilde{p}$ must also be even; and if $p$ is odd, then $\tilde{p}$ must also be odd. The filter-lengths $l$ and $\tilde{l}$ must also follow the same pattern. Using this result to set the parameters, one could construct symmetric biorthogonal wavelets belonging to the CDF family. Two types of wavelets were constructed in \cite{cohen1992biorthogonal} using this result: spline biorthogonal wavelets and symmetric biorthogonal wavelet pairs having similar support sizes.\\
\indent Spline biorthogonal wavelets are obtained by setting the synthesis scaling filter $\boldsymbol{\tilde{h}}$ as the $\tilde{p}^{\text{th}}$-order discrete B-spline, and learning an appropriate analysis scaling filter with $p$ vanishing moments. Since the synthesis filter is fixed, each setting of $p$ yields a unique analysis filter satisfying the PR-$1$ condition. A higher value of $p$ yields a smoother analysis wavelet. The lengths of $\boldsymbol{\tilde{h}}$ and $\boldsymbol{h}$ are $\tilde{p} + 1$ and $2p + \tilde{p} - 1$, respectively. Spline biorthogonal wavelet learning is illustrated in \figurename{~\ref{fig:bior2_2}}, corresponding to the choices $\tilde{p} = 2, \tilde{l} = 3$, and setting $p = 2, 4$ and $6$, resulting in a filter $\boldsymbol{h}$ of length $5, 9$ and $13$, respectively. The coefficients of the filters $\boldsymbol{h}$ and $\boldsymbol{\tilde{h}}$ were normalized to have a sum equal to $\sqrt{2}$, and the filters $\boldsymbol{g}$ and $\boldsymbol{\tilde{g}}$ were rescaled appropriately. The learnt filterbanks had an SRER greater than $210$ dB and $\Delta_{\text{pr}}$ less than $3 \times 10^{-18}$. The wavelet corresponding to the choice $p = \tilde{p} = 2$, also known as the CDF 5/3 wavelet, is used in the JPEG-2000 image compression scheme, and uses only integer coefficients, avoiding quantization noise and providing lossless compression. As expected, the smoothness of the learnt scaling and wavelet functions increases with an increase in the number of vanishing moments.\\
\indent Cohen et al. also constructed another class of symmetric biorthogonal wavelets by specifying lengths $l$ and $\tilde{l}$ close to each other. For example, setting $p = 4, \tilde{p} = 4, l = 9, \tilde{l} = 7$ results in the CDF 9/7 wavelets used in the JPEG-2000 standard for lossy image compression. We considered the same set of parameters to learn a symmetric biorthogonal filterbank autoencoder. Symmetry was enforced using the symmetric basis expansion trick described in Section \ref{subsec:symm_ortho}. The learnt filterbank had an SRER of $209.95$ dB and $\Delta_{\text{pr}} = 4.21 \times 10^{-18}$, and the resulting biorthogonal scaling and wavelet functions are shown in \figurename{~\ref{fig:bior_4_4}}(a). Interestingly, for these parameter settings, regardless of the initialization and training dataset, we found that the learnt filterbank converged to the same solution. The learnt biorthogonal scaling and wavelet functions are almost identical to the CDF 9/7 counterpart. Thus, the learning approach discovered the CDF 9/7 wavelets. When the constraint of symmetry was relaxed, different solutions were obtained, which also changed with the initialization. An example of the learnt biorthogonal scaling and wavelet functions is shown in \figurename{~\ref{fig:bior_4_4}}(b), which does not exhibit symmetry. The learnt filterbank had an SRER of $212.13$ dB and $\Delta_{\text{pr}} = 2.5 \times 10^{-18}$.
\section{Conclusions}
\label{sec:conclusions}
We have introduced a learning approach to the problem of wavelet design, by viewing the two-channel perfect reconstruction filterbank as a convolutional autoencoder. Our approach leverages the well-established connection between perfect reconstruction filterbanks (PRFBs) and wavelets to reduce the problem to one of designing and training a filterbank autoencoder. The requirement of a certain number of {\it vanishing moments} on the wavelets is imposed by splitting the autoencoder filters into fixed and learnable parts, with the fixed part directly accounting for the vanishing moments, and the learnable part accounting for the other properties. Random Gaussian data is used to train the autoencoder by minimizing a mean-squared error (MSE) loss function. We showed that a near-zero MSE loss for a Gaussian dataset implies that the learnt filters satisfy the PR property with high probability. A learning rate schedule and stopping criteria were introduced to ensure that the learnt filterbank autoencoders are PR for all practical purposes. The training algorithm was validated by learning PRFBs with and without the vanishing moments constraint. We observed that imposing orthogonality and biorthogonality on the filterbank speeds up training considerably. This is expected because the search space of filters reduces when the constraints are imposed. Several members from the Daubechies family of minimum-phase, finite-support, orthogonal wavelets and the CDF family of symmetric biorthogonal wavelets were learnt by setting the filter lengths and vanishing moments of the filterbank autoencoder model appropriately. We also demonstrated that our framework learns wavelets outside these families. Imposing constraints allows us to learn wavelets with the desired properties, effectively making the search for optimal wavelets a computational problem. 
Overall, the framework we have proposed is cogent and allows us to leverage modern machine learning tools and optimization tricks to address a problem that has been primarily approached using analytical tools.\\
\indent Throughout this paper, we focused on 1-D wavelet learning. One could ask if the 2-D counterpart would be amenable to a similar design strategy. The answer lies in whether one is interested in designing separable or nonseparable wavelets/filterbanks. The separable 2-D design could be accomplished by a relatively straightforward extension of the 1-D learning framework. However, nonseparable wavelet design with constraints on symmetry and vanishing moments is not an easy problem. Incorporating vanishing moments using the factorization trick proposed in this paper does not carry over to 2-D. The challenge is of a fundamental nature because the roots of 2-D polynomials in general lie on curves and not points as in the 1-D case. Further, orientation plays an important role in the 2-D design aspect. Developing the 2-D counterpart of the wavelet learning framework is a fertile direction for further research.
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{figures/dhruv}}]{Dhruv Jawali}
received the Bachelor of Technology degree from the Department of Computer Science and Engineering, National Institute of Technology Goa, India, in 2014. He worked as a software developer at the Samsung Research Institute, Bangalore from 2014-2015. He enrolled into the PhD program at the National Mathematics Initiative, Indian Institute of Science (IISc) in August 2015, and has been working at the Spectrum Lab, Department of Electrical Engineering ever since. His research interests include wavelet theory, deep neural networks, and sparse signal processing.
\end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{figures/abhishek.png}}]{Abhishek Kumar}
received the Bachelor of Technology degree in 2014 from the Department of Electrical Engineering, Indian Institute of Technology (IIT) Indore, India. He worked as a control systems engineer in research and development division of Endurance Technologies Ltd, Aurangabad, India, from 2014 to 2016. He received the Masters in Artificial intelligence degree from Indian Institute of science (IISc), Bangalore, India, in 2019. He is currently a graduate student in the Department of Electrical and Computer Engineering at Rice University, Houston, USA. His research interest include signal processing, wireless networks, deep learning on graphs and reinforcement learning.
\end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{figures/chandra_sir}}]{Chandra Sekhar Seelamantula}
(M'99--SM'13) received the Bachelor of Engineering degree in 1999 with a Gold Medal and the Best Thesis Award from the Osmania University College of Engineering, India, with a specialization in Electronics and Communication Engineering. He received the Ph.D. degree from the Department of Electrical Communication Engineering, Indian Institute of Science (IISc), Bangalore, in 2005. During April 2005 -- March 2006, he was a Technology Consultant for M/s. ESQUBE Communication Solutions Private Limited, Bangalore, and developed proprietary audio coding solutions. During April 2006 -- July 2009, he was a Postdoctoral Fellow in the Biomedical Imaging Group, Ecole Polytechnique F\'ed\'erale de Lausanne (EPFL), Switzerland, where he specialized in the fields of biomedical imaging, splines, wavelets, and sampling theories. In July 2009, he joined the Department of Electrical Engineering, IISc., where he is currently an Associate Professor and directs the Spectrum Lab. He is also an Associate Faculty in the Centre for Neuroscience, IISc. He is currently the Chair of the IEEE Signal Processing Society Bangalore Chapter, a Senior Area Editor of the IEEE Signal Processing Letters, an Associate Editor of IEEE Transactions on Image Processing, a member of the IEEE Technical Committee on Computational Imaging, and an Area Chair of IEEE International Conference on Acoustics, Speech, and Signal Processing 2021. He received the Prof. Priti Shankar Teaching Award from IISc in 2013. His research interests include signal processing, sampling theory, inverse problems in computational imaging, and machine learning.
\end{IEEEbiography}
\appendices
\section{Proof of Proposition 1}
\label{sec:appendix_form}
\setcounter{prop}{0}
\begin{prop}
\label{prop:loss_form_app}
For a set of filters $$\Theta= \left\{ \boldsymbol{h}, \boldsymbol{\tilde{g}} \in \mathbb{R}^{l}, \boldsymbol{\tilde{h}}, \boldsymbol{g} \in \mathbb{R}^{\tilde{l}} \right\},$$ and a dataset $\mathcal{X}= \{ \boldsymbol{x}_j \in \mathbb{R}^{s} \}_{j = 1}^{m}$, there exists a matrix $\boldsymbol{B} \in \mathbb{R}^{s \times s}$ such that the loss function defined in Equation~(21) in the main document can be expressed as
\begin{align}
\label{eqn:loss_form_app}
\mathcal{L}(\mathcal{X}; \Theta) = \frac{1}{m}\sum_{j = 1}^{m} \|\boldsymbol{B} \boldsymbol{x}_j\|_2^2.
\end{align}
If the filters form a PRFB, then $\boldsymbol{B} = \boldsymbol{0}_s$ and vice versa.
\end{prop}
\begin{IEEEproof}
The linear convolution of $\boldsymbol{x} \in \mathbb{R}^{s}$ with $\boldsymbol{h} \in \mathbb{R}^{l}$ may be expressed as the matrix operation $\boldsymbol{H} \boldsymbol{x}$, where $\boldsymbol{H} \in \mathbb{R}^{(l + s - 1) \times s}$ is a Toeplitz matrix constructed from $\boldsymbol{h}$.
If $\tilde{l} < l$, we pad the filters of length $\tilde{l}$ with $\left\lfloor \frac{l - \tilde{l}}{2} \right\rfloor$ zeros on the left and $\left\lceil \frac{l - \tilde{l}}{2} \right\rceil$ on the right, and vice versa. In both cases, the net delay caused by the filterbank is $\tau - 1$, where $\tau = \max(l, \tilde{l})$. Effectively, the output of the filterbank can be expressed as follows:
\begin{align*}
\boldsymbol{\hat{x}}_j^{\prime}(\Theta) = \left(\boldsymbol{\tilde{H}} \boldsymbol{D} \boldsymbol{H} + \boldsymbol{\tilde{G}} \boldsymbol{D} \boldsymbol{G} \right) \boldsymbol{x}_j,
\end{align*}
where $\boldsymbol{H}, \boldsymbol{G} \in \mathbb{R}^{(s + \tau - 1) \times s}$ and $\boldsymbol{\tilde{H}}, \boldsymbol{\tilde{G}} \in \mathbb{R}^{(s + 2 \tau - 2) \times (s + \tau - 1)}$ are linear convolution matrices corresponding to $\boldsymbol{h}, \boldsymbol{g}, \boldsymbol{\tilde{h}}$ and $\boldsymbol{\tilde{g}}$, respectively, and $\boldsymbol{D} \in \mathbb{R}^{(s + \tau - 1) \times (s + \tau - 1)}$ represents the cascade of the downsample and upsample operations. The cascade results in the odd-indexed entries of the input becoming $0$, while the even-indexed entries remain unchanged. Hence, $\boldsymbol{D}$ is a diagonal matrix, with entries $\boldsymbol{D}_{i, i} = 1$ for $i$ even, and $\boldsymbol{D}_{i, i} = 0$ for $i$ odd. The dimensions of $\boldsymbol{\hat{x}}_j^{\prime}(\Theta)$ do not match with that of $\boldsymbol{x}_j$ because of the linear convolution operation. Therefore, the loss is computed considering a truncated version of $\boldsymbol{\hat{x}}_j^{\prime}(\Theta)$, denoted as $\boldsymbol{\hat{x}}_j(\Theta)$. More precisely, since the overall delay of the filterbank is $\tau - 1$, the middle portion of $\boldsymbol{\hat{x}}_j^{\prime}(\Theta)$ is extracted by pre-multiplying it with a matrix $\boldsymbol{P} \in \mathbb{R}^{s \times (s + 2 \tau - 2)}$ defined as
$
\boldsymbol{P} = \begin{bmatrix} \boldsymbol{Z}_{\tau - 1},
\boldsymbol{I}_{s}, \boldsymbol{Z}_{\tau - 1}
\end{bmatrix}
$, where $\boldsymbol{Z}_{\tau - 1}$ is a zero matrix of dimensions $s \times (\tau - 1)$. The preceding considerations lead to the following expression for the loss $$\mathcal{L}(\mathcal{X}; \Theta) = \frac{1}{m}\sum_{j = 1}^{m} \|\boldsymbol{B} \boldsymbol{x}_j\|_2^2,$$
where $$\boldsymbol{B} = \boldsymbol{I}_s - \boldsymbol{P} \left(\boldsymbol{\tilde{H}} \boldsymbol{D} \boldsymbol{H} + \boldsymbol{\tilde{G}} \boldsymbol{D} \boldsymbol{G} \right).$$ When $\boldsymbol{B} = \boldsymbol{0},$ perfect reconstruction is achieved because the net transfer function of the filterbank autoencoder equals the identity. Consequently, $$ \boldsymbol{P} \left(\boldsymbol{\tilde{H}} \boldsymbol{D} \boldsymbol{H} + \boldsymbol{\tilde{G}} \boldsymbol{D} \boldsymbol{G} \right) = \boldsymbol{I}_s.$$ The diagonal entries of $\boldsymbol{B}$ correspond to the PR-1 conditions and the off-diagonal ones correspond to the PR-2 conditions.
\end{IEEEproof}
\section{Proof of Theorem \ref{thm:conc_loss_app}}
\label{sec:appendix_conc}
\setcounter{theorem}{2}
\begin{theorem}
\label{thm:conc_loss_app}
For the loss function $\mathcal{L}$ defined in \eqref{eqn:loss_form_app}, where $\mathcal{X}$ comprises standard Gaussian vectors, the deviation of the loss from its expected value is bounded in probability as follows:
\begin{align*}
\mathbb{P}(\big| \mathcal{L} - \mathbb{E} \{ \mathcal{L} \} \big| \geq k) \leq \begin{cases}
2 e^{\frac{-k^2 s}{8 \|\boldsymbol{B}\|_{2}^{2} \|\boldsymbol{B}\|_{\textsc{F}}^{2}}}, \, &k \leq \|\boldsymbol{B}\|_{\textsc{F}}^2, \\
2 e^{\frac{-k s}{8 \|\boldsymbol{B}\|_{2}^{2}}}, \, &k > \|\boldsymbol{B}\|_{\textsc{F}}^2.
\end{cases}
\end{align*}
\end{theorem}
\begin{IEEEproof}
Here, we provide the key results together with their proofs that ultimately lead to the proof of Theorem \ref{thm:conc_loss_app}. To begin with, we analyze the concentration of $\boldsymbol{Bx}$ for $\boldsymbol{B} \in \mathbb{R}^{s \times s}$ and $\boldsymbol{x} \sim \mathcal{N}(\boldsymbol{0}, \boldsymbol{I}_s)$.
\begin{lemma}
\label{thm:gen_annulus}
Let $\boldsymbol{x} \sim \mathcal{N}(\boldsymbol{0}, \boldsymbol{I}_s)$ and $\boldsymbol{B} \in \mathbb{R}^{s \times s}$ be a deterministic matrix. Then, for $k \geq 0$,
\begin{align*}
\mathbb{P}\left( \big| \| \boldsymbol{B} \boldsymbol{x} \|_2^2 - \|\boldsymbol{B}\|_{\textsc{F}}^2
\big| \geq k \right) \leq \begin{cases}
2 e^{\frac{-k^2}{8 \|\boldsymbol{B}\|_{2}^{2} \|\boldsymbol{B}\|_{\textsc{F}}^{2}}}, &k \leq \|\boldsymbol{B}\|_{\textsc{F}}^2, \\
2 e^{\frac{-k}{8 \|\boldsymbol{B}\|_{2}^{2}}}, &k > \|\boldsymbol{B}\|_{\textsc{F}}^2.
\end{cases}
\end{align*}
\end{lemma}
\begin{IEEEproof}
Let $X:= \|\boldsymbol{B} \boldsymbol{x} \|_2^2 - \|\boldsymbol{B}\|_{\text{F}}^2$. The moment generating function (m.g.f.) of $X$ is
\begin{align*}
M_{X}(t) = \mathbb{E}\left\{ e^{t X} \right\} = \mathbb{E} \left\{ e^{t (\boldsymbol{x}^\textsc{T}\boldsymbol{B}^\textsc{T}\boldsymbol{B}\boldsymbol{x} - \|\boldsymbol{B}\|_{\text{F}}^2 )} \right\}.
\end{align*}
The spectral decomposition of $\boldsymbol{B}^\textsc{T}\boldsymbol{B}$ is written as $$\boldsymbol{B}^\text{T}\boldsymbol{B} = \boldsymbol{U} \Lambda \boldsymbol{U}^{\text{T}} = \sum_{i = 1}^{s} \lambda_i \boldsymbol{u}_i \boldsymbol{u}_i^{\text{T}},$$
where $\lambda_i$ and $\boldsymbol{u}_i$ are the corresponding $i^{\text{th}}$ eigenvalue and eigenvector, respectively, and $\boldsymbol{U}$ is an orthogonal matrix with the eigenvectors as its columns. Let $\sigma_i$ denote the $i^{\text{th}}$ singular value of $\boldsymbol{B}$. Since $\sigma_{i}^2 = \lambda_i$, and $\|\boldsymbol{B}\|_{\text{F}}^2 = \sum_{i = 1}^{s} \sigma_i^2$,
\begin{align*}
M_{X}(t) = \mathbb{E} \left\{ e^{t \sum_{i = 1}^{s} \left[ \sigma_i^2 (\boldsymbol{u}_i^{\text{T}} \boldsymbol{x} )^2 - \sigma_i^2 \right] } \right\}.
\end{align*}
The vector $\boldsymbol{U}^{\text{T}} \boldsymbol{x} = \left[ \boldsymbol{u}_1^{\text{T}} \boldsymbol{x}, \ldots, \boldsymbol{u}_s^{\text{T}} \boldsymbol{x} \right]^{\textsc{T}}$ has the same distribution as $\boldsymbol{x}$ due to the rotation-invariance/unitary-invariance property of $\mathcal{N}(\boldsymbol{0}, \boldsymbol{I}_s)$. Therefore, $z_i := \boldsymbol{u}_i^{\text{T}} \boldsymbol{x}$, $i = 1, \ldots, s$, are independent and identically distributed Gaussian random variables with mean $0$ and variance $1$. Hence,
\begin{align*}
M_{X}(t) &= \mathbb{E} \left\{ e^{\sum_{i = 1}^{s} t \sigma_i^2 \left( z_i^2 - 1 \right)} \right\} = \prod_{i = 1}^{s} \mathbb{E} \left\{ e^{t \sigma_i^2 \left( z_i^2 - 1 \right)} \right\} \\
&= \prod_{i = 1}^{s} \frac{1}{\sqrt{2 \pi}} \int\limits_{-\infty}^{+\infty} e^{t \sigma_i^2 \left( z_i^2 - 1 \right) } \cdot e^{-\frac{z_i^2}{2}} \mathrm{d}z_i \\
&= \prod_{i = 1}^{s} \frac{e^{-t \sigma_i^2}}{\sqrt{2 \pi}} \int\limits_{-\infty}^{+\infty} e^{-\left(1 - 2 t \sigma_i^2 \right) \frac{z_i^2}{2}} \mathrm{d}z_i\\
&= \prod_{i = 1}^{s} \frac{e^{-t \sigma_i^2}}{(1 - 2 t \sigma_i^2)^{\frac{1}{2}}}, \qquad t \leq \frac{1}{2 \sigma_i^2}, i = 1, \ldots, s.
\end{align*}
We now establish that $X$ is a sub-exponential random variable by bounding the m.g.f., which is facilitated by the following inequality (Chapter 2, Example $2.8$ of \cite{wainwright2019high}):
\begin{align}
\label{eqn:up_bound}
\frac{e^{-x}}{(1 - 2x)^{\frac{1}{2}}} \leq e^{2 x^2}, \quad |x| \leq \frac{1}{4}.
\end{align}
The proof of \eqref{eqn:up_bound} is given in Appendix \ref{sec:proof_ineq}.
\noindent Hence,
\begin{align*}
M_{X}(t) \leq \prod_{i = 1}^{s} e^{2 t^2 \sigma_i^4} = e^{2 t^2 \sum\limits_{i = 1}^{s} \sigma_i^4}, \quad \text{ for } |t| \leq \frac{1}{4 \|\boldsymbol{B}\|_2^2}.
\end{align*}
Note that $\sum_{i = 1}^{s} \sigma_i^4 = \sum_{i = 1}^{s} \lambda_i^2 = \|\boldsymbol{B}^{\text{T}} \boldsymbol{B}\|_{\text{F}}^2$, and
\begin{align*}
\|\boldsymbol{B}^{\text{T}} \boldsymbol{B}\|_{\text{F}}^2 = \sum\limits_{i = 1}^{s} \| \boldsymbol{B}^{\text{T}} \boldsymbol{b}_i \|_2^2 \leq \sum_{i = 1}^{s} \|\boldsymbol{B}^{\text{T}}\|_2^2 \|\boldsymbol{b}_i \|_2^2 = \|\boldsymbol{B}\|_2^2 \|\boldsymbol{B}\|_{\text{F}}^2,
\end{align*}
where $\boldsymbol{b}_i$ is the $i^{\text{th}}$ column of $\boldsymbol{B}$, and the inequality follows from the definition of the matrix $2$-norm. Hence,
\begin{align}
\label{eqn:mgf_upper_bound}
M_{X}(t) \leq e^{2 t^2 \|\boldsymbol{B}\|_2^2 \|\boldsymbol{B}\|_{\text{F}}^2}, \quad \text{ for } |t| \leq \frac{1}{4 \|\boldsymbol{B}\|_2^2}.
\end{align}
The tail bounds are obtained from the m.g.f. using the Chernoff approach. For $t \geq 0$ and $k \geq 0$,
\begin{align*}
\mathbb{P}(X \geq k) = \mathbb{P}(e^{t X} \geq e^{t k}) \leq \frac{\mathbb{E}\{e^{t X} \} }{e^{t k}} = \frac{M_{X}(t)}{e^{t k}},
\end{align*}
using Markov's inequality. The Chernoff bound is obtained by minimizing $e^{-t k} M_{X}(t)$ with respect to $t \in \left(0, \frac{1}{4 \|\boldsymbol{B}\|_2^2} \right)$:
\begin{align*}
\log \mathbb{P}(X \geq k) &\leq \min_{t} \left\{ \log M_{X}(t) - t k\right\}\\
&\leq \min_{t} \left\{ 2 t^2 \|\boldsymbol{B}\|_2^2 \|\boldsymbol{B}\|_{\text{F}}^2 - t k \right\}.
\end{align*}
The minimum value of the parabola $g(t, k) := 2 t^2 \|\boldsymbol{B}\|_2^2 \|\boldsymbol{B}\|_{\text{F}}^2 - t k$ occurs at $t= \frac{k}{4 \|\boldsymbol{B}\|_2^2 \|\boldsymbol{B}\|_{\text{F}}^2}$. If $k \leq \|\boldsymbol{B}\|_{\text{F}}^2$, then $t \leq \frac{1}{4 \|\boldsymbol{B}\|_2^2}$, and the minimum in the interval $t \in \left(0, \frac{1}{4 \|\boldsymbol{B}\|_2^2} \right)$ is the global minimum. Hence
\begin{align}
\label{eqn:upper_dev_1}
\mathbb{P}(X \geq k) &\leq e^{-\frac{k^2}{8 \|\boldsymbol{B}\|_2^2 \|\boldsymbol{B}\|_{\text{F}}^2}}, \quad \text{ for } k \leq \|\boldsymbol{B}\|_{\text{F}}^2.
\end{align}
If $k > \|\boldsymbol{B}\|_{\text{F}}^2$, then $t > \frac{1}{4 \|\boldsymbol{B}\|_2^2}$, and the minimum in the interval occurs at the boundary point $t = \frac{1}{4 \|\boldsymbol{B}\|_2^2}$, since $g(t, k)$ is a monotonically decreasing function in the interval $t \in \left(0, \frac{1}{4 \|\boldsymbol{B}\|_2^2} \right)$, and for $k > \|\boldsymbol{B}\|_{\text{F}}^2$ we have
\begin{eqnarray}
\log \mathbb{P}(X \geq k) &\leq \frac{\|\boldsymbol{B}\|_{\text{F}}^2}{8 \|\boldsymbol{B}\|_2^2} - \frac{k}{4 \|\boldsymbol{B}\|_2^2} \leq \frac{-k}{8 \|\boldsymbol{B}\|_2^2}\nonumber\\
\Rightarrow \mathbb{P}(X \geq k) &\leq e^{\frac{-k}{8 \|\boldsymbol{B}\|_2^2}} \quad \text{ for } k > \|\boldsymbol{B}\|_{\text{F}}^2.
\label{eqn:upper_dev_2}
\end{eqnarray}
To obtain the lower deviates, let $Y = -X = \|\boldsymbol{B}\|_{\text{F}}^2 - \|\boldsymbol{B} \boldsymbol{x} \|_2^2$, and note that $M_Y(t)$ is only defined for $t > -\frac{1}{2 \|\boldsymbol{B}\|_{2}^2}$. In the interval $\left(-\frac{1}{2 \|\boldsymbol{B}\|_{2}^2}, \frac{1}{2 \|\boldsymbol{B}\|_{2}^2}\right)$, $M_Y(t) = M_X(-t)$, and hence, the inequality in \eqref{eqn:up_bound} can be used to upper-bound $M_Y(t)$. Thus,
\begin{align*}
M_{Y}(t) \leq e^{2 t^2 \|\boldsymbol{B}\|_2^2 \|\boldsymbol{B}\|_{\text{F}}^2}, \quad \text{ for } |t| \leq \frac{1}{4 \|\boldsymbol{B}\|_2^2},
\end{align*}
which is similar to \eqref{eqn:mgf_upper_bound}. The Chernoff bounds for $Y$ are the same as those for $X$, and $\mathbb{P}(Y \geq k) = \mathbb{P}(X \leq -k)$. Hence,
\begin{align}
\label{eqn:lower_dev}
\mathbb{P}(X \leq -k) &\leq \begin{cases}
e^{-\frac{k^2}{8 \|\boldsymbol{B}\|_2^2 \|\boldsymbol{B}\|_{\text{F}}^2}}, \quad &k \leq \|\boldsymbol{B}\|_{\text{F}}^2, \\
e^{\frac{-k}{8 \|\boldsymbol{B}\|_2^2}}, \quad &k > \|\boldsymbol{B}\|_{\text{F}}^2.
\end{cases}
\end{align}
Consolidating \eqref{eqn:upper_dev_1}, \eqref{eqn:upper_dev_2}, and \eqref{eqn:lower_dev}, and invoking the \emph{union bound}, we have the required result.
\end{IEEEproof}
Having established the concentration of $\boldsymbol{Bx}$, we next establish similar concentration inequalities for the loss function $\mathcal{L}(\mathcal{X}; \Theta) = \frac{1}{s} \sum_{i = 1}^{s} \|\boldsymbol{B} \boldsymbol{x_i} \|_2^2$, where $\boldsymbol{x}_i \sim \mathcal{N}(\boldsymbol{0}, \boldsymbol{I}_s)$ are independent and identically distributed. Let $X_i:= \|\boldsymbol{B} \boldsymbol{x_i} \|_2^2 - \|\boldsymbol{B}\|_{\text{F}}^2$. The m.g.f. of the centered loss function
$$\mathcal{L}_c := \mathcal{L}(\mathcal{X}; \Theta) - \mathbb{E} \{ \mathcal{L}(\mathcal{X}; \Theta) \} = \left(\frac{1}{s} \sum_{i = 1}^{s} \|\boldsymbol{B} \boldsymbol{x_i} \|_2^2 \right) - \|\boldsymbol{B}\|_{\text{F}}^2$$
is given by
\begin{align*}
M_{\mathcal{L}_c}(t) &= \mathbb{E} \left\{ e^{t \mathcal{L}_c} \right\} = \mathbb{E} \left\{ e^{\frac{t}{s} \sum_{i = 1}^{s} X_i} \right\}\\
&= \prod_{i = 1}^s \mathbb{E} \left\{ e^{\frac{t}{s} X_i} \right\} = \prod_{i = 1}^{s} M_{X_i}\left( \frac{t}{s} \right).
\end{align*}
Invoking the inequality on the m.g.f. in \eqref{eqn:mgf_upper_bound}, we obtain
\begin{align*}
M_{\mathcal{L}_c}(t) \leq e^{2 \frac{t^2}{s} \|\boldsymbol{B}\|_{2}^2 \|\boldsymbol{B}\|_{\text{F}}^2}, \quad \text{ for } |t| \leq \frac{s}{4 \|\boldsymbol{B}\|_{2}^2}.
\end{align*}
Hence, the Chernoff bound for the upper deviate is given by
\begin{align*}
\mathbb{P}(\mathcal{L}_c \geq k) &\leq \begin{cases}
e^{-\frac{k^2 s}{8 \|\boldsymbol{B}\|_2^2 \|\boldsymbol{B}\|_{\text{F}}^2}}, \quad &k \leq \|\boldsymbol{B}\|_{\text{F}}^2, \\
e^{\frac{-k s}{8 \|\boldsymbol{B}\|_2^2}}, \quad &k > \|\boldsymbol{B}\|_{\text{F}}^2.
\end{cases}
\end{align*}
Similarly, considering $- \mathcal{L}_c$ instead of $\mathcal{L}_c$ gives us the bound for the lower deviate. Combining the two, we get
\begin{align*}
\mathbb{P}(\big| \mathcal{L}_c \big| \geq k) \leq \begin{cases}
2 e^{\frac{-k^2 s}{8 \|\boldsymbol{B}\|_{2}^{2} \|\boldsymbol{B}\|_{\textsc{F}}^{2}}}, \, &k \leq \|\boldsymbol{B}\|_{\textsc{F}}^2, \\
2 e^{\frac{-k s}{8 \|\boldsymbol{B}\|_{2}^{2}}}, \, &k > \|\boldsymbol{B}\|_{\textsc{F}}^2.
\end{cases}
\end{align*}
Thus, Theorem \ref{thm:conc_loss_app} stands proved.
\end{IEEEproof}
\section{Proof of the inequality in \eqref{eqn:up_bound}}
\label{sec:proof_ineq}
Let $f(x) = e^{-x} (1 - 2x)^{-\frac{1}{2}}$ and $ g(x) = e^{2 x^2}$. Then, $f^{\prime}(x) = 2x e^{-x} (1 - 2x)^{-\frac{3}{2}}$ and $ g^{\prime}(x) = 4 x e^{2 x^2}$. Let
$$
h(x) = \frac{g^{\prime}(x)}{f^{\prime}(x)} = 2 e^{2x^2 + x} (1 - 2x)^{\frac{3}{2}}.
$$
For $x < \frac{1}{2}$, $h(x)$ is strictly positive. The derivative of $h(x)$ is
$$
h^{\prime}(x) = 2 e^{2x^2 + x} (1 - 2x)^{\frac{1}{2}} \left( - 8 x^2 + 2x - 2 \right).
$$
The factor $\left( - 8 x^2 + 2x - 2 \right)$ is strictly negative for all values of $x$. Hence, $h^{\prime}(x)$ is strictly negative for $x < \frac{1}{2}$, and therefore $h(x)$ is monotonically decreasing in the interval $(-\infty, \frac{1}{2})$. Consequently, for $x \leq \frac{1}{4}$, $h(x) \geq h(\frac{1}{4}) \geq 1$. Hence, for all $|x| \leq \frac{1}{4}$, $h(x) \geq 1$. Further, we note that both $f^{\prime}(x)$ and $g^{\prime}(x)$ have the same sign. Combining this property with $h(x) \geq 1$ gives $
|g^{\prime}(x)| \geq |f^{\prime}(x)|.$
Since $f(0) = g(0) = 1$, integrating over the intervals $\left[-\frac{1}{4}, 0\right]$ and $\left[0, \frac{1}{4}\right]$, we obtain that $g(x) \geq f(x)$ for $x \in \left[-\frac{1}{4}, \frac{1}{4}\right]$.
\IEEEQEDopen
\end{document} |
\begin{document}
\title{Strongly vertex-reinforced jump process on a complete graph}
\author{Olivier Raimond}
\address{(O. Raimond) Mod\'elisation al\'eatoire de l'Universit\'e Paris Nanterre (MODAL'X), 92000 Nanterre, France}
\email{[email protected]}
\author{Tuan-Minh Nguyen}
\address{(T.M. Nguyen) School of Mathematics, Monash University, 3800 Victoria, Australia}
\email{[email protected]}
\date{\today}
\keywords{Vertex-reinforced jump processes; nonlinear reinforcement; random walks with memory; stochastic approximation; non convergence to unstable equilibria.}
\subjclass[2010]{60J55, 60J75}
\begin{abstract}
The aim of our work is to study vertex-reinforced jump processes with super-linear weight function $w(t)=t^{\alpha}$, for some $\alpha>1.$ On any complete graph $G=(V,E)$, we prove that there is one vertex $v\in V$ such that the total time spent at $v$ almost surely tends to infinity while the total time spent at the remaining vertices is bounded.\\
\textbf{Résumé.} Le but de notre travail est d'étudier les processus de sauts renforcés par sites par une fonction de poids sur-linéaire $w(t)= t^{\alpha}$, avec $\alpha>1$. Sur tout graphe complet $G = (V, E)$, on montre qu'il y a un sommet $v \in V$ tel que le temps total passé en $v$ tend presque sûrement vers l'infini tandis que le temps total passé dans les sommets restants est borné.
\end{abstract}
\title{Strongly vertex-reinforced jump process on a complete graph}
\section{Introduction}\label{sec:introduction}
Let $G=(V,E)$ be a finite connected, undirected graph without loops, where $V=\{1,2,...,d\}$ and $E$ respectively stand for the set of vertices and the set of edges. We consider a continuous-time jump process $X$ on the vertices of $G$ such that the law of $X$ satisfies the following condition:
\begin{enumerate}
\item[i.] at time $t\le 0$, the local time at each vertex $v\in V$ has a positive initial value $\ell^{(v)}_0$,
\item[ii.] at time $t>0$, given the $\sigma$-field $\mathcal{F}_t$ generated by $\{X_{s},s\le t\}$, the probability that there is a jump from $X_t$ during $(t,t+h]$ to a neighbour $v$ of $X_t$ (i.e. $\{v,X_t\}\in E$) is given by
$$w\left(\ell^{(v)}_0+\int_0^t \mathbf{1}_{\{X_s=v\}}{\rm d} s \right)\cdot h+o(h)$$
as $h\to 0$, where $w:[0,\infty)\to(0,\infty)$ is a weight function.
\end{enumerate}
For each vertex $v\in V$, we denote by $L(v,t)=\ell^{(v)}_0+\int_0^t\mathbf{1}_{\{X_s=v\}}{\rm d} s$ the local time at $v$ up to time $t$ and let
$$Z_t=\left(\frac{L(1,t)}{\ell_0+t},\frac{L(2,t)}{\ell_0+t},... ,\frac{L(d,t)}{\ell_0+t}\right)$$ stand for the (normalized) occupation measure on $V$ at time $t$, where
$\ell_0=\ell^{(1)}_0+\ell^{(2)}_0+\cdots +\ell^{(d)}_0$.
In our work, we consider the weight function $w(t)=t^{\alpha}$, for some $\alpha>0$. The jump process $X$ is called \textit{strongly vertex-reinforced} if $\alpha>1$, \textit{weakly vertex-reinforced} if $\alpha<1$ or \textit{linearly vertex-reinforced} if $\alpha=1$.
The model of discrete time edge-reinforced random walks (ERRW) was first studied by Coppersmith and Diaconis in their unpublished manuscripts \cite{Coppersmith86} and later the model of discrete time vertex-reinforced random walks (VRRW) was introduced by Pemantle in \cite{Pemantle88} and \cite{Pemantle92}. Several remarkable results about localization of ERRW and VRRW were obtained in \cite{Volkov01}, \cite{Tarres04}, \cite{Volkov06}, \cite{Benaim13} and \cite{Cotar2015}. Wendelin Werner then proposed a model in continuous time so-called vertex reinforced jump processes (VRJP) whose linear case was first investigated by Davis and Volkov in \cite{Davis02} and \cite{Davis04}. In particular, these authors showed in \cite{Davis04} that linearly VRJP on any finite graph with $d$ vertices is recurrent, i.e. all local times are almost surely unbounded and the normalized occupation measure process converges almost surely to an element in the interior of the $(d-1)$ dimensional standard unit simplex as time goes to infinity. In \cite{Sabot15}, Sabot and Tarr\`es also obtained the limiting distribution of the centred local times process for linearly VRJP on any finite graph and showed that linearly VRJP is
actually a mixture of time-changed Markov jump processes. Many aspects of linearly VRJP as well as its relations to ERRW and the supersymmetric hyperbolic sigma model have been well studied in recent years (see, e.g. \cite{Collevecchio2009}, \cite{Basdevant2012}, \cite{Disertori14}, \cite{Merkl16}, \cite{Sabot15}, \cite{Sabot15b}, \cite{Sabot2019} \cite{Sabot17}, \cite{Zeng16}, and \cite{Lupu2018}).
The main aim of our paper is to prove that strongly VRJP on a complete graph $G=(V,E)$ almost surely have an infinite local time at some vertex $v$, while the local times at the remaining vertices remain bounded. The main technique of our proofs is based on the method of stochastic approximation (see, e.g. \cite{Brandiere96, Benaim96, Benaim97, Benaim99}). We organize the present paper as follows.
In Section \ref{sec:outline}, our main Theorem and an outline of its proof are given.
In Section \ref{sec:notation}, we give some preliminary notations as well as some results of stochastic calculus being used throughout the paper. We show in Section \ref{sec:Dyn} that the occupation measure process of strongly VRJP on a complete graph is an asymptotic pseudo-trajectory of a flow generated by a vector field. We then prove the convergence towards stable equilibria in Section \ref{sec:convergence} and the non convergence towards unstable equilibria in Section \ref{sec:nonCV}, which yields our above-mentioned main result.
\section{Main result and outline of proof}\label{sec:outline}
The main result of our paper is the following theorem:
\begin{theorem}\label{thm:mainresult}
Assume that $X$ is a strongly VRJP in a complete graph with weight function $w(t)=t^{\alpha}$, for some $\alpha>1$. Then there almost surely exists a vertex such that its local time tends to infinity while the local times at the remaining vertices remain bounded.
\end{theorem}
The main technique to prove this theorem is based on the method of stochastic approximation (see, e.g. \cite{Brandiere96, Benaim96, Benaim97, Benaim99}). The core idea of this method is to describe the asymptotic behaviour of stochastic processes (which are stochastic algorithms in the discrete setting) in terms of the behaviour of ordinary differential equations. When the sample path of a stochastic process is asymptotically close to the solution of an autonomous differential equation, it is reasonable to investigate the relation between the limiting set of this process and the set of equilibria of the associated differential equation.
Let us explain how we make use of this idea in the context of VRJP in a complete graph with super-linear weight function $w(t)=t^{\alpha}$, for some $\alpha>1$. We first make the time change: for $t>0$, set $\tilde{Z}_t=Z_{e^t-\ell_0}$ and $\tilde{X}_t=X_{e^t-\ell_0}$.
The occupation measure $\tilde{Z}$ satisfies the following equation: $$\frac{{\rm d}\tilde{Z}^i_t}{{\rm d} t}=-\tilde{Z}^i_t+\mathbf{1}_{\{\tilde{X}_t=i\}}.$$
Let now $t$ be a large time. Then, for every fixed $T$, the process $(X_{t+s})_{s\in [0,T]}$ evolves almost like a Markov process with generator $A_t=A(L(\cdot,t))$ (with $A(\lambda \ell)=\lambda^\alpha A(\ell)$).
It will be remarked in Section \ref{sec:Dyn} that this diffusion has a unique invariant probability $\varphi_t=\varphi(Z_t)$.
Such properties will allow us to prove Theorem \ref{cvthrm} in which $\tilde{Z}$ is an asymptotic pseudo-trajectory of a semi-flow $\Phi$ generated by the vector field $F(z)=-z+\varphi(z)$, i.e. for all $T>0$, the trajectory $(\tilde{Z}_{t+s}:\;s\in [0,T])$ is close as $t\to\infty$ to the trajectory of the semi-flow $(\Phi_s(\tilde{Z}_t):\;s\in [0,T])$.
In Section \ref{sec:convergence}, using Theorem \ref{cvthrm} with the fact that there is a strict Lyapounov function $H$ for the vector field $F$ (i.e. a function such that $\langle F(z),\nabla H(z)\rangle >0$ if and only if $F(z)\ne 0$), we will show that almost surely the limit set of $Z$ is a connected subset of $\mathcal{C}$, the set of equilibria of $F$ (i.e. the set of all $z$ such that $F(z)=0$). Combining with the fact that the set $\mathcal{C}$ is finite, this will prove Theorem \ref{thm:CVeq} stating the a.s. convergence of $Z$ towards an equilibrium. In Section \ref{sec:convergence}, after having remarked that the stable equilibria of $F$ are Dirac measures $\delta_i$, $i\in V$, we will prove Theorem \ref{thm:localization} asserting that a.s. on the event $Z$ converges to $\delta_i$, $X$ eventually localizes at $i$, i.e. $L(i,\infty)=\infty$ and $\sum_{j\ne i} L(j,\infty)<\infty$.
Finally in Section \ref{sec:nonCV} we will prove Theorem \ref{thm:nonCV_VRJP} wherein a.s. $Z$ does not converge towards an unstable equilibrium.
In preparation for the proof of this theorem, we will demonstrate Theorem \ref{THM:nonCV} which is a general non convergence theorem for a class of finite variation c\`adl\`ag processes.
To do so, we will follow (and correct) arguments from the proof of a theorem by Brandi\`ere and Duflo (see \cite{Brandiere96} or \cite{Duflo1996}), but use a new idea as follows. We will first show that, under additional assumptions, an asymptotic pseudo-trajectory converging towards an unstable equilibrium is attracted exponentially fast towards the unstable manifold of this equilibrium.
This will allow the proof of the non convergence theorem to be reduced to the case where the unstable equilibrium has no stable direction.
Theorem \ref{thm:nonCV_VRJP} will then permit to conclude the proof of Theorem \ref{thm:mainresult}.
\section{Preliminary notations and remarks}\label{sec:notation}
Throughout this paper, we denote by $\Delta$ and $T\Delta$ respectively the $(d-1)$ dimensional standard unit simplex in $\mathbb{R}^d$ and its tangent space, which are defined by
\begin{align*}
&\Delta=\{z=(z_1,z_2,...,z_d)\in\mathbb{R}^d:z_1+z_2+\cdots +z_d=1, z_j\ge0, j=1,2,\cdots ,d \},\\
&T\Delta=\{z=(z_1,z_2,...,z_d)\in\mathbb{R}^d:z_1+z_2+\cdots +z_d=0\}.
\end{align*}
Also, let $\|\cdot\|$ and $\left\langle\cdot,\cdot\right\rangle$ denote the Euclidean norm and the Euclidean scalar product in $\mathbb{R}^d$ respectively.
For a c\`adl\`ag process $Y=(Y_t)_{t\ge0}$, we denote by $Y_{t-}=\lim_{s\to t-}Y_s$ and $\Delta Y_t=Y_t-Y_{t-}$ respectively the left limit and the size of the jump of $Y$ at time $t$. Let $[Y]$ be as usual the \textit{quadratic variation} of the process $Y$. Note that, for a c\`adl\`ag finite variation process $Y$, we have $[Y]_t=\sum_{0<u\le t}(\Delta Y_u)^2$. In the next sections, we will use the following useful well-known results of stochastic calculus (see e.g. \cite{Jacod2003} and \cite{Protter}):
1. \textbf{Change of variables formula.} (see Theorem 31, p.~78 in \cite{Protter}) Let $A=(A^1_t,A^2_t,\dots,A^d_t)_{t\ge0}$ be a c\`adl\`ag finite variation process in $\mathbb{R}^d$ and let $f:\mathbb{R}^d\to \mathbb{R}$ be a $C^1$ function. Then for $ t\ge 0$,
$$f(A_t)-f(A_0)=\sum_{i=1}^d\int_0^t \partial_i f(A_{u-}) {\rm d} A^i_u +\sum_{0<u\le t}\left(\Delta f(A_u)-\sum_{i=1}^d\partial _i f(A_{u-})\Delta A_u^i\right).$$
2. Let $M=(M_t)_{t\ge0}$ be a c\`adl\`ag locally square-integrable martingale with finite variation in $\mathbb{R}$. A well-known result is that if ${\mathsf E}[[M]_t]<\infty$ for all $t$, then $M$ is a true martingale (see e.g. Corollary 3, p.~73 in \cite{Protter}).
The change of variable formula implies that
$$M_t^2=M_0^2+\int_0^t 2M_{s-}{\rm d} M_s+[M]_t.$$
Let $\langle M\rangle$ denote the \textit{angle bracket} of $M$, i.e. the unique predictable non-decreasing process such that $M^2-\langle M\rangle$ is a local martingale. Note that $[M]-\langle M\rangle$ is also a local martingale.
Let $H$ be a locally bounded predictable process and denote by $H\cdot M$ the c\`adl\`ag locally square-integrable martingale with finite variation defined by $(H\cdot M)_t=\int_0^t H_{s} dM_s$. Recall the following rules:
$$\langle H\cdot M\rangle_t=\int_0^t H^2_{s}{\rm d}\langle M\rangle_s \quad \text{ and }\quad [H\cdot M]_t=\int_0^t H^2_{s}{\rm d} [M]_s$$
(see Theorem 4.40, p.~48 and the statement 4.54, p.~55 in \cite{Jacod2003}). Recall also that $H\cdot M$ is a square integrable martingale if and only if for all $t>0$, ${\mathsf E}[\langle H\cdot M\rangle_t]<\infty$.
3. \textbf{Integration by part formula.} (see Corollary 2, p.~68 in \cite{Protter}) Let $X=(X_t)_{t\ge0}$ and $Y=(Y_t)_{t\ge0}$ be two c\`adl\`ag finite variation processes in $\mathbb{R}$. Then for $t\ge s\ge 0$,
$$X_tY_t-X_sY_s=\int_s^t X_{u-}{\rm d} Y_u+\int_s^t Y_{u-}{\rm d} X_u+[X,Y]_t-[X,Y]_s,$$
where we recall that $[X,Y]$ is the \textit{covariation} of $X$ and $Y$, computed as $[X,Y]_t=\sum_{0<u\le t}\Delta X_u \Delta Y_u$.
4. \textbf{Doob's maximal inequality.} (see Theorem 20, p.~11 in \cite{Protter}) Let $X=(X_t)_{t\ge0}$ be a c\`adl\`ag martingale adapted to a filtration $(\mathcal F_t)_{t\ge0}$. Then for any $p>1$ and $t\ge s\ge 0$,
$$ {\mathsf E}[\sup_{s\le u\le t}|X_u|^p \big|\mathcal F_s ]\le\left(\frac{p}{p-1}\right)^p{{\mathsf E}}[\vert X_t\vert^p\big|\mathcal F_s].$$
5. \textbf{Burkholder-Davis-Gundy inequality.} (see Theorem 48, p.~193 in \cite{Protter}) Let $X=(X_t)_{t\ge0}$ be a c\`adl\`ag martingale adapted to a filtration $(\mathcal F_t)_{t\ge0}$ such that $X_0=0$. For each $1\le p<\infty$ there exist positive constants $c_p$ and $C_p$ depending on only $p$ such that
$$\displaystyle c_p{{\mathsf E}}\left[ [X]^{p/2}_t\big|\mathcal F_s\right]\le{{\mathsf E}}\left[\sup_{s\le u\le t}|X_u|^p \big|\mathcal F_s\right]\le C_p{{\mathsf E}}\left[ [X]^{p/2}_t \big|\mathcal F_s\right].$$
\section{Dynamics of the occupation measure process\label{sec:Dyn}}
We study in this section the dynamics of the occupation process of VRJP on a complete graph with weight function $w(t)=t^{\alpha},\ \alpha>0$. In particular, we show in Theorem \ref{cvthrm} below that, after a time scaling, the occupation measure process is asymptotically close to the unique solution of an autonomous system of ordinary differential equations. Our approach is inspired by the theory of asymptotic pseudo-trajectories and stochastic approximation techniques introduced in \cite{Benaim99}.
For $t>0$ which is not a jumping time of $X$, we have
\begin{equation}\label{ode}
\frac{{\rm d} Z_t}{{\rm d} t}=\frac{1}{\ell_0+t}\left(-Z_t+I[{X_t}]\right),
\end{equation}
where for each matrix $M$, $M[j]$ is the $j$-th row vector of $M$ and $I$ is as usual
the identity matrix. Observe that the process $Z=(Z_t)_{t\ge0}$ always takes values in the interior of the standard unit simplex $\Delta$.
For fixed $t\ge0$, let $A_t$ be the $d$-dimensional infinitesimal generator matrix such that the $(i,j)$ element is defined by
$$
A^{i,j}_t:=\left\lbrace\begin{matrix}\mathbf{1}_{(i,j)\in E} w_t^{(j)}, \ \ \ \ \ \ i\neq j; \\ \displaystyle - \sum_{k\in V,(k,i)\in E} w_t^{(k)}, i=j,\end{matrix}\right.$$
where we have set $w^{(j)}_t=w(L(j,t))=L(j,t)^{\alpha}$ for each $j\in V$. Also, let $w_t=w^{(1)}_t+w^{(2)}_t+\cdots +w^{(d)}_t$.
Note that
$$\varphi_t:=\left(\frac{w^{(1)}_t}{w_t},\frac{w^{(2)}_t}{w_t},\cdots ,\frac{w^{(d)}_t}{w_t}\right)$$
is the unique invariant probability measure of $A_t$ in the sense that $\varphi_tA_t=0$. Since $\varphi_t$ can be rewritten as a function of $Z_t$, we will also use the notation $\varphi_t=\varphi(Z_t)$, where we define the function $\varphi: \Delta\to \Delta$, such that for each $z=(z_1,z_2,...,z_d)\in \Delta$,
$$\varphi(z)=\left( \frac{z_1^{\alpha}}{z_1^{\alpha}+\cdots +z_d^{\alpha}},\cdots , \frac{z_d^{\alpha}}{z_1^{\alpha}+\cdots +z_d^{\alpha}} \right).$$
Now we can rewrite the equation \eqref{ode} as
\begin{equation}\label{ode2}\frac{{\rm d} Z_t}{{\rm d} t}=\frac{1}{\ell_0+t}(-Z_t+\varphi_t) + \frac{1}{\ell_0+t}(I[X_t]-\varphi_t).\end{equation}
Changing variable $\ell_0+t=e^u$ and denoting $\tilde{Z}_u=Z_{e^u-\ell_0}$ for $u>0$, we can transform the equation \eqref{ode2} as
\begin{equation*}\frac{{\rm d} \tilde{Z}_u}{{\rm d} u}=-\tilde{Z}_u+\varphi(\tilde{Z}_u) + (I[X_{e^u-\ell_0}]-\varphi_{e^u-\ell_0}).\end{equation*}
Taking integral of both sides, we obtain that
\begin{equation}\label{ode3}\tilde{Z}_{t+s}-\tilde{Z}_{t}=\int_t^{t+s}\left(-\tilde{Z}_u+\varphi(\tilde{Z}_u) \right) {\rm d} u +\int_{e^t-\ell_0}^{e^{t+s}-\ell_0} \frac{ I[X_u]-\varphi_u}{\ell_0+u}{\rm d} u.\end{equation}
Let us fix a function $f:\{1,\dots,d\}\to\mathbb{R}$. For $t>0$, define $A_tf:\{1,\dots,d\}\to\mathbb{R}$ by $A_tf(i)=\sum_j A_t^{i,j}f(j)$ and define the process $M^f$ by
$$M^f_t=f(X_t)-f(X_0)-\int_0^t A_sf(X_s) {\rm d} s.$$
\begin{lemma}
The process $M^f$ is a martingale, with $[M^f]_t=\sum_{0<s\le t} (\Delta f(X_s))^2$ and
\begin{equation}\label{eq:anglebracket}
\langle M^f\rangle_t = \int_0^t \big(A_sf^2(X_s)-2f(X_s)A_sf(X_s)\big) {\rm d} s.
\end{equation}
\end{lemma}
\begin{proof}
Let us first prove that $M^f$ is a martingale. For small $h>0$, we have
\begin{align*}
{\mathsf E}[f(X_{t+h})-f(X_t)|\mathcal{F}_t]
&= \sum_{j\sim X_t} (f(j)-f(X_t)) {\mathsf P}[X_{t+h}=j|\mathcal{F}_t]\\
&= \sum_{j\sim X_t} (f(j)-f(X_t)) w^{(j)}_t\cdot h+o(h)\\
&= A_tf(X_t)\cdot h + o(h).
\end{align*}
Let us fix $0<s<t$ and define $t_j=s+j(t-s)/n$ for $j=0,1,\dots,n$. Note that
\begin{align*} {\mathsf E}\left[ f(X_{t})-f(X_s)|\ \mathcal{F}_s \right] & ={\mathsf E}\left[\left. \sum_{j=1}^n {\mathsf E}[f(X_{t_{j}})-f(X_{t_{j-1}})\ |\ \mathcal{F}_{t_{j-1}} ]\ \right|\ \mathcal{F}_s \right]\\
& ={\mathsf E}\left[ \left.\sum_{j=1}^n A_{t_{j-1}}f(X_{t_{j-1}})(t_j-t_{j-1}) +n\cdot o\left(\frac{t-s}{n}\right) \ \right| \ \mathcal{F}_s \right].
\end{align*}
Since the left hand side is independent on $n$, using Lebesgue's dominated convergence theorem and taking the limit of the random sum under the expectation sign on the right hand side, we obtain that
$${\mathsf E}\left[ f(X_{t})-f(X_s)\ |\ \mathcal{F}_s \right]={\mathsf E}\left[\int_s^t A_uf(X_u) {\rm d} u\ | \ \mathcal{F}_s \right].$$ Thus, ${\mathsf E}[ M^f_t|\ \mathcal{F}_s ]=M^f_s$.
To prove \eqref{eq:anglebracket},
we calculate (to simplify the calculation, we will suppose that $f(X_0)=0$).
\begin{align*}
(M^f_t)^2
=&\; f^2(X_t)- 2\left(M^f_t+\int_0^t A_sf(X_s){\rm d} s\right)\int_0^t A_sf(X_s){\rm d} s + \left(\int_0^t A_sf(X_s){\rm d} s \right)^2\\
=&\; M^{f^2}_t+\int_0^t A_sf^2(X_s){\rm d} s - 2M^f_t\int_0^t A_sf(X_s){\rm d} s - \left(\int_0^t A_sf(X_s){\rm d} s \right)^2\\
=&\; N_t + \int_0^t A_sf^2(X_s){\rm d} s\\
&\quad - 2\int_0^t M^f_s A_sf(X_s){\rm d} s - 2\int_0^t A_sf(X_s)\left(\int_0^s A_uf(X_u){\rm d} u\right){\rm d} s \\
=&\; N_t + \int_0^t A_sf^2(X_s){\rm d} s - 2\int_0^t f(X_s) A_sf(X_s){\rm d} s,
\end{align*}
where the process $N$, defined by $N_t:=M_t^{f^2}-2\int_{0}^t\left(\int_0^sA_uf(X_u){\rm d} u\right){\rm d} M^f_s $, is a martingale.
The lemma is proved.
\end{proof}
Let $M$ be the process in $\mathbb{R}^d$ defined by
$$M_t=I[X_t]-\int_0^t A_s[X_s]{\rm d} s\quad \text{for } t\ge0 .$$
Then for each $j$, $M^j$ is a martingale since $M^j=M^{\delta_j}$, with $\delta_j$ defined by $\delta_j(i)=1$ if $i=j$ and $\delta_j(i)=0$ if $i\neq j$.
We also have that
\begin{equation}\label{eq:crochetMj}
\langle M^j\rangle_t = \int_0^t \Lambda^j_s {\rm d} s,
\end{equation}
with $\Lambda^j$ defined by
\begin{equation}\label{eq:defLambdaj}
\Lambda^j_t= \left\lbrace \begin{array}{ll}
w^{(j)}_t & \text{ if } \quad X_t\sim j,\\
\sum_{k\sim X_t} w^{(k)}_t &\text{ if }\quad X_t=j, \\
0 & \text{ otherwise. } \\
\end{array}
\right.
\end{equation}
\begin{lemma}\label{noise}
Assume that $G=(V,E)$ is a complete graph and $w(t)=t^{\alpha}$ with $\alpha>0$. Then almost surely
\begin{equation}
\label{bound}\lim_{t\to\infty} \sup_{1\le c\le C} \left\|\int_{t-\ell_0}^{ct-\ell_0} \frac{I[X_s]-\pi_s}{\ell_0+s}{\rm d} s\right\| =0
\end{equation}
for each $C>1$.
\end{lemma}
\begin{proof}
Note that, for $t\ge 0$,
$$\pi_t-\displaystyle I[X_t]=\frac{1}{w_t}A_t[X_t].$$
Using the integration by parts formula, we obtain the following identity for each $c\in [1,C]$:
\begin{align*}
\int_{t-\ell_0}^{ct-\ell_0} \frac{\pi_s-I[X_s]}{\ell_0+s}{\rm d} s&=\int_{t-\ell_0}^{ct-\ell_0} A_s[X_s]\frac{{\rm d} s}{(\ell_0+s)w_s}\\
&= \left(\frac{I[X_{ct-\ell_0}]}{ctw_{ct-\ell_0}}-\frac{I[X_{t-\ell_0}]}{tw_{t-\ell_0}}\right)\\
& - \int_{t-\ell_0}^{ct-\ell_0} I[X_s] \frac{{\rm d}}{{\rm d} s}\left(\frac{1}{(s+\ell_0)w_s}\right){\rm d} s\\
& - \int_{t-\ell_0}^{ct-\ell_0} \frac{{\rm d} M_s}{(s+\ell_0)w_s}.
\end{align*}
Observe that for some positive constant $k$, $w_s\ge k s^\alpha$ (which is easy to prove, using the fact that $L(1,t)+L(2,t)+\cdots +L(d,t)=\ell_0+t$). We now estimate the terms on the right-hand side of the above-mentioned identity. In the following, the positive constant $k$ may change from line to line and only depends on $C$ and $\ell_0$.
First,
\begin{equation}\label{first}\left\|\frac{I[X_{ct-\ell_0}]}{ctw_{ct-\ell_0}}-\frac{I[X_{t-\ell_0}]}{tw_{t-\ell_0}}\right\| \le k/ {t^{\alpha+1}}.
\end{equation}
Second, for $s\in [t,ct]$ which is not a jump time, we have
\begin{align*}
\frac{{\rm d}}{{\rm d} s}\left(\frac{1}{(\ell_0+s)w_s}\right) = & -\left(\frac{1}{(\ell_0+s)^2w_s}+\frac{1}{(\ell_0+s)w^2_s}\frac{{\rm d} w_s}{{\rm d} s}\right).
\end{align*}
When $s$ is not a jump time, it is easy to check that $\left|\frac{{\rm d} w_s}{{\rm d} s}\right|\le \alpha (\ell_0+s)^{\alpha-1}$. Therefore, for $s\in [t,ct]$ which is not a jump time,
$$\left|\frac{{\rm d}}{{\rm d} s}\left(\frac{1}{(\ell_0+s)w_s}\right)\right| \le k/s^{2+\alpha}$$
and thus,
\begin{equation}\label{second}\left\|\int_{t-\ell_0}^{ct-\ell_0} I[X_s] \frac{{\rm d}}{{\rm d} s}\left(\frac{1}{(\ell_0+s)w_s}\right) {\rm d} s\right\| \le k/t^{\alpha+1}.
\end{equation}
Finally (using Doob's inequality), for $i\in\{1,2,\cdots ,d\}$,
\begin{eqnarray*}
{\mathsf E}\left[\sup_{1\le c\le C} \left|\int_{t-\ell_0}^{ct-\ell_0} \frac{{\rm d} M^i_s}{(\ell_0+s)w_s}\right|^2\right]
&\le& 4\ {\mathsf E}\left[\left(\int_{t-\ell_0}^{Ct-\ell_0} \frac{{\rm d} M^i_s}{(\ell_0+s)w_s}\right)^2\right].
\end{eqnarray*}
Observe that in our setting, for $i\in\{1,2,\cdots ,d\}$, $(\Delta I^i_s)^2=1$ if $s$ is a jump time between $i$ and another vertex. Thus $[M^1]_t+[M^2]_t+\cdots +[M^d]_t$ is just twice the number of jumps up to time $t$ of $X$. So, for $i \in\{1,2,\cdots ,d\}$,
\begin{eqnarray*}
{\mathsf E}\left[\left(\int_{t-\ell_0}^{Ct-\ell_0} \frac{{\rm d} M^i_s}{(\ell_0+s)w_s}\right)^2\right]
&=& {\mathsf E}\left[\int_{t-\ell_0}^{Ct-\ell_0} \frac{{\rm d} [M^i]_s}{(\ell_0+s)^2 w_s^2}\right]\\
&\le& \frac{k}{t^{2(\alpha+1)}} {\mathsf E}\left[ [M^i]_{Ct-\ell_0}-[M^i]_{t-\ell_0}\right]\\
&\le& \frac{k}{t^{2(\alpha+1)}} (Ct)^\alpha (C-1)t,
\end{eqnarray*}
where in the last inequality, we have used the fact that the number of jumps in $[t-\ell_0,Ct-\ell_0]$ is dominated by the number of jumps of a Poisson process with constant intensity $(C t)^\alpha$ in $[t-\ell_0,Ct-\ell_0]$. Therefore,
\begin{eqnarray}\label{third}
{\mathsf E}\left[\sup_{1\le c\le C} \left\|\int_{t-\ell_0}^{ct-\ell_0} \frac{{\rm d} M_s}{(\ell_0+s)w_s}\right\|^2\right]
&\le& \frac{k}{t^{\alpha+1}}.
\end{eqnarray}
From (\ref{first}), (\ref{second}), (\ref{third}) and by using Markov's inequality, we have
\begin{equation}\label{Markov}{\mathsf P}\left[\sup_{1\le c\le C} \left\|\int_{t-\ell_0}^{ct-\ell_0} \frac{I[X_s]-\pi_s}{\ell_0+s}{\rm d} s\right\| \ge \frac{1}{t^{\gamma}}\right]\le \frac{k}{t^{\alpha+1-2\gamma}}
\end{equation}
for every $0<\gamma\le \frac{\alpha+1}{2}$.
Using the Borel--Cantelli lemma, we thus obtain
$$\limsup_{n\to\infty}\sup_{1\le c\le C} \left\|\int_{C^n-\ell_0}^{cC^n-\ell_0} \frac{I[X_s]-\pi_s}{\ell_0+s}{\rm d} s\right\| =0.$$
Moreover, for $C^n\le t\le C^{n+1}$, we have
\begin{align*} \sup_{1\le c\le C} \left\|\int_{t-\ell_0}^{ct-\ell_0} \frac{I[X_s]-\pi_s}{\ell_0+s}{\rm d} s\right\| & \le \left\|\int_{C^n-\ell_0}^{t-\ell_0} \frac{I[X_s]-\pi_s}{\ell_0+s}{\rm d} s\right\| +\sup_{1\le c\le C} \left\|\int_{C^n-\ell_0}^{\min(ct,C^{n+1})-\ell_0} \frac{I[X_s]-\pi_s}{\ell_0+s}{\rm d} s\right\|\\
&+ \sup_{1\le c\le C} \left\|\int^{\max(ct,C^{n+1})-\ell_0}_{C^{n+1}-\ell_0} \frac{I[X_s]-\pi_s}{\ell_0+s}{\rm d} s\right\|
\\
& \le 2\sup_{1\le c\le C} \left\|\int_{C^n-\ell_0}^{cC^n-\ell_0} \frac{I[X_s]-\pi_s}{\ell_0+s}{\rm d} s\right\| \\
&+\sup_{1\le c\le C} \left\|\int_{C^{n+1}-\ell_0}^{cC^{n+1}-\ell_0} \frac{I[X_s]-\pi_s}{\ell_0+s}{\rm d} s\right\|.
\end{align*}
This inequality immediately implies (\ref{bound}).
\end{proof}
From now on, we always assume that $w(t)=t^{\alpha}$, $\alpha>1$ and $G=(V,E)$ is a complete graph. Let us define the vector field $F:\Delta\to T\Delta $ such that $F(z)=-z +\pi(z)$ for each $z\in \Delta$. We also remark that for each $z=(z_1,z_2,\cdots,z_d)\in \Delta$,
\begin{align}\label{vecF}F(z)=\left(-z_1+ \frac{z_1^{\alpha}}{z_1^{\alpha}+\cdots +z_d^{\alpha}},\cdots ,-z_d+ \frac{z_d^{\alpha}}{z_1^{\alpha}+\cdots +z_d^{\alpha}} \right).\end{align}
A continuous map $\Phi: \mathbb{R}_+\times \Delta \to \Delta$ is called a \textit{semi-flow} if $\Phi(0,\cdot):\Delta \to \Delta$ is the identity map and $\Phi$ has the semi-group property, i.e. $\Phi({t+s},\cdot)=\Phi(t,\cdot)\circ \Phi(s,\cdot)$ for all $s,t\in \mathbb{R}_{+}$.
Now for each $z^0\in \Delta$, let $\Phi_t(z^0)$ be the solution of the differential equation
\begin{equation}\label{odeF}\left\lbrace
\begin{array}{ll}
\displaystyle
\frac{{\rm d}}{{\rm d} t}z(t) = & F(z(t)),\ t>0;\\
z(0)\ \ \ = & z^0.
\end{array}
\right.
\end{equation}
Note that $F$ is Lipschitz. Thus the solution $\Phi_t(z^0)$ can be extended to all $t\in \mathbb{R}_+$ and $\Phi:\mathbb{R}_+\times \Delta \to \Delta$ defined by $\Phi(t,z)=\Phi_t(z)$ is a semi-flow.
\begin{theorem}\label{cvthrm} $\tilde{Z}$ is an asymptotic pseudo-trajectory of the semi-flow $\Phi$, i.e.
for all $T>0$,
\begin{equation}\label{pseu}
\lim_{t\to\infty} \sup_{0\le s\le T} \left\| \tilde{Z}_{t+s}- \Phi_s(\tilde{Z}_{t})\right\|=0\ \text{a.s.}
\end{equation}
Furthermore, $\tilde{Z}$ is a $-\frac{\alpha+1}{2}$-asymptotic pseudo-trajectory, i.e. for all $T>0$,
\begin{equation}\label{pseu2}\limsup_{t\to\infty}\frac{1}{t}\log\left( \sup_{0\le s\le T} \| \tilde{Z}_{t+s}-\Phi_s(\tilde{Z}_{t})\| \right)\le -\frac{\alpha+1}{2} \ \text{a.s.}
\end{equation}
\end{theorem}
\begin{proof}
From the definition of $\Phi$, we have
$$\Phi_s(\tilde{Z}_{t})-\tilde{Z}_{t}=\int_{0}^{s}F(\Phi_u(\tilde{Z}_{t})){\rm d} u.$$
Moreover, from \eqref{ode3},
$$\tilde{Z}_{t+s}-\tilde{Z}_{t}=\int_0^{s}F(\tilde{Z}_{t+u}) {\rm d} u +\int_{e^t-\ell_0}^{e^{t+s}-\ell_0} \frac{ I[X_u]-\pi_u}{\ell_0+u}{\rm d} u.$$
Subtracting both sides of the two above identities, we obtain that
$$\tilde{Z}_{t+s}-\Phi_s(\tilde{Z}_{t})=\int_0^{s}\left( F(\tilde{Z}_{t+u}) - F(\Phi_u(\tilde{Z}_{t}))\right) {\rm d} u+\int_{e^t-\ell_0}^{e^{t+s}-\ell_0} \frac{ I[X_u]-\pi_u}{\ell_0+u}{\rm d} u.$$
Observe that $F$ is Lipschitz, hence
$$ \| \tilde{Z}_{t+s}-\Phi_s(\tilde{Z}_{t})\| \le K \int_0^{s}\|\tilde{Z}_{t+u}-\Phi_u(\tilde{Z}_{t})\|{\rm d} u +\left\|\int_{e^t-\ell_0}^{e^{s+t}-\ell_0} \frac{I[X_u]-\pi_u}{\ell_0+u}{\rm d} u\right\|,$$
where $K$ is the Lipschitz constant of $F$. Using Gr\"onwall's inequality, we thus have
\begin{equation}\label{gron}\| \tilde{Z}_{t+s}-\Phi_s(\tilde{Z}_{t})\|\le \sup_{0\le s \le T}\left\|\int_{e^t-\ell_0}^{e^{s+t}-\ell_0} \frac{I[X_u]-\pi_u}{\ell_0+u}{\rm d} u\right\| e^{K s}.\end{equation}
On the other hand, from Lemma \ref{noise}, we have
\begin{equation}\label{noise2}
\lim_{t\to\infty} \sup_{0\le s\le T} \left\|\int_{e^t-\ell_0}^{e^{s+t}-\ell_0} \frac{I[X_u]-\pi_u}{\ell_0+u}{\rm d} u\right\| =0 \ \ \text{a.s.}
\end{equation}
The inequalities (\ref{gron}) and (\ref{noise2}) immediately imply (\ref{pseu}).
We now prove the second part of the theorem. From (\ref{Markov}), we have
$${\mathsf P}\left[\sup_{0\le s\le T} \left\|\int_{e^t}^{e^{s+t}} \frac{I[X_u]-\pi_u}{\ell_0+u}{\rm d} u\right\| \ge e^{-\gamma t}\right] \le k e^{-(\alpha+1-2\gamma)t},$$
for every $0<\gamma\le\frac{\alpha+1}{2}$. By the Borel--Cantelli lemma, this implies that
$$\limsup_{n\to\infty}\frac{1}{nT}\log\left( \sup_{0\le s\le T} \left\|\int_{e^{nT}}^{e^{s+nT}} \frac{I[X_u]-\pi_u}{\ell_0+u}{\rm d} u\right\| \right)\le -\gamma\ \ \text{a.s.}$$
and therefore that (taking $\gamma\to \frac{\alpha+1}{2}$)
$$\limsup_{n\to\infty}\frac{1}{nT}\log\left( \sup_{0\le s\le T} \left\|\int_{e^{nT}}^{e^{s+nT}} \frac{I[X_u]-\pi_u}{\ell_0+u}{\rm d} u\right\| \right)\le -\frac{\alpha+1}{2} \ \text{a.s.}$$
Note that for $nT\le t\le (n+1)T$ and $0\le s\le T$,
\begin{align*}
\left\|\int_{e^t}^{e^{s+t}} \frac{I[X_u]-\pi_u}{\ell_0+u}{\rm d} u\right\| & \le 2\sup_{0\le s\le T}\left\|\int_{e^{nT}}^{e^{s+nT}} \frac{I[X_u]-\pi_u}{\ell_0+u}{\rm d} u\right\|\\
& +\sup_{0\le s\le T}\left\|\int_{e^{(n+1)T}}^{e^{s+(n+1)T}} \frac{I[X_u]-\pi_u}{\ell_0+u}{\rm d} u\right\|.
\end{align*}
Therefore,
\begin{align}\label{logbound2}\limsup_{t\to\infty}\frac{1}{t}\log\left( \sup_{0\le s\le T} \left\|\int_{e^{t}}^{e^{s+t}} \frac{I[X_u]-\pi_u}{\ell_0+u}{\rm d} u\right\| \right)\le -\frac{\alpha+1}{2} \ \text{a.s.}
\end{align}
Finally, (\ref{pseu2}) is obtained from (\ref{gron}) and (\ref{logbound2}).
\end{proof}
\section{Convergence to equilibria}\label{sec:convergence}
Let
$$\mathcal{C}=\{z\in \Delta : F(z)=0\}$$
stand for the \textit{equilibria set} of the vector field $F$ defined in (\ref{vecF}). We say an equilibrium $z\in \mathcal{C}$ is (linearly) \textit{stable} if all the eigenvalues of $DF(z)$, the Jacobian matrix of $F$ at $z$, have negative real parts. If one of its eigenvalues has a positive real part, then it is called (linearly) \textit{unstable}.
Observe that $\mathcal{C}=\mathcal{S}\cup \mathcal U$, where we define
$$\mathcal S=\{e_1=(1,0,0,\cdots ,0), e_2=(0,1,0,\cdots ,0),\cdots ,e_d= (0,0,\cdots ,0,1)\}$$ as the set of all stable equilibria and
$$\mathcal U=\{ z_{j_1,j_2,\cdots, j_k} : 1\le j_1<j_2<\cdots <j_k\le d, k=2,\cdots ,d\}$$ as the set of all unstable equilibria, where $z_{j_1,j_2,\cdots, j_k}$ stands for the point $z=(z_1,\cdots, z_d)\in\Delta$ such that $z_{j_1}=z_{j_2}=\cdots =z_{j_k}=\frac{1}{k}$ and all the remaining coordinates are equal to 0.
Indeed, for each $z\in \mathcal S$, we have that $DF(z)=-I$. Moreover, $$DF\left(\frac{1}{d},\frac{1}{d},\cdots ,\frac{1}{d}\right)=(\alpha-1) I -\frac{\alpha }{d} N,$$ where
$N$ is the matrix such that $N_{m,n}=1$ for all $m,n$,
and $DF(z_{j_1,j_2,\cdots,j_k})= (D_{m,n})$ where $$D_{m,n}=
\left\lbrace
\begin{array}{ll}
(\alpha-1)- \frac{\alpha}{k} &\text{if } m=n\in\{j_i:\;i=1,\cdots,k\};
\\
-\frac{\alpha}{k} & \text{if } m\neq n, \hbox{ with } \{m,n\} \subset\{j_i:\;i=1,\cdots,k\};\\
-1 & \text{if } m=n\not\in\{j_i:\;i=1,\cdots,k\};\\
0 & \text{if } m\neq n, \hbox{ with } \{m,n\} \not\subset\{j_i:\;i=1,\cdots,k\}.
\end{array} \right.
$$
Therefore, we can easily compute that for each $z\in \mathcal U$, the eigenvalues of $DF(z)$ are $-1$ and $\alpha-1$, with respective multiplicities $d-k+1$ and $k-1$.
\begin{theorem}\label{thm:CVeq} $Z_t$ converges almost surely to a point in $\mathcal{C}$ as $t\to\infty$.
\end{theorem}
\begin{proof}
Consider the map $H:\Delta\to \mathbb{R}$ such that
$$H(z)=z_1^{\alpha}+z_2^{\alpha}+\cdots +z_d^{\alpha}.$$
Note that $H$ is a strict Lyapunov function of $F$, i.e. $\langle \nabla H(z), F(z)\rangle$ is positive for all $z\in \Delta\setminus \mathcal{C}$. Indeed, we have
\begin{align*} \langle \nabla H(z),F(z)\rangle & =\displaystyle \sum_{i=1}^d \alpha z_i^{\alpha-1} \left(-z_i +\frac{z_i^{\alpha}}{\sum_{j=1}^d z_j^{\alpha}} \right)\\
&=\alpha \left(-\sum_{i=1}^d z_i^{\alpha} +\frac{\sum_{i=1}^d z_i^{2\alpha-1}}{\sum_{i=1}^d z_i^{\alpha}} \right)\\
\ & \displaystyle =\frac{\alpha}{H(z)} \left(-\left( \sum_{i=1}^d z_i^{\alpha}\right)^2 +\sum_{i=1}^d z_i^{2\alpha-1}\sum_{i=1}^dz_i \right)\\
\ & \displaystyle =\frac{\alpha}{H(z)} \sum_{1\le i<j\le d} z_iz_j \left( z_i^{\alpha-1}-z_j^{\alpha-1}\right)^2.
\end{align*}
For $z\in \Delta\setminus \mathcal{C}$, there exist distinct indices $j_1, j_2\in \{1,2,...,d\}$ such that $z_{j_1}, z_{j_2}$ are positive and $z_{j_1}\neq z_{j_2}$. Therefore,
$$\langle \nabla H(z),F(z)\rangle \ge \frac{\alpha}{H(z)} z_{j_1}z_{j_2} \left( z_{j_1}^{\alpha-1}-z_{j_2}^{\alpha-1}\right)^2>0.$$
Let $$L(Z)=\bigcap_{t\ge0}\overline{Z([t,\infty))}$$
be the limit set of $Z$. Since $\tilde{Z}$ is an asymptotic pseudo-trajectory of $\Phi$, by Theorem 5.7 and Proposition 6.4 in \cite{Benaim99}, we can conclude that $L(Z)=L(\tilde{Z})$ is a connected subset of $\mathcal{C}$. Moreover, $\mathcal{C}$ is actually an isolated set and this fact implies the almost sure convergence of ${Z_t}$ toward an equilibrium $z\in\mathcal{C}$ as $t\to\infty$.
\end{proof}
\begin{lemma} \label{cvrate} Let $z^*$ be a stable equilibrium. Then for each small $\epsilon>0$ there exists $\delta_{\epsilon}>0$ such that $z^*$ attracts exponentially $B_{\delta_{\epsilon}}(z^*):=\left\{z\in\Delta : \|z-z^*\|<\delta_{\epsilon} \right\}$ at rate $-1+\epsilon$, i.e.
$$\|\Phi_s(z)-z^*\|\le e^{-(1-\epsilon)s}\|z-z^*\|$$
for all $s>0$ and $z\in B_{\delta_{\epsilon}}(z^*)$.
\end{lemma}
\begin{proof}
We observe that $$F(z)=(z-z^*).DF(z^*)^T+R(z-z^*),$$
where we have set $$R(y)=y.\left(\int_0^1DF(ty+z^*)^T{\rm d} t-DF(z^*)^T\right).$$ Note that $\|R(y)\| \le k \|y\|^{1+\beta},$ where $\beta=\min(1,\alpha-1)$ and $k$ is some positive constant. Therefore, we can transform the differential equation \eqref{odeF} into the following integral form:
$$z(t)-z^*= (z(0)-z^*)e^{t DF(z^*)^T}+\int_0^t R(z(s)-z^*)e^{(t-s)DF(z^*)^T}{\rm d} s. $$
Note that for $z^*\in \mathcal{S}$, we have $DF(z^*)=-I$. Therefore,
$$\|z(t)-z^*\|\le e^{-t}\| z(0)-z^*\|+\int_0^t e^{-(t-s)}\|R(z(s)-z^*)\|{\rm d} s.$$
For each small $\epsilon>0$, if $\|z(s)-z^*\|\le\left(\frac{\epsilon}{k}\right)^{1/\beta}$ for all $0\le s\le t$, then
$$e^{t}\|z(t)-z^*\|\le \|z(0)-z^*\|+\epsilon\int_0^t e^{s}\|z(s)-z^*\|{\rm d} s. $$
Thus, by Gr\"onwall's inequality, if $\|z(s)-z^*\|\le\left(\frac{\epsilon}{k}\right)^{1/\beta}$ for all $0\le s\le t$, then
$$\|z(t)-z^*\| \le \| z(0)-z^*\| e^{-(1-\epsilon)t}.$$
But this also implies that if $\|z(0)-z^*\|\le\left(\frac{\epsilon}{k}\right)^{1/\beta}$ then $\|z(t)-z^*\|\le \left(\frac{\epsilon}{k}\right)^{1/\beta}$ for all $t\ge 0$. Hence, for all $t\ge 0$ and any small $\epsilon>0$ and $z(0)$ such that $\|z(0)-z^*\|\le \left(\frac{\epsilon}{k}\right)^{1/\beta}$, we have
\begin{equation*}\|z(t)-z^*\|\le e^{-(1-\epsilon) t}\|z(0)-z^*\|.\end{equation*}
\end{proof}
\begin{lemma}\label{lem:CVspeed}
Let $z^*=e_j$ be a stable equilibrium, with $j\in V$. Then, a.s. on the event $\{Z_t\to z^*\}$, for all $\epsilon>0$,
$$\sum_{i\ne j} L(i,t)=o(t^{\epsilon}).$$
\end{lemma}
\begin{proof}
Let us fix $\epsilon>0$ and let $\delta_{\epsilon}$ be the constant defined in Lemma \ref{cvrate}. Note that on the event $\Gamma(z^*):=\{Z_t\to z^*\}$, there exists $T_{\epsilon}>0$ such that $\tilde{Z}_t\in B_{\delta_{\epsilon}}$ for all $t\ge T_{\epsilon}$.
Combining the results in Theorem \ref{cvthrm} with Lemma \ref{cvrate} and using Lemma 8.7 in \cite{Benaim99}, we have a.s. on $\Gamma(z^*)$,
$$\limsup_{t\to\infty} \frac{1}{t}\log\|\tilde{Z}_{t}-z^*\| \le -1+\epsilon$$
for arbitrary $\epsilon>0$. This implies, a.s. on $\Gamma(z^*)$, that $\| {Z}_{t}- z^*\|=o(t^{-(1-\epsilon)})$. The lemma easily follows.
\end{proof}
\begin{lemma}\label{boundlm}
Let $j\in V$, $\epsilon\in (0,1-1/\alpha)$ and $C$ a finite constant. Set $$A_{j,C,\epsilon}:=\left\{\sum_{i\ne j} L(i,t)\le C t^\epsilon,\;\forall t\ge 1\right\}.$$
Then
${\mathsf E}[\sum_{i\neq j} L(i,\infty) 1_{A_{j,C,\epsilon}}] < \infty$.
\end{lemma}
\begin{proof}
For each $n\ge 1$, set $\tau_n:=\inf\{t\ge 1:\, L(j,t)= n\}$ and $\gamma_n=\sum_{i\in V\setminus\{j\}} L(i,\tau_n)$. Set also $\tau:=\inf\{t\ge 1:\; \sum_{i\ne j} L(i,t)> C t^\epsilon\}$, $\tau'_n=\tau_n\wedge \tau$ and $\gamma'_n=\sum_{i\in V\setminus\{j\}} L(i,\tau'_n)$. Note that $A_{j,C,\epsilon}=\{\tau=\infty\}$ and on $A_{j,C,\epsilon}$, $\tau_n=\tau'_n<\infty$ and $\gamma_n=\gamma'_n$ for all $n\ge 1$.
During the time interval $[\tau'_n,\tau'_{n+1}]$, the jumping rate to $j$ is larger than $\rho_0= n^{\alpha}$ and the jumping rate from $j$ is smaller than $\rho_1=(C (n+1)^{\epsilon})^{\alpha}$.
This implies that on the time interval $[\tau'_n,\tau'_{n+1}]$, the number of jumps from $j$ to $V\setminus\{j\}$ is stochastically dominated by the number of jumps of a Poisson process with intensity $\rho_1$. Since the time spent at $j$ during $[\tau'_n,\tau'_{n+1}]$ is $L(j,\tau'_{n+1})-L(j,\tau'_n)\le 1$, the number of jumps from $j$ is stochastically dominated by a random variable $N\sim \text{Poisson}(\rho_1)$. Therefore, $\gamma'_{n+1}-\gamma'_n$, the time spent at $V\setminus\{j\}$ during $[\tau'_n,\tau'_{n+1}]$, is stochastically dominated by
$T:=\sum_{i=1}^N \xi_{i}$, where $\xi_i,i=1,2,...,N$ are independent and exponentially distributed random variables with mean value $1/\rho_0.$
Therefore,
$${\mathsf E}[\gamma'_{n+1}-\gamma'_n]\le \frac{\rho_1}{\rho_0}=\frac{C^\alpha(n+1)^{\alpha\epsilon}}{n^\alpha}=O\left(\frac{1}{n^{\alpha(1-\epsilon)}}\right).$$
Since $\lim_{n\to\infty}\gamma'_n=\sum_{i\ne j}L(i,\tau)$, this proves that
${\mathsf E}\left[\sum_{i\ne j}L(i,\tau)\right]<\infty.$
This proves the lemma since $\sum_{i\neq j} L(i,\infty) 1_{A_{j,C,\epsilon}} \le \sum_{i\ne j}L(i,\tau)$.
\end{proof}
\begin{theorem}\label{thm:localization}
Let $z^*=e_j\in \mathcal{S}$ be a stable equilibrium, with $j\in \{1,2,...,d\}$. Then, a.s. on the event $\{Z_t\to z^*\}$,
$$L(j,\infty)=\infty \quad\text{ and }\quad \sum_{i\ne j} L(i,\infty)<\infty.$$
\end{theorem}
\begin{proof}
Lemma \ref{lem:CVspeed} implies that for $\epsilon\in (0,1-\frac{1}{\alpha})$, the event $\{Z_t\to z^*\}$ coincides a.s. with $\cup_{C} A_{j,C,\epsilon}$. Lemma \ref{boundlm} states that for all $C>0$, a.s. on $A_{j,C,\epsilon}$, $\sum_{i\neq j} L(i,\infty)<\infty$. Therefore, we have that a.s. on $\{Z_t\to z^*\}$, $\sum_{i\neq j} L(i,\infty)<\infty$.
\end{proof}
We will show in the next section that if $z^*$ is an unstable equilibrium, then ${\mathsf P}(Z_t\to z^*)=0$, and this
will finish the proof of Theorem \ref{thm:mainresult}.
\section{Non-convergence to unstable equilibria}\label{sec:nonCV}
In this section, we prove a general non-convergence theorem for a class of finite variation c\`adl\`ag processes.
The proof of this theorem follows ideas from the proof of a theorem of Brandi\`ere and Duflo (see \cite{Brandiere96} or \cite{Duflo1996}), but uses a new idea presented in Section \ref{sec:dirattract}, where sufficient conditions are given for an asymptotic pseudo-trajectory $Z$ of a dynamical system to be attracted exponentially fast towards the unstable manifold of an equilibrium $z^*$ on the event that $Z_t$ converges towards $z^*$.
Then, in Section \ref{sec:dirinst}, we prove a non-convergence theorem towards an unstable equilibrium that has no stable direction. The proof essentially follows \cite{Brandiere96} and \cite{Duflo1996}.
We also point out in Remark \ref{rk:inaccuracy} several inaccuracies in their proof.
The results proved in Sections \ref{sec:dirattract} and \ref{sec:dirinst} are then applied in Section \ref{sec:nonCV_VRJP} to strongly VRJP, showing in particular that, with probability 1, the occupation measure process does not converge towards unstable equilibria.
\subsection{Attraction towards the unstable manifold}\label{sec:dirattract}
In this section, we fix $m\in\{1,2,\dots, d\}$; a point $z\in\mathbb{R}^d$ will be written as $z=(x,y)$ where $x\in \mathbb R^m$ and $y\in \mathbb R^{d-m}$.
Let $\Pi:\mathbb{R}^d\to\mathbb{R}^m$ be defined by $\Pi(x,y)=x$ (since $\Pi$ is linear, we will often write $\Pi z$ instead of $\Pi(z)$).
We let $F:\mathbb{R}^d\to\mathbb{R}^d$ be a $C^1$ Lipschitz vector field.
Let us consider a finite variation c\`adl\`ag process $Z=(X,Y)$ in $\mathbb{R}^d$, adapted to a filtration $(\mathcal{F}_t)_{t\ge 0}$, satisfying the following equation
\begin{align*}Z_t-Z_s=\int_s^t F(Z_u){\rm d} u+\int_s^t {\Psi}_u{\rm d} u + M_t-M_s\end{align*}
where $M_t$ is a finite variation c\`adl\`ag martingale w.r.t. $(\mathcal{F}_t)$ and $\Psi_t$ is an $(\mathcal{F}_t)$-adapted process.
Let $z^*=(x^*,y^*)$ be an equilibrium of $F$, i.e. $F(z^*)=0$. In the following, $\Gamma$ denotes the event $\{\lim_{t\to\infty} Z_t = z^*\}$.
\begin{hypothesis}\label{hyp:gpt}
There is $\gamma>0$ such that for all $T>0$, there exists a finite constant $C(T)$, such that for all $t>0$, $${\mathsf E}\left[\sup_{0\le h\le T} \left\|\int_{t}^{t+h} (\Psi_u{\rm d} u +{\rm d} M_u)\right\|^2\right]\le C(T) e^{-2\gamma t}.$$
\end{hypothesis}
\begin{remark}
Using Doob's inequality, Hypothesis \ref{hyp:gpt} is satisfied as soon as there is a constant $C>0$ such that for all $t>0$, $\|\Psi_t\|\le C e^{-\gamma t}$ and for all $t>s>0$ and all $1\le i\le d$, $\langle M^i\rangle_t-\langle M^i\rangle_s\le Ce^{-2\gamma s}$.
\end{remark}
\begin{lemma}\label{lem:gpt}
If Hypothesis \ref{hyp:gpt} holds, then
$Z$ is a $\gamma$-pseudotrajectory of $\Phi$, the flow generated by $F$, i.e. a.s. for all $T>0$,
$$\limsup_{t\to\infty}\frac{1}{t}\log\left(\sup_{0\le h\le T} \|Z_{t+h}-\Phi_h(Z_t)\|\right)\le -\gamma.$$
\end{lemma}
\begin{proof}
Follow the proof of Proposition 8.3 in \cite{Benaim99}.
\end{proof}
\begin{hypothesis}
\label{hyp:dirattract}
There are $\mu>0$ and $\mathcal{N}=\mathcal{N}_1\times \mathcal{N}_2$ a compact convex neighbourhood of $z^*$ (with $\mathcal{N}_1$ and $\mathcal{N}_2$ respectively neighbourhoods of $x^*\in\mathbb{R}^m$ and of $y^*\in \mathbb{R}^{d-m}$) such that
$K:=\{z=(x,y)\in \overline{\mathcal{N}}:y=y^*\}$ attracts exponentially $\overline{\mathcal{N}}$ at rate $-\mu$ (i.e. there is a constant $C$ such that $d(\Phi_t(z),K)\le C e^{-\mu t}$ for all $t>0$).
\end{hypothesis}
\begin{lemma}\label{lem:dirattract}
If Hypotheses \ref{hyp:gpt} and \ref{hyp:dirattract} hold, then,
setting $\beta_0:=\gamma\wedge \mu$, for all $\beta\in (0,\beta_0)$, on the event $\Gamma$,
\begin{equation}
\|Y_t-y^*\| = O(e^{-\beta t}).
\end{equation}
\end{lemma}
\begin{proof}
This is a consequence of Lemma 8.7 in \cite{Benaim99}.
\end{proof}
\begin{hypothesis} \label{hyp:alpha-holder}
Suppose there are $\alpha>1$ and $C>0$ such that for all $1\le i\le m$ and all $(x,y)\in \mathcal{N}$,
$$|F_i(x,y)-F_i(x,y^*)|\le C \|y-y^*\|^\alpha.$$
\end{hypothesis}
Let $G:\mathbb{R}^m\to\mathbb{R}^m$ be the $C^1$ vector field defined by $G_i(x)=F_i(x,y^*)$, for $1\le i\le m$ and $x\in\mathbb{R}^m$.
For $p>0$, denote $$\Gamma_p:=\Gamma\cap \{\forall t\ge p:\; Z_t\in \mathcal{N}\}.$$
For $1\le i\le m$, set $$\tilde{\Psi}_i(t)=\Psi_i(t) + F_i(X_t,Y_t) - F_i(X_t,y^*).$$
\begin{lemma}\label{lem:reducx}
Under Hypotheses \ref{hyp:gpt}, \ref{hyp:dirattract} and \ref{hyp:alpha-holder}, on $\Gamma_p$, it holds that, as $t\to\infty$, $$\tilde{\Psi}_t=\Pi \Psi_t + O(e^{-\alpha\beta t})$$ for all $\beta\in (0,\beta_0)$ and that for all $p<s<t$,
$$X_t-X_s=\int_s^t G(X_u){\rm d} u+\int_s^t \tilde{\Psi}_u{\rm d} u + \Pi M_t-\Pi M_s.$$
\end{lemma}
\begin{proof} This lemma is a straightforward consequence of Lemma \ref{lem:dirattract}.
\end{proof}
\subsection{Avoiding repulsive traps}\label{sec:dirinst}
In applications, this subsection will be used for the process $X$ defined in Lemma \ref{lem:reducx}.
In this subsection, we let $F:\mathbb{R}^d\to\mathbb{R}^d$ be a $C^1$ Lipschitz vector field and we consider a finite variation c\`adl\`ag process $Z$ in $\mathbb{R}^d$, adapted to a filtration $(\mathcal{F}_t)_{t\ge 0}$, satisfying the following equation
$$Z_t-Z_s=\int_s^t F(Z_u){\rm d} u+\int_s^t {\Psi}_u{\rm d} u + M_t-M_s$$
where $M_t$ is a finite variation c\`adl\`ag martingale w.r.t. $(\mathcal{F}_t)$ and $\Psi_t=r_t+R_t$, with $r$ and $R$ two $(\mathcal{F}_t)$-adapted processes.
Let $z^*\in\mathbb{R}^d$ and $\Gamma$ an event on which $\lim_{t\to\infty} Z_t = z^*$.
Let $\mathcal{N}$ be a convex neighbourhood of $z^*$.
For $p>0$, set $$\Gamma_p:=\Gamma\cap \{\forall t\ge p:\; Z_t\in \mathcal{N}\}.$$ Then, $\Gamma=\cup_{p>0}\Gamma_p$.
We will suppose that
\begin{hypothesis}\label{hyp:z*}
$z^*$ is a repulsive equilibrium, i.e. $F(z^*)=0$ and all eigenvalues of $DF(z^*)$ have a positive real part. Moreover $DF(z^*)=\lambda I$, with $\lambda>0$ and $I$ the identity $d\times d$ matrix.
\end{hypothesis}
For all $z\in\mathbb{R}^d$,
\begin{align*}
F(z)&=F(z^*)+\int_0^1 DF(z^*+u(z-z^*)).(z-z^*) {\rm d} u\\
&= \lambda (z-z^*) + J(z).(z-z^*)
\end{align*}
where we have set $$J(z)=\int_0^1 (DF(z^*+u(z-z^*))-DF(z^*)){\rm d} u.$$
Then, for all $t\ge s$,
\begin{equation}\label{eq:zl}
Z_t-Z_s=\int_s^t \lambda Z_u {\rm d} u+\int_s^t \left[\Psi_u+J(Z_u).(Z_u-z^*)\right]{\rm d} u + M_t-M_s.
\end{equation}
Let us fix $p>0$.
Note that \eqref{eq:zl} implies that, for all $t\ge p$,
\begin{equation}\label{eq:zl2}
Z_t=e^{\lambda t} \left(e^{-\lambda p} Z_p +\int_p^t\bar{\Psi}_s{\rm d} s+ \bar M_t-\bar M_p\right)
\end{equation}
where $\bar M_t= \int_0^t e^{-\lambda s} {\rm d} M_s$ and $\bar{\Psi}_t=\bar{r}_t+\bar{R}_t$, with
$$\bar{r}_t:= e^{-\lambda t}r_t \quad \text{ and } \quad \bar{R}_t:=e^{-\lambda t}[R_t+J(Z_t).(Z_t-z^*)] .$$
We assume that the following hypothesis is fulfilled:
\begin{hypothesis} \label{hyp:RM}
There is a random variable $K$ finite on $\Gamma$ and
there is a continuous function $a:[0,\infty)\to (0,\infty)$ such that $\int_0^\infty a(s){\rm d} s <\infty$,
$\alpha^2(t) :=\int_t^{\infty}a(s){\rm d} s=O\big( \int_t^\infty e^{-2\lambda (s-t)} a(s) {\rm d} s\big)$ as $t\to\infty$
and such that the following items \textit{(i)} and \textit{(ii)} hold.
\begin{enumerate}[(i)]
\item For each $i$, $\langle M^i\rangle_t = \int_0^t \Lambda^i_s {\rm d} s$, with $\Lambda^i$ a positive $(\mathcal{F}_t)$-adapted process. Setting $\Lambda=\sum_i \Lambda^i$, we have that a.s. on $\Gamma$, for all $t>0$,
\begin{align}\label{eq:THM1}
K^{-1}a(t) \le \Lambda(t)\le Ka (t),
\end{align}
\begin{align}\label{eq:THM2}
\sum_{i=1}^d|\Delta M^i_t|\le K \alpha(t),
\end{align}
\begin{align} \label{eq:THM4}
\int_0^\infty\frac{\|r_s\|^2}{a(s)} {\rm d} s \le K.
\end{align}
\item As $t\to\infty$,
\begin{align}\label{eq:THM3}{\mathsf E}\left[1_\Gamma \left(\int_t^\infty \|{R}_s\| {\rm d} s\right)^2\right]=o\big(\alpha^2(t)\big).
\end{align}
\end{enumerate}
\end{hypothesis}
For $p>0$, define $$G_p=\Gamma_p\cap\{\sup_{t\ge p}\|J(Z_t)\|\le \frac{\lambda}{2}\}\cap\{\sup_{t\ge p}\|Z_t\|\le 1\}.$$
\begin{lemma} \label{lem:R}
For all $p>0$, as $t\to\infty$,
\begin{align}\label{eq:R}
{\mathsf E}\left[1_{G_p} \int_t^\infty \|\bar{R}_s\|{\rm d} s\right]=o(e^{-\lambda t}\alpha(t)).
\end{align}
\end{lemma}
\begin{proof} Fix $p>0$.
Since Hypothesis \ref{hyp:RM}-(ii) holds, to prove the lemma it suffices to prove that as $t\to\infty$,
$${\mathsf E}\left[1_{G_p}\int_t^\infty e^{-\lambda s}\|J(Z_s).(Z_s-z^*)\| {\rm d} s\right]= o(e^{-\lambda t}\alpha(t)).$$
To simplify the notation, we suppose $z^*=0$. For $s<t$ (using the convention $\frac{z}{\|z\|}=0$ if $z=0$),
\begin{align*}
\|Z_t\|-\|Z_s\|
=& \;\lambda\int_s^t \|Z_u\| {\rm d} u + \int_s^t \left\langle \frac{Z_u}{\|Z_u\|},J(Z_u)Z_u\right\rangle {\rm d} u\\
& + \int_s^t \left\langle \frac{Z_{u-}}{\|Z_{u-}\|}, {\rm d} M_u\right\rangle + \int_s^t \left\langle \frac{Z_u}{\|Z_u\|}, \Psi_u\right\rangle {\rm d} u\\
&+ \sum_{s<u\le t} 1_{\{Z_{u-}\ne 0\}}\left(\Delta \|Z_u\| - \left\langle \frac{Z_{u-}}{\|Z_{u-}\|},\Delta Z_u\right\rangle \right).
\end{align*}
Using the inequality $\|z+\delta\|-\|z\|\ge \langle \frac{z}{\|z\|},\delta \rangle$, we have for all $u>p$,
$$\Delta \|Z_u\| -\left\langle \frac{Z_{u-}}{\|Z_{u-}\|},\Delta Z_u\right\rangle\ge 0.$$
Furthermore, using the Cauchy--Schwarz inequality, on the event $G_p$,
$$\left\langle \frac{Z_u}{\|Z_u\|},J(Z_u)Z_u\right\rangle \ge - \|J(Z_u)Z_u\|\ge -\sup_{t\ge p}\|J(Z_t)\|.\|Z_u\|\ge-\frac{\lambda}{2}\|Z_u\|$$
for all $u> p$. From the above it follows that on the event $G_p$,
\begin{align*}
\|Z_t\|-\|Z_s\|
\ge& \;\frac{\lambda}{2}\int_s^t \|Z_u\| {\rm d} u + \int_s^t \left\langle \frac{Z_{u-}}{\|Z_{u-}\|}, {\rm d} M_u\right\rangle + \int_s^t \left\langle \frac{Z_u}{\|Z_u\|},\Psi_u\right\rangle {\rm d} u
\end{align*}
for all $t>s>p$. As a consequence, using Doob's inequality and Hypothesis \ref{hyp:RM}, we obtain that
\begin{eqnarray*}
\frac{\lambda}{2}{\mathsf E}\left[1_{G_p}\left(\int_t^\infty \|Z_s\|{\rm d} s\right)^2\right]^\frac{1}{2}
&\le&\; {\mathsf E}\left[1_{G_p}\sup_{T>t}\left|\int_t^T \left\langle \frac{Z_{u-}}{\|Z_{u-}\|}, {\rm d} M_u\right\rangle\right|^2\right]^\frac{1}{2}\\
& &+ \; \alpha(t) {\mathsf E}\left[1_{G_p}\int_t^\infty \frac{\|r_u\|^2}{a(u)} {\rm d} u\right]^\frac{1}{2}\\
& &+ \; {\mathsf E}\left[1_{G_p}\left(\int_t^\infty \|R_u\| {\rm d} u\right)^2\right]^\frac{1}{2}\\
&= &\; O(\alpha(t)).
\end{eqnarray*}
Using the Cauchy--Schwarz inequality, we have
\begin{align*}
{\mathsf E}\left[1_{G_p}\int_t^\infty e^{-\lambda s}\|J(Z_s)Z_s\|{\rm d} s\right]
\le & \; e^{-\lambda t}{\mathsf E}\left[1_{G_p}\sup_{s\ge t} \|J(Z_s)\|^2\right]^{\frac12}
{\mathsf E}\left[1_{G_p}\left(\int_t^\infty \|Z_s\|{\rm d} s\right)^2\right]^{\frac12}.
\end{align*}
Note that on $G_p$, $\sup_{s\ge t} \|J(Z_s)\|\le \lambda/2$ and $\lim_{t\to\infty}\sup_{s\ge t} \|J(Z_s)\|=0$ almost surely. Therefore, we conclude that
${\mathsf E}[1_{G_p}\int_t^\infty e^{-\lambda s} \|J(Z_s)Z_s\|{\rm d} s]=o(e^{-\lambda t} \alpha(t))$ as $t\to \infty$.
\end{proof}
Hypothesis \ref{hyp:RM} ensures in particular that a.s. on $G_p$, $\int_p^\infty \bar{\Psi}_s{\rm d} s$ and $\bar M_{\infty}$ are well defined and almost surely finite. Let $L$ be a random variable such that $$L=\int_p^\infty \bar{\Psi}_s{\rm d} s+\bar M_{\infty}-\bar M_{p}\ \ \text{ on } G_p.$$
Letting $t\to\infty$ in \eqref{eq:zl2}, $\lambda$ being positive, we have $L=-e^{-\lambda p} Z_p\ \text{\ a.s. on }G_p.$
We now apply Theorem \ref{THM:THMA} to the martingale $\bar M_t$ and to the adapted process $\bar \Psi_t$. We have $\langle \bar{M}^i\rangle_t=\int_0^t \bar{\Lambda}^i_s {\rm d} s,$ with $\bar{\Lambda}^i_s=e^{-2\lambda s}\Lambda^i_s$.
We also have $|\Delta \bar{M}_t|=e^{-\lambda t}|\Delta M_t|$.
Hypothesis \ref{hyp:RM}-(i) implies that \eqref{eq:THMA1}, \eqref{eq:THMA2} and \eqref{eq:THMA4} are satisfied with the function $\bar{a}(t)=e^{-2\lambda t}a(t)$. Finally, \eqref{eq:THMA3} follows from Lemma \ref{lem:R}. Therefore, we obtain that
$${\mathsf P}(G_p)={\mathsf P}\big(G_p\cap\{L=-e^{-\lambda p} Z_p\}\big)=0.$$
Since ${\mathsf P}(\Gamma)=\lim_{p\to\infty}{\mathsf P}(G_p)=0$, we have proved the following theorem:
\begin{theorem}\label{THM:nonCV}
Under Hypotheses \ref{hyp:z*} and \ref{hyp:RM}, we have ${\mathsf P}(\Gamma)=0$.
\end{theorem}
\subsection{Application to strongly VRJP on complete graphs}
\label{sec:nonCV_VRJP}
Recall from Section \ref{sec:Dyn} that the empirical occupation measure process $(Z_t)_{t\ge0}$ satisfies the following equation
\begin{align}\label{vrjp}
Z_t-Z_s &=\;
\int_s^t \frac{1}{u+\ell_0} F(Z_u) {\rm d} u + \frac{I[X_s]}{(s+\ell_0)w_s}- \frac{I[X_t]}{(t+\ell_0)w_t}\\ \nonumber
&+ \int_s^t \Psi_u {\rm d} u + \int_s^t \frac{{\rm d} M_u}{(u+\ell_0)w_u},
\end{align}
where $$\Psi_t=I[X_t]\frac{{\rm d}}{{\rm d} t}\left(\frac{1}{(t+\ell_0)w_t}\right)\quad \text{ and } \quad M_t=I[X_t]-\int_0^t A_s[X_s] {\rm d} s.$$
Recall that $\langle M^j\rangle_t=\int_0^t \Lambda^j_s {\rm d} s$, where $\Lambda^j$ is defined in \eqref{eq:defLambdaj}.
For $t\ge t_0:=\log(\ell_0)$, let
\begin{equation}
\label{def:Zhat}
\widehat{Z}_t=Z_{e^t-\ell_0}+\frac{I[X_{e^t-\ell_0}]}{e^tw_{e^t-\ell_0}}.
\end{equation}
Equation (\ref{vrjp}) is thus equivalent to
\begin{align}\label{mvrjp}\widehat{Z}_t-\widehat{Z}_s=\int_s^t F(\widehat{Z}_u){\rm d} u+\int_s^t \widehat{\Psi}_u{\rm d} u + \widehat{M}_t-\widehat{M}_s,
\end{align}
where we have set
\begin{align*}
\widehat{\Psi}_t=e^t \Psi_{e^t-\ell_0} +F(Z_{e^t-\ell_0})-F(\widehat{Z}_t) \qquad \hbox{and} \qquad \widehat{M}_t=\int_0^{e^t-\ell_0}\frac{{\rm d} M_s}{(s+\ell_0)w_s},
\end{align*}
which are respectively an adapted process and a martingale w.r.t.\ the filtration $(\widehat{\mathcal{F}}_t)_{t\ge t_0}:=(\mathcal{F}_{e^t-\ell_0})_{t\ge t_0}$.
Note that $\langle \widehat{M}^j\rangle_t-\langle \widehat{M}^j\rangle_{t_0}=\int_{t_0}^t \widehat{\Lambda}^j_s {\rm d} s$, with $\widehat{\Lambda}^j_s=\frac{\Lambda^j_{e^s-\ell_0}}{e^sw^2_{e^s-\ell_0}}.$
In this subsection, we will apply the results of Subsection \ref{sec:dirattract} and Subsection \ref{sec:dirinst} to the process $(\widehat{Z}_t)_{t\ge0}$ and thus show that ${\mathsf P}[Z_{t}\to z^*]={\mathsf P}[\widehat{Z}_{t}\to z^*]=0$ for each unstable equilibrium $z^*$.
\begin{lemma}\label{lem:togpt}
There exists a positive constant $K$ such that for all $t>t_0$, a.s.
\begin{align*}
&\|\widehat{\Psi}_t\|\le K e^{-(\alpha+1)t},\quad \quad \widehat{\Lambda}^j_t \le K e^{-(\alpha+1)t} \quad \hbox{ and } \quad |\Delta \widehat{M}_t^j|\le K e^{-(\alpha+1)t}.
\end{align*}
\end{lemma}
\begin{proof} Let us first recall that $w_t\ge k(t+\ell_0)^\alpha$ for some constant $k$.
Using that $F$ is Lipschitz, we easily obtain the first inequality. To obtain the second inequality, observe that for each $j$, $\Lambda^j_t\le w_t$. Thus for all $t>t_0$,
$$\widehat{\Lambda}^j_t\le \frac{1}{e^tw_{e^t-\ell_0}}\le k^{-1} e^{-(\alpha+1)t}.$$
Finally,
$$|\Delta \widehat{M}_t^j|=\frac{|\Delta I[X_{e^t-\ell_0}]|}{e^tw_{e^t-\ell_0}}\le \frac{1}{e^tw_{e^t-\ell_0}}
\le k^{-1}e^{-(\alpha+1)t}.$$
\end{proof}
\begin{theorem}\label{thm:nonCV_VRJP}
Assume that $z^*$ is an unstable equilibrium of the vector field $F$ defined by \eqref{vecF}. Then
${\mathsf P}[Z_t\to z^*]=0$.
\end{theorem}
\begin{proof}
Note first that Lemma \ref{lem:togpt} implies that Hypothesis \ref{hyp:gpt} holds with $\gamma=\frac{\alpha+1}{2}$.
Let $z^*=(x^*,y^*)$ be an unstable equilibrium, where $y^*=0\in\mathbb{R}^{d-m}$ and $x^*=\left(\frac1m,\frac1m,\dots,\frac1m\right)\in \mathbb{R}^m$, with $m\in\{2,3,\dots, d\}$ (up to a permutation of indices, this describes the set of all unstable equilibria).
Note also that there is a compact convex neighbourhood $\mathcal{N}=\mathcal{N}_1\times \mathcal{N}_2$ of $z^*$ and a positive constant $h$ such that for all $z\in\mathcal{N}$, $H(z)=\sum_i z_i^\alpha \ge h$.
Setting $C(z)=\frac{1}{H(z)}$, we have that for all $i\in\{1,2,\dots,d-m\}$,
$$ F_{m+i}(x,y)=-y_{i}(1+ C(z)y_{i}^{\alpha-1}).$$
Since $\alpha>1$, it can easily be shown that Hypothesis \ref{hyp:dirattract} holds for all $\mu\in (0,1)$.
Hypothesis \ref{hyp:alpha-holder} also holds (with the same constant $\alpha$).
Therefore, Lemma \ref{lem:reducx} can be applied to the process $(\widehat{Z}_t)_{t\ge t_0}$ defined by (\ref{def:Zhat}). Set $\widehat{X}_t:=\Pi\widehat{Z}_t$ and let ${G}:\mathbb{R}^m \to\mathbb{R}^m$ be the vector field defined by $G_i(x)=F_i(x,0)$. Then for all $s<t$,
$$\widehat{X}_t-\widehat{X}_s=\int_s^t G(\widehat{X}_u){\rm d} u+\int_s^t
\hat{r}_u{\rm d} u + \Pi\widehat{M}_t-\Pi\widehat{M}_s,$$
with $\hat{r}_t=\Pi\widehat{\Psi}_t+O(e^{-\alpha\beta t})$ on $\Gamma$, for all $\beta<\gamma\wedge \mu$. Note that since $\mu$ can be taken as close as we want to $1$ and since $\gamma=\frac{\alpha+1}{2}>1$, $\beta$ can also be taken as close as we want to $1$.
We now apply the result of Section \ref{sec:dirinst}, with $Z$, $F$, $M$, $r$ and $R$ respectively replaced by $\hat{X}$, $G$, $\Pi\hat{M}$, $\hat{r}$ and $0$.
The vector field $G$ satisfies Hypothesis \ref{hyp:z*} with $\lambda=\alpha-1$.
Let us now check Hypothesis \ref{hyp:RM} with $a(t)=e^{-(\alpha+1)t}$.
Choosing $\beta\in (\frac{\alpha+1}{2\alpha},1)$, we have that $\hat{r}$ satisfies \eqref{eq:THM4}.
Set $\widehat{\Lambda}=\sum_{j=1}^m \widehat{\Lambda}^j$. It remains to verify the inequality (\ref{eq:THM1}) for $\widehat{\Lambda}$. Lemma \ref{lem:togpt} shows that for all $t>0$, $$\widehat{\Lambda}_t\le \frac{m}{k}e^{-(\alpha+1)t} = C_+ e^{-(\alpha+1)t}.$$
Fix $\epsilon\in (0,1)$ and choose the neighbourhood $\mathcal{N}$ sufficiently small such that for all $z\in \mathcal{N}$ and $i\in\{1,\dots,m\}$, $m\varphi_i(z)\in (1-\epsilon,1+\epsilon)$. Therefore, if $Z_t\in\mathcal{N}$, we have that for $j\in\{1,\dots,m\}$, $w^{(j)}_t=w_t\varphi_j(Z_t)\ge \frac{k(1-\epsilon)}{m} (t+\ell_0)^\alpha$.
Therefore, since $m\ge 2$, if $Z_t\in\mathcal{N}$, we have that for all $1\le i\le m$
$$\Lambda^i_t\ge 1_{\{X_t=i\}} \sum_{j\neq i, 1\le j\le m} w^{(j)}_t + 1_{\{X_t\neq i\}} w^{(i)}_t\ge \min_{1\le j \le m}w_t^{(j)}\ge \frac{k(1-\epsilon)}{m}(t+\ell_0)^\alpha.$$
Since $w_t\le d(t+\ell_0)^{\alpha}$, we have that if $Z_t\in\mathcal{N}$,
$$\widehat{\Lambda}_t\ge \frac{k(1-\epsilon)e^{\alpha t}}{e^td^2e^{2\alpha t}}=C_- e^{-(\alpha+1)t}.$$
This proves that Hypothesis \ref{hyp:RM} is satisfied.
As a conclusion, Theorem \ref{THM:nonCV} can be applied, and this proves that ${\mathsf P}[Z_t\to z^*]={\mathsf P}[\widehat X_t\to x^*]=0$.
\end{proof}
\subsection{A theorem on martingales}
In this subsection, we prove a martingale theorem, which is a continuous-time version of a theorem by Brandi\`ere and Duflo (see Theorem A in \cite{Brandiere96} or Theorem 3.IV.13 in \cite{Duflo1996}).
\begin{theorem}\label{THM:THMA}
Let $M$ be a finite variation c\`adl\`ag martingale in $\mathbb{R}^d$ with $M_0=0$, and let $r$ and $R$ be adapted processes in $\mathbb{R}^d$ with respect to a filtration $(\mathcal{F}_t)_{t\ge0}$. Set $\Psi_t=r_t+R_t$.
Let $\Gamma$ be an event and let $a:[0,\infty)\to (0,\infty)$ be a continuous function such that $\int_0^\infty a(s){\rm d} s <\infty$ and set $\alpha^2(t)=\int_t^\infty a(s) {\rm d} s$.
Suppose that for each $i$, $\langle M^i\rangle_t = \int_0^t \Lambda^i_s {\rm d} s$, with $\Lambda^i$ a positive adapted c\`adl\`ag process. Set $\Lambda=\sum_i \Lambda^i$.
Suppose that there is a random variable $K$, such that a.s. on $\Gamma$, $1<K<\infty$ and for all $t>0$,
\begin{align}\label{eq:THMA1}
K^{-1} a(t) \le \Lambda(t)\le Ka (t),
\end{align}
\begin{align}\label{eq:THMA2}
\sum_i|\Delta M^i_t|\le K \alpha(t),
\end{align}
\begin{align}\label{eq:THMA4}
\int_0^\infty \frac{\|r_s\|^2}{a(s)} {\rm d} s \le K,
\end{align}
and, as $t\to\infty$,
\begin{align}\label{eq:THMA3}
{\mathsf E}\left[1_\Gamma\int_t^\infty \|R_s\| {\rm d} s\right]= o(\alpha(t)).\end{align}
Then, a.s. on $\Gamma$, $S_t:=\int_0^t {\Psi}_s{\rm d} s+ M_t$ converges towards a finite random variable $L$ and for every $\mathcal{F}_p$-measurable random variable $\eta$, $p>0$, we have
$${\mathsf P}[\Gamma\cap\{L=\eta\}]=0.$$
\end{theorem}
\begin{remark} \label{rk:inaccuracy} Our theorem here is a continuous-time version of Theorem A by Brandi\`ere and Duflo in \cite{Brandiere96}. Their result is widely applied to discrete stochastic approximation processes, in particular to showing the non-convergence to a repulsive equilibrium. Note that there is an inaccuracy in the application of Burkholder's inequality in their proof. Besides this, there is also a mistake in the application of their theorem to the proof of Proposition 4 in \cite{Brandiere96}, since the process $S_n$ defined on page 406 is not adapted.
\end{remark}
\begin{proof} \ \\
\textit{Simplification of the hypotheses:}
It is enough to prove the theorem assuming in addition that the random variable $K$ is non-random and that \eqref{eq:THMA1}, \eqref{eq:THMA2} and \eqref{eq:THMA4} are satisfied a.s. on $\Omega$.
Let us explain shortly why: the idea is due to Lai and Wei in \cite{Lai1983} (see also \cite{Duflo1996}, p. 60-61).
For $n\in\mathbb{N}$, let $T_n$ be the first time $t$ such that $\Lambda(t)\not\in [n^{-1} a(t),na(t)]$ or $|\Delta M^i_t|>n \alpha(t)$ for some $i$ or $\int_0^t \frac{\|r_s\|^2}{a(s)} {\rm d} s >n$. Then $(T_n)$ is an increasing sequence of stopping times and a.s. on $\Gamma\cap\{K\le n\}$, $T_n=\infty$.
Possibly extending the probability space, let $N$ be a Poisson process with intensity $a(t)$.
For $n\in \mathbb{N}$, $i\in\{1,\dots,d\}$ and $t>0$, set $$\tilde{M}^{i}_t=M^i_{t\wedge T_n} + N_t-N_{t\wedge T_n}\ \text{ and } \ \tilde{r}_t=r_{t\wedge T_n}.$$ Then, $\tilde{M}$ and $\tilde{r}$ satisfy \eqref{eq:THMA1}, \eqref{eq:THMA2} and \eqref{eq:THMA4} a.s. on $\Omega$, with $K=n$, and on the event $\{T_n=\infty\}$, $\tilde{M}=M$ and $\tilde{r}=r$. Now set $$L_n=\int_0^\infty (\tilde{r}_s+R_s) {\rm d} s + \tilde{M}_\infty,$$
which is well defined on $\Gamma$. Then a.s. on the event $\Gamma_n:=\Gamma\cap\{K\le n\}$, we have $L_n=L$.
Suppose now that for all $n$, we have ${\mathsf P}[\Gamma_n\cap \{L_n=\eta\}]=0$; then we also have
${\mathsf P}[\Gamma\cap \{L=\eta\}]=\lim_{n\to\infty}{\mathsf P}[\Gamma_n\cap \{L=\eta\}]=\lim_{n\to\infty}{\mathsf P}[\Gamma_n\cap \{L_n=\eta\}]=0$.
Let $\tilde\Omega$ be the event on which \eqref{eq:THMA1}, \eqref{eq:THMA2} and \eqref{eq:THMA4} are satisfied with a non-random positive constant $K$.
From now on, we suppose that $K$ is non-random and that \eqref{eq:THMA1}, \eqref{eq:THMA2} and \eqref{eq:THMA4} are satisfied a.s. on $\Omega$.
A first consequence is that $M$, $[M^i]-\langle M^i\rangle$ and $\|M\|^2-A$, with $A=\sum_i \langle M^i\rangle$, are uniformly integrable martingales. Indeed, using Lemma VII.3.34 in \cite{Jacod2003}, p. 423, there are constants $k_1$ and $k_2$ such that
$${\mathsf E}\left[{\sup}_{0\le s\le t}|M_s^i|^4\right]\le k_1\left(\sup_{0\le s\le t,\omega\in\tilde \Omega}|\Delta M_s^i(\omega)|\right)^2\left({\mathsf E}\left[\langle M^i \rangle_t^2\right]\right)^{1/2}+k_2{\mathsf E}\left[\langle M^i \rangle_t^2\right].$$
Recall from (\ref{eq:THMA1}) and (\ref{eq:THMA2}) that $$\langle M^i\rangle_t=\int_0^t\Lambda_s^i{\rm d} s\le K\int_0^t a(s){\rm d} s<K\int_0^{\infty} a(s){\rm d} s,\quad |\Delta M^i_t|\le K\alpha(t)\le K\alpha(0)$$ for all $t\ge0$. It implies that ${\mathsf E}(\|M_t\|^4)$ is uniformly bounded and $M$ is thus uniformly integrable.
Without loss of generality, we also suppose that $p=0$ and $\eta=0$. Otherwise, one can replace $\mathcal{F}_t$, $M_t$, $r_t$ and $R_t$ by $\mathcal{F}_{t+p}$,
$M_{t+p}-M_p$, $r_{t+p}+\beta'(t)\left(\eta-\int_0^pr_s{\rm d} s-M_p\right)$ and $R_{t+p}+\beta'(t)\left(\eta-\int_0^pR_s{\rm d} s-M_p\right)$ respectively, where $\beta:[0,\infty)\to (0,\infty)$ is some differentiable function such that $\beta(0)=1$, $\lim_{t\to\infty}\beta(t)=0$ and $\beta(t)=o(\alpha(t))$.
Set $G=\Gamma\cap \{L=0\}$. For $t\ge 0$, define $\rho_t=M_\infty-M_t,\ \tau_t=\int_t^\infty \Psi_s {\rm d} s\ \text{ and } T_t=\rho_t+\tau_t.$
Then $T_t=L-S_t$ and on $G$, $T_t=-S_t$.
Since for all $t>0$, $(\|M_{s}-M_t\|^2-(A_s-A_t),\ s\ge t)$ is a uniformly integrable martingale, we have that for all $t>0$, ${\mathsf E}[\|\rho_t\|^2|\mathcal{F}_t]={\mathsf E}\big[ A_\infty-A_t|\mathcal{F}_t\big]={\mathsf E}\big[ \int_t^{\infty}\Lambda(s){\rm d} s|\mathcal{F}_t\big]$ and therefore
$$K^{-1}\alpha^2(t)\le {\mathsf E}[\|\rho_t\|^2|\mathcal{F}_t]\le K\alpha^2(t).$$
Applying Lemma VII.3.34 in \cite{Jacod2003} to the martingale $(M_s-M_t,\ s\ge t)$, we have
\begin{align*}{\mathsf E}\left[|M_s^i-M_t^i|^4|\mathcal{F}_t\right]& \le k_1\left(\sup_{t\le u\le s,\omega\in\tilde \Omega}|\Delta M_u^i(\omega)|\right)^2\left({\mathsf E}\left[\left(\langle M^i \rangle_s-\langle M^i \rangle_t\right)^2|\mathcal{F}_t\right]\right)^{1/2}\\
&+k_2{\mathsf E}\left[\left(\langle M^i \rangle_s-\langle M^i \rangle_t\right)^2|\mathcal{F}_t\right]\\
&\le k_1K^3\alpha^2(t)\int_t^s a(u){\rm d} u+k_2K^2\left(\int_t^s a(u){\rm d} u\right)^2.
\end{align*}
Hence, for all $t>0$, there is a constant $k$ such that ${\mathsf E}[\|\rho_t\|^4|\mathcal{F}_t]\le k\alpha^4(t).$
Set $c_0=K^{-\frac32} k^{-\frac12}$. Since
${\mathsf E}\big[\|\rho_t\|^2|\mathcal{F}_t\big]
\le {\mathsf E}\big[\|\rho_t\||\mathcal{F}_t\big]^{\frac23}{\mathsf E}\big[\|\rho_t\|^4|\mathcal{F}_t\big]^\frac13,$
we have that for all $t$,
\begin{align*}
{\mathsf E}[\|\rho_t\||\mathcal{F}_t]& \ge c_0 \alpha(t).
\end{align*}
Let $U$ be a Borel function from $\mathbb{R}^d\setminus\{0\}$ onto the set of $d\times d$ orthogonal matrices such that $U(a)[a/\|a\|]=e_1$ (with $e_1=(1,0,\dots,0)$).
Then on $G$,
\begin{align*}
&\|T_t\| e_1+U(S_t) T_t = 0,\\
&\big\|\|\rho_t\| e_1+ U(S_t)\rho_t\big\|\le 2\|\tau_t\|.
\end{align*}
Set $G_t:=\{{\mathsf P}(G|\mathcal{F}_t)>\frac12\}$. Then for all $t>0$ (using in the second inequality that $S_t$ is $\mathcal{F}_t$-measurable and that ${\mathsf E}[\rho_t|\mathcal{F}_t]=0$)
\begin{align*}
{\mathsf P}(G_t)
&\le \frac{1}{c_0\alpha(t)}\big\|{\mathsf E}\big[ 1_{G_t}{\mathsf E}[\|\rho_t\|e_1|\mathcal{F}_t]\big]\big\|\\
&\le \frac{1}{c_0\alpha(t)}\big\|{\mathsf E}\big[ 1_{G_t}{\mathsf E}[\|\rho_t\|e_1 + U(S_t)\rho_t|\mathcal{F}_t]\big]\big\|\\
&\le \frac{1}{c_0\alpha(t)}\big\|{\mathsf E}\big[ 1_{G}{\mathsf E}[\|\rho_t\|e_1 + U(S_t)\rho_t|\mathcal{F}_t]\big]\big\|\\
& + \frac{1}{c_0\alpha(t)}\big\|{\mathsf E}\big[ (1_{G_t}-1_{G}){\mathsf E}[\|\rho_t\|e_1 + U(S_t)\rho_t|\mathcal{F}_t]\big]\big\|\\
&\le \frac{2}{c_0\alpha(t)}{\mathsf E}\big[ 1_{G}\|\tau_t\|\big] + \frac{2}{c_0\alpha(t)}\left({\mathsf E}\big[ (1_{G_t}-1_{G})^2\big]\right)^{\frac12} \left({\mathsf E}[\|\rho_t\|^2]\right)^{\frac12}.
\end{align*}
Note that $$\lim_{t\to\infty}{\mathsf E}\big[(1_{G_t}-1_{G})^2\big]=0 \quad \text{ and } \quad{\mathsf E}[\|\rho_t\|^2]\le c_+\alpha^2(t).$$
Thus, the second term converges to $0$. For the first term (using the Cauchy-Schwarz inequality to obtain the first term on the right-hand side),
\begin{align*}
{\mathsf E}\big[ 1_{G}\|\tau_t\|\big]
\le & \; {\mathsf E}\left[ 1_{G}\int_t^\infty \|r_s\| {\rm d} s\right] + {\mathsf E}\left[ 1_{G}\int_t^\infty \|R_s\| {\rm d} s\right]\\
\le & \; \alpha(t) {\mathsf E}\left[ 1_{G}\left(\int_t^\infty \frac{\|r_s\|^2}{a(s)} {\rm d} s\right)^{\frac12}\right] + o(\alpha(t)) = o(\alpha(t)),
\end{align*}
using the Cauchy-Schwarz inequality, Lebesgue's Dominated Convergence Theorem and the hypotheses. We thus obtain that ${\mathsf P}(G)=\lim_{t\to\infty}{\mathsf P}(G_t)=0$.
\end{proof}
\end{document} |
\begin{document}
\newcommand {\emptycomment}[1]{}
\baselineskip=14pt
\newcommand{\nc}{\newcommand} % NOTE(review): original read \newcommand{\newcommand}{\newcommand}, which is a fatal redefinition of \newcommand; the macro name was presumably \nc before a bad global substitution — confirm against the source history
\newcommand{\delete}[1]{}
\newcommand{\mfootnote}[1]{\footnote{#1}}
\newcommand{\todo}[1]{\tred{To do:} #1}
\delete{
\newcommand{\mlabel}[1]{\label{#1}}
\newcommand{\mcite}[1]{\cite{#1}}
\newcommand{\mref}[1]{\ref{#1}}
\newcommand{\meqref}[1]{\ref{#1}}
\newcommand{\mbibitem}[1]{\bibitem{#1}}
}
\newcommand{\mlabel}[1]{\label{#1}
{\mathfrak hfill \mathfrak hspace{1cm}{\bf{{\ }\mathfrak hfill(#1)}}}}
\newcommand{\mcite}[1]{\cite{#1}{{\bf{{\ }(#1)}}}}
\newcommand{\mref}[1]{\ref{#1}{{\bf{{\ }(#1)}}}}
\newcommand{\meqref}[1]{\eqref{#1}{{\bf{{\ }(#1)}}}}
\newcommand{\mbibitem}[1]{\bibitem[\bf #1]{#1}}
\newtheorem{thm}{Theorem}[section]
\newtheorem{lem}[thm]{Lemma}
\newtheorem{cor}[thm]{Corollary}
\newtheorem{pro}[thm]{Proposition}
\theoremstyle{definition}
\newtheorem{defi}[thm]{Definition}
\newtheorem{ex}[thm]{Example}
\newtheorem{rmk}[thm]{Remark}
\newtheorem{pdef}[thm]{Proposition-Definition}
\newtheorem{condition}[thm]{Condition}
\renewcommand{\labelenumi}{{\rm(\alph{enumi})}}
\renewcommand{\theenumi}{\alph{enumi}}
\newcommand{\tred}[1]{\textcolor{red}{#1}}
\newcommand{\tblue}[1]{\textcolor{blue}{#1}}
\newcommand{\tgreen}[1]{\textcolor{green}{#1}}
\newcommand{\tpurple}[1]{\textcolor{purple}{#1}}
\newcommand{\btred}[1]{\textcolor{red}{\bf #1}}
\newcommand{\btblue}[1]{\textcolor{blue}{\bf #1}}
\newcommand{\btgreen}[1]{\textcolor{green}{\bf #1}}
\newcommand{\btpurple}[1]{\textcolor{purple}{\bf #1}}
\newcommand{\ld}[1]{\textcolor{blue}{Landry:#1}}
\newcommand{\cm}[1]{\textcolor{red}{Chengming:#1}}
\newcommand{\li}[1]{\textcolor{yellow}{#1}}
\newcommand{\lir}[1]{\textcolor{blue}{Li:#1}}
\newcommand{\twovec}[2]{\left(\begin{array}{c} #1 \\ #2\end{array} \right )}
\newcommand{\threevec}[3]{\left(\begin{array}{c} #1 \\ #2 \\ #3 \end{array}\right )}
\newcommand{\twomatrix}[4]{\left(\begin{array}{cc} #1 & #2\\ #3 & #4 \end{array} \right)}
\newcommand{\threematrix}[9]{{\left(\begin{matrix} #1 & #2 & #3\\ #4 & #5 & #6 \\ #7 & #8 & #9 \end{matrix} \right)}}
\newcommand{\twodet}[4]{\left|\begin{array}{cc} #1 & #2\\ #3 & #4 \end{array} \right|}
\newcommand{\rk}{\mathrm{r}}
\newcommand{\mathfrak g}{\mathfrak g}
\newcommand{\mathfrak h}{\mathfrak h}
\newcommand{\noindent{$Proof$.}\ }{\noindent{$Proof$.}\ }
\newcommand{\mathfrak g}{\mathfrak g}
\newcommand{\mathfrak h}{\mathfrak h}
\newcommand{\rm{Id}}{\rm{Id}}
\newcommand{\mathfrak gl}{\mathfrak {gl}}
\newcommand{\mathrm{ad}}{\mathrm{ad}}
\newcommand{\mathrm{ad}d}{\mathfrak a\mathfrak d}
\newcommand{\mathfrak a}{\mathfrak a}
\newcommand{\mathfrak b}{\mathfrak b}
\newcommand{\mathfrak c}{\mathfrak c}
\newcommand{\mathfrak d}{\mathfrak d}
\newcommand {\comment}[1]{{\marginpar{*}\scriptsize\textbf{Comments:} #1}}
\newcommand{\tforall}{\text{ for all }}
\newcommand{\svec}[2]{{\tiny\left(\begin{matrix}#1\\
#2\end{matrix}\right)\,}}
\newcommand{\ssvec}[2]{{\tiny\left(\begin{matrix}#1\\
#2\end{matrix}\right)\,}}
\newcommand{\typeI}{local cocycle $3$-Lie bialgebra\xspace}
\newcommand{\typeIs}{local cocycle $3$-Lie bialgebras\xspace}
\newcommand{\typeII}{double construction $3$-Lie bialgebra\xspace}
\newcommand{\typeIIs}{double construction $3$-Lie bialgebras\xspace}
\newcommand{\bia}{{$\mathcal{P}$-bimodule ${\bf k}$-algebra}\xspace}
\newcommand{\bias}{{$\mathcal{P}$-bimodule ${\bf k}$-algebras}\xspace}
\newcommand{\rmi}{{\mathrm{I}}}
\newcommand{\rmii}{{\mathrm{II}}}
\newcommand{\rmiii}{{\mathrm{III}}}
\newcommand{\pr}{{\mathrm{pr}}}
\newcommand{\mathfrak huaA}{\mathcal{A}}
\newcommand{\OT}{constant $\theta$-}
\newcommand{\T}{$\theta$-}
\newcommand{\IT}{inverse $\theta$-}
\newcommand{\pll}{\beta}
\newcommand{\plc}{\epsilon}
\newcommand{\ass}{{\mathit{Ass}}}
\newcommand{\lie}{{\mathit{Lie}}}
\newcommand{\comm}{{\mathit{Comm}}}
\newcommand{\dend}{{\mathit{Dend}}}
\newcommand{\zinb}{{\mathit{Zinb}}}
\newcommand{\tdend}{{\mathit{TDend}}}
\newcommand{\prelie}{{\mathit{preLie}}}
\newcommand{\postlie}{{\mathit{PostLie}}}
\newcommand{\quado}{{\mathit{Quad}}}
\newcommand{\octo}{{\mathit{Octo}}}
\newcommand{\ldend}{{\mathit{ldend}}}
\newcommand{\lquad}{{\mathit{LQuad}}}
\newcommand{\mathrm{ad}ec}{\check{;}} \newcommand{\aop}{\alpha}
\newcommand{\dftimes}{\widetilde{\otimes}} \newcommand{\dfl}{\succ} \newcommand{\dfr}{\prec}
\newcommand{\dfc}{\circ} \newcommand{\dfb}{\bullet} \newcommand{\dft}{\star}
\newcommand{\dfcf}{{\mathbf k}} \newcommand{\apr}{\ast} \newcommand{\spr}{\cdot}
\newcommand{\twopr}{\circ} \newcommand{\tspr}{\star} \newcommand{\sempr}{\ast}
\newcommand{\disp}[1]{\displaystyle{#1}}
\newcommand{\bin}[2]{ (_{\stackrel{\scs{#1}}{\scs{#2}}})}
\newcommand{\binc}[2]{ \left (\!\! \begin{array}{c} \scs{#1}\\
\scs{#2} \end{array}\!\! \right )}
\newcommand{\bincc}[2]{ \left ( {\scs{#1} \atop
\scs{#2}} \right )}
\newcommand{\sarray}[2]{\begin{array}{c}#1
\\ \mathfrak hline
\\ #2 \end{array}}
\newcommand{\bs}{\bar{S}} \newcommand{\dcup}{\stackrel{\bullet}{\cup}}
\newcommand{\dbigcup}{\stackrel{\bullet}{\bigcup}} \newcommand{\etree}{\big |}
\newcommand{\la}{\longrightarrow} \newcommand{\fe}{\'{e}} \newcommand{\rar}{\rightarrow}
\newcommand{\dar}{\downarrow} \newcommand{\dap}[1]{\downarrow
\rlap{$\scriptstyle{#1}$}} \newcommand{\uap}[1]{\uparrow
\rlap{$\scriptstyle{#1}$}} \newcommand{\defeq}{\stackrel{\rm def}{=}}
\newcommand{\dis}[1]{\displaystyle{#1}} \newcommand{\dotcup}{\,
\displaystyle{\bigcup^\bullet}\ } \newcommand{\sdotcup}{\tiny{
\displaystyle{\bigcup^\bullet}\ }} \newcommand{\mathfrak hcm}{\ \mathfrak hat{,}\ }
\newcommand{\mathfrak hcirc}{\mathfrak hat{\circ}} \newcommand{\mathfrak hts}{\mathfrak hat{\shpr}}
\newcommand{\lts}{\stackrel{\leftarrow}{\shpr}}
\newcommand{\rts}{\stackrel{\rightarrow}{\shpr}} \newcommand{\lleft}{[}
\newcommand{\lright}{]} \newcommand{\uni}[1]{\tilde{#1}} \newcommand{\wor}[1]{\check{#1}}
\newcommand{\free}[1]{\bar{#1}} \newcommand{\den}[1]{\check{#1}} \newcommand{\lrpa}{\wr}
\newcommand{\curlyl}{\left \{ \begin{array}{c} {} \\ {} \end{array}
\right . \!\!\!\!\!\!\!}
\newcommand{\curlyr}{ \!\!\!\!\!\!\!
\left . \begin{array}{c} {} \\ {} \end{array}
\right \} }
\newcommand{\leaf}{\ell}
\newcommand{\longmid}{\left | \begin{array}{c} {} \\ {} \end{array}
\right . \!\!\!\!\!\!\!}
\newcommand{\ot}{\otimes} \newcommand{\sot}{{\scriptstyle{\ot}}}
\newcommand{\otm}{\overline{\ot}}
\newcommand{\ora}[1]{\stackrel{#1}{\rar}}
\newcommand{\ola}[1]{\stackrel{#1}{\la}}
\newcommand{\pltree}{\calt^\pl}
\newcommand{\epltree}{\calt^{\pl,\NC}}
\newcommand{\rbpltree}{\calt^r}
\newcommand{\scs}[1]{\scriptstyle{#1}} \newcommand{\mrm}[1]{{\rm #1}}
\newcommand{\dirlim}{\displaystyle{\lim_{\longrightarrow}}\,}
\newcommand{\invlim}{\displaystyle{\lim_{\longleftarrow}}\,}
\newcommand{\mvp}{
} \newcommand{\svp}{
}
\newcommand{\vp}{
} \newcommand{\proofbegin}{\noindent{\bf Proof: }}
\newcommand{\proofend}{$\blacksquare$
}
\newcommand{\freerbpl}{{F^{\mathrm RBPL}}}
\newcommand{\sha}{{\mbox{\cyr X}}}
\newcommand{\newcommandsha}{{\mbox{\cyr X}^{\mathrm NC}}} \newcommand{\newcommandshao}{{\mbox{\cyr
X}^{\mathrm NC,\,0}}}
\newcommand{\shpr}{\diamond}
\newcommand{\shprm}{\overline{\diamond}}
\newcommand{\shpro}{\diamond^0}
\newcommand{\shprr}{\diamond^r}
\newcommand{\shpra}{\overline{\diamond}^r}
\newcommand{\shpru}{\check{\diamond}} \newcommand{\catpr}{\diamond_l}
\newcommand{\rcatpr}{\diamond_r} \newcommand{\lapr}{\diamond_a}
\newcommand{\sqcupm}{\ot}
\newcommand{\lepr}{\diamond_e} \newcommand{\vep}{\varepsilon} \newcommand{\labs}{\mid\!}
\newcommand{\rabs}{\!\mid} \newcommand{\mathfrak hsha}{\widehat{\sha}}
\newcommand{\lsha}{\stackrel{\leftarrow}{\sha}}
\newcommand{\rsha}{\stackrel{\rightarrow}{\sha}} \newcommand{\lc}{\lfloor}
\newcommand{\rc}{\rfloor}
\newcommand{\tpr}{\sqcup}
\newcommand{\newcommandtpr}{\vee}
\newcommand{\plpr}{\star}
\newcommand{\rbplpr}{\bar{\plpr}}
\newcommand{\sqmon}[1]{\langle #1\rangle}
\newcommand{\forest}{\calf}
\newcommand{\altx}{\Lambda_X} \newcommand{\vecT}{\vec{T}} \newcommand{\onetree}{\bullet}
\newcommand{\rm Ao}{\check{A}}
\newcommand{\seta}{\underline{\rm Ao}}
\newcommand{\deltaa}{\overline{\delta}}
\newcommand{\trho}{\tilde{\rho}}
\newcommand{\rpr}{\circ}
\newcommand{\dpr}{{\tiny\diamond}}
\newcommand{\rprpm}{{\rpr}}
\newcommand{\mmbox}[1]{\mbox{\ #1\ }} \newcommand{\ann}{\mrm{ann}}
\newcommand{\rm Aut}{\mrm{Aut}} \newcommand{\can}{\mrm{can}}
\newcommand{\twoalg}{{two-sided algebra}\xspace}
\newcommand{\colim}{\mrm{colim}}
\newcommand{\Cont}{\mrm{Cont}} \newcommand{\rchar}{\mrm{char}}
\newcommand{\cok}{\mrm{coker}} \newcommand{\dtf}{{R-{\rm tf}}} \newcommand{\dtor}{{R-{\rm
tor}}}
\renewcommand{\mrm{det}}{\mrm{det}}
\newcommand{\depth}{{\mrm d}}
\newcommand{\Div}{{\mrm Div}} \newcommand{\End}{\mrm{End}} \newcommand{\Ext}{\mrm{Ext}}
\newcommand{\Fil}{\mrm{Fil}} \newcommand{\Frob}{\mrm{Frob}} \newcommand{\Gal}{\mrm{Gal}}
\newcommand{\GL}{\mrm{GL}} \newcommand{\Hom}{\mrm{Hom}} \newcommand{\mathfrak hsr}{\mrm{H}}
\newcommand{\mathfrak hpol}{\mrm{HP}} \newcommand{\id}{\mrm{id}} \newcommand{\im}{\mrm{im}}
\newcommand{\incl}{\mrm{incl}} \newcommand{\length}{\mrm{length}}
\newcommand{\LR}{\mrm{LR}} \newcommand{\mchar}{\rm char} \newcommand{\NC}{\mrm{NC}}
\newcommand{\mpart}{\mrm{part}} \newcommand{\pl}{\mrm{PL}}
\newcommand{\ql}{{\QQ_\ell}} \newcommand{\qp}{{\QQ_p}}
\newcommand{\rank}{\mrm{rank}} \newcommand{\rba}{\rm{RBA }} \newcommand{\rbas}{\rm{RBAs }}
\newcommand{\rbpl}{\mrm{RBPL}}
\newcommand{\rbw}{\rm{RBW }} \newcommand{\rbws}{\rm{RBWs }} \newcommand{\rcot}{\mrm{cot}}
\newcommand{\rest}{\rm{controlled}\xspace}
\newcommand{\rdef}{\mrm{def}} \newcommand{\rdiv}{{\rm div}} \newcommand{\rtf}{{\rm tf}}
\newcommand{\rtor}{{\rm tor}} \newcommand{\res}{\mrm{res}} \newcommand{\SL}{\mrm{SL}}
\newcommand{\Spec}{\mrm{Spec}} \newcommand{\tor}{\mrm{tor}} \newcommand{\Tr}{\mrm{Tr}}
\newcommand{\mtr}{\mrm{sk}}
\newcommand{\ab}{\mathbf{Ab}} \newcommand{\rm Alg}{\mathbf{Alg}}
\newcommand{\rm Algo}{\mathbf{Alg}^0} \newcommand{\Bax}{\mathbf{Bax}}
\newcommand{\Baxo}{\mathbf{Bax}^0} \newcommand{\RB}{\mathbf{RB}}
\newcommand{\RBo}{\mathbf{RB}^0} \newcommand{\BRB}{\mathbf{RB}}
\newcommand{\Dend}{\mathbf{DD}} \newcommand{\bfk}{{\bf k}} \newcommand{\bfone}{{\bf 1}}
\newcommand{\base}[1]{{a_{#1}}} \newcommand{\mrm{det}ail}{\marginpar{\bf More detail}
\noindent{\bf Need more detail!}
\svp}
\newcommand{\Diff}{\mathbf{Diff}} \newcommand{\mathfrak gap}{\marginpar{\bf
Incomplete}\noindent{\bf Incomplete!!}
\svp}
\newcommand{\FMod}{\mathbf{FMod}} \newcommand{\mset}{\mathbf{MSet}}
\newcommand{\rb}{\mathrm{RB}} \newcommand{\Int}{\mathbf{Int}}
\newcommand{\Mon}{\mathbf{Mon}}
\newcommand{\remarks}{\noindent{\bf Remarks: }}
\newcommand{\OS}{\mathbf{OS}}
\newcommand{\Rep}{\mathbf{Rep}}
\newcommand{\Rings}{\mathbf{Rings}} \newcommand{\Sets}{\mathbf{Sets}}
\newcommand{\DT}{\mathbf{DT}}
\newcommand{\BA}{{\mathbb A}} \newcommand{\CC}{{\mathbb C}} \newcommand{\DD}{{\mathbb D}}
\newcommand{\EE}{{\mathbb E}} \newcommand{\FF}{{\mathbb F}} \newcommand{\GG}{{\mathbb G}}
\newcommand{\HH}{{\mathbb H}} \newcommand{\LL}{{\mathbb L}} \newcommand{\NN}{{\mathbb N}}
\newcommand{\QQ}{{\mathbb Q}} \newcommand{\RR}{{\mathbb R}} \newcommand{\BS}{{\mathbb{S}}} \newcommand{\TT}{{\mathbb T}}
\newcommand{\VV}{{\mathbb V}} \newcommand{\ZZ}{{\mathbb Z}}
\newcommand{\calao}{{\mathcal A}} \newcommand{\cala}{{\mathcal A}}
\newcommand{\calc}{{\mathcal C}} \newcommand{\cald}{{\mathcal D}}
\newcommand{\cale}{{\mathcal E}} \newcommand{\calf}{{\mathcal F}}
\newcommand{\calfr}{{{\mathcal F}^{\,r}}} \newcommand{\calfo}{{\mathcal F}^0}
\newcommand{\calfro}{{\mathcal F}^{\,r,0}} \newcommand{\oF}{\overline{F}}
\newcommand{\calg}{{\mathcal G}} \newcommand{\calh}{{\mathcal H}}
\newcommand{\cali}{{\mathcal I}} \newcommand{\calj}{{\mathcal J}}
\newcommand{\call}{{\mathcal L}} \newcommand{\calm}{{\mathcal M}}
\newcommand{\caln}{{\mathcal N}} \newcommand{\calo}{{\mathcal O}}
\newcommand{\calp}{{\mathcal P}} \newcommand{\calq}{{\mathcal Q}} \newcommand{\calr}{{\mathcal R}}
\newcommand{\calt}{{\mathcal T}} \newcommand{\caltr}{{\mathcal T}^{\,r}}
\newcommand{\calu}{{\mathcal U}} \newcommand{\calv}{{\mathcal V}}
\newcommand{\calw}{{\mathcal W}} \newcommand{\calx}{{\mathcal X}}
\newcommand{\CA}{\mathcal{A}}
\newcommand{\fraka}{{\mathfrak a}} \newcommand{\frakB}{{\mathfrak B}}
\newcommand{\frakb}{{\mathfrak b}} \newcommand{\frakd}{{\mathfrak d}}
\newcommand{\oD}{\overline{D}}
\newcommand{\frakF}{{\mathfrak F}} \newcommand{\frakg}{{\mathfrak g}}
\newcommand{\frakm}{{\mathfrak m}} \newcommand{\frakM}{{\mathfrak M}}
\newcommand{\frakMo}{{\mathfrak M}^0} \newcommand{\frakp}{{\mathfrak p}}
\newcommand{\frakS}{{\mathfrak S}} \newcommand{\frakSo}{{\mathfrak S}^0}
\newcommand{\fraks}{{\mathfrak s}} \newcommand{\os}{\overline{\fraks}}
\newcommand{\frakT}{{\mathfrak T}}
\newcommand{\oT}{\overline{T}}
\newcommand{\frakX}{{\mathfrak X}} \newcommand{\frakXo}{{\mathfrak X}^0}
\newcommand{\frakx}{{\mathbf x}}
\newcommand{\frakTx}{\frakT}
\newcommand{\frakTa}{\frakT^a}
\newcommand{\frakTxo}{\frakTx^0}
\newcommand{\caltao}{\calt^{a,0}}
\newcommand{\ox}{\overline{\frakx}} \newcommand{\fraky}{{\mathfrak y}}
\newcommand{\frakz}{{\mathfrak z}} \newcommand{\oX}{\overline{X}}
\font\cyr=wncyr10
\newcommand{\al}{\alpha}
\newcommand{\lam}{\lambda}
\newcommand{\lr}{\longrightarrow}
\newcommand{\mathbb {K}}{\mathbb {K}}
\newcommand{\rm A}{\rm A}
\title[Pre-anti-flexible bialgebras]{Pre-anti-flexible bialgebras}
\author[Mafoya Landry Dassoundo]{Mafoya Landry Dassoundo}
\mathrm{ad}dress[]{Chern Institute of Mathematics \& LPMC,
Nankai University, Tianjin 300071, China} \email{[email protected]}
\begin{abstract}
In this paper, we derive
pre-anti-flexible algebras structures in term of
zero weight's Rota-Baxter operators defined on anti-flexible algebras,
view pre-anti-flexible algebras as a splitting
of anti-flexible algebras, introduce the notion of
pre-anti-flexible bialgebras and establish equivalences among
matched pair of anti-flexible algebras, matched pair of pre-anti-flexible algebras
and pre-anti-flexible bialgebras.
Investigation on special class of pre-anti-flexible bialgebras
leads to the establishment of
the pre-anti-flexible Yang-Baxter equation.
Both dual bimodules of pre-anti-flexible algebras and
dendriform algebras have the same shape and this induces
that both pre-anti-flexible Yang-Baxter equation and
$\mathcal{D}$-equation are identical.
Symmetric solution of pre-anti-flexible
Yang-Baxter equation gives a pre-anti-flexible bialgebra.
Finally, we recall and link $\mathcal{O}$-operators of
anti-flexible algebras to bimodules of pre-anti-flexible algebras and build symmetric
solutions of the anti-flexible Yang-Baxter equation.
\end{abstract}
\subjclass[2010]{17A20, 17D25, 16T10, 16T15, 17B38, 16T25}
\keywords{(pre-)anti-flexible algebra, dendriform algebras,
(pre-)anti-flexible bialgebra, Yang-Baxter equation, Rota-Baxter operator}
\maketitle
\tableofcontents
\numberwithin{equation}{section}
\allowdisplaybreaks
\section{Introduction and Preliminaries}
The notion of pre-anti-flexible algebras was introduced in \cite{DBH3} to derive
the $\mathcal{O}$-operators of anti-flexible algebras, which allow one to build the
skew-symmetric solutions of the anti-flexible Yang-Baxter equation.
Pre-anti-flexible algebras are closely related to dendriform algebras,
which were introduced by J.-L. Loday (\cite{Loday}).
Besides, pre-anti-flexible algebras can be considered as a
generalization of dendriform algebras and
as is very well known and widespread in the literature, dendriform algebras
are also induced by the well-known notion of Rota-Baxter operators of weight zero
(\cite{Aguiar1}), which were introduced around the 1960s by
G. Baxter (\cite{Baxter}) and G.-C. Rota (\cite{Rota}). Recently,
significant advances and contributions on Rota-Baxter operators and related
applications are summarized in \cite{Guo} and the references therein.
Since dendriform algebras are closely related to associative algebras,
pre-anti-flexible algebras are strongly linked to anti-flexible algebras
(also known as center-symmetric algebras)
and themselves associated with Lie algebras (\cite{Hounkonnou_D_CSA}) and other
similar deduction are derived and readable
in the following diagram which summarizes underlying relations
among pre-anti-flexible algebras (PAFA),
anti-flexible algebras (AFA),
Lie algebras (LA), associative algebras (AA), dendriform algebras (DA),
and finally with what we call
derived Lie-admissible algebra of a given pre-anti-flexible algebra (DLAd-PAFA)
\begin{eqnarray*}
\xymatrix{
&{\bf PAFA\;} (A, \prec, \succ) \ar[dl]_-{\mbox{C2}}\ar[d]^-{\mbox{C6}}\ar[rr]^-{\mbox{C1}}
&&{\bf AFA\;}(A, \cdot) \ar[d]^-{\mbox{C5}}\\
{\bf DA\;} (A, \prec_{_1}, \succ_{_1})\ar[dr]_-{\mbox{C3}}
& {\bf{DLAd-PAFA}\;}(A, \circ )\ar[rr]^-{\mbox{C7}}&&{\bf LA\;} (A, [,]), \\
&{\bf AA\;}(A, \cdot_{_1} )\ar[rru]_-{\mbox{C4}}
}
\end{eqnarray*}
where, for any $x,y, z\in A$, the condition C1 translates
$x\cdot y=x\prec y+x\succ y$,
C2 means $\prec_{_1}:=\prec; \succ_{_1}:=\succ$ and
$(x,y,z)_m=0, (x,y,z)_l=0, (x,y,z)_r=0$ which are given by
Eqs.~\eqref{eq:biasso} (trivial
pre-anti-flexible algebras are dendriform algebras), C3 describes
$x\cdot_{_1} y= x\prec_{_1} y+x\succ_{_1}y$, C4 expresses the commutator
$[x,y]=x\cdot_{_1}y-y\cdot_{_1} x$, C5 translates
$[x,y]=x\cdot y-y\cdot x$, C6 describes $x\circ y=x\succ y-y\prec x$ and finally
C7 expresses $[x,y]=x\circ y-y\circ x$. It is also useful to recall that any
associative algebra is a trivial anti-flexible algebra.
Thus, anti-flexible algebras generalize associative algebras.
Notice here that, although the goal of this paper
is not to construct a cohomology theory for
anti-flexible algebras, cohomology of associative and Lie algebras,
and other algebras are well known. Unfortunately, despite their links to associative algebras and to Lie algebras described above,
anti-flexible algebras and pre-anti-flexible algebras lack a suitable cohomology theory
which can justify certain shortcomings
on anti-flexible algebras such as for instance among many other,
coboundary anti-flexible algebras and those of pre-anti-flexible algebras
which are well known on associative and Lie algebras.
That said, all is not lost: some notions usually phrased in cohomological terms
can still be discussed for anti-flexible and pre-anti-flexible algebras.
As proof, analogously to the classical Yang-Baxter
equation on Lie algebras derived
by Drinfeld (\cite{Drinfeld}), to the associative Yang-Baxter equation
on associative algebras (\cite{Aguiar, Bai_Double}),
and to the $\mathcal{D}$-equation of dendriform algebras (\cite{Bai_Double}),
an anti-flexible Yang-Baxter equation was recovered in a special consideration of
anti-flexible algebras (\cite{DBH3}), and a
pre-anti-flexible Yang-Baxter equation arises on a special class of
pre-anti-flexible algebras.
Furthermore, alternative $\mathcal{D}$-bialgebras are also
provided and described on Cayley-Dickson matrix algebras (\cite{Gon}).
Besides, keeping the spirit in which dendriform algebras are viewed as
split associative algebras (\cite{Bai_spit}),
pre-anti-flexible algebras are regarded as split anti-flexible algebras and,
more generally, an operadic definition of the notion of
splitting algebra structures was introduced,
together with an equivalence with Manin products of operads for
quadratic operads (\cite{Pei_Bai_Guo}).
Before proceeding straight to the goal of this paper, we recall some
fundamentals which will be necessary throughout our
study of pre-anti-flexible bialgebras.
In this paper, all considered vector spaces are finite-dimensional
over a base field $\mathbb{F}$ whose characteristic is $0$.
Many derived results still hold regardless the dimension of vector spaces
on which they are stated. For this purpose, we mean by anti-flexible algebra (\cite{Hounkonnou_D_CSA}),
a couple $(A, \ast)$ where $A$ is a vector space equipped with a linear product
"$\ast$" such that for any $x,y, z\in A$, $(x,y,z)=(z,y,x)$, where the triple is defined as
$(x,y,z):=(x\ast y)\ast z-x\ast (y\ast z)$.
If in addition $A$ is equipped with two linear maps $l,r:A\rightarrow \End(V)$, where $V$
is a vector space, such that for any $x,y\in A$
\begin{subequations}
\begin{eqnarray}\label{eqbimodule1}
l{(x\ast y)}-l(x)l(y)=r(x)r(y)-r({y \ast x}),
\end{eqnarray}
\begin{eqnarray}\label{eqbimodule2}
\left[l(x), r(y)\right]= \left[l(y), r(x)\right],
\end{eqnarray}
\end{subequations}
then the triple $(l,r,V)$ is called bimodule of $(A, \ast)$.
\begin{thm}\label{thm_2} \cite{Hounkonnou_D_CSA}
Let $( A, \ast)$ and $(B, \circ)$ be two anti-flexible algebras.
Suppose that $(l_{ A}, r_{ A}, B)$ and $(l_{B}, r_{B}, A)$
are bimodules of $(A, \ast)$ and $(B, \circ)$, respectively, where
$l_{ A}, r_{ A}:A\rightarrow \End(B)$ and
$l_{B}, r_{B}:B\rightarrow \End(A)$ are four linear maps
obeying the relations,
for any $x, y \in A$ and for any $a, b \in B,$
\begin{subequations}
\begin{eqnarray}\label{eqq1}
l_{B}(a)(x\ast y) +r_{B}(a)(y\ast x)-r_{B}(l_{ A}(x)a)y-
y\ast(r_{B}(a)x) -l_{B}(r_{ A}(x)a)y - (l_{B}(a)x)\ast y = 0,
\end{eqnarray}
\begin{eqnarray}\label{eqq2}
l_{ A}(x)(a\circ b) +r_{ A}(x)(b\circ a)-r_{ A}(l_{B}(a)x)b-
b\circ (r_{ A}(x)a)+l_{ A}(r_{B}(a)x)b - (l_{ A}(x)a)\circ b=0,
\end{eqnarray}
\begin{eqnarray}\label{eqq3}
\begin{array}{lll}
y\ast (l_{B}(a)x)+(r_{B}(a)x)\ast y - (r_{B}(a)y)\ast x-l_{B}(l_{ A}(y)a)x+ \cr
r_{B}(r_{ A}(x)a)y+l_{B}(l_{ A}(x)a)y -x\ast (l_{B}(a)y)-r_{B}(r_{ A}(y)a)x=0,
\end{array}
\end{eqnarray}
\begin{eqnarray}\label{eqq4}
\begin{array}{lll}
b \circ (l_{ A}(x)a)+(r_{ A}(x)a)\circ b -(r_{ A}(x)b)\circ a-
l_{ A}(l_{B}(b)x)a+\cr
r_{ A}(r_{B}(a)x)b+l_{ A}(l_{B}(a)x)b -
a\circ (l_{ A}(x)b) -r_{ A}(r_{B}(b)x)a=0.
\end{array}
\end{eqnarray}
\end{subequations}
Then, there is an anti-flexible algebra structure on $ A \oplus B$
given by for any $x,y\in A$ and any $a, b\in B$
\begin{eqnarray*}
(x+a)\star (y+b)= (x \ast y + l_{B}(a)y+r_{B}(b)x)+ (a \circ b + l_{A}(x)b+r_{A}(y)a).
\end{eqnarray*}
\end{thm}
\begin{defi}\cite{DBH3}
A pre-anti-flexible algebra is a vector space $ A$ equipped with two bilinear products
$\prec, \succ: A\otimes A \rightarrow A$
satisfying the following relations
\begin{subequations}
\begin{eqnarray}\label{eq_pre_antiflexible_1}
(x,y,z)_{_m}=(z,y,x)_{_m}, \; \; \; \forall x,y,z\in A,
\end{eqnarray}
\begin{eqnarray}\label{eq_pre_antiflexible_2}
(x,y,z)_{_l}=(z,y,x)_{_r}, \; \; \; \forall x,y,z\in A,
\end{eqnarray}
\end{subequations}
where for any $x,y, z\in A$,
\begin{subequations}\label{eq:biasso}
\begin{eqnarray}\label{eq_biasso_m}
(x,y,z)_{_m}:=(x \succ y) \prec z-x \succ (y \prec z),
\end{eqnarray}
\begin{eqnarray}\label{eq_biasso_l}
(x,y,z)_{_l}:=(x\cdot y)\succ z-x\succ (y\succ z),
\end{eqnarray}
\begin{eqnarray}\label{eq_biasso_r}
(x,y,z)_{_r}:=(x\prec y)\prec z-x\prec (y\cdot z),
\end{eqnarray}
\end{subequations}
with $x\cdot y=x\prec y+x\succ y$.
Equivalently, a pre-anti-flexible algebra is a triple
$(A, \prec, \succ)$ such that $A$ is a vector space and
$\prec, \succ: A\times A \rightarrow A$ are
two linear maps satisfying the relations for any
$x,y,z\in A$
\begin{subequations}
\begin{eqnarray}\label{eq:pre-antiflexible1}
(x\succ y) \prec z-x\succ (y\prec z)=(z\succ y)\prec x-
z\succ(y\prec x),
\end{eqnarray}
\begin{eqnarray}\label{eq:pre-antiflexible2}
(x\prec y +x\succ y )\succ z-x\succ (y\succ z)=
(z\prec y)\prec x-z\prec (y\prec x+y\succ x).
\end{eqnarray}
\end{subequations}
\end{defi}
\begin{ex}
For a given associative algebra $(A, \ast)$, setting for any $x,y\in A$,
$x\succ y=x\ast y$ (or $x\succ y=y\ast x$) and
$x\prec y=0$, then $(A, \prec, \succ)$ is
a pre-anti-flexible algebra. Similarly, $(A, \prec, \succ)$ is a
pre-anti-flexible algebra by setting for any $x,y\in A$,
$x\prec y=x\ast y$ (or $x\prec y=y\ast x$) and $x\succ y=0$.
\end{ex}
\begin{rmk}\label{rmk_1}
Let $( A, \prec, \succ)$ be a pre-anti-flexible algebra.
\begin{enumerate}
\item\label{rmk_flex}
It is well known that the couple $( A, \cdot)$ is an
anti-flexible algebra (\cite{DBH3}), where
for any $x,y\in A$, $x\cdot y=x\prec y+x\succ y$, i.e.
$(x,y,z)=(z,y,x)$,
and we will denote this anti-flexible algebra by $aF(A)$ and
call it the underlying anti-flexible algebra of the pre-anti-flexible algebra
$(A, \prec, \succ)$.
\item
As we can see, if both sides of equality in the
Eqs.~\eqref{eq:pre-antiflexible1} and \eqref{eq:pre-antiflexible2} are zero
i.e. for any $ x,y,z\in A, (x,y,z)_{_m}=0$, $(x,y,z)_{_l}=0$
and $(x,y,z)_{_r}=0$, then $(A, \prec, \succ)$
is a dendriform algebra i.e. the couple $(A, \prec, \succ)$ such that
for any $x,y,z\in A,$
\begin{eqnarray*}
&&(x\succ y) \prec z-x\succ (y\prec z)=0,\cr
&&(x\prec y +x\succ y )\succ z-x\succ (y\succ z)=0,\cr
&&(x\prec y)\prec z-x\prec (y\prec z+y\succ z)=0,
\end{eqnarray*}
introduced by J.-L.~Loday (\cite{Loday}).
Clearly, a dendriform algebra is a pre-anti-flexible algebra,
and thus pre-anti-flexible algebras can be viewed as a generalization
of dendriform algebras.
\end{enumerate}
\end{rmk}
Throughout this paper, if there is no other consideration, for any
$x,y\in A$, $x\ast y$ or $x\cdot y$ will simply be written as $xy$.
Furthermore, the underlying anti-flexible algebra structure
of a given pre-anti-flexible algebra will generally be denoted by "$\cdot$" as
in Remark~\ref{rmk_1}~\eqref{rmk_flex}.
Moreover, for a given pre-anti-flexible algebra
$(A, \prec, \succ)$, we denote by
$L_{\prec}, R_{\prec}: A\rightarrow \End(A)$ the left
and right multiplication operators, respectively, on $(A, \prec)$
and similarly by $L_{\succ}, R_{\succ}: A\rightarrow \End(A)$ these on
$(A, \succ)$ which are defined as
$\forall x,y \in A,$
\begin{equation*}
L_{_\prec}(x)y=x\prec y,\;\; R_{_\prec}(x)y=y\prec x,\;\;
L_{_\succ}(x)y=x\succ y,\;\; R_{_\succ}(x)y=y\succ x.
\end{equation*}
\begin{thm}\cite{DBH3}\label{Theo_existance_pre_anti_flexible}
Let $(A, \ast)$ be an anti-flexible algebra equipped with a
non-degenerate bilinear form $\omega: A\otimes A\rightarrow \mathbb{F}$ satisfying
\begin{equation}\label{eq:simplectic_form}
\omega(x\ast y, z)+\omega(y\ast z, x)+\omega(z\ast x, y)=0,
\quad \forall x,y,z\in A.
\end{equation}
Then there is a pre-anti-flexible algebra structure
"$\prec, \succ$" defined on $ A$
satisfying the following relation, for any $x,y, z\in A$,
\begin{eqnarray}\label{eq_useful1}
\omega(x\prec y,z)=\omega(x, y\ast z), \quad
\omega(x\succ y, z)=\omega(y, z\ast x).
\end{eqnarray}
\end{thm}
To make this introductory section, devoted to
the fundamentals necessary for the main issues addressed,
as short as possible, we end it by outlining the content
of this article as follows.
In Section~\ref{section1}, we prove, in a generalized form, that Rota-Baxter
operators on anti-flexible algebras
induce pre-anti-flexible algebras.
In Section~\ref{section2},
we study bimodules and matched pairs of pre-anti-flexible algebras.
Precisely, we derive the dual bimodules of
bimodules of a pre-anti-flexible algebra.
In Section~\ref{section3},
we establish the equivalences among matched pairs of the underlying
anti-flexible algebras of pre-anti-flexible algebras,
matched pairs of pre-anti-flexible algebras,
and pre-anti-flexible bialgebras.
In Section~\ref{section4},
we rule on a special class of pre-anti-flexible bialgebras
which lead to the introduction
of the pre-anti-flexible Yang-Baxter equation.
A symmetric solution of the pre-anti-flexible Yang-Baxter
equation gives such a pre-anti-flexible bialgebra.
Finally in Section~\ref{section5},
we recall the notions of $\mathcal{O}$-operators of anti-flexible algebras
and intertwine this notion to that of bimodules of pre-anti-flexible algebras
and use the relationships among
them to provide the
symmetric solutions of pre-anti-flexible Yang-Baxter equation in
pre-anti-flexible bialgebras.
\section{Rota-Baxter operators and pre-anti-flexible algebras}\label{section1}
In this section, we are going to express pre-anti-flexible algebras
in terms of Rota-Baxter operator of weight zero defined on anti-flexible algebras.
\begin{defi}
Let $(A,\ast)$ be an anti-flexible algebra.
A Rota-Baxter operator ($\mathrm{R_B}$) of weight zero on $A$
is a linear operator $\mathrm{R_B}:A\rightarrow A$ satisfying
\begin{eqnarray}\label{eq:Rota-Baxter}
\mathrm{R_B}(x)\ast \mathrm{R_B}(y)=
\mathrm{R_B}(x\ast \mathrm{R_B}(y)+\mathrm{R_B}(x)\ast y), \;\forall x,y\in A.
\end{eqnarray}
\end{defi}
\begin{thm}
Let $(A, \ast)$ be an anti-flexible algebra equipped with a linear map
$\alpha:A\rightarrow A$. Consider the bilinear products "$\prec, \succ$"
given by for any $x,y\in A,$
\begin{eqnarray}\label{eq:pre-anti-flexible-Rota-Baxter}
x\succ y=\alpha(x)\ast y,\;\quad
x\prec y=x\ast \alpha(y).
\end{eqnarray}
Then the triple $(A, \prec, \succ)$ is a pre-anti-flexible algebra
if and only if for any $x,y,z\in A,$
\begin{eqnarray}\label{eq:identity-RB-pre-anti-flexible}
(\alpha(x)\ast \alpha(y)-\alpha(x\ast \alpha(y)+\alpha(x)\ast y))\ast z+
z\ast (\alpha(y)\ast \alpha(x)-\alpha(y\ast \alpha(x)+\alpha(y)\ast x))=0.
\end{eqnarray}
\end{thm}
\begin{proof}
For any $x,y,z\in A$, we have
\begin{eqnarray*}
(x,y,z)_m=(\alpha(z), y, \alpha(x))=(\alpha(x), y, \alpha(z))=(z, y, x)_m.
\end{eqnarray*}
Thus the bilinear products given by Eq.~\eqref{eq:pre-anti-flexible-Rota-Baxter} satisfy Eq.~\eqref{eq_pre_antiflexible_1}.
Besides, we have
\begin{eqnarray*}
(x,y,z)_l=-(\alpha(x)\ast \alpha(y)-\alpha(x\ast \alpha(y)+\alpha(x)\ast y))\ast z+(z, \alpha(y), \alpha(x))
\end{eqnarray*}
and
\begin{eqnarray*}
(z, y, x)_r=z\ast (\alpha(y)\ast \alpha(x)-
\alpha(y\ast \alpha(x)+\alpha(y)\ast x))+(\alpha(x), \alpha(y), z).
\end{eqnarray*}
Therefore, Eq.~\eqref{eq_pre_antiflexible_2} is equivalent to
Eq.~\eqref{eq:identity-RB-pre-anti-flexible}.
\end{proof}
It is obvious to remark that the previous theorem generalizes
the following corollary which links Rota-Baxter operators to
pre-anti-flexible algebras.
\begin{cor}
If $\alpha:A\rightarrow A$ is a Rota-Baxter operator
defined on an anti-flexible algebra $(A, \ast)$,
i.e. $\alpha$ is a linear map satisfying Eq.~\eqref{eq:Rota-Baxter}, then there is
a pre-anti-flexible algebra structure "$\prec, \succ$" on $A$ given by
Eq.~\eqref{eq:pre-anti-flexible-Rota-Baxter}.
\end{cor}
\section{Bimodules and matched pair of pre-anti-flexible algebras}\label{section2}
In this section, we provide bimodules and dual bimodules of
pre-anti-flexible algebras. We also
introduce matched pair of pre-anti-flexible algebras and
equivalently link them to a matched pair of their
underlying anti-flexible algebras. Finally, we define anti-flexible bialgebras
and establish related identities.
\begin{defi}
Let $( A, \prec, \succ)$ be a pre-anti-flexible algebra and $V$ be a vector space.
Consider the four linear maps
$l_{_\succ}, l_{_\prec},r_{_\succ}, r_{_\prec}: A\rightarrow \End(V) $.
The quintuple $(l_{_\succ},r_{_\succ}, l_{_\prec}, r_{_\prec}, V)$
is called a bimodule of $( A, \prec, \succ)$ if for any $x,y\in A$,
\begin{subequations}
\begin{eqnarray}\label{eq_bimodule_pre_anti_flexible1}
[r_{_\prec}(x), l_{_\succ}(y)]=[r_{\prec}(y), l_{_\succ}(x)],
\end{eqnarray}
\begin{eqnarray}\label{eq_bimodule_pre_anti_flexible2}
l_{_\prec}(x\succ y)-l_{_\succ}(x)l_{_\prec}(y)=
r_{_\prec}(x)r_{_\succ}(y)-r_{_\succ}(y\prec x),
\end{eqnarray}
\begin{eqnarray}\label{eq_bimodule_pre_anti_flexible3}
l_{_\succ}(x\cdot y)-l_{_\succ}(x)l_{_\succ}(y)=
r_{_\prec}(x)r_{_\prec}(y)-r_{_\prec}(y\cdot x),
\end{eqnarray}
\begin{eqnarray}\label{eq_bimodule_pre_anti_flexible4}
r_{_\succ}(x)l_{\cdot}(y)-l_{_\succ}(y)r_{_\succ}(x)=
r_{_\prec}(y)l_{_\prec}(x)-l_{_\prec}(x)r_{\cdot}(y),
\end{eqnarray}
\begin{eqnarray}\label{eq_bimodule_pre_anti_flexible5}
r_{_\succ}(x)r_{\cdot}(y)-r_{_\succ}(y\succ x)=
l_{_\prec}(x\prec y)-l_{_\prec}(x)l_{\cdot}(y),
\end{eqnarray}
\end{subequations}
where, $x\cdot y=x\prec y+x\succ y, l_{_\cdot}=l_{_\prec}+l_{_\succ}$ and
$r_{_\cdot}=r_{_\prec}+r_{_\succ}$.
\end{defi}
\begin{pro}
Let $( A, \prec, \succ)$ be a pre-anti-flexible algebra and $V$ be a vector space.
Consider the four linear maps
$l_{_\succ}, l_{_\prec},r_{_\succ}, r_{_\prec}: A\rightarrow \End(V) $.
The quintuple $(l_{_\succ},r_{_\succ}, l_{_\prec}, r_{_\prec}, V)$
is a bimodule of $( A, \prec, \succ)$ if and only if there is a
pre-anti-flexible algebra structure defined on $ A\oplus V$ by, for any
$x, y\in A$, $u, v\in V$,
\begin{eqnarray}\label{eq_pre_anti_flexible_bimodule}
\begin{array}{ccc}
(x+u)\prec'(y+v)=x\prec y+l_{_\prec}(x)v+r_{_\prec}(y)u,\cr
(x+u)\succ'(y+v)=x\succ y+l_{_\succ}(x)v+r_{_\succ}(y)u,
\end{array}
\end{eqnarray}
and
$
(x+u)\cdot'(y+v)=(x+u)\prec'(y+v)+(x+u)\succ'(y+v).
$
\end{pro}
\begin{proof}
According to Eq.~\eqref{eq_pre_anti_flexible_bimodule}
we have for any $x,y,z\in A$ and for any $u,v,w\in V$,
\begin{eqnarray*}
(x+u, y+v, z+w)_{_m}&=&((x+u)\succ'(y+v) )\prec' (z+w)-(x+u)\succ'((y+v)\prec'(z+w))\cr
&=&(x,y,z)_{_m}+\{l_{_\prec}(x\succ y)-l_{_\succ}(x)(l_{_\prec}(y))\}w\cr
&+&\{r_{_\prec}(z)l_{_\succ}(x)-l_{_\succ}(x)r_{_\prec}(z)\}v+
\{r_{_\prec}(z)r_{_\succ}(y)-r_{_\prec}(y\prec z)\}u.
\end{eqnarray*}
\begin{eqnarray*}
(x+u, y+v, z+w)_{_l}&=&((x+u)\cdot'(y+v) )\succ' (z+w)-(x+u)\succ'((y+v)\succ'(z+w))\cr
&=&(x,y,z)_{_l}+\{ l_{_\succ}(x\cdot y)-l_{_\succ}(x)l_{_\succ}(y) \}w\cr
&+&\{r_{_\succ}(z)l_{\cdot}(x)-l_{_\succ}(x)r_{_\succ}(z)\}v+
\{r_{_\succ}(z)r_{\cdot}(y)-r_{_\succ}(y\succ z)\}u.
\end{eqnarray*}
\begin{eqnarray*}
(x+u, y+v, z+w)_{_r}&=&((x+u)\prec'(y+v) )\prec' (z+w)-(x+u)\prec'((y+v)\cdot'(z+w))\cr
&=&(x,y,z)_{_r}+\{l_{_\prec}(x\prec y)-l_{_\prec}(x)l_{\cdot}(y) \}w\cr
&+&\{r_{_\prec}(z)l_{_\prec}(x)-l_{_\prec}(x)r_{\cdot}(z) \}v+
\{r_{_\prec}(z)r_{_\prec}(y)-r_{_\prec}(y\cdot z)\}u.
\end{eqnarray*}
If in addition the four linear maps $l_{_\succ}, l_{_\prec},r_{_\succ}, r_{_\prec}$
satisfy Eqs.~\eqref{eq_bimodule_pre_anti_flexible1}~-~
\eqref{eq_bimodule_pre_anti_flexible5},
then for any $x,y,z\in A$, and for any
$u, v,w\in V$ the following conditions
are satisfied,
\begin{eqnarray*}
(x+u, y+v, z+w)_{_m}=(z+w, y+v, x+u)_{_m},\;\;
(x+u, y+v, z+w)_{_l}=(z+w, y+v, x+u)_{_r}.
\end{eqnarray*}
Therefore, the equivalence holds.
\end{proof}
In the following of this paper, for a given pre-anti-flexible algebra
$(A, \prec, \succ)$, a vector space $V$ and a linear map
$\varphi : A\rightarrow \End(V)$,
its dual linear map is defined as $\varphi^* : A\rightarrow \End(V^*)$ by
\begin{eqnarray}\label{eq_dual_map}
\langle \varphi^*(x)u^*, v\rangle=
\langle u^*, \varphi(x)v\rangle , \quad \forall x\in A, \; v\in V, u^*\in V^*,
\end{eqnarray}
where $\langle , \rangle $ is the usual pairing between $V$ and $V^*$.
In addition, we also denote by $\sigma$ the linear map from
$V\otimes V$ into itself, or from $V^*\otimes V^*$ into itself, defined by: for any
$u,v\in V$, $u^*, v^*\in V^*$,
$\sigma(u\otimes v)=v\otimes u$ and $\sigma(u^*\otimes v^*)=v^*\otimes u^*$.
\begin{pro}\label{prop_operation_bimodule_pre_anti_flexible}
Let $(l_{_\succ},r_{_\succ}, l_{_\prec}, r_{_\prec}, V)$ be a bimodule of a
pre-anti-flexible algebra $( A, \prec, \succ)$, where
$V$ is a vector space and $l_{_\succ}, l_{_\prec},r_{_\succ}, r_{_\prec}:
A\rightarrow \End(V) $
are four linear maps. We have:
\begin{enumerate}
\item $(l_{_\succ},0, 0, r_{_\prec}, V)$,
$(r^*_{_\prec}, l^*_{_\prec} , l^*_{_\succ}, r^*_{_\succ} , V^*)$ and
$(r^*_{_\prec}, 0 , 0,l^*_{_\succ}, V^*)$
are bimodules of the pre-anti-flexible algebra $( A, \prec, \succ)$,
\item\label{eq:one} $(l_{\cdot}, r_{\cdot}, V)$, $(l_{_\succ}, r_{_\prec}, V)$,
$(r^*_{\cdot} , l^*_{\cdot} , V^*)$ and
$(r^*_{_\prec} , l^*_{_\succ} , V^*)$ are bimodules of
the underlying anti-flexible algebra $aF(A)$ of $( A, \prec, \succ)$,
\end{enumerate}
where, $l_{_\succ}+l_{_\prec}=l_{\cdot}, r_{_\succ}+r_{_\prec}=r_{\cdot}$.
\end{pro}
\begin{proof}
It is well known that
$aF(A)$ is an anti-flexible algebra.
\begin{enumerate}
\item
Since $(l_{_\succ},r_{_\succ}, l_{_\prec}, r_{_\prec}, V)$ is a bimodule of the
pre-anti-flexible algebra $( A , \prec, \succ)$, then
the four linear maps $l_{_\succ}, l_{_\prec},r_{_\succ}, r_{_\prec}:
A \rightarrow \End(V)$ satisfy
Eqs.~\eqref{eq_bimodule_pre_anti_flexible1}~-~\eqref{eq_bimodule_pre_anti_flexible5}
which are still satisfied by setting
$ l_{_\prec}=0$ and $r_{_\succ}=0$. Thus $(l_{_\succ},0, 0, r_{_\prec}, V)$ is a
bimodule of the pre-anti-flexible algebra $( A , \prec, \succ)$.
Furthermore, using Eq.~\eqref{eq_dual_map}, we deduce that
$(r^*_{_\prec}, l^*_{_\prec} , l^*_{_\succ}, r^*_{_\succ} , V^*)$ and
$(r^*_{_\prec}, 0 , 0,l^*_{_\succ}, V^*)$ are also
bimodules of the pre-anti-flexible algebra $( A , \prec, \succ)$.
\item
Since the four linear maps $l_{_\succ}, l_{_\prec},r_{_\succ}, r_{_\prec}: A \rightarrow \End(V)$
satisfy Eqs.~\eqref{eq_bimodule_pre_anti_flexible1}~-~\eqref{eq_bimodule_pre_anti_flexible5},
then both linear maps $l_{_\succ}$ and $r_{_\prec}$ satisfy
Eqs.~\eqref{eq_bimodule_pre_anti_flexible1} and \eqref{eq_bimodule_pre_anti_flexible3}
which are exactly Eqs.~\eqref{eqbimodule2} and \eqref{eqbimodule1}, respectively.
Thus $(l_{_\succ}, r_{_\prec},V)$ is a bimodule of
$aF(A)$. In view of Eqs.~\eqref{eq_bimodule_pre_anti_flexible1}
and \eqref{eq_bimodule_pre_anti_flexible4},
we have for any $x,y\in A $,
\begin{eqnarray*}
[l_{_\cdot}(x), r_{_\cdot}(y)]- [l_{_\cdot}(y), r_{_\cdot}(x)]&=&
\{l_{_\prec}(x)r_{_\cdot}(y)+r_{_\succ}(x)l_{_\cdot}(y)-
l_{_\succ}(y)r_{_\succ}(x)-r_{_\prec}(y)l_{_\prec}(x) \}\cr
&-&\{ l_{_\prec}(y)r_{_\cdot}(x)+r_{_\succ}(y)l_{_\cdot}(x)-
l_{_\succ}(x)r_{_\succ}(y)-r_{_\prec}(x)l_{_\prec}(y) \}\cr
&+&\{[l_{_\succ}(x), r_{_\prec}(y)]-[l_{_\succ}(y), r_{_\prec}(x)] \}=0.
\end{eqnarray*}
In addition, considering Eqs.~\eqref{eq_bimodule_pre_anti_flexible2},
\eqref{eq_bimodule_pre_anti_flexible3} and \eqref{eq_bimodule_pre_anti_flexible5},
we have for any $x,y\in A $,
\begin{eqnarray*}
l_{_\cdot}(x\cdot y)-l_{_\cdot}(x)l_{_\cdot}(y)-r_{_\cdot}(x)r_{_\cdot}(y)+r_{_\cdot}(y\cdot x)
&=&\{l_{_\succ}(x\cdot y) -l_{_\succ}(x)l_{_\succ}(y)-r_{_\prec}(x)r_{_\prec}(y)\cr
&+&r_{_\prec}(y\cdot x)\}+ \{l_{_\prec}(x\succ y)-l_{_\succ}(x)l_{_\prec}(y) \cr
&+&r_{_\succ}(y\prec x)-r_{_\prec}(x)r_{_\succ}(y) \}+\{l_{_\prec}(x\prec y) \cr
&+& r_{_\prec}(y\succ x)-l_{_\prec}(x)l_{_\cdot}(y)-r_{_\succ}(x)r_{_\cdot}(y)\}
=0.
\end{eqnarray*}
Therefore both $( l_{_\cdot}, r_{_\cdot}, V)$ and $(l_{_\succ}, r_{_\prec}, V)$
are bimodules of $aF(A)$.
According to Eq.~\eqref{eq_dual_map}, both
$(r^*_{_\cdot} , l^*_{_\cdot} , V^*)$ and
$(r^*_{_\prec} , l^*_{_\succ} , V^*)$ are bimodules of $aF(A)$.
\end{enumerate}
\end{proof}
\begin{ex}
Consider a pre-anti-flexible algebra $(A, \prec, \succ)$.
We have $( L_{_\prec}, R_{_\prec} , L_{_\succ}, R_{_\succ} , A)$ and
$(L_{_\succ},0, 0, R_{_\prec}, A)$ are bimodules of
$(A, \prec, \succ)$. Besides,
$(R^*_{_\prec}, L^*_{_\prec} , L^*_{_\succ}, R^*_{_\succ} , A^*)$ and
$(R^*_{_\prec}, 0 , 0, L^*_{_\succ}, A^*)$
are also bimodules of the pre-anti-flexible algebra $( A, \prec, \succ)$.
\end{ex}
\begin{rmk}\label{rmk_useful}
For a given bimodule $(l_{_\succ},r_{_\succ}, l_{_\prec}, r_{_\prec}, V)$
of a pre-anti-flexible algebra $(A, \prec, \succ)$ we have
\begin{enumerate}
\item
If both sides of
Eqs.~\eqref{eq_bimodule_pre_anti_flexible1}~-~\eqref{eq_bimodule_pre_anti_flexible5}
are zero, i.e.
the linear maps $l_{_\succ}, l_{_\prec},r_{_\succ}, r_{_\prec}$ satisfy
\begin{eqnarray*}
r_{_\prec}(x)l_{_\succ}(y)=l_{_\succ}(y)r_{_\prec}(x),\;
l_{_\prec}(x\succ y)=l_{_\succ}(x)l_{_\prec}(y),\;
r_{_\prec}(x)r_{_\succ}(y)=r_{_\succ}(y\prec x),\\
l_{_\succ}(x\cdot y)=l_{_\succ}(x)l_{_\succ}(y),\;
r_{_\prec}(x)r_{_\prec}(y)=r_{_\prec}(y\cdot x),\;
r_{_\succ}(x)l_{\cdot}(y)=l_{_\succ}(y)r_{_\succ}(x),\\
r_{_\prec}(y)l_{_\prec}(x)=l_{_\prec}(x)r_{\cdot}(y),\;
r_{_\succ}(x)r_{\cdot}(y)=r_{_\succ}(y\succ x),\;
l_{_\prec}(x\prec y)=l_{_\prec}(x)l_{\cdot}(y),
\end{eqnarray*}
with $l_{\cdot}=l_{_\succ}+l_{_\prec}$ and $r_{\cdot}=r_{_\succ}+r_{_\prec}$.
\item
For any $x,y \in A$ we have
\begin{eqnarray}\label{eq:useful}
L_{\cdot}(x)L_{\cdot}(y)+R_{\cdot}(x) R_{\cdot}(y)&=&
(L_{\prec}(x)+L_{\succ}(x))(L_{\prec}(y)+L_{\succ}(y))+
(R_{\prec}(x)+R_{\succ}(x))(R_{\prec}(y)+R_{\succ}(y))\cr
&=&(L_{\succ}(x)L_{\succ}(y)+R_{\prec}(x)R_{\prec}(y))+
(L_{\succ}(x)L_{\prec}(y)+L_{\prec}(x)R_{\succ}(y))\cr
&+&(L_{\prec}(x)L_{\succ}(y)+L_{\prec}(x)L_{\prec}(y)+
(R_{\succ}(x)R_{\succ}(y)+R_{\succ}(x)L_{\prec}(y) )\cr
L_{\cdot}(x)L_{\cdot}(y)+R_{\cdot}(x) R_{\cdot}(y)&=&L_{\cdot}(x\cdot y)+R_{\cdot}(y\cdot x)
\end{eqnarray}
\item Besides, for any $x,y\in A$
\begin{eqnarray}\label{eq:useful1}
[L_{\cdot}(x), R_{\cdot}(y)]-[L_{\cdot}(y), R_{\cdot}(x)]&=&
\{L_{_\prec}(x)R_{_\cdot}(y)+R_{_\succ}(x)L_{_\cdot}(y)-L_{_\succ}(y)R_{_\succ}(x)-R_{_\prec}(y)L_{_\prec}(x) \}\cr
&-&\{ L_{_\prec}(y)R_{_\cdot}(x)+R_{_\succ}(y)L_{_\cdot}(x)-L_{_\succ}(x)R_{_\succ}(y)-R_{_\prec}(x)L_{_\prec}(y) \}\cr
&+&\{[L_{_\succ}(x), R_{_\prec}(y)]-[L_{_\succ}(y), R_{_\prec}(x)] \}=0
\end{eqnarray}
\item\label{dual-bimodule}
Both dendriform and pre-anti-flexible algebras have the same shape of dual bimodules.
This fact induces some consequences which we will derive and explain in the following of this paper.
\end{enumerate}
\end{rmk}
\begin{thm}\label{Theo_pre_Sum}
Let $( A , \prec_{_A }, \succ_{_A })$ be a pre-anti-flexible algebra. Suppose there is a pre-anti-flexible algebra structure
"$ \prec_{_{ A^*}}, \succ_{_{A^*}}$" on $ A^*$. The following statements are equivalent:
\begin{enumerate}
\item\label{1} $(R^*_{\prec_{_ A}},L^*_{\succ_{_ A}}, R^*_{\prec_{_{ A^*}}}, L^*_{\succ_{_{ A^*}}}, A, A^*)$ is a matched pair of anti-flexible algebras $aF(A)$ and $aF(A^*)$.
\item\label{2} There exists an anti-flexible algebra structure on $ A\oplus A^*$ given by for any $x,y\in A$ and for any $a,b\in A^*$,
\begin{eqnarray}\label{eq_anti_flexible_sum}
(x+a)\star(y+b)=
(x\cdot y+R^*_{\prec_{_ A}}(a)y+L^*_{\succ_{_ A}}(b)x)+
(a\circ b+R^*_{\prec_{_{ A^*}}}(x)b+L^*_{\succ_{_{ A^*}}}(y)a),
\end{eqnarray} where $x\cdot y= x\prec_{_ A}y+ x\succ_{_ A} y$, $a\circ b= a\prec_{_{ A^*}}b+a\succ_{_{ A^*}}b$,
and a non-degenerate closed skew-symmetric bilinear form $\omega$ defined on $ A\oplus A^*$ given by
for any $x,y\in A$ and for any $a,b \in A^*$,
\begin{eqnarray}\label{eq_skew_symmetric_form}
\omega(x+a, y+b)=\langle x,b\rangle -\langle y,a\rangle.
\end{eqnarray}
\end{enumerate}
\end{thm}
\begin{proof}
Let $( A , \prec_{_A }, \succ_{_A })$ be a pre-anti-flexible algebra.
Suppose there is a pre-anti-flexible algebra structure
"$ \prec_{_{ A^*}}, \succ_{_{A^*}}$" on $ A^*$.
\begin{itemize}
\item[$\eqref{1}\Longrightarrow \eqref{2}$]
Suppose that $(R^*_{\prec_{_ A}},L^*_{\succ_{_ A}}, R^*_{\prec_{_{ A^*}}},L^*_{\succ_{_{ A^*}}}, A, A^*)$
is a matched pair of anti-flexible algebras $aF(A)$ and $aF(A^*)$.
Then, there is an anti-flexible algebra structure "$\star$" on $ A\oplus A^*$
given by for any $x,y\in A$ and for any $a,b, \in A^*$,
\begin{eqnarray*}
(x+a)\star(y+b)=
(x\cdot y+R^*_{\prec_{_ A}}(a)y+L^*_{\succ_{_ A}}(b)x)+
(a\circ b+R^*_{\prec_{_{ A^*}}}(x)b+L^*_{\succ_{_{ A^*}}}(y)a),
\end{eqnarray*}
where, $\forall x,y\in A$, $\forall a,b\in A^*$,
\begin{eqnarray*}
x\cdot y= x\prec_{_ A}y+ x\succ_{_ A} y, a\circ b= a\prec_{_{ A^*}}b+a\succ_{_{ A^*}}b.
\end{eqnarray*}
Besides, considering the skew-symmetric bilinear form $\omega$ defined on $ A\oplus A^*$ by
Eq.~\eqref{eq_skew_symmetric_form}, we have for any $x,y\in A$ and for any $a,b\in A^*$,
\begin{eqnarray*}
&&\omega((x+a)\star(y+b), (z+c))+\omega((y+b)\star(z+c), (x+a))+\omega((z+c)\star(x+a), (y+b))\cr
&&=
\langle x\cdot y, c\rangle +\langle y, c\prec_{_{ A^*}} a\rangle +\langle x, b\succ_{_{ A^*}} c\rangle
-\langle z, a\circ b\rangle -\langle z\prec_{_ A} x, b\rangle -\langle y\succ_{_{ A}} z, a\rangle \cr&&+
\langle y\cdot z, a\rangle +\langle z, a\prec_{_{ A^*}} b\rangle +\langle y, c\succ_{_{ A^*}} a\rangle
-\langle x, b\circ c\rangle -\langle x\prec_{_ A} y, c\rangle -\langle z\succ_{_{ A}} x, b\rangle \cr&&
+\langle z\cdot x, b\rangle +\langle x, b\prec_{_{ A^*}} c\rangle +\langle z, a\succ_{_{ A^*}} b\rangle
-\langle y, c\circ a\rangle -\langle y\prec_{_ A} z, a\rangle -\langle x\succ_{_{ A}} y, c\rangle =0.
\end{eqnarray*}
Clearly, $\omega$ is closed and $\omega( A, A)=0=\omega( A^*, A^*)$, then $( A, \cdot)$
and $( A^*, \circ)$ are Lagrangian anti-flexible subalgebras
of the anti-flexible algebra $( A\oplus A^*, \star)$.
\item[$\eqref{2}\Longrightarrow \eqref{1}$]
Suppose that there exists an anti-flexible algebra structure "$\star$" on $ A\oplus A^*$ given by Eq.~\eqref{eq_anti_flexible_sum}
and a non-degenerate closed skew-symmetric bilinear form on $ A\oplus A^*$ given by Eq.~\eqref{eq_skew_symmetric_form}.
According to Proposition~\ref{prop_operation_bimodule_pre_anti_flexible} the triple
$(R^*_{\prec_{_ A}},L^*_{\succ_{_ A}}, A^*)$ is a bimodule
of $aF(A)$ and $(R^*_{\prec_{_{ A^*}}},L^*_{\succ_{_{ A^*}}}, A)$ is a bimodule of $aF(A^*)$, thus "$\star$" defines
an anti-flexible algebra structure on
$ A\oplus A^*$ if $(R^*_{\prec_{_ A}},L^*_{\succ_{_ A}},R^*_{\prec_{_{ A^*}}},L^*_{\succ_{_{ A^*}}}, A, A^*)$ is a
matched pair of the anti-flexible algebras $aF(A)$ and $aF(A^*)$.
\end{itemize}
Therefore, the conclusion holds.
\end{proof}
\begin{thm}\label{thm_matchedpair}
Let $( A, \prec_{_ A}, \succ_{_ A})$ and $(B, \prec_{_B}, \succ_{_B})$ be two pre-anti-flexible algebras.
Suppose that there are four linear maps
$ l_{_{\succ_ A}}, r_{_{\succ_ A}}, l_{_{\prec_ A}}, r_{_{\prec_ A}}: A\rightarrow \End(B)$
such that $(l_{_{\succ_ A}}, r_{_{\succ_ A}}, l_{_{\prec_ A}}, r_{_{\prec_ A}}, B)$ is a bimodule of
$( A, \prec_{_ A}, \succ_{_ A})$ and another four linear maps
$ l_{_{\succ_B}}, r_{_{\succ_B}}, l_{_{\prec_B}}, r_{_{\prec_B}}:B\rightarrow \End( A)$
such that $(l_{_{\succ_B}}, r_{_{\succ_B}}, l_{_{\prec_B}}, r_{_{\prec_B}}, A)$ is a bimodule of
$(B, \prec_{_B}, \succ_{_B})$. If in addition the eight linear maps
$l_{_{\succ_ A}}, r_{_{\succ_ A}}, l_{_{\prec_ A}},$ $r_{_{\prec_ A}},
l_{_{\succ_B}}, r_{_{\succ_B}}, l_{_{\prec_B}}$, and $r_{_{\prec_B}}$
satisfying the relations, for any $ x,y\in A$, and for any $a,b\in B$,
\begin{subequations}
\begin{eqnarray}\label{eq_matched_pre_1}
(l_{_\succ{_{_B}}}(a)x)\prec_{_ A}y+ l_{_{\succ_{_B}}}(r_{_\succ{_ A}}(x)a)y-
l_{_{\succ{_B}}}(a)(x\prec_{_ A} y)=\cr r_{_{\prec_{_B}}}(a)(y\succ_{_ A}x)-
y\succ_{_ A}(r_{_{\prec_{_B}}}(a)x)-r_{_{\succ_{_B}}}(l_{_{\prec_{_ A}}}(x)a)y,
\end{eqnarray}
\begin{eqnarray}\label{eq_matched_pre_2}
(l_{_{\succ_{_ A}}}(x)b)\prec_{_B}a+l_{_{\prec_{_ A}}}(r_{_{\succ_{_B}}}(b)x)a-
l_{_{\succ_{_ A}}}(x)(b\prec_{_B}a)=\cr
r_{_{\prec_{_ A}}}(x)(a\succ_{_B} b)-a\succ_{_B}(r_{_{\prec_{_ A}}}(x)b)-
r_{_{\succ_{_ A}}}(l_{_{\prec_{_B}}}(b)x)a,
\end{eqnarray}
\begin{eqnarray}\label{eq_matched_pre_3}
(l_{_{\cdot_B}}(a)x )\succ_{_ A}y+l_{_{\succ_{_B}}}(r_{_{ \cdot_ A}}(x)a)y-
l_{_{\succ_{B}}}(a)(x\succ_{_ A}y) =\cr
r_{_{\prec_{_B}}}(a)(y\prec_{_ A} x)-y\prec_{_ A}(r_{\cdot_{_B}}(a)x)-
r_{_{\prec_{_B}}}(l_{{\cdot_{_ A}}}(x)a)y,
\end{eqnarray}
\begin{eqnarray}\label{eq_matched_pre_4}
r_{_{\succ_{B}}}(a)(x\cdot_{_ A} y)-x\succ{_{_ A}}(r_{_\succ{_B}}(a)y)-
r_{_{_\succ{_B}}}(l_{_{\succ{_ A}}}(y)a)x =\cr
(l_{_{\prec_{_B}}}(a)y)\prec_{_ A}x+l_{_{\prec_{_B}}}(r_{_{\prec_{_ A}}}(y)a)x-
l_{_{\prec_{_B}}}(a)(y\cdot_{_ A} x),
\end{eqnarray}
\begin{eqnarray}\label{eq_matched_pre_5}
(l_{_{\cdot_ A}}(x)b )\succ_{_B}a+l_{_{\succ_{_ A}}}(r_{_{\cdot_B}}(b)x)a-
l_{_{\succ_{ A}}}(x)(b\succ_{_B}a) =\cr
r_{_{\prec_{_ A}}}(x)(a\prec_{_B} b)-a\prec_{_B}(r_{\cdot_{_ A}}(x)b)-
r_{_{\prec_{_ A}}}(l_{{\cdot_{_B}}}(b)x)a,
\end{eqnarray}
\begin{eqnarray}\label{eq_matched_pre_6}
r_{_{\succ_{ A}}}(x)(a\cdot_{_B} b)-a\succ{_{_B}}(r_{_\succ{_ A}}(x)b)-
r_{_{_\succ{_ A}}}(l_{_{\succ{_B}}}(b)x)a=\cr
(l_{_{\prec_{_ A}}}(x)b)\prec_{_B}a+l_{_{\prec_{_ A}}}(r_{_{\prec_{_B}}}(b)x)a-
l_{_{\prec_{_ A}}}(x)(b\cdot_{_B} a),
\end{eqnarray}
\begin{eqnarray}\label{eq_matched_pre_7}
(r_{_{\succ_B}}(a)x)\prec_{_ A} y+l_{_{\prec_B}}( l_{_{\succ_ A}}(x)a)y
-x\succ_{_ A}(l_{_{\prec_{_B}}}(a)y)-r_{_{\succ_{_B}}}(r_{_{\prec_{_ A}}}(y)a )x=\cr
(r_{_{\succ_B}}(a)y)\prec_{_ A} x+l_{_{\prec_B}}( l_{_{\succ_ A}}(y)a)x
-y\succ_{_ A}(l_{_{\prec_{_B}}}(a)x)-r_{_{\succ_{_B}}}(r_{_{\prec_{_ A}}}(x)a )y,
\end{eqnarray}
\begin{eqnarray}\label{eq_matched_pre_8}
(r_{_{\succ_ A}}(x)a)\prec_{_B} b+l_{_{\prec_ A}}( l_{_{\succ_B}}(a)x)b
-a\succ_{_B}(l_{_{\prec_{_ A}}}(x)b)-r_{_{\succ_{_ A}}}(r_{_{\prec_{_B}}}(b)x )a=\cr
(r_{_{\succ_ A}}(x)b)\prec_{_B} a+l_{_{\prec_ A}}( l_{_{\succ_B}}(b)x)a
-b\succ_{_B}(l_{_{\prec_{_ A}}}(x)a)-r_{_{\succ_{_ A}}}(r_{_{\prec_{_B}}}(a)x )b,
\end{eqnarray}
\begin{eqnarray}\label{eq_matched_pre_9}
(r_{ \cdot_{_B}}(a)x)\succ_{_ A} y +l_{_{_\succ{_B}}}(l_{_{\cdot_{_ A}}}(x)a)y
-x\succ_{_ A}(l_{_{\succ_{_B}}}(a)y)-r_{_{\succ_{_B}}}(r_{_{\succ_{_ A}}}(y)a)x=\cr
(r_{_{\prec{_B}}}(a)y)\prec_{_ A} x+l_{_{\prec_{_B}}}(l_{_{\prec_{_ A}}}(y) a)x
-y\prec_{_ A}(l_{\cdot_{_B}} (a)x)-r_{_{\prec_{_B}}}(r_{\cdot_{_ A}}(x)a)y ,
\end{eqnarray}
\begin{eqnarray}\label{eq_matched_pre_10}
(r_{\cdot_{_ A}}(x)a)\succ_{_B} b+l_{_{_\succ{_ A}}}(l_{_{\cdot_{_B}}}(a)x)b
-a\succ_{_B}(l_{_{\succ_{_ A}}}(x)b)-r_{_{\succ_{_ A}}}(r_{_{\succ_{_B}}}(b)x)a=\cr
(r_{_{\prec{_ A}}}(x)b)\prec_{_B} a+ l_{_{\prec_{_ A}}}(l_{_{\prec_{_B}}}(b) x)a
-b\prec_{_B}(l_{\cdot_{_ A}} (x)a)-r_{_{\prec_{_ A}}}(r_{\cdot_{_B}}(a)x)b,
\end{eqnarray}
\end{subequations}
then there is a pre-anti-flexible product on $A\oplus B$ given, for any $x,y\in A$ and for any $a,b\in B$, by
\begin{eqnarray}\label{eq_pre_anti_matched}
\begin{array}{cccc}
(x+a)\prec (y+b)= \{x\prec_{_ A} y+l_{_{\prec_{_B}}}(a)y+r_{_{\prec_{_B}}}(b)x\}+
\{a\prec_{_B} b+l_{_{\prec_{_ A}}}(x)b+r_{_{\prec_{_ A}}}(y)a\}, \cr
(x+a)\succ (y+b)=
\{ x\succ_{_ A} y+l_{_{\succ_{_B}}}(a)y+r_{_{\succ_{_B}}}(b)x\}+
\{a\succ_{_B} b+l_{_{\succ_{_ A}}}(x)b+r_{_{\succ_{_ A}}}(y)a\},
\end{array}
\end{eqnarray}
where for any $x,y\in A$ and any $a,b\in B$,
\begin{eqnarray*}
x\cdot_{_ A} y=x\prec_{_ A} y+x\succ_{_ A} y,\quad l_{ \cdot_{_ A}}
=l_{_\prec{_{_ A}}}+l_{_\succ{_{_ A}}}, \quad r_{\cdot_{_ A}}
=r_{_\prec{_{_ A}}}+r_{_\succ{_{_ A}}},\cr
a\cdot_{_B} b=a\prec_{_B} b+a\succ_{_B} b,\quad l_{\cdot_{_B}}
=l_{_\prec{_{_B}}}+l_{_\succ{_{_B}}}, \quad r_{\cdot{_B}}
=r_{_\prec{_{_B}}}+r_{_\succ{_{_B}}}.
\end{eqnarray*}
\end{thm}
\begin{proof}
Let $( A, \prec_{_ A}, \succ_{_ A})$ and $(B, \prec_{_B}, \succ_{_B})$ be two pre-anti-flexible algebras.
Suppose in addition that $(l_{_{\succ_ A}}, r_{_{\succ_ A}}, l_{_{\prec_ A}}, r_{_{\prec_ A}}, B)$
is a bimodule of $( A, \prec_{_ A}, \succ_{_ A})$ and $(l_{_{\succ_B}}, r_{_{\succ_B}}, l_{_{\prec_B}}, r_{_{\prec_B}}, A)$
is a bimodule of $(B, \prec_{_B}, \succ_{_B})$, where
$ l_{_{\succ_B}}, r_{_{\succ_B}}, l_{_{\prec_B}}, r_{_{\prec_B}}:B\rightarrow \End( A)$ and
$ l_{_{\succ_ A}}, r_{_{\succ_ A}}, l_{_{\prec_ A}}, r_{_{\prec_ A}}: A\rightarrow \End(B)$
are eight linear maps. Considering the product given in Eq.~\eqref{eq_pre_anti_matched}, we have
for any $x,y, z\in A$ and any $a,b,c\in B$,
\begin{eqnarray*}
(x+a, y+b, z+c)_{_m}&=&(x,y,z)_{_m}+(a,b,c)_{_m}+
\{l_{_{\prec_B}}(a\succ_{_B}b)z -l_{_\succ{_B}}(a)( l_{_{\prec_B}}(b)z) \}\cr
&+&\{(l_{_\succ{_{_B}}}(a)y)\prec_{_ A}z+ l_{_\succ{_B}}(r_{_\succ{_ A}}(y)a)z-
l_{_{\succ{_B}}}(a)(y\prec_{_ A} z) \}\cr
&+&\{r_{_{\prec_{_B}}}(c)(x\succ_{_ A} y)-x\succ_{_ A}(r_{_{\prec_{_B}}}(c)y)-
r_{_{\succ_{_B}}}(l_{_{\prec_{_ A}}}(y)c)x \}\cr
&+&\{ r_{_{\prec_{_ A}}}(z)(a\succ_{_B} b)-a\succ_{_B}(r_{_{\prec_{_ A}}}(z)b)-
r_{_{\succ_{_ A}}}(l_{_{\prec_{_B}}}(b)z)a \}\cr
&+&\{ (l_{_{\succ_{_ A}}}(x)b)\prec_{_B}c+l_{_{\prec_{_ A}}}(r_{_{\succ_{_B}}}(b)x)c-
l_{_{\succ_{_ A}}}(x)(b\prec_{_B}c) \}\cr
&+&\{r_{_{\prec_{_B}}}(c)(l_{_{\succ_{_B}}}(a)y)-
l_{_{\succ_{_B}}}(a)(r_{_{\prec_{_B}}}(c)y) \}+\{ (r_{_{\succ_B}}(b)x)\prec_{_ A} z
\cr &+&l_{_{\prec_B}}( l_{_{\succ_ A}}(x)b)z
-x\succ_{_ A}(l_{_{\prec_{_B}}}(b)z)
-r_{_{\succ_{_B}}}(r_{_{\prec_{_ A}}}(z)b )x \}\cr
&+&\{ r_{_{\prec_{_ A}}}(z)(r_{_{\succ_{_ A}}}(y)a)-
r_{_{\succ_{_ A}}}(y\prec_{_ A}z)a \}+
\{ (r_{_{\succ_ A}}(y)a)\prec_{_B} c
\cr&+&l_{_{\prec_ A}}( l_{_{\succ_B}}(a)y)c
-a\succ_{_B}(l_{_{\prec_{_ A}}}(y)c)
-r_{_{\succ_{_ A}}}(r_{_{\prec_{_B}}}(c)y )a \}\cr
&+&\{r_{_{\prec_{_B}}}(c)(r_{_{\succ_{_B}}}(b)x)-
r_{_{\succ_{_B}}}(b\prec_{_B}c)x \}\cr
&+&\{ l_{_{\prec_ A}}(x\succ_{_ A}y)c -
l_{_\succ{_ A}}(x)( l_{_{\prec_ A}}(y)c) \}+
\{ r_{_{\prec_{_ A}}}(z)(l_{_{\succ_{_ A}}}(x)b)-
l_{_{\succ_{_ A}}}(x)(r_{_{\prec_{_ A}}}(z)b) \}
\end{eqnarray*}
\begin{eqnarray*}
(x+a, y+b, z+c)_{_l}&=&(x,y,z)_{_l}+(a,b,c)_{_l}+
\{r_{_{\succ_{_A}}}(z)(r_{\cdot_{A}}(y)a)-r_{_{\succ_{_A}}}(y\succ_{_A} z)a \}\cr
&+&\{(l_{_{\cdot_B}}(a)y )\succ_{_A}z+l_{_{\succ_{_B}}}(r_{_{\cdot_A}}(y)a)z-
l_{_{\succ_{B}}}(a)(y\succ_{_A}z) \}\cr
&+&\{r_{_{\succ_{B}}}(c)(x\cdot_{_A} y)-x\succ{_{_A}}(r_{_\succ{_B}}(c)y)-
r_{_{_\succ{_B}}}(l_{_{\succ{_A}}}(y)c)x \}\cr
&+&\{(l_{_{\cdot_A}}(x)b )\succ_{_B}c+l_{_{\succ_{_A}}}(r_{_{\cdot_B}}(b)x)c-
l_{_{\succ_{A}}}(x)(b\succ_{_B}c) \}\cr
&+&\{ r_{_{\succ_{A}}}(z)(a\cdot_{_B} b)-a\succ{_{_B}}(r_{_\succ{_A}}(z)b)-
r_{_{_\succ{_A}}}(l_{_{\succ{_B}}}(b)z)a \}\cr
&+&\{ l_{_\succ{_B}}(a\cdot_{_B} b)z-l_{_{\succ_{_B}}}(a)(l_{_{\succ_{_B}}}(b)z)\}+
\{(r_{\cdot_{_B}}(b)x)\succ_{_A} z \cr
&+&l_{_{_\succ{_B}}}(l_{_{\cdot_{_A}}}(x)b)z-x\succ_{_A}(l_{_{\succ_{_B}}}(b)z)-
r_{_{\succ_{_B}}}(r_{_{\succ_{_A}}}(z)b)x \}\cr
&+&\{ l_{_\succ{_A}}(x\cdot_{_A} y)c-l_{_{\succ_{_A}}}(x)(l_{_{\succ_{_A}}}(y)c) \}+
\{ (r_{\cdot_{_A}}(y)a)\succ_{_B} c \cr
&+&l_{_{_\succ{_A}}}(l_{_{\cdot_{_B}}}(a)y)c-a\succ_{_B}(l_{_{\succ_{_A}}}(y)c)-
r_{_{\succ_{_A}}}(r_{_{\succ_{_B}}}(c)y)a \}\cr
&+&\{ r_{_{\succ_{_B}}}(c)(l_{_\cdot{_B}}(a)y)-l_{_{_{\succ{_B}}}}(a)(r_{_{\succ{_B}}}(c)y) \}\cr
&+&\{ r_{_{\succ_{_B}}}(c)(r_{_\cdot{_B}}(b)x)-r_{_{\succ_{_B}}}(b\succ_{_B} c)x \}+
\{ r_{_{\succ_{_A}}}(z)(l_{_\cdot{_A}}(x)b)-l_{_{_{\succ{_A}}}}(x)(r_{_{\succ{_A}}}(z)b) \}
\end{eqnarray*}
\begin{eqnarray*}
(z+c, y+b, x+a)_{_r}&=&(z,y,x)_{_r}+(c,b,a)_{_r}
+\{r_{_{\prec_{_B}}}(a)(r_{_{\prec_{_B}}}(b)z)-r_{_{\prec_{_B}}}(b\cdot_{_B} a)z \}\cr
&+&\{(l_{_{\prec_{_B}}}(c)y)\prec_{_A}x+l_{_{\prec_{_B}}}(r_{_{\prec_{_A}}}(y)c)x-
l_{_{\prec_{_B}}}(c)(y\cdot_{_A} x) \}\cr
&+&\{r_{_{\prec_{_B}}}(a)(z\prec_{_A} y)-z\prec_{_A}(r_{\cdot_{_B}}(a)y)-
r_{_{\prec_{_B}}}(l_{{\cdot_{_A}}}(y)a)z \}\cr
&+&\{ (l_{_{\prec_{_A}}}(z)b)\prec_{_B}a+l_{_{\prec_{_A}}}(r_{_{\prec_{_B}}}(b)z)a-
l_{_{\prec_{_A}}}(z)(b\cdot_{_B} a) \}\cr
&+&\{ r_{_{\prec_{_A}}}(x)(c\prec_{_B} b)-c\prec_{_B}(r_{\cdot_{_A}}(x)b)-
r_{_{\prec_{_A}}}(l_{{\cdot_{_B}}}(b)x)c \}\cr
&+&\{l_{_{\prec_{_B}}}(c\prec_{_B} b)x-l_{_{\prec_{_B}}}(c)(l_{_{\cdot_{_B}}}(b)x) \}+
\{(r_{_{\prec{_B}}}(b)z)\prec_{_A} x\cr
&+&l_{_{\prec_{_B}}}(l_{_{\prec_{_A}}}(z) b)x-z\prec_{_A}(l_{\cdot_{_B}} (b)x)-
r_{_{\prec_{_B}}}(r_{\cdot_{_A}}(x)b)z \}\cr
&+&\{r_{_{\prec{_B}}}(a)(l_{_{_\prec{_B}}}(c)y)-l_{_{\prec{_B}}}(c)(r_{_{\cdot{_B}}}(a)y) \}+
\{ (r_{_{\prec{_A}}}(y)c)\prec_{_B} a \cr
&+& l_{_{\prec_{_A}}}(l_{_{\prec_{_B}}}(c) y)a-c\prec_{_B}(l_{\cdot_{_A}} (y)a)-
r_{_{\prec_{_A}}}(r_{\cdot_{_B}}(a)y)c \}\cr
&+&\{r_{_{\prec_{_A}}}(x)(r_{_{\prec_{_A}}}(y)c)-r_{_{\prec_{_A}}}(y\cdot_{_A} x)c \}\cr
&+&\{l_{_{\prec_{_A}}}(z\prec_{_A} y)a-l_{_{\prec_{_A}}}(z)(l_{_{\cdot_{_A}}}(y)a)\}
+\{r_{_{\prec{_A}}}(x)(l_{_{_\prec{_A}}}(z)b)-l_{_{\prec{_A}}}(z)(r_{_{\cdot{_A}}}(x)b) \}
\end{eqnarray*}
Besides, for any $x,y, z\in A$ and for any $a,b,c\in B$,
$
(x+a,y+b,z+c)_{_m}=(z+c,y+b,x+a)_{_m}
$
and
$
(x+a,y+b,z+c)_{_l}=(z+c,y+b,x+a)_{_r}
$ hold if and only if
$(l_{_{\succ_A}}, r_{_{\succ_A}}, l_{_{\prec_A}}, r_{_{\prec_A}}, B)$ is a bimodule of
$(A, \prec_{_A}, \succ_{_A})$, $(l_{_{\succ_B}}, r_{_{\succ_B}}, l_{_{\prec_B}}, r_{_{\prec_B}}, A)$
is a bimodule of $(B, \prec_{_B}, \succ_{_B})$, and
Eqs.~\eqref{eq_matched_pre_1}--\eqref{eq_matched_pre_10} are satisfied.
\end{proof}
\begin{defi}
Let $(A, \prec_{_A}, \succ_{_A})$ and $(B, \prec_{_B}, \succ_{_B})$ be two
pre-anti-flexible algebras. Suppose that there are four linear maps
$ l_{_{\succ_A}}, r_{_{\succ_A}}, l_{_{\prec_A}}, r_{_{\prec_A}}:A\rightarrow \End(B)$ such that
$(l_{_{\succ_A}}, r_{_{\succ_A}}, l_{_{\prec_A}}, r_{_{\prec_A}}, B)$ is a bimodule of
$(A, \prec_{_A}, \succ_{_A} )$ and another four linear maps
$l_{_{\succ_B}}, r_{_{\succ_B}}, l_{_{\prec_B}}, r_{_{\prec_B}}:B\rightarrow \End(A)$
such that
$(l_{_{\succ_B}}, r_{_{\succ_B}}, l_{_{\prec_B}}, r_{_{\prec_B}}, A)$
is a bimodule of $(B, \prec_{_B}, \succ_{_B} )$ and
Eqs.~\eqref{eq_matched_pre_1}~-~\eqref{eq_matched_pre_10} hold.
Then we call the ten-tuple $(A, B,l_{_{\succ_A}}, r_{_{\succ_A}}, l_{_{\prec_A}},
r_{_{\prec_A}},l_{_{\succ_B}}, r_{_{\succ_B}}, l_{_{\prec_B}}, r_{_{\prec_B}} )$ a
{\bf matched pair of the pre-anti-flexible} algebras $(A, \prec_{_A}, \succ_{_A})$
and $(B, \prec_{_B}, \succ_{_B})$.
We also denote the pre-anti-flexible algebra defined by Eq.~\eqref{eq_pre_anti_matched} by
$A\bowtie^{l_{_{\succ_A}}, r_{_{\succ_A}}, l_{_{\prec_A}},
r_{_{\prec_A}}}_{l_{_{\succ_B}}, r_{_{\succ_B}}, l_{_{\prec_B}}, r_{_{\prec_B}}} B$ or simply by
$A \bowtie B$.
\end{defi}
\begin{cor}\label{corollary_matched_pair_pre}
If
$(A, B, l_{_{\succ_A}}, r_{_{\succ_A}}, l_{_{\prec_A}}, r_{_{\prec_A}},
l_{_{\succ_B}}, r_{_{\succ_B}}, l_{_{\prec_B}}, r_{_{\prec_B}} )$
is a matched pair of the pre-anti-flexible algebras
$(A, \prec_{_A}, \succ_{_A})$ and $(B, \prec_{_B}, \succ_{_B})$ then
$(l_{_{\cdot_{_A}}}, r_{_{\cdot_{_A}}}, l_{_{\cdot_{_B}}}, r_{_{\cdot_{_B}}}, A, B )$
is a matched pair of anti-flexible algebras $aF(A)$ and $aF(B)$.
\end{cor}
\begin{proof}
Suppose that $(A, B, l_{_{\succ_A}}, r_{_{\succ_A}}, l_{_{\prec_A}},
r_{_{\prec_A}}, l_{_{\succ_B}}, r_{_{\succ_B}}, l_{_{\prec_B}}, r_{_{\prec_B}} )$
is a matched pair of the pre-anti-flexible algebras $(A, \prec_{_A}, \succ_{_A})$
and $(B, \prec_{_B}, \succ_{_B})$, where
$(l_{_{\succ_A}}, r_{_{\succ_A}}, l_{_{\prec_A}}, r_{_{\prec_A}}, B)$
is a bimodule of the pre-anti-flexible algebra $(A, \prec_{_A}, \succ_{_A})$ and
$(l_{_{\succ_B}}, r_{_{\succ_B}}, l_{_{\prec_B}}, r_{_{\prec_B}}, A)$
is a bimodule of the pre-anti-flexible algebra $(B, \prec_{_B}, \succ_{_B})$.
According to Proposition~\ref{prop_operation_bimodule_pre_anti_flexible},
$(l_{_{\cdot_{_A}}}, r_{_{\cdot_{_A}}}, B)$
is a bimodule of the anti-flexible algebra $aF(A)$ and
$(l_{_{\cdot_{_B}}}, r_{_{\cdot_{_B}}}, A)$ is a bimodule of the anti-flexible algebra $aF(B)$.
In addition, the underlying anti-flexible product defined on
$A\oplus B$ in Eq.~\eqref{eq_pre_anti_matched} is exactly the one obtained from the matched pair
$(l_{_{\cdot_{_A}}}, r_{_{\cdot_{_A}}},l_{_{\cdot_{_B}}}, r_{_{\cdot_{_B}}}, A, B)$
of the anti-flexible algebras $aF(A)$ and $aF(B)$.
\end{proof}
\begin{thm}
Let $(A, \prec_{_A}, \succ_{_A})$ be a pre-anti-flexible algebra.
Suppose there is a pre-anti-flexible algebra structure
"$ \prec_{_{A^*}}, \succ_{_{A^*}}$" on its dual space $A^*$.
The following statements are equivalent:
\begin{enumerate}
\item\label{un}
$(A, A^*, R^*_{\prec_{_A}},L^*_{\succ_{_A}}, R^*_{\prec_{_{A^*}}}, L^*_{\succ_{_{A^*}}})$
is a matched pair of anti-flexible algebras $aF(A)$ and $aF(A^*)$.
\item\label{deux}
$(A, A^*, -R^*_{\succ_{_A}},-L^*_{\prec_{_A}} ,R^*_{\cdot_{_A}},L^*_{\cdot_{_A}} ,
-R^*_{\succ_{_{A^*}}},-L^*_{\prec_{_{A^*}}},
R^*_{\circ_{_{A^*}}}, L^*_{\circ_{_{A^*}}})$
is a matched pair of the pre-anti-flexible algebras $(A, \prec_{_A}, \succ_{_A})$ and
$(A^*, \prec_{_{A^*}}, \succ_{_{A^*}})$.
\end{enumerate}
\end{thm}
\begin{proof}
Let $(A, \prec_{_A}, \succ_{_A})$ be a pre-anti-flexible algebra.
Suppose there is a pre-anti-flexible algebra structure
"$ \prec_{_{A^*}}, \succ_{_{A^*}}$" on its dual space $A^*$.
According to Corollary~\ref{corollary_matched_pair_pre},
we have $\eqref{deux} \Longrightarrow \eqref{un}$.
If $(A, A^*, R^*_{\prec_{_A}},L^*_{\succ_{_A}}, R^*_{\prec_{_{A^*}}}, L^*_{\succ_{_{A^*}}})$
is a matched pair of the anti-flexible algebras $aF(A)$ and $aF(A^*)$,
then, according to Theorem~\ref{Theo_pre_Sum},
there exists an anti-flexible algebra structure on $A\oplus A^*$ given by Eq.~\eqref{eq_anti_flexible_sum}
and a non-degenerate closed skew-symmetric bilinear form on $A\oplus A^*$
given by Eq.~\eqref{eq_skew_symmetric_form}. In view of Theorem~\ref{Theo_existance_pre_anti_flexible},
there exists a pre-anti-flexible algebra structure "$\prec, \succ$" defined on
$A\oplus A^*$ and satisfying Eq.~\eqref{eq_useful1}
i.e. for any $x,y, z\in A$ and for any $a, b, c\in A^*$,
\begin{eqnarray*}
\omega((x+a)\prec (y+b), (z+c))=\omega((x+a), (y+b)\star(z+c) ),\cr
\omega((x+a)\succ (y+b), (z+c))=\omega((y+b), (z+c)\star(x+a)),
\end{eqnarray*}
where "$\star$" is given by Eq.~\eqref{eq_anti_flexible_sum}.
More precisely, we have for any $x,y,z\in A$ and for any $a, b, c\in A^*$,
\begin{eqnarray*}
\omega(x+a, (y+b)\star(z+c) )&=&
\omega(x+a, (y\cdot z+R^*_{\prec_{_A}}(b)z+L^*_{\succ_{_A}}(c)y)\cr&+&
(b\circ c+R^*_{\prec_{_{A^*}}}(y)c+L^*_{\succ_{_{A^*}}}(z)b))\cr &=&
\langle x,b\circ c+R^*_{\prec_{_{A^*}}}(y)c+L^*_{\succ_{_{A^*}}}(z)b) \rangle
\cr&-&\langle y\cdot z+R^*_{\prec_{_A}}(b)z+L^*_{\succ_{_A}}(c)y, a\rangle
\cr &=&\langle x, b\circ c\rangle+
\langle x\prec_{_{A}}y, c\rangle+
\langle z\succ_{_{A}}x, b\rangle
-\langle y\cdot z, a\rangle\cr
&-&\langle z, a\prec_{_{_{A^*}}}b\rangle
-\langle y, c\succ_{_{_{A^*}}}a\rangle\cr
&=&\langle x\prec_{_{A}}y+L_{\circ}^*(b)x-R_{_{_{\succ_{A^*}}}}^*(a)y, c\rangle
\cr&-&
\langle z, a\prec_{_{_{A^*}}}b+L_{\cdot}^*(y)a-R_{_{\succ_{A}}}^*(x)b\rangle \cr
&=&\omega((x\prec_{_{A}}y-R_{_{_{\succ_{A^*}}}}^*(a)y+L_{\circ}^*(b)x)\cr&+&
( a\prec_{_{_{A^*}}}b-R_{_{\succ_{A}}}^*(x)b+L_{\cdot}^*(y)a), z+c).
\end{eqnarray*}
Thus
\begin{eqnarray*}
(x+a)\prec (y+b)
=(x\prec_{_{A}}y-R_{_{_{\succ_{A^*}}}}^*(a)y+L_{\circ}^*(b)x)
+( a\prec_{_{_{A^*}}}b-R_{_{\succ_{A}}}^*(x)b+L_{\cdot}^*(y)a).
\end{eqnarray*}
Similarly, we have
\begin{eqnarray*}
(x+a)\succ (y+b)=(x\succ{_{_A}}y+R_{\circ}^*(a)y-L_{_{\prec_{A^*}}}^*(b)x)+
(a\succ{_{_{A^*}}}b+R_{\cdot}^*(x)b-L_{_{\prec_{A}}}^*(y)a).
\end{eqnarray*}
Therefore, $( A, A^*, -R_{\succ_{_A}}^*,-L^*_{\prec_{_A}} ,R^*_{\cdot},L^*_{\cdot} ,
-R^*_{\succ_{_{A^*}}}, -L^*_{\prec_{_{A^*}}},
R^*_{\circ}, L^*_{\circ})$
is a matched pair of the pre-anti-flexible algebras $(A, \prec_{_A}, \succ_{_A})$ and
$(A^*, \prec_{_{A^*}}, \succ_{_{A^*}})$. Hence \eqref{un} $\Longrightarrow$ \eqref{deux}.
\end{proof}
\section{Pre-anti-flexible bialgebras}\label{section3}
In this section, we provide the definition of a pre-anti-flexible bialgebra
and establish its equivalence with the notions previously announced.
To achieve this goal, we have the following.
\begin{thm}
Let $(A, \prec_{_A}, \succ_{_A})$ be a pre-anti-flexible algebra whose products are given
by two linear maps
$\beta_{_{\succ}}^*, \beta_{_{\prec}}^*: A\otimes A\rightarrow A$.
Suppose in addition that there is a pre-anti-flexible algebra structure
"$ \prec_{_{A^*}}, \succ_{_{A^*}}$" on $A^*$ given by:
$\Delta_{_{\succ}}^*, \Delta_{_{\prec}}^*: A^*\otimes A^*\rightarrow A^*$.
Then the following statements are equivalent:
\begin{enumerate}
\item $(A, A^*, R^*_{\prec_{_A}},L^*_{\succ_{_A}}, R^*_{\prec_{_{A^*}}}, L^*_{\succ_{_{A^*}}})$
is a matched pair of the anti-flexible algebras $aF(A)$ and $aF(A^*)$.
\item
The four linear maps $\beta_{_{\succ}}, \beta_{_{\prec}}:A^*\rightarrow A^*\otimes A^*$ and
$\Delta_{_{\succ}}, \Delta_{_{\prec}}:A\rightarrow A\otimes A$ satisfy,
for any $x,y\in A$ and for any $a,b\in A^*$,
\begin{subequations}
\begin{eqnarray}\label{eq_pre_matched_1}
\begin{array}{llll}
\Delta_{_{\succ}}(x\cdot y)-(R_{_{\prec_A}}(y)\otimes \id)\Delta_{_{\succ}}(x)-
(\id\otimes L_{\cdot}(x))\Delta_{_{\succ}}(y)=\cr
\sigma(\id \otimes L_{_{\succ_{A}}}(y))\Delta_{_{\prec}}(x)+
\sigma(R_{\cdot}(x)\otimes \id)\Delta_{_{\prec}}(y)-\sigma \Delta_{_{\prec}}(y\cdot x),
\end{array}
\end{eqnarray}
\begin{eqnarray}\label{eq_pre_matched_3}
\begin{array}{llll}
&&(\sigma(L_{\cdot}(y)\otimes \id-\id\otimes R_{_{\prec_{_A}}}(y)))\Delta_{_{\prec}}(x)
+(L_{_{\succ_{A}}}(x)\otimes\id-\id\otimes R_{\cdot}(x))\Delta_{_{\succ}}(y)=\cr&&
(\sigma(L_{\cdot}(x)\otimes\id -\id\otimes R_{_{\prec_{_A}}}(x) ) )\Delta_{_{\prec}}(y)+
(L_{_{\succ_{_{A}}}}(y)\otimes\id -\id\otimes R_{{\cdot}}(y))\Delta_{_{\succ}}(x),
\end{array}
\end{eqnarray}
\begin{eqnarray}\label{eq_pre_matched_2}
\begin{array}{llll}
\beta_{_{\succ}}(a\circ b)-
(R_{_{{\prec_{A^*}}}}(b)\otimes \id)\beta_{_{\succ}}(a)-
(\id\otimes L_{\circ}(a))\beta_{_{\succ}}(b)=\cr
\sigma(\id \otimes L_{_{\succ_{A^*}}}(b))\beta_{_{\prec}}(a)+
\sigma(R_{\circ}(a)\otimes \id)\beta_{_{\prec}}(b)-\sigma \beta_{_{\prec}}(b\circ a),
\end{array}
\end{eqnarray}
\begin{eqnarray}\label{eq_pre_matched_4}
\begin{array}{llll}
&&(\sigma(L_{\circ}(b)\otimes \id-\id\otimes R_{_{\prec_{A^*}}}(b))\beta_{_{\prec}}(a)+
(L_{_{\succ_{A^*}}}(a)\otimes\id-\id\otimes R_{\circ}(a))\beta_{_{\succ}}(b)=\cr &&
(\sigma(L_{\circ}(a)\otimes\id -\id\otimes R_{_{\prec_{A^*}}}(a) ) )\beta_{_{\prec}}(b)+
(L_{_{\succ_{A^*}}}(b)\otimes\id-\id\otimes R_{{\circ}}(b) )\beta_{_{\succ}}(a),
\end{array}
\end{eqnarray}
\end{subequations}
where
$R_{\cdot}=R_{_{\succ_{A}}}+R_{_{\prec_{A}}}$,
$L_{\cdot}=L_{_{\succ_{A}}}+L_{_{\prec_{A}}}$,
$R_{\circ}=R_{_{\succ_{A^*}}}+R_{_{\prec_{A^*}}}$ and
$L_{\circ}=L_{_{\succ_{A^*}}}+L_{_{\prec_{A^*}}}.$
\end{enumerate}
\end{thm}
\begin{proof}
According to Remark~\ref{rmk_1}, $(R^*_{\prec_{_A}},L^*_{\succ_{_A}}, A^*)$ and
$(R^*_{\prec_{_{A^*}}}, L^*_{\succ_{_{A^*}}},A)$ are bimodules of $aF(A)$ and $aF(A^*)$, respectively.
Taking into account the following, for any $x,y\in A$ and any $a,b\in A^*$
\begin{eqnarray*}
&&\langle \sigma\circ \Delta_{_{\prec}}(x\cdot y), a\otimes b \rangle=
\langle \Delta_{_{\prec}}(x\cdot y), b\otimes a \rangle=
\langle x\cdot y, R_{_{\prec_{A^*}}}(a)b \rangle=
\langle R_{_{\prec_{A^*}}}^*(a)(x\cdot y), b \rangle, \cr
&& \langle \Delta_{_{\succ}}(y\cdot x), a\otimes b \rangle=
\langle y\cdot x, a\succ_{_{A^*}} b \rangle=
\langle y\cdot x, L_{_{_{\succ_{A^*}}}}(a)b \rangle=
\langle L_{_{{\succ_{A^*}}}}^*(a)(y\cdot x), b \rangle, \cr
&&\langle (R_{_{\prec_A}}(x)\otimes \id)\Delta_{_{\succ}}(y), a\otimes b \rangle=
\langle \Delta_{_{\succ}}(y),R_{_{\prec_A}}^*(x)a\otimes b \rangle=
\langle L_{_{\succ_{A^*}}}^*(R_{_{\prec_A}}^*(x)a)y, b \rangle, \cr
&&\langle (\id\otimes L_{\cdot}(y))\Delta_{_{\succ}}(x), a\otimes b \rangle=
\langle x, a\succ_{_{A^*}}(L_{\cdot}^*(y)b) \rangle=
\langle L_{_{_{\succ_{A^*}}}}^*(a)x ,L_{\cdot}^*(y)b \rangle=
\langle y\cdot (L_{_{{\succ_{A^*}}}}^*(a)x) , b \rangle, \cr
&&\langle \sigma(\id \otimes L_{_{\succ_{A}}}(x))\Delta_{_{\prec}}(y), a\otimes b \rangle=
\langle y, b\prec_{_{A^*}} (L_{_{\succ_{_{A}}}}^*(x)a) \rangle=
\langle R_{_{\prec_{_{A^*}}}}^*(L_{_{\succ_{_{A}}}}^*(x)a)y, b \rangle, \cr
&&\langle \sigma(R_{\cdot}(y)\otimes \id)\Delta_{_{\prec}}(x), a\otimes b \rangle=
\langle x, (R_{\cdot}^*(y)b)\prec_{_{A^*}} a \rangle=
\langle R_{_{{\prec_{_{A^*}}}}}^*(a)x ,R_{\cdot}^*(y)b \rangle=
\langle (R_{_{{\prec_{_{A^*}}}}}^*(a)x)\cdot y ,b \rangle,
\end{eqnarray*}
we deduce that Eq.~\eqref{eq_pre_matched_1} is equivalent to Eq.~\eqref{eqq1}.
Similarly, we get equivalence between Eqs.~\eqref{eq_pre_matched_2} and \eqref{eqq2}.
Besides, taking into account the following,
\begin{eqnarray*}
&&\langle (\sigma(L_{\cdot}(y)\otimes \id)\Delta_{_{\prec}}(x), a\otimes b\rangle=
\langle x, (L_{\cdot}^*(y)b) \prec_{_{A^*}} a \rangle=
\langle R_{\prec_{_{A^*}}}^*(a)x, L_{\cdot}^*(y)b \rangle=
\langle y\cdot (R_{\prec_{_{A^*}}}^*(a)x) ,b\rangle, \cr
&&\langle (\sigma(\id\otimes R_{_{\prec_{_A}}}(y)))\Delta_{_{\prec}}(x), a\otimes b\rangle=
\langle x, b\prec_{_{A^*}} (R_{_{\prec_{_A}}}^*(y)a)\rangle=
\langle R_{\prec_{_{A^*}}}^*(R_{_{\prec_{_A}}}^*(y)a)x,b\rangle, \cr
&&\langle (L_{_{\succ_{_{A}}}}(y)\otimes\id) \Delta_{_{\succ}}(x), a\otimes b\rangle=
\langle x, (L_{_{\succ_{_{A}}}}^*(y)a)\succ_{_{ A^*}} b \rangle=
\langle L_{\succ_{_{ A^*}}}^*(L_{_{\succ_{_{A}}}}^*(y)a)x,b\rangle, \cr
&&\langle (\id\otimes R_{{\cdot}}(y))\Delta_{_{\succ}}(x), a\otimes b\rangle=
\langle x, a \succ_{_{ A^*}} (R_{{\cdot}}^*(y)b) \rangle=
\langle L_{\succ_{_{ A^*}}}^*(a)x, R_{{\cdot}}^*(y)b \rangle=
\langle (L_{\succ_{_{ A^*}}}^*(a)x)\cdot y,b\rangle,
\end{eqnarray*}
we deduce equivalence between Eqs.~\eqref{eq_pre_matched_3} and \eqref{eqq3}.
Similarly, we get equivalence between Eqs.~\eqref{eq_pre_matched_4} and \eqref{eqq4}.
\end{proof}
\begin{rmk}\label{rmk:identities}
Let $x,y\in A$ and $a, b\in A^*$. Setting
$\Delta=\Delta_{_\prec}+\Delta_{_\succ}$, we have the following identities:
\begin{eqnarray*}
\langle \beta_{_\succ}(a\circ b), x\otimes y \rangle=
\langle a\otimes b,\Delta (x\succ y) \rangle, \quad
\langle \sigma\beta_{_\prec}(b\circ a), x\otimes y \rangle=
\langle a\otimes b, \sigma\Delta(y\prec x)\rangle,
\end{eqnarray*}
\begin{eqnarray*}
\langle( R_{\prec{_{A^*}}}(b)\otimes \id )\beta_{_\succ}(a), x\otimes y \rangle=
\langle a\otimes b, (R_{_{\succ}}(y)\otimes \id)\Delta_{_{\prec}}(x) \rangle,
\end{eqnarray*}
\begin{eqnarray*}
\langle (\id \otimes L_{\circ}(a))\beta_{{_\succ}}(b), x\otimes y\rangle=
\langle a\otimes b, (\id\otimes L_{{_\succ}}(x))\Delta(y)\rangle,
\end{eqnarray*}
\begin{eqnarray*}
\langle \sigma(\id \otimes L_{{_\succ}}(b))\beta_{{_\prec}}(a), x\otimes y\rangle=
\langle a\otimes b,(L_{_{\prec}}(y)\otimes \id)\sigma\Delta_{_{\succ}}(x) \rangle,
\end{eqnarray*}
\begin{eqnarray*}
\langle \sigma(R_{\circ}(a)\otimes \id )\beta_{{_\prec}}(b), x\otimes y\rangle=
\langle a\otimes b, (\id \otimes R_{{_\prec}}(x))\sigma\Delta(y)\rangle,
\end{eqnarray*}
\begin{eqnarray*}
\langle (L_{{_\succ}}(a)\otimes \id )\beta_{{_\succ}}(b), x\otimes y\rangle=
\langle a\otimes b, (\id \otimes R_{_{\succ}}(y))\Delta_{_{\succ}}(x)\rangle,
\end{eqnarray*}
\begin{eqnarray*}
\langle (\id\otimes R_{\circ}(a))\beta_{_{\succ}}(b), x\otimes y\rangle=
\langle a\otimes b, (\id\otimes L_{{_\succ}}(x))\sigma\Delta(y) \rangle,
\end{eqnarray*}
\begin{eqnarray*}
\langle \sigma(L_{\circ}(b)\otimes \id)\beta_{_{\prec}} (a), x\otimes y\rangle=
\langle a\otimes b, (R_{_{\prec}}(x)\otimes \id )\sigma\Delta(y)\rangle,
\end{eqnarray*}
\begin{eqnarray*}
\langle \sigma(\id \otimes R_{_{\prec}}(b))\beta_{_{\prec}}(a), x\otimes y\rangle=
\langle a\otimes b, (L_{_{\prec}}(y)\otimes \id )\Delta_{_{\prec}}(x)\rangle.
\end{eqnarray*}
\end{rmk}
Considering the above identities, we can derive and prove the following lemma and theorem.
\begin{lem}
Let $(A, \prec_{_A}, \succ_{_A})$ be a pre-anti-flexible algebra whose
products are given by the linear maps
$\beta_{_{\succ}}^*, \beta_{_{\prec}}^*: A\otimes A\rightarrow A$.
Suppose in addition that there is a pre-anti-flexible algebra structure
"$ \prec_{_{A^*}}, \succ_{_{A^*}}$" on its dual space $A^*$ given by:
$\Delta_{_{\succ}}^*, \Delta_{_{\prec}}^*: A^*\otimes A^*\rightarrow A^*$.
Let $x,y\in A$. We have
\begin{subequations}
\mbox{ Eq.~\eqref{eq_pre_matched_2} is equivalent to}
\begin{eqnarray}\label{eq_pre_matched_2'}
\Delta (x\succ y) -(R_{_{\succ}}(y)\otimes \id)\Delta_{_{\prec}}(x) -
(\id\otimes L_{{_\succ}}(x))\Delta(y)=\cr
(L_{_{\prec}}(y)\otimes \id)\sigma\Delta_{_{\succ}}(x) +
(\id \otimes R_{{_\prec}}(x))\sigma\Delta(y)
-\sigma\Delta(y\prec x).
\end{eqnarray}
\mbox{ Eq.~\eqref{eq_pre_matched_4} is equivalent to}
\begin{eqnarray}\label{eq_pre_matched_4'}
(\id \otimes R_{_{\succ}}(y))\Delta_{_{\succ}}(x)-
(L_{_{\prec}}(y)\otimes \id )\Delta_{_{\prec}}(x)
+(R_{_{\prec}}(x)\otimes \id-\id\otimes L_{{_\succ}}(x))\sigma\Delta(y)=\cr
(R_{_{\succ}}(y) \otimes \id)\sigma\Delta_{_{\succ}}(x)-
(\id\otimes L_{_{\prec}}(y) )\sigma\Delta_{_{\prec}}(x)
+(\id \otimes R_{_{\prec}}(x)- L_{{_\succ}}(x)\otimes \id)\Delta(y).
\end{eqnarray}
\end{subequations}
\end{lem}
\begin{thm}
Let $(A, \prec_{_A}, \succ_{_A})$ be a pre-anti-flexible algebra.
Suppose in addition that there is a pre-anti-flexible algebra structure
"$ \prec_{_{A^*}}, \succ_{_{A^*}}$" on $A^*$ given by:
$\Delta_{_{\succ}}^*, \Delta_{_{\prec}}^*: A^*\otimes A^*\rightarrow A^*$.
Then the following statements are equivalent:
\begin{enumerate}
\item $(A, A^*, R^*_{\prec_{_A}},L^*_{\succ_{_A}}, R^*_{\prec_{_{A^*}}}, L^*_{\succ_{_{A^*}}})$
is a matched pair of the anti-flexible algebras $aF(A)$ and $aF(A^*)$.
\item The two linear maps
$\Delta_{_{\succ}}, \Delta_{_{\prec}}:A\rightarrow A\otimes A$ satisfy
Eqs.~\eqref{eq_pre_matched_1}, \eqref{eq_pre_matched_3}, \eqref{eq_pre_matched_2'}
and \eqref{eq_pre_matched_4'}.
\end{enumerate}
\end{thm}
In addition we have
\begin{defi}
A pre-anti-flexible bialgebra structure on a vector space $A$ is given by the four linear maps
$\Delta_{_{\prec}}, \Delta_{_{\succ}}: A\rightarrow A\otimes A$
and $\beta_{_{\prec}}, \beta_{_{\succ}}:A^*\rightarrow A^*\otimes A^*$,
where $A^*$ is the dual of $A$, such that:
\begin{enumerate}
\item the dual maps $\Delta_{_{\prec}}^*, \Delta_{_{\succ}}^*: A^*\otimes A^*\rightarrow A^*$
induce a pre-anti-flexible algebra structure $\prec_{_{A^*}}, \succ_{_{A^*}}$ on $A^*$,
\item the dual maps $\beta_{_{\prec}}^*, \beta_{_{\succ}}^*:A\otimes A \rightarrow A$
induce a pre-anti-flexible algebra structure $\prec_{_{A}}, \succ_{_{A}}$ on $A$,
\item the linear maps $\Delta_{_{\prec}}, \Delta_{_{\succ}}$
satisfy Eqs.~\eqref{eq_pre_matched_1}, \eqref{eq_pre_matched_3}, \eqref{eq_pre_matched_2'}
and \eqref{eq_pre_matched_4'}.
\end{enumerate}
\end{defi}
\begin{thm}
Let $(A, \prec_{_A}, \succ_{_A})$ and $(A^*, \prec_{_{A^*}}, \succ_{_{A^*}})$
be two pre-anti-flexible algebras. The following statements are equivalent:
\begin{enumerate}
\item there is an anti-flexible algebra structure on $aF(A)\oplus aF(A^*)$ and a
nondegenerate skew-symmetric bilinear form $\omega$
given by Eq.~\eqref{eq_skew_symmetric_form} satisfying Eq.~\eqref{eq:simplectic_form},
\item $(-R^*_{\succ_{_A}},-L^*_{\prec_{_A}} ,R^*_{\cdot_{_A}},L^*_{\cdot_{_A}} ,
-R^*_{\succ_{_{A^*}}},-L^*_{\prec_{_{A^*}}},
R^*_{\circ_{_{A^*}}}, L^*_{\circ_{_{A^*}}}, A, A^*)$
is a matched pair of the pre-anti-flexible algebras $(A, \prec_{_A}, \succ_{_A})$ and
$(A^*, \prec_{_{A^*}}, \succ_{_{A^*}})$,
\item $(R^*_{\prec_{_A}},L^*_{\succ_{_A}}, R^*_{\prec_{_{A^*}}},L^*_{\succ_{_{A^*}}},A, A^*)$
is a matched pair of the underlying anti-flexible algebras $aF(A)$ and $aF(A^*)$,
\item $(A, A^*)$ is a pre-anti-flexible bialgebra.
\end{enumerate}
\end{thm}
\begin{defi}
A homomorphism of pre-anti-flexible bialgebras
$(A, A^*, \Delta_{_{\prec_{A}}}, \Delta_{_{\succ_{A}}},\beta_{_{\prec_{A^*}}}, \beta_{_{\succ_{A^*}}})$
and\\
$(B,B^*,\Delta_{_{\prec_{B}}},\Delta_{_{\succ_{B}}},\beta_{_{\prec_{B^*}}},\beta_{_{\succ_{B^*}}})$
is a homomorphism of pre-anti-flexible algebras
$\psi:A\rightarrow B$ such that its dual $\psi^*:B^*\rightarrow A^*$
is also a homomorphism of pre-anti-flexible algebras i.e. for any $x\in A$,
$a\in B^*,$
\begin{eqnarray*}
(\psi\otimes \psi)\Delta_{_{\prec_{A}}}(x)=\Delta_{_{\prec_{B}}}(\psi(x)),\;\;
(\psi\otimes\psi) \Delta_{_{\succ_{A}}}(x)= \Delta_{_{\succ_{B}}}(\psi(x)),
\end{eqnarray*}
\begin{eqnarray*}
(\psi^*\otimes \psi^*)\beta_{_{\prec_{B^*}}}(a)=\beta_{_{\prec_{A^*}}}(\psi^*(a)),\;\;
(\psi^*\otimes \psi^*)\beta_{_{\succ_{B^*}}}(a)=\beta_{_{\succ_{A^*}}}(\psi^*(a)).
\end{eqnarray*}
An invertible homomorphism of pre-anti-flexible bialgebras is an isomorphism of
pre-anti-flexible algebras.
\end{defi}
\begin{rmk}\label{rmk:dual_preantiflexible}
According to Remark~\ref{rmk:identities}, if
$(A, A^*, \Delta_{\succ}, \Delta_{\prec}, \beta_{{_\succ}}, \beta_{{_\prec}})$
is a pre-anti-flexible
bialgebra, then its associated dual
$(A^*, A, \beta_{{_\succ}}, \beta_{{_\prec}}, \Delta_{\succ}, \Delta_{\prec})$ is also
a pre-anti-flexible bialgebra.
\end{rmk}
\section{Special pre-anti-flexible bialgebras and pre-anti-flexible Yang-Baxter equation}\label{section4}
We deal here with a special class of pre-anti-flexible bialgebras
provided by the linear maps $\Delta_{_{\prec}}, \Delta_{_{\succ}}:A\rightarrow A\otimes A$
defined by for any $x\in A$
\begin{subequations}\label{eq:coboundary}
\begin{equation}\label{eq:coboundary_a}
\Delta_{\succ}(x)=(\id \otimes L_{\cdot}(x))\mathrm{r}_{_\succ}+
(R_{\prec}(x)\otimes \id)\sigma \mathrm{r}_{_\prec},
\end{equation}
\begin{equation}\label{eq:coboundary_b}
\Delta_{\prec}(x)=(\id\otimes L_{\succ}(x))\mathrm{r}_{_\prec}+
(R_{\cdot}(x)\otimes \id)\sigma \mathrm{r}_{_\succ},
\end{equation}
\end{subequations}
which generate an analogue of the $\mathcal{D}$-equation of dendriform algebras,
called the pre-anti-flexible Yang-Baxter equation.
In the remainder of this paper, we refer to both Eqs.~\eqref{eq:coboundary_a} and
\eqref{eq:coboundary_b} collectively as Eq.~\eqref{eq:coboundary}. Similar referencing
conventions are used throughout this paper.
\begin{lem}\label{lem:sigma}
Let $(A, \prec, \succ)$ be a pre-anti-flexible algebra and
$\mathrm{r}_{_\prec}, \mathrm{r}_{_\succ}\in A\otimes A$. Consider
$\Delta_{\prec}, \Delta_{\succ}:A\rightarrow A\otimes A$
two linear maps defined by Eq.~\eqref{eq:coboundary}. Then for any $x\in A,$
\begin{subequations}
\begin{eqnarray}\label{eq:sigma_succ}
\sigma\Delta_{\succ}(x)=(L_{\cdot}(x)\otimes \id )\sigma \mathrm{r}_{_\succ}+
(\id\otimes R_{\prec}(x)) \mathrm{r}_{_\prec},
\end{eqnarray}
\begin{eqnarray}\label{eq:sigma_prec}
\sigma\Delta_{\prec}(x)=(L_{\succ}(x)\otimes \id )\sigma \mathrm{r}_{_\prec}+
(\id\otimes R_{\cdot}(x)) \mathrm{r}_{_\succ},
\end{eqnarray}
\begin{eqnarray}\label{eq:delta}
\Delta(x)=(\id \otimes L_{\cdot}(x))\mathrm{r}_{_\succ}+
(R_{\prec}(x)\otimes \id)\sigma \mathrm{r}_{_\prec}+
(\id\otimes L_{\succ}(x))\mathrm{r}_{_\prec}+
(R_{\cdot}(x)\otimes \id)\sigma \mathrm{r}_{_\succ},
\end{eqnarray}
\begin{eqnarray}\label{eq:sigma_delta}
\sigma\Delta(x)=
(L_{\cdot}(x)\otimes \id )\sigma \mathrm{r}_{_\succ}+
(\id\otimes R_{\prec}(x)) \mathrm{r}_{_\prec}+
(L_{\succ}(x)\otimes \id )\sigma \mathrm{r}_{_\prec}+
(\id\otimes R_{\cdot}(x)) \mathrm{r}_{_\succ}.
\end{eqnarray}
\end{subequations}
\end{lem}
As consequences following the definition given by
Eq.~\eqref{eq:coboundary} we have
\begin{itemize}
\item Let $x,y \in A$. By Eq.~\eqref{eq:sigma_prec}, we have
\begin{eqnarray*}
\Delta_{\succ}(x\cdot y)+\sigma\Delta_{\prec}(y\cdot x)=
(\id \otimes(L_{\cdot}(x\cdot y)+R_{\cdot}(y\cdot x)))\mathrm{r}_{_\succ}+
((R_{\prec}(x\cdot y)+L_{\succ}(y\cdot x))\otimes\id)\sigma \mathrm{r}_{_\prec}.
\end{eqnarray*}
According to Eq.~\eqref{eq:useful} we have
\begin{eqnarray*}
\Delta_{\succ}(x\cdot y)+\sigma\Delta_{\prec}(y\cdot x)&=&
(\id \otimes(L_{\cdot}(x)L_{\cdot}(y)+R_{\cdot}(x)R_{\cdot}(y)))\mathrm{r}_{_\succ}\cr&+&
((R_{\prec}(y)R_{\prec}(x)+L_{\succ}(y)L_{\succ}( x))\otimes\id)\sigma \mathrm{r}_{_\prec}.
\end{eqnarray*}
In addition
\begin{eqnarray*}
(R_{\prec}(y)\otimes\id)\Delta_{\succ}(x)&=&
(R_{\prec}(y)\otimes L_{\cdot}(x))\mathrm{r}_{_\succ}+
(R_{\prec}(y)R_{\prec}(x)\otimes \id)\sigma \mathrm{r}_{_\prec}\cr
(\id \otimes L_{\cdot}(x))\Delta_{\succ}(y)&=&
(\id\otimes L_{\cdot}(x)L_{\cdot}(y))\mathrm{r}_{_\succ}+
(R_{\prec}(y)\otimes L_{\cdot}(x))\sigma \mathrm{r}_{_\prec}\cr
\sigma(\id \otimes L_{\succ}(y))\Delta_{\prec}(x)&=&
(L_{\succ}(y)L_{\succ}(x)\otimes \id )\sigma \mathrm{r}_{_\prec}+
(L_{\succ}(y)\otimes R_{\cdot}(x) ) \mathrm{r}_{_\succ}\cr
\sigma(R_{\cdot}(x)\otimes \id )\Delta_{\prec}(y)&=&
(L_{\succ}(y)\otimes R_{\cdot} (x))\sigma \mathrm{r}_{_\prec}+
(\id \otimes R_{\cdot}(x)R_{\cdot}(y))\mathrm{r}_{_\succ}.
\end{eqnarray*}
Thus
\begin{eqnarray*}
&&(R_{\prec}(y)\otimes\id)\Delta_{\succ}(x)+
(\id \otimes L_{\cdot}(x))\Delta_{\succ}(y)-
\Delta_{\succ}(x\cdot y)+
\sigma(\id \otimes L_{\succ}(y))\Delta_{\prec}(x)-
\sigma\Delta_{\prec}(y\cdot x)\cr&&+
\sigma(R_{\cdot}(x)\otimes \id )\Delta_{\prec}(y)=
(R_{\prec}(y) \otimes L_{\cdot}(x) +
L_{\succ}(y) \otimes R_{\cdot}(x) )(\mathrm{r}_{_\succ}+\sigma \mathrm{r}_{_\prec}).
\end{eqnarray*}
Therefore
Eq.~\eqref{eq_pre_matched_1} is equivalent to the following
\begin{eqnarray}{\label{eq:coboundary1}}
(R_{_\prec}(y)\otimes L_{\cdot}(x)+L_{\succ}(y)\otimes R_{\cdot}(x))(\mathrm{r}_{_\succ}+
\sigma \mathrm{r}_{_\prec})=0, \;\; \forall x,y\in A.
\end{eqnarray}
\item Besides, for any $x, y\in A$ we have
\begin{eqnarray*}
(L_{\succ}(x)\otimes \id -\id \otimes R_{\cdot}(x))\Delta_{_\succ}(y)&=&
(L_{\succ}(x)\otimes L_{\cdot}(y))\mathrm{r}_{_\succ}+
(L_{\succ}(x)R_{\prec}(y)\otimes\id )\sigma \mathrm{r}_{_\prec}\cr
&-&(\id\otimes R_{\cdot}(x)L_{\cdot}(y) )\mathrm{r}_{_\succ}-
(R_{\prec}(y)\otimes R_{\cdot}(x))\sigma \mathrm{r}_{_\prec}\cr
\sigma(L_{\cdot}(y)\otimes \id -\id \otimes R_{\prec}(y) )\Delta_{\prec}(x)&=&
(L_{\succ}(x)\otimes L_{\cdot}(y))\sigma \mathrm{r}_{_\prec}+
(\id\otimes L_{\cdot}(y)R_{\cdot}(x))\mathrm{r}_{_\succ}\cr
&-&(R_{\prec}(y)L_{\succ}(x)\otimes\id)\sigma \mathrm{r}_{_\prec}-
(R_{\prec}(y)\otimes R_{\cdot}(x))\mathrm{r}_{_\succ}
\end{eqnarray*}
Then
\begin{eqnarray*}
&&(L_{\succ}(x)\otimes \id -\id \otimes R_{\cdot}(x))\Delta_{_\succ}(y)+
\sigma(L_{\cdot}(y)\otimes \id -\id \otimes R_{\prec}(y) )\Delta_{\prec}(x)\cr
&&-(L_{\succ}(y)\otimes \id -\id \otimes R_{\cdot}(y))\Delta_{_\succ}(x)-
\sigma(L_{\cdot}(x)\otimes \id -\id \otimes R_{\prec}(x) )\Delta_{\prec}(y)\cr&&=
(L_{\succ}(x)\otimes L_{\cdot}(y)-R_{\prec}(y)\otimes R_{\cdot}(x)-
L_{\succ}(y)\otimes L_{\cdot}(x)+
R_{\prec}(x)\otimes R_{\cdot}(y))(\mathrm{r}_{_\succ}+\sigma \mathrm{r}_{_\prec})\cr
&&+ (([L_{\succ}(x), R_{\prec}(y)]-
[L_{\succ}(y), R_{\prec}(x)])\otimes\id)\sigma \mathrm{r}_{_\prec}+
(\id\otimes ([L_{\cdot}(y), R_{\cdot}(x)]-
[L_{\cdot}(x), R_{\cdot}(y)]))\mathrm{r}_{_\succ}\cr
&&=(L_{\succ}(x)\otimes L_{\cdot}(y)-R_{\prec}(y)\otimes R_{\cdot}(x)-
L_{\succ}(y)\otimes L_{\cdot}(x)+
R_{\prec}(x)\otimes R_{\cdot}(y))(\mathrm{r}_{_\succ}+\sigma \mathrm{r}_{_\prec})
\end{eqnarray*}
Note that the last equality in the above equation is due to
Eq.~\eqref{eq_bimodule_pre_anti_flexible1} and Eq.~\eqref{eq:useful1}.
Therefore, Eq.~\eqref{eq_pre_matched_3} is equivalent to the following
\begin{eqnarray}{\label{eq:coboundary2}}
(L_{\succ}(x)\otimes L_{\cdot}(y)-R_{\prec}(y)\otimes R_{\cdot}(x)-
L_{\succ}(y)\otimes L_{\cdot}(x)+R_{\prec}(x)\otimes R_{\cdot}(y))(\mathrm{r}_{_\succ}+
\sigma \mathrm{r}_{_\prec})=0,\; \forall x,y\in A.
\end{eqnarray}
\item Furthermore, by Eqs.~\eqref{eq:delta} and
\eqref{eq:sigma_delta} we have for any $x,y\in A$
\begin{eqnarray*}
\Delta(x\succ y)+\sigma\Delta(y\prec x)&=&
(\id \otimes (L_{\cdot}(x\succ y)+R_{\cdot}(y\prec x)))\mathrm{r}_{_\succ}+
(\id \otimes(R_{_{\prec}}(y\prec x)+L_{_{\succ}}(x\succ y)))\mathrm{r}_{_\prec}\cr&+&
((R_{_{\prec}}(x\succ y)+L_{_{\succ}}(y\prec x))\otimes \id)\sigma \mathrm{r}_{_\prec}+
((R_{\cdot}(x\succ y)+L_{\cdot}(y\prec x))\otimes \id)\sigma \mathrm{r}_{_\succ}
\end{eqnarray*}
Taking into account Eqs.~\eqref{eq_bimodule_pre_anti_flexible2} and
\eqref{eq_bimodule_pre_anti_flexible5}, we have
\begin{eqnarray*}
\Delta(x\succ y)+\sigma\Delta(y\prec x)&=&
(\id\otimes (L_{_{\succ}}(x\succ y)+
R_{_{\prec}}(y\prec x)))(\mathrm{r}_{_\succ}+\mathrm{r}_{_\prec})\cr
&+&
((L_{_{\succ}}(y\prec x)+R_{_{\prec}}(x\succ y))\otimes \id)(\sigma \mathrm{r}_{_\succ}+
\sigma \mathrm{r}_{_\prec})\cr
&+&
(\id \otimes (L_{_{\succ}}(x)L_{_{\prec}}(y)+
R_{_{\prec}}(x)R_{_{\succ}}(y)))\mathrm{r}_{_\succ}+\cr
&+&
((L_{_{\prec}}(y)L_{\cdot}(x)+R_{_{\succ}}(y)R_{\cdot}(x))\otimes \id )\sigma \mathrm{r}_{_\succ}.
\end{eqnarray*}
Using Eqs.~\eqref{eq:sigma_succ}, \eqref{eq:delta} and
\eqref{eq:sigma_delta}, we have for any $x,y\in A$
\begin{eqnarray*}
(R_{_{\succ}}(y)\otimes \id)\Delta_{_{\prec}}(x)&=&
(R_{_{\succ}}(y)\otimes L_{_{\succ}}(x))\mathrm{r}_{_{\prec}}+
(R_{_{\succ}}(y)R_{\cdot}(x)\otimes\id)\sigma \mathrm{r}_{_{\succ}}\cr
(L_{_{\prec}}(y)\otimes \id )\sigma\Delta_{_{\succ}}(x)&=&
(L_{_{\prec}}(y)L_{\cdot}(x)\otimes\id)\sigma \mathrm{r}_{_{\succ}}+
(L_{_{\prec}}(y)\otimes R_{_{\prec}}(x))\mathrm{r}_{_{\prec}}\cr
(\id \otimes L_{_{\succ}} (x))\Delta(y)&=&
(\id\otimes L_{_{\succ}}(x)L_{\cdot}(y))\mathrm{r}_{_{\succ}}+
(R_{_{\prec}}(y)\otimes L_{_{\succ}}(x)) \sigma \mathrm{r}_{_{\prec}}
\cr&+&
(\id\otimes L_{_{\succ}}(x)L_{_{\succ}}(y))\mathrm{r}_{_{\prec}}+
(R_{\cdot}(y)\otimes L_{_{\succ}}(x))\sigma \mathrm{r}_{_{\succ}}\cr
(\id \otimes R_{_{\prec}}(x) )\sigma\Delta(y)&=&
(L_{\cdot}(y)\otimes R{_{\prec}}(x))\sigma \mathrm{r}_{_{\succ}}+
(\id\otimes R_{_{\prec}}(x)R_{_{\prec}}(y))\mathrm{r}_{_{\prec}}\cr
&+&
(L_{_{\succ}}(y)\otimes R_{_{\prec}}(x)) \sigma \mathrm{r}_{_{\prec}}+
(\id\otimes R_{_{\prec}}(x)R_{\cdot}(y)) \mathrm{r}_{_{\succ}}
\end{eqnarray*}
Thus
\begin{eqnarray*}
&&(R_{_{\succ}}(y)\otimes \id)\Delta_{_{\prec}}(x)+
(L_{_{\prec}}(y)\otimes \id )\sigma\Delta_{_{\succ}}(x)+
(\id \otimes L_{_{\succ}} (x))\Delta(y)+
(\id \otimes R_{_{\prec}}(x) )\sigma\Delta(y)
\cr&&=
(\id\otimes(L_{_{\succ}}(x)L_{_{\succ}}(y)+
R_{_{\prec}}(x)R_{_{\prec}}(y)) )(\mathrm{r}_{_{\succ}}+\mathrm{r}_{_{\prec}})+
((R_{_{\succ}}(y)R_{\cdot}(x)+
L_{_{\prec}}(y)L_{\cdot}(x))\otimes\id )\sigma \mathrm{r}_{_{\succ}}\cr&&+
(R_{_{\prec}}(y)\otimes L_{_{\succ}}(x) +
L_{_{\succ}}(y)\otimes R_{_{\prec}}(x) )(\sigma \mathrm{r}_{_{\succ}}+
\sigma \mathrm{r}_{_{\prec}})+
(\id \otimes(L_{_{\succ}}(x)L_{_{\prec}}(y)+
R_{_{\prec}}(x)R_{_{\succ}}(y)) )\mathrm{r}_{_{\succ}}\cr&&+
(R_{_{\succ}}(y)\otimes L_{_{\succ}}(x) +
L_{_{\prec}}(y)\otimes R_{_{\prec}}(x) )(\mathrm{r}_{_{\prec}}+
\sigma \mathrm{r}_{_{\succ}})
\end{eqnarray*}
Therefore, using Eq.~\eqref{eq_bimodule_pre_anti_flexible3} we deduce that
Eq.~\eqref{eq_pre_matched_2'} is equivalent to the following
\begin{eqnarray}{\label{eq:coboundary3}}
\begin{array}{llll}
&&
(R_{_{\succ}}(y)\otimes L_{_{\succ}}(x) +
L_{_{\prec}}(y) \otimes R_{_{\prec}}(x) )(\mathrm{r}_{_{\prec}}+
\sigma \mathrm{r}_{_{\succ}})\cr&&+
(R_{_{\prec}}(y)\otimes L_{_{\succ}}(x) +
L_{_{\succ}}(y)\otimes R_{_{\prec}}(x) )(\sigma \mathrm{r}_{_{\succ}}+
\sigma \mathrm{r}_{_{\prec}})\cr&&+
(\id\otimes(L_{_{\succ}}(x\prec y)+R_{_{\prec}}(y\succ x)))(\mathrm{r}_{_{\succ}}+\mathrm{r}_{_{\prec}})\cr&&-
((L_{_{\succ}}(y\prec x)+
R_{_{\prec}}(x\succ y))\otimes \id)(\sigma \mathrm{r}_{_{\succ}}+\sigma \mathrm{r}_{_{\prec}})=0.
\end{array}
\end{eqnarray}
\item Finally, in view of Lemma~\ref{lem:sigma} we have for any $x,y\in A$
\begin{eqnarray*}
(\id \otimes R_{_{\succ}}(y))\Delta_{_{\succ}}(x)&=&
(\id\otimes R_{_{\succ}}(y)L_{\cdot}(x))\mathrm{r}_{_{\succ}}+
(R_{_{\prec}}(x)\otimes R_{_{\succ}}(y))\sigma \mathrm{r}_{_{\prec}}
\cr
(L_{_{\prec}}(y)\otimes \id)\Delta_{_{\prec}}(x)&=&
(L_{_{\prec}}(y)\otimes L_{_{\succ}}(x))\mathrm{r}_{_{\prec}}+
(L_{_{\prec}}(y)R_{\cdot}(x)\otimes \id)\sigma \mathrm{r}_{_{\succ}}
\cr
(R_{_{\prec}}(x)\otimes \id)\sigma \Delta(y)&=&
(R_{_{\prec}}(x)L_{\cdot}(y)\otimes\id )\sigma \mathrm{r}_{_{\succ}}+
(R_{_{\prec}}(x)\otimes R_{_{\prec}}(y))\mathrm{r}_{_{\prec}}\cr&+&
(R{_{\prec}}(x)L_{_{\succ}}(y)\otimes \id)\sigma \mathrm{r}_{_{\prec}}+
(R_{_{\prec}}(x)\otimes R_{\cdot}(y) )\mathrm{r}_{_{\succ}}
\cr
(\id \otimes L_{_{\succ}}(x))\sigma \Delta(y)&=&
(L_{\cdot}(y)\otimes L_{_{\succ}}(x) )\sigma \mathrm{r}_{_{\succ}}+
(\id\otimes L_{_{\succ}}(x)R_{_{\prec}}(y))\mathrm{r}_{_{\prec}}\cr&+&
(L_{_{\succ}}(y)\otimes L_{_{\succ}}(x) )\sigma \mathrm{r}_{_{\prec}}+
(\id\otimes L_{_{\succ}}(x)R_{\cdot}(y) )\mathrm{r}_{_{\succ}}
\cr
(R_{_{\succ}}(y)\otimes \id)\sigma\Delta_{_{\succ}}(x)&=&
(R_{_{\succ}}(y)L_{\cdot}(x)\otimes\id)\sigma \mathrm{r}_{_{\succ}}+
(R_{_{\succ}}(y)\otimes R_{_{\prec}}(x))\mathrm{r}_{_{\prec}}\cr
(\id \otimes L_{_{\prec}}(y))\sigma\Delta_{_{\prec}}(x)&=&
(L_{_{\succ}}(x)\otimes L_{_{\prec}}(y))\sigma \mathrm{r}_{_{\prec}}+
(\id\otimes L_{_{\prec}}(y)R_{\cdot}(x))\mathrm{r}_{_{\succ}}\cr
(\id \otimes R_{_{\prec}}(x))\Delta(y)&=&
(\id\otimes R_{_{\prec}}(x)L_{\cdot}(y))\mathrm{r}_{_{\succ}}+
(R_{_{\prec}}(y)\otimes R_{_{\prec}}(x))\sigma \mathrm{r}_{_{\prec}}\cr
&+&
(\id\otimes R_{_{\prec}}(x)L_{_{\succ}}(y))\mathrm{r}_{_{\prec}}+
(R_{\cdot}(y)\otimes R_{_{\prec}}(x))\sigma \mathrm{r}_{_{\succ}}\cr
(L_{_{\succ}}(x)\otimes \id)\Delta(y)&=&
(L_{_{\succ}}(x)\otimes L_{\cdot}(y))\mathrm{r}_{_{\succ}}+
(L_{_{\succ}}(x)R_{_{\prec}}(y)\otimes \id)\sigma \mathrm{r}_{_{\prec}}\cr
&+&
(L_{_{\succ}}(x)\otimes L_{_{\succ}}(y))\mathrm{r}_{_{\prec}}+
(L_{_{\succ}}(x)R_{\cdot}(y)\otimes\id )\sigma \mathrm{r}_{_{\succ}}
\end{eqnarray*}
According to Eq.~\eqref{eq_bimodule_pre_anti_flexible4} we have for any $x,y\in A$
\begin{eqnarray*}
&&(\id \otimes R_{_{\succ}}(y))\Delta_{_{\succ}}(x)-
(L_{_{\prec}}(y)\otimes \id )\Delta_{_{\prec}}(x)
+(R_{_{\prec}}(x)\otimes \id-\id\otimes L_{{_\succ}}(x))\sigma\Delta(y)\cr
&&-(R_{_{\succ}}(y) \otimes \id)\sigma\Delta_{_{\succ}}(x)+
(\id\otimes L_{_{\prec}}(y) )\sigma\Delta_{_{\prec}}(x)
-(\id \otimes R_{_{\prec}}(x)- L_{{_\succ}}(x)\otimes \id)\Delta(y)\cr
&=&((R_{_{\prec}}(x)\otimes R_{_{\prec}}(y)+ L_{_{\succ}}(x)\otimes L_{_{\succ}}(y))
-(\id\otimes (L_{_{\succ}}(x)R_{_{\prec}}(y)+
R_{_{\prec}}(x)L_{_{\succ}}(y))))(\mathrm{r}_{_{\succ}}+\mathrm{r}_{_{\prec}})\cr
&+&
((R_{_{\prec}}(x)L_{_{\succ}}(y)+L_{_{\succ}}(x)R_{_{\prec}}(y))\otimes \id-
(L_{_{\succ}}(y)\otimes L_{_{\succ}}(x) +
R_{_{\prec}}(y)\otimes R_{_{\prec}}(x)) )(\sigma \mathrm{r}_{_{\prec}}+
\sigma \mathrm{r}_{_{\succ}})\cr
&+&
(R_{_{\prec}}(x)\otimes R_{_{\succ}}(y)+L_{_{\succ}}(x)\otimes
L_{_{\prec}}(y))(\mathrm{r}_{_{\succ}}+\sigma \mathrm{r}_{_{\prec}})-
(L_{_{\prec}}(y)\otimes L_{_{\succ}}(x)+R_{_{\succ}}(y)\otimes
R_{_{\prec}}(x))(\mathrm{r}_{_{\prec}}+\sigma \mathrm{r}_{_{\succ}})
\end{eqnarray*}
Therefore, Eq.~\eqref{eq_pre_matched_4'} is equivalent to the following
\begin{eqnarray}{\label{eq:coboundary4}}
&&0=((R_{_{\prec}}(x)\otimes R_{_{\prec}}(y)+
L_{_{\succ}}(x)\otimes L_{_{\succ}}(y))
-(\id\otimes(L_{_{\succ}}(x)R_{_{\prec}}(y)+
R_{_{\prec}}(x)L_{_{\succ}}(y))))(\mathrm{r}_{_{\succ}}+\mathrm{r}_{_{\prec}})\cr
&&+
((R_{_{\prec}}(x)L_{_{\succ}}(y)+L_{_{\succ}}(x)R_{_{\prec}}(y))\otimes \id-
(L_{_{\succ}}(y)\otimes L_{_{\succ}}(x) +
R_{_{\prec}}(y)\otimes R_{_{\prec}}(x)) )(\sigma \mathrm{r}_{_{\prec}}+\sigma \mathrm{r}_{_{\succ}})\\
&&+
(R_{_{\prec}}(x)\otimes R_{_{\succ}}(y)+
L_{_{\succ}}(x)\otimes L_{_{\prec}}(y))(\mathrm{r}_{_{\succ}}+\sigma \mathrm{r}_{_{\prec}})-
(L_{_{\prec}}(y)\otimes L_{_{\succ}}(x)+
R_{_{\succ}}(y)\otimes R_{_{\prec}}(x))(\mathrm{r}_{_{\prec}}+\sigma \mathrm{r}_{_{\succ}})\nonumber
\end{eqnarray}
\end{itemize}
The computations above provide the proof of the following theorem.
\begin{thm}
Let $(A, \prec, \succ)$ be a pre-anti-flexible algebra and
$\mathrm{r}_{_\prec}, \mathrm{r}_{_\succ}\in A\otimes A$. Consider
$\Delta_{_\prec}, \Delta_{_\succ}:A\rightarrow A\otimes A$
two linear maps defined by Eq.~\eqref{eq:coboundary} such that their dual maps
$\Delta_{\prec}^*, \Delta_{\succ}^*:A^*\otimes A^*\rightarrow A^*$
define a pre-anti-flexible algebra on $A^*$.
Then $(A, A^*)$ is a pre-anti-flexible bialgebra if and only if
$\Delta_{\prec}, \Delta_{\succ}$ satisfy
Eqs.~\eqref{eq:coboundary1}--\eqref{eq:coboundary4}.
\end{thm}
\begin{lem}
Let $A$ be a vector space and let
$\Delta_{_\prec}, \Delta_{\succ} :A\rightarrow A\otimes A$ be two linear maps.
Then $\Delta_{_{\prec}}^*, \Delta_{_{\succ}}^*:A^*\otimes A^*\rightarrow A^*$
define a pre-anti-flexible algebra structure
on $A^*$ if and only if the following conditions are satisfied
\begin{subequations}
\begin{eqnarray}\label{eq:rmatrix1}
(\Delta_{_\succ}\otimes\id )\Delta_{_{\prec}}-
(\id\otimes\Delta_{_\prec})\Delta_{_{\succ}}=
(\id\otimes\sigma\Delta_{_\succ})\sigma\Delta_{_{\prec}}-
(\sigma\Delta_{_{\prec}}\otimes\id)\sigma\Delta_{_{\succ}},
\end{eqnarray}
\begin{eqnarray}\label{eq:rmatrix2}
((\Delta_{_{\prec}}+\Delta_{_{\succ}})\otimes\id)\Delta_{\succ}-
(\id\otimes\Delta_{\succ})\Delta_{\succ}=
(\id\otimes \sigma\Delta_{_{\prec}})\sigma\Delta_{_\prec}-
(\sigma(\Delta_{_{\prec}}+\Delta_{_{\succ}})\otimes\id)\sigma\Delta_{_\prec}.
\end{eqnarray}
\end{subequations}
\end{lem}
\begin{proof}
Denote by ``$\prec_{_{A^*}}, \succ_{_{A^*}}$'' the bilinear products on
$A^*$ defined respectively by
$\Delta_{_{\prec}}, \Delta_{_{\succ}}$, i.e.
for any $x\in A$ and for any $a, b\in A^*$
\begin{equation*}
\langle a\prec_{_{A^*}} b, x \rangle=\langle \Delta_{_{\prec}}^*(a\otimes b), x \rangle
=\langle a\otimes b,\Delta_{_{\prec}}(x)\rangle;\;\;
\langle a\succ_{_{A^*}} b, x\rangle=\langle \Delta_{_{\succ}}^*(a\otimes b), x \rangle=
\langle a\otimes b, \Delta_{_{\succ}}(x)\rangle.
\end{equation*}
Furthermore, according to Eq.~\eqref{eq:biasso} for any $a,b, c\in A^*$
and any $x\in A$, we have
\begin{eqnarray*}
\langle (a,b,c)_{_m}, x \rangle
&=&\langle (a\succ_{_{A^*}} b)\prec_{_{A^*}} c-a\succ_{_{A^*}}
(b\prec_{_{A^*}} c), x\rangle \cr&=&
\langle(\Delta_{_{\prec}}^*(\Delta_{_\succ}^*\otimes\id )-
\Delta_{_{\succ}}^*(\id\otimes\Delta_{_\prec}^*))(a\otimes b\otimes c), x\rangle\cr
\langle (a,b,c)_{_m}, x \rangle&=&\langle a\otimes b\otimes c,
( (\Delta_{_\succ}\otimes\id )\Delta_{_{\prec}}-
(\id\otimes\Delta_{_\prec})\Delta_{_{\succ}})(x)\rangle,\cr
\langle (c,b,a)_{_m}, x \rangle&=&\langle (c\succ_{_{A^*}} b)\prec_{_{A^*}} a-
c\succ_{_{A^*}} (b\prec_{_{A^*}} a), x\rangle \cr&=&
\langle( \Delta_{_{\prec}}^*\sigma(\id\otimes\Delta_{_\succ}^*\sigma)-
\Delta_{_{\succ}}^*\sigma(\Delta_{_{\prec}}^*\sigma\otimes\id))
(a\otimes b\otimes c), x\rangle\cr
\langle (c,b,a)_{_m}, x \rangle&=&\langle a\otimes b\otimes c,
((\id\otimes\sigma\Delta_{_\succ})\sigma\Delta_{_{\prec}}-
(\sigma\Delta_{_{\prec}}\otimes\id)\sigma\Delta_{_{\succ}})(x)\rangle,\cr
\langle (a,b,c)_{_l}, x \rangle&=&\langle (a \prec_{_{A^*}}b+
a\succ_{_{A^*}} b) \succ_{_{A^*}} c- a\succ_{_{A^*}} (b \succ_{_{A^*}} c), x\rangle \cr
&=&\langle(\Delta_{\succ}^*((\Delta_{_{\prec}}^*+\Delta_{_{\succ}}^*)\otimes\id)-
\Delta_{\succ}^*(\id\otimes\Delta_{\succ}^*))(a\otimes b\otimes c) ,
x\rangle\cr&=&\langle a\otimes b\otimes c,
(((\Delta_{_{\prec}}+\Delta_{_{\succ}})\otimes\id)\Delta_{\succ}-
(\id\otimes\Delta_{\succ})\Delta_{\succ})(x) \rangle,\cr
\langle (c,b,a)_{_r}, x \rangle&=& \langle (c\prec_{_{A^*}} b) \prec_{_{A^*}}a-
c\prec_{_{A^*}}(b\prec_{_{A^*}} a+b\succ_{_{A^*}} a), x\rangle\cr
&= &\langle ((\id\otimes \Delta_{_{\prec}}^*\sigma)\Delta_{_\prec}^*\sigma-
((\Delta_{_{\prec}}^*+\Delta_{_{\succ}}^*)\sigma\otimes\id)
\Delta_{_\prec}^*\sigma)(a\otimes b\otimes c),
x\rangle\cr
&=&\langle a\otimes b\otimes c, ((\id\otimes \sigma\Delta_{_{\prec}})\sigma\Delta_{_\prec}-
(\sigma(\Delta_{_{\prec}}+\Delta_{_{\succ}})\otimes\id)\sigma\Delta_{_\prec})(x)\rangle.
\end{eqnarray*}
Therefore, $(A^*, \prec_{_{A^*}}, \succ_{_{A^*}})$ is a
pre-anti-flexible algebra if and only if
Eqs.~\eqref{eq:rmatrix1} and \eqref{eq:rmatrix2} are satisfied.
\end{proof}
For a given pre-anti-flexible algebra $(A,\prec, \succ)$ and
two elements $\mathrm{r}_{_\prec}, \mathrm{r}_{_\succ}$ in $A\otimes A$ given by
$\displaystyle \mathrm{r}_{_\succ}=\sum_i{a_i\otimes b_i}$ and
$\displaystyle \mathrm{r}_{_\prec}=\sum_i{c_i\otimes d_i},$
with $a_i, b_i, c_i, d_i \in A$, we designate by
\begin{equation*}
\mathrm{r}_{_{\succ, 12}}=\sum_ia_i\otimes b_i\otimes 1,\quad
\mathrm{r}_{_{\succ, 13}}=\sum_{i}a_i\otimes 1\otimes b_i,
\quad \mathrm{r}_{_{\succ, 23}}=\sum_i1\otimes a_i\otimes b_i,
\mbox{ etc} \cdots,
\end{equation*}
\begin{equation*}
\mathrm{r}_{_{\prec, 12}}=\sum_i c_i\otimes d_i\otimes 1,\quad
\mathrm{r}_{_{\prec, 13}}=\sum_{i}c_i\otimes 1\otimes d_i,
\quad \mathrm{r}_{_{\prec, 23}}=\sum_i1\otimes c_i\otimes d_i,
\mbox{ etc} \cdots,
\end{equation*}
where $1$ is the unit element if $(A,\prec, \succ)$ is unitary; otherwise it is a
symbol playing a role similar to that of a unit element of $A$. Operations
between two of the elements $\mathrm{r}_{_{\prec, ..}}, \mathrm{r}_{_{\succ, ..}}$ are then defined in the obvious way. For instance,
\begin{equation*}
\mathrm{r}_{_{\succ,12}}\cdot \mathrm{r}_{_{\succ, 13}}=
\sum_{i,j}a_i\cdot a_j\otimes b_i\otimes b_j,
\mathrm{r}_{_{\succ, 13}}\prec \mathrm{r}_{_{\succ, 23}}=
\sum_{i,j}a_i\otimes a_j\otimes b_i\prec b_j,
\mathrm{r}_{_{\prec, 23}}\succ \mathrm{r}_{_{\prec, 12}}=
\sum_{i,j}c_j\otimes c_i\succ d_j\otimes d_i,
\mbox{ etc}
\end{equation*}
and similarly
\begin{equation*}
\mathrm{r}_{_{\prec, 12}}\succ \mathrm{r}_{_{\succ, 13}}=
\sum_{i,j}c_i\succ a_j\otimes d_i\otimes b_j,
\mathrm{r}_{_{\succ,13}}\prec \mathrm{r}_{_{\prec, 23}}=
\sum_{i,j}a_i\otimes c_j\otimes b_i\prec d_j,
\mathrm{r}_{_{\prec, 23}}\cdot \mathrm{r}_{_{\succ,12}}=
\sum_{i,j}a_j\otimes c_i\cdot b_j\otimes d_i,
\end{equation*}
and so on.
\begin{pro}
Let $(A, \prec, \succ)$ be a pre-anti-flexible algebra and
$\mathrm{r}_{_\prec}, \mathrm{r}_{_\succ} \in A\otimes A$. Define
$\Delta_{_{\prec}}, \Delta_{_{\succ}}:A\rightarrow A\otimes A$ by
Eqs.~\eqref{eq:coboundary_a} and \eqref{eq:coboundary_b}.
Then $\Delta_{_\prec}^*, \Delta_{_{\succ}}^*: A^*\otimes A^*\rightarrow A^*$
define a pre-anti-flexible algebra structure on $A^*$
if and only if the following equations are satisfied for any $x\in A$
\begin{subequations}
\begin{eqnarray}\label{eq:ybe1}
&&(\id\otimes\id\otimes L_{\succ}(x))M(\mathrm{r})-
(R_{\prec}(x)\otimes \id\otimes\id)N(\mathrm{r})\cr
&&+(\id\otimes \id \otimes R_{\prec}(x))P(\mathrm{r})-
(L_{\succ}(x)\otimes\id \otimes \id)Q(\mathrm{r})=0,
\end{eqnarray}
\begin{eqnarray}\label{eq:ybe2}
\begin{array}{llll}
&&(\id \otimes\id\otimes L_{\cdot}(x))M'(\mathrm{r})
+(\id \otimes \id \otimes R_{\cdot}(x))N'(\mathrm{r})+R'(x)
\cr&&-(R_{\prec}(x)\otimes\id \otimes \id)P'(\mathrm{r})
-(L_{\succ}(x)\otimes \id \otimes \id)Q'(\mathrm{r})=0,
\end{array}
\end{eqnarray}
\end{subequations}
where
\begin{eqnarray*}
M(\mathrm{r})=\mathrm{r}_{_{\prec, 23}}\cdot \mathrm{r}_{_{\succ, 12}}+
\mathrm{r}_{_{\prec, 21}}\prec \mathrm{r}_{_{\prec, 13}}-
\mathrm{r}_{_{\succ, 13}} \succ \mathrm{r}_{_{\prec, 23}},\;
P(\mathrm{r})=\mathrm{r}_{_{\succ, 12}}\cdot \mathrm{r}_{_{\prec, 23}}+
\mathrm{r}_{_{\prec, 13}}\succ \mathrm{r}_{_{\prec, 21}}-
\mathrm{r}_{_{\prec, 23}} \prec \mathrm{r}_{_{\succ, 13}},\cr
N(\mathrm{r})=
\mathrm{r}_{_{\succ, 32}}\cdot \mathrm{r}_{_{\prec, 21}}+
\mathrm{r}_{_{\prec, 31}}\succ \mathrm{r}_{_{\prec, 23}}-
\mathrm{r}_{_{\prec, 21}} \prec \mathrm{r}_{_{\succ,31}},\;
Q(\mathrm{r})=
\mathrm{r}_{_{\prec , 21}}\cdot \mathrm{r}_{_{\succ, 32}}+
\mathrm{r}_{_{\prec, 23}}\prec \mathrm{r}_{_{\prec, 31}}-
\mathrm{r}_{_{\succ, 31}} \succ \mathrm{r}_{_{\prec, 21}},
\end{eqnarray*}
\begin{eqnarray*}
M'(\mathrm{r})&=&
\mathrm{r}_{_{\succ, 23}}\prec \mathrm{r}_{_{\succ, 12}}+
\mathrm{r}_{_{\succ, 21}}\succ \mathrm{r}_{_{\succ, 13}}-
\mathrm{r}_{_{\succ, 13}}\cdot \mathrm{r}_{_{\succ,23}}+
\mathrm{r}_{_{\succ, 23}}\succ (\mathrm{r}_{_{\prec, 12}}+\mathrm{r}_{_{\succ, 12}})+
(\mathrm{r}_{_{\prec, 21}}+\mathrm{r}_{_{\succ, 21}})\prec \mathrm{r}_{_{\succ, 13}},\cr
N'(\mathrm{r})&=&
\mathrm{r}_{_{\succ, 12}}\succ \mathrm{r}_{_{\succ, 23}}+
\mathrm{r}_{_{\succ, 13}}\prec \mathrm{r}_{_{\succ, 21}}-
\mathrm{r}_{_{\succ,23}}\cdot \mathrm{r}_{_{\succ, 13}}+
(\mathrm{r}_{_{\prec, 12}}+\mathrm{r}_{_{\succ, 12}})\prec \mathrm{r}_{_{\succ, 23}}+
\mathrm{r}_{_{\succ, 13}}\succ (\mathrm{r}_{_{\prec, 21}}+\mathrm{r}_{_{\succ, 21}}),\cr
P'(\mathrm{r})&=&
\mathrm{r}_{_{\prec, 32}}\prec \mathrm{r}_{_{\prec, 21}}+
\mathrm{r}_{_{\prec, 31}}\cdot \mathrm{r}_{_{\succ, 23}}-
\mathrm{r}_{_{\succ, 21}}\succ \mathrm{r}_{_{\prec , 31}}-
(\mathrm{r}_{_{\prec, 21}}+\mathrm{r}_{_{\succ, 21}})\prec \mathrm{r}_{_{\prec, 31}},\cr
Q'(\mathrm{r})&=&
\mathrm{r}_{_{\prec, 21}}\succ \mathrm{r}_{_{\prec, 32}}+
\mathrm{r}_{_{\succ, 23}}\cdot \mathrm{r}_{_{\prec, 31}}
-\mathrm{r}_{_{\prec,31}}\prec \mathrm{r}_{_{\succ, 21}}-
\mathrm{r}_{_{\prec,31}}\succ (\mathrm{r}_{_{\prec, 21}}+\mathrm{r}_{_{\succ, 21}}),
\end{eqnarray*}
and
\begin{eqnarray*}
R'(x)&=&[(\id\otimes (R_{_{\prec}}(x)+L_{_{\succ}}(x))\otimes \id)\mathrm{r}_{_{\prec, 32}}]
\succ(\mathrm{r}_{_{\prec, 12}}+\mathrm{r}_{_{\succ, 12}})\cr
&-&[((L_{_{\prec}}(x)+R_{_{\succ}}(x))\otimes\id \otimes \id)\mathrm{r}_{_{\prec, 31}}]
\succ(\mathrm{r}_{_{\prec, 21}}+\mathrm{r}_{_{\succ, 21}}).
\end{eqnarray*}
\end{pro}
\begin{proof}
Let $x\in A$.
Setting $\displaystyle \mathrm{r}_{_\succ}=\sum_i a_i\otimes b_i, \;\;
\mathrm{r}_{_\prec}=\sum_i c_i\otimes d_i$ we have
\begin{eqnarray*}
(\Delta_{_\succ}\otimes\id )\Delta_{_{\prec}}(x)&=&
\sum_i\{a_{j}\otimes(c_i\cdot b_j)\otimes (x\succ d_{i})+
(d_j\prec c_i)\otimes c_j\otimes (x\succ d_i)\cr&+&
a_j\otimes ((b_i\cdot x)\cdot b_j)\otimes a_i+
(d_j\prec (b_i\cdot x))\otimes c_j\otimes a_i\}\cr
&=&
(\id\otimes\id\otimes L_{\succ}(x))(\mathrm{r}_{_{\prec, 23}}\cdot \mathrm{r}_{_{\succ, 12}}+
\mathrm{r}_{_{\prec, 21}}\prec \mathrm{r}_{_{\prec, 13}})\cr
&+&\sum_i\{a_j\otimes ((b_i\cdot x)\cdot b_j)\otimes a_i+
(d_j\prec (b_i\cdot x))\otimes c_j\otimes a_i\}
\cr
(\id\otimes\Delta_{_\prec})\Delta_{_{\succ}}(x)&=&
\sum_i\{a_i\otimes c_j\otimes ((x\cdot b_i)\prec d_j)+
a_i\otimes (b_j\cdot (x\cdot b_i))\otimes a_j\cr&+&
(d_i\prec x)\otimes c_j\otimes (c_i\succ d_j)+
(d_i\prec x)\otimes (b_j\cdot c_i)\otimes a_j\}\cr
&=&
(R_{\prec}(x)\otimes \id\otimes\id)(\mathrm{r}_{_{\prec, 31}}\succ \mathrm{r}_{_{\prec, 23}}+
\mathrm{r}_{_{\succ, 32}}\cdot \mathrm{r}_{_{\prec, 21}})\cr
&+&\sum_i \{a_i\otimes c_j\otimes ((x\cdot b_i)\prec d_j)+
a_i\otimes (b_j\cdot (x\cdot b_i))\otimes a_j\}\cr
(\id\otimes\sigma\Delta_{_\succ})\sigma\Delta_{_{\prec}}(x)&=&
\sum_i\{(x\succ d_i)\otimes (c_i\cdot b_j)\otimes a_j+
(x\succ d_i)\otimes c_j\otimes (d_j\prec c_i)\cr&+&
a_i\otimes ((b_i\cdot x)\cdot b_j)\otimes a_j+
a_i\otimes c_j\otimes (d_j\prec (b_i\cdot x))\}\cr&=&
(L_{\succ}(x)\otimes\id \otimes \id)(\mathrm{r}_{_{\prec , 21}}\cdot \mathrm{r}_{_{\succ, 32}}+
\mathrm{r}_{_{\prec, 23}}\prec \mathrm{r}_{_{\prec, 31}})\cr
&+&\sum_i\{a_i\otimes ((b_i\cdot x)\cdot b_j)\otimes a_j+
a_i\otimes c_j\otimes (d_j\prec (b_i\cdot x))\}\cr
(\sigma\Delta_{_{\prec}}\otimes\id)\sigma\Delta_{_{\succ}}(x)&=&
\sum_i\{((x\cdot b_i)\succ d_j)\otimes c_j\otimes a_i+
a_j\otimes (b_j\cdot (x\cdot b_i))\otimes a_i\cr
&+&(c_i\succ d_j)\otimes c_j\otimes (d_i \prec x)+
a_j\otimes (b_j\cdot c_i)\otimes (b_i\prec x)\}\cr
&=&
(\id\otimes \id \otimes R_{\prec}(x))(\mathrm{r}_{_{\prec, 13}}\succ \mathrm{r}_{_{\prec, 21}}+
\mathrm{r}_{_{\succ, 12}}\cdot \mathrm{r}_{_{\prec, 23}})\cr
&+&\sum_i\{((x\cdot b_i)\succ d_j)\otimes c_j\otimes a_i+
a_j\otimes (b_j\cdot (x\cdot b_i))\otimes a_i\}
\end{eqnarray*}
\begin{eqnarray*}
((\Delta_{_{\prec}}+\Delta_{_{\succ}})\otimes\id)\Delta_{\succ}(x)&=&
\sum_i\{c_j\otimes (a_i\succ d_j)\otimes (x\cdot b_i)+
(b_j\cdot a_i)\otimes a_j\otimes (x\cdot b_i)\cr&+&
a_j\otimes (a_i\cdot b_j)\otimes (x\cdot b_i)
+(d_j\prec a_i)\otimes c_j\otimes(x\cdot b_i)\cr&+&
c_j\otimes ((d_i\prec x)\succ d_j)\otimes c_i+
(b_j\cdot(d_i\prec x))\otimes a_j\otimes c_i\cr&+&
a_j\otimes ((d_i\prec x)\cdot b_j)\otimes c_i+
(d_j\prec (d_i\prec x))\otimes c_j\otimes c_i\}\cr
&=&
(\id \otimes\id\otimes L_{\cdot}(x))(\mathrm{r}_{_{\succ, 23}}\succ \mathrm{r}_{_{\prec, 12}}+
\mathrm{r}_{_{\succ, 21}}\cdot \mathrm{r}_{_{\succ, 13}}\cr&+&
\mathrm{r}_{_{\succ, 23}}\cdot \mathrm{r}_{_{\succ, 12}}+
\mathrm{r}_{_{\prec, 21}}\prec \mathrm{r}_{_{\succ, 13}})\cr
&+&\sum_i\{c_j\otimes ((d_i\prec x)\succ d_j)\otimes c_i+
(b_j\cdot(d_i\prec x))\otimes a_j\otimes c_i\cr&+&
a_j\otimes ((d_i\prec x)\cdot b_j)\otimes c_i+
(d_j\prec (d_i\prec x))\otimes c_j\otimes c_i\}\cr
(\id\otimes\Delta_{\succ})\Delta_{\succ}(x)&=&
\sum_i\{a_i\otimes a_j\otimes ((x\cdot b_i)\cdot b_j)
+a_i\otimes (d_j\prec (x\cdot b_i))\otimes c_j\cr
&+&(d_i\prec x)\otimes a_j\otimes (c_i\cdot b_j)+
(d_i\prec x)\otimes (d_j\prec c_i)\otimes c_j\}\cr
&=&
(R_{\prec}(x)\otimes\id \otimes \id)(\mathrm{r}_{_{\prec, 31}}\cdot \mathrm{r}_{_{\succ, 23}}+
\mathrm{r}_{_{\prec, 32}}\prec \mathrm{r}_{_{\prec, 21}})\cr
&+&\sum_i\{a_i\otimes a_j\otimes ((x\cdot b_i)\cdot b_j)
+a_i\otimes (d_j\prec (x\cdot b_i))\otimes c_j\}\cr
(\id\otimes \sigma\Delta_{_{\prec}})\sigma\Delta_{_\prec}(x)&=&
\sum_i\{(x\succ d_i)\otimes (c_i\succ d_j)\otimes c_j+
(x\succ d_i)\otimes a_j\otimes (b_j\cdot c_i)\cr
&+&a_i\otimes ((b_i\cdot x)\succ d_j)\otimes c_j+
a_i\otimes a_j\otimes (b_j\cdot(b_i\cdot x))\}\cr
&=&(L_{\succ}(x)\otimes \id \otimes \id)(\mathrm{r}_{_{\prec, 21}}\succ \mathrm{r}_{_{\prec, 32}}+
\mathrm{r}_{_{\succ, 23}}\cdot \mathrm{r}_{_{\prec, 31}})\cr
&+& \sum_i\{a_i\otimes ((b_i\cdot x)\succ d_j)\otimes c_j+
a_i\otimes a_j\otimes (b_j\cdot(b_i\cdot x))\}\cr
(\sigma(\Delta_{_{\prec}}+\Delta_{_{\succ}})\otimes\id)\sigma\Delta_{_\prec}(x)&=&
\sum_i\{((x\succ d_i)\succ d_j)\otimes c_j\otimes c_i+
a_j\otimes (b_j\cdot(x\succ d_i))\otimes c_i\cr
&+&
((x\succ d_i)\cdot b_j)\otimes a_j\otimes c_i+
c_j\otimes (d_j\prec(x\succ d_i))\otimes c_i\cr
&+&
(a_i\succ d_j)\otimes c_j\otimes (b_i\cdot x)+
a_j\otimes (b_j\cdot a_i)\otimes (b_i\cdot x)
\cr&+&
(a_i\cdot b_j)\otimes a_j\otimes (b_i\cdot x)+
c_j\otimes (d_j\prec a_i)\otimes (b_i\cdot x)\}\cr
&=&
(\id \otimes \id \otimes R_{\cdot}(x))(\mathrm{r}_{_{\succ, 13}}\succ \mathrm{r}_{_{\prec, 21}}+
\mathrm{r}_{_{\succ, 12}}\cdot \mathrm{r}_{_{\succ, 23}}\cr&+&
\mathrm{r}_{_{\succ, 13}}\cdot \mathrm{r}_{_{\succ, 21}}+
\mathrm{r}_{_{\prec, 12}}\prec \mathrm{r}_{_{\succ, 23}})\cr
&+&
\sum_i\{((x\succ d_i)\succ d_j)\otimes c_j\otimes c_i+
a_j\otimes (b_j\cdot(x\succ d_i))\otimes c_i\cr
&+&
((x\succ d_i)\cdot b_j)\otimes a_j\otimes c_i+
c_j\otimes (d_j\prec(x\succ d_i))\otimes c_i\}
\end{eqnarray*}
Then
\begin{eqnarray*}
&&(\Delta_{_\succ}\otimes\id )\Delta_{_{\prec}}(x)-
(\id\otimes\Delta_{_\prec})\Delta_{_{\succ}}(x)-
(\id\otimes\sigma\Delta_{_\succ})\sigma\Delta_{_{\prec}}(x)+
(\sigma\Delta_{_{\prec}}\otimes\id)\sigma\Delta_{_{\succ}}(x)=A1(x)+A2(x)\cr&&+
(\id\otimes\id\otimes L_{\succ}(x))(\mathrm{r}_{_{\prec, 23}}\cdot \mathrm{r}_{_{\succ, 12}}+
\mathrm{r}_{_{\prec, 21}}\prec \mathrm{r}_{_{\prec, 13}})
-(R_{\prec}(x)\otimes \id\otimes\id)(\mathrm{r}_{_{\prec, 31}}\succ \mathrm{r}_{_{\prec, 23}}+
\mathrm{r}_{_{\succ, 32}}\cdot \mathrm{r}_{_{\prec, 21}})\cr &&
+(\id\otimes \id \otimes R_{\prec}(x))(\mathrm{r}_{_{\prec, 13}}\succ \mathrm{r}_{_{\prec, 21}}+
\mathrm{r}_{_{\succ, 12}}\cdot \mathrm{r}_{_{\prec, 23}})
-(L_{\succ}(x)\otimes\id \otimes \id)(\mathrm{r}_{_{\prec , 21}}\cdot \mathrm{r}_{_{\succ, 32}}+
\mathrm{r}_{_{\prec, 23}}\prec \mathrm{r}_{_{\prec, 31}})
\end{eqnarray*}
where
\begin{eqnarray*}
A1(x)&=&\sum_{i,j}\{ a_j\otimes ((b_i\cdot x)\cdot b_j+b_j\cdot (x\cdot b_i))\otimes a_i-
a_i\otimes (b_j\cdot (x\cdot b_i)+(b_i\cdot x)\cdot b_j)\otimes a_j\}\\
A2(x)&=&\sum_{i,j}\{(d_j\prec (b_i\cdot x)+(x\cdot b_i)\succ d_j)\otimes c_j\otimes a_i -
a_i\otimes c_j\otimes ((x\cdot b_i)\prec d_j+d_j\prec (b_i\cdot x)) \}\\
\end{eqnarray*}
By exchanging $i$ and $j$, and using Remark~\ref{rmk_1}~\eqref{rmk_flex},
we have $A1(x)=0$.
Using Eqs.~\eqref{eq:pre-antiflexible2} we have
\begin{eqnarray*}
A2(x)&=&(L_{\succ}(x)\otimes\id \otimes\id)
(\mathrm{r}_{_{\succ, 31}} \succ \mathrm{r}_{_{\prec, 21}})
-(\id \otimes \id \otimes L_{\succ}(x))
(\mathrm{r}_{_{\succ, 13}} \succ \mathrm{r}_{_{\prec, 23}})
\cr&+&
(R_{\prec}(x)\otimes\id \otimes\id) (\mathrm{r}_{_{\prec, 21}} \prec \mathrm{r}_{_{\succ,31}})-
(\id \otimes \id \otimes R_{\prec}(x))(\mathrm{r}_{_{\prec, 23}} \prec \mathrm{r}_{_{\succ, 13}}).
\end{eqnarray*}
Besides,
\begin{eqnarray*}
&&((\Delta_{_{\prec}}+\Delta_{_{\succ}})\otimes\id)\Delta_{\succ}(x)-
(\id\otimes\Delta_{\succ})\Delta_{\succ}(x)
-(\id\otimes \sigma\Delta_{_{\prec}})\sigma\Delta_{_\prec}(x)
+(\sigma(\Delta_{_{\prec}}+
\Delta_{_{\succ}})\otimes\id)\sigma\Delta_{_\prec}(x)\cr
&=&(\id \otimes\id\otimes L_{\cdot}(x))(\mathrm{r}_{_{\succ, 23}}\succ \mathrm{r}_{_{\prec, 12}}
+\mathrm{r}_{_{\succ, 21}}\cdot \mathrm{r}_{_{\succ, 13}}+
\mathrm{r}_{_{\succ, 23}}\cdot \mathrm{r}_{_{\succ, 12}}+
\mathrm{r}_{_{\prec, 21}}\prec \mathrm{r}_{_{\succ, 13}})+B(x)
\cr &+&(\id \otimes \id \otimes R_{\cdot}(x))(\mathrm{r}_{_{\succ, 13}}\succ \mathrm{r}_{_{\prec, 21}
}+\mathrm{r}_{_{\succ, 12}}\cdot \mathrm{r}_{_{\succ, 23}}+
\mathrm{r}_{_{\succ, 13}}\cdot \mathrm{r}_{_{\succ, 21}}+
\mathrm{r}_{_{\prec, 12}}\prec \mathrm{r}_{_{\succ, 23}})+
\cr&-&(R_{\prec}(x)\otimes\id \otimes \id)(\mathrm{r}_{_{\prec, 31}}\cdot \mathrm{r}_{_{\succ, 23}
}+\mathrm{r}_{_{\prec, 32}}\prec \mathrm{r}_{_{\prec, 21}})
-(L_{\succ}(x)\otimes \id \otimes \id)(\mathrm{r}_{_{\prec, 21}}\succ \mathrm{r}_{_{\prec, 32}}+
\mathrm{r}_{_{\succ, 23}}\cdot \mathrm{r}_{_{\prec, 31}}),
\end{eqnarray*}
where $B(x)=B1(x)+B2(x)+B3(x)+B4(x)+B5(x)$ with
$$
B1(x)=-\sum_{i,j}\{a_i\otimes a_j\otimes ((x\cdot b_i)\cdot b_j+
b_j\cdot(b_i\cdot x))\},\;\;
B2(x)=\sum_{i,j}\{c_j\otimes ((d_i\prec x)\succ d_j+
d_j\prec(x\succ d_i))\otimes c_i\},
$$
$$
B3(x)=\sum_{i,j}\{((x\succ d_i)\succ d_j+
d_j\prec(d_i\prec x))\otimes c_j\otimes c_i\},\;\;
B4(x)=\sum_{i,j}\{ (b_j\cdot(d_i\prec x)+(x\succ d_i)\cdot b_j)\otimes a_j\otimes c_i\}
$$
\begin{eqnarray*}
B5(x)=\sum_{i,j}\{a_j\otimes (b_j\cdot(x\succ d_i)+(d_i\prec x)\cdot b_j)\otimes c_i-
a_i\otimes ((b_i\cdot x)\succ d_j+d_j\prec (x\cdot b_i))\otimes c_j\}.
\end{eqnarray*}
Considering Remark~\ref{rmk_1}~\eqref{rmk_flex} we have
\begin{eqnarray*}
B1(x)=-(\id \otimes \id \otimes L_{\cdot}(x))
(\mathrm{r}_{_{\succ, 13}}\cdot \mathrm{r}_{_{\succ,23}})-
(\id \otimes \id \otimes R_{\cdot}(x))(\mathrm{r}_{_{\succ,23}}\cdot \mathrm{r}_{_{\succ, 13}}).
\end{eqnarray*}
Using Eq.~\eqref{eq_bimodule_pre_anti_flexible5} we have
\begin{eqnarray*}
B3(x)&=&\sum_{i,j}\{(x\succ (d_i\succ d_j)+(d_j\prec d_i)\prec x)\otimes c_j\otimes c_i-
((x\prec d_i)\succ d_j+d_j\prec (d_i\succ x))\otimes c_j\otimes c_i\}\cr
&=&(L_{_{\succ}}(x)\otimes\id \otimes\id)(\mathrm{r}_{_{\prec,31}}\succ \mathrm{r}_{_{\prec, 21}})+
(R_{_{\prec}}(x)\otimes\id \otimes\id)(\mathrm{r}_{_{\prec, 21}}\prec \mathrm{r}_{_{\prec, 31}})
\cr&-&\sum_{i,j}\{((x\prec d_i)\succ d_j+d_j\prec (d_i\succ x))\otimes c_j\otimes c_i\}.
\end{eqnarray*}
By Eqs.~\eqref{eq_bimodule_pre_anti_flexible2} and \eqref{eq_bimodule_pre_anti_flexible5} we have
\begin{eqnarray*}
B4(x)&=& \sum_{i,j}\{(b_j\succ(d_i\prec x)+(x\succ d_i)\prec b_j ) \otimes a_j\otimes c_i+
(b_j\prec(d_i\prec x)+(x\succ d_i)\succ b_j ) \otimes a_j\otimes c_i\}\cr
&=&(L_{_{\succ}}(x)\otimes\id \otimes\id)(\mathrm{r}_{_{\prec, 31}}\prec \mathrm{r}_{_{\succ, 21}}+
\mathrm{r}_{_{\prec, 31}}\succ \mathrm{r}_{_{\succ, 21}})\cr
&+&
(R_{_{\prec}}(x)\otimes\id \otimes\id)(\mathrm{r}_{_{\succ, 21}}\succ \mathrm{r}_{_{\prec , 31}}+
\mathrm{r}_{_{\succ, 21}}\prec \mathrm{r}_{_{\prec, 31}})\cr
&-&\sum_{i,j}\{(b_j\prec(d_i\succ x)+(x\prec d_i)\succ b_j ) \otimes a_j\otimes c_i\}.
\end{eqnarray*}
Furthermore, we have
\begin{eqnarray*}
B5(x)&=&\sum_{i,j}\{a_j\otimes (b_j\cdot(x\succ d_i)+(d_i\prec x)\cdot b_j)\otimes c_i-
a_i\otimes ((b_i\cdot x)\succ d_j+d_j\prec (x\cdot b_i))\otimes c_j\}\cr
&=&\sum_{i,j}\{a_j\otimes (b_j\prec (x\succ d_i)+b_j\succ (x\succ d_i))\otimes c_i+
a_j\otimes ((d_i\prec x)\prec b_j+(d_i\prec x)\succ b_j) \otimes c_i\cr
&-&a_i\otimes ((b_i\cdot x)\succ d_j+d_j\prec (x\cdot b_i))\otimes c_j\}
=\sum_{i,j}\{a_j\otimes (b_j\succ (x\succ d_i)+(d_i\prec x)\prec b_j)\otimes c_i\cr
&-&a_i\otimes ((b_i\cdot x)\succ d_j+d_j\prec (x\cdot b_i))\otimes c_j\}+
\sum_{i,j}\{a_j\otimes (b_j\prec (x\succ d_i)+(d_i\prec x)\succ b_j)\otimes c_i\}\cr
B5(x)&=&\sum_{i,j}\{a_j\otimes (b_j\prec (x\succ d_i)+(d_i\prec x)\succ b_j)\otimes c_i\}
\end{eqnarray*}
The last equality in the above computation follows from
Eq.~\eqref{eq:pre-antiflexible2} and from changing the index
$i$ to $j$ in the last term of the first summation.
Finally, we have
\begin{eqnarray*}
R'(x)&=&\sum_i\{c_j\otimes ((d_i\prec x)\succ d_j+d_j\prec(x\succ d_i))\otimes c_i-
((x\prec d_i)\succ d_j+d_j\prec (d_i\succ x))\otimes c_j\otimes c_i\cr
&-&(b_j\prec(d_i\succ x)+(x\prec d_i)\succ b_j ) \otimes a_j\otimes c_i+
a_j\otimes (b_j\prec (x\succ d_i)+(d_i\prec x)\succ b_j)\otimes c_i\}\cr
R'(x)&=&
[(\id\otimes R_{_{\prec}}(x)\otimes \id)\mathrm{r}_{_{\prec, 32}}]
\succ(\mathrm{r}_{_{\prec, 12}}+\mathrm{r}_{_{\succ, 12}})
+
[(\id\otimes L_{_{\succ}}(x)\otimes \id)\mathrm{r}_{_{\prec, 32}}]
\prec(\mathrm{r}_{_{\prec, 12}}+\mathrm{r}_{_{\succ, 12}})\cr
&-&
[(L_{_{\prec}}(x)\otimes\id \otimes \id)\mathrm{r}_{_{\prec, 31}}]
\succ(\mathrm{r}_{_{\prec, 21}}+\mathrm{r}_{_{\succ, 21}})
-
[(R_{_{\succ}}(x)\otimes\id \otimes \id)\mathrm{r}_{_{\prec, 31}}]
\prec (\mathrm{r}_{_{\prec, 21}}+\mathrm{r}_{_{\succ, 21}}).
\end{eqnarray*}
Therefore, the equivalences hold.
\end{proof}
\begin{rmk}
Considering the flipping map ``$flp$'' defined on $A\otimes A$ such that for any elements
$\mathrm{r}, \mathrm{r}'\in A\otimes A$,
$flp(\mathrm{r}\prec \mathrm{r}' )=
\mathrm{r}\succ \mathrm{r}'$,
$flp( \mathrm{r}\succ \mathrm{r}')=
\mathrm{r}\prec \mathrm{r}'$, we finally establish
the following relations
$
P(\mathrm{r})=flp(M(\mathrm{r})),\;
N(\mathrm{r})=\sigma_{13}(flp(M(\mathrm{r}))),\;
Q(\mathrm{r})=flp( \sigma_{13} (flp(M(\mathrm{r})))),
$
with $\sigma_{13}(x\otimes y\otimes z)=z\otimes y\otimes x$
for any $x,y,z \in A$. Besides,
we also have $N'(\mathrm{r})=flp(M'(\mathrm{r}))$ and
$Q'(\mathrm{r})=flp(P'(\mathrm{r}))$.
\end{rmk}
\begin{pro}
Let $(A, \prec, \succ)$ be a pre-anti-flexible algebra and
$\mathrm{r}_{_\prec}, \mathrm{r}_{_\succ} \in A\otimes A$. Define
the linear maps
$\Delta_{_{\prec}}, \Delta_{_{\succ}}:A\rightarrow A\otimes A$ by
Eq.~\eqref{eq:coboundary}.
Then $\Delta_{_\prec}^*, \Delta_{_{\succ}}^*: A^*\otimes A^*\rightarrow A^*$
define a pre-anti-flexible algebra structure on $A^*$
if and only if the following equations are satisfied for any $x\in A$
\begin{subequations}
\begin{eqnarray}\label{eq:ybe1'}
&&((\id\otimes\id\otimes L_{\succ}(x))-
(R_{\prec}(x)\otimes \id\otimes\id)\sigma_{13}\circ flp
+(\id\otimes \id \otimes R_{\prec}(x))flp
\cr&&-(L_{\succ}(x)\otimes\id \otimes \id)flp\circ
\sigma_{13} \circ flp)M(\mathrm{r})=0,
\end{eqnarray}
\begin{eqnarray}\label{eq:ybe2'}
\begin{array}{llll}
&&((\id \otimes\id\otimes L_{\cdot}(x))
+(\id \otimes \id \otimes R_{\cdot}(x))flp)M'(\mathrm{r})
\cr&&-((R_{\prec}(x)\otimes\id \otimes \id)
+(L_{\succ}(x)\otimes \id \otimes \id)flp)P'(\mathrm{r})+R'(x)=0,
\end{array}
\end{eqnarray}
\end{subequations}
where
$M(\mathrm{r})=\mathrm{r}_{_{\prec, 23}}\cdot \mathrm{r}_{_{\succ, 12}}+
\mathrm{r}_{_{\prec, 21}}\prec \mathrm{r}_{_{\prec, 13}}-
\mathrm{r}_{_{\succ, 13}} \succ \mathrm{r}_{_{\prec, 23}}$,
\begin{eqnarray*}
M'(\mathrm{r})&=&
\mathrm{r}_{_{\succ, 23}}\prec \mathrm{r}_{_{\succ, 12}}+
\mathrm{r}_{_{\succ, 21}}\succ \mathrm{r}_{_{\succ, 13}}-
\mathrm{r}_{_{\succ, 13}}\cdot \mathrm{r}_{_{\succ,23}}+
\mathrm{r}_{_{\succ, 23}}\succ (\mathrm{r}_{_{\prec, 12}}+
\mathrm{r}_{_{\succ, 12}})+(\mathrm{r}_{_{\prec, 21}}+
\mathrm{r}_{_{\succ, 21}})\prec \mathrm{r}_{_{\succ, 13}},\cr
P'(\mathrm{r})&=&
\mathrm{r}_{_{\prec, 32}}\prec \mathrm{r}_{_{\prec, 21}}+
\mathrm{r}_{_{\prec, 31}}\cdot \mathrm{r}_{_{\succ, 23}}-
\mathrm{r}_{_{\succ, 21}}\succ \mathrm{r}_{_{\prec , 31}}-
(\mathrm{r}_{_{\prec, 21}}+\mathrm{r}_{_{\succ, 21}})\prec \mathrm{r}_{_{\prec, 31}},\cr
R'(x)&=&
[(\id\otimes (R_{_{\prec}}(x)+L_{_{\succ}}(x))\otimes \id)
\mathrm{r}_{_{\prec, 32}}]\succ(\mathrm{r}_{_{\prec, 12}}+\mathrm{r}_{_{\succ, 12}})\cr
&-&
[((L_{_{\prec}}(x)+R_{_{\succ}}(x))\otimes\id \otimes \id)
\mathrm{r}_{_{\prec, 31}}]\succ(\mathrm{r}_{_{\prec, 21}}+\mathrm{r}_{_{\succ, 21}}).
\end{eqnarray*}
\end{pro}
\begin{thm}\label{thm_coboundary}
Let $(A, \prec, \succ)$ be a pre-anti-flexible algebra and
$\mathrm{r}_{_\prec}, \mathrm{r}_{_\succ} \in A\otimes A$. Define the linear maps
$\Delta_{_{\prec}}, \Delta_{_{\succ}}:A\rightarrow A\otimes A$ by
Eq.~\eqref{eq:coboundary}.
Then $(A, A^*)$ is a pre-anti-flexible bialgebra
if and only if $\mathrm{r}_{_\prec}, \mathrm{r}_{_\succ}$ satisfy
Eqs.~\eqref{eq:coboundary1}~-~\eqref{eq:coboundary4},
Eqs.~\eqref{eq:ybe1'} and \eqref{eq:ybe2'}.
\end{thm}
In view of symmetries brought out by equations characterizing
pre-anti-flexible bialgebras provided by the linear maps given
by Eq.~\eqref{eq:coboundary}, i.e.
Eqs.~\eqref{eq:coboundary1}~-~\eqref{eq:coboundary4},
Eqs.~\eqref{eq:ybe1'} and \eqref{eq:ybe2'}, we will consider pre-anti-flexible bialgebras
generated by $\mathrm{r}\in A\otimes A$ in the following cases.
\begin{itemize}
\item[Case1]
\begin{eqnarray}\label{eq:particular1-r}
\mathrm{r}_{_{\prec}}=\mathrm{r},\;\; \mathrm{r}_{_{\succ}}=
-\sigma \mathrm{r}, \quad \mathrm{r}\in A\otimes A.
\end{eqnarray}
\begin{cor}
Let $(A, \prec, \succ)$ be a pre-anti-flexible algebra and
$\mathrm{r}\in A\otimes A$. Then the maps $\Delta_{_{\prec}}, \Delta_{_{\succ}}$
defined by Eq.~\eqref{eq:coboundary} with $\mathrm{r}_{_{\succ}}, \mathrm{r}_{_{\prec}}$ given by
Eq.~\eqref{eq:particular1-r} induce a pre-anti-flexible algebra structure on $A^*$
such that $(A, A^*)$ is a pre-anti-flexible bialgebra if and only if
$\mathrm{r}$ satisfies the following equations
\begin{subequations}
\begin{eqnarray}\label{eq:A}
\begin{array}{lll}
&&(\id\otimes(L_{_{\succ}}(x\prec y)+R_{_{\prec}}(y\succ x))+
((L_{_{\succ}}(y\prec x)+R_{_{\prec}}(x\succ y))\otimes \id)
(\mathrm{r}-\sigma \mathrm{r})\cr
&&-
(R_{_{\prec}}(y)\otimes L_{_{\succ}}(x) +
L_{_{\succ}}(y)\otimes R_{_{\prec}}(x) )
(\mathrm{r}-\sigma \mathrm{r})=0.
\end{array}
\end{eqnarray}
\begin{eqnarray}\label{eq:B}
&&0=((R_{_{\prec}}(x)\otimes R_{_{\prec}}(y)+
L_{_{\succ}}(x)\otimes L_{_{\succ}}(y))+(L_{_{\succ}}(y)\otimes L_{_{\succ}}(x) +
R_{_{\prec}}(y)\otimes R_{_{\prec}}(x))
)(\mathrm{r}-\sigma \mathrm{r})\cr
&&+
((R_{_{\prec}}(x)L_{_{\succ}}(y)+L_{_{\succ}}(x)R_{_{\prec}}(y))\otimes \id+
(\id\otimes(L_{_{\succ}}(x)R_{_{\prec}}(y)+
R_{_{\prec}}(x)L_{_{\succ}}(y))))(\sigma \mathrm{r}- \mathrm{r})
\end{eqnarray}
\begin{eqnarray}\label{eq:C}
&&((\id\otimes\id\otimes L_{\succ}(x))-
(R_{\prec}(x)\otimes \id\otimes\id)\sigma_{13}\circ flp
+(\id\otimes \id \otimes R_{\prec}(x))flp\cr&&-
(L_{\succ}(x)\otimes\id \otimes \id)flp\circ \sigma_{13} \circ flp)M_1(\mathrm{r})=0,
\end{eqnarray}
\begin{eqnarray}\label{eq:D}
\begin{array}{llll}
&&((\id \otimes\id\otimes L_{\cdot}(x))
+(\id \otimes \id \otimes R_{\cdot}(x))flp)M_1'(\mathrm{r})+R_1'(x)
\cr&&-((R_{\prec}(x)\otimes\id \otimes \id)
+(L_{\succ}(x)\otimes \id \otimes \id)flp)P_1'(\mathrm{r})=0,
\end{array}
\end{eqnarray}
\end{subequations}
where $x,y\in A$,
\begin{eqnarray*}
M_1(\mathrm{r})&=&-\mathrm{r}_{_{23}}\cdot \mathrm{r}_{_{21}}+
\mathrm{r}_{_{21}}\prec \mathrm{r}_{_{13}}+
\mathrm{r}_{_{31}} \succ \mathrm{r}_{_{23}}\cr
M_1'(\mathrm{r})&=&
\mathrm{r}_{_{32}}\prec \mathrm{r}_{_{21}}+
\mathrm{r}_{_{12}}\succ \mathrm{r}_{_{31}}-
\mathrm{r}_{_{31}}\cdot \mathrm{r}_{_{32}}-
\mathrm{r}_{_{32}}\succ (\mathrm{r}_{_{12}}- \mathrm{r}_{_{21}})-
(\mathrm{r}_{_{21}}- \mathrm{r}_{_{12}})\prec \mathrm{r}_{_{31}},\cr
P_1'(\mathrm{r})&=&\mathrm{r}_{_{32}}\prec \mathrm{r}_{_{21}}-
\mathrm{r}_{_{31}}\cdot \mathrm{r}_{_{32}}+
\mathrm{r}_{_{12}}\succ \mathrm{r}_{_{31}}-
(\mathrm{r}_{_{21}}- \mathrm{r}_{_{12}})\prec \mathrm{r}_{_{31}}, \; \; \mbox{ and }\cr
R_1'(x)&=&[(\id\otimes (R_{_{\prec}}(x)+
L_{_{\succ}}(x))\otimes \id)\mathrm{r}_{_{\prec, 32}}]
\succ(\mathrm{r}_{_{12}}-\mathrm{r}_{_{21}})\cr&-&
[((L_{_{\prec}}(x)+R_{_{\succ}}(x))\otimes\id \otimes \id)\mathrm{r}_{_{31}}]
\succ(\mathrm{r}_{_{21}}-\mathrm{r}_{_{12}}).
\end{eqnarray*}
\end{cor}
\begin{rmk}
It is straightforward to identify $M_1'(\mathrm{r})=
P_1'(\mathrm{r})-\mathrm{r}_{_{32}}\succ (\mathrm{r}_{_{12}}- \mathrm{r}_{_{21}})$ and
setting in addition
$\sigma_{_{123}}: A\otimes A\otimes A \rightarrow A\otimes A\otimes A$, by
$\sigma_{_{123}}(x\otimes y\otimes z)=z\otimes y\otimes x$, we have
\begin{eqnarray*}
M_1'(\mathrm{r})=\sigma_{_{123}}(M_1(\mathrm{r}))-
\mathrm{r}_{_{32}}\succ (\mathrm{r}_{_{12}}- \mathrm{r}_{_{21}})-
(\mathrm{r}_{_{21}}- \mathrm{r}_{_{12}})\prec \mathrm{r}_{_{31}}.
\end{eqnarray*}
Besides, if in addition $\mathrm{r}$ commutes then $R_1'(x)=0$ and
Eqs.~\eqref{eq:A} and \eqref{eq:B}
are satisfied and finally $\mathrm{r}$ satisfies the following equation
\begin{eqnarray}\label{eq:AFPYBE}
\mathrm{r}_{_{23}}\cdot \mathrm{r}_{_{12}}=
\mathrm{r}_{_{12}}\prec \mathrm{r}_{_{13}}+
\mathrm{r}_{_{13}} \succ \mathrm{r}_{_{23}}.
\end{eqnarray}
\end{rmk}
\item[Case2]
\begin{eqnarray}\label{eq:particular2-r}
\mathrm{r}_{_{\prec}}+\mathrm{r}_{_{\succ}}=0,\;
\mathrm{r}_{_{\succ}}=\mathrm{r}, \quad \mathrm{r}\in A\otimes A.
\end{eqnarray}
\begin{cor}
Let $(A, \prec, \succ)$ be a pre-anti-flexible algebra and
$\mathrm{r}\in A\otimes A$. Then the maps $\Delta_{_{\prec}}, \Delta_{_{\succ}}$
defined by Eq.~\eqref{eq:coboundary} with
$\mathrm{r}_{_{\succ}}, \mathrm{r}_{_{\prec}}$ given by
Eq.~\eqref{eq:particular2-r} induce a pre-anti-flexible algebra structure on $A^*$
such that $(A, A^*)$ is a pre-anti-flexible bialgebra if and only if
$\mathrm{r}$ satisfies the following equations for any $x,y\in A$
\begin{subequations}
\begin{eqnarray}\label{eq:A'}
(R_{_\prec}(y)\otimes L_{\cdot}(x)+L_{\succ}(y)\otimes R_{\cdot}(x))(\mathrm{r}-
\sigma \mathrm{r})=0,
\end{eqnarray}
\begin{eqnarray}\label{eq:B'}
(L_{\succ}(x)\otimes L_{\cdot}(y)-R_{\prec}(y)\otimes R_{\cdot}(x)-
L_{\succ}(y)\otimes L_{\cdot}(x)+R_{\prec}(x)\otimes R_{\cdot}(y))(\mathrm{r}-
\sigma \mathrm{r})=0,
\end{eqnarray}
\begin{eqnarray}\label{eq:C'}
(R_{_{\succ}}(y)\otimes L_{_{\succ}}(x) +
L_{_{\prec}}(y) \otimes R_{_{\prec}}(x) )(\mathrm{r}-\sigma \mathrm{r})=0,
\end{eqnarray}
\begin{eqnarray}\label{eq:D'}
(R_{_{\prec}}(x)\otimes R_{_{\succ}}(y)+
L_{_{\succ}}(x)\otimes L_{_{\prec}}(y)+
L_{_{\prec}}(y)\otimes L_{_{\succ}}(x)+
R_{_{\succ}}(y)\otimes R_{_{\prec}}(x))(\mathrm{r}-\sigma \mathrm{r})=0,
\end{eqnarray}
\begin{eqnarray}\label{eq:E'}
&&((\id\otimes\id\otimes L_{\succ}(x))-
(R_{\prec}(x)\otimes \id\otimes\id)\sigma_{13}\circ flp
+(\id\otimes \id \otimes R_{\prec}(x))flp
\cr&&-(L_{\succ}(x)\otimes\id \otimes \id)flp\circ
\sigma_{13} \circ flp)M_2(\mathrm{r})=0,
\end{eqnarray}
\begin{eqnarray}\label{eq:F'}
\begin{array}{llll}
&&((\id \otimes\id\otimes L_{\cdot}(x))
+(\id \otimes \id \otimes R_{\cdot}(x))flp)M_2'(\mathrm{r})
\cr&&-((R_{\prec}(x)\otimes\id \otimes \id)
+(L_{\succ}(x)\otimes \id \otimes \id)flp)P_2'(\mathrm{r})=0,
\end{array}
\end{eqnarray}
\end{subequations}
where
\begin{eqnarray*}
M_2(\mathrm{r})&=&-\mathrm{r}_{_{23}}\cdot \mathrm{r}_{_{12}}+
\mathrm{r}_{_{21}}\prec \mathrm{r}_{_{13}}+
\mathrm{r}_{_{13}} \succ \mathrm{r}_{_{23}},\cr
M_2'(\mathrm{r})&=&
-\mathrm{r}_{_{13}}\cdot \mathrm{r}_{_{23}}+
\mathrm{r}_{_{23}}\prec \mathrm{r}_{_{12}}+
\mathrm{r}_{_{21}}\succ \mathrm{r}_{_{13}},\cr
P_2'(\mathrm{r})&=&
-\mathrm{r}_{_{31}}\cdot \mathrm{r}_{_{23}}+
\mathrm{r}_{_{32}}\prec \mathrm{r}_{_{21}}+
\mathrm{r}_{_{21}}\succ \mathrm{r}_{_{31}}.
\end{eqnarray*}
\end{cor}
\begin{rmk}
Clearly, we have $P_2'(\mathrm{r})=\sigma_{_{123}}(M_2(\mathrm{r}))$
and if $\mathrm{r}$ commutes then
$P_2'(\mathrm{r})=M_2'(\mathrm{r})$ and Eqs.~\eqref{eq:A'}~-~\eqref{eq:D'}
are satisfied and finally
$\mathrm{r}$ satisfies Eq.~\eqref{eq:AFPYBE}.
\end{rmk}
\end{itemize}
We finally deduce the following pre-anti-flexible bialgebras
provided by a given $\mathrm{r}\in A\otimes A$ possessing
some internal symmetries while browsing $A\otimes A$.
\begin{cor}
Let $(A, \prec, \succ)$ be a pre-anti-flexible algebra and
consider symmetric element $\mathrm{r}\in A\otimes A$
satisfying Eq.~\eqref{eq:AFPYBE}. Then the linear maps
$\Delta_{_{\prec}}, \Delta_{_{\succ}}$
defined by Eq.~\eqref{eq:coboundary} with $\mathrm{r}_{_{\succ}}=\mathrm{r}$
and $\mathrm{r}_{_{\prec}}=-\mathrm{r}$ induce a
pre-anti-flexible algebra structure on $A^*$ such that
$(A, A^*)$ is a pre-anti-flexible bialgebra.
\end{cor}
\begin{defi}
Let $(A, \prec, \succ)$ be a pre-anti-flexible algebra and $\mathrm{r} \in A \otimes A$.
The Eq.~\eqref{eq:AFPYBE} is called the
\textbf{pre-anti-flexible Yang-Baxter equation} (PAFYBE) in $(A, \prec, \succ)$.
\end{defi}
\begin{rmk}
We introduce the notion of the pre-anti-flexible Yang-Baxter equation in pre-anti-flexible algebras
as an analogue of the anti-flexible Yang-Baxter
equation in anti-flexible algebras (\cite{DBH3}), the
classical Yang-Baxter equation in Lie algebras (\cite{Drinfeld}), the
associative Yang-Baxter equation in associative algebras (\cite{Aguiar, Bai_Double})
and the $\mathcal{D}$-equation in dendriform algebras (\cite{Bai_Double}).
For no other apparent reason than that
both dendriform and pre-anti-flexible algebras possess the
same shape of dual bimodules (see Remark~\ref{rmk_useful}~\eqref{dual-bimodule}),
to our amazement, the $\mathcal{D}$-equation
in dendriform algebras and the PAFYBE in pre-anti-flexible algebras
have the same form, given by Eq.~\eqref{eq:AFPYBE}.
This parallels the relation between the associative Yang-Baxter equation in associative algebras
and the anti-flexible Yang-Baxter equation in anti-flexible algebras (\cite{DBH3}).
\end{rmk}
\section{Solutions of the pre-anti-flexible Yang-Baxter equation}\label{section5}
Let $A$ be a vector space. For any $\mathrm{r} \in A \otimes A$, $\mathrm{r}$
can be regarded as a linear map $\mathrm{r}:A^*\rightarrow A$
in the following way:
\begin{equation*}
\langle \mathrm{r}, u^*\otimes v^*\rangle=
\langle \mathrm{r}(u^*), v^* \rangle, \;\; \forall u^*, v^*\in A^*.
\end{equation*}
As the PAFYBE in pre-anti-flexible algebras has the same form as the
$\mathcal{D}$-equation in dendriform algebras, we omit the proofs (too similar
to the case of dendriform algebras and the related $\mathcal{D}$-equation) of the following
statements, in which $(A, \prec, \succ)$ is a pre-anti-flexible algebra.
\begin{pro}\label{pro_pre_anti_flexible dual}
For a given $\mathrm{r}\in A\otimes A$, $\mathrm{r}$ is a symmetric solution of
the PAFYBE in $A$ if and only if for any $x\in A$ and any $a, b\in A^*$
\begin{eqnarray}\label{eq:pre-anti-flexible-dual}
\begin{array}{llllllllll}
a\prec b&=&-R_{_{\succ}}^*(\mathrm{r}(a))b+L_{\cdot}^*(\mathrm{r}(b))a,\;
a\succ b= R^*_{\cdot}(\mathrm{r}(a))b-L_{_{\prec}}^*(\mathrm{r}(b))a,\;\cr
a\cdot b&=& a\prec b+a\succ b =
R^*_{_{\prec}}(\mathrm{r}(a))b+L_{_{\succ}}^*(\mathrm{r}(b))a, \;\cr
x\prec a&=& x\prec \mathrm{r}(a)+\mathrm{r}(R^*_{_{\succ}}(x)a)-R^*_{_{\succ}}(x)a,\;
x\succ a=x\succ \mathrm{r}(a)-\mathrm{r}(R^*_{\cdot}(x)a)+R_{\cdot}^*(x)a, \cr
x\cdot a&=&x\cdot \mathrm{r}(a)-\mathrm{r}(R^*_{_{\prec}}(x)a)+R^*_{_{\prec}}(x)a,\;
a\cdot x= \mathrm{r}(a)\cdot x-\mathrm{r}(L^*_{_{\succ}}(x)a)+L^*_{_{\succ}}(x)a, \cr
a\prec x&=&\mathrm{r}(a)\prec x- \mathrm{r}(L^*_{\cdot}(x)a)+L^*_{\cdot}(x)a, \;
a\succ x=\mathrm{r}(a)\succ x+ \mathrm{r}(L^*_{_{\prec}}(x)a)-L^*_{_{\prec}}(x)a.
\end{array}
\end{eqnarray}
\end{pro}
\begin{thm}
Consider a symmetric and non-degenerate element $\mathrm{r}\in A\otimes A$.
Then $\mathrm{r}$ is a solution of the PAFYBE in $A$ if and only if
the inverse of the homomorphism $A^*\rightarrow A$ induced by $\mathrm{r}$,
regarded as a bilinear form $\mathfrak{B}$ on $A$
(i.e. $\mathfrak{B}(x, y)= \langle \mathrm{r}^{-1}(x), y\rangle$
for any $x,y\in A$), satisfies
\begin{equation}\label{eq:2-cocycle}
\mathfrak{B}(x\cdot y, z)=\mathfrak{B}(y, z\prec x)+
\mathfrak{B}(x, y\succ z), \mbox{ for any } x,y,z\in A.
\end{equation}
\end{thm}
\begin{cor}
Let $\mathrm{r}\in A\otimes A$ be a symmetric solution of the PAFYBE in $A$.
Denote in addition
by ``$\prec_{_{ A^*}}, \succ_{_{ A^*}}$'' the pre-anti-flexible algebra structure on $A^*$
induced by $\mathrm{r}$ via Proposition~\ref{pro_pre_anti_flexible dual}. Then we have for
any $a,b\in A^*$
\begin{equation*}
a\prec_{_{ A^*}} b=\mathrm{r}^{-1}(\mathrm{r}(a)\prec_{_ A} \mathrm{r}(b)), \;
a\succ_{_{ A^*}} b=\mathrm{r}^{-1}(\mathrm{r}(a)\succ_{_ A} \mathrm{r}(b)).
\end{equation*}
Therefore, $\mathrm{r}:A^*\rightarrow A$ is an isomorphism of pre-anti-flexible algebras.
\end{cor}
\begin{thm}
Let $(A, \prec, \succ)$ be a pre-anti-flexible algebra and
$\mathrm{r}\in A\otimes A$ symmetric.
Then, $\mathrm{r}$ is a solution of {PAFYBE} if and only if it satisfies
\begin{equation*}
\mathrm{r}(a)\cdot\mathrm{r}(b)=\mathrm{r}(R^*_{_{\prec}}(\mathrm{r}(a))b+
L^*_{_{\succ}}(\mathrm{r}(b))a ), \;\forall a,b \in A^*.
\end{equation*}
\end{thm}
Recall that an $\mathcal{O}$-operator related to the bimodule $(l, r, V )$
of an anti-flexible algebra $(A, \cdot)$ is a linear map $T :V\rightarrow A$ satisfying
\begin{equation*}
T (u) \cdot T (v) = T (l(T (u))v + r(T (v))u), \; \forall u, v \in V.
\end{equation*}
In addition, for a given pre-anti-flexible algebra $(A, \prec, \succ)$, according
to Proposition~\ref{prop_operation_bimodule_pre_anti_flexible}~\eqref{eq:one},
$(L_{_{\succ}},R_{_{\prec}}, A)$ is a bimodule of
its underlying anti-flexible algebra $aF(A)$. Furthermore, for any $x,y\in A$
\begin{eqnarray}\label{eq:o-operator}
\id(x)\cdot \id(y)= \id (L_{_{\succ}}(\id(x))y+R_{_{\prec}}(\id(y))x),
\end{eqnarray}
then $\id:A\rightarrow A$ is an $\mathcal{O}$-operator of $aF(A)$ associated to
the bimodule $(L_{_{\succ}}, R_{_{\prec}}, A)$.
\begin{cor}
Consider a symmetric element $\mathrm{r}\in A\otimes A$.
Then $\mathrm{r}$ is a solution
of PAFYBE in $A$ if and only if it is an $\mathcal{O}$-operator
of the underlying anti-flexible algebra $aF(A)$ associated to the bimodule
$(R^*_{_{\prec}}, L^*_{_{\succ}}, A^*)$. Furthermore, there is a pre-anti-flexible
algebra structure on $A^*$ given by
\begin{eqnarray*}
a\prec b=L^*_{_{\succ}}(\mathrm{r}(b))a;\;\;
a\succ b=R^*_{_{\prec}}(\mathrm{r}(a))b;\; \forall a,b\in A^*,
\end{eqnarray*}
which is the same of that associated to the pre-anti-flexible bialgebra derived on $A^*$
by Eq.~\eqref{eq:pre-anti-flexible-dual}. If in addition $\mathrm{r}$ is
non-degenerate, then there is a new compatible pre-anti-flexible algebraic
structure given on $A$ by
\begin{eqnarray*}
x\prec'y=\mathrm{r}(L^*_{_{\succ}}(y)\mathrm{r}^{-1}(x)),\;
x\succ'y=\mathrm{r}(R^*_{_{\prec}}(x)\mathrm{r}^{-1}(y)), \;\forall x,y\in A,
\end{eqnarray*}
which is the pre-anti-flexible algebra structure given by
\begin{eqnarray*}
\mathfrak{B}(x\prec'y, z)=\mathfrak{B}(x, y\ast z), \;
\mathfrak{B}(x\succ'y, z)=\mathfrak{B}(y, z\cdot x), \;\forall x,y,z\in A,
\end{eqnarray*}
where $\mathfrak{B}$ is given by $
\mathfrak{B}(x,y)=\langle \mathrm{r}^{-1}(x), y\rangle$ for
any $x,y\in A$ and satisfies Eq.~\eqref{eq:2-cocycle}.
\end{cor}
Taking into account \cite[Proposition 2.7.]{DBH3} we have
\begin{thm}
Let $(A, \cdot)$ be an anti-flexible algebra, $(l,r, V)$ a bimodule
of $(A, \cdot)$ and $T:V\rightarrow A$ an $\mathcal{O}$-operator associated
to $(l,r, V)$. Then $\mathrm{r}=T+\sigma T$ is a symmetric solution of
the PAFYBE in $T(V)\ltimes_{r^*, 0,0, l^*} V^*$, where
$T(V)\subset A$ is endowed with a pre-anti-flexible algebra structure given,
for any $u,v\in V$, by
\begin{eqnarray*}
T(u)\prec T(v)=T(r(T(v))u), \;
T(u)\succ T(v)=T(l(T(u))v),
\end{eqnarray*}
such that $(r^*, 0,0, l^*, T(V)^*)$ is its associated bimodule
and underlying anti-flexible algebra is a sub-algebra of $A$, and finally
$T$ can be identified with an element in
$T(V)\otimes V^*\subset(T(V)\ltimes_{r^*, 0,0, l^*} V^*)\otimes
T(V)\ltimes_{r^*, 0,0, l^*} V^*$.
\end{thm}
Considering the above theorem,
Proposition~\ref{prop_operation_bimodule_pre_anti_flexible}~\eqref{eq:one} and
Eq.~\eqref{eq:o-operator}, we have
\begin{cor}
Let $(A, \prec, \succ)$ be a $n$-dimensional pre-anti-flexible algebra. Then the element
\begin{eqnarray*}
\mathrm{r}=\sum_{i=1}^{n} (e_i\otimes e_i^*+e_i^*\otimes e_i)
\end{eqnarray*}
is a symmetric solution of PAFYBE in
$A\ltimes_{R_{_{\prec}}^*, 0,0, L_{_{\succ}}^*} A^*$,
where $\{e_1, \cdots , e_n\}$ is a basis of $A$ and
$\{e^*_1, \cdots , e^*_n\}$ its associated dual basis.
Furthermore, $\mathrm{r}$ is non-degenerate and its
induced bilinear form $\mathfrak{B}$ on
$A\ltimes_{R_{_{\prec}}^*, 0,0, L_{_{\succ}}^*} A^*$
is given by
\begin{eqnarray*}
\mathfrak{B}(x+a, y+b)=\langle x, b\rangle+\langle y, a\rangle, \;
\forall x,y\in A, a, b\in A^*.
\end{eqnarray*}
\end{cor}
\noindent
{\bf Acknowledgments.}
The author thanks Professor C. Bai for helpful discussions and
his encouragement, and Nankai ZhiDe Foundation.
\end{document} |
\begin{document}
\theoremstyle{plain}
\newtheorem{example}{Example}
\newtheorem{definition}{Definition}
\newtheorem{remark}{Remark}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\newtheorem{proposition}{Proposition}
\newtheorem{corollary}{Corollary}
\newtheorem{conjecture}{Conjecture}
\title{A pairing in homology and the category of linear complexes
of tilting modules for a quasi-hereditary algebra}
\author{Volodymyr Mazorchuk and Serge Ovsienko}
\date{}
\maketitle
\begin{abstract}
We show that there exists a natural non-degenerate pairing of
the homomorphism space between two neighbor standard modules over a
quasi-hereditary algebra with the first extension space between the
corresponding costandard modules and vise versa. Investigation
of this phenomenon leads to a family of pairings involving
standard, costandard and tilting modules. In the graded case,
under some "Koszul-like" assumptions (which we prove are
satisfied for example for the blocks of the category $\mathcal{O}$),
we obtain a non-degenerate pairing between certain graded
homomorphism and graded extension spaces. This motivates the study
of the category of linear tilting complexes for graded
quasi-hereditary algebras. We show that, under assumptions,
similar to those mentioned above, this category realizes
the module category for the Koszul dual of the Ringel dual
of the original algebra. As a corollary we obtain that under
these assumptions the Ringel and Koszul dualities commute.
\end{abstract}
\section{Introduction and description of the results}\label{s1}
Let $\Bbbk$ be an algebraically closed field. If the opposite is
not emphasized, in this paper by a module we mean a {\em left} module
and we denote by $\operatorname{Rad}(M)$ the radical of a module, $M$. For a
${\Bbbk}$-vector space, $V$, we denote the dual space by $V^*$.
Let $A$ be a basic $\Bbbk$-algebra, which is quasi-hereditary
with respect to the natural order on the indexing set
$\{1,2,\dots,n\}$ of pairwise-orthogonal primitive idempotents
$e_i$ (see \cite{CPS1,DR,DR2} for details). Let $P(i)$, $\Delta(i)$,
$\nabla(i)$, $L(i)$, and $T(i)$ denote the projective, standard,
costandard, simple and tilting $A$-modules, associated to $e_i$,
$i=1,\dots, n$, respectively. Set $P=\oplus_{i=1}^n P(i)$,
$\Delta=\oplus_{i=1}^n \Delta(i)$, $\nabla=\oplus_{i=1}^n \nabla(i)$,
$L=\oplus_{i=1}^n L(i)$, $T=\oplus_{i=1}^n T(i)$.
We remark that, even if the standard $A$-modules are fixed, the
linear order on the indexing set of primitive idempotents, with respect
to which the algebra $A$ is quasi-hereditary, is not unique in general.
We denote by $R(A)$ and $E(A)$ the Ringel and Koszul duals of $A$
respectively. A graded algebra, $B=\oplus_{i\in\mathbb{Z}}B_i$, will be called
{\em positively graded} provided that $B_i=0$ for all $i<0$ and
$\operatorname{Rad}(B)=\oplus_{i>0}B_i$.
This paper has started from an attempt to give a conceptual explanation
for the equality
\begin{equation}\label{introeq1}
\dim\operatorname{Hom}_{A}(\Delta(i-1),\Delta(i))=
\dim\operatorname{Ext}^1_A(\nabla(i),\nabla(i-1)),
\end{equation}
which is proved at the beginning of Section~\ref{s2}. Our first main
result, proved also in Section~\ref{s2}, is the following statement:
\begin{theorem}\label{introt1}
\begin{enumerate}[(1)]
\item\label{introt1.1} Let $i,j\in\{1,\dots,n\}$ and $j<i$. Then there
exists a bilinear pairing,
\begin{displaymath}
\langle\cdot\, , \cdot\rangle:
\operatorname{Hom}_A\left(\Delta(j),\Delta(i)\right)\times
\operatorname{Ext}_A^1\left(\nabla(i),\nabla(j)\right)\to\Bbbk.
\end{displaymath}
\item\label{introt1.2} If $j=i-1$, then $\langle\cdot\, ,\cdot\rangle$
is non-degenerate.
\end{enumerate}
\end{theorem}
Theorem~\ref{introt1} explains the origins of \eqref{introeq1} and motivates
the study of $\langle\cdot\, ,\cdot\rangle$. It happens that in the
general case, that is for $j<i-1$, the analogue of
Theorem~\ref{introt1}\eqref{introt1.2} is no longer true. We give an example at
the beginning of Section~\ref{s3}. In the same section we present some
special results and a modification of $\langle\cdot\, ,\cdot\rangle$
in the general case.
An attempt to lift the above results to higher $\operatorname{Ext}$'s naturally led us
to the definition of a different pairing, which uses a minimal tilting resolution
of the costandard module. In Section~\ref{s4} we construct and investigate
a pairing between $\operatorname{Ext}^l_A(\nabla,\nabla)$ and $\operatorname{Hom}_A(\Delta,T_l)$, where
$T_l$ is the $l$-th component of a minimal tilting resolution of $\nabla$.
In the case $l=1$ this new pairing induces the one we have constructed
in Section~\ref{s2}.
The new pairing is rarely non-degenerate. In an attempt to find some
conditions, which would ensure this property, we naturally came to the
graded case. In Section~\ref{s5} we show that in the graded case our new
pairing induces a non-degenerate pairing between the graded homomorphism
and the graded first extension spaces under the condition that the
costandard modules admit linear tilting resolutions. Here the linearity of
the resolution means the following: we show that for a positively graded
quasi-hereditary algebra all tilting modules are gradable and thus we can
fix their graded lifts putting their "middles" in degree $0$; the linearity
of the resolution now means that the $i$-th term of the resolution consists
only of tilting modules, whose "middles" are exactly in degree $i$.
This observation brings the linear complexes of tilting modules into
the picture and serves as a bridge to the second part of the paper, in which
we study the category of all such linear complexes.
The above mentioned condition of the existence of a linear tilting
resolution for costandard $A$-modules immediately resembles the conditions,
which appeared in \cite{ADL} during the study of the following question: when
the Koszul dual of a quasi-hereditary algebra is quasi-hereditary with
respect to the opposite order? In \cite[Theorem~3]{ADL} it was shown that
this is the case if and only if both the standard and costandard $A$-modules
admit a linear projective and injective (co)resolution respectively
(algebras, satisfying these conditions, were called {\em standard Koszul}
in \cite{ADL}). This resemblance motivated us to take a closer look at the
category of linear complexes of tilting $A$-modules. The most striking property
of this category is the fact that it combines two objects of completely different
natures: tilting modules for a quasi-hereditary algebra, which give rise
to the so-called {\em Ringel duality}; and linear resolutions, which are
the source of a completely different duality, namely the {\em Koszul duality}.
Under some natural assumptions, which roughly mean that all objects we consider
are well-defined and well-coordinated with each other, in
Section~\ref{s55} we prove our second main result:
\begin{theorem}\label{introt2}
Assume that $A$ is a positively graded quasi-hereditary algebra, such that
\begin{enumerate}[(i)]
\item standard $A$-modules admit a linear tilting coresolution,
\item costandard $A$-modules admit a linear tilting resolution.
\end{enumerate}
The above conditions imply that the quadratic dual $R(A)^!$ of $R(A)$ is
quasi-hereditary (with respect to the same order as for $A$), and we
further assume that
\begin{enumerate}[(i)]
\setcounter{enumi}{3}
\item the grading on $R(R(A)^!)$, induced from the category of graded
$R(A)^!$-modules, is positive.
\end{enumerate}
Then the algebras $A$, $R(A)$, $E(A)$, $R(E(A))$ and $E(R(A))$ are standard
Koszul quasi-hereditary algebras, moreover, $E(R(A))\cong R(E(A))$ as
quasi-hereditary algebras. In other words, Koszul and Ringel dualities commute
on $A$.
\end{theorem}
As a preparatory result for this theorem we show that, under the same
assumptions, the category of bounded linear complexes of tilting $A$-modules
is equivalent to the category of graded modules over $E(R(A))^{opp}$.
Moreover, this realization preserves (in some sense) standard and costandard
modules but switches simple and tilting modules.
We finish the paper with proving that all conditions of Theorem~\ref{introt2}
are satisfied for the associative algebras, associated with the blocks of the
BGG category $\mathcal{O}$. This is done in Section~\ref{s6}.
In the same section we also derive some consequences for these algebras,
in particular, about the structure of tilting modules. The paper is finished
with an Appendix, written by Catharina Stroppel, where it is shown that
all conditions of Theorem~\ref{introt2} are satisfied for the associative
algebras, associated with the blocks of the parabolic category $\mathcal{O}$
in the sense of \cite{RC}. As the main tool in the proof of the last result,
it is shown that Arkhipov's twisting functor on $\mathcal{O}$ (see \cite{AS,KM})
is gradable.
For an abelian category, $\mathcal{A}$, we denote by
$D^b(\mathcal{A})$ the corresponding bounded derived category and by
$K(\mathcal{A})$ the corresponding homotopic category.
In particular, for an associative algebra, $B$, we denote by $D^b(B)$
the bounded derived category of $B\mathrm{-mod}$ and by $K(B)$ the homotopic
category of $B\mathrm{-mod}$. For $M\in B\mathrm{-mod}$
we denote by $M^{\bullet}$ the complex defined via
$M^{0}=M$ and $M^{i}=0$, $i\neq 0$.
We will say that a module, $M$, is {\em Ext-injective} (resp.
{\em Ext-projective}) {\em with respect to a module}, $N$, provided that
$\operatorname{Ext}^{k}(X,M)=0$, $k>0$, (resp. $\operatorname{Ext}^{k}(M,X)=0$, $k>0$) for any
subquotient $X$ of $N$.
When we say that a graded algebra is Koszul, we mean that it is Koszul
with respect to this grading.
\section{A bilinear pairing between $\ensuremath{\mathcal{H}}om_A$ and $\operatorname{Ext}^1_A$}\ensuremath{\lambda}abel{s2}
The following observation is the starting point of this paper.
Fix $1<i\leq n$. According to the classical BGG-reciprocity for
quasi-hereditary algebras (see for example \cite[Lemma~2.5]{DR2}),
we have that $[I(i-1):\nabla(i)]=[\Delta(i):L(i-1)]$,
where the first number is the multiplicity of $\nabla(i)$ in a
costandard filtration of $I(i-1)$, and the second number is the
usual composition multiplicity. The quasi-heredity of $A$,
in particular, implies that $\Delta(i-1)$ is Ext-projective with
respect to $\operatorname{rad}(\Delta(i))$ and
hence $[\Delta(i):L(i-1)]=\dim\operatorname{Hom}_{A}(\Delta(i-1),\Delta(i))$.
The number $[I(i-1):\nabla(i)]$ can also be reinterpreted. Again,
the quasi-heredity of $A$ implies that any non-zero element from
$\operatorname{Ext}^1_A(\nabla(i),\nabla(i-1))$ is in fact lifted from a
non-zero element of $\operatorname{Ext}^1_A(L(i),\nabla(i-1))$ via the map,
induced by the projection $\Delta(i)\twoheadrightarrow L(i)$. Since
$\nabla(i-1)$ has simple socle, it further follows
that any non-zero element from $\operatorname{Ext}^1_A(L(i),\nabla(i-1))$ corresponds
to a submodule of $I(i-1)$ with simple top $L(i)$. From this one easily
derives that $[I(i-1):\nabla(i)]=\dim\operatorname{Ext}^1_A(\nabla(i),\nabla(i-1))$.
Altogether, we obtain that $\dim\operatorname{Hom}_{A}(\Delta(i-1),\Delta(i))=
\dim\operatorname{Ext}^1_A(\nabla(i),\nabla(i-1))$. In the present section we show
that the spaces $\operatorname{Hom}_{A}(\Delta(i-1),\Delta(i))$ and
$\operatorname{Ext}^1_A(\nabla(i),\nabla(i-1))$ are connected via a non-degenerate
bilinear pairing in a natural way.
For every $i=1,\dots,n$ we fix a non-zero homomorphism,
$\alpha_i:\Delta(i)\to\nabla(i)$.
Remark that $\alpha_i$ is unique up to a scalar and maps the top of
$\Delta(i)$ to the socle of $\nabla(i)$. For $j<i$ let
$f:\Delta(j)\to\Delta(i)$ be some homomorphism and
$\xi:\nabla(j)\overset{\beta}{\hookrightarrow} X
\overset{\gamma}{\twoheadrightarrow}\nabla(i)$ be a short exact sequence.
Consider the following diagram:
\begin{equation}\label{eq2.1}
\xymatrix{
0\ar[rr] && \nabla(j)\ar[rr]^{\beta} && X\ar[rr]^{\gamma}
&& \nabla(i)\ar[rr] && 0 \\
&& \Delta(j)\ar@{-->}[u]^{\alpha_j}\ar[rrrr]^{f} && &&
\Delta(i) \ar@{-->}[u]_{\alpha_i}\ar@{=>}[ull]_{\varphi} &&
}.
\end{equation}
Since $j<i$, we have $\operatorname{Ext}_A^1(\Delta(i),\nabla(j))=0$ and
$\operatorname{Hom}_A(\Delta(i),\nabla(j))=0$. Hence
\begin{displaymath}
\operatorname{Hom}_A(\Delta(i),X)\cong
\operatorname{Hom}_A(\Delta(i),\nabla(i)),
\end{displaymath}
which means that $\alpha_i$ admits
a unique lifting, $\varphi:\Delta(i)\to X$, such that the triangle in
\eqref{eq2.1} commutes. Further, $L(j)$ occurs exactly once in the socle
of $X$ and $\beta\circ \alpha_j$ is a projection of
$\Delta(j)$ onto this socle $L(j)$-component of $X$.
On the other hand, since $\gamma\circ \varphi=\alpha_i$, it follows that
$\varphi(\operatorname{rad}(\Delta(i)))\subset\beta(\nabla(j))$.
Since $[\nabla(j):L(j)]=1$, it follows that
the composition $\varphi\circ f$ is a projection of
$\Delta(j)$ onto the socle $L(j)$-component of $X$ as well.
Since $\Bbbk$ is algebraically closed, we get that
$\beta\circ \alpha_j$ and $\varphi\circ f$ differ only by a
scalar (they are not the same in general as
$\beta\circ \alpha_j$ does depend on the choice of $\alpha_j$ and
$\varphi\circ f$ does not). Hence we can denote by
$\langle f,\xi\rangle$ the unique element from ${\Bbbk}$
such that $\langle f,\xi\rangle \left(\varphi\circ f\right)=
\beta\circ \alpha_j$.
\begin{lemma}\label{l2.1}
\begin{enumerate}[(1)]
\item Let $\xi':\nabla(j)\hookrightarrow Y\twoheadrightarrow\nabla(i)$ be a short
exact sequence, which is congruent to $\xi$. Then
$\langle f,\xi\rangle=\langle f,\xi'\rangle$ for any $f$ as above.
In particular, $\langle \cdot \, ,\cdot\rangle$ induces a map
from $\operatorname{Hom}_A(\Delta(j),\Delta(i))\times \operatorname{Ext}_A^1(\nabla(i),\nabla(j))$
to ${\Bbbk}$ (we will denote the induced map by the same symbol
$\langle \cdot \, ,\cdot\rangle$ abusing notation).
\item The map $\langle \cdot \, ,\cdot\rangle:
\operatorname{Hom}_A(\Delta(j),\Delta(i))\times \operatorname{Ext}_A^1(\nabla(i),\nabla(j))\to
{\Bbbk}$ is bilinear.
\end{enumerate}
\end{lemma}
\begin{proof}
This is a standard direct calculation.
\end{proof}
Note that the form $\langle \cdot \, ,\cdot\rangle$ is independent,
up to a non-zero scalar, of the choice of $\alpha_i$ and $\alpha_j$.
Since the algebras $A$ and $A^{opp}$ are quasi-hereditary
simultaneously, using the dual arguments one constructs a form,
\begin{displaymath}
\langle \cdot \, ,\cdot\rangle':
\operatorname{Hom}_A(\nabla(i),\nabla(j))\times \operatorname{Ext}_A^1(\Delta(j),\Delta(i))\to
{\Bbbk},
\end{displaymath}
and proves a dual version of Lemma~\ref{l2.1}.
\begin{theorem}\label{t2.2}
Let $j=i-1$. Then the bilinear form $\langle \cdot \, ,\cdot\rangle$
constructed above is non-degenerate.
\end{theorem}
We remark that in the case $j<i-1$ the analogous statement is not
true in general, see the example at the beginning of Section~\ref{s3}.
\begin{proof}
First let us fix a non-zero $f:\Delta(i-1)\to\Delta(i)$. Since
$\Delta(i-1)$ has simple top, there exists a unique submodule
$M\subset \Delta(i)$, which is maximal with respect to the condition
$p\circ f\neq 0$,
where $p:\Delta(i)\to \Delta(i)/M$ is the natural projection. Denote
$N=\Delta(i)/M$. The module $N$ has simple socle, which is isomorphic
to $L(i-1)$, and $p\circ f:\Delta(i-1)\to N$ is a non-zero projection
onto the socle of $N$. Now we claim that
$\operatorname{rad}(N)\hookrightarrow \nabla(i-1)$. Indeed, $\operatorname{rad}(N)\subset
\operatorname{rad}(\Delta(i))$ and hence it can have only composition subquotients
of the form $L(t)$, $t<i$, since $A$ is quasi-hereditary. But since
$\operatorname{rad}(N)$ has the simple socle $L(i-1)$, the quasi-heredity of $A$ implies
$\operatorname{rad}(N)\hookrightarrow \nabla(i-1)$ as well. Let $C$ denote the
cokernel of this inclusion. The module $N$ is an extension of
$\operatorname{rad}(N)$ by $L(i)$ and is indecomposable. This implies that the short
exact sequence $\xi:\operatorname{rad}(N)\hookrightarrow N\twoheadrightarrow L(i)$ represents a
non-zero element in $\operatorname{Ext}_A^1(L(i),\operatorname{rad}(N))$. Let us apply
$\operatorname{Hom}_A(L(i),{}_-)$ to the short exact sequence
$\operatorname{rad}(N)\hookrightarrow \nabla(i-1)\twoheadrightarrow C$
and remark that $\operatorname{Hom}_A(L(i),C)=0$ as $[C:L(s)]\neq 0$ implies
$s<i-1$ by above. This gives us an inclusion,
$\operatorname{Ext}_A^1(L(i),\operatorname{rad}(N))\hookrightarrow \operatorname{Ext}_A^1(L(i),\nabla(i-1))$,
and hence there exists a short exact sequence,
$\xi':\nabla(i-1)\hookrightarrow N'\twoheadrightarrow L(i)$, induced
by $\xi$.
\begin{lemma}\label{l2.3}
$\operatorname{Ext}_A^1(L(i),\nabla(i-1))\cong \operatorname{Ext}_A^1(\nabla(i),\nabla(i-1))$.
\end{lemma}
\begin{proof}
We apply $\operatorname{Hom}_A({}_-,\nabla(i-1))$ to the short exact sequence
$L(i)\hookrightarrow \nabla(i)\twoheadrightarrow D$, where $D$ is the
cokernel of the inclusion $L(i)\hookrightarrow \nabla(i)$.
This gives the following part in the long exact sequence:
\begin{displaymath}
\operatorname{Ext}_A^{1}(D,\nabla(i-1))\to\operatorname{Ext}_A^1(\nabla(i),\nabla(i-1))\to
\operatorname{Ext}_A^1(L(i),\nabla(i-1))\to\operatorname{Ext}_A^{2}(D,\nabla(i-1)).
\end{displaymath}
But $D$ contains only simple subquotients of the form
$L(s)$, $s\leq i-1$. This means that $\nabla(i-1)$ is Ext-injective
with respect to $D$ because of the quasi-heredity of $A$ and
proves the statement.
\end{proof}
Applying Lemma~\ref{l2.3} we obtain that the sequence $\xi'$ gives
rise to the unique short exact sequence
$\xi'':\nabla(i-1)\hookrightarrow N''\twoheadrightarrow \nabla(i)$. Moreover,
by construction it also follows that $N$ is isomorphic to a submodule
in $N''$. Consider $\xi''$ with $X=N''$ in \eqref{eq2.1}. Using the
inclusion $N\hookrightarrow N''$ we obtain that the composition
$\varphi\circ f$ is non-zero, implying $\langle f,\xi''\rangle\neq 0$.
This proves that the left kernel of the form $\langle \cdot \, ,\cdot\rangle$
is zero.
To prove that the right kernel is zero, we, basically, have to
reverse the above arguments. Let
$\eta:\nabla(i-1)\hookrightarrow X\twoheadrightarrow \nabla(i)$ be a non-split
short exact sequence. Quasi-heredity of $A$ implies that $\nabla(i-1)$
is Ext-injective with respect to $\nabla(i)/\soc(\nabla(i))$.
Hence $\eta$ is in fact a lifting of some non-split short exact
sequence, $\eta':\nabla(i-1)\hookrightarrow X'\twoheadrightarrow L(i)$ say. In
particular, it follows that $X'$ and thus also $X$ has simple socle,
namely $L(i-1)$. Further, applying $\operatorname{Hom}_A(\Delta(i),{}_-)$
to $\eta$, and using the fact that $\Delta(i)$ is Ext-projective
with respect to $X$, one obtains that there is a unique (up to a scalar)
non-trivial map from $\Delta(i)$ to $X$. Let $Y$ be its image. Then
$Y$ has simple top, isomorphic to $L(i)$. Furthermore, all other simple
subquotients of $X$ are isomorphic to $L(s)$, $s<i$, and hence $Y$ is a
quotient of $\Delta(i)$. Since $\Delta(i-1)$ is Ext-projective with respect to
$\operatorname{rad}(\Delta(i))$, we can find a map, $\Delta(i-1)\to \operatorname{rad}(\Delta(i))$,
whose composition with the inclusion $\operatorname{rad}(\Delta(i))\hookrightarrow
\Delta(i)$ followed by the projection from $\Delta(i)$ onto $Y$ is
non-zero. The composition of the first two maps gives us a map,
$h:\Delta(i-1)\to\Delta(i)$, such that
$\langle h,\eta\rangle\neq 0$. Therefore the right kernel of the form
$\langle \cdot \, ,\cdot\rangle$ is zero as well, completing the proof.
\end{proof}
\begin{corollary}\label{c2.4}
\begin{enumerate}[(1)]
\item $\operatorname{Hom}_A(\Delta(i-1),\Delta(i))\cong \operatorname{Ext}_A^1(\nabla(i),\nabla(i-1))^*$.
\item $\operatorname{Ext}_A^1(\Delta(i-1),\Delta(i))\cong \operatorname{Hom}_A(\nabla(i),\nabla(i-1))^*$.
\end{enumerate}
\end{corollary}
\begin{proof}
The first statement is an immediate corollary of Theorem~\ref{t2.2}
and the second statement follows by duality since $A^{opp}$ is
quasi-hereditary as soon as $A$ is, see \cite{CPS1}.
\end{proof}
\begin{corollary}\label{c2.5}
Assume that $A$ has a {\em simple preserving duality}, that is a contravariant
exact equivalence, which preserves the iso-classes of simple modules. Then
\begin{enumerate}[(1)]
\item $\operatorname{Hom}_A(\Delta(i-1),\Delta(i))\cong \operatorname{Ext}_A^1(\Delta(i-1),\Delta(i))^*$.
\item $\operatorname{Ext}_A^1(\nabla(i),\nabla(i-1))\cong \operatorname{Hom}_A(\nabla(i),\nabla(i-1))^*$.
\end{enumerate}
\end{corollary}
\begin{proof}
Apply the simple preserving duality to the statement of Corollary~\ref{c2.4}.
\end{proof}
\section{Homomorphisms between arbitrary standard modules}\label{s3}
It is very easy to see that the statement of Theorem~\ref{t2.2} does not
extend to the case $j<i-1$. For example, consider the path algebra $A$ of
the following quiver:
\begin{displaymath}
\xymatrix{
1 && 2\ar[ll] && 3 \ar[ll]\\
}.
\end{displaymath}
This algebra is hereditary and thus quasi-hereditary. Moreover, it is
directed and thus standard modules are projective and costandard modules are
simple. One easily obtains that $\operatorname{Hom}_A(\Delta(1),\Delta(3))={\Bbbk}$
whereas $\operatorname{Ext}^1_A(\nabla(3),\nabla(1))=0$. The main reason why this
happens is the fact that the non-zero homomorphism $\Delta(1)\to\Delta(3)$
factors through $\Delta(2)$ (note that $1<2<3$).
Let us define another pairing in homology. Denote by $\overline{\alpha}_i$ the
natural projection of $\Delta(i)$ onto $L(i)$ and consider
(for $j<i$) the following diagram:
\begin{equation}\label{eq3.1}
\xymatrix{
0\ar[rr] && \nabla(j)\ar[rr]^{\beta} && X\ar[rr]^{\gamma}
&& L(i)\ar[rr] && 0 \\
&& \Delta(j)\ar@{-->}[u]^{\alpha_j}\ar[rrrr]^{f} && &&
\Delta(i) \ar@{-->}[u]_{\overline{\alpha}_i}\ar@{=>}[ull]_{\varphi} &&
}.
\end{equation}
Using this diagram, the same arguments as in Section~\ref{s2} allow us to
define the map
\begin{displaymath}
\overline{\langle \cdot \, ,\cdot\rangle}:
\operatorname{Hom}_A(\Delta(j),\Delta(i))\times \operatorname{Ext}_A^1(L(i),\nabla(j))\to
{\Bbbk}
\end{displaymath}
and one can check that this map is bilinear.
\begin{proposition}\label{p3.1}
Let $N$ be the quotient of $\Delta(i)$, maximal with respect to the
following conditions: $[\operatorname{rad}(N):L(s)]\neq 0$ implies $s\leq j$;
$[\operatorname{soc}(N):L(s)]\neq 0$ implies $s=j$. Then
\begin{enumerate}[(1)]
\item the rank of the form
$\overline{\langle \cdot \, ,\cdot\rangle}$ equals
the multiplicity $[\operatorname{soc}(N):L(j)]$, which, in turn, is equal to
$\dim\operatorname{Ext}_A^1(L(i),\nabla(j))$;
\item the left kernel of $\overline{\langle \cdot \, ,\cdot\rangle}$
is the set of all morphisms $f:\Delta(j)\to\Delta(i)$ such that
$\pi\circ f=0$, where $\pi:\Delta(i)\twoheadrightarrow N$ is the natural projection.
\end{enumerate}
\end{proposition}
\begin{proof}
The proof is analogous to that of Theorem~\ref{t2.2}.
\end{proof}
Analyzing the proof of Lemma~\ref{l2.3} it is easy to see that there is
no chance to hope for any reasonable relation between
$\operatorname{Ext}_A^1(L(i),\nabla(j))$ and $\operatorname{Ext}_A^1(\nabla(i),\nabla(j))$ in general.
However, we have the following:
\begin{proposition}\label{c3.3}
\begin{enumerate}[(1)]
\item The right kernel of $\langle \cdot \, ,\cdot\rangle$ coincides with
the kernel of the homomorphism $\tau:\operatorname{Ext}_A^1(\nabla(i),\nabla(j))\to
\operatorname{Ext}_A^1(L(i),\nabla(j))$ coming from the long exact sequence in homology.
\item
Let $j=i-2$. Then $\tau$ is surjective; the rank of
$\langle \cdot \, ,\cdot\rangle$ coincides with the rank of
$\overline{\langle \cdot \, ,\cdot\rangle}$; and the left kernel of
$\langle \cdot \, ,\cdot\rangle$ coincides with the left kernel of
$\overline{\langle \cdot \, ,\cdot\rangle}$.
\end{enumerate}
\end{proposition}
\begin{proof}
The first statement follows from the proof of Theorem~\ref{t2.2}.
To prove the second statement we remark that for $j=i-2$ we have
$\operatorname{Ext}_A^{k}(X,\nabla(i-2))=0$, $k>1$, for any simple subquotient $X$ of
$\nabla(i)/L(i)$. This gives the surjectivity of $\tau$, which implies
all other statements.
\end{proof}
We remark that all results of this section have appropriate dual analogues.
\section{A generalization of the bilinear pairing to higher $\operatorname{Ext}$'s}\label{s4}
Let us go back to the example at the beginning of Section~\ref{s3}, where
we had a hereditary algebra with $\operatorname{Ext}^1_A(\nabla(3),\nabla(1))=0$,
$\operatorname{Hom}_A(\Delta(1),\Delta(3))={\Bbbk}$, and such that any morphism from
the last space factors through $\Delta(2)$.
One can have the following idea: $\operatorname{Hom}_A(\Delta(1),\Delta(3))$ decomposes into
a product of $\operatorname{Hom}_A(\Delta(1),\Delta(2))$ and $\operatorname{Hom}_A(\Delta(2),\Delta(3))$,
by Theorem~\ref{t2.2} the space $\operatorname{Hom}_A(\Delta(1),\Delta(2))$ is dual to
$\operatorname{Ext}^1_A(\nabla(2),\nabla(1))$ and the space $\operatorname{Hom}_A(\Delta(2),\Delta(3))$ is
dual to $\operatorname{Ext}^1_A(\nabla(3),\nabla(2))$, perhaps this means that the product of
$\operatorname{Hom}_A(\Delta(1),\Delta(2))$ and $\operatorname{Hom}_A(\Delta(2),\Delta(3))$ should correspond to
the product of the spaces
$\operatorname{Ext}^1_A(\nabla(2),\nabla(1))$ and $\operatorname{Ext}^1_A(\nabla(3),\nabla(2))$
and thus should be perhaps paired with $\operatorname{Ext}^2_A(\nabla(3),\nabla(1))$ and not
$\operatorname{Ext}^1_A(\nabla(3),\nabla(1))$? In our example this argument does not work
directly either since the algebra we consider is hereditary and thus $\operatorname{Ext}^2_A$
simply vanishes. However, one can observe that for
$j=i-k$, $k\in\mathbb{N}$, one could define a $\Bbbk$-linear
map from $\operatorname{Ext}_A^k(\nabla(i),\nabla(j))^*$ to $\operatorname{Hom}_A(\Delta(j),\Delta(i))$ via
\begin{multline*}
\operatorname{Ext}_A^k\left(\nabla(i),\nabla(j)\right)^*\overset{f}{\rightarrow}
\bigotimes_{l=0}^{k-1}
\operatorname{Ext}_A^1\left(\nabla(i-l),\nabla(i-l-1)\right)^*\cong
\text{ (by Corollary~\ref{c2.4}) } \\ \cong
\bigotimes_{l=0}^{k-1}
\operatorname{Hom}_A\left(\Delta(i-l-1),\Delta(i-l)\right)\overset{g}{\rightarrow}
\operatorname{Hom}_A\left(\Delta(j),\Delta(i)\right),
\end{multline*}
where $g$ is the usual composition of $k$ homomorphisms, and $f$ is the dual map
to the Yoneda composition of $k$ extensions. This map would give a bilinear
pairing between $\operatorname{Ext}_A^k\left(\nabla(i),\nabla(j)\right)^*$ and
$\operatorname{Hom}_A\left(\Delta(j),\Delta(i)\right)^*$, which could also be interesting.
However, we do not study this approach in the present paper.
Instead, we are going to try to extend the pairing we discussed
in the previous sections to higher extensions using some resolutions. This
leads us to the following definition. Choose a minimal tilting resolution,
\begin{displaymath}
\mathcal{C}^{\bullet}:\quad\quad
0\longrightarrow T_k \overset{\varphi_k}{\longrightarrow}\dots
\overset{\varphi_2}{\longrightarrow} T_1 \overset{\varphi_1}{\longrightarrow}
T_0\overset{\varphi_0}{\longrightarrow}\nabla\longrightarrow 0,
\end{displaymath}
of $\nabla$ (see \cite[Section~5]{Ri} for the existence of such a resolution).
Denote by $\mathcal{T}(\nabla)^{\bullet}$ the corresponding complex of tilting
modules. Fix $l\in\{0,\dots,k\}$ and consider the following part of the resolution above:
\begin{displaymath}
\xymatrix{
&&\Delta\ar@{.>}[d]^{f} && \\
T_{l+1}\ar[rr]^{\varphi_{l+1}} && T_l\ar@{.>}[d]^{g}\ar[rr]^{\varphi_{l}}
&& T_{l-1} \\
&&\nabla && \\
}.
\end{displaymath}
For every $f\in \operatorname{Hom}_A(\Delta,T_l)$ and every $g\in \operatorname{Hom}_A(T_l,\nabla)$
the composition gives
\begin{displaymath}
g\circ f\in \operatorname{Hom}_A(\Delta,\nabla)=\oplus_{i=1}^n\operatorname{Hom}_A(\Delta(i),\nabla(i))=
\oplus_{i=1}^n{\Bbbk}\alpha_i.
\end{displaymath}
Hence $g\circ f=\sum_{i=1}^n a_i \alpha_i$ for some $a_i\in\Bbbk$ and we can denote
$\widetilde{\langle f,g\rangle}^{(l)}= \sum_{i=1}^n a_i\in {\Bbbk}$. Obviously
$\widetilde{\langle \cdot\, ,\cdot\rangle}^{(l)}$ defines a bilinear map from
$\operatorname{Hom}_A(\Delta,T_l)\times \operatorname{Hom}_A(T_l,\nabla)$ to ${\Bbbk}$.
This map induces the bilinear map
\begin{displaymath}
\langle f,g\rangle^{(l)}:
\operatorname{Hom}_A(\Delta,T_l)\times \operatorname{Hom}_{Com}
\left(\mathcal{T}(\nabla)^{\bullet},\nabla^{\bullet}[l]\right)\to {\Bbbk}
\end{displaymath}
(where $\operatorname{Hom}_{Com}$ means the homomorphisms of complexes).
We remark that we have an obvious inclusion $\operatorname{Hom}_{Com}
\left(\mathcal{T}(\nabla)^{\bullet},\nabla^{\bullet}[l]\right)\subset
\operatorname{Hom}_A(T_l,\nabla)$ since the complex $\nabla^{\bullet}[l]$ is
concentrated in one degree.
\begin{theorem}\label{t4.1}
Let $f\in \operatorname{Hom}_A(\Delta,T_l)$ and
$g\in \operatorname{Hom}_{Com}\left(\mathcal{T}(\nabla)^{\bullet},\nabla^{\bullet}[l]\right)$.
Assume that $g$ is homotopic to zero. Then $\langle f,g\rangle^{(l)}=0$. In
particular, $\langle \cdot\, ,\cdot\rangle^{(l)}$ induces a bilinear map,
$\operatorname{Hom}_A(\Delta,T_l)\times \operatorname{Ext}_A^l(\nabla,\nabla)\to {\Bbbk}$.
\end{theorem}
The form, constructed in Theorem~\ref{t4.1} will be denoted also by
$\langle \cdot\, ,\cdot\rangle^{(l)}$ abusing notation. We remark that
both the construction above and Theorem~\ref{t4.1}
admit appropriate dual analogues.
\begin{proof}
Since $\mathcal{T}(\nabla)^{\bullet}$ is a complex of tilting modules,
the second statement of the theorem follows from the first one and
\cite[Chapter III(2), Lemma~2.1]{Ha}. To prove the first
statement we will need the following auxiliary statement.
\begin{lemma}\label{l4.2}
Let $\beta :\Delta(i)\to T(j)$ and $\gamma:T(j)\to \nabla(k)$. Then
$\gamma\circ\beta\neq 0$ if and only if $i=j=k$, $\beta\neq 0$ and
$\gamma\neq 0$.
\end{lemma}
\begin{proof}
Using the standard properties of tilting modules, see for example \cite{Ri},
we have $[T(i):L(i)]=1$, $\dim\operatorname{Hom}_A(\Delta(i),T(i))=1$ and any non-zero
element in this space is injective, $\dim\operatorname{Hom}_A(T(i),\nabla(i))=1$ and
any non-zero element in this space is surjective. Hence in the case
$i=j=k$ the composition of non-zero $\gamma$ and $\beta$ is a non-zero
projection of the top of $\Delta(i)$ to the socle of $\nabla(i)$.
This proves the ``if'' statement.
To prove the ``only if'' statement we note that $\gamma\circ\beta\neq 0$ obviously
implies $i=k$. Assume that $j\neq i$ and
$\gamma\circ\beta\neq 0$. The module $T(j)$ has a costandard filtration,
which we fix, and $\Delta(i)$ is a standard module. Hence,
by \cite[Theorem~4]{Ri}, $\beta$ is a linear combination of some maps, each of
which comes from a homomorphism, which maps the top of $\Delta(i)$ to
the socle of some $\nabla(i)$ in the costandard filtration of $T(j)$
(we remark that this $\nabla(i)$ is a subquotient of $T(j)$ but not a
submodule in general). Since the composition
$\gamma\circ\beta$ is non-zero and $\nabla(i)$ has simple socle, we have
that at least one whole copy of $\nabla(i)$ in the costandard filtration of
$T(j)$ survives under $\gamma$. But, by \cite[Theorem~1]{Ri}, any costandard
filtration of $T(j)$ ends with the subquotient $\nabla(j)\neq \nabla(i)$.
This implies that the dimension of the image of $\gamma$ must be strictly bigger
than $\dim \nabla(i)$, which is impossible. The obtained contradiction
shows that $i=j=k$. The rest follows from the standard facts, used in the proof
of the ``if'' part.
\end{proof}
We can certainly assume that $f\in\operatorname{Hom}_A(\Delta(i),T_l)$ and
$g\in\operatorname{Hom}_{Com}\left(\mathcal{T}(\nabla)^{\bullet},
\nabla(i)^{\bullet}[l]\right)$ for some $i$.
Consider now any homomorphism $h:T_{l-1}\to\nabla(i)$. Our aim is to show that
the composition $h\circ \varphi_l\circ f=0$. Assume that this is not the case
and apply Lemma~\ref{l4.2} to the components of the following two
pairs:
\begin{enumerate}[(a)]
\item\label{ppp1} $f:\Delta(i)\to T_l$ and $h\circ \varphi_l:T_l\to \nabla(i)$;
\item\label{ppp2} $\varphi_l\circ f:\Delta(i)\to T_{l-1}$ and
$h:T_{l-1}\to \nabla(i)$.
\end{enumerate}
If $h\circ \varphi_l\circ f\neq 0$, we obtain that
both $T_l$ and $T_{l-1}$ contain a direct summand isomorphic to $T(i)$,
such that the map $\varphi_l$ induces a map, $\overline{\varphi}_l:T(i)\to T(i)$,
which does not annihilate the unique copy of $L(i)$ inside $T(i)$. Since
$T(i)$ is indecomposable, we have that $\operatorname{End}_A(T(i))$ is local and thus the
non-nilpotent element $\overline{\varphi}_l\in\operatorname{End}_A(T(i))$ must be an
isomorphism. This contradicts the minimality of the resolution
$\mathcal{T}(\nabla)^{\bullet}$.
\end{proof}
We remark that the sequence
\ensuremath{\mathfrak{B}}egin{displaymath}
0\to \ensuremath{\mathcal{H}}om_A(\Delta,T_k)\to\ensuremath{\mathcal{D}}ots\to \ensuremath{\mathcal{H}}om_A(\Delta,T_1)\to\ensuremath{\mathcal{H}}om_A(\Delta,T_0)
\to \ensuremath{\mathcal{H}}om_A(\Delta,\ensuremath{\mathfrak{n}}abla)\to 0,
\end{displaymath}
obtained from $\mathcal{C}^{\ensuremath{\mathfrak{B}}ullet}$ using $\ensuremath{\mathcal{H}}om_A(\Delta,{}_-)$, is exact, and
that Theorem~\ref{t4.1} defines a bilinear pairing between
$\operatorname{Ext}_A^l(\ensuremath{\mathfrak{n}}abla,\ensuremath{\mathfrak{n}}abla)$ and the $l$-th element of this exact sequence. It is also
easy to see that the pairing, given by Theorem~\ref{t4.1}, does not depend (up to
an isomorphism of bilinear forms) on the choice of a minimal tilting resolution
of $\ensuremath{\mathfrak{n}}abla$. In particular, for every $l$ the rank of
$\ensuremath{\lambda}angle \cdot\, ,\cdot\rangle^{(l)}$ is an invariant of the algebra $A$.
By linearity we have that
\ensuremath{\mathfrak{B}}egin{displaymath}
\ensuremath{\lambda}angle \cdot\, ,\cdot\rangle^{(l)}=\oplus_{i,j=1}^n
\ensuremath{\lambda}angle \cdot\, ,\cdot\rangle^{(l)}_{i,j},
\end{displaymath}
where $\ensuremath{\lambda}angle \cdot\, ,\cdot\rangle^{(l)}_{i,j}$ is obtained by restricting
the definition of $\ensuremath{\lambda}angle \cdot\, ,\cdot\rangle^{(l)}$ to the homomorphisms
from $\Delta(j)$ (instead of $\Delta$) to the tilting resolution of $\ensuremath{\mathfrak{n}}abla(i)$
(instead of $\ensuremath{\mathfrak{n}}abla$). The relation between
$\ensuremath{\lambda}angle \cdot\, ,\cdot\rangle^{(l)}_{i,j}$ and the forms we have studied
in the previous section can be described as follows:
\ensuremath{\mathfrak{B}}egin{proposition}\ensuremath{\lambda}abel{p4.3}
$\operatorname{rank} \ensuremath{\lambda}angle \cdot\, ,\cdot\rangle^{(1)}_{i,i-1}=
\ensuremath{\mathcal{D}}im\operatorname{Ext}_A^1(\ensuremath{\mathfrak{n}}abla(i),\ensuremath{\mathfrak{n}}abla(i-1))= \operatorname{rank} \ensuremath{\lambda}angle \cdot\, ,\cdot\rangle$.
\end{proposition}
\ensuremath{\mathfrak{B}}egin{proof}
Straightforward.
\end{proof}
In the general case we have the following:
\ensuremath{\mathfrak{B}}egin{corollary}\ensuremath{\lambda}abel{c4.4}
$\operatorname{rank} \ensuremath{\lambda}angle \cdot\, ,\cdot\rangle^{(l)}_{i,j}$ equals the
multiplicity of $T(j)$ as a direct summand in the $l$-th term of
the minimal tilting resolution of $\ensuremath{\mathfrak{n}}abla(i)$.
\end{corollary}
\ensuremath{\mathfrak{B}}egin{proof}
Let $T_l=\oplus_{k=1}^n T(k)^{l_k}$
and $p:T_l\twoheadrightarrow \oplus_{k=1}^n \ensuremath{\mathfrak{n}}abla(k)^{l_k}$ be a projection.
Since the complex $\mathcal{C}^{\ensuremath{\mathfrak{B}}ullet}$ is exact and consists of elements,
having a costandard filtration, the cokernel of any map in this complex
has a costandard filtration itself since the category of modules with
costandard filtration is closed with respect to taking cokernels of
monomorphisms, see for example \cite[Theorem~1]{DR2}. This implies that
$\varphi_l$ induces a surjection from $T_l$ onto a module
having a costandard filtration. Moreover, the minimality of the
resolution means that this surjection does not annihilate any of the
direct summands. In other words, the kernel of $\varphi_l$ is
contained in the kernel of $p$. This implies that for the cokernel $N$
of $\varphi_{l+1}$ we have $\ensuremath{\mathcal{D}}im\ensuremath{\mathcal{H}}om(N,\ensuremath{\mathfrak{n}}abla(j))=l_j$. Using
Lemma~\ref{l4.2} it is easy to see that $\ensuremath{\mathcal{D}}im\ensuremath{\mathcal{H}}om(N,\ensuremath{\mathfrak{n}}abla(j))$, in fact,
equals $\operatorname{rank} \ensuremath{\lambda}angle \cdot\, ,\cdot\rangle^{(l)}_{i,j}$. This
completes the proof.
\end{proof}
We remark that, using
Corollary~\ref{c4.4} and the Ringel duality (see \cite[Chapter~6]{Ri}),
we can also interpret $\operatorname{rank} \ensuremath{\lambda}angle \cdot\, ,\cdot\rangle^{(l)}_{i,j}$ as
the dimension of $l$-th extension space (over $R(A)$) from the $i$-th
standard $R(A)$-module to the $j$-th simple $R(A)$-module. For the BGG
category $\mathcal{O}$ the dimensions of these spaces are given by
the Kazhdan-Lusztig combinatorics.
\section{Graded non-degeneracy in a graded case}\ensuremath{\lambda}abel{s5}
The form $\ensuremath{\lambda}angle \cdot\, ,\cdot\rangle^{(l)}$ is degenerate in the
general case. However, in this section we will show that it induces a
non-degenerate pairing between the graded homomorphism and extension spaces
for graded algebras under some assumptions in the spirit of Koszulity
conditions.
Throughout this section we assume that $A$ is positively graded (recall that
this means that $A=\oplus_{i\ensuremath{\mathfrak{g}}eq 0} A_i$ and $\ensuremath{\mathbb{R}}ad(A)=\oplus_{i> 0} A_i$).
We remark that this automatically guarantees that the simple $A$-modules
can be considered as graded modules. We denote by $A\mathrm{-gmod}$ the
category of all graded (with respect to the grading fixed above) finitely
generated $A$-modules. The morphisms in $A\mathrm{-gmod}$ are morphisms
of $A$-modules, which {\em preserve} the grading, that is these morphisms are
homogeneous morphisms of degree $0$. We denote by
$\ensuremath{\lambda}angle 1\rangle:A\mathrm{-gmod}\to A\mathrm{-gmod}$ the functor,
which shifts the grading as follows: $(M\ensuremath{\lambda}angle 1\rangle)_i=M_{i+1}$.
Forgetting the grading defines a faithful functor from $A\mathrm{-gmod}$
to $A\mathrm{-mod}$. We say that $M\in A\mathrm{-mod}$ admits the {\em
graded lift} $\tilde{M}\in A\mathrm{-gmod}$ (or, simply, is {\em gradable})
provided that, after forgetting the grading, the
module $\tilde{M}$ becomes isomorphic to $M$. If $M$ is indecomposable and
admits a graded lift, then this lift is unique up to an isomorphism in
$A\mathrm{-gmod}$ and a shift of grading, see for example
\cite[Lemma~2.5.3]{BGS}.
For $M,N\in A\mathrm{-gmod}$ we set $\operatorname{ext}_A^{i}(M,N)=
\operatorname{Ext}_{A\mathrm{-gmod}}^{i}(M,N)$, $i\ensuremath{\mathfrak{g}}eq 0$. It is clear that, forgetting the
grading, we have
\ensuremath{\mathfrak{B}}egin{equation}\ensuremath{\lambda}abel{greq}
\operatorname{Ext}_A^{i}(M,N)=\oplus_{j\in\ensuremath{\mathbb{Z}}}\operatorname{ext}_A^{i}(M,N\ensuremath{\lambda}angle j\rangle), \quad\quad
i\ensuremath{\mathfrak{g}}eq 0
\end{equation}
(see for example \cite[Lemma~3.9.2]{BGS}).
\ensuremath{\mathfrak{B}}egin{lemma}\ensuremath{\lambda}abel{l5.1}
Let $M,N\in A\mathrm{-gmod}$. Then the non-graded trace $\mathrm{Tr}_M(N)$ of
$M$ in $N$, that is the sum of the images of all (non-graded) homomorphism
$f:M\to N$, belongs to $A\mathrm{-gmod}$.
\end{lemma}
\ensuremath{\mathfrak{B}}egin{proof}
Any $f:M\to N$ can be written as a sum of homogeneous components
$f_i:M\to N\ensuremath{\lambda}angle i\rangle$, $i\in\ensuremath{\mathbb{Z}}$, in particular, the image of $f$ is
contained in the sum of the images of all $f_i$. Since the image of a
homogeneous map is a graded submodule of $N$, the statement follows.
\end{proof}
\ensuremath{\mathfrak{B}}egin{corollary}\ensuremath{\lambda}abel{c5.2}
All standard and costandard $A$-modules are gradable.
\end{corollary}
\ensuremath{\mathfrak{B}}egin{proof}
By duality it is enough to prove the statement for standard modules.
The module $\Delta(i)$ is defined as a quotient of $P(i)$ modulo the
trace of $P(i+1)\oplus\ensuremath{\mathcal{D}}ots\oplus P(n)$ in $P(i)$. For positively graded
algebras all projective modules are obviously graded and hence the
statement follows from Lemma~\ref{l5.1}.
\end{proof}
\ensuremath{\mathfrak{B}}egin{proposition}\ensuremath{\lambda}abel{p5.3}
Let $M,N\in A\mathrm{-gmod}$. Then the universal extension of $M$ by
$N$ (in the category $A\mathrm{-mod}$) is gradable.
\end{proposition}
\ensuremath{\mathfrak{B}}egin{proof}
As we have mentioned before, we have
$\operatorname{Ext}_A^{1}(M,N)=\oplus_{j\in\ensuremath{\mathbb{Z}}}\operatorname{ext}_A^{1}(M,N\ensuremath{\lambda}angle j\rangle)$.
Every homogeneous extension obviously produces a gradable module.
Since we can construct the universal extension
of $M$ by $N$ choosing a homogeneous basis in $\operatorname{Ext}_A^1(M,N)$, the
previous argument shows that the obtained module will be gradable.
This completes the proof.
\end{proof}
We would like to fix a grading on all modules, related to
the quasi-hereditary structure. We concentrate $L$ in degree
$0$ and fix the gradings on $P$, $\Delta$, $\ensuremath{\mathfrak{n}}abla$ and $I$ such that the
canonical maps $P\twoheadrightarrow L$, $\Delta\twoheadrightarrow L$, $L\ensuremath{\mathfrak{h}}ookrightarrow \ensuremath{\mathfrak{n}}abla$
and $L\ensuremath{\mathfrak{h}}ookrightarrow I$ are all morphisms in $A\mathrm{-gmod}$. The
only structural modules, which are left, are tilting modules.
However, to proceed, we have to show first that tilting modules are gradable.
\ensuremath{\mathfrak{B}}egin{corollary}\ensuremath{\lambda}abel{c5.4}
All tilting $A$-modules admit graded lifts. Moreover, for $T$ this lift can be
chosen such that both the inclusion $\Delta\ensuremath{\mathfrak{h}}ookrightarrow T$ and the
projection $T\twoheadrightarrow \ensuremath{\mathfrak{n}}abla$ are morphisms in $A\mathrm{-gmod}$.
\end{corollary}
\ensuremath{\mathfrak{B}}egin{proof}
By \cite[Proof of Lemma~3]{Ri}, the tilting $A$-module $T(i)$ is produced
by a sequence of universal extensions as follows: we start from the
(gradable) module $\Delta(i)$, and on each step we extend some (gradable)
module $\Delta(j)$, $j<i$, with the module, obtained on the previous step.
Using Proposition~\ref{p5.3} and induction we see that all modules, obtained
during this process, are gradable. The statement about the choice of the
lift is obvious.
\end{proof}
We fix the grading on $T$, given by Corollary~\ref{c5.4}. This automatically
induces a grading on the Ringel dual $R(A)=\operatorname{End}_A(T)^{opp}$. In what follows
we always will consider $R(A)$ as a graded algebra with respect to this
induced grading.
Note that the
same ungraded $A$-module can occur as a part of different structures, for
example, a module can be projective, injective and tilting at the same time.
In this case it is possible that the lifts of this module, which we fix for
different structures, are different. For example, if we have a non-simple
projective-injective module, then, considered as a projective module, it is
graded in non-negative degrees with top being in degree $0$; considered as
an injective module, it is graded in non-positive degrees with socle being
in degree $0$; and, considered as a tilting module, it has non-trivial
components both in negative and positive degrees.
A complex, $\mathcal{X}^{\ensuremath{\mathfrak{B}}ullet}$, of graded projective (resp.
injective, resp. tilting) modules will be called {\em linear} provided
that $\mathcal{X}^{i}\in\mathrm{add} (P\ensuremath{\lambda}angle i\rangle)$
(resp. $\mathcal{X}^{i}\in\mathrm{add} (I\ensuremath{\lambda}angle i\rangle)$, resp.
$\mathcal{X}^{i}\in\mathrm{add} (T\ensuremath{\lambda}angle i\rangle)$) for all $i\in\ensuremath{\mathbb{Z}}$.
To avoid confusions between the degree of a graded component of a module and
the degree of a component in some complex, to indicate the place of a component
in a complex we will use the word {\em position} instead of the word degree.
We say that $M\in A\mathrm{-gmod}$ admits an {\em LT-resolution},
$\mathcal{T}^{\ensuremath{\mathfrak{B}}ullet}\twoheadrightarrow M$, (here LT stands for linear-tilting) if
$\mathcal{T}^{\ensuremath{\mathfrak{B}}ullet}$ is a linear complex of tilting modules from
$A\mathrm{-gmod}$, such that $\mathcal{T}^{i}=0$, $i>0$, and the
homology of $\mathcal{T}^{\ensuremath{\mathfrak{B}}ullet}$ is concentrated in position $0$
and equals $M$ in this position. One also defines {\em LT-coresolution}
in the dual way. The main result of this section is the following:
\ensuremath{\mathfrak{B}}egin{theorem}\ensuremath{\lambda}abel{t5.5}
Let $A$ be a positively graded quasi-hereditary algebra and $1\ensuremath{\lambda}eq i,j\ensuremath{\lambda}eq n$.
Assume that
\ensuremath{\mathfrak{B}}egin{enumerate}[(i)]
\item\ensuremath{\lambda}abel{l5.5.1} $\ensuremath{\mathfrak{n}}abla(i)$ admits an LT-resolution,
$\mathcal{T}(\ensuremath{\mathfrak{n}}abla(i))^{\ensuremath{\mathfrak{B}}ullet}\twoheadrightarrow\ensuremath{\mathfrak{n}}abla(i)$;
\item\ensuremath{\lambda}abel{l5.5.2} the induced grading on $R(A)$ is positive.
\end{enumerate}
Then the form $\ensuremath{\lambda}angle \cdot\, ,\cdot\rangle^{(l)}_{i,j}$ induces a
non-degenerate bilinear pairing between
\ensuremath{\mathfrak{B}}egin{displaymath}
\ensuremath{\mathfrak{h}}om_A(\Delta(j)\ensuremath{\lambda}angle -l\rangle, \mathcal{T}(\ensuremath{\mathfrak{n}}abla(i))^{-l})\quad
\text{ and }\quad \operatorname{ext}_A^l(\ensuremath{\mathfrak{n}}abla(i),\ensuremath{\mathfrak{n}}abla(j)\ensuremath{\lambda}angle -l\rangle).
\end{displaymath}
\end{theorem}
We remark that Theorem~\ref{t5.5} has a dual analogue.
\ensuremath{\mathfrak{B}}egin{proof}
The assumption \eqref{l5.5.2} means that
\ensuremath{\mathfrak{B}}egin{gather}
\ensuremath{\mathfrak{h}}om_A(\Delta\ensuremath{\lambda}angle s\rangle,T)\ensuremath{\mathfrak{n}}eq 0 \quad\quad\mathrm{ implies }
\quad\quad s\ensuremath{\lambda}eq 0 \ensuremath{\lambda}abel{bl1}\\
\ensuremath{\mathfrak{h}}om_A(\Delta(k)\ensuremath{\lambda}angle s\rangle,T(m))\ensuremath{\mathfrak{n}}eq 0 \quad\mathrm{ and }\quad k\ensuremath{\mathfrak{n}}eq m
\quad\quad\mathrm{ implies }\quad\quad s<0\ensuremath{\lambda}abel{bl2}.
\end{gather}
Hence, it follows that
\ensuremath{\mathfrak{B}}egin{displaymath}
\ensuremath{\mathcal{D}}im \ensuremath{\mathfrak{h}}om_A\ensuremath{\lambda}eft(\Delta(j)\ensuremath{\lambda}angle -l\rangle, \mathcal{T}(\ensuremath{\mathfrak{n}}abla(i))^{-l}\right)
\end{displaymath}
equals the multiplicity of $T(j)\ensuremath{\lambda}angle -l\rangle$ as a direct summand of
$\mathcal{T}(\ensuremath{\mathfrak{n}}abla(i))^{-l}$, which, using the dual arguments, in turn, equals
\ensuremath{\mathfrak{B}}egin{displaymath}
\ensuremath{\mathcal{D}}im \ensuremath{\mathfrak{h}}om_A\ensuremath{\lambda}eft(\mathcal{T}(\ensuremath{\mathfrak{n}}abla(i))^{-l},\ensuremath{\mathfrak{n}}abla(j)\ensuremath{\lambda}angle -l\rangle\right).
\end{displaymath}
From the definition of an LT-resolution and \eqref{bl1}-\eqref{bl2} we also obtain
\ensuremath{\mathfrak{B}}egin{displaymath}
\ensuremath{\mathfrak{h}}om_A\ensuremath{\lambda}eft(\mathcal{T}(\ensuremath{\mathfrak{n}}abla(i))^{-l+1},\ensuremath{\mathfrak{n}}abla(j)\ensuremath{\lambda}angle -l\rangle\right)=0,
\end{displaymath}
which means that there is no homotopy from $\mathcal{T}(\ensuremath{\mathfrak{n}}abla(i))^{\ensuremath{\mathfrak{B}}ullet}$
to $\ensuremath{\mathfrak{n}}abla(j)\ensuremath{\lambda}angle -l\rangle^{\ensuremath{\mathfrak{B}}ullet}$. The arguments, analogous to those,
used in Corollary~\ref{c4.4}, imply that any map from
$\mathcal{T}(\ensuremath{\mathfrak{n}}abla(i))^{-l}$ to $\ensuremath{\mathfrak{n}}abla(j)\ensuremath{\lambda}angle -l\rangle$ induces a
morphism of complexes from $\mathcal{T}(\ensuremath{\mathfrak{n}}abla(i))^{\ensuremath{\mathfrak{B}}ullet}$ to
$\ensuremath{\mathfrak{n}}abla(j)\ensuremath{\lambda}angle -l\rangle^{\ensuremath{\mathfrak{B}}ullet}$. Hence
\ensuremath{\mathfrak{B}}egin{displaymath}
\ensuremath{\mathcal{D}}im \operatorname{ext}_A^l\ensuremath{\lambda}eft(\ensuremath{\mathfrak{n}}abla(i),\ensuremath{\mathfrak{n}}abla(j)\ensuremath{\lambda}angle -l\rangle\right)=
\ensuremath{\mathcal{D}}im \ensuremath{\mathfrak{h}}om_A\ensuremath{\lambda}eft(\mathcal{T}(\ensuremath{\mathfrak{n}}abla(i))^{-l},\ensuremath{\mathfrak{n}}abla(j)\ensuremath{\lambda}angle -l\rangle\right).
\end{displaymath}
We can now interpret every $f\in \ensuremath{\mathfrak{h}}om_A\ensuremath{\lambda}eft(\Delta(j)\ensuremath{\lambda}angle -l\rangle,
\mathcal{T}(\ensuremath{\mathfrak{n}}abla(i))^{-l}\right)$ as a fixation of a direct summand of
$\mathcal{T}(\ensuremath{\mathfrak{n}}abla(i))^{-l}$, which is isomorphic to $T(j)\ensuremath{\lambda}angle -l\rangle$.
Projecting it further onto $\ensuremath{\mathfrak{n}}abla(j)\ensuremath{\lambda}angle -l\rangle$ shows that
the left kernel of the form $\ensuremath{\lambda}angle \cdot\, ,\cdot\rangle^{(l)}_{i,j}$
is zero. Since the dimensions of the left and the right spaces coincide
by the arguments above, we obtain that the form is
non-degenerate. This completes the proof.
\end{proof}
It is easy to see that the condition \eqref{l5.5.2} of Theorem~\ref{t5.5} does
not imply the condition \eqref{l5.5.1} in general. Further, it is also easy to
see, for example for the path algebra of the following quiver:
\ensuremath{\mathfrak{B}}egin{displaymath}
\xymatrix{ 1 && 2\alphar[rr] && 3\alphar@/_1pc/[llll] && 4\alphar[ll]},
\end{displaymath}
that the condition \eqref{l5.5.1} (even if we assume it to be satisfied for
all $i$) does not imply the condition \eqref{l5.5.2} in general.
However, we do not know if the assumptions of the existence of an
$LT$-resolution for $\ensuremath{\mathfrak{n}}abla$ and, simultaneously, an $LT$-coresolution
for $\Delta$, would imply the condition \eqref{l5.5.2}.
We also would like to remark that the conditions of Theorem~\ref{t5.5} are
not at all automatic even in very good cases. For example one can check that the
path algebra of the following quiver:
\ensuremath{\mathfrak{B}}egin{displaymath}
\xymatrix{ 1 && 2\alphar[ll]\alphar[rr] && 3\alphar[rr] && 4}
\end{displaymath}
is standard Koszul, however, both conditions of Theorem~\ref{t5.5} fail.
Let $A$ be a positively graded quasi-hereditary algebra. We say that
$A$ is an {\em SCK-algebra} (abbreviating standard-costandard-Koszul)
provided that $A$ is standard Koszul and the induced grading on $R(A)$
is positive. We say that $A$ is an {\em SCT-algebra} (abbreviating
standard-costandard-tilting) provided that all standard and costandard
modules admit LT-(co)resolutions. By \cite[Theorem~1]{ADL}, any standard
Koszul algebra, and thus any SCK-algebra, is Koszul. We finish this
section with the following observation.
\ensuremath{\mathfrak{B}}egin{theorem}\ensuremath{\lambda}abel{t5.6}
Any SCK-algebra is an SCT-algebra and vice versa.
\end{theorem}
\ensuremath{\mathfrak{B}}egin{proof}
Our first observation is that for any SCT-algebra $A$ the induced grading
on the $R(A)$ is positive. To prove this it is enough to show that all
subquotients in any standard filtration of the cokernel of the morphism
$\Delta(i)\ensuremath{\mathfrak{h}}ookrightarrow T(i)$ have the form $\Delta(j)\ensuremath{\lambda}angle l\rangle$,
$l>0$. This follows by induction on $i$. For $i=1$ the statement is obvious,
and the induction step follows from the inductive assumption applied
to the first term of the linear tilting coresolution of $\Delta(i)$.
Now we claim that the Ringel dual of an SCT algebra is SCK and vice versa.
Assume that $A$ is SCT. Applying $\ensuremath{\mathcal{H}}om_A(T,{}_-)$ to the LT-resolution
of $\ensuremath{\mathfrak{n}}abla$ we obtain that the $k$-th component of the projective
resolution of the standard $R(A)$-module is generated in degree $k$.
Applying analogous arguments to the LT-coresolution of $\Delta$ we obtain
that the $k$-th component of the injective resolution of the costandard
$R(A)$-module is generated in degree $-k$. As we have already shown, the
induced grading on $R(A)$ is positive. Furthermore, the (graded) Ringel
duality maps injective $A$-modules to tilting $R(A)$-modules, which implies
that the grading, induced on $A$ from $R(A)\mathrm{-gmod}$, will
coincide with the original grading on $A$, and hence will be positive
as well. This means that $R(A)$ is SCK. The arguments in the opposite direction
are similar.
To complete the proof it is now enough to show, say, that any SCT algebra is
SCK. The existence of a linear tilting coresolution for $\Delta$ and the
above proved fact that for an SCT-algebra $A$ the induced grading
on the $R(A)$ is positive, imply $\operatorname{ext}^{k}(\Delta\ensuremath{\lambda}angle l\rangle,\Delta)=0$
unless $l\ensuremath{\lambda}eq k$. Since $A$ is positively graded, we have that the
$k$-th term of the projective resolution of $\Delta$ consists of
modules of the form $P(i)\ensuremath{\lambda}angle -l\rangle$, $l\ensuremath{\mathfrak{g}}eq k$. Assume that
for some $k$ we have that $P(i)\ensuremath{\lambda}angle -l\rangle$ with $l>k$ occurs.
Since every kernel and cokernel in our resolution has a standard
filtration, we obtain that $\operatorname{ext}^{k}(\Delta,\Delta(i)\ensuremath{\lambda}angle -l\rangle)\ensuremath{\mathfrak{n}}eq 0$
with $l> k$, which contradicts $l\ensuremath{\lambda}eq k$ above. This implies that
$\Delta$ has a linear projective resolution. Analogous arguments imply that
$\ensuremath{\mathfrak{n}}abla$ has a linear injective coresolution. This completes the proof.
\end{proof}
\section{The category of linear complexes of tilting modules}\ensuremath{\lambda}abel{s55}
We continue to work under the assumptions of Section~\ref{s5}, moreover,
we assume, until the end of this section, that $A$ is such that both
$A$ and $R(A)$ are positively graded.
The results of Section~\ref{s5} motivate the following definition:
We say that $M\in A\mathrm{-gmod}$ is {\em $T$-Koszul}
provided that $M$ is isomorphic in $D^b(A\mathrm{-gmod})$ to a linear
complex of tilting modules. Thus any module, which admits an
$LT$-(co)resolution, is $T$-Koszul.
We denote by $\mathfrak{T}=\mathfrak{T}(A)$ the category, whose objects are
linear complexes of tilting modules and morphisms are all
morphisms of graded complexes (which means that all components of these
morphisms are homogeneous homomorphisms of $A$-modules of degree $0$).
We also denote by $\mathfrak{T}^b$ the full subcategory of
$\mathfrak{T}$, which consists of bounded complexes.
\ensuremath{\mathfrak{B}}egin{lemma}\ensuremath{\lambda}abel{l55.1}
\ensuremath{\mathfrak{B}}egin{enumerate}[(1)]
\item $\mathfrak{T}$ is an abelian category.
\item $\ensuremath{\lambda}angle -1\rangle[1]:\mathfrak{T}\to \mathfrak{T}$ is
an auto-equivalence.
\item The complexes $\ensuremath{\lambda}eft(T(i)^{\ensuremath{\mathfrak{B}}ullet}\right)\ensuremath{\lambda}angle -l\rangle[l]$
constitute an exhaustive list of simple objects in $\mathfrak{T}$.
\end{enumerate}
\end{lemma}
\ensuremath{\mathfrak{B}}egin{proof}
The assumption that the grading on $R(A)$, induced from $A\mathrm{-gmod}$,
is positive, implies that the algebra $\mathrm{end}_A(T^{\ensuremath{\mathfrak{B}}ullet})$ is
semi-simple. Using this it is easy to check that taking the usual kernels
and cokernels of morphisms of complexes defines on $\mathfrak{T}$ the
structure of an abelian category. That
$\ensuremath{\lambda}angle -1\rangle[1]:\mathfrak{T}\to \mathfrak{T}$ is an auto-equivalence
follows from the definition.
The fact that $\mathrm{end}_A(T^{\ensuremath{\mathfrak{B}}ullet})$ is semi-simple and the above
definition of the abelian structure on $\mathfrak{T}$ imply that any
non-zero homomorphism in $\mathfrak{T}$ to the complex
$\ensuremath{\lambda}eft(T(i)^{\ensuremath{\mathfrak{B}}ullet}\right)\ensuremath{\lambda}angle -l\rangle[l]$ is surjective. Hence
the objects $\ensuremath{\lambda}eft(T(i)^{\ensuremath{\mathfrak{B}}ullet}\right)\ensuremath{\lambda}angle -l\rangle[l]$ are simple.
On the other hand,
it is easy to see that for any linear complex $\mathcal{T}^{\ensuremath{\mathfrak{B}}ullet}$
and for any $k\in\ensuremath{\mathbb{Z}}$ the complex $\ensuremath{\lambda}eft(\mathcal{T}^{k}\right)^{\ensuremath{\mathfrak{B}}ullet}$
is a subquotient of $\mathcal{T}^{\ensuremath{\mathfrak{B}}ullet}$ provided that
$\mathcal{T}^{k}\ensuremath{\mathfrak{n}}eq 0$. Hence any simple object in $\mathfrak{T}$ should
contain only one non-zero component. In order to be a simple object, this
component obviously should be an indecomposable $A$-module. Therefore any
simple object in $\mathfrak{T}$ is isomorphic to
$\ensuremath{\lambda}eft(T(i)^{\ensuremath{\mathfrak{B}}ullet}\right)\ensuremath{\lambda}angle -l\rangle[l]$ for some $i$ and $l$.
This completes the proof.
\end{proof}
Our aim is to show that $\mathfrak{T}$ has enough projective objects.
However, to do this it is more convenient to switch to a different
language and to prove a more general result.
Let $B=\oplus_{i\in\ensuremath{\mathbb{Z}}}B_i$ be a basic positively graded $\Bbbk$-algebra such
that $\ensuremath{\mathcal{D}}im_{\Bbbk}B_i<\infty$ for all $i\ensuremath{\mathfrak{g}}eq 0$. Denote by $\mathfrak{B}$
the category of linear complexes of projective $B$-modules, and by
$\tilde{\mathfrak{B}}$ the category, whose objects are all sequences
$\mathcal{P}^{\ensuremath{\mathfrak{B}}ullet}$ of projective $B$-modules, such that
$\mathcal{P}^{i}\in\mathrm{add}(P\ensuremath{\lambda}angle -i\rangle)$ for all $i\in\ensuremath{\mathbb{Z}}$,
and whose morphisms are all morphisms of graded sequences (consisting
of homogeneous maps of degree $0$). The objects of
$\tilde{\mathfrak{B}}$ will be called {\em linear sequences of projective
modules}.
Denote by $\mu: B_1\otimes_{B_0}B_1\to B_2$ the multiplication
map and by $\mu^{*}:B_2^*\to B_1^*\otimes_{B_0^*}B_1^*$ the dual map.
Define the algebra $\ensuremath{A}ambda$ as the quotient of the free
positively graded tensor algebra $B_0[B_1^*]$ modulo the homogeneous
ideal generated by $\mu^*(B_2^*)$.
A graded module, $M=\oplus_{i\in\ensuremath{\mathbb{Z}}}M_i$, over a graded algebra is
called {\em locally finite} provided that $\ensuremath{\mathcal{D}}im M_i<\infty$ for all $i$.
Note that a locally finite module does not need to be finitely generated.
For a graded algebra, $C$, we denote by $C\mathrm{-lfmod}$ the category
of all locally finite graded $C$-modules (with morphisms being homogeneous
maps of degree $0$).
The following statement was proved in \cite[Theorem~2.4]{MS}. For the
sake of completeness we present a short version of the proof.
\ensuremath{\mathfrak{B}}egin{theorem}\ensuremath{\lambda}abel{t55.2}
There is an equivalence of categories, $\overline{F}:
B_0[B_1^*]\mathrm{-lfmod}\to \tilde{\mathfrak{B}}$,
which induces an equivalence,
$F:\ensuremath{A}ambda\mathrm{-lfmod}\to \mathfrak{B}$.
\end{theorem}
\ensuremath{\mathfrak{B}}egin{proof}
Let $P$ denote the projective generator of $B$.
We construct the functor $\overline{F}$ in the following way:
Let $X=\oplus_{j\in\ensuremath{\mathbb{Z}}}X_j\in B_0[B_1^*]\mathrm{-lfmod}$. We define
$\overline{F}(X)=\mathcal{P}^{\ensuremath{\mathfrak{B}}ullet}$, where
$\mathcal{P}^{j}=P\ensuremath{\lambda}angle j\rangle \otimes_{B_0} X_{j}$, $j\in \ensuremath{\mathbb{Z}}$.
To define the differential $d_j:\mathcal{P}^{j}\to \mathcal{P}^{j+1}$
we note that $P\cong {}_B B$ and use the following bijections:
\ensuremath{\mathfrak{B}}egin{equation}\ensuremath{\lambda}abel{eq55.4}
\ensuremath{\mathfrak{B}}egin{array}{lcl}
\ensuremath{\mathcal{D}}isplaystyle
\{M\in B_0[B_1^*]\mathrm{-lfmod}:
M|_{B_0}= X|_{B_0} \} & \cong &
\text{ (since $B_0[B_1^*]$ is free) } \\ \ensuremath{\mathcal{D}}isplaystyle
\ensuremath{\mathfrak{P}}rod_{j\in\ensuremath{\mathbb{Z}}}\mathrm{hom}_{B_0-B_0}\ensuremath{\lambda}eft(B_1^*\ensuremath{\lambda}angle j+1\rangle,
\mathrm{Hom}_{\Bbbk}\ensuremath{\lambda}eft(X_{j},X_{j+1}\right)\right)
& \cong & \text{ (by adjoint associativity)} \\\ensuremath{\mathcal{D}}isplaystyle
\ensuremath{\mathfrak{P}}rod_{j\in\ensuremath{\mathbb{Z}}}\mathrm{hom}_{B_0}\ensuremath{\lambda}eft(X_{j},
B_1\ensuremath{\lambda}angle j+1\rangle\otimes_{B_0} X_{j+1} \right)
& \cong & \text{ (because of grading) } \\ \ensuremath{\mathcal{D}}isplaystyle
\ensuremath{\mathfrak{P}}rod_{j\in\ensuremath{\mathbb{Z}}}\mathrm{hom}_{B_0}\ensuremath{\lambda}eft(X_{j},
B\ensuremath{\lambda}angle j+1\rangle\otimes_{B_0} X_{j+1} \right) &\cong &
\text{ (by projectivity of ${}_B B$)}\\ \ensuremath{\mathcal{D}}isplaystyle
\ensuremath{\mathfrak{P}}rod_{j\in\ensuremath{\mathbb{Z}}}\mathrm{hom}_B\ensuremath{\lambda}eft(B\ensuremath{\lambda}angle j\rangle\otimes_{B_0} X_{j},
B\ensuremath{\lambda}angle j+1\rangle\otimes_{B_0} X_{j+1} \right). & & \\
\end{array}
\end{equation}
Thus, starting from the fixed $X$, the equalities of \eqref{eq55.4}
produce for each $j\in\ensuremath{\mathbb{Z}}$ a unique element of the space
$\mathrm{hom}_B\ensuremath{\lambda}eft(B\ensuremath{\lambda}angle j\rangle\otimes_{B_0} X_{j},
B\ensuremath{\lambda}angle j+1\rangle\otimes_{B_0} X_{j+1} \right)$, which defines the
differential in $\mathcal{P}^{\ensuremath{\mathfrak{B}}ullet}$.
Tensoring with the identity map on ${}_B B$ the correspondence
$\overline{F}$, defined above on objects, extends to a functor
from $B_0[B_1^*]\mathrm{-lfmod}$ to $\tilde{\mathfrak{B}}$. Since
$\ensuremath{\mathfrak{h}}om({}_B B,{}_B B)\cong B_0$ is a direct sum of several copies
of $\Bbbk$, it follows by a direct calculation that
$\overline{F}$ is full and faithful. It is also easy to derive from the
construction that $\overline{F}$ is dense. Hence it is an equivalence
of categories $B_0[B_1^*]\mathrm{-lfmod}$ and $\tilde{\mathfrak{B}}$.
Now the principal question is: when $\overline{F}(X)$ is a complex? Let
\ensuremath{\mathfrak{B}}egin{gather*}
d_j: B\ensuremath{\lambda}angle j\rangle\otimes_{B_0} X_{j}\to
B\ensuremath{\lambda}angle j+1\rangle\otimes_{B_0} X_{j+1},\\
d_{j-1}: B\ensuremath{\lambda}angle j-1\rangle\otimes_{B_0} X_{j-1}\to
B\ensuremath{\lambda}angle j\rangle\otimes_{B_0} X_{j}
\end{gather*}
be as constructed above.
Let further
\ensuremath{\mathfrak{B}}egin{gather*}
\ensuremath{\mathcal{D}}elta_{j}:X_{j}\to B_1 \ensuremath{\lambda}angle j+1\rangle\otimes_{B_0} X_{j+1},\\
\ensuremath{\mathcal{D}}elta_{j-1}:X_{j-1}\to B_1 \ensuremath{\lambda}angle j\rangle\otimes_{B_0} X_{j}
\end{gather*}
be the corresponding maps, given by \eqref{eq55.4}. Then
$d_jd_{j-1}=0$ if and only if
\ensuremath{\mathfrak{B}}egin{displaymath}
\ensuremath{\lambda}eft(\mu\otimes \mathrm{Id}_{X_{j+1}}\right)\circ
\ensuremath{\lambda}eft(\mathrm{Id}_{B_1}\otimes\ensuremath{\mathcal{D}}elta_j\right)\circ\ensuremath{\mathcal{D}}elta_{j-1}=0.
\end{displaymath}
The last equality, in turn, is equivalent to the fact that the
global composition of morphisms in the
following diagram is zero:
\ensuremath{\mathfrak{B}}egin{displaymath}
B_2^* \xrightarrow{\mu^*}
B_1^*\otimes B_1^* \xrightarrow{b}
\mathrm{Hom}_{\Bbbk}\ensuremath{\lambda}eft(X_{j},X_{j+1}\right)\otimes
\mathrm{Hom}_{\Bbbk}\ensuremath{\lambda}eft(X_{j-1},X_{j}\right)\xrightarrow{c}
\mathrm{Hom}_{\Bbbk}\ensuremath{\lambda}eft(X_{j-1},X_{j+1}\right),
\end{displaymath}
where the map $b$ is given by two different applications of
\eqref{eq55.4} and $c$ denotes the
usual composition. Hence $\overline{F}(X)$ is a complex
if and only if $\mathrm{Im}(\mu^*) X=0$ or, equivalently,
$X\in \ensuremath{A}ambda\mathrm{-lfmod}$.
\end{proof}
It is clear that the equivalence, constructed in the proof of
Theorem~\ref{t55.2}, sends the auto-equivalence $\ensuremath{\lambda}angle 1\rangle$ on
$\ensuremath{A}ambda\mathrm{-lfmod}$ (resp. on $B_0[B_1^*]\mathrm{-lfmod}$) to
the auto-equivalence $\ensuremath{\lambda}angle -1\rangle[1]$
on $\mathfrak{B}$ (resp. on $\tilde{\mathfrak{B}}$).
Now we are back to the original setup of this section.
\ensuremath{\mathfrak{B}}egin{corollary}\ensuremath{\lambda}abel{c55.6}
Let $R=R(A)=\oplus_{i\ensuremath{\mathfrak{g}}eq 0}R_i$ and set
$\ensuremath{A}ambda=R_0[R_1^*]/(\mu^*(R_2^*))$,
where $\mu$ denotes the multiplication in $R$. Then the category
$\mathfrak{T}$ is equivalent to $\ensuremath{A}ambda\mathrm{-lfmod}$.
\end{corollary}
\ensuremath{\mathfrak{B}}egin{proof}
Apply first the graded Ringel duality and then Theorem~\ref{t55.2}.
\end{proof}
\ensuremath{\mathfrak{B}}egin{corollary}\ensuremath{\lambda}abel{c55.7}
Assume that $R=R(A)$ is Koszul. Set
$\ensuremath{A}ambda=(E(R(A)))^{opp}$. Then
\ensuremath{\mathfrak{B}}egin{enumerate}[(1)]
\item $\mathfrak{T}$ is equivalent to the category $\ensuremath{A}ambda\mathrm{-lfmod}$.
\item The category $\mathfrak{T}^b$ is equivalent to $\ensuremath{A}ambda\mathrm{-gmod}$.
\end{enumerate}
\end{corollary}
\ensuremath{\mathfrak{B}}egin{proof}
If the algebra $R=\oplus_{i\ensuremath{\mathfrak{g}}eq 0}R_i$ is Koszul then,
by \cite[Section~2.9]{BGS}, the
formal quadratic dual algebra $R_0[R_1^*]/(\mu^*(R_2^*))$ is isomorphic
to $(E(R))^{opp}$. Now everything follows from Corollary~\ref{c55.6}.
\end{proof}
Corollary~\ref{c55.6} motivates the further study of the categories
$\mathfrak{T}$ and $\mathfrak{T}^b$. We start with a description of
the first extension spaces between the simple objects in $\mathfrak{T}$.
Surprisingly enough, this result can be obtained without any
additional assumptions.
\begin{lemma}\label{l55.8}
Let $i,j\in\{1,\dots,n\}$ and $l\in \ensuremath{\mathbb{Z}}$. Then
$\mathrm{ext}_{\mathfrak{T}}^1\left(
T(i)^{\bullet},T(j)^{\bullet}\langle -l\rangle[l]\right)\neq 0$
implies $l=-1$. Moreover,
\begin{displaymath}
\mathrm{ext}_{\mathfrak{T}}^1\left(
T(i)^{\bullet},T(j)^{\bullet}\langle 1\rangle[-1]\right)\cong
\mathrm{hom}_A\left(T(i),T(j)\langle 1\rangle\right).
\end{displaymath}
\end{lemma}
\begin{proof}
A direct calculation, using the definition of the first extension
via short exact sequences and the abelian structure on $\mathfrak{T}$.
\end{proof}
Recall from \cite{CPS1,DR} that an associative algebra is quasi-hereditary
if and only if its module category is a highest weight category.
Our goal is to establish some conditions under which $\mathfrak{T}^b$
becomes a highest weight category. To prove that a category is a
highest weight category one has to determine the (co)standard objects.
\begin{proposition}\label{p55.9}
\begin{enumerate}[(1)]
\item Assume that $\Delta(i)$ admits an LT-coresolution,
$\Delta(i)\hookrightarrow \mathcal{T}(\Delta(i))^{\bullet}$, for all $i$.
Then $\mathrm{ext}_{\mathfrak{T}}^1\left(\mathcal{T}(\Delta(i))^{\bullet},
T(j)^{\bullet}\langle -l\rangle[l]\right)= 0$
for all $l\in\ensuremath{\mathbb{Z}}$ and $j\leq i$.
\item Assume that $\nabla(i)$ admits an LT-resolution,
$\mathcal{T}(\nabla(i))^{\bullet}\twoheadrightarrow \nabla(i)$, for all $i$. Then we have
$\mathrm{ext}_{\mathfrak{T}}^1\left(T(j)^{\bullet}\langle -l\rangle[l],
\mathcal{T}(\nabla(i))^{\bullet}\right)= 0$
for all $l\in\ensuremath{\mathbb{Z}}$ and $j\leq i$.
\end{enumerate}
\end{proposition}
\begin{proof}
By duality, it is certainly enough to prove only the first statement.
Using the induction with respect to the quasi-hereditary structure it
is even enough to show that $\mathcal{T}(\Delta(n))^{\bullet}$ is
projective in $\mathfrak{T}$. By Lemma~\ref{l55.8} we can also assume
that $l<0$. Let
\begin{equation}\label{eq55.9.1}
0\to T(j)^{\bullet}\langle -l\rangle[l]\to \mathcal{X}^{\bullet}
\to \mathcal{T}(\Delta(n))^{\bullet}\to 0
\end{equation}
be a short exact sequence in $\mathfrak{T}$. Let further $d^{\bullet}$
denote the differential in $\mathcal{T}(\Delta(n))^{\bullet}$. Consider
the short exact sequence
\begin{equation}\label{eq55.9.2}
0\to \mathrm{Ker}(d^{-l})\to \mathcal{T}(\Delta(n))^{-l}\to \mathrm{Ker}(d^{-l+1})\to 0.
\end{equation}
Since $\mathcal{T}(\Delta(n))^{\bullet}$ is a tilting coresolution of
a standard module, it follows that all modules in \eqref{eq55.9.2} have
standard filtration. Hence, applying $\mathrm{Hom}_A({}_-,T(j))$ to
\eqref{eq55.9.2}, and using the fact that
$T(j)$ has a costandard filtration, we obtain the surjection
\begin{displaymath}
\mathrm{Hom}_A\left(\mathcal{T}(\Delta(n))^{-l},T(j)\right)\twoheadrightarrow
\mathrm{Hom}_A\left(\mathrm{Ker}(d^{-l}),T(j)\right),
\end{displaymath}
which induces the graded surjection
\begin{displaymath}
\mathrm{hom}_A\left(\mathcal{T}(\Delta(n))^{-l},T(j)\langle -l\rangle\right)\twoheadrightarrow
\mathrm{hom}_A\left(\mathrm{Ker}(d^{-l}),T(j)\langle -l\rangle\right).
\end{displaymath}
The last surjection allows one to perform a base change in
$\mathcal{X}^{\bullet}$, which splits the sequence \eqref{eq55.9.1}.
This proves the statement.
\end{proof}
For $R=R(A)$ we
introduce the notation $R^{!}=R_0[R_1^*]/(\mu^*(R_2^*))$
(if $R$ is Koszul, this notation coincides
with the one used for the formal quadratic dual in \cite[2.8]{BGS}).
We have $\mathfrak{T}\cong R^{!}\mathrm{-lfmod}$ by Corollary~\ref{c55.6}.
\cite[Theorem~1]{ADL} states that a standard Koszul quasi-hereditary
algebra is Koszul (which means that if standard modules admit linear projective
resolutions and costandard modules admit linear injective resolutions, then
simple modules admit both linear projective and linear injective resolutions).
An analogue of this statement in our case is the following:
\begin{theorem}\label{t55.12}
Let $A$ be an SCT algebra. Then
\begin{enumerate}[(1)]
\item\label{t55.12.1} $R^{!}$ is quasi-hereditary with respect to the
usual order on $\{1,2,\dots,n\}$, or, equivalently,
$\mathfrak{T}^b\simeq R^{!}\mathrm{-gmod}$ is a highest weight category;
\item\label{t55.12.2} $\mathcal{T}(\Delta(i))^{\bullet}$, $i=1,\dots,n$, are standard
objects in $\mathfrak{T}^b$;
\item\label{t55.12.3} $\mathcal{T}(\nabla(i))^{\bullet}$, $i=1,\dots,n$, are costandard
objects in $\mathfrak{T}^b$;
\end{enumerate}
Assume further that the algebra $R^{!}$ is SCT. Then
\begin{enumerate}[(1)]
\setcounter{enumi}{3}
\item\label{t55.12.4} simple $A$-modules are $T$-Koszul, in particular, for every
$i=1,\dots,n$ there exists a linear complex, $\mathcal{T}(L(i))^{\bullet}$,
of tilting modules, which is isomorphic to $L(i)$ in $D^b(A\mathrm{-gmod})$;
\item\label{t55.12.5} $\mathcal{T}(L(i))^{\bullet}$, $i=1,\dots,n$, are tilting
objects with respect to the quasi-hereditary structure on $\mathfrak{T}^b$.
\end{enumerate}
\end{theorem}
\begin{proof}
The algebra $R$ is quasi-hereditary with respect to the opposite order
on $\{1,2,\dots,n\}$. Moreover, $R$ is SCK by Theorem~\ref{t5.6}, in particular,
it is standard Koszul, thus also Koszul by \cite[Theorem~1]{ADL}. Hence its
Koszul dual, which is isomorphic to $(R^{!})^{opp}$ by \cite[2.10]{BGS},
is quasi-hereditary with respect to the usual order on $\{1,2,\dots,n\}$
by \cite[Theorem~2]{ADL}. This certainly means that $R^{!}$ is
quasi-hereditary with respect to the usual order on $\{1,2,\dots,n\}$.
From Corollary~\ref{c55.7} we also obtain $R^{!}\mathrm{-gmod}\simeq
\mathfrak{T}^b$. This proves the first statement.
That the objects $\mathcal{T}(\Delta(i))^{\bullet}$, $i=1,\dots,n$, are
standard and the objects $\mathcal{T}(\nabla(i))^{\bullet}$, $i=1,\dots,n$,
are costandard follows from Proposition~\ref{p55.9} and \cite[Theorem~1]{DR2}.
This proves \eqref{t55.12.2} and \eqref{t55.12.3}.
Now we can assume that $R^{!}$ is an SCT-algebra. In particular, it is
quasi-hereditary, and hence the category
$\mathfrak{T}^b$ must contain tilting objects with respect to the
corresponding highest weight structure. By \cite[Proof of Lemma~3]{Ri},
the tilting objects in $\mathfrak{T}^b$ can be constructed via a
sequence of universal extensions, which starts with some standard
object and proceeds by extending other (shifted) standard objects
by objects, already constructed on previous steps. The assumption
that $R^{!}$ is SCT=SCK means that new standard objects should be shifted by
$\langle -l\rangle[l]$ with $l>0$. From the second statement
of our theorem, which we have already proved above, it follows that the
standard objects in $\mathfrak{T}^b$ are exhausted by
$\mathcal{T}(\Delta(i))^{\bullet}$, $i=1,\dots,n$, and their shifts.
The homology of $\mathcal{T}(\Delta(i))^{\bullet}$ is concentrated in
position $0$ and in non-negative degrees. It follows that the homology
of the tilting object in $\mathfrak{T}^b$, which we obtain, using this
construction, will be concentrated in non-positive positions and in
non-negative degrees.
On the other hand, a dual construction, that is the one, which uses
costandard objects, implies that the homology of the same tilting
object in $\mathfrak{T}^b$
will be concentrated in non-negative positions and in non-positive degrees.
This means that the homology of an indecomposable tilting object in
$\mathfrak{T}^b$ is concentrated in position $0$ and in degree $0$ and
hence is a simple $A$-module. This proves the last two statements of our
theorem and completes the proof.
\end{proof}
In the next section we will show that all the above conditions are satisfied
for the associative algebras, associated with the blocks of the
BGG category $\mathcal{O}$.
We remark that, under the conditions of Theorem~\ref{t55.12}, in the category
$\mathfrak{T}$ the standard and costandard $A$-modules remain standard and
costandard objects respectively via their tilting (co)resolutions. Tilting
$A$-modules become simple objects, and simple $A$-modules become tilting
objects via $\mathcal{T}(L(i))^{\bullet}$.
An SCT algebra $A$ for which $R(A)^!$ is SCT will be called {\em balanced}.
The results of this section allow us to formulate a new type of duality
for balanced algebras (in fact, this just means that we can perform in one
step the following path $A\leadsto R\leadsto R^{!}\leadsto R(R^{!})$,
which consists of already known dualities for quasi-hereditary algebras).
\begin{corollary}\label{c55.88}
Let $A$ be balanced and $\mathcal{T}(L(i))^{\bullet}$, $i=1,\dots,n$,
be a complete list of indecomposable tilting objects in $\mathfrak{T}^b$,
constructed in Theorem~\ref{t55.12}\eqref{t55.12.5}. Then
$\langle -1\rangle[1]$ induces a (canonical) $\ensuremath{\mathbb{Z}}$-action on the algebra
\begin{displaymath}
\overline{C}(A)=\operatorname{End}_A\left(\oplus_{l\in\ensuremath{\mathbb{Z}}}\oplus_{i=1}^n
\mathcal{T}(L(i))^{\bullet}\langle -l\rangle[l]\right),
\end{displaymath}
which makes $\overline{C}(A)$ into the covering of some algebra $C(A)$.
The algebra $C(A)$ is balanced and $C(C(A))\cong A$.
\end{corollary}
\begin{proof}
From Theorem~\ref{t55.12} it follows that $C(A)\cong (R(R^{!}))^{opp}$.
From Lemma~\ref{l55.8} and the assumption that $R(A)^!$ is SCT it follows that
the grading on both $R^{!}$ and $C(A)$, induced from $\mathfrak{T}$, is positive.
In particular, Theorem~\ref{t5.6} and \cite[Theorem~2]{ADL} now imply that
$C(A)$ is balanced. Since both Ringel and Koszul
dualities are involutive, we also have $A\cong (R(R(C(A))^{!}))^{opp}$.
\end{proof}
\begin{corollary}\label{c55.89}
Let $A$ be balanced. Then
$A$ is standard Koszul and $C(A)\cong (A^{!})^{opp}\cong E(A)$.
\end{corollary}
\begin{proof}
$A$ is standard Koszul by Theorem~\ref{t5.6}, in particular, it is
Koszul by \cite[Theorem~1]{ADL}. Further, since no homotopy
is possible in $\mathfrak{T}$, it follows that
\begin{displaymath}
\operatorname{ext}_A^{l}\left(L(i),L(j)\langle -l\rangle\right)\cong
\mathrm{Hom}_{\mathfrak{T}}\left(\mathcal{T}(L(i))^{\bullet},
\mathcal{T}(L(j))^{\bullet}\langle -l\rangle[l]\right).
\end{displaymath}
The last equality is obviously compatible with the $\ensuremath{\mathbb{Z}}$-actions and the compositions
on both sides, which implies that the Koszul dual $(A^{!})^{opp}$ of $A$ is isomorphic
to $C(A)$.
\end{proof}
And now we can formulate, probably, the most surprising
result of this section.
\begin{corollary}\label{c55.90}
Let $A$ be balanced. Then the algebras $R(A)$,
$E(A)$, $E(R(A))$ and $R(E(A))$ are also balanced, moreover
\begin{displaymath}
E(R(A))\cong R(E(A))
\end{displaymath}
as quasi-hereditary algebras. In other words, both the Ringel and Koszul
dualities preserve the class of balanced algebras and commute on this class.
\end{corollary}
\begin{proof}
Follows from Theorem~\ref{t55.12}, Corollary~\ref{c55.88} and
Corollary~\ref{c55.89}.
\end{proof}
The results presented in this section motivate the following
natural question: {\em is any SCT=SCK algebra balanced?}
\section{The graded Ringel dual for the category $\mathcal{O}$}\label{s6}
In this section we prove that the conditions of Theorem~\ref{t5.5}
are satisfied for the associative algebra, associated with a
block of the BGG category $\mathcal{O}$. To do this we will use the
graded approach to the category $\mathcal{O}$, worked out in
\cite{St}. So, in this section we assume that $A$ is the basic
associative algebra of an indecomposable integral (not necessarily
regular) block of the BGG category $\mathcal{O}$, \cite{BGG}.
The (not necessarily bijective) indexing set for simple modules will
be the Weyl group $W$ with the usual Bruhat order (such that the
identity element is the maximal one and corresponds to the projective
Verma=standard module). This algebra is Koszul by \cite{BGS,So}, and
thus we can fix on $A$ the Koszul grading, which leads us to the
situation, described in Section~\ref{s5}. Recall that a module, $M$,
is called {\em rigid} provided that its socle and radical filtrations
coincide, see for example \cite{Ir}. Our main result in this
section is the following:
\begin{theorem}\label{t6.1}
$\operatorname{End}_A(T)$ is positively graded, moreover, it is generated in
degrees $0$ and $1$. Furthermore, $\nabla$ admits an LT-resolution.
\end{theorem}
\begin{proof}
From \cite[Section~7]{FKM} it follows that
$T\cong \mathrm{Tr}_{P(w_0)}(P)$ and thus, by Lemma~\ref{l5.1},
there is a graded submodule, $T'$ of $P$, which is isomorphic
to $T$ after forgetting the grading. Moreover, again by \cite[Section~7]{FKM},
the restriction from $P$ to $T'$ induces an isomorphism of $\operatorname{End}_A(P)$ and
$R=\operatorname{End}_A(T)$. So, to prove that $\operatorname{End}_A(T)$ is positively graded it is
enough to show that $T'\cong T\langle -l\rangle$ for some $l$. Actually,
we will show that this $l$ equals the Loewy length of $\Delta(e)$.
Let $\theta_s$ denote the graded translation functor through the
$s$-wall, see \cite[3.2]{St}. Let $w_0$ denote the longest element
in the Weyl group. The socle of any Verma module in the category
$\mathcal{O}$ is the simple Verma module $\Delta(w_0)$, see
\cite[Chapter~7]{Di}. This gives, for some $l\in\ensuremath{\mathbb{Z}}$, a graded
inclusion, $T(w_0)\langle -l\rangle\cong \Delta(w_0)\langle -l
\rangle\hookrightarrow \Delta(e)$. Moreover, since Verma modules in
$\mathcal{O}$ are rigid by \cite{Ir}, and since their graded filtration
is the Loewy one by \cite[Proposition~2.4.1]{BGS}, it follows that this $l$
equals the Loewy length of $\Delta(e)$. Now we would like to prove by
induction that $T(w_0w)\langle -l\rangle\hookrightarrow P(w)$ for
any $w\in W$. Assume that this is proved for some $w$ and let
$s$ be a simple reflection such that $l(ws)>l(w)$. Translating through
the $s$-wall we obtain
$\theta_s T(w_0w)\langle -l\rangle\hookrightarrow \theta_s P(w)$.
Further, the module $P(ws)$ is a direct summand of $\theta_s P(w)$
(after forgetting the grading). However, from \cite[Theorem~3.6]{St}
it follows that the inclusion $P(ws)\hookrightarrow\theta_s P(w)$
is homogeneous and has degree $0$. The same
argument implies that the inclusion
$T(w_0ws)\hookrightarrow\theta_s T(w_0w)$ is homogeneous and
has degree $0$. This gives us the desired inclusion
$T(w_0ws)\langle -l\rangle\hookrightarrow P(ws)$ of degree $0$
and completes the induction. Adding everything up we obtain a
graded inclusion of degree $0$ from $T\langle -l\rangle$ to $P$.
Recall once more that the restriction from $P$ to $T$ induces
an isomorphism of $\operatorname{End}_A(P)$ and $R=\operatorname{End}_A(T)$. Since
$\operatorname{End}_A(P)=A$ is positively graded and is generated in degrees $0$
and $1$, we obtain that $\operatorname{End}_A(T)$ is positively graded and
is generated in degrees $0$ and $1$ as well.
It is now left to prove the existence of an LT-resolution for
$\nabla$. Consider the minimal tilting resolution of $\nabla$.
In Section~\ref{s5} we have defined the grading on $T$ such that
the canonical projection $T\to \nabla$
is a homogeneous map of degree $0$. The kernel of this
projection is thus graded and has a graded $\nabla$-filtration.
Proceeding by induction we obtain that the minimal tilting resolution
of $\nabla$ is graded. Let $R=R(A)$. Using the functor
$F=\mathrm{Hom}_A(T,{}_-)$ we transfer this graded tilting resolution
to a graded projective resolution
of the direct sum $\Delta^{(R)}$ of standard $R$-modules. By
\cite{So2} we have $A\cong R$, moreover, we have just
proved that the grading on $R$, which is induced from
$A\mathrm{-gmod}$, is the Koszul one. By \cite[3.11]{BGS},
the standard $A$-modules are Koszul, implying that
the $l$-th term of the projective resolution of
$\Delta^{(R)}$ is generated in degree $l$. Applying
$F^{-1}$ we thus obtain an LT-resolution of $\nabla$.
This completes the proof.
\end{proof}
Catharina Stroppel gave an alternative argument for Theorem~\ref{t6.1}
(see Appendix), which uses graded twisting functors. The advantage of
her approach is that it can be generalized also to the parabolic
analogue of the category $\mathcal{O}$ defined in \cite{RC}.
The arguments, used in the proof of Theorem~\ref{t6.1} also imply
the following technical result:
\begin{corollary}\label{c6.2}
\begin{enumerate}[(1)]
\item The Loewy length $\mathrm{l.l.}(P(w))$ of $P(w)$
equals $2\mathrm{l.l.}(\Delta(e))-\mathrm{l.l.}(\Delta(w))$. In particular,
for the regular block of $\mathcal{O}$ we have
$\mathrm{l.l.}(P(w))=l(w_0)+l(w)+1$.
\item The Loewy length $\mathrm{l.l.}(T(w))$ of $T(w)$
equals $2\mathrm{l.l.}(\Delta(w))-1$. In particular,
for the regular block of $\mathcal{O}$ we have
$\mathrm{l.l.}(T(w))=2(l(w_0)-l(w))+1$.
\end{enumerate}
\end{corollary}
\begin{proof}
We start with the second statement. Recall that $\Delta(w)\hookrightarrow
T(w)$, $T(w)\twoheadrightarrow \nabla(w)$, $[T(w):L(w)]=1$ and $L(w)$ is the simple top
of $\Delta(w)$ and the simple socle of $\nabla(w)$. It follows that
$\mathrm{l.l.}(T(w))\geq \mathrm{l.l.}(\Delta(w))+\mathrm{l.l.}(\nabla(w))-1=
2\mathrm{l.l.}(\Delta(w))-1$ since $\mathcal{O}$ has a simple preserving duality.
However, the graded filtration of the tilting module we have just constructed
certainly has semi-simple subquotients (since $A_0$ is positively graded).
All $\Delta(w')$ occurring in it have
Loewy length less than or equal to that of $\Delta(w)$ and start in negative
degrees since $\operatorname{End}_A(T)$ is positively graded. This implies that
$\mathrm{l.l.}(T(w))\leq 2\mathrm{l.l.}(\Delta(w))-1$ and completes the proof
of the second part.
Since $P(w)$ has simple top, its graded filtration is the radical one
by \cite[Proposition~2.4.1]{BGS}. However, from the proof of
Theorem~\ref{t6.1} and from the second part of this corollary, which we have
just proved, it follows that the length of the graded filtration of $P(w)$
is exactly $2\mathrm{l.l.}(\Delta(e))-\mathrm{l.l.}(\Delta(w))$.
The computations for the regular block follow from the results
of \cite{Ir1} and the proof is complete.
\end{proof}
\begin{corollary}\label{c6.3}
Let $w\in W$. Then the following conditions for $T(w)$ are equivalent:
\begin{enumerate}[(a)]
\item\label{c.6.3.1} $T(w)$ is rigid.
\item\label{c.6.3.2} $\operatorname{End}_A(T(w))$ is commutative.
\item\label{c.6.3.3} $T(w)$ has simple top (or, equivalently, simple socle).
\item\label{c.6.3.35} The center of the universal enveloping algebra surjects
onto $\operatorname{End}_A(T(w))$.
\item\label{c.6.3.4} $T(w)\hookrightarrow P(w_0)$.
\item\label{c.6.3.5} $P(w_0)\twoheadrightarrow T(w)$.
\item\label{c.6.3.6} $[T(w):\Delta(w')]\leq 1$ for all $w'\in W$.
\item\label{c.6.3.7} $[T(w):\nabla(w')]\leq 1$ for all $w'\in W$.
\item\label{c.6.3.8} $[T(w):\Delta(w_0)]=1$.
\item\label{c.6.3.9} $[T(w):\nabla(w_0)]=1$.
\end{enumerate}
\end{corollary}
We remark that, though $\Delta(w_0)\cong \nabla(w_0)$ is a simple
module, the numbers $[T(w):\Delta(w_0)]$ and $[T(w):\nabla(w_0)]$ are not the
composition multiplicities, but the multiplicities in the standard and the
costandard filtrations of $T(w)$ respectively.
\begin{proof}
By \cite[Section~7]{FKM}, $T(w)\hookrightarrow P(w_0w)$ and the restriction
induces an isomorphism for the endomorphism rings. Hence the equivalence of
\eqref{c.6.3.2}, \eqref{c.6.3.3}, and \eqref{c.6.3.35} follows from the
self-duality of $T(w)$ and \cite[Theorem~7.1]{St2}. That
\eqref{c.6.3.3} implies \eqref{c.6.3.1} follows from
\cite[Proposition~2.4.1]{BGS}. From the proof of Theorem~\ref{t6.1} and
\cite[Theorem~3.6]{St} it follows that the highest and the lowest
graded components of $T(w)$ are one-dimensional. Hence if $T(w)$ does not
have simple top, its graded filtration, which is a Loewy one, does not
coincide with the radical filtration and thus $T(w)$ is not rigid.
This means that \eqref{c.6.3.1} implies \eqref{c.6.3.3}.
Since $L(w_0)$ is the socle of any Verma module, it follows that
\eqref{c.6.3.5} is equivalent to \eqref{c.6.3.3}. And, using the
self-duality of both $T(w)$ and $P(w_0)$ we have that \eqref{c.6.3.5} is
equivalent to \eqref{c.6.3.4}.
The equivalence of \eqref{c.6.3.6} and \eqref{c.6.3.7}
and the equivalence of \eqref{c.6.3.8} and \eqref{c.6.3.9} follows
using the simple preserving duality on $\mathcal{O}$. Since
$[P(w_0):\Delta(w')]=1$ for all $w'$, we get that \eqref{c.6.3.5}
implies \eqref{c.6.3.6}. Let $T(w)$ be such that
\eqref{c.6.3.6} is satisfied. Then, in particular,
$[T(w):\Delta(w_0)]\leq 1$. Since $L(w_0)$ is a simple socle
of any Verma module, the self-duality of $T(w)$ implies
$[T(w):\Delta(w_0)]=1$, which, in turn, implies that
$T(w)$ has simple top, giving \eqref{c.6.3.3}. Moreover, the same
argument shows that \eqref{c.6.3.8} implies \eqref{c.6.3.3}.
That \eqref{c.6.3.6} implies \eqref{c.6.3.8} is obvious,
and the proof is complete.
\end{proof}
We remark that (in the case when the equivalent conditions of
Corollary~\ref{c6.3} are satisfied) the surjection of the center of the
universal enveloping algebra onto $\operatorname{End}_A(T(w))$ is graded with respect to
the grading on the center, considered in \cite{So}.
\begin{corollary}\label{c6.101}
Let $w\in W$, and $s$ be a simple reflection. Then
$\theta_s T(w)=T(w)\langle 1\rangle \oplus
T(w)\langle -1\rangle $ if $l(ws)>l(w)$
and $\theta_s T(w)\in\mathrm{add}(T)$ (as a graded module) otherwise.
\end{corollary}
\begin{proof}
In the case $l(ws)>l(w)$ the statement follows from
\cite[Section~7]{FKM} and \cite[Section~8]{St2}. If $l(ws)< l(w)$
then Theorem~\ref{t6.1} and \cite[Section~8]{St2}
imply that $\theta_s T(w)$ has a graded Verma flag, and all
Verma subquotients in this flag are of the form
$\Delta(x)\langle k\rangle$, $k\geq 0$. The self-duality of
$\theta_s T(w)$ now implies that $\theta_s T(w)\in\mathrm{add}(T)$.
\end{proof}
One more corollary of Theorem~\ref{t6.1} is the following:
\begin{proposition}\label{p6.5}
$A$ is a balanced algebra, in particular, all standard, costandard,
and simple $A$-modules are $T$-Koszul.
\end{proposition}
\begin{proof}
That standard and costandard $A$-modules are $T$-Koszul follows from
the fact that $A$ is standard Koszul (see \cite{ADL}) and
Theorem~\ref{t6.1}. Hence $A$ is SCT by Theorem~\ref{t6.1} and
Corollary~\ref{c55.89}. Further, the Koszul grading on $A\mathrm{-mod}$
induces on $R(A)^!\mathrm{-mod}$ the Koszul grading by
\cite[Theorem~3]{ADL}. In particular, from Theorem~\ref{t6.1} it follows
that $R(A)^!$ is SCK, that is $A$ is balanced. That simple $A$-modules
are $T$-Koszul now follows from Theorem~\ref{t55.12}.
\end{proof}
With the same argument and using the result of Catharina Stroppel presented
in the Appendix, one gets that the algebras of the
blocks of the parabolic analogue of the category $\mathcal{O}$ in the
sense of \cite{RC} are also balanced.
We also remark that projective $A$-modules are not $T$-Koszul in general.
For example, already for $\mathfrak{sl}_2$ we have
$P(s_{\alpha})\cong T(e)\langle -1\rangle$ and thus $P(s_{\alpha})$
is not $T$-Koszul.
\begin{corollary}\label{c6.7}
Let $A$ be the associative algebra of the regular block of the
category $\mathcal{O}$ endowed with Koszul grading. Then the
category of linear bounded tilting complexes of $A$-modules is
equivalent to $A\mathrm{-gmod}$.
\end{corollary}
\begin{proof}
Since $A$ has a simple preserving duality, it is
isomorphic to $A^{opp}$, moreover, $A$ is Koszul self-dual
by \cite{So} and Ringel self-dual by \cite{So2}. Hence the
necessary statement follows from Corollary~\ref{c55.7}.
\end{proof}
For singular blocks Corollary~\ref{c55.7} and \cite{BGS}
imply that the category of linear bounded tilting
complexes of $A$-modules is equivalent to the category of
graded modules over the regular block of the parabolic
category $\mathcal{O}$ with the same stabilizer (and vice versa).
\section{Appendix (written by Catharina Stroppel)}\label{sapp}
In this appendix we reprove Theorem~\ref{t6.1} in a way which implies
the corresponding statement for the parabolic category $\mathcal{O}$.
Our methods also provide an example for the theory developed in the paper
in the context of properly stratified algebras. Since we do not use any new
techniques, we refer mainly to the literature. We have to recall several
constructions and definitions. We restrict ourselves to the case of the
principal block to avoid even more notation.
For an algebra $A$ we denote by $\mathrm{mod-}A$ ($A\mathrm{-mod-}A$
respectively) the category of finitely generated right $A$-modules (finitely
generated $A$-bimodules). If $A$ is graded, then we denote by $\mathrm{gmod-}A$
and $A\mathrm{-gmod-}A$ the corresponding categories of graded modules.
Let $\mathfrak{g}$ be a semisimple Lie algebra with fixed Borel and Cartan
subalgebras $\mathfrak{b}$, $\mathfrak{h}$, Weyl group $W$ with longest element
$w_0$, and corresponding category $\mathcal{O}$. Let $\mathcal{O}_0$ be the
principal block of $\mathcal{O}$ with the simple modules $L(x\cdot0)$ of highest
weight $x(\rho)-\rho$, where $x\in W$ and $\rho$ denotes the half-sum of positive
roots. Let $P(x\cdot0)$ be the projective cover of $L(x\cdot0)$.
Let $\mathcal{H}$ denote the category of Harish-Chandra bimodules with generalized
trivial central character from both sides (as considered for example in \cite{SHC}).
Let $\chi$ denote the trivial central character. For any $n\in\mathbb{Z}_{>0}$ we have
the full subcategories $\mathcal{H}^n$ (and $^n\mathcal{H}$ respectively) of
$\mathcal{H}$ given by objects $X$ such that $X\mathrm{Ker}\chi^n=0$ ($\mathrm{Ker}\chi^n X=0$
respectively). There is an auto-equivalence $\eta$ of $\mathcal{H}$, given by switching
the left and right action of $U(\mathfrak{g})$ (see \cite[6.3]{Ja}), and giving rise
to equivalences $\mathcal{H}^n\cong{}^n\mathcal{H}$. For $s$ a simple reflection we have
translation functors through the $s$-wall: $\theta_s$ from the left hand side and
$\theta_s^r$ from the right hand side (for a definition see \cite[6.33]{Ja} or more
explicitly \cite[2.1]{Sthom}). In particular, $\eta\theta_s\cong\theta_s^r\eta$.
Recall the equivalence (\cite[Theorem 5.9]{BG}) $\epsilon:\mathcal{H}^1\cong\mathcal{O}_0$.
We denote by $L_x=\epsilon^{-1} L(x\cdot 0)$ and consider it also as an object in
$\mathcal{H}$. Note that $\eta L_x\cong L_{x^{-1}}$ (see \cite[6.34]{Ja}). Let $P^n_x$
and ${}^nP_x$ be the projective cover of $L_x$ in $\mathcal{H}^n$ and ${}^n\mathcal{H}$
respectively. In particular, $\eta P^n_x\cong {}^n P_x$.
Recall the structural functor $\mathbb{V}:\mathcal{H}\rightarrow
S(\mathfrak{h})\mathrm{-mod-}S(\mathfrak{h})$ from \cite{SHC}. We equip the algebra
$S=S(\mathfrak{h})$ with a $\mathbb{Z}$-grading such that $\mathfrak{h}$ is sitting in degree two.
In \cite{SHC} it is proved that $\mathbb{V} P^n_x$ has a graded lift. By abuse of language,
we denote the graded lift having $-l(x)$ as its lowest degree also by $\mathbb{V} P^n_x$. Let
$A^n=\mathrm{End}_{S\mathrm{-gmod-}S}(\bigoplus_{x\in W}\mathbb{V} P^n_x)$. Then $A^n$ is a graded
algebra such that $\mathcal{H}^n\cong\mathrm{mod-}A^n$. In particular, $A^1$ is the Koszul
algebra corresponding to $\mathcal{O}_0$ (\cite{BGS}). On the other hand we have
${}^nA=\mathrm{End}_\mathcal{H}(\bigoplus_{x\in W}{}^nP_x)$ and the corresponding equivalence
${}^n\mathcal{H}\cong\mathrm{mod-}{}^nA$. Concerning the notation we will not
distinguish between objects in $\mathcal{H}^n$ and $\mathrm{mod-}A^n$ or between objects
in $^n\mathcal{H}$ and $\mathrm{mod-}{}^nA$. We fix a grading on $^nA$ such
that $\eta$ lifts to equivalences $\tilde\eta:\mathrm{gmod-}A_n\cong \mathrm{gmod-}A^n$
preserving the degrees in which a simple module is concentrated. More
precisely, $\tilde\eta L(x)\cong L(x^{-1})$, where $L(x)$ denotes the graded
lift of $L_x$, concentrated in degree zero, in the corresponding category.
Let us fix $n$. For $s$ a simple reflection we denote by $S^s$ the $s$-invariants in $S$.
We define $\tilde\theta_s:\mathrm{gmod-}A^n\rightarrow\mathrm{gmod-}A^n$ as tensoring
with the graded $A^n=\mathrm{End}_{S\mathrm{-gmod-}S}(\bigoplus_{x\in W}\mathbb{V} P^n_x)$ bimodule
$\mathrm{Hom}_{S\mathrm{-gmod-}S}(\bigoplus_{x\in W}\mathbb{V} P^n_x,\bigoplus_{x\in W}S\otimes_{S^s}\mathbb{V}
P^n_x\langle -1\rangle)$. Because of \cite[Lemma 10]{SHC}, this is a graded lift
(in the sense of \cite{St}) of the translation functor
$\theta_s:\mathcal{H}^n\rightarrow\mathcal{H}^n$. As in \cite{St} we have the adjunction
morphisms $\mathrm{ID}\ensuremath{\lambda}angle 1\rangle\rightarrow\tilde\theta_s$ and
$\tilde\theta_s\rightarrow\mathrm{ID}\ensuremath{\lambda}angle -1\rangle$. Define
$\tilde\theta_s^r=\eta\tilde\theta_s\eta:{}^nA\mathrm{-gmod}\rightarrow
{}^nA\mathrm{-gmod}$. We have again the adjunction morphism $a_s^{(n)}:\mathrm{ID}\ensuremath{\lambda}angle
1\rangle\rightarrow\tilde\theta_s^r$. Let $T_s^{(n)}$ denote the functor given by
taking the cokernel of $a_s^{(n)}$. We fix a compatible system of surjections
$P^n\twoheadrightarrow P^m$ for $n\geq m$. It gives rise to a system of graded projections
$p_{n,m}:{}^nA\twoheadrightarrow {}^mA$ for $n\geq m$. Let $^\infty A=\varprojlim\;{}^nA$ and
$^\infty T_s=\varprojlim T_s^{(n)}:\mathrm{gmod-}^\infty A\rightarrow \mathrm{gmod-}^\infty A$.
Note that $^\infty T_s$ preserves the category $\mathrm{gmod-}A^1$ (considered as a
subcategory of $^\infty A$). In fact, it is a graded lift of Arkhipov's twisting functor
(as considered in \cite{AS}, \cite{KM}). Let $T_s:\mathrm{gmod-}A^1\rightarrow
\mathrm{gmod-}A^1$ denote its restriction. For $x\in W$ with reduced expression ${[x]}=s_{i_1}s_{i_2}\cdots s_{i_r}$ set
$T_{[x]}=T_{s_{i_1}}T_{s_{i_2}}\cdots T_{s_{i_r}}$. Set $A=A^1$.
\ensuremath{\mathfrak{B}}egin{proposition}\ensuremath{\lambda}abel{prop}
Let $x\in W$ and let $s$ be a simple reflection. Then the following holds:
\ensuremath{\mathfrak{B}}egin{enumerate}
\item The functor $T_{[x]}$ is (up to isomorphism) independent of the chosen
reduced expression.
\item Moreover, if $sx>x$ and $\Delta(x)\in\mathrm{gmod-}A$ denotes the graded lift
of the Verma module with simple head $L(x)$ (concentrated in degree zero), then
$T_s\Delta(x)\cong\Delta(sx)$ and $T_s\ensuremath{\mathfrak{n}}abla(sx)\cong\ensuremath{\mathfrak{n}}abla(x)$, where
$\ensuremath{\mathfrak{n}}abla(x)$ denotes the graded lift of the dual Verma module with socle
$L(x)$ (concentrated in degree zero).
\end{enumerate}
\end{proposition}
\ensuremath{\mathfrak{B}}egin{proof}
We consider now the adjunction morphism $b_s:\mathrm{ID}\rightarrow\tilde\theta_s^r$
between endofunctors on $\mathrm{mod-}_{}^nA$. Let $\tilde{T}_s$ denote the functor given by
taking the cokernel of $b_s$, restricted to $\mathrm{mod-}A$. Let $\tilde T_{[x]}=\tilde{T}_{s_1}\tilde{T}_{s_2}\cdots\tilde{T}_{s_r}$. Then $\tilde{T}_{[x]}$ does
not depend on the chosen reduced expression (\cite{Joseph}, \cite{KM}). If we show that
$\tilde{T}_{[x]}$ is indecomposable, then a graded lift is unique up to isomorphism and
grading shift, and the statement follows from the second part of the proposition. Set
$G=\tilde{T}_{[x]}$. Let us prove the indecomposability: We claim that the canonical evaluation
morphism $\mathrm{End}(G)\rightarrow \mathrm{End}_\mathfrak{g}(G P(w_0\cdot0))$,
$\ensuremath{\mathfrak{P}}hi\mapsto\ensuremath{\mathfrak{P}}hi_{P(w_0\cdot0)}$, is an isomorphism. Assume $\ensuremath{\mathfrak{P}}hi_{P(w_0\cdot0)}=0$. Let $P$
be a projective object in $\mathcal{O}_0$. Then there is a short exact sequence
\ensuremath{\mathfrak{B}}egin{equation}\ensuremath{\lambda}abel{eq:ses}
P\rightarrow\oplus_{I} P(w_0\cdot0)\rightarrow Q
\end{equation}
for some finite set $I$ and some module $Q$ having a Verma flag. (To see this consider the
projective Verma module. It is the unique Verma submodule of $P(w_0\cdot0)$, hence the desired
sequence exists. The existence of the sequence for any projective object follows then using
translation functors.) By \cite[Lemma 2.1]{AS}, we get an exact sequence $G P\rightarrow\oplus_{I}
GP(w_0\cdot0)\rightarrow GQ$. Hence $\ensuremath{\mathfrak{P}}hi_{P(w_0\cdot0)}=0$ implies $\ensuremath{\mathfrak{P}}hi_P=0$ for any
projective object $P$. Since $G$ is right exact, it follows $\ensuremath{\mathfrak{P}}hi=0$. Let
$g\in\mathrm{End}_\mathfrak{g}(G P(w_0\cdot0))$. Since $\mathrm{End}_\mathfrak{g}(G P(w_0\cdot0))\cong\mathrm{End}_\mathfrak{g}(P(w_0\cdot0))$ (\cite[Proposition 5.3]{AS}), $g$ defines
an endomorphism of $G$ when restricted to the additive category generated by $P(w_0\cdot0)$. Note
that (by taking the injective hull of $Q$) the sequence \eqref{eq:ses} gives rise to an exact sequence
\ensuremath{\mathfrak{B}}egin{equation*}
0\rightarrow P\rightarrow\oplus_{I} P(w_0\cdot0)\rightarrow \oplus_{I'} P(w_0\cdot0)
\end{equation*}
for some finite sets $I$, $I'$. Using again \cite[Lemma 2.1]{AS} we get an exact sequence
\ensuremath{\mathfrak{B}}egin{equation*}
0\rightarrow G P\rightarrow\oplus_{I} G P(w_0\cdot0)\rightarrow\oplus_{I'} G P(w_0\cdot0).
\end{equation*}
Hence $g$ defines an endomorphism $g_P$ of $P$. Standard arguments show that this is
independent of the chosen exact sequence. Since $G$ is right exact, $g$ extends uniquely
to an endomorphism $\ensuremath{\mathfrak{P}}hi$ of $G$. By construction $\ensuremath{\mathfrak{P}}hi_{P(w_0\cdot0)}=g$. This proves
the surjectivity. Since $\mathrm{End}_\mathfrak{g}(G P(w_0\cdot0))\cong
\mathrm{End}_\mathfrak{g}(P(w_0\cdot0))$ is a local ring, the functor $G$ is indecomposable.
This proves the first part of the proposition.
We have $\tilde{T}_s f(\Delta(x))\cong f(\Delta(sx))$, where $f$ denotes the grading forgetting
functor. Hence, $T_s(\Delta(x))\cong \Delta(sx)\ensuremath{\lambda}angle k\rangle$ for some $k\in\mathbb{Z}$. On the
other hand $\eta T_s\eta\Delta(x^{-1})\cong\Delta(x^{-1}s)$ (\cite[Theorem~3.6]{St}). Hence
$k=0$ and $T_s\Delta(x)\cong\Delta(sx)$. Forgetting the grading we have $\tilde{T}_s f(\ensuremath{\mathfrak{n}}abla(sx))\cong
f(\ensuremath{\mathfrak{n}}abla(x))$. On the other hand $\eta T_s\eta\ensuremath{\mathfrak{n}}abla((sx)^{-1})\cong\ensuremath{\mathfrak{n}}abla(x^{-1})$
(\cite[Theorem 3.10]{St}). The second part of the proposition follows.
\end{proof}
Since $T_{[x]}$ does not depend on the chosen reduced expression, we denote it
just $T_x$ in the following. Let $P(x)\in\mathrm{gmod-}A$ be the indecomposable
projective module with simple head $L(x)$ concentrated in degree zero. Set
$P=\ensuremath{\mathfrak{B}}igoplus_{x\in W}P(x)$. Let $T(x)$ denote the graded lift of an
indecomposable tilting module, characterized by the property that $\Delta(x)$
is a submodule and $\ensuremath{\mathfrak{n}}abla(x)$ is a quotient. Let $T=\ensuremath{\mathfrak{B}}igoplus_{x\in W}
T(x)$.
\ensuremath{\mathfrak{B}}egin{theorem}\ensuremath{\lambda}abel{tapp}
Let $x\in W$. There is an isomorphism of graded algebras
\ensuremath{\mathfrak{B}}egin{eqnarray*}
\mathrm{End}_{A}(P)\cong\mathrm{End}_{A}(T_x P).
\end{eqnarray*}
For $x=w_0$ we get in particular
\ensuremath{\mathfrak{B}}egin{eqnarray*}
\mathrm{End}_{A}(P)\cong\mathrm{End}_{A}(T).
\end{eqnarray*}
\end{theorem}
\ensuremath{\mathfrak{B}}egin{proof}
The first isomorphism follows directly from \cite[Lemma 2.1]{AS} and the definition of
$T_x$. For the second we claim that $T_{w_0} P(y)\cong T(w_0y)$. By
Proposition~\ref{prop} we have $T_{w_0}P(e)\cong \Delta(w_0)$. Hence, the statement
is true for $y=e$. Using translation functors we directly get $T_{w_0} P(y)\cong
T(w_0y)\ensuremath{\lambda}angle k\rangle$ for some $k\in\mathbb{Z}$. On the other hand $P(y)$ surjects onto
$\Delta(y)$. Then $T_{w_0}P(y)$ surjects onto $T_{w_0}\Delta(y)$. The latter is
isomorphic to $\ensuremath{\mathfrak{n}}abla(w_0y)$, since $\Delta(w_0)\cong\ensuremath{\mathfrak{n}}abla(w_0)$.
\end{proof}
Let $\ensuremath{\mathfrak{p}}$ be a parabolic subalgebra of $\mathfrak{g}$ with corresponding parabolic
subgroup $W_\ensuremath{\mathfrak{p}}$ of $W$. Let $\mathcal{O}_0^\ensuremath{\mathfrak{p}}$ be the full subcategory of $\mathcal{O}_0$
given by locally $\ensuremath{\mathfrak{p}}$-finite objects. If $P\in\mathcal{O}_0$ is a minimal projective
generator, then its maximal quotient $P^\ensuremath{\mathfrak{p}}$ contained in $\mathcal{O}_0^\ensuremath{\mathfrak{p}}$ is a
minimal projective generator of $\mathcal{O}_0^\ensuremath{\mathfrak{p}}$ and $\mathrm{End}_\mathfrak{g}(P^\ensuremath{\mathfrak{p}})$
inherits a grading from $A=\operatorname{End}_\mathfrak{g}(P)$. We will consider then the category
$\mathrm{gmod-}A^\ensuremath{\mathfrak{p}}$ as the full subcategory of $\mathrm{gmod-}A$ given by all objects having
only composition factors of the form $L(x)\langle k\rangle$, where $k\in\mathbb{Z}$ and $x\in W^\ensuremath{\mathfrak{p}}$, the
set of shortest coset representatives of $W_\ensuremath{\mathfrak{p}}\backslash W$. Let $\Delta^\ensuremath{\mathfrak{p}}(x)\in\mathrm{gmod-}A^\ensuremath{\mathfrak{p}}$,
$\ensuremath{\mathfrak{n}}abla^\ensuremath{\mathfrak{p}}(x)$ be the standard graded lifts of the standard and costandard modules in
$\mathcal{O}_0^\ensuremath{\mathfrak{p}}$ (which were denoted by $\Delta(x)$ and $\ensuremath{\mathfrak{n}}abla(x)$ in Section~\ref{s6}). Let
$T^\ensuremath{\mathfrak{p}}$ be the module $T$ from Corollary~\ref{c6.101} for the category $\mathrm{gmod}-A^\ensuremath{\mathfrak{p}}$.
Then Theorem~\ref{t6.1} generalizes to the following
\ensuremath{\mathfrak{B}}egin{corollary}\ensuremath{\lambda}abel{capp}
$\mathrm{End}_{A^\ensuremath{\mathfrak{p}}}(T^\ensuremath{\mathfrak{p}})$ is positively graded, moreover, it is generated in degrees
$0$ and $1$. Furthermore, $\ensuremath{\mathfrak{n}}abla$ admits an LT-resolution.
\end{corollary}
\ensuremath{\mathfrak{B}}egin{proof}
Let $w=w_0^\ensuremath{\mathfrak{p}}\in W^\ensuremath{\mathfrak{p}}$ be the longest element. Then $\Delta^\ensuremath{\mathfrak{p}}(w)$
is a tilting module and canonically a quotient of $\Delta(w)\cong
T_w\Delta(e)=T_wP(e)$. Using translation functors we get that $T^\ensuremath{\mathfrak{p}}$ is a
quotient of $T_w P$. Hence, there is a surjection of graded algebras from
$\mathrm{End}_{A}(T_wP)\cong\mathrm{End}_{A}(P)$ onto
$\mathrm{End}_{A}(T^\ensuremath{\mathfrak{p}})$. Hence $\mathrm{End}(T_w P^\ensuremath{\mathfrak{p}})\cong\operatorname{End}_{A^\ensuremath{\mathfrak{p}}}(T^\ensuremath{\mathfrak{p}})$
is positively graded and generated in degrees 0 and 1. The existence of the
resolution follows using the same arguments as in the proof of Theorem~\ref{t6.1}.
\end{proof}
\ensuremath{\mathfrak{B}}egin{center}
{\ensuremath{\mathfrak{B}}f Acknowledgments}
\end{center}
The research was done during the visit of the second author to Uppsala
University, which was partially supported by the Royal Swedish
Academy of Sciences, and by The Swedish Foundation for International
Cooperation in Research and Higher Education (STINT). This support and
the hospitality of Uppsala University are gratefully acknowledged. The
first author was also partially supported by the Swedish Research Council.
We thank Catharina Stroppel for many useful remarks and comments on the
preliminary version of the paper and for writing the Appendix.
\ensuremath{\mathfrak{B}}egin{thebibliography}{99999}
\ensuremath{\mathfrak{B}}ibitem[ADL]{ADL}
{\em I.~{\'A}goston, V.~Dlab, E.~Luk{\'a}cs}, Quasi-hereditary extension
algebras. Algebr. Represent. Theory 6 (2003), no. 1, 97--117.
\ensuremath{\mathfrak{B}}ibitem[AS]{AS}
{\em H.~H.~Andersen, C.~Stroppel},
Twisting functors on $\mathcal{O}$, Represent. Theory 7 (2003), 681-699.
\ensuremath{\mathfrak{B}}ibitem[BGS]{BGS}
{\em A.~Beilinson, V.~Ginzburg, W.~Soergel}, Koszul duality patterns in
representation theory. J. Amer. Math. Soc. 9 (1996), no. 2, 473--527.
\ensuremath{\mathfrak{B}}ibitem[BG]{BG}
{\em I.~Bernstein, S.~Gelfand}, Tensor products of finite and infinite
di\-men\-sio\-nal representations of semisimple Lie algebras,
Compositio math. 41 (1980), 245-285.
\ensuremath{\mathfrak{B}}ibitem[BGG]{BGG}
{\em I.~N.~Bernstein, I.~M.~Gelfand, S.~I.~Gelfand}, A certain category of
${\mathfrak g}$-modules. (Russian) Funkcional. Anal. i Prilo\v zen. 10
(1976), no. 2, 1--8.
\ensuremath{\mathfrak{B}}ibitem[CPS]{CPS1}
{\em E.~Cline, B.~Parshall, L.~Scott}, Finite-dimensional algebras and
highest weight categories. J. Reine Angew. Math. 391 (1988), 85--99.
\ensuremath{\mathfrak{B}}ibitem[DR1]{DR}
{\em V.~Dlab, C.~M.~Ringel}, Quasi-hereditary algebras. Illinois J. Math.
33 (1989), no. 2, 280--291.
\ensuremath{\mathfrak{B}}ibitem[DR2]{DR2}
{\em V.~Dlab, C.~M.~Ringel}, The module theoretical approach to
quasi-hereditary algebras. Representations of algebras and related topics
(Kyoto, 1990), 200--224, London Math. Soc. Lecture Note Ser., 168,
Cambridge Univ. Press, Cambridge, 1992.
\ensuremath{\mathfrak{B}}ibitem[Di]{Di}
{\em J.~Dixmier}, Enveloping algebras. Graduate Studies in Mathematics,
11. American Mathematical Society, Providence, RI, 1996.
\ensuremath{\mathfrak{B}}ibitem[FKM]{FKM}
{\em V.~Futorny, S.~K{\"o}nig, V.~Mazorchuk}, $\mathcal{S}$-subcategories
in $\ensuremath{\mathcal{O}}$. Manuscripta Math. 102 (2000), no. 4, 487--503.
\ensuremath{\mathfrak{B}}ibitem[GM]{GM}
{\em S.~Gelfand, Yu.~Manin}, Methods of homological algebra. Second
edition. Springer Monographs in Mathematics. Springer-Verlag, Berlin, 2003.
\ensuremath{\mathfrak{B}}ibitem[Ha]{Ha}
{\em D.~Happel}, Triangulated categories in the representation theory
of finite-di\-men\-sio\-nal algebras. London Math. Soc. Lecture Note
Ser., vol. 119, Camb\-ri\-dge University Press, Cambridge, 1988.
\ensuremath{\mathfrak{B}}ibitem[Ir1]{Ir1}
{\em R.~Irving}, Projective modules in the category $\mathcal{O}_S$:
Loewy series. Trans. Amer. Math. Soc. 291 (1985), no. 2, 733--754.
\ensuremath{\mathfrak{B}}ibitem[Ir2]{Ir}
{\em R.~Irving}, The socle filtration of a Verma module. Ann. Sci.
{\'E}cole Norm. Sup. (4) 21 (1988), no. 1, 47--65.
\ensuremath{\mathfrak{B}}ibitem[Ja]{Ja}
{\em J.~C.~Jantzen}, Einh{\"u}llende Algebren halbeinfacher Liealgebren,
Springer 1983.
\ensuremath{\mathfrak{B}}ibitem[Jo]{Joseph}
{\em A.~Joseph}, Completion functors in the ${\mathcal O}$ category.
Noncommutative harmonic analysis and Lie groups (Marseille, 1982),
80-106, Lecture Notes in Math., 1020, Springer, Berlin, 1983.
\ensuremath{\mathfrak{B}}ibitem[KM]{KM}
{\em O.~Khomenko, V.~Mazorchuk},
On Arkhipov's and Enright's functors. Preprint 2003:07, Uppsala University,
to appear in Math. Zeitschrift.
\ensuremath{\mathfrak{B}}ibitem[MS]{MS}
{\em R.~Martínez Villa, M.~Saor{\'i}n}, Koszul equivalences and dualities.
Pacific J. Math. 214 (2004), no. 2, 359--378.
\ensuremath{\mathfrak{B}}ibitem[Ri]{Ri}
{\em C.~M.~Ringel}, The category of modules with good filtrations over
a quasi-hereditary algebra has almost split sequences. Math. Z. 208
(1991), no. 2, 209--223.
\ensuremath{\mathfrak{B}}ibitem[RC]{RC}
{\em A.~Rocha-Caridi}, Splitting criteria for $\mathfrak{g}$-modules
induced from a parabolic and the Bernstein-Gelfand-Gelfand resolution
of a finite-dimensional, irreducible $\mathfrak{g}$-module. Trans.
Amer. Math. Soc. 262 (1980), no. 2, 335--366.
\ensuremath{\mathfrak{B}}ibitem[So1]{So}
{\em W.~Soergel}, Kategorie $\mathcal{O}$, perverse Garben und Moduln
{\"u}ber den Koinvarianten zur Weylgruppe. (German) J. Amer. Math. Soc.
3 (1990), no. 2, 421--445.
\ensuremath{\mathfrak{B}}ibitem[So2]{So2}
{\em W.~Soergel}, Character formulas for tilting modules over Kac-Moody
algebras. Represent. Theory 2 (1998), 432--448.
\ensuremath{\mathfrak{B}}ibitem[So3]{SHC}
{\em W.~Soergel}, The combinatorics of Ha\-rish\--Chand\-ra bimodules, Journal
Reine An\-gew. Math. 429 (1992), 49-74.
\ensuremath{\mathfrak{B}}ibitem[St1]{St}
{\em C.~Stroppel}, Category $\mathcal{O}$: gradings and translation
functors. J. Algebra 268 (2003), no. 1, 301--326.
\ensuremath{\mathfrak{B}}ibitem[St2]{St2}
{\em C.~Stroppel}, Category $\mathcal{O}$: quivers and endomorphism rings
of projectives. Represent. Theory 7 (2003), 322--345.
\ensuremath{\mathfrak{B}}ibitem[St2]{Sthom}
{\em C.~Stroppel}. Homomorphisms and extensions of principal series
representations. J. Lie Theory 13 (2003), no. 1, 193-212.
\end{thebibliography}
\ensuremath{\mathfrak{n}}oindent
Volodymyr Mazorchuk, Department of Mathematics, Uppsala University,
Box 480, 751 06, Uppsala, SWEDEN,
e-mail: {\tt mazor\symbol{64}math.uu.se},
web: {``http://www.math.uu.se/$\tilde{\ensuremath{\mathfrak{h}}space{1mm}}$mazor/''}.
\ensuremath{\mathfrak{n}}oindent
Serge Ovsienko, Department of Mechanics and Mathematics, Kyiv Taras
Shevchenko University, 64, Volodymyrska st., 01033, Kyiv, Ukraine,
e-mail: {\tt ovsienko\symbol{64}zeos.net}.
\end{document} |
\begin{document}
\title{Cospectral lifts of graphs} \footnotetext[1]{\tt Corresponding Author, Email
address: [email protected]}
\begin{abstract}\rm\noindent
We prove that for a pair of cospectral graphs $G$ and $H$, there
exist non-trivial lifts $G'$ and $H'$ of them which are cospectral.
Moreover, for a pair of cospectral graphs on $6$ vertices, we
find some cospectral lifts of them.
\noindent {\bf AMS Subject Classification:} 05C50.\\
{\bf Keywords:} lifts of graphs, eigenvalues, cospectral graphs.
\end{abstract}
\section{Introduction}
Let $G=(V,E)$ be a simple graph on the vertex set
$V(G)=\{v_1,v_2,\ldots,v_n\}$ and edge set $E$. The {\it adjacency
matrix} of $G$ is an $n$ by $n$ matrix $A(G)$ whose $(i,j)$-th
entry is $1$ if vertices $v_i$ and $v_j$ are adjacent and $0$,
otherwise. The \textit{spectrum} of $G$ is the multi-set of
eigenvalues of $A(G)$. Two graphs $G$ and $G'$ are called {\it
cospectral} if they share the same spectrum. We say $G$ is
\textit{determined by spectrum} (\textit{DS} for short) if it has
no non-isomorphic cospectral mate.
The problem of constructing cospectral graphs has been
investigated by several authors. For a survey of results in this
area we refer the reader to \cite{DH,DH1,RA}. In \cite{GHRATA} the
authors have used the concept of $m$-cospectrality to construct new
cospectral graphs. Haemers et al.\ in \cite{HABR} have considered
the Godsil-McKay switching method to construct non-isomorphic
cospectral graphs; see the paper for more details. In this article
we use the concept of lifts of graphs to construct new non-isomorphic
cospectral graphs from given small cospectral pairs of graphs.
\section{Preliminaries}
In this section we mention some basic definitions and results
which will be used during the paper. We denote by $\dot{E}$ the
set of all ordered pairs $\{(i,j)|\textrm{ } i< j , \textrm{ }
\{v_i,v_j\}\in E \}$. For an Abelian group of order $k$, say $Gr$,
a $k-$Abelian signature $s$ of the graph $G$ is a map
$s:\dot{E}\longrightarrow Gr$. A $k$-Abelian lift of the graph
$G$, associated with the signature $s$, which is denoted by
$G(s)$, is a graph on the vertex set $V(G)\times [k]$
($[k]=\{0,1,\ldots,k-1\}$, $Gr=(\{g_0,g_1,\ldots,g_{k-1}\},*$)),
where for any $(i,j)\in \dot{E}$ and $a,b\in [k]$ there is an edge
between $(v_i,a)$ and $(v_j,b)$ if and only if $s(i,j)*g_a=g_b$.
Note that in the graph $G(s)$, for any $(i,j)\in \dot{E}$, there
is a matching between the vertex sets $V_i=\{v_i\}\times [k]$ and
$V_j=\{v_j\}\times [k]$. If a graph has $m$ edges, there may be
$k^m$ different $k$-Abelian lifts of $G$, since the sets $V_i,V_j$
can be matched in $k$ different ways. If the signature $s$ maps all
pairs to the same element $g\in Gr$, then we denote the
corresponding lift $G(s)$ with $G_g$. We illustrate the
definition of the $k$-lifts of a graph in the following figure.
In the following graph the graph $G$ is the cycle $C_4,$ and the
corresponding signature is $s:\dot{E}\longrightarrow
\mathbb{Z}_2$, with $s(1,3)=0,s(1,4)=0,s(2,3)=1,s(2,4)=0.$
${\hspace{1cm}\put(100,0){\line(0,1){50}}\put(100,0){\line(1,1){50}}\put(150,0){\vdots
}\put(150,12){\vdots}\put(150,24){\vdots}\put(150,36){\vdots}\put(150,48){.}
\put(150,0){\line(-1,1){50}}\put(100,0){\circle*{5}}\put(92,-5){4}\put(93,53){1}\put(153,-5){3}\put(153,53){2}\put(150,50){\circle*{5}}
\put(100,50){\circle*{5}}\put(150,0){\circle*{5}}\put(170,25){$\longrightarrow$}
\put(210,0){\circle*{4}}\put(220,0){\circle*{4}}\put(210,50){\circle*{4}}\put(220,50){\circle*{4}}
\put(270,0){\circle*{4}}\put(260,0){\circle*{4}}\put(270,50){\circle*{4}}\put(260,50){\circle*{4}}
\put(210,0){\line(0,1){50}}\put(220,0){\line(0,1){50}}\put(210,0){\line(1,1){50}}\put(220,0){\line(1,1){50}}
\put(270,0){\line(-1,1){50}}\put(260,0){\line(-1,1){50}}\put(270,0){\line(-1,5){10}}\put(260,0){\line(1,5){10}}}\put(150,50){\circle*{5}}
\put(100,50){\circle*{5}}\put(150,0){\circle*{5}}$
$$\hspace{-1.5cm}G\hspace{3.5cm} G(s)$$
$$\textrm{\textbf{Figure 1.} 2-\textrm{lift of } G \textrm{ corresponding to the signature }s}$$
Let $Gr=(\{g_1=1,g_2,\ldots,g_{n}\},*)$ be a group of order $n$.
For any group element say $g\in Gr$ there is an $n\times n$
permutation matrix $P_g$ in correspondence, which is defined
below: $$P_g(i,j)=
\begin{cases}
1 & \text{ \textrm{if} } g_i*g=g_j, \\
0 & \text{otherwise}.
\end{cases}
$$
\begin{lem} {\rm The function $\phi:Gr\rightarrow SL(n,\mathbb{R})$,
where $SL(n,\mathbb{R})$ is the set of $n\times n$ real
non-singular matrices and $\phi(g)=P_g$, is a group homomorphism.}
\end{lem}
The eigenvalues of the graph $G(s)$ have been studied in the
literature. For instance in the following theorem from
\cite{MOTA} the authors have obtained the eigenvalues of Abelian
$t$-lifts. See \cite{MOTA} for more details and the notations.
\begin{thm}\label{mota} { \rm Let $G$ be a multigraph and $\phi$ be a
signature assignment to an Abelian group. Let $\beta$ be a common
basis of eigenvectors of the permutation matrices in the image of
$\phi$. For every $\mathbf{x}\in \beta$, let $A_\mathbf{x}$ be
the matrix obtained from the adjacency matrix of $G$ by replacing
any $(u,v)$-entry of $A(G)$ by $\sum_{(e,u,v)\in
\overrightarrow{E}(G)}\lambda_\mathbf{x}(\phi(e,u,v))$. Then the
spectrum of the $t$-lift $G(\phi)$ of $G$ is the multiset union
of the spectra of the matrices $A_\mathbf{x} (\mathbf{x} \in
\beta)$.}
\end{thm}
\section{Main result}
Our main problem here is: ``for a given pair of cospectral graphs $G$
and $H$, are there $k$-Abelian signatures $s,s'$ such that $G(s)$ and
$H(s')$ are cospectral?''. We look for general answers to this
question.
It is known that for $l,l'\in Gr$ the permutation matrices
$P_l,P_{l'}$ commute, so they have common basis of eigenvectors.
The following theorem is a straight consequence of Theorem
\ref{mota}.
\begin{thm} \label{ma} {\rm Let G be a graph and $s$ be a $k$-cyclic signature of $G$. Let
$\beta$ be a common basis of eigenvectors of the permutation
matrices in the image of $s$. For every $\mathbf{x} \in \beta$,
let $A_\mathbf{x}$ be the matrix defined below
$$A_\mathbf{x}(i,j)=
\begin{cases}
\lambda_\mathbf{x}(P_{s(i,j)}) & i<j, \\
0& i=j,\\
\lambda_\mathbf{x}^{-1}(P_{s(i,j)}) & i>j.
\end{cases}
$$ Then
the spectrum of $G(s)$ is the multi-set union of the spectra of
the matrices $A_\mathbf{x}(\mathbf{x}\in\beta)$.}
\end{thm}
\begin{lem} {\rm Let $Gr$ be a group of order $n$. For any $g\in Gr$, any eigenvalue of the permutation
matrix $P_g$ is an $n$th root of unity.}
\end{lem}
\begin{proof} {The assertion follows from the fact that the order of any element
in the group divides the order of the group. Hence $g^n=1$, and
therefore $P_g^n=I_n.$ Hence the minimal polynomial of $P_g$, say
$m(P_g,x)$, divides the polynomial $x^n-1,$ and thus the assertion
follows. }
\end{proof}
\begin{lem} {\rm Let $G$ and $H$ be cospectral graphs on $n$ vertices
and let $Gr$ be a finite group of order $t$. If for $g\in Gr$ the
matrix $P_g$ is symmetric, then the graphs $G_g$ and $H_g$ are
cospectral. }
\end{lem}
\begin{proof} {Since the signature corresponds the fixed element $g$ to all the edges
of the graph $G$, and $P_g^{-1}=P_g,$ then by Theorem \ref{mota},
the eigenvalues of the graph $G_g$ are the multi-set union of the
matrices $\omega_i A(G)$, where $\omega_i$'s are the eigenvalues
of $P_g$ for $i=1,2,\ldots,n.$ On the other hand the eigenvalues
of $\omega_i A(G)$ are $\omega_i \lambda_j$ where $\lambda_j$ is
the $j$'th eigenvalue of $G$. Hence the spectrum of $G_g$ and
$H_g$ are the multi-set $\{\omega_i
\lambda_j\}_{i=1,\ldots,t}^{j=1,\ldots,n}.$
}
\end{proof}
\subsection{Examples} We consider two cospectral graphs $G$ and
$H$, shown in Figure 1. We try to find possible Abelian lifts
of them, say $G(s)$ and $H(s')$, which are also cospectral.
\includegraphics[width=18cm]{ax2.pdf}\\
$$ \textrm{\textbf{Figure 1.} Two cospectral graphs } G \textrm{ and } H$$
We first consider all possible $t$-Abelian signatures of the
graphs $G$ and $H$, suppose that the matrices
$A(G)_\mathbf{x},A(H)_\mathbf{x}$, corresponding to the graphs
$G$, $H$ and their prescribed signatures, which is introduced in
Theorem \ref{mota} are of the following general forms,
$$A(G)_{\mathbf{x}}=\begin{pmatrix}
0 & u & 0 & 0 & 0 & 0 \\
u^{-1} & 0 & v & w & 0 & 0 \\
0 & v^{-1} & 0 & x & y & 0 \\
0 & w^{-1} & x^{-1} & 0 & z & 0 \\
0 & 0 & y^{-1} & z^{-1} & 0 & r \\
0 & 0 & 0 & 0 & r^{-1} & 0
\end{pmatrix}\textrm{, }A(H)_\mathbf{x}=\begin{pmatrix}
0 & u_1 & v_{1} & 0 & 0 & 0 \\
u^{-1}_{1} & 0 & w_{1} & 0 &0 & 0 \\
v^{-1}_{1} & w^{-1}_{1} & 0 & x_{1} & y_{1} & z_{1} \\
0 &0 & x^{-1}_{1} & 0 & 0 & 0 \\
0 & 0 & y^{-1}_{1} & 0 & 0 & r_1 \\
0 & 0 & z^{-1}_{1} & 0 & r^{-1}_{1} & 0
\end{pmatrix}.$$
Note that $r,u,v,\ldots,z,r_1,u_1,v_1,\ldots,z_1$ are complex
variables that stand for the eigenvalues of the permutation
matrices corresponding to each edge.
find sufficient conditions on the signatures such that the
corresponding lifts become cospectral.
\begin{thm} \label{as}{ Let $s,s'$ be $k$-Abelian lifts on the graphs $G,H$
respectively. If the following situations hold, the graphs $G(s)$
and $H(s')$ are cospectral.
{\large \begin{itemize}
\item $\frac{w}{v}=\frac{y}{z}$
\item $2(\frac{xv}{w}+\frac{w}{xv})=\frac{y_1r_1}
{z_1}+\frac{z_1}{y_1r_1}+\frac{u_1w_1}{v_1}+\frac{v_1}{u_1w_1}.$
\end{itemize}}
}
\end{thm}
\begin{proof} {We consider all possibilities for the matrices $A(G)_\mathbf{x},A(H)_\mathbf{x}$
corresponding to the graphs $G,H$. Comparing the coefficients of
$\chi(A(G)_\mathbf{x},t),\chi(A(H)_\mathbf{x},t)$, the equality
holds if and only if
$$2=\frac{wz}{vy}+\frac{vy}{wz},\hspace{2cm}(1)$$
$$\frac{xz}{y}+\frac{y}{xz}+\frac{xv}{w}+\frac{w}{xv}=\frac{y_1r_1}
{z_1}+\frac{z_1}{y_1r_1}+\frac{u_1w_1}{v_1}+\frac{v_1}{u_1w_1}.\hspace{2cm}(2)$$
The first equation follows by comparing the coefficients of $t^2$
and the second one follows by comparing the coefficients of
$t,t^3$. Consider the first equality above, note the variables
are the $n$'th roots of unity hence the equality $(1)$ holds if
and only if $wz=vy$, hence the first assertion of the statement
follows. The second assertion follows by the Equation (2) and the
first equality. Hence the graphs according to the mentioned
signatures are cospectral.}
\end{proof}
\begin{cor} {The following constraints on the variables $r,u,v,\ldots,z,r_1,u_1,v_1,\ldots,z_1$
will give cospectral lifts of the graphs $G$ and $H$.
$$z=\frac{vy}{w},\textrm{ }u_1=y_1=x,\textrm{ }w_1=r_1=v,\textrm{ }z_1=w$$ }
\end{cor}
\begin{ex} {\rm In the graphs $G$ and $H$ the following signatures have the condition stated in
Theorem \ref{as}. Hence the graphs $G(s)$ and $H(s')$ are
cospectral. The group members are denoted in the cyclic
representation.
$$s(1,2)=(1,2,3),s(2,3)=(1,3,2),s(2,4)=\textrm{id},$$$$ s(3,4)=(1,3,2),s(3,5)=(1,3,2),s(4,5)=(1,2,3),s(5,6)=(1,2)$$
$$s'(1,2)=(1,3,2),s'(1,3)=\textrm{id},s'(2,3)=(1,3,2),$$$$s'(3,4)=\textrm{id},s'(3,5)=(1,3,2),s'(3,6)=\textrm{id},s'(5,6)=(1,3,2)$$
The adjacency matrices of the graphs $G(s)$ and $H(s')$ are of
the following forms.
$$A(G(s)) = \begin{pmatrix}0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0\\ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0\\ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0\\ 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0\\ 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0\\
0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0\\ 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0\\ 0, 0, 0, 0, 0, 1,
0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0\\ 0, 0, 0, 1, 0, 0, 0, 0, 0,
0, 1, 0, 0, 1, 0, 0, 0, 0\\ 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0\\ 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1,
0, 0, 0\\ 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0\\
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0\\ 0, 0, 0,
0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0\\0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1\\ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0\\ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0\\ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
0, 0\end{pmatrix},A(H(s'))=\begin{pmatrix}0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0\\ 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\\
0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\\ 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\\
0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0\\ 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\\
0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1\\ 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0\\
1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0\\ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0\\
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\\ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\\
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1\\ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0\\
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0\\ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0\\
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0\\ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0\end{pmatrix}.$$}
\end{ex}
\end{document} |
\begin{document}
\title{AUTHOR GUIDELINES FOR IGARSS 2020 MANUSCRIPTS}
\begin{abstract}
The abstract should appear at the top of the left-hand column of text, about
0.5 inch (12 mm) below the title area and no more than 3.125 inches (80 mm) in
length. Leave a 0.5 inch (12 mm) space between the end of the abstract and the
beginning of the main text. The abstract should contain about 100 to 150
words, and should be identical to the abstract text submitted electronically
along with the paper cover sheet. All manuscripts must be in English, printed
in black ink.
\end{abstract}
\begin{keywords}
One, two, three, four, five
\end{keywords}
\section{Introduction}
\label{sec:intro}
These guidelines include complete descriptions of the fonts, spacing, and
related information for producing your proceedings manuscripts. Please follow
them and if you have any questions, direct them to Conference Management
Services, Inc.: Phone +1-979-846-6800 or Fax +1-979-846-6900 or email
to \[email protected]+.
\section{Formatting your paper}
\label{sec:format}
All printed material, including text, illustrations, and charts, must be kept
within a print area of 7 inches (178 mm) wide by 9 inches (229 mm) high. Do
not write or print anything outside the print area. The top margin must be 1
inch (25 mm), except for the title page, and the left margin must be 0.75 inch
(19 mm). All {\it text} must be in a two-column format. Columns are to be 3.39
inches (86 mm) wide, with a 0.24 inch (6 mm) space between them. Text must be
fully justified.
\section{PAGE TITLE SECTION}
\label{sec:pagestyle}
The paper title (on the first page) should begin 1.38 inches (35 mm) from the
top edge of the page, centered, completely capitalized, and in Times 14-point,
boldface type. The authors' name(s) and affiliation(s) appear below the title
in capital and lower case letters. Papers with multiple authors and
affiliations may require two or more lines for this information.
\section{TYPE-STYLE AND FONTS}
\label{sec:typestyle}
To achieve the best rendering in the proceedings, we
strongly encourage you to use Times-Roman font. In addition, this will give
the proceedings a more uniform look. Use a font that is no smaller than ten
point type throughout the paper, including figure captions.
In ten point type font, capital letters are 2 mm high. If you use the
smallest point size, there should be no more than 3.2 lines/cm (8 lines/inch)
vertically. This is a minimum spacing; 2.75 lines/cm (7 lines/inch) will make
the paper much more readable. Larger type sizes require correspondingly larger
vertical spacing. Please do not double-space your paper. True-Type 1 fonts
are preferred.
The first paragraph in each section should not be indented, but all the
following paragraphs within the section should be indented as these paragraphs
demonstrate.
\section{MAJOR HEADINGS}
\label{sec:majhead}
Major headings, for example, ``1. Introduction'', should appear in all capital
letters, bold face if possible, centered in the column, with one blank line
before, and one blank line after. Use a period (``.'') after the heading number,
not a colon.
\subsection{Subheadings}
\label{ssec:subhead}
Subheadings should appear in lower case (initial word capitalized) in
boldface. They should start at the left margin on a separate line.
\subsubsection{Sub-subheadings}
\label{sssec:subsubhead}
Sub-subheadings, as in this paragraph, are discouraged. However, if you
must use them, they should appear in lower case (initial word
capitalized) and start at the left margin on a separate line, with paragraph
text beginning on the following line. They should be in italics.
\section{PRINTING YOUR PAPER}
\label{sec:print}
Print your properly formatted text on high-quality, 8.5 x 11-inch white printer
paper. A4 paper is also acceptable, but please leave the extra 0.5 inch (12 mm)
empty at the BOTTOM of the page and follow the top and left margins as
specified. If the last page of your paper is only partially filled, arrange
the columns so that they are evenly balanced if possible, rather than having
one long column.
In LaTeX, to start a new column (but not a new page) and help balance the
last-page column lengths, you can use the command ``$\backslash$pagebreak'' as
demonstrated on this page (see the LaTeX source below).
\section{PAGE NUMBERING}
\label{sec:page}
Please do {\bf not} paginate your paper. Page numbers, session numbers, and
conference identification will be inserted when the paper is included in the
proceedings.
\section{ILLUSTRATIONS, GRAPHS, AND PHOTOGRAPHS}
\label{sec:illust}
Illustrations must appear within the designated margins. They may span the two
columns. If possible, position illustrations at the top of columns, rather
than in the middle or at the bottom. Caption and number every illustration.
All illustrations should be clear when printed on a black-only printer. Color
may be used.
Since there are many ways, often incompatible, of including images (e.g., with
experimental results) in a LaTeX document, below is an example of how to do
this \cite{Lamp86}.
\begin{figure}
\caption{Example of placing a figure with experimental results.}
\label{fig:res}
\end{figure}
\pagebreak
\section{FOOTNOTES}
\label{sec:foot}
Use footnotes sparingly (or not at all!) and place them at the bottom of the
column on the page on which they are referenced. Use Times 9-point type,
single-spaced. To help your readers, avoid using footnotes altogether and
include necessary peripheral observations in the text (within parentheses, if
you prefer, as in this sentence).
\section{COPYRIGHT FORMS}
\label{sec:copyright}
You must also electronically sign the IEEE copyright transfer
form when you submit your paper. We {\bf must} have this form
before your paper can be sent to the reviewers or published in
the proceedings. The copyright form is provided through the IEEE
website for electronic signature. A link is provided upon
submission of the manuscript to enter the IEEE Electronic
Copyright Form system.
\section{REFERENCES}
\label{sec:ref}
List and number all bibliographical references at the end of the paper. The references can be numbered in alphabetic order or in order of appearance in the document. When referring to them in the text, type the corresponding reference number in square brackets as shown at the end of this sentence \cite{C2}.
\end{document} |
\begin{document}
\title{Finding Alternate Features in Lasso}
\begin{abstract}
We propose a method for finding alternate features missing in the Lasso optimal solution.
In ordinary Lasso problem, one global optimum is obtained and the resulting features are interpreted as task-relevant features.
However, this can overlook possibly relevant features not selected by the Lasso.
With the proposed method, we can provide not only the Lasso optimal solution but also possible alternate features to the Lasso solution.
We show that such alternate features can be computed efficiently by avoiding redundant computations.
We also demonstrate how the proposed method works in the 20 newsgroup data, which shows that reasonable features are found as alternate features.
\end{abstract}
\section{Introduction}
\label{sec:intro}
Feature selection is a procedure that selects a subset of relevant features (i.e., variables) for model construction.
It helps users to understand which features are contributing to the model.
Hence, it is a most basic approach for model interpretation in machine learning.
It is important to note that the quality of selected features heavily affects the user's trust on the resulting model.
It is common that domain experts have some prior knowledge about the data, in particular about which features are important for the task at hand.
If such features are not selected by the feature selection, the experts will not trust the model because it does not agree with their intuition.
If the model is not trusted by the experts, the model will never be used even if it may perform well in practice.
It is therefore important for feature selection methods to meet the user's demand by not missing important features.
One of the most common feature selection methods is Lasso~\cite{tibshirani1996regression,chen2001atomic}.
We consider a prediction problem with $n$ observations and $p$ predictors.
Here, we have a response vector $y \in \mathcal{Y}^n$ and a predictor matrix~$X \in \mathbb{R}^{n \times p}$ where $\mathcal{Y}$ is the domain of the response (e.g., $\mathcal{Y}=\mathbb{R}$ for regression, and $\mathcal{Y}=\{-1, 1\}$ for classification).
In the Lasso problem, we seek $\beta \in \mathbb{R}^p$ that minimizes $\ell_1$-regularized objective function:
\begin{align}
L(\beta) := f(X\beta, y) + \rho \| \beta \|_1
\label{eq:lasso}
\end{align}
where $f: \mathbb{R}^n \times \mathcal{Y}^n \to \mathbb{R}_{\ge 0}$ is a loss function, and $\rho \in \mathbb{R}_{\ge 0}$ is a regularization parameter.
The optimal solution $\beta^* \in \mathbb{R}^p$ to \eqref{eq:lasso} is usually sparse;
therefore, we can extract a set of features as the support of the optimal solution, $\mathrm{supp}(\beta^*)=\{i : |\beta^*_i| > 0\}$.
In ordinary Lasso problem, one global optimum $\beta^*$ is obtained and the resulting features are interpreted as task-relevant features.
However, this can overlook possibly relevant features not selected by the Lasso.
Indeed, Lasso can recover \emph{true} features only under some limited conditions~\cite{knight2000asymptotics,wainwright2009sharp}.
We are therefore in a risk of missing important features if the conditions are not met.
One particular example of the Lasso failure is when two features $x_i$ and $x_j$ are highly correlated.
In such a situation, Lasso tends to select only one of these two features (e.g., $\beta_i^* \neq 0$ while $\beta_j^* = 0$).
That is, we may overlook one of these two features although both of them may contribute to the task equally.
In this study, we propose a method for finding task-relevant features missing in the Lasso optimal solution $\beta^*$.
In particular, we seek whether there is any alternate feature $x_j$ that can replace a feature $x_i$ selected by the Lasso.
With this procedure, we can provide not only the Lasso optimal solution but also alternate features missed in the solution to the users.
Even if some important features are missed in the Lasso optimal solution, such features are likely to be selected as a part of alternate features.
Hence, the user's trust on the resulting model will be greatly improved because they can find out that important features are actually not missed but replaced with some other features.
Moreover, the users can customize the model based on the information about alternate features; one can remove the feature $x_i$ selected by the Lasso and add an alternate feature $x_j$ to the model instead so that the model agrees with the user's background knowledge.
\section{Finding Alternate Features}
\label{sec:method}
Given the Lasso optimal solution $\beta^*$, we seek whether there is any alternate feature $x_j$ with $\beta^*_j=0$ that can replace a feature $x_i$ selected by the Lasso (i.e., $\beta^*_i \neq 0$).
We solve this problem by optimizing $\beta_j$ in (\ref{eq:lasso}) while fixing as $\beta_i = 0$ and $\beta_k = \beta^*_k \, (k \neq i, j)$.
The optimization problem can be expressed as
\begin{align}
\beta^{(i)}_j = \mathop{\mathrm{argmin}}\limits_{\beta_j} f(z^{(i)} + X_j \beta_j, y) + \rho | \beta_j | ,
\label{eq:sublasso}
\end{align}
where $X_j$ denotes the $j$-th column of $X$ and $z^{(i)} = \sum_{k \neq i} X_k \beta^*_k$.
If $\beta^{(i)}_j \neq 0$, the feature $x_j$ can be an alternative of $x_i$.
We note that the problem (\ref{eq:sublasso}) is a univariate optimization problem, and can be solved easily, e.g., by using the proximal gradient method~\cite{boyd2004convex}.
To find out all possible $(i, j)$-pairs, we basically need to solve the problem (\ref{eq:sublasso}) for all $i \in \mathrm{supp}(\beta^*)$ and $j \in \mathrm{supp}(\beta^*)^c$.
Here, we show that we actually need to solve the problem (\ref{eq:sublasso}) only on a fraction of $j$ instead of all $j \in \mathrm{supp}(\beta^*)^c$.
This is because we can check that $\beta^{(i)}_j = 0$ \emph{without} solving the problem (\ref{eq:sublasso}) from the optimality condition; $\beta^{(i)}_j = 0$ holds when $|X_j^\top \nabla f(z^{(i)}, y)| \le \rho$ where $\nabla f$ is the derivative of $f$ over the first element.
Hence, we need to solve the problem (\ref{eq:sublasso}) only for $j \in \mathrm{supp}(\beta^*)^c$ with $|X_j^\top \nabla f(z^{(i)}, y)| > \rho$.
\paragraph{Scoring Alternate Features}
By using the proposed method, we can find a set of alternate features of $x_i$, namely $\{j : \beta_j^{(i)} \neq 0\}$.
Among several alternate features, it is of great interest to find alternate features that closely relate to the original feature $x_i$.
Here, we propose a scoring method for each alternate feature so that we can find such interesting features.
The proposed scoring method is based on the Lasso objective function $L(\beta)$.
Let $\beta^*$ be the Lasso optimal solution, and $\beta^{i \rightarrow j}$ be the alternate solution defined by $\beta^{i \rightarrow j}_i = 0$, $\beta^{i \rightarrow j}_j = \beta_j^{(i)}$, and $\beta^{i \rightarrow j}_k = \beta_k^* \, (k \neq i, j)$.
The relevance of the alternate feature $x_j$ to the original feature $x_i$ can be measured by using the increase of the objective function value, ${\rm score}(x_i \rightarrow x_j) = L(\beta^{i \rightarrow j}) - L(\beta^*)$.
If the alternate feature $x_j$ is almost identical to the original feature $x_i$, it is likely that the objective function value $L(\beta^{i \rightarrow j})$ is almost the same as $L(\beta^*)$, which results in small ${\rm score}(x_i \rightarrow x_j)$.
Hence, we can use this score to order alternate features so that we can find out particularly related features.
\section{Experimental Results on 20 Newsgroups Data}
\label{sec:exp}
The 20 Newsgroups\footnote{\url{http://qwone.com/~jason/20Newsgroups/}} is a dataset for text categorization.
In this experiment, we tried to find discriminative words between the two categories\footnote{The experiment codes are available at \url{https://github.com/sato9hara/LassoVariants}}.
In the first task, we considered categories \texttt{ibm.pc.hardware} and \texttt{mac.hardware}, and in the second task, we considered \texttt{sci.med} and \texttt{sci.space}.
As a feature vector $x$, we used tf-idf weighted bag-of-words expression, with stop words and some common verbs removed.
See \tablename~\ref{tab:data} for the detail of the datasets.
The tasks were to find discriminative words that were relevant to classification.
Because the task was binary classification between the two categories, we used Lasso logistic regression~\cite{lee2006efficient}.
The logistic loss function is defined by $f(z, y) := \sum_{m=1}^n \log (\exp{(-y_m z_m)} + 1)$.
In the experiment, we set the regularization parameter as $\rho=0.001n$, and derived the Lasso optimal solution $\beta^*$.
As the optimal solution $\beta^*$, 39 words and 31 words were selected as relevant for classification in the first task and the second task, respectively.
To find out all $(i, j)$-pairs, in the first task, the naive approach required solving the problem (\ref{eq:sublasso}) for $39 \times 11,609 \approx 450,000$ times, while, by using the proposed checking method, this number was reduced to only $53$ times which was almost 10,000 times smaller than the naive approach.
The found feature pairs are shown in \figurename~\ref{fig:news20_1} and \ref{fig:news20_2}.
From \figurename~\ref{fig:news20_1}, we can find several interesting feature pairs.
For instance, the alternate word \emph{drive} is paired with many words such as \emph{windows}, \emph{bus}, and \emph{bios}, which are all related to the Windows machine (i.e., the words related to \texttt{ibm.pc.hardware}).
Another interesting finding is the pair \emph{centris} and \emph{610}, both of which are from the Mac's product name Centris 610 (i.e., the words related to \texttt{mac.hardware}).
Not limited to the examples above, but the found pairs seem to be quite reasonable.
Hence, providing these found alternate words together with the Lasso optimal solution will make the resulting model more trustful to the users compared to just providing the Lasso optimal solution.
\begin{table}[t]
\centering
\caption{20 Newsgroups Data: Each feature corresponds to each word appearing in the texts.}
\label{tab:data}
\begin{tabular}{c|cc}
& \# of features $p$ & \# of observations $n$ \\ \hline
\texttt{ibm.pc.hardware} vs \texttt{mac.hardware} & 11,648 & 1,168 \\
\texttt{sci.med} vs \texttt{sci.space} & 21,369 & 1,187
\end{tabular}
\end{table}
\begin{figure}
\caption{Found feature pairs in the \texttt{ibm.pc.hardware} vs \texttt{mac.hardware} task.}
\label{fig:news20_1}
\end{figure}
\figurename~\ref{fig:news20_2} shows that two words \emph{space} and \emph{gordon} are connected with many alternate words.
The word \emph{space} and its alternate words such as \emph{shuttle} and \emph{satellite} are convincing as these words are all related with the category \texttt{sci.space}.
On the other hand, the word \emph{gordon} and its alternate words such as \emph{banks}, \emph{skepticism}, and \emph{shameful} seem to be relevant to neither of the categories \texttt{sci.med} nor \texttt{sci.space}.
These words actually come from the frequently appearing signature in \texttt{sci.med}: \emph{Gordon Banks N3JXP, [email protected], Skepticism is the chastity of the intellect, and it is shameful to surrender it too soon.}
That is, the model is trained to find this signature and classify the text into the category \texttt{sci.med}.
In practice, this model is not preferable as the model is too specialized to this specific task; the trained model may perform poorly for the texts that do not include this signature.
To see the result in the second task in detail, we scored alternate features of \textit{space} and \textit{gordon} as shown in \figurename~\ref{fig:news20_score}.
Here, we can find two interesting results.
First, the word \textit{space} is particularly closely related with \textit{shuttle} among several alternate words.
This is a reasonable result because the existence of the word \textit{shuttle} in the text implies that the text's category is \texttt{sci.space} rather than \texttt{sci.med}.
Second, the word \textit{gordon} is particularly closely related with the words appearing in the signature.
Indeed, all of the top 12 words (from \textit{banks} to \textit{soon} in the figure) come from the signature.
This result presents one particular use case of the proposed method.
By scoring several alternate features, we can find out seemingly not preferable features and remove them in the training phase so that the resulting model agrees with our intuition.
\begin{figure}
\caption{Found feature pairs in the \texttt{sci.med} vs \texttt{sci.space} task.}
\label{fig:news20_2}
\end{figure}
\begin{figure}
\caption{Alternate features of \textit{space} and \textit{gordon}.}
\label{fig:news20_score}
\end{figure}
\section{Conclusion}
\label{sec:concl}
We proposed a method for finding alternate features missing in the Lasso optimal solution.
With the proposed method, we can provide not only the Lasso optimal solution but also alternate features to the users.
We believe that providing such surrogate information helps the users to interpret the model and encourage them to use the model in practice.
There remain several open issues.
First, we need to make it easy for the users to check all the found feature pairs.
The bipartite graph expression as in \figurename~\ref{fig:news20_1} and \ref{fig:news20_2} would be one possible approach, although it may become too complicated when there are more features.
We think the bipartite graph clustering will be a promising method to simplify the graph.
Second, in the current study we considered replacing only one feature with another feature.
This framework can be naturally extended to replacing multiple features.
Developing an efficient algorithm for the generalized problem remains open.
Finally, there remains a fundamental question that in what circumstances we can find \emph{true} alternate features.
To provide a reliable surrogate information to the users, we need to study the theoretical aspects of the proposed method.
\end{document} |
\begin{document}
\parindent = 0pt
\parskip = 8pt
\begin{abstract}
Two new Banach space moduli, that involve weakly convergent sequences, are introduced. It is shown that if either one of these moduli is strictly less than 1 then the Banach space has
Property($K$).
\end{abstract}
\title{$D(X) < 1$ or $\hat{D}(X) < 1$ implies Property($K$)}
\section{Introduction}
A Banach space, $X$, has the weak fixed point property, w-FPP, if every nonexpansive mapping, $T$, on every weak compact convex nonempty subset, $C$, has a fixed point. The past forty or so years has seen a number of Banach space properties shown to imply the w-FPP. Some such properties are weak normal structure, Opial's condition, Property($K$) and Property($M$). Here two new moduli are introduced and are linked to one of these properties, Property($K$). More information on the w-FPP and associated Banach space properties and moduli can be found in [3].
The key definitions and terminology are below.
\begin{definition}
Sims, [6]
A Banach space $X$ has property($K$) if there exists $K \in [0, 1)$ such that whenever $x_n \rightharpoonup 0, \lim_{ n \rightarrow \infty} \| x_n \| = 1 \mbox{ and } \liminf_{n \rightarrow \infty} \| x_n - x \| \leqslant 1 \mbox{ then } \| x \| \leqslant K.$
\end{definition}
\begin {definition}
Opial [5]
A Banach space has Opial's condition if
\[ x_n \rightharpoonup 0 \ \mbox {and } x \not = 0 \mbox { implies } \limsup_n \| x_n \| < \limsup_n \| x_n - x \|. \]
The condition remains the same if both the $\limsup$s are replaced by $\liminf$s.
\end {definition}
Later a modulus was introduced to gauge the strength of Opial's condition and a stronger version of the condition was defined.
\begin{definition}
Lin, Tan and Xu, [4]
Opial's modulus is
\[ r_X(c) = \inf \{ \liminf_{n\rightarrow \infty} \| x_n - x \| - 1: c \geqslant 0, \| x \| \geqslant c, x_n \rightharpoonup 0 \mbox{ and }\liminf_{n\rightarrow \infty} \| x_n \| \geqslant 1 \}. \]
$X$ is said to have uniform Opial's condition if $r_X(c) > 0$ for all $c > 0.$ See [4] for more details.
\end{definition}
There is a direct link between Opial's modulus and Property($K$). Dalby proved in [1] that $r_{X}(1) > 0$ is equivalent to $X$ having Property($K$). This will be used in the next section.
The two new moduli are defined next.
\begin{definition}
Let $X$ be a Banach space. Let
\[ D(X) = \sup\{ \liminf_{n \rightarrow \infty}\| x_n - x \|: x_n \rightharpoonup x, \| x_n \| = 1 \mbox{ for all } n\} \]
and let
\[ \hat{D}(X) = \sup\{ \| x \|: x_n \rightharpoonup x, \| x_n \| = 1 \mbox{ for all } n\}. \]
\end{definition}
So $0 \leqslant D(X) \leqslant 2$ and $ \hat{D}(X) \leqslant 1.$
Some values for $D(X)$ are $D(\ell_1) = 0, D(c_0) = 1 \mbox{ and } D(\ell_p) = 2^{1/p}.$
The reason that these two moduli are introduced is that in [2] Dalby showed that if in the dual, $X^*,$ a certain weak* convergent sequence, $(w_n^*),$ satisfies either one of two properties then $X$ satisfies the w-FPP. Let $w_n^* \stackrel{*}{\rightharpoonup} w^* \mbox { where } \| w^* \| \leqslant 1;$ if $w^*$ is `deep' within the dual unit ball, or $w_n^* - w^*$ is eventually `deep' within the dual unit ball, then $X$ has the w-FPP. So $D(X^*) < 1$ or $\hat{D}(X^*) < 1$ ensures this.
The w-FPP is known to be separably determined so all Banach spaces are assumed to be separable.
\section{Results}
\begin{proposition}
Let $X$ be a separable Banach space. If $D(X) < 1$ then $r_X(1) > 0.$ That is, $X$ has Property($K$).
\end{proposition}
\begin{proof}
Let $x_n \rightharpoonup 0, \liminf_{n \rightarrow \infty}\| x_n \| \geqslant 1 \mbox{ and } \| x \| \geqslant 1.$
Using the lower semi-continuity of the norm, $\liminf_{n \rightarrow \infty}\| x_n + x \| \geqslant \| x \| \geqslant 1.$ By taking subsequences if necessary we may assume that $\| x_n + x \| \not = 0$ for all $n.$
Now $ \left \| \frac{\displaystyle {x_n + x}}{ \displaystyle {\| x_n + x \|}} \right \| = 1 \mbox{ for all } n, {\frac{\displaystyle{x_n + x}}{ \displaystyle{\| x_n + x \| }}} \rightharpoonup \frac{\displaystyle{x}}{ \displaystyle \liminf_{n \rightarrow \infty}\| x_n + x \| }.$
For ease of reading let $\alpha = \liminf_{n \rightarrow \infty}\| x_n + x \|.$
Then
\begin{align*}
\liminf_{n \rightarrow \infty}\left \| \frac{\displaystyle x_n + x}{\| x_n + x \|} - \frac{\displaystyle x}{\displaystyle \alpha} \right \|
& = \liminf_{n \rightarrow \infty} \frac{\displaystyle 1}{\| x_n + x \|} \liminf_{n \rightarrow \infty}\left \| x_n + x - \| x_n + x \| \frac{\displaystyle x}{\displaystyle \alpha} \right \| \\
& = \frac{\displaystyle 1}{\alpha} \liminf_{n \rightarrow \infty}\left \| x_n + x - \| x_n + x \| \frac{\displaystyle x}{\displaystyle \alpha} \right \| \\
& = \frac{\displaystyle 1}{\alpha} \liminf_{n \rightarrow \infty}\left \| x_n - \left (\frac{\displaystyle \| x_n + x \|}{ \alpha} - 1 \right ) x \right \| \\
& \geqslant \frac{\displaystyle 1}{\alpha} \left | \liminf_{n \rightarrow \infty} \| x_n \| - \liminf_{n \rightarrow \infty} \left | \frac{\displaystyle \| x_n + x \|}{ \alpha} - 1 \right | \| x \| \right | \\
& = \frac{\displaystyle 1}{\alpha} \left ( \liminf_{n \rightarrow \infty} \| x_n \| - \left | \frac{\displaystyle \alpha}{\alpha} - 1 \right | \| x \| \right ) \\
& = \frac{\displaystyle 1}{\alpha} \liminf_{n \rightarrow \infty} \| x_n \| \\
& \geqslant \frac{\displaystyle 1}{\alpha} \\
& = \frac{\displaystyle 1}{\liminf_{n \rightarrow \infty}\| x_n + x \|}.
\end{align*}
We have
\[ D(X) \geqslant \liminf_{n \rightarrow \infty}\left \| \frac{\displaystyle x_n + x}{\displaystyle \| x_n + x \|} - \frac{\displaystyle x}{\displaystyle \liminf_{n \rightarrow \infty}\| x_n + x \|} \right \| \geqslant \frac{1}{\displaystyle \liminf_{n \rightarrow \infty} \| x_n + x \|}. \qquad \dag \]
So $\liminf_{n \rightarrow \infty} \| x_n + x \| \geqslant \frac{\displaystyle 1}{\displaystyle D(X)}.$
This means that $r_X(1) + 1 \geqslant \frac{\displaystyle 1}{\displaystyle D(X)} \mbox{ or } r_X(1) \geqslant \frac{\displaystyle 1}{\displaystyle D(X)} - 1 > 0.$
\end{proof}
A second way to prove this proposition is via a contradiction as shown below.
\begin{proof}
Assume that $D(X) < 1 \mbox{ and } r_X(1) \not > 0.$ Then $r_X(1) = 0.$
Given $\epsilon > 0 $ there exists a sequence $(x_n) \mbox{ in } X \mbox{ where } x_n \rightharpoonup 0, \newline \liminf_{n \rightarrow \infty}\| x_n \| \geqslant 1 \mbox{ and } x \in X, \| x \| \geqslant 1 \mbox{ such that } \liminf_{n \rightarrow \infty}\| x_n + x \| < 1 + \epsilon.$
Therefore $1 \leqslant \| x \| \leqslant \liminf_{n \rightarrow \infty}\| x_n + x \| < 1 + \epsilon.$ So apart from the last inequality the set up is the same as in the previous proof and this proof follows the same pathway. So now jumping to a line above, the one labeled with \dag,
\[ D(X) \geqslant \frac{1}{\displaystyle \liminf_{n \rightarrow \infty} \| x_n + x \|} > \frac{1}{ \displaystyle 1 + \epsilon }. \]
Letting $\epsilon \rightarrow 0 \mbox{ gives } D(X) \geqslant 1 \mbox{ but } D(X) < 1.$
So the desired contradiction is arrived at.
\end{proof}
Next we turn to the second modulus.
\begin{proposition}
Let $X$ be a separable Banach space. If $\hat{D}(X) < 1$ then $r_X(1) > 0.$ That is, $X$ has Property($K$).
\end{proposition}
\begin{proof}
Let $x_n \rightharpoonup 0, \liminf_{n \rightarrow \infty}\| x_n \| \geqslant 1 \mbox{ and } \| x \| \geqslant 1.$
Now $x_n + x \rightharpoonup x \mbox{ so } \liminf_{n \rightarrow \infty}\| x_n + x \| \geqslant \| x \| \geqslant 1.$ Without loss of generality we may assume $\| x_n + x \| \not = 0$ for all $n.$
Then $\left \| \frac{\displaystyle x_n + x}{\displaystyle \|x_n + x \|} \right \| = 1 \mbox{ for all } n, \frac{\displaystyle x_n + x}{\displaystyle \|x_n + x \|} \rightharpoonup \frac{\displaystyle x}{ \displaystyle \liminf_{n \rightarrow \infty}\| x_n + x \| }.$
Hence $1 > \hat{D}(X) \geqslant \frac{\displaystyle \| x \|}{\displaystyle \liminf_{n \rightarrow \infty}\| x_n + x \| }$ leading to
\begin{align*}
\| x \| & \leqslant \liminf_{n \rightarrow \infty}\| x_n + x \| \hat{D}(X) \\
\liminf_{n \rightarrow \infty}\| x_n + x \| & \geqslant \frac{\displaystyle \| x \|}{\displaystyle \hat{D}(X)} \\
& \geqslant \frac{\displaystyle 1}{\displaystyle \hat{D}(X)} \\
\mbox{ Thus } r_X(1) + 1 & \geqslant \frac{\displaystyle 1}{\displaystyle \hat{D}(X)} \\
r_X(1) & \geqslant \frac{\displaystyle 1}{\displaystyle \hat{D}(X)} - 1 \\
& > 0.
\end{align*}
\end{proof}
A second way to prove this proposition is by finding a value of $K$ for Property($K$).
\begin{proof}
Let $x_n \rightharpoonup 0, \| x_n \| = 1 \mbox{ for all } n \mbox{ and }\liminf_{n \rightarrow \infty}\| x_n - x \| \leqslant 1.$
If $\liminf_{n \rightarrow \infty}\| x_n - x \| = 0$ then because $\| x \| \leqslant \liminf_{n \rightarrow \infty}\| x_n - x \|$ we have $x = 0$ and $K$ can be taken as zero.
So assume $\liminf_{n \rightarrow \infty}\| x_n - x \| > 0$ and by taking subsequences if necessary, assume $\| x_n - x \| \not = 0$ for all $n.$
Using the same argument as in the previous proof
\[\| x \| \leqslant \liminf_{n \rightarrow \infty}\| x_n - x \| \hat{D}(X) \leqslant \hat{D}(X) < 1.\]
So $K$ can be taken as $\hat{D}(X).$
\end{proof}
\end{document} |
\begin{document}
\title{Quantum information and precision measurement\thanks{CALT-68-2217}}
\begin{abstract}
We describe some applications of quantum information theory to the analysis of
quantum limits on measurement sensitivity. A measurement of a weak force acting
on a quantum system is a determination of a classical parameter appearing in
the master equation that governs the evolution of the system; limitations on
measurement accuracy arise because it is not possible to distinguish perfectly
among the different possible values of this parameter.
Tools developed in the study of quantum information and computation can be
exploited to improve the precision of physics experiments; examples include
superdense coding, fast database search, and the quantum Fourier transform.
\end{abstract}
\section{Introduction: Distinguishability of superoperators}
The exciting recent developments in the theory of quantum information and
computation have already established an enduring legacy. The two most
far-reaching results --- that a quantum computer (apparently) can solve
problems that will forever be beyond the reach of classical computers
\cite{qc}, and that quantum information can be protected from errors if
properly encoded \cite{qec} --- have surely earned a prominent place at the
foundations of computer science.
The implications of these ideas for the future of physics are less clear, but
we expect them to be profound. In particular, we anticipate that our deepening
understanding of quantum information will lead to new strategies for pushing
back the boundaries of quantum-limited measurements. Quantum entanglement,
quantum error correction, and quantum information processing can all be
exploited to improve the information-gathering capability of physics
experiments.
In a typical quantum-limited measurement, a classical signal is conveyed over a
quantum channel \cite{mabuchi}. Nature sends us a message, such as the value of
a weak force, that can be regarded as a classical parameter appearing in the
Hamiltonian of the apparatus (or more properly, if there is noise, its master
equation). The apparatus undergoes a quantum operation $\$(a)$, and we are to
extract as much information as we can about the parameter(s) $a$ by choosing an
initial preparation of the apparatus, and a positive-operator-valued measure
(POVM) to read it out. Quantum information theory should be able to provide a
theory of the {\sl distinguishability of superoperators}, a measure of how much
information we can extract that distinguishes one superoperator from another,
given some specified resources that are available for the purpose. This
distinguishability measure would characterize the inviolable limits on
measurement precision that can be achieved with fixed resources.
Many applications of quantum information theory involve the problem of
distinguishing nonorthogonal quantum {\sl states}. For example, a density
operator $\rho_a $ is chosen at random from an ensemble ${\cal
E}=\{\rho_a,p_a\}$ (where $p_a$ is an {\it a priori} probability), and a
measurement is performed to extract information about which $\rho_a$ was
chosen. The problem of distinguishing {\sl superoperators} is rather
different, but the two problems are related. For example, let us at first
ignore noise, and also suppose that the classical force we are trying to detect
is static. Then we are trying to identify a particular time-independent
Hamiltonian $H_a$ that has been drawn from an ensemble $\{H_a,p_a\}$. We may
choose a particular initial pure state $|\psi_0\rangle$, and then allow the
state to evolve, as governed by the unknown Hamiltonian, for a time $t$; our
ensemble of possible Hamiltonians generates an ensemble of pure states
\begin{equation}
\{|\psi_a(t)\rangle = e^{-itH_a}|\psi_0\rangle, p_a\}~.
\end{equation}
Since our goal is to gain as much information as possible about the applied
Hamiltonian, we should choose the initial state $|\psi_0\rangle$ so that the
resulting final states are maximally distinguishable.
There are many variations on the problem, distinguished in part by the
resources we regard as most valuable. We might have the freedom to choose the
elapsed time as we please, or we might impose constraints on $t$. We might
have the freedom to modify the Hamiltonian by adding an additional ``driving''
term that is under our control. We might use an {\sl adaptive} strategy, where
we make repeated (possibly weak) measurements, and our choice of initial state
or driving term in later measurements takes into account the information
already collected in earlier measurements \cite{wiseman_adaptive}.
Imposing an appropriate cost function on resources is an important aspect of
the formulation of the problem, particularly in the case of the detection of a
static (DC) signal. For example, we could in principle repeat the measurement
procedure many times to continually improve the accuracy of our estimate. In
this respect, the problem of distinguishing superoperators does not have quite
so fundamental a character as the problem of distinguishing states, as in the
latter case the no-cloning principle \cite{no_clone} prevents us from making
repeated measurements on multiple copies of the unknown state. But for a
time-dependent signal that stays ``on'' for a finite duration, there will be a
well-defined notion of the optimal strategy for distinguishing one possible
signal from another, once our apparatus and its coupling to the classical
signal have been specified. Still, for the sake of simplicity, we will mostly
confine our attention here to the case of DC signals.
We don't know exactly what shape this nascent theory of the distinguishability
of superoperators should take, but we hope that further research can promote
the development of new strategies for performing high-precision measurements.
On the one hand we envision a program of research that will be relevant to real
laboratory situations. On the other hand, we seek results that are to some
degree robust and general (not tied to some particular model of decoherence, or
to a particular type of coupling between quantum probe and classical signal).
Naturally, there is some tension between these two central desiderata; rather
than focus on a specific experimental context, we lean here toward more
abstract formulations of the problem.
Our discussion is far from definitive; its goal is to invite a broader
community to consider these issues. We will mostly be content to observe that
some familiar concepts from the theory of quantum information and computation
can be translated into tools for the measurement of classical forces. Some
examples include superdense coding, fast database search, and the quantum
Fourier transform.
Naturally, the connections between quantum information theory and precision
measurement have been recognized previously by many authors. Especially
relevant is the work by Wootters \cite{wootters}, by Braunstein\cite{braun},
and by Braunstein and Caves \cite{braunstein} on state distinguishability and
parameter estimation, and by Braginsky and others \cite{braginsky} on quantum
nondemolition measurement. Though what we have to add may be relatively modest,
we hope that it may lead to further progress.
\section{Superdense coding: improved distinguishability through entanglement}
\label{sec:superdense}
Recurring themes of quantum information theory are that entanglement can be a
valuable resource, and that entangled measurements sometimes can collect more
information than unentangled measurements. It should not be surprising, then,
if the experimental physicist finds that the best strategies for detecting a
weak classical signal involve the preparation of entangled states and the
measurement of entangled observables.
Suppose, for example, that our apparatus is a single qubit, whose
time-independent Hamiltonian (aside from an irrelevant additive constant), can
be expressed as
\begin{equation}
H_{\vec a}=\vec a\cdot \vec \sigma~;
\end{equation}
here $\vec a=(a_1,a_2,a_3)$ is an unknown three-vector, and $\sigma_{1,2,3}$
are the Pauli matrices. (We may imagine that a spin-${1\over 2}$ particle with
a magnetic moment is employed to measure a static magnetic field.) By preparing
an initial state of the qubit, allowing the qubit to evolve, and then
performing a single measurement, we can extract at best one bit of information
about the magnetic field (as Holevo's theorem \cite{holevo} ensures that the
optimal POVM in a two-dimensional Hilbert space can acquire at most one bit of
information about a quantum state).
If we have two qubits, and measure them one at a time, we can collect at best
two bits of information about the magnetic field. In principle, this could be
enough to distinguish perfectly among four possible values of the field. In
practice, for a generic choice of four Hamiltonians labeled by vectors $\vec
a^{(1,2,3,4)}$, the optimal information gain cannot be achieved by measuring
the qubits one at a time. Rather a better strategy exploits quantum
entanglement.
An improved strategy can be formulated by following the paradigm of superdense
coding \cite{wiesner}, whereby shared entanglement is exploited to enhance
classical communication between two parties. To implement superdense coding,
the sender (Alice) and the receiver (Bob) use a shared Bell state
\begin{equation}
|\phi^+\rangle= {1\over\sqrt{2}}\left(|00\rangle + |11\rangle\right)
\end{equation}
that they have prepared previously. Alice applies one of the four unitary
operators $\{I,\sigma_1,\sigma_2,\sigma_3\}$ to her member of the entangled
pair, and then sends it to Bob. Upon receipt, Bob possesses one of the four
mutually orthogonal Bell states\begin{eqnarray}
\label{bellbasis}
|\phi^+\rangle & = & {1\over\sqrt{2}}\left(|00\rangle+|11\rangle\right) = I
\otimes I |\phi^+\rangle ~,\cr
|\psi^+\rangle & = & {1\over\sqrt{2}}\left(|01\rangle+|10\rangle\right) =
\sigma_1 \otimes I |\phi^+\rangle ~,\cr
-i|\psi^-\rangle & = & {-i\over\sqrt{2}}\left(|01\rangle-|10\rangle\right) =
\sigma_2 \otimes I |\phi^+\rangle ~,\cr
|\phi^-\rangle & = & {1\over\sqrt{2}}\left(|00\rangle-|11\rangle\right) =
\sigma_3 \otimes I |\phi^+\rangle~;\end{eqnarray}
by performing an entangled Bell measurement (simultaneous measurements of the
commuting collective observables $\sigma_1\otimes\sigma_1$ and
$\sigma_3\otimes\sigma_3$), Bob can perfectly distinguish the states. Although
only one qubit passes from Alice to Bob, two classical bits of information are
transmitted and successfully decoded. In fact, this enhancement of the
transmission rate is optimal -- with shared entanglement, no more than two
classical bits can be carried by each transmitted qubit \cite{hausladen}.
The lesson of superdense coding is that entanglement can allow us to better
distinguish
operations on quantum states, and we may apply this method to the problem of
distinguishing Hamiltonians.\footnote{This idea was suggested to us by Chris
Fuchs \cite{fuchs_private}.} Let us imagine that the magnitude of the magnetic
field is known, but not its direction -- then we can choose our unit of time so
that $|\vec a|=1$. We may prepare a pair of qubits in the entangled state
$|\phi^+\rangle$, and expose only one member of the pair to the magnetic field
while the other remains well shielded. In time $t$, the state evolves to
\begin{eqnarray}
|\psi_{\hat a}(t)\rangle & \equiv &\exp\left(-itH_{\hat a}\otimes
I\right)|\phi^+\rangle \cr
& = & \left[\cos t(I\otimes I) -i \sin t(\hat a\cdot\vec\sigma\otimes I)
\right]|\phi^+\rangle\cr
& = & \cos t |\phi^+\rangle \cr
&&-i\sin t
\left[a_1|\psi^+\rangle -ia_2|\psi^-\rangle + a_3|\phi^+\rangle\right]~;
\end{eqnarray}
the inner product between the states arising from Hamiltonians $H_{\hat a}$ and
$H_{\hat b}$ becomes
\begin{equation}
\label{superip}
\langle\psi_{\hat a}(t)|\psi_{\hat b}(t)\rangle = \cos^2 t + (\hat a \cdot \hat
b) \sin^2 t~.
\end{equation}
For these states to be orthogonal, we require
\begin{equation}
\hat a \cdot \hat b = -\cot^2 t~.
\end{equation}
Since $\cot^2 t \ge 0$, the states are not orthogonal for any value of $t$
unless the two magnetic field directions $\hat a$ and $\hat b$ are separated by
at least $90^\circ$.
Now suppose that the magnetic field (of known magnitude) points in one of three
directions that are related by three-fold rotational symmetry. These
directions could form a planar trine with
$\hat a\cdot \hat b=\hat a\cdot \hat c= \hat b\cdot \hat c = -1/2$, or a
``lifted trine'' with angle $\theta$ between each pair of directions, where
$-1/2\le \cos\theta \le 0$. For any such trine of field directions, we may
evolve for a time $t$ such that
\begin{equation}
\cot^2 t = -\cos\theta ~,
\end{equation}
and perform an (entangled) orthogonal measurement to determine the field. At
the point of tetrahedral symmetry, $\cos\theta=-1/3$, we may add a fourth field
direction such that
the inner product for each pair of field directions is $-1/3$; then all four
directions can be perfectly distinguished by Bell measurement.
In this case of four field directions with tetrahedral symmetry, the two-bit
measurement outcome achieves a two-bit information gain, if the four directions
were equally likely {\it a priori}. In contrast, no adaptive strategy in which
single qubits are measured one at a time can attain a two-bit information gain.
This separation between the information gain attainable through entangled
measurement and that attainable through adaptive nonentangled measurement, for
the problem of distinguishing Hamiltonians, recalls the analogous separation
noted by Peres and Wootters \cite{peres} for the problem of distinguishing
nonorthogonal states.
\section{Grover's database search: improved distinguishability through driving}
Another instructive example is Grover's method \cite{grover} for searching an
unsorted database, which (as formulated by Farhi and Gutmann \cite{farhi}) we
may interpret as a method for improving the distinguishability of a set of
Hamiltonians by adding a controlled driving term.
Consider an $N$-dimensional Hilbert space with orthonormal basis
$\{|x\rangle\}, ~x=0,1,2,\dots,N-1$, and suppose that the Hamiltonian for this
system is known to be one of the $N$ operators
\begin{equation}
H_x=E|x\rangle\langle x| ~.
\end{equation}
We are to perform an experiment that will allow us to estimate the value of
$x$.
We could, for example, prepare the initial state ${1\over \sqrt{2}}(|y\rangle +
|y'\rangle)$, allow the system to evolve for a time $T=\pi/E$, and then perform
an orthogonal measurement in the basis $|\pm\rangle={1\over \sqrt{2}}(|y\rangle
\pm |y'\rangle)$. Then we will obtain the outcome $|-\rangle$ if and only if
one of $y,y'$ is $x$. Searching for $x$ by this method, we would have to
repeat the experiment for O($N$) distinct initial states to have any
reasonable chance of successfully inferring the value of $x$.
Our task becomes easier if we are able to modify the Hamiltonian by adding a
term that we control to drive the system. We choose the driving term to be
\begin{equation}
\label{grover_drive}
H_D=E|s\rangle\langle s|~,
\end{equation}
where $|s\rangle$ denotes the state
\begin{equation}
|s\rangle={1\over \sqrt{N}}\sum_{y=0}^{N-1}|y\rangle~.
\end{equation}
Then the full Hamiltonian is
\begin{equation}
H'_x=H_x+H_D=E(|x\rangle\langle x| + |s\rangle\langle s|)~,
\end{equation}
and we can readily verify that the vectors
\begin{equation}
|E_{\pm}\rangle\equiv |s\rangle \pm |x\rangle
\end{equation}
are (unconventionally normalized!) eigenstates of $H'_x$ with the eigenvalues
\begin{equation}
E_{\pm}=E\left(1\pm {1\over\sqrt{N}}\right)~.
\end{equation}
We may prepare the initial state
\begin{equation}
|s\rangle= {1\over 2}(|E_+\rangle + |E_-\rangle)~;
\end{equation}
since the energy splitting is $\Delta E=2E/\sqrt{N}$, after a time
\begin{equation}
T=\pi/\Delta E= \pi\sqrt{N}/2E~,
\end{equation}
this state flops to the state
\begin{equation}
{1\over 2}(|E_+\rangle - |E_-\rangle)=|x\rangle~.
\end{equation}
Thus, by performing an orthogonal measurement, we can learn the value of $x$
with certainty \cite{farhi}.
The driving term we have chosen is the continuous time analog of the iteration
employed by Grover \cite{grover} for rapid searching. And as the Grover search
algorithm can be seen to be optimal, in the sense that a marked state can be
identified with high probability with the minimal number of oracle calls
\cite{bbbv}, so the driving term we have chosen is optimal in the sense that it
enables us to identify the value of the classical parameter labeling the
Hamiltonian in the minimal time, at least asymptotically for $N$ large. (In a
physics experiment, the ``oracle'' is Nature, whose secrets we are eager to
expose.) For this Grover-Farhi-Gutmann problem, we can make a definite
statement about how to optimize expenditure of a valuable resource (time) in
the identification of a system Hamiltonian.
We also note that adding a driving term can sometimes improve the efficacy of
the superdense coding method described in \S\ref{sec:superdense}. For example,
in the case of three magnetic fields of equal magnitude with threefold
symmetry, but with an
angle between fields of less than $90^\circ$, applying a driving field along
the line of
symmetry can make the resultant field directions perfectly distinguishable.
In fact, Beckman \cite{beckman} has shown that for any three field vectors
forming a triangle that is isosceles or nearly isosceles, a suitable driving
field can always be found such that the field directions can be distinguished
perfectly.
\section{Distinguishing two alternatives}
Let's consider the special case in which our apparatus is known to be governed
by one of two possible Hamiltonians $H_1$ or $H_2$. If the system is two
dimensional, we are trying to distinguish two possible values $\vec a,\vec b$
of the magnetic field with a spin-${1\over 2}$ probe. Suppose for simplicity
that the two fields have the same magnitude (normalized to unity), but
differing directions.
Assuming that we are unable to modify the Hamiltonian by adding a driving term,
the optimal strategy is to choose an initial polarization vector that bisects
the two field directions $\hat a, \hat b$. Depending on the actual value of the
field, the polarization will precess on one of two possible cones. If the angle
between $\hat a$ and $\hat b$ is $\theta\ge 90^\circ$, then the two possible
polarizations will eventually be back-to-back; an orthogonal measurement
performed at that time will distinguish $\hat a$ and $\hat b$ perfectly. But if
$\theta < 90^\circ$, the two polarizations are never back-to-back; the best
strategy is to wait until the angle between the polarizations is maximal, and
to then perform the orthogonal measurement that best distinguishes them. We
cannot perfectly distinguish the two field directions by this method.
On the other hand, if we are able to apply a known driving magnetic field in
addition to the unknown field that is to be determined, then two fields $\vec
a$ and $\vec b$ can always be perfectly distinguished. If we apply the field
$-\vec b$, then the problem is one of distinguishing the trivial Hamiltonian
from
\begin{equation}
H_{\rm diff}=(\vec a - \vec b)\cdot \vec\sigma~.
\end{equation}
We can choose an initial polarization orthogonal to $\vec a - \vec b$, and wait
just long enough for $H_{\rm diff}$ to rotate the polarization by $\pi$. Then
an orthogonal measurement perfectly distinguishes $H_{\rm diff}$ from the
trivial Hamiltonian.
Evidently, the same strategy can be applied to distinguish two Hamiltonians
$H_1$ and $H_2$ in a Hilbert space of arbitrary dimension. We drive the system
with $-H_2$; then to distinguish the trivial Hamiltonian from $H_1-H_2$, we
choose the initial state
\begin{equation}
{1\over\sqrt{2}}\left(|E_{\rm min}\rangle + |E_{\rm max}\rangle\right)~,
\end{equation}
where $E_{\rm min},E_{\rm max}$ are the minimal and maximal eigenvalues of
$H_1-H_2$. After a time $t$ with
\begin{equation}
t(E_{\rm max}-E_{\rm min})=\pi~,
\end{equation}
this state evolves to the orthogonal state ${1\over\sqrt{2}}\left(|E_{\rm
min}\rangle - |E_{\rm max}\rangle\right)$, so that the trivial and nontrivial
Hamiltonians can be perfectly distinguished.
In the case of the two-dimensional version of the ``Grover problem'' with $H_1=
|0\rangle\langle 0|$ and $H_2=|1\rangle\langle 1 |$, this choice for the
driving Hamiltonian actually outperforms the Grover driving term of
Eq.~(\ref{grover_drive}) --- the two Hamiltonians can be distinguished in a
time that is shorter by a factor of $\sqrt{2}$. So while the Grover strategy
is optimal for asymptotically large $N$, it is not actually optimal for $N=2$.
\section{Distinguishing two alternatives in a fixed time}
Let us now suppose that we are to distinguish between two time-independent
Hamiltonians $H_1$ and $H_2$, and that a {\sl fixed duration} $t$ has been
allotted to perform the experiment. Is the driving strategy described above
(in which $-H_2$ is added to the Hamiltonian) always the best possible?
If we have the freedom to add a driving term of our choice, then we may assume
without loss of generality that we are to distinguish the nontrivial
Hamiltonian $H$ from the trivial Hamiltonian $0$. As already noted, if the
largest difference $\Delta E=E_{\rm max}-E_{\rm min}$ of eigenvalues of $H$
satisfies $t\Delta E \ge \pi$, then $H$ can be perfectly distinguished from
$0$; let us therefore suppose that $t \Delta E < \pi$.
If we add a {\sl time-independent} driving term $K$ to the Hamiltonian, and
choose an initial state $|\psi_0\rangle$, then after a time t, we will need to
distinguish the two states
\begin{equation}
\label{two_states}
e^{-i t K}|\psi_0\rangle~, \quad e^{-it(H + K)}|\psi_0\rangle~.
\end{equation}
Two pure states will be more distinguishable when their inner product is
smaller. Therefore, to best distinguish $H+K$ from $K$, we should choose
$|\psi_0\rangle$ to minimize the inner product
\begin{equation}
\left|\langle\psi_0|e^{it K} e^{-it(H + K)}|\psi_0\rangle\right|~.
\end{equation}
If we expand $|\psi_0\rangle$ in terms of the eigenstates $\{|a\rangle\}$ of
$e^{it K} e^{-it(H + K)}$ with eigenvalues $\{e^{-itE_a}\}$,
\begin{equation}
|\psi_0\rangle=\sum_a \alpha_a|a\rangle~,
\end{equation}
this inner product becomes
\begin{equation}
\label{you_tee}
\left|\langle\psi_0|e^{it K} e^{-it(H + K)}|\psi_0\rangle\right|= \left|\sum_a
|\alpha_a|^2 e^{-itE_a}\right|~.
\end{equation}
The right-hand side of Eq.~(\ref{you_tee}) is the modulus of a convex sum of
points on the unit circle. Assuming the modulus is bounded away from zero, it
attains its minimum when $|\psi_0\rangle$ is the equally weighted superposition
of the extremal eigenstates of $e^{it K} e^{-it(H + K)}$ -- those whose
eigenvalues are maximally separated on the unit circle.
For $K=0$, the minimum is
$\cos\left( t\Delta E/2\right)$, where $\Delta E$ is the difference of the
maximal and minimal eigenvalues of $H$.
We prove in Appendix A that turning on a nonzero driving term $K$ can never
cause the extremal eigenvalues to separate further, and therefore can never
improve the distinguishability of the two states in
Eq.~(\ref{two_states}).\footnote{That this might be the case was suggested to us
by Chris Fuchs \cite{fuchs_private}.} Therefore, $K=0$ is the optimal driving
term for distinguishing two Hamiltonians. In other words, if we wish to
distinguish between two Hamiltonians $H_1$ and $H_2$, it is always best to turn
on a driving term that precisely cancels one of the two.
The above discussion encompasses the strategy of introducing an ancilla
entangled with the probe (which proved effective for the problem of
distinguishing three or more alternatives). If we wish to distinguish two
Hamiltonians $H_1\otimes I$ and $H_2\otimes I$ that both act trivially on the
ancilla, the optimal driving term exactly cancels one of them ({\it e.g.}, $K=
- H_2\otimes I$), and so it too acts trivially on the ancilla. We derive no
benefit from the ancilla when there are only two alternatives.
Similarly, if we are trying to distinguish only two time-independent signals in
an allotted time, it seems likely there is no advantage to performing a
sequence of weak measurements, and adapting the driving field in response to
the incoming stream of measurement data.
\section{More alternatives: adaptive driving}
Now suppose that there are $N$ possible Hamiltonians ${H_1, H_2,
\ldots, H_N}$. If there is no time limitation, we can distinguish them
perfectly by implementing an adaptive procedure; we make a series of
measurements, modifying our driving term and initial state in response to the
stream of measurement outcomes.
The correct Hamiltonian can be identified by pairwise elimination. First,
assume that either $H_1$ or $H_2$ is the actual Hamiltonian, and apply a
driving term to perfectly
distinguish them, say $H_{D}=-H_1$. After preparing the appropriate initial
state and waiting the appropriate time, we make an orthogonal measurement with
two outcomes --- the result indicates that either $H_1$ or $H_2$ is the
actual Hamiltonian.\footnote{Actually, in a Hilbert space of high dimension, we
can make a more complete measurement that will typically return the result that
neither $H_1$ nor $H_2$ is the actual Hamiltonian.} If the result is $H_1$,
there are two possibilities:
either $H_1$ really is the Hamiltonian, or the assumption that one of $H_1$ or
$H_2$ is the Hamiltonian was wrong. Either way, $H_2$ has been eliminated.
Similarly, if
$H_2$ is found, $H_1$ is eliminated. This procedure can then be repeated,
eliminating one Hamiltonian per measurement, thereby perfectly distinguishing
among the $N$ Hamiltonians in a total of $N-1$ measurements.
This algorithm is quite inefficient, however. The measurement record is $N-1$
bits long, while the information gain is only $\log N$ bits.
\section{Adaptive phase measurement and the semiclassical quantum Fourier
transform}
Far more efficient adaptive procedures can be formulated in some cases.
Consider, for example, a single qubit in a magnetic field of known direction
but unknown magnitude, so that
\begin{equation}
H_\omega= {\omega\over 2}\sigma_3~,
\end{equation}
and let us imagine that the value of the frequency $\omega$ is chosen
equiprobably from among $N=2^n$ equally spaced possible values. Without loss of
generality, we may normalize the field so that the possible values range from 0
to $1-2^{-n}$; then $\omega$ has a binary expansion
\begin{equation}
\omega = .\omega_1 \omega_2 \ldots \omega_n
\end{equation}
that terminates after at most $n$ bits.
The initial state $|\psi_0\rangle={1\over \sqrt{2}}(|0\rangle + |1\rangle)$
evolves in time $t$ to
\begin{equation}
|\psi(t)\rangle_\omega= e^{-itH_\omega}|\psi_0\rangle ={1\over
\sqrt{2}}(|0\rangle + e^{- i \omega t} |1\rangle)
\end{equation}
(up to an overall phase). If we wait for a time $t_n=\pi 2^n$, the
final state is
\begin{equation}
|\psi(t_n)\rangle_\omega={1\over \sqrt{2}}(|0\rangle + e^{-i \pi \omega_n }
|1\rangle) ~.
\end{equation}
Now measurement in the $\{{1\over\sqrt{2}}(|0\rangle \pm |1\rangle)\}$ basis
indicates (with certainty)
whether the bit $\omega_n$ is 0 or 1. This outcome divides the set of possible
Hamiltonians in half, providing one bit of classical information.
The set of remaining possible Hamiltonians is still evenly spaced, but it may
have a constant offset, depending on the value of $\omega_n$. However, the
value of
$\omega_n$ is now known, so the offset can be eliminated. Specifically, if we
again prepare $|\psi_0\rangle$ and now evolve for a time $t_{n-1}=\pi
2^{n-1}$, we obtain the final state
\begin{equation}
|\psi(t_{n-1})\rangle_\omega={1\over \sqrt{2}}(|0\rangle + e^{-i \pi \>
(\omega_{n-1} . \omega_n)} |1\rangle)~.
\end{equation}
Since $\omega_n$ is known, we can perform a phase transformation (perhaps by
applying an additional driving magnetic field) to eliminate the phase $e^{-i
\pi \> (. \omega_n)}$.
Measuring again in the $\{{1\over\sqrt{2}}(|0\rangle \pm |1\rangle)\}$ basis
determines the value of $\omega_{n-1}$.
By continuing this procedure until all bits of $\omega$ are known, we perfectly
distinguish the $2^n$ possible Hamiltonians in just $n$ measurements. The
procedure is optimal in the sense that we gain one full bit of information
about the Hamiltonian in each measurement.
Up until now we have imagined that the frequency $\omega$ takes one of $2^n$
equally spaced discrete values, but no such restriction is really necessary.
Indeed, what we have described is precisely the implementation of the $n$-qubit
semiclassical quantum Fourier transform as formulated by Griffiths and Niu
\cite{griffiths} (whose relevance to phase estimation was emphasized by Cleve
{\it et al.} \cite{cleve}). Thus the same procedure can be applied to obtain
an estimate of the frequency to $n$-bit precision, even if the frequency is
permitted to take an arbitrary real value in the interval $[0,1)$.
Suppose that we attach to $n$ spins the labels $\{0,1,\ldots,n-2,n-1\}$, and
expose the $k$th spin to the field for time $\pi 2^{k+1}$; we thus prepare the
$n$-qubit state
\begin{equation}
\prod_{k=0}^{n-1} {1\over\sqrt{2}}\left(|0\rangle + e^{-i\pi\omega\cdot
2^{k+1}}|1\rangle\right)={1\over 2^{n/2}}\sum_{y=0}^{2^n-1} e^{-2\pi i \omega
\cdot y}|y\rangle~.
\end{equation}
The adaptive algorithm is equivalent to the quantum Fourier transform followed
by measurement;
hence the $n$-bit measurement outcome $\tilde \omega$ occurs with probability
\begin{equation}
{\rm Prob}_\omega(\tilde \omega) = \left| {1 \over 2^n} \sum_{y=0}^{2^n - 1}
\exp[-2 \pi i y (\omega-\tilde \omega)] \right|^2.
\end{equation}
If $\omega$ really does terminate in $n$ bits, then the outcome $\tilde\omega$
is
guaranteed to be its correct binary expansion. But even if the binary
expansion of $\omega$ does not terminate, the probability that our estimate
$\tilde \omega$ is correct to $n$ bits of precision is still of order
one.\footnote{We might also use the QFT to {\sl compute} eigenvalues of a known
many-body Hamiltonian, rather than {\sl measure} eigenvalues of an unknown one
\cite{lloyd}.}
Of course, to measure the frequency to a precision $\Delta \omega$ of order
$2^{-n}$, we need to expose our probe spins to the unknown Hamiltonian for a
total time $T$ of order $2\pi\cdot 2^{n}$. The accuracy is limited by an
energy-time uncertainty relation of the form $T\Delta\omega\sim 1$.
The semiclassical quantum Fourier transform provides an elegant solution to the
problem of performing an ideal ``phase measurement'' in the Hilbert space of
$n$ qubits. More broadly, any $N$-dimensional Hilbert space with a preferred
basis $\{|k\rangle, ~k=0,1,\dots, N-1\}$ has a complementary basis of {\sl
phase states}
\begin{equation}
|\varphi\rangle={1\over\sqrt{N}} \sum_{k=0}^{N-1}e^{ik\varphi}|k\rangle~,
\end{equation}
with
\begin{equation}
\varphi = 2\pi j/N~,\quad j=0,1,\dots,N-1~.
\end{equation}
For example, the Hilbert space could be the truncated space of a harmonic
oscillator like a mode of the electromagnetic field, with the occupation number
restricted to be less than $N$; then the states $|\varphi\rangle$ are the
``phase squeezed'' states of the oscillator that have minimal phase
uncertainty. Since a POVM in an $N$-dimensional Hilbert space can acquire no
more than $\log N$ bits of information about the preparation of the quantum
state, the phase of an oscillator with occupation number less than $N$ can be
measured to at best $\log N$ bits of accuracy. While it is easy to do an
orthogonal measurement in the occupation number basis with an efficient
photodetector, an orthogonal measurement in the $|\varphi\rangle$ basis is
quite difficult to realize in the laboratory \cite{wiseman}.
But if the standard basis is the computational basis in the $2^n$-dimensional
Hilbert space of $n$ qubits, then an ideal phase measurement is simple to
realize. Since the phase eigenstates are actually not entangled states, we can
carry out the measurement -- {\sl adaptively} -- one qubit at a time.
Note that if we had an arbitrarily powerful quantum computer with an
arbitrarily large amount of quantum memory, then adaptive measurement
strategies might seem superfluous. We could achieve the same effect by
introducing a large ancilla and a driving Hamiltonian that acts on probe and
ancilla, with all measurements postponed to the very end. But the
semiclassical quantum Fourier transform illustrates that adaptive techniques
can reduce the complexity of the quantum information processing required to
perform the measurement. In many cases, an adaptive strategy may be realizable
in practice, while the equivalent unitary strategy is completely infeasible.
\section{Distinguishability and decoherence}
In all of our examples so far, we have ignored noise and decoherence. In
practice, decoherence may compromise our ability to decipher the classical
signal with high confidence. Finding ways to improve measurement accuracy by
effectively coping with decoherence is an important challenge faced by quantum
information theory.
If there is decoherence, our aim is to gain information about the value of a
parameter in a master equation rather than a Hamiltonian. To be concrete,
consider a single qubit governed by an unknown Hamiltonian $H$, and also
subject to decoherence described by the ``depolarizing channel;'' the density
matrix $\rho$ of the qubit obeys the master equation
\begin{equation}
\dot \rho= -i[H,\rho] - \Gamma\left(\rho-{1\over 2} I\right)~,
\end{equation}
where $\Gamma$ is the (known) damping rate.
If we express $\rho$ in terms of the polarization vector $\vec P$,
\begin{equation}
\rho={1\over 2}(I+\vec P\cdot\vec\sigma)~,
\end{equation}
and the Hamiltonian as
\begin{equation}
H={\omega\over 2}~\hat a\cdot \vec\sigma~,
\end{equation}
then the master equation becomes
\begin{equation}
\dot{\vec P}=\omega(\hat a \times \vec P) - \Gamma \vec P~.
\end{equation}
The polarization precesses uniformly with circular frequency $\omega$ about the
$\hat a$-axis as it contracts with lifetime $\Gamma^{-1}$.
Suppose that we are to distinguish among two possible Hamiltonians, which are
assumed to be equiprobable. If we are able to add a driving term, we may assume
that the two are the trivial Hamiltonian and
\begin{equation}
H={\omega\over 2}~\sigma_3~.
\end{equation}
We choose the initial polarization vector $P_0=(1,0,0)$. Then if the
Hamiltonian is trivial, the polarization contracts as
\begin{equation}
\vec P(t)_{\rm triv}= e^{-\Gamma t}(1,0,0)~,
\end{equation}
while under the nontrivial Hamiltonian it contracts and rotates as
\begin{equation}
\vec P(t)_{\rm nontriv}=e^{-\Gamma t}(\cos \omega t,\sin \omega t, 0)~.
\end{equation}
When is the best time to measure the polarization? We should wait until $\vec
P_{\rm triv}$ and $\vec P_{\rm nontriv}$ point in distinguishable directions,
but if we wait too long, the states will depolarize. The optimal measurement
to distinguish the two is an orthogonal measurement of the polarization along
the axis normal to the bisector of the vectors $\vec P(t)_{\rm triv}$ and $\vec
P(t)_{\rm nontriv}$. At time $t$ the probability that this measurement
identifies the Hamiltonian incorrectly is
\begin{equation}
P_{\rm error}= {1\over 2} - {1\over 2} e^{-\Gamma t}\left|\sin\left({\omega
t\over 2}\right)\right|~.
\end{equation}
This error probability is minimized, and the information gain from the
measurement is maximized, at a time $t$ such that
\begin{equation}
\tan\left({\omega t\over 2}\right)= {\omega\over 2\Gamma}~.
\end{equation}
If $\Gamma/\omega\ll 1$, this time is close to $\pi/\omega$, the time we would
measure to perfectly distinguish the Hamiltonians in the absence of
decoherence. But if $\Gamma/\omega \gg 1$, then we should measure after a time
$t\sim \Gamma^{-1}$ comparable to the lifetime.
More generally, consider an ensemble of two density operators $\rho_1$ and
$\rho_2$ with {\it a priori} probabilities $p_1$ and $p_2$ (where $p_1 +
p_2=1$), and imagine that an unknown state has been drawn from this ensemble. A
procedure for deciding whether the unknown state is $\rho_1$ or $\rho_2$ can be
modeled as a POVM with two outcomes. The two-outcome POVM that minimizes the
probability of making an incorrect decision is a measurement of the orthogonal
projection onto the space spanned by the eigenstates of $p_1\rho_1-p_2 \rho_2$
with positive eigenvalues \cite{helstrom,fuchs_thesis}. The minimal error
probability achieved by this measurement is
\begin{equation}
P_{\rm error}={1\over 2}- {1\over 2}{\rm tr}\left|p_1\rho_1-p_2\rho_2\right|~.
\end{equation}
Correspondingly, if we are to identify an unknown superoperator as one of
$\$_1$ and $\$_2$ (with {\it a priori} probabilities $p_1$ and $p_2$), then the
way to distinguish $\$_1,\$_2$ with minimal probability of error is to choose
our initial state $\rho_0=|\psi_0\rangle\langle \psi_0|$ to
minimize\footnote{We thank Chris Fuchs for a helpful discussion of this point.}
\begin{equation}
\label{super_error}
P_{\rm error}={1\over 2}- {1\over 2}{\rm
tr}\left|\left(p_1\$_1-p_2\$_2\right)\rho_0\right|~.
\end{equation}
In the case of interest to us, the superoperators $\$_1$ and $\$_2$ are
obtained by integrating, for time $t$, master equations with Hamiltonians $H_1$
and $H_2$ respectively. We minimize the error probability in
Eq.~(\ref{super_error}) with respect to $t$ to complete the optimization.
\section{Entanglement and frequency measurement}
Consider again the case in which the Hamiltonian is known to be of the form
\begin{equation}
H_{\omega}={\omega\over 2}~\sigma_3~,
\end{equation}
but where the frequency $\omega$ is unknown. For the moment, let us neglect
decoherence, but suppose that we have been provided with a large number $n$ of
qubits that we may use to perform an experiment to determine $\omega$ in a {\sl
fixed total time} $t$. What is the most effective way to employ our qubits?
Consider two strategies. In the first, we prepare $n$ identical qubits
polarized along the $x$-axis. They precess in the field described by
$H_\omega$ for time $t$, and then the spin along the $x$-axis is measured. Each
spin will be found to be pointing ``up'' with probability
\begin{equation}
P = {1\over 2}(1+ \cos\omega t)~.
\end{equation}
Because the measurement is repeated many times, we will be able to estimate the
probability $P$ to an accuracy
\begin{equation}
\label{n_qubits}
\Delta P=\sqrt{P(1-P)/n}={|\sin\omega t|\over 2\sqrt{n}}~,
\end{equation}
and so determine the value of $\omega$ to accuracy
\begin{equation}
\label{shot_noise}
\Delta \omega = {\Delta P\over t |dP/d(\omega t)|}={1\over t \sqrt n}~.
\end{equation}
The accuracy improves like $1/\sqrt{n}$ as we increase the number of available
qubits with the time $t$ fixed.
The second strategy is to prepare an entangled ``cat'' state of $n$ ions
\begin{equation}
|\psi_0\rangle = {1\over \sqrt{2}}(|000\dots0\rangle + |111\dots 1\rangle)~.
\end{equation}
The advantage of the entangled state is that it precesses $n$ times faster than
a single qubit; in time $t$ it evolves to
\begin{equation}
|\psi(t)\rangle= {1\over \sqrt{2}}(|000\dots0\rangle + e^{i n\omega t}|111\dots
1\rangle)
\end{equation}
(up to an overall phase).
If we now perform an orthogonal measurement that projects onto the basis
${1\over \sqrt{2}}(|000\dots0\rangle \pm |111\dots 1\rangle)$ ({\it e.g.} a
measurement of the entangled observable
$\sigma_1\otimes\sigma_1\otimes\cdots\otimes\sigma_1$) then we will obtain the
``+'' outcome with probability
\begin{equation}
P={1\over 2}(1+\cos n\omega t)~.
\end{equation}
By this method, $n\omega t$ can be measured to order one accuracy, so that
\begin{equation}
\label{linear_noise}
\Delta\omega\simeq { 1\over tn}~,
\end{equation}
a more favorable scaling with $n$ than in Eq.~(\ref{shot_noise}).
This idea of exploiting the rapid precession of entangled states to achieve a
precision beyond the shot-noise limit has been proposed in both frequency
measurement \cite{wineland} and optical interferometry \cite{yurke}. (One
realization of this idea is the proposal by Caves \cite{caves} to allow a
squeezed vacuum state to enter the dark port of an interferometer; the
squeezing induces the $n$ photons entering the other port to make correlated
``decisions'' about which arm of the interferometer to follow.)
\section{Entanglement versus decoherence}
In both Eq.~(\ref{shot_noise}) and Eq.~(\ref{linear_noise}), the accuracy of
the frequency measurement improves with the elapsed time $t$ as $1/t$. But so
far we have neglected decoherence. If the single-qubit state decays at a rate
$\Gamma$, then we have seen that the optimal time at which to perform a
measurement will be of order $\Gamma^{-1}$. The entangled strategy will still
be better if we are constrained to perform the measurement in a time
$t\ll\Gamma^{-1}$, but further analysis is needed to determine which method is
better if we are free to choose the time $t$ to optimize the accuracy.
In fact, as Huelga {\it et al.} \cite{huelga} have emphasized, an entangled
state is fragile, and its faster precession can be offset by its faster decay
rate. Suppose that two qubits are available, both independently subjected to
the depolarizing channel with decay rate $\Gamma$.
If we prepare the unentangled state, each qubit has the initial pure-state
density matrix
\begin{equation}
\rho_0={1\over 2}(I+\sigma_1)~
\end{equation}
polarized along the $x$-axis, and evolves in time $t$ to
\begin{equation}
\rho(t)={1\over 2}[I+e^{-\Gamma t}(\sigma_1~\cos\omega t+ \sigma_2~\sin\omega
t)]~.
\end{equation}
If we now measure $\sigma_1$, we obtain the $+$ result with probability
\begin{equation}
\label{nonentangle_prob}
\label{single_decohere}
P={\rm tr}\left({1\over 2}(I+\sigma_1)\rho(t)\right)={1\over 2}(1+e^{-\Gamma
t}\cos\omega t)~.
\end{equation}
Now suppose that the initial state is the Bell state $|\phi^+\rangle$ of two
qubits, with density matrix
\begin{equation}
\rho_0={1\over 4}\left(I\otimes I+ \sigma_3\otimes\sigma_3 +
\sigma_1\otimes\sigma_1-\sigma_2\otimes\sigma_2\right)~.
\end{equation}
If both spins precess and depolarize independently, this state evolves to
\begin{eqnarray}
\rho(t)& = & {1\over 4} [I\otimes I+ e^{-2\Gamma t}\big(\sigma_3\otimes\sigma_3
\nonumber\\
& + &\cos 2\omega t(\sigma_1\otimes\sigma_1-\sigma_2\otimes\sigma_2)
\nonumber\\
& + & \sin 2\omega t(\sigma_1\otimes\sigma_2+\sigma_2\otimes\sigma_1)\big)]~;
\end{eqnarray}
if we measure the observable $\sigma_1\otimes\sigma_1$, we find the + outcome
with probability
\begin{eqnarray}
\label{entangle_prob}
P & = &{\rm tr}\left({1\over 2}(I\otimes I
+\sigma_1\otimes\sigma_1)\rho(t)\right)\nonumber \\
& = & {1\over 2}(1+e^{-2\Gamma t}\cos2 \omega t)~.
\end{eqnarray}
Note that Eq.~(\ref{entangle_prob}) has exactly the same functional form as
Eq.~(\ref{nonentangle_prob}), but with $t$ replaced by $2t$. Therefore, the
entangled measurement performed in time $t/2$ collects exactly as much
information about the frequency $\omega$ as the measurement of a single ion
performed in time $t$. If we have two qubits and total time $t$ available, we
can either perform the entangled measurement twice (taking time $t/2$ each
time), or perform measurements on each qubit independently (taking time $t$).
Either way, we obtain two outcomes and collect exactly the same amount of
information on the average.
More generally, suppose that we have $n$ qubits and a total time $T\gg 1/
\Gamma$ available. We can use these qubits to perform altogether $nT/t$
independent single-qubit measurements, where each measurement requires time
$t$. Plugging Eq.~(\ref{single_decohere}) into Eq.~(\ref{n_qubits}) and
Eq.~(\ref{shot_noise}) (with $n$ replaced by $nT/t$), and choosing $\cos\omega
t\sim 0$ to optimize the precision, we find that the frequency can be
determined to accuracy
\begin{equation}
\Delta\omega=\left({1\over t}\right)\cdot {e^{\Gamma t}\over
\sqrt{nT/t}}={1\over \sqrt{nT}}\cdot {e^{\Gamma t}\over \sqrt{t}}~.
\end{equation}
This precision is optimized if we choose $\Gamma t=1/2$, where we obtain
\cite{huelga}
\begin{equation}
\Delta\omega = \sqrt{2e\Gamma\over nT}~.
\end{equation}
On the other hand, we could repeat the experiment $T/t$ times using the
$n$-qubit entangled state. Then we would obtain a precision
\begin{equation}
\Delta\omega = \left({1\over nt}\right)\cdot {e^{n\Gamma t}\over
\sqrt{T/t}}={1\over \sqrt{nT}}\cdot {e^{n\Gamma t}\over \sqrt{nt}}~,
\end{equation}
the same function as for uncorrelated qubits, but with $t$ replaced by $nt$.
Thus the optimal precision is the same in both cases, but is attained in the
uncorrelated case by performing experiments that take $n$ times longer than in
the correlated case.
That the entangled states offer no advantage in the determination of $\omega$
was one of the main conclusions of Huelga {\it et al.} \cite{huelga}. A
similar conclusion applies to estimating the difference in path length between
two arms of an interferometer using a specified optical power, if we take into
account losses and optimize with respect to the number of times the light
bounces inside the interferometer before it escapes and is detected.
We would like to make the (rather obvious) point that this conclusion can
change if we adopt a different model of decoherence, and in particular if the
qubits do not decohere independently. As a simple example of correlated
decoherence, consider the case of two qubits with $4 \times 4$ density matrix
$\rho$ evolving according to the master equation
\begin{equation}
\dot \rho = -i[H,\rho] - \Gamma \left(\rho-I/4\right)~.
\end{equation}
This master equation exhibits the analog, in the four-dimensional Hilbert
space, of the uniform contraction of the Bloch sphere described by the
depolarizing channel in the case of a qubit. Because the decoherence picks out
no preferred direction in the Hilbert space (or any preferred tensor-product
decomposition), we call this model ``symmetric decoherence.''
Under this master equation, with both qubits subjected to $H_\omega$ and to
symmetric decoherence, the Bell state $\rho_0=|\phi^+\rangle\langle\phi^+|$
evolves in time $t$ to the state
\begin{eqnarray}
\rho(t)& = & {1\over 4} [I\otimes I+ e^{-\Gamma t}\big(\sigma_3\otimes\sigma_3
\nonumber\\
& + &\cos 2\omega t(\sigma_1\otimes\sigma_1-\sigma_2\otimes\sigma_2)
\nonumber\\
& + & \sin 2\omega t(\sigma_1\otimes\sigma_2+\sigma_2\otimes\sigma_1)\big)]~,
\end{eqnarray}
so that a measurement of $\sigma_1\otimes\sigma_1$ yields the + outcome with
probability
\begin{equation}
\label{ent_symmetric}
P ={1\over 2}(1+e^{-\Gamma t}\cos2 \omega t)~.
\end{equation}
On the other hand, the initial product state
\begin{equation}
\rho_0={1\over 4} (I+\sigma_1)\otimes(I+\sigma_1)
\end{equation}
becomes entangled as a result of symmetric decoherence. Were the Hamiltonian
trivial, it would evolve to
\begin{equation}
\rho(t)={1\over 4} I\otimes I + {1\over 4}e^{-\Gamma t}(\sigma_1\otimes I+
I\otimes\sigma_1 +\sigma_1\otimes\sigma_1)~.
\end{equation}
Including the precession
\begin{equation}
\sigma_1\to \sigma_1 \cos\omega t + \sigma_2\sin\omega t~,
\end{equation}
we obtain
\begin{equation}
\rho(t)={1\over 4} I\otimes I + {1\over 4}e^{-\Gamma t}( \sigma_1\otimes
I~\cos\omega t + \cdots~)~,
\end{equation}
so that measurement of the single-qubit observable $\sigma_1\otimes I$ yields
the + outcome with probability
\begin{equation}
\label{single_symmetric}
P={\rm tr}\left({1\over 2}(I\otimes I +\sigma_1\otimes I)\rho(t)\right)={1\over
2}(1+e^{-\Gamma t}\cos\omega t)~.
\end{equation}
Comparing Eq.~(\ref{single_symmetric}) and Eq.~(\ref{ent_symmetric}), the
important thing to notice is that with symmetric decoherence, entangled states
decay no faster than product states; therefore, we can enjoy the benefit of
entanglement (faster precession) without paying the price (faster decay).
To establish more firmly that entangled strategies outperform nonentangled
strategies in the symmetric decoherence model, we should consider more closely
what are the optimal final measurements for these two types of initial states.
To give the problem a precise information-theoretic formulation, we return to
the problem of distinguishing two cases, the trivial Hamiltonian and
$H_\omega$, which are assumed to be equiprobable. For either the product
initial state or the entangled initial state, we evolve for time $t$, and then
perform the best measurement that distinguishes between evolution governed by
$H_\omega$ and trivial evolution. In both cases, the measurement is permitted
to be an entangled measurement; that is, we optimize with respect to all POVM's
in the four-dimensional Hilbert space.
In either case (initial product state or initial entangled state), we can find
the two-outcome POVM that identifies the Hamiltonian with minimal probability
of error. When there is no decoherence, this POVM (when restricted to the
two-dimensional subspace containing the two pure states to be distinguished) is
the familiar orthogonal measurement that best distinguishes two pure states of
a qubit. In fact, for symmetric decoherence, this same measurement minimizes
the error probability for any value of the damping rate $\Gamma$. It is thus
the two-outcome measurement with the maximal information gain (the measurement
outcome has maximal mutual information with the choice of the Hamiltonian).
Although we don't have a proof, we can make a reasonable guess that, for
symmetric decoherence, this two-outcome measurement has the maximal information
gain of any measurement, including POVM's with more outcomes.
If either initial state evolves for time $t$, and then this optimal POVM is
performed, the error probability can be expressed as
\begin{equation}
P_{\rm error} = {1\over 2} - {1\over 2}e^{-\Gamma
t}\left|\sin\theta(t)\right|~;
\end{equation}
here $\theta(t)$ is the angle between the states --- that is, $\cos \theta(t)$
is the inner product of the evolving and static states, in the limit of no
damping ($\Gamma=0$). For the entangled initial state, we have
\begin{equation}
\theta_{\rm entangled}=\omega t~,
\end{equation}
and for the product initial state, we have
\begin{equation}
\cos\theta_{\rm product}=\cos^2 \left({\omega t\over 2}\right)~.
\end{equation}
Since
\begin{equation}
|\cos \theta_{\rm entangled}| = |\cos\omega t| \le {1\over 2}(1+\cos \omega t)
=|\cos\theta_{\rm product}|
\end{equation}
for $\cos\theta_{\rm entangled}\ge 0$, the error probability achieved by the
entangled initial state is smaller than that achieved by the product state for
$0 < \omega t< \pi/2$, which is sufficient to ensure that the error probability
optimized with respect to $t$ is always smaller in the entangled case for any
nonzero value of $\Gamma$.
Similarly, if we optimize the information gain with respect to $t$, the
entangled strategy has the higher information gain for all $\Gamma>0$. The
improvement in information gain (in bits) achieved using an entangled initial
state rather than a product initial state is plotted in Fig.~1 as a function of
$\Gamma/\omega$. The maximum improvement of about .136 bits occurs for
$\Gamma/\omega\sim .379$.
\begin{figure}
\caption{Improvement in information gain (in bits) achieved by using an
entangled initial state, as a function of the ratio of decoherence rate
$\Gamma$ to precession frequency $\omega$.}
\end{figure}
We have already seen in \S II that, even in the absence of decoherence, an
entangled strategy may outperform an unentangled strategy if we are trying to
distinguish more than two alternatives. This advantage will persist when
sufficiently weak decoherence is included, whether correlated or uncorrelated.
In that event, since only one member of an entangled pair is exposed to the
unknown Hamiltonian, we may be able to shelter the other member of the pair
from the ravages of the environment, slowing the decay of the state and
strengthening the signal.
\section{Conclusions}
We feel that quantum information theory, having already secured a central
position at the foundations of computer science, will eventually erect bridges
connecting with many subfields of physics. The results reported here (and other
related examples) give strong hints that ideas emerging from the theory of
quantum information and computation are destined to profoundly influence the
experimental physics techniques of the future.
We have only scratched the surface of this important subject. Among the many
issues that deserve further elaboration are the connections between
superoperator distinguishability and superoperator norms, the efficacy of the
quantum Fourier transform in the presence of decoherence, the measurement of
continuous quantum variables, the applications of quantum error correction, and
the detection of time-dependent signals.
\acknowledgments
We thank Constantin Brif, Jon Dowling, Steven van Enk, Jeff Kimble, Alesha
Kitaev, and Kip Thorne for instructive discussions about quantum measurement.
We are especially grateful to Hideo Mabuchi for introducing us to this
fascinating subject, to Chris Fuchs for sharing his insights into state
distinguishability, and to Dave Beckman for discussions on improving the
superdense coding method by applying a driving field. Thanks to Barry Simon for
useful comments on the theorem in Appendix A, and for persuading us that it is
not completely trivial. We also thank C. Woodward for helpful correspondence.
A.~M.~C. and J.~R. received support from Caltech's Summer Undergraduate
Research Fellowship (SURF) program, and A.~M.~C. received a fellowship endowed
by Arthur R. Adams. This work has been supported in part by the Department of
Energy under Grant
No. DE-FG03-92-ER40701, and by DARPA through the Quantum Information and
Computation (QUIC) project administered by the Army Research Office under Grant
No. DAAH04-96-1-0386.
\section*{Appendix A: Fixed-time-driving theorem}
In this appendix, we sketch the proof of the theorem stated in \S V.
For a unitary $N\times N$ matrix $U$, we define ${\rm maxarg}(U)$ to be the
largest argument of an eigenvalue of $U$, where the argument takes values in
the interval $(-\pi,\pi]$. Similarly, ${\rm minarg}(U)$ is the minimum
argument of an eigenvalue of $U$. Our theorem is:
{\bf Theorem 1.} {\sl If $H$ and $K$ are finite-dimensional Hermitian matrices,
and $\parallel H\parallel_{\rm sup} <\pi$, then
\begin{eqnarray}
\label{thm1}
{\rm maxarg}\left(e^{iK} e^{-i(H+K)}\right) & \le & {\rm maxarg}(e^{-iH}) ~,\\
\label{thm2}
{\rm minarg}\left(e^{iK} e^{-i(H+K)}\right) & \ge & {\rm minarg}(e^{-iH})~.
\end{eqnarray}
}
To prove the theorem, we begin with:
{\bf Lemma 2}. {\sl For unitary $U$ with ${\rm maxarg}(U) \ne \pi$, and
Hermitian $A$,
\begin{eqnarray}
\label{maxlemma2}
& &{\rm maxarg} (Ue^{i\varepsilon A})\le {\rm maxarg}(U) + {\rm
maxarg}(e^{i\varepsilon A})+ O(\varepsilon^2)~,\nonumber\\
& & \\
\label{minlemma2}
& &{\rm minarg} (Ue^{i\varepsilon A})\ge {\rm minarg}(U) + {\rm
minarg}(e^{i\varepsilon A})- O(\varepsilon^2)~.\nonumber\\
& &
\end{eqnarray}
}
\noindent {\sl Proof}: Write $U=e^{iB}$, where $B$ is Hermitian and
\newline $\parallel B \parallel_{\rm sup} < \pi$; then maxarg$(e^{iB})={\rm
max}(B)$, where ${\rm max}(B)$ denotes the maximum eigenvalue of $B$. From the
Baker-Campbell-Hausdorff formula, we have
\begin{equation}
e^{iB}e^{i\varepsilon A}= \exp i\left(B+\varepsilon A + {i\over 2}\varepsilon
[C,B] + O(\varepsilon^2)\right) ~,
\end{equation}
where $C$ is linear in $A$. Then lowest-order eigenvalue perturbation theory
tells us that
\begin{eqnarray}
& &{\rm max}\left(B+\varepsilon A + {i\over 2}\varepsilon
[C,B]\right)\nonumber\\
&=& {\rm max}(B) +\langle \psi|\left(\varepsilon A + {i\over 2}\varepsilon
[C,B]\right)|\psi\rangle +O(\varepsilon^2)\nonumber\\
&=&{\rm max}(B)+\langle\psi|\left(\varepsilon A
\right)|\psi\rangle+O(\varepsilon^2)\nonumber\\
&\le&{\rm max}(B) +{\rm max}(\varepsilon A)+O(\varepsilon^2)
\end{eqnarray}
(where $|\psi\rangle$ is in the eigenspace of $B$ with maximal eigenvalue).
This proves Eq.~(\ref{maxlemma2}). Eq.~(\ref{minlemma2}) is proved similarly.
Note that the condition ${\rm maxarg}(U)\ne\pi$ is necessary so that the
singularity of the maxarg function can be avoided for $\varepsilon$
sufficiently small.
Lemma 2 is all we will need for the proof of Theorem 1. But it is useful to
note that Lemma 2 may be invoked to prove:
{\bf Lemma 3}.\footnote{Strangely, we could find only one reference to this
proposition in the literature; it is a special case of Eq.~(8) in
\cite{woodward}.} {\sl For unitary $U_1$ and $U_2$, such that
\begin{eqnarray}
\label{maxargcond}
{\rm maxarg}(U_1) + {\rm maxarg}(U_2) &<& \pi~,\\
\label{minargcond}
{\rm minarg}(U_1) + {\rm minarg}(U_2) &>& -\pi ~,
\end{eqnarray}
we have
\begin{eqnarray}
\label{maxarg}
{\rm maxarg}(U_1 U_2) & \le & {\rm maxarg}(U_1) + {\rm maxarg}(U_2) ~,\\
\label{minarg}
{\rm minarg}(U_1 U_2) & \ge & {\rm minarg}(U_1) + {\rm minarg}(U_2)~.
\end{eqnarray}
}
\noindent{\sl Proof}: We write
\begin{equation}
U_1U_2 = U_1 e^{iA}= U_1 \left(e^{iA/n}\right)^n~,
\end{equation}
where the eigenvalues of A lie in the interval $(-\pi,\pi)$, and apply Lemma 2
repeatedly, obtaining
\begin{eqnarray}
{\rm maxarg}\left(U_1e^{iA}\right)&\le& {\rm maxarg}(U_1)\nonumber\\
&+& n\left[{\rm maxarg}(e^{iA/n})+ O(n^{-2})\right]~.
\end{eqnarray}
Taking the $n\to\infty$ limit proves Eq.~(\ref{maxarg}). Eq.~(\ref{minarg}) is
proved similarly.
Note that because of the conditions Eq.~(\ref{maxargcond}) and
Eq.~(\ref{minargcond}), Lemma 2 can be safely applied $n$ times in succession;
the accumulated maxarg and minarg of the product never approach $\pi$.
To complete the proof of Theorem 1, we invoke the Lie product formula
\begin{equation}
\lim_{n \to \infty} (e^{A/n} e^{B/n})^n = e^{A+B}~,
\end{equation}
to write
\begin{eqnarray}
\label{expexpand}
&&e^{iK}e^{-i(H+K)} = \lim_{n \to \infty} (e^{iK/n})^n (e^{-iH/n}
e^{-iK/n})^n \nonumber \\
& = & \lim_{n \to \infty} e^{iK/n} \cdots e^{iK/n}
e^{-iH/n} e^{-iK/n} \cdots e^{-iH/n} e^{-iK/n}~.
\end{eqnarray}
Since $e^{iK/n} e^{-iH/n} e^{-iK/n}$ and $e^{-iH/n}$ have the same eigenvalues,
Lemma 3 implies that
\begin{eqnarray}
&{\rm maxarg}&(e^{iK/n} e^{-iH/n} e^{-iK/n} e^{-iH/n}) \nonumber\\
& \le & 2 \cdot {\rm maxarg}(e^{-iH/n}) ~.
\end{eqnarray}
Similarly, we have
\begin{eqnarray}
&{\rm maxarg}&\left(e^{iK/n}\left(e^{iK/n} e^{-iH/n} e^{-iK/n} e^{-iH/n}\right)
e^{-iK/n}e^{-iH/n}\right) \nonumber\\
& \le & 3 \cdot {\rm maxarg}(e^{-iH/n}) ~,
\end{eqnarray}
and so on. Hence, applying Lemma 3 altogether $n$ times to the right-hand side
of Eq.~(\ref{expexpand}), we find that
\begin{eqnarray}
{\rm maxarg}\left(e^{iK} \left(e^{-iH/n} e^{-iK/n}\right)^n\right)
& \le & n \cdot {\rm maxarg}\left(e^{-iH/n}\right) \nonumber\\
&= &{\rm maxarg}(e^{-iH})~.
\end{eqnarray}
Taking the $n\to\infty$ limit completes the proof of Eq.~(\ref{thm1}).
Eq.~(\ref{thm2}) is proved similarly.
The upper bound on $\parallel H\parallel_{\rm sup}$ is a key feature of the
formulation of Theorem 1. This bound ensures that the conditions
Eq.~(\ref{maxargcond}) and Eq.~(\ref{minargcond}) are satisfied each time that
Lemma 3 is invoked in the proof. If $\parallel H\parallel_{\rm sup}$ is too
large, then counterexamples can be constructed.
In any event, for the discussion in \S V, we are interested in the case where
the maximal and minimal eigenvalues of $H$ differ by less than $\pi$, and by
shifting $H$ by a constant we can ensure that $\parallel H\parallel_{\rm sup}<
\pi/2$. Therefore, the theorem enforces the conclusion that if we are to
distinguish a nontrivial Hamiltonian from the trivial Hamiltonian in an
experiment conducted in a fixed elapsed time, turning on a nonzero
time-independent ``driving term'' $K$ provides no advantage.
\begin{references}
\bibitem{qc} P.~W.~Shor, ``Polynomial-time algorithms for
prime factorization and discrete logarithms on a quantum computer,''
{\it Proceedings of the 35th Annual Symposium on the Foundations of
Computer Science}, p. 124 (IEEE Computer Society Press, Los Alamitos,
CA, 1994), quant-ph/9508027.
\bibitem{qec} P.~W. Shor, ``Scheme for reducing decoherence in quantum
memory,'' Phys. Rev. A {\bf 52}, 2493 (1995); A.~M. Steane, ``Error correcting
codes in quantum
theory,'' Phys. Rev. Lett. {\bf 77}, 793 (1996).
\bibitem{mabuchi} H. Mabuchi, ``Dynamical identification of open quantum
systems,'' Quantum Semiclass. Opt. {\bf 8}, 1103 (1996), quant-ph/9608020.
\bibitem{wiseman_adaptive} H.~M. Wiseman, ``Adaptive phase measurements of
optical modes: going beyond the marginal $Q$ distribution,'' Phys. Rev. Lett.
{\bf 75}, 4587 (1995); H.~M. Wiseman, ``Using feedback to eliminate back-action
in quantum measurements,'' Phys. Rev. A {\bf 51}, 2459 (1995).
\bibitem{no_clone} W.~K. Wootters and W.~H. Zurek, ``A single quantum cannot be
cloned,'' Nature {\bf 299}, 802 (1982); D. Dieks, ``Communication by EPR
devices,'' Phys. Lett. {\bf A}, 271 (1982).
\bibitem{wootters} W.~K. Wootters, ``Statistical distance and Hilbert space,''
Phys. Rev. D {\bf 23}, 357 (1981).
\bibitem{braun} S.~L. Braunstein, ``Quantum limits on precision measurement of
phase,'' Phys. Rev. Lett. {\bf 69}, 3598 (1992).
\bibitem{braunstein} S.~L. Braunstein and C. Caves, ``Statistical distance and
the geometry of quantum states,'' Phys. Rev. Lett. {\bf 72}, 3439 (1994).
\bibitem{braginsky} V.~B. Braginsky and F. Ya. Khalili, {\it Quantum
Measurement} (Cambridge University Press, Cambridge, 1992).
\bibitem{holevo} A.~S. Holevo, ``Statistical problems in quantum physics,'' in
{\it Proceedings of the 2nd Japan-USSR Symposium on Probability Theory}, eds.
G. Maruyama and J.~V. Prokhorov (Springer-Verlag, Berlin, 1973), p. 104.
\bibitem{wiesner} C.~H. Bennett and S.~J. Wiesner, ``Communication via one and
two-particle operators on Einstein-Podolsky-Rosen states,'' Phys. Rev. Lett.
{\bf 69}, 2881 (1992).
\bibitem{hausladen} P. Hausladen, R. Jozsa, B. Schumacher, M. Westmoreland, and
W.~K. Wootters, ``Classical information capacity of a quantum channel,'' Phys.
Rev. A {\bf 54}, 1869 (1996).
\bibitem{fuchs_private} C.~A. Fuchs, private communication (1998).
\bibitem{peres} A. Peres and W.~K. Wootters, ``Optimal detection of quantum
information,'' Phys. Rev. Lett. {\bf 66}, 1119 (1991).
\bibitem{grover} L.~Grover, ``Quantum mechanics helps in searching for
a needle in a haystack,'' Phys. Rev. Lett. {\bf 79}, 325 (1997),
quant-ph/9706033.
\bibitem{farhi} E. Farhi and S. Gutmann, ``An analog analogue of a digital
quantum computation,'' quant-ph/9612026.
\bibitem{bbbv} C.~B. Bennett, E.~Bernstein, G.~Brassard, and
U. Vazirani, ``Strengths and weaknesses of quantum computing,''
quant-ph/9701001 (1997); C. Zalka, ``Grover's quantum searching algorithm is
optimal,'' quant-ph/9711070 (1997).
\bibitem{beckman} D. Beckman, private communication (1998).
\bibitem{griffiths} R.~B. Griffiths and C.-S. Niu, ``Semiclassical Fourier
transform for quantum computation,'' Phys. Rev. Lett. {\bf 76}, 3228 (1996),
quant-ph/9511007.
\bibitem{cleve} R. Cleve, A. Ekert, C. Macchiavello, M. Mosca, ``Quantum
algorithms revisited,'' Proc. Roy. Soc. Lond. A {\bf 454}, 339 (1998),
quant-ph/9708016.
\bibitem{lloyd} D.~S. Abrams and S. Lloyd, ``A quantum algorithm providing
exponential speed increase for finding eigenvalues and eigenvectors,''
quant-ph/9807070 (1998).
\bibitem{wiseman} H.~M. Wiseman, ``Quantum trajectories and quantum measurement
theory,'' Quantum Semiclass. Opt. {\bf 8}, 205 (1996); H.~M. Wiseman and R.~B.
Killip, ``Adaptive single-shot phase measurements: the full quantum theory,''
quant-ph/9710056.
\bibitem{helstrom} C.~W. Helstrom, {\it Quantum Detection and Estimation
Theory} (Academic Press, New York, 1976).
\bibitem{fuchs_thesis} C.~A. Fuchs, ``Distinguishability and accessible
information in quantum theory,'' quant-ph/9601020 (1996).
\bibitem{wineland} J.~J. Bollinger, W.~M. Itano, D.~J. Wineland, and D.~J.
Heinzen,
``Optical frequency measurements with maximally correlated states,'' Phys.
Rev. A {\bf 54}, R4649 (1996).
\bibitem{yurke} B. Yurke, S.~L. McCall, and J.~R. Klauder, ``$SU(2)$ and
$SU(1,1)$ interferometers,'' Phys. Rev. A {\bf 33}, 4033 (1986).
\bibitem{caves} C. Caves, ``Quantum noise in an interferometer,'' Phys. Rev.
{\bf D23}, 1693 (1981).
\bibitem{huelga} S.~F. Huelga, C. Macchiavello, T. Pellizzari, A.~K. Ekert,
M.~B. Plenio, and J.~I. Cirac, ``On the improvement of frequency standards with
quantum entanglement,'' Phys. Rev. Lett. {\bf 79}, 3865 (1997),
quant-ph/9707014.
\bibitem{woodward}S. Agnihotri and C. Woodward, ``Eigenvalues of products of
unitary matrices and quantum Schubert calculus,'' Math. Res. Lett. {\bf 5}, 817
(1998).
\end{references}
\end{document}
\begin{document}
\title[Involutes of Polygons of Constant Width in Minkowski Planes]
{Involutes of Polygons of Constant Width in Minkowski Planes}
\author[M.Craizer]{Marcos Craizer}
\address{
Departamento de Matem\'{a}tica- PUC-Rio\br
Rio de Janeiro\br
BRAZIL}
\email{[email protected]}
\author[H.Martini]{Horst Martini}
\address{
Faculty of Mathematics\br
University of Technology\br
09107 Chemnitz\br
GERMANY}
\email{[email protected]}
\thanks{The first named author wants to thank CNPq for financial support during the preparation of this manuscript. \newline E-mail of the corresponding author: [email protected]}
\subjclass{ 52A10, 52A21, 53A15, 53A40}
\keywords{area evolute, Barbier's theorem, center symmetry set, curvature, curves of constant width, Discrete Differential Geometry, evolutes, Minkowski Geometry, normed plane, equidistants, involutes, support function,
width function}
\date{June 18, 2015}
\begin{abstract}
Consider a convex polygon $P$ in the plane, and denote by $U$ a homothetical copy of the vector sum of $P$ and $-P$.
Then the polygon $U$, as unit ball, induces a norm such that, with respect to this norm, $P$ has constant Minkowskian width.
We define notions like Minkowskian curvature, evolutes and involutes for polygons of constant $U$-width, and we prove that many properties of the smooth case, which is already completely studied, are preserved.
The iteration of involutes generates a pair of sequences of polygons of constant width with respect to the Minkowski norm and its dual norm, respectively.
We prove that these sequences are converging to symmetric polygons with the same center, which can be regarded as a central point of the polygon $P$.
\end{abstract}
\maketitle
\section{Introduction}
A {\it Minkowski} or {\it normed plane} is a $2$-dimensional vector space with a norm. This norm is induced by its \emph{unit ball} $U$, which is a compact, convex set centered at the origin
(or, shortly, \emph{centered}). Thus, we write $(\mathbb{R}^2,U)$ for a Minkowski plane with unit ball $U$, whose boundary is the \emph{unit circle} of $(\mathbb{R}^2,U)$.
The geometry of normed planes and spaces, usually called \emph{Minkowski Geometry} (see \cite{Thompson96}, \cite{Ma-Sw-We}, and \cite{Ma-Sw}), is
strongly related to and influenced by the fields of Convexity, Banach Space Theory, Finsler Geometry and, more recently, Discrete and Computational Geometry.
The present paper can be considered as one of the possibly first contributions to Discrete Differential Geometry in the spirit of Minkowski Geometry.
The study of special types of curves in Minkowski planes is a promising subject (see the survey \cite{Ma-Wu}), and the particular case of curves of constant Minkowskian width has been studied for a long time
(see \cite{Chakerian66}, \cite{Chakerian83}, \cite{He-Ma}, and \S~2 of \cite{Ma-Sw}).
A curve $\gamma$ has constant Minkowskian width with respect to the unit ball $U$ or, shortly, \emph{constant $U$-width},
if $h(\gamma) + h(-\gamma)$ is constant with respect to the norm induced by $U$, where $h(\gamma)$ denotes the support
function of $\gamma$. Another concept from the classical theory of planar curves important for our paper is that of \emph{involutes and evolutes}; see, e.g., Chapter 5 of \cite{Gray} and,
respectively, \cite{GAS}. For natural
generalizations of involutes, which also might be extended from the Euclidean case to normed planes, we refer to \cite{So} and \cite{Ap-Mn}. And in \cite{Tanno} it is
shown how the concept of evolutes and involutes can help to construct curves
of constant width in the Euclidean plane.
In this paper, we consider convex polygons $P$ of constant Minkowskian width in a normed plane, for short calling them \emph{CW-polygons}. If $P$ is a CW-polygon, then the unit ball $U$ is necessarily a centered polygon
whose sides and diagonals are suitably parallel to corresponding sides and diagonals of $P$ (sometimes with diagonals suitably meaning also
sides; see \S\S~2.1 below). If, in particular, $U$ is homothetic to $P+(-P)$, then, and only then, $P$ is
of constant $U$-width in the Minkowski plane induced by $U$.
There are many results concerning \emph{smooth CW curves} in normed planes: Barbier's theorem fixing their circumference only by the diameter of the curve (cf. \cite{Petty} and \cite{Ma-Mu});
relations between curvature, evolutes, involutes, and equidistants
(see \cite{Tabach97} and, for applications of Minkowskian evolutes in computer graphics, \cite{Ait-Haddou00}); mixed areas, and the relation between the area and length
of a CW curve cut off along a diameter (see \cite{Chakerian66}, (2.1)). In this paper we prove corresponding results for \emph{CW-polygons}.
We note that our results
are direct discretizations of the corresponding results for the smooth case, where the derivatives and integrals are replaced by differences and sums.
It is meant in this sense that the results of this paper can be considered as one of the first contributions to Discrete Differential Geometry in the framework of normed planes.
Among the $U$-equidistants of a smooth CW curve $\gamma$, there is a particular one called {\it central equidistant}. The central equidistant of $\gamma$ coincides with its {\it area evolute}, while the evolute of $\gamma$ coincides with its {\it center symmetry set} (see \cite{Craizer14} and \cite{Giblin08}). We show that for a CW-polygon $P$ the same results hold: The central equidistant $M$ coincides with the area evolute, and the evolute $E$ coincides with the center symmetry set (see \cite{Craizer13}).
Since the equidistants of $P$ are the involutes of $E$, we shall choose the central equidistant as a representative of them, and we write $M=Inv(E)$.
For a Minkowski plane whose unit ball $U$ is a centered convex $2n$-gon, the \emph{dual unit ball} $V$ is also a centered convex $2n$-gon
with diagonals parallel to the sides of $U$, and the sides parallel to diagonals of $U$. As in the smooth case (cf. \cite{Craizer14}), the involutes of the central equidistant of $P$ form a
one-parameter family
of polygons having constant $V$-width. This one-parameter family consists of the $V$-equidistants of any of its members, and we shall choose the central equidistant $N$ as its representative.
Thus we write $N=Inv(M)$.
In \cite{Craizer14} it is proved that, for smooth curves, the analogous
involute
$N$ is contained in the region bounded by $M$ and has smaller or equal signed area.
In this paper we prove the corresponding fact for polygons, namely, that $N$ is contained in the region bounded by $M$ and the signed area of $N$ is not larger than the signed area of $M$.
What happens if we iterate the involutes? Let $N(0)=E$, $M(0)=M$, $N(1)=N$ and define $M(k)=Inv(N(k))$, $N(k+1)=Inv(M(k))$. Then we obtain two
sequences
$M(k)$ and $N(k)$, the first being of constant $U$-width and the
second of constant $V$-width. Moreover, we have
$$
\overline{N(0)}\supset\overline {M(0)}\supset \overline {N(1)}\supset \overline {M(1)}\supset ...\,,
$$
where ${\overline R}$ denotes the closure of the region bounded by $R$.
Denoting by $O=O(P)$ the intersection of all these sets, we shall prove that $O$ is in fact a single point.
Another form of describing the convergence of $M(k)$ and $N(k)$ to $O$ is as follows: For fixed $c$ and $d$, consider the sequences $M(k)+cU$ of polygons of constant $U$-width, and the sequences $N(k)+dV$
of polygons of constant $V$-width. Then these sequences are converging to $O+cU$ and $O+dV$, respectively, which are $U$- and $V$-balls centered at $O$.
For smooth curves the analogous results were proved in \cite{Craizer14}.
Our paper is organized as follows: In Section 2 we describe geometrically the unit ball of a Minkowski plane for which a given convex polygon has constant Minkowskian width. In Section 3, we define Minkowskian curvature, evolutes and involutes for
CW polygons and prove many properties of them. In Section 4 we consider the involute of the central equidistant, and in Section 5 we prove that the involutes iterates are converging to a single point.
\section{Polygonal Minkowskian balls, their duals, and constant Minkowskian width}
Since faces and also width functions of convex sets behave additively under (vector or) Minkowski addition, it is clear that a polygon $P$ is of constant Minkowskian width if and only if $P+(-P)$ is a
homothetical copy of the unit ball $U$ of the respective normed plane; see, e.g., \S\S~2.3 of \cite{Ma-Sw}.
If, moreover, the homothety of $U$ and $P + (-P)$ is only possible when $P$ itself is already centrally symmetric, then the only sets of constant $U$-width are the balls of that norm; cf., e.g., \cite{Yost}.
In the following we will have a closer look at various geometric relations between polygons $P$ of constant $U$-width and the unit ball $U$, since we
need them later.
Thus, let $P$ be an arbitrary planar convex polygon. By an abuse of notation, we shall denote by the same letter $P$ also the set of vertices of the polygon, the closed polygonal arc formed by the union of
its sides, and the convex region bounded by $P$.
\subsection{A centered polygon with parallel sides and diagonals}
Assume that $P=\{P_1,...,P_{2n}\}$ is a planar convex polygon with parallel opposite sides, i.e., the segments $P_iP_{i+1}$ and $P_{i+n}P_{i+n+1}$, $1\leq i\leq n$, are parallel.
\begin{lemma}\label{lemma:SymmetricBall}
Fix an origin $Z$ and take $U_1$ such that $U_1-Z=\frac{1}{2a}\left(P_{1}-P_{1+n}\right)$, for some $a>0$. Consider the polygon $U$ whose vertices are
\begin{equation}\label{eq:defineU}
U_i=Z+\frac{1}{2a}\left( P_i-P_{i+n} \right),
\end{equation}
$1\leq i\leq 2n$. Then $U$ is convex, symmetric with respect to $Z$,
$U_{i+1}-U_i\parallel P_{i+1}-P_i$ and $U_i-Z\parallel P_{i}-P_{i+n}$ for $1\leq i\leq n$ (see Figure \ref{fig:HexagonSym}). Moreover, $U$ is the unique polygon
with these properties.
\end{lemma}
\begin{figure}[htb]
\centering
\includegraphics[width=0.90\linewidth]{MPFig1.eps}
\caption{ A hexagon $P$ with parallel opposite sides and the corresponding homothet $U$ of $P+(-P)$. }
\label{fig:HexagonSym}
\end{figure}
\begin{proof}
It is clear that $U$ is symmetric with respect to $Z$, $U_{i+1}-U_i\parallel P_{i+1}-P_i$ and $U_i-Z\parallel P_{i}-P_{i+n}$ for $1\leq i\leq n$. Moreover $U_{i+1}-U_i$ has the same orientation as $P_{i+1}-P_i$, which implies that $U$ is convex.
To prove the uniqueness of $U$, observe that the point $U_2$ is obtained as the intersection of the lines parallel to $P_1P_2$ through $U_1$ and parallel to $P_2P_{2+n}$ through $Z$.
The points $U_3,...,U_n$ are obtained inductively in a similar way, while $U_{n+1},...,U_{2n}$ are reflections of $U_1,...,U_n$ with respect to $Z$.
\end{proof}
Consider now a convex polygon $\tilde{P}=\{\tilde{P}_1,...,\tilde{P}_k\}$ that has not necessarily all opposite sides parallel. Suppose that exactly $0\leq j\leq \frac{k}{2}$ pairs are parallel. Our next
lemma shows that the list of vertices of this polygon can be re-written as $P=\{P_1,P_2,...,P_{2n}\}$, $n=k-j$, with ``parallel opposite sides'' in a broader sense.
\begin{lemma}\label{lemma:Reorder}
We may re-write the list of vertices of $\tilde{P}$ as $\{P_1,P_2,...,P_{2n}\}$ such that, for each $1\leq i\leq n$,
$P_{i}P_{i+1}$ is parallel to $P_{i+n}P_{i+n+1}$ or else one of these sides, say $P_{i+n}P_{i+n+1}$, degenerates to a point, in which case the other side $P_iP_{i+1}$
is not degenerated and the line through $P_{i+n}=P_{i+n+1}$ parallel to $P_iP_{i+1}$ is outside $P$ (see Figure \ref{fig:QuadrangleSym2}).
\end{lemma}
\begin{proof}
The polygon $\tilde{P}=\{\tilde{P}_1,...,\tilde{P}_k\}$ defines exactly $n=k-j$ directions $\theta_1,..., \theta_n$, in increasing order, in the plane. We may assume that $\tilde{P}_1\tilde{P}_2$ is in direction $\theta_1$ and define $P_1=\tilde{P}_1$, $P_2=\tilde{P}_2$. For the induction step write $P_i=\tilde{P}_{l}$. If $P_i\tilde{P}_{l+1}$ is in direction $\theta_i$, define $P_{i+1}=\tilde{P}_{l+1}$, otherwise define $P_{i+1}=\tilde{P}_l$. It is now easy to verify that the polygon $P=\{P_1,P_2,...,P_{2n}\}$ satisfies the properties of the lemma.
\end{proof}
The construction of Lemma \ref{lemma:SymmetricBall} can be applied to the polygon $P$ obtained in Lemma \ref{lemma:Reorder}
(see Figure \ref{fig:QuadrangleSym2}).
If, for example, $P$ is a triangle, then $P+(-P)$ is an affinely regular hexagon (see Figure \ref{fig:Triangle}). From now on, we shall assume that $Z$ coincides with the origin of $\mathbb{R}^2$ and that $P=\{P_1,...,P_{2n}\}$, with $P_iP_{i+1}$ parallel to $U_iU_{i+1}$.
\begin{figure}[htb]
\centering
\includegraphics[width=0.70\linewidth]{MPFig2.eps}
\caption{ A quadrangle and the corresponding symmetric octagon. }
\label{fig:QuadrangleSym2}
\end{figure}
\begin{figure}[htb]
\centering
\includegraphics[width=0.70\linewidth]{MPFig3.eps}
\caption{ When $P$ is a triangle of constant $U$-width, then $U$ is an affinely regular hexagon. }
\label{fig:Triangle}
\end{figure}
\subsection{The dual Minkowskian ball}
Now we introduce the type of duality which is very useful for our investigations.
Let $(\mathbb{R}^2)^*$ denote the space of linear functionals in $\mathbb{R}^2$. The dual norm in $(\mathbb{R}^2)^*$ is defined as
$$
|| f ||=\sup\{f(u), u\in U\}.
$$
We shall identify $(\mathbb{R}^2)^*$ with $\mathbb{R}^2$ by $f(\cdot)=[\cdot,v]$, where $[\cdot,\cdot]$ denotes the determinant of a pair of planar vectors.
Under this identification, the dual norm in $\mathbb{R}^2$ is given by
$$
|| v ||=\sup\{[u,v], u\in U\}.
$$
We shall construct below a centered polygon $V$ such that, for $v$ in any side of $V$, we have $||v||=1$. Such a polygon defines a Minkowski norm equivalent to the dual norm of $U$.
Now assume that the unit ball $U$ is a centered polygon with vertices $\{U_1,...,U_{2n}\}$, $U_{i+n}=-U_i, \ 1\leq i\leq n$. Define the polygon $V$ with vertices
\begin{equation*}
V_{i+\frac{1}{2}}=\frac{U_{i+1}-U_i}{[U_i,U_{i+1}]}.
\end{equation*}
Observe that $V_{i+n+\frac{1}{2}}=-V_{i+\frac{1}{2}}$, i.e., $V$ is centered.
Now $[V_{i+\frac{1}{2}}-V_{i-\frac{1}{2}},U_i]=0$, which implies that $V_{i+\frac{1}{2}}-V_{i-\frac{1}{2}}=-aU_i$. Multiplying both sides by $V_{i+\frac{1}{2}}$ we obtain
\begin{equation*}
U_i=-\frac{V_{i+\frac{1}{2}}-V_{i-\frac{1}{2}}}{[V_{i-\frac{1}{2}},V_{i+\frac{1}{2}}]},
\end{equation*}
for $1\leq i\leq 2n$.
\begin{figure}[htb]
\centering
\includegraphics[width=0.90\linewidth]{MPFig4.eps}
\caption{ The centered hexagon $U$ and its dual $V$. }
\label{fig:HexagonDuals}
\end{figure}
\begin{lemma}\label{lemma:DualBall}
The polygon $V$ is the dual unit ball.
\end{lemma}
\begin{proof}
We have that, for $1\leq i\leq 2n$,
\begin{equation}
[tU_i+(1-t)U_{i+1},V_{i+\frac{1}{2}}]=1,
\end{equation}
for any $t\in\mathbb{R}$ and for $j\notin \{ i,i+1\}$, $[U_j, V_{i+\frac{1}{2}}]\leq1$. This implies that the vertex $V_{i+\frac{1}{2}}$ belongs to the dual unit circle.
Moreover,
\begin{equation}
[U_i,tV_{i-\frac{1}{2}}+(1-t)V_{i+\frac{1}{2}}]=1,
\end{equation}
and for $j\neq i$ we have $[U_j,tV_{i-\frac{1}{2}}+(1-t)V_{i+\frac{1}{2}}]\leq 1$, which implies that also the side $tV_{i-\frac{1}{2}}+(1-t)V_{i+\frac{1}{2}}$ belongs to the dual unit circle.
\end{proof}
\subsection{Polygons of constant Minkowskian width}
Consider a Minkowski plane $(\mathbb{R}^2,U)$, and let $P$ be a convex curve. For $f$ in the dual unit ball, the {\it support function} $h(P)(f)$ of $P$ at $f$ is defined as
\begin{equation}
h(P)(f)=\sup\{f(p), p\in P\}.
\end{equation}
The {\it width} of $P$ in the direction $f$ is defined as $w(P)(f)=h(P)(f)+h(P)(-f)$. We say that $P$ is of {\it constant Minkowskian width} if $w(P)(f)$ does not depend on $f$.
Consider now a Minkowski plane whose unit ball $U$ is a centered polygon, and let $P$ be a polygon with parallel corresponding sides and diagonals.
\begin{lemma}
In the Minkowski plane $(\mathbb{R}^2,U)$, $P$ has constant $U$-width.
\end{lemma}
\begin{proof}
By Lemma \ref{lemma:SymmetricBall}, we have that $P_{i}-P_{i+n}=a(U_i-U_{i+n})$, for some constant $a$. Since
$$
w(P)(V_{i+\frac{1}{2}})=h(P)(V_{i+\frac{1}{2}})+h(P)(-V_{i+\frac{1}{2}})=[P_i-P_{i+n},V_{i+\frac{1}{2}}],
$$
we obtain
$$
w(P)(V_{i+\frac{1}{2}})=2a,
$$
$1\leq i\leq 2n$, thus proving the lemma.
\end{proof}
Our next corollary says that in fact $U$ is homothetic to the Minkowski sum $P+(-P)$ (see \cite{Thompson96}, Th. 4.2.3).
\begin{corollary}
Let $P$ be a convex planar polygon and let $U$ be as in Lemma \ref{lemma:SymmetricBall}. Then $U$ is homothetic to $P+(-P)$.
\end{corollary}
\begin{proof}
We have that $2a=h(P)+h(-P)=h(P+(-P))=h(2aU)$, which implies that $P+(-P)$ is homothetic to $U$.
\end{proof}
\begin{corollary}\label{cor:CWequivalence}
Consider a centered polygon $U$ and a polygon $P$ whose sides are parallel to the corresponding sides of $U$. The following statements are equivalent:
\begin{enumerate}
\item
$P$ has constant $U$-width.
\item
$P+(-P)$ is homothetic to $U$.
\item
The corresponding diagonals of $U$ and $P$ are parallel to each other.
\item
$P_{i}-P_{i+n}=2a(U_i-U_{i+n})$, $1\leq i\leq n$, for some constant $a$.
\end{enumerate}
\end{corollary}
\section{Geometric properties of polygons of constant Minkowskian width}
Consider a convex polygon $P=\{P_1,...,P_{2n}\}$ with parallel opposite sides and let $U=\{U_1,...,U_{2n}\}$ be the symmetric polygon obtained
from $P$ by the construction of Lemma \ref{lemma:SymmetricBall}.
\subsection{Central Equidistant, $V$-length, and Barbier's theorem}
\paragraph{Central equidistant} Any equidistant can be written as $P_i(c)=P_i+cU_i$, $1\leq i\leq 2n$. If we take $c=-a$, we obtain
\begin{equation}\label{eq:defineCentral}
M_i=P_i+\frac{c}{2a}\left( P_{i}-P_{i+n} \right)=\frac{1}{2}\left(P_i+P_{i+n}\right), \ 1\leq i\leq 2n,
\end{equation}
called the {\it central equidistant} of $P$. It is characterized by the condition $M_i=M_{i+n}$ (see Figure \ref{fig:OctoEqui}). If we re-scale the one-parameter family of equidistants as
\begin{equation}\label{eq:REescalaEqui}
P_i(c)=M_i+cU_i,\ 1\leq i\leq 2n,
\end{equation}
we get that the $0$-equidistant is exactly the central equidistant.
A vertex $M_i$ of the central equidistant is called a {\it cusp} if $M_{i-1}$ and $M_{i+1}$ are in the same half-plane defined by the diagonal at $P_i$.
The central equidistant coincides with the {\it area evolute} of polygons defined in \cite{Craizer13}. There
it is proved that it has an odd number of cusps, at least three (see Figures \ref{fig:OctoEqui} and \ref{fig:OctoEvolute}).
\begin{figure}[htb]
\centering
\includegraphics[width=0.90\linewidth]{MPFig5.eps}
\caption{ The two traced octagons are ordinary equidistants. The thick quadrangle is the central equidistant. }
\label{fig:OctoEqui}
\end{figure}
\paragraph{$V$-Length}
Let $P$ be a polygonal arc whose sides are parallel to the corresponding ones of $U$. More precisely, we shall denote by
$\{P_s,...,P_t\}$ the vertices of $P$ and assume that $P_{i+1}-P_{i}$ is parallel to $V_{i+\frac{1}{2}}$.
We can write
\begin{equation}\label{eq:defineVlength}
P_{i+1}-P_i=\lambda_{i+\frac{1}{2}}V_{i+\frac{1}{2}}
\end{equation}
for some $\lambda_{i+\frac{1}{2}}\geq 0$. Then the {\it $V$-length} of the edge $P_iP_{i+1}$ is exactly $\lambda_{i+\frac{1}{2}}$, and we write
\begin{equation}\label{eq:defineVlength2}
L_V(P)=\sum_{i=s}^{t-1}\lambda_{i+\frac{1}{2}}.
\end{equation}
\paragraph{Barbier's theorem}
The classical Theorem of Barbier on curves of constant width in the Euclidean plane says that any such curve of diameter $d$ has circumference $d\pi$.
For Minkowski planes, it appears in \cite{Petty},
Th. 6.14(a), and
in \cite{Ma-Mu}. We prove here the version of this theorem for polygons.
Define $\alpha_{i+\frac{1}{2}}$, $1\leq i\leq 2n$, by the equation
\begin{equation}\label{eq:defineAlpha}
M_{i+1}-M_i=\alpha_{i+\frac{1}{2}} \left( U_{i+1}-U_i \right)=\alpha_{i+\frac{1}{2}}[U_i,U_{i+1}] V_{i+\frac{1}{2}}.
\end{equation}
\begin{Proposition}
Let $P(c)$ be defined by equation \eqref{eq:REescalaEqui}. Then
the $V$-length of $P(c)$ is
\begin{equation}\label{eq:Barbier}
L_V(P(c))=2cA(U),
\end{equation}
where $A(U)$ denotes the area of the polygon $U$.
\end{Proposition}
\begin{proof}
The $V$-length of the polygon $P(c)$ is given by
$$
L_V(P(c))=\sum_{i=1}^{2n} (\alpha_{i+\frac{1}{2}}+c) [U_i,U_{i+1}].
$$
Since $\alpha_{i+n+\frac{1}{2}}=-\alpha_{i+\frac{1}{2}}$, we obtain
$$
L_V(P(c))=c\sum_{i=1}^{2n}[U_i,U_{i+1}],
$$
which proves the proposition.
\end{proof}
If we admit signed lengths, equation \eqref{eq:Barbier} holds even for equidistants with cusps. In particular, for $c=0$ we obtain
\begin{equation}\label{eq:MlengthZero}
L_V(M)=0.
\end{equation}
For smooth closed curves this result was obtained in \cite{Tabach97}.
\subsection{Curvature and evolutes}
\paragraph{Minkowskian normals and evolutes} In the smooth case, the Minkowskian normal at a point $P$ is the line $P+sU$, where $P$ and $U$ have parallel tangents (see \cite{Tabach97}).
The evolute is the envelope of Minkowskian normals. For a polygon $P$, define the {\it Minkowskian normal} at a vertex $P_i$ as the line $P_i+sU_i$, $1\leq i\leq 2n$, and the {\it evolute} as the polygonal arc whose vertices
are the intersections of $P_i+sU_i$ and $P_{i+1}+sU_{i+1}$. These intersections are given by
\begin{equation}\label{eq:defineEvoluta}
E_{i+\frac{1}{2}}=P_i-\mu_{i+\frac{1}{2}}U_i=P_{i+1}-\mu_{i+\frac{1}{2}}U_{i+1},
\end{equation}
where $\mu_{i+\frac{1}{2}}$, $1\leq i\leq 2n$, is defined by
\begin{equation}\label{eq:defineCurvature}
P_{i+1}-P_i=\mu_{i+\frac{1}{2}}\left( U_{i+1}-U_i \right).
\end{equation}
\paragraph{Curvature center and radius} In \cite{Petty}, three different notions of Minkowskian curvature are defined, where the circular curvature is directly related to evolutes.
The circular center $E$ and the corresponding radius of curvature $\mu$ are defined by the condition that $E+\mu U$ has a $3$-order contact with the curve at a given point (see \cite{Tabach97}).
For polygons, we define the {\it center of curvature} $E_{i+\frac{1}{2}}$ and the {\it curvature radius} $\mu_{i+\frac{1}{2}}$ of the side $P_iP_{i+1}$ by the condition that
the $(i+\frac{1}{2})$-side of $E_{i+\frac{1}{2}}+\mu_{i+\frac{1}{2}}U$ matches exactly $P_iP_{i+1}$ (see Figure \ref{fig:Curvature}). Thus we get equations \eqref{eq:defineEvoluta} and \eqref{eq:defineCurvature}.
From equations \eqref{eq:defineVlength} and \eqref{eq:defineCurvature} we obtain that
the curvature radius of the side $P_iP_{i+1}$ is also given by
\begin{equation}\label{eq:Curvature2}
\mu_{i+\frac{1}{2}}=\frac{\lambda_{i+\frac{1}{2}}}{[U_i,U_{i+1}]}.
\end{equation}
\begin{figure}[htb]
\centering
\includegraphics[width=0.50\linewidth]{MPFig6.eps}
\caption{ The center of curvature of the side $P_3P_4$. }
\label{fig:Curvature}
\end{figure}
A vertex $E_{i+\frac{1}{2}}$ is a cusp of the evolute if the vertices $E_{i-\frac{1}{2}}$ and $E_{i+\frac{3}{2}}$ are in the same half-plane defined by the parallel to $P_iP_{i+1}$ through $E_{i+\frac{1}{2}}$.
The evolute of a CW-polygon coincides with its \emph{center symmetry set} as defined in \cite{Craizer13}, where
it is proved that it coincides with the union of cusps of all equidistants of $P$. It is also proved in \cite{Craizer13} that the number of cusps of the evolute is odd and at least the number of cusps of the central equidistant (see Figure \ref{fig:OctoEvolute}).
\begin{figure}[htb]
\centering
\includegraphics[width=0.80\linewidth]{MPFig7.eps}
\caption{ The inner polygonal arc is the central equidistant $M$ of $P$, and the outer polygonal arc is its evolute $E$. }
\label{fig:OctoEvolute}
\end{figure}
\paragraph{Sum of curvature radii}
Consider equation \eqref{eq:defineCurvature} for two opposite sides, and sum up to obtain, for $1\leq i\leq n$,
\begin{equation*}
P_{i+1}-P_{i+n+1}+P_{i+n}-P_i=(\mu_{i+\frac{1}{2}}+\mu_{i+n+\frac{1}{2}}) (U_{i+1}-U_i).
\end{equation*}
Since $P$ has constant Minkowskian width,
\begin{equation*}
2c(U_{i+1}-U_i)=(\mu_{i+\frac{1}{2}}+\mu_{i+n+\frac{1}{2}}) (U_{i+1}-U_i).
\end{equation*}
We conclude that
\begin{equation}\label{eq:SumReciprocalCurvature}
\mu_{i+\frac{1}{2}}+\mu_{i+n+\frac{1}{2}}=2c.
\end{equation}
The corresponding result for smooth curves is given in \cite{Petty}, Th. 6.14.(c).
\paragraph{Involutes and equidistants }
Consider the one-parameter family of equidistants given by equation \eqref{eq:REescalaEqui}.
The radius of curvature of $P_i(c)P_{i+1}(c)$ is the radius of curvature of $M_iM_{i+1}$ plus $c$. Thus, for $1\leq i\leq 2n$,
\begin{equation}
E_{i+\frac{1}{2}}(c)=M_i+cU_i-\left(\mu_{i+\frac{1}{2}}+c\right)U_i=E_{i+\frac{1}{2}}.
\end{equation}
We conclude that the evolute of any equidistant of $P$ is equal to the evolute of $P$. Reciprocally, any polygonal arc
whose evolute is equal to $E(P)$ is an equidistant of $P$. We define an {\it involute} of $E$ as any polygonal arc whose evolute is $E$.
Thus the involutes of $E$ are the equidistants of $P$.
\subsection{The signed area of the central equidistant} \label{sec:SignedAreas}
Given two closed curves $P$ and $Q$, the mixed area of their convex hulls is defined by the equation
$$
A(P+tQ)=A(P)+2tA(P,Q)+t^2A(Q).
$$
The Minkowski inequality says that $A(P,Q)^2\geq A(P)A(Q)$. The next lemma is well-known, see \cite[\S\S~6.3]{Gruber}.
\begin{lemma}\label{lemma:MixedArea}
Take $P$ and $Q$ as convex polygons with $k$ parallel corresponding sides. The {\it mixed area} of $P$ and $Q$ is given by
\begin{equation*}
A(P,Q)=\frac{1}{2}\sum_{i=1}^{k}[Q_{i},P_{i+1}-P_i]=\frac{1}{2}\sum_{i=1}^{k}[P_{i+1}, Q_{i+1}-Q_i].
\end{equation*}
\end{lemma}
Assume that $P$ is a closed convex polygon whose sides are parallel to the sides of the centered polygon $U$, and take $Q=U$ in Lemma \ref{lemma:MixedArea}. We obtain
$$
A(P,U)=\frac{1}{2}\sum_{i=1}^{2n} [U_{i},P_{i+1}-P_i]=\frac{1}{2}\sum_{i=1}^{2n}\lambda_{i+\frac{1}{2}}=\frac{1}{2}L_V(P),
$$
where we have used \eqref{eq:defineVlength} and \eqref{eq:defineVlength2}. Moreover, the Minkowski inequality becomes
\begin{equation}\label{eq:Isoperimetric}
L^2_V(P)\geq 4A(U)A(P).
\end{equation}
\begin{lemma}
Let $M$ be the central equidistant of a CW-polygon $P$. Then the mixed area $A(M,M)$ is non-positive.
\end{lemma}
\begin{proof}
Let $P(c)$ be defined by equation \eqref{eq:REescalaEqui}. Then
$$
A(P(c),P(c))=A(M,M)+2cA(M,U)+c^2A(U,U).
$$
Now equation \eqref{eq:MlengthZero} says that $A(M,U)=0$. Moreover, the isoperimetric inequality \eqref{eq:Isoperimetric} for curves of constant width says that
$$
A(P)\leq c^2A(U).
$$
We conclude that
$$
A(M,M)\leq 0.
$$
\end{proof}
Define the {\it signed area} of $M$ as $SA(M)=-A(M,M)$. In general, the signed area
is a sum of positive and negative areas, but when $M$ is a simple curve, it coincides with the area bounded by $M$.
\subsection{Relation between length and area of a half polygon}
Define $\beta_i$ by
\begin{equation}\label{eq:defineBeta}
\beta_i=\frac{1}{2}\sum_{j=i}^{n+i-1}\alpha_{j+\frac{1}{2}}[U_j,U_{j+1}].
\end{equation}
Observe that $\beta_{i+n}=-\beta_i$, $1\le i\leq n$, and
\begin{equation}\label{eq:deriveBeta}
\beta_{i+1}-\beta_i =-\alpha_{i+\frac{1}{2}}[U_i,U_{i+1}].
\end{equation}
Denote by $A_{1}(i,c)$ and $A_2(i,c)$ the areas of the polygons with vertices \linebreak
$\{P_i,P_{i+1},...,P_{i+n}\}$ and $\{P_{i+n},P_{i+n+1},...,P_{i}\}$. Observe that
these polygons are bounded by $P$ and the diagonal $P_iP_{i+n}$.
\begin{Proposition}\label{prop:AreaProperty}
We have that
$$
A_1(i,c)-A_2(i,c)=4c\beta_i,
$$
for $1\leq i\leq 2n$.
\end{Proposition}
\begin{proof}
Lemma 4.1 of \cite{Craizer13} says that
$$
A_1(i,c)-A_2(i,c)=-2\sum_{j=i}^{i+n-1} [M_{j+1}-M_j, cU_j]
$$
$$
=-2c\sum_{j=i}^{i+n-1}[\alpha_{j+\frac{1}{2}}[U_j,U_{j+1}]V_{j+\frac{1}{2}},U_j].
$$
Thus
$$
A_1(i,c)-A_2(i,c)=2c\sum_{j=i}^{i+n-1}\alpha_{j+\frac{1}{2}}[U_j,U_{j+1}]=4c\beta_i.
$$
\end{proof}
Denote by $L_V(i,c)$ the $V$-length of the polygonal arc whose vertices are $\{P_i(c),P_{i+1}(c),...,P_{i+n}(c)\}$. Then
\begin{equation}\label{eq:defineLi}
L_V(i,c)=\sum_{j=i}^{i+n-1}(\alpha_{j+\frac{1}{2}}+c)[U_j,U_{j+1}]=2cA(U)+2\beta_i.
\end{equation}
\begin{corollary} For $1\leq i\leq 2n$, the expression
$A_1(i,c)-cL_V(i,c)$ is independent of $i$.
\end{corollary}
\begin{proof}
By equation \eqref{eq:defineLi} and Proposition \ref{prop:AreaProperty}, we get
$$
2cL_V(i,c)-2A_1(i,c)=4c^2A(U)+4c\beta_i-2A_1(i,c)=4c^2A(U)-A(P),
$$
which proves the corollary.
\end{proof}
The above corollary presents the ``polygonal analogue'' of a known theorem holding for strictly convex curves (see \cite{Chakerian83}, eq. (2.1)).
\section{The involute of the central equidistant}
Recall that $P=\{P_1,...,P_{2n}\}$ is a convex polygon with parallel opposite sides and $U=\{U_1,...,U_{2n}\}$ is the Minkowski ball obtained
from $P$ by the construction of Lemma \ref{lemma:SymmetricBall}. The polygon $V=\{V_1,...,V_{2n}\}$ represents the dual Minkowski ball (see Lemma \ref{lemma:DualBall})
and $M=\{M_1,...,M_n\}$ is the central equidistant of $P$ (see equation \mathbf{e}qref{eq:defineCentral}).
\subsection{Basic properties of the involute $N$ of $M$}
Define the polygon $N$ by
\begin{equation}\label{eq:defineInvoluta}
N_{i+\frac{1}{2}}=M_i+\beta_i V_{i+\frac{1}{2}},
\end{equation}
$1\leq i\leq 2n$.
Observe that $N_{i+\frac{1}{2}}=N_{i+n+\frac{1}{2}}$. Due to equations \eqref{eq:defineAlpha} and \eqref{eq:deriveBeta}, we can also write
\begin{equation}\label{eq:defineInvoluta2}
N_{i+\frac{1}{2}}=M_{i+1}+\beta_{i+1} V_{i+\frac{1}{2}}.
\end{equation}
\begin{lemma}
The polygon $N$ has constant $V$-width, and the evolute of $N$ is $M$.
\end{lemma}
\begin{proof}
Since
\begin{equation}
N_{i+\frac{1}{2}}-N_{i-\frac{1}{2}}=\beta_i \left( V_{i+\frac{1}{2}}- V_{i-\frac{1}{2}} \right),
\end{equation}
$1\leq i\leq n$, the sides of $N$ are parallel to the sides of $V$. Moreover, the diagonals of $N$ are zero, so they are multiples of the diagonals of $V$. We conclude from
Corollary \ref{cor:CWequivalence} that $N$ has constant $V$-width. Finally, from equation \eqref{eq:defineInvoluta} we conclude that the evolute of $N$ is $M$.
\end{proof}
The equidistants of $N$, which are the involutes of $M$, are curves of constant $V$-width
(see Figure \ref{fig:OctoInvolute}). In \cite{Craizer13}, these polygons were called the Parallel Diagonal Transforms of $P$.
\begin{figure}[htb]
\centering
\includegraphics[width=0.80\linewidth]{MPFig8.eps}
\caption{ The central equidistant $M$ together with two involutes of $M$: The inner curve is the central equidistant $N$, and the traced curve is an ordinary involute. }
\label{fig:OctoInvolute}
\end{figure}
\subsection{ The signed area of the involute of the central equidistant}
For smooth convex curves of constant Minkowskian width, the signed area of $N$ is not larger than the signed area of $M$ (see \cite{Craizer14}). We prove here the corresponding result for polygons.
\begin{Proposition}\label{prop:signedMN}
Denoting by $SA(M)$ and $SA(N)$ the signed areas of $M$ and $N$, we have
$$
SA(M)-SA(N)=\sum_{i=1}^{n}\beta_i^2 \left[ V_{i-\frac{1}{2}},V_{i+\frac{1}{2}} \right].
$$
\end{Proposition}
\begin{proof}
Observe that
$$
\left[ M_i,M_{i+1} \right]= \left[ N_{i+\frac{1}{2}}-\beta_i V_{i+\frac{1}{2}}, \alpha_{i+\frac{1}{2}} (U_{i+1}-U_i )\right]= \alpha_{i+\frac{1}{2}}[N_{i+\frac{1}{2}},U_{i+1}-U_i]=
$$
$$
-(\beta_{i+1}-\beta_i)[N_{i+\frac{1}{2}},V_{i+\frac{1}{2}} ],\ \ \left[ N_{i-\frac{1}{2}},N_{i+\frac{1}{2}}\right] =\beta_i \left[ N_{i+\frac{1}{2}}, V_{i+\frac{1}{2}}-V_{i-\frac{1}{2}} \right],
$$
and so
$$
-\left[ M_i,M_{i+1} \right]+\left[ N_{i-\frac{1}{2}},N_{i+\frac{1}{2}} \right] =[N_{i+\frac{1}{2}}, \beta_{i+1}V_{i+\frac{1}{2}}- \beta_{i}V_{i-\frac{1}{2}} ] .
$$
Thus
$$
SA(M)-SA(N)= \sum_{i=1}^{n} - \left[ M_i,M_{i+1} \right]+\left[ N_{i-\frac{1}{2}} ,N_{i+\frac{1}{2}} \right] =
$$
$$
=-\sum_{i=1}^{n} \left[ N_{i+\frac{1}{2}}-N_{i-\frac{1}{2}}, \beta_iV_{i-\frac{1}{2}} \right]=\sum_{i=1}^{n}\beta_i^2 \left[ V_{i-\frac{1}{2}},V_{i+\frac{1}{2}} \right],
$$
where we have used that the difference
$$
[N_{i+\frac{1}{2}},\beta_{i+1}V_{i+\frac{1}{2}}]-[N_{i-\frac{1}{2}},\beta_{i}V_{i-\frac{1}{2}}]
$$
is equal to
$$
[N_{i+\frac{1}{2}}-N_{i-\frac{1}{2}},\beta_iV_{i-\frac{1}{2}}]+[N_{i+\frac{1}{2}},\beta_{i+1}V_{i+\frac{1}{2}}-\beta_{i}V_{i-\frac{1}{2}}],
$$
the discrete version of ``integration by parts''.
\end{proof}
\subsection{The involute is contained in the interior of the central equidistant}
We prove now that the region bounded by the central equidistant $M$ contains its involute $N$. For smooth convex curves, this result was proved in \cite{Craizer14}.
The exterior of the curve $M$ is defined as the set of points of the plane that can be reached from a point of $P$ by a path that does not cross $M$.
The region $\overline{M}$ bounded by $M$ is the complement of its exterior. It is well known that a point in the exterior of $M$ is the center of exactly one chord of $P$ (see \cite{Craizer13}).
\begin{Proposition}\label{prop:NsubsetM}
The involute $N$ is contained in the region $\overline{M}$ bounded by $M$.
\end{Proposition}
The proof is based on two lemmas. For a fixed index $i$, denote by $l(i)$ the line parallel to $P_{i+n}-P_i$ through $N_{i-\frac{1}{2}}$ and $N_{i+\frac{1}{2}}$. Then $l(i)$ divides the interior of $P$ into two regions of areas $B_1=B_1(i)$ and $B_2=B_2(i)$, where the second one contains $P_i$ and $P_{i+n}$.
\begin{lemma}
We have that $B_1(i)\geq B_2(i)$, $1\leq i\leq n$.
\end{lemma}
\begin{proof}
We have that
$$
B_1(i)=A_1(i)-(2c\beta_i-\delta_i-\eta_i),\ \ B_2(i)=A_2(i)+(2c\beta_i-\delta_i-\eta_i),
$$
where $\delta_i$ is the area of the regions outside $P$ and between $l(i)$, $P_iP_{i+n}$ and the support lines of $P_iP_{i+1}$ and $P_{i+n-1}P_{i+n}$, and $\eta_i$ is the area of the triangle
$M_iN_{i+\frac{1}{2}}N_{i-\frac{1}{2}}$ (see Figure \ref{fig:NAreas1}). Since,
by Proposition \ref{prop:AreaProperty}, $4c\beta_i=A_1-A_2$, we conclude that
$$
B_1(i)=\frac{A(P)}{2}+\delta_i+\eta_i,\ \ B_2(i)=\frac{A(P)}{2}-\delta_i-\eta_i,
$$
which proves the lemma.
\end{proof}
\begin{figure}[htb]
\centering
\includegraphics[width=0.70\linewidth]{MPFig9.eps}
\caption{ The line through $N_{i+\frac{1}{2}}$ and $N_{i-\frac{1}{2}}$ divides the polygon into two regions of areas $B_1$ and $B_2$. }
\label{fig:NAreas1}
\end{figure}
\begin{lemma}\label{lemma:interiorM}
Choose $C$ in the segment $N_{i-\frac{1}{2}}N_{i+\frac{1}{2}}$. Then $C$ is in the region bounded by $M$.
\mathbf{e}nd{lemma}
\begin{proof}
By an affine transformation of the plane, we may assume that $l(i)$ and $M_iC$ are orthogonal.
Consider polar coordinates $(r,\phi)$ with center $C$ and describe $P$ by $r(\phi)$. Assume that $\phi=0$ at the line $l(i)$ and that $\phi=-\phi_0$ at $P_i$.
Denote the area of the sector bounded by $P$ and the rays $\phi_1,\phi_2$ by
$$
A(\phi_1,\phi_2)=\frac{1}{2}\int_{\phi_1}^{\phi_2}r^2(\phi)d\phi.
$$
Consider a line parallel to $M_iC$ and passing through the point $Q_0$ of $P$ corresponding to $\phi=0$, and denote by $Q_1$ and $Q_2$ its intersection with the rays $\phi=-\phi_0$
and $\phi=\phi_0$, respectively (see Figure \ref{fig:NAreas2}). By convexity, we have that
$$
A(0,\phi_0)\leq A(CQ_0Q_1)=A(CQ_0Q_2)\leq A(-\phi_0,0).
$$
A similar reasoning shows that
$ A(\pi-\phi_0,\pi)\leq A(\pi,\pi+\phi_0)$. Observe also that, by convexity, $r(\phi_0)\leq r(\phi_0+\pi)$ and $r(\pi-\phi_0)\leq r(-\phi_0)$.
Now, if $r(\phi+\pi)>r(\phi)$ for all $\phi_0<\phi<\pi-\phi_0$, we would have $B_1(C)<B_2(C)$, contradicting the previous lemma. We conclude that
$r(\phi+\pi)=r(\phi)$ for at least two values of $\phi_0<\phi<\pi-\phi_0$. Since equality holds also for some $\pi-\phi_0<\phi<\pi+\phi_0$,
there are at least three chords of $P$ having $C$ as midpoint. Thus $C$ is contained in the region bounded by $M$.
\mathbf{e}nd{proof}
\begin{figure}[htb]
\centering
\includegraphics[width=0.60\linewidth]{MPFig10.eps}
\caption{ The line parallel to $M_iC$ through $Q_0$ determines the points $Q_1$ and $Q_2$. }
\label{fig:NAreas2}
\mathbf{e}nd{figure}
We can now complete the proof of Proposition \ref{prop:NsubsetM}. In fact, from Lemma \ref{lemma:interiorM} we have that each side $N_{i-\frac{1}{2}}N_{i+\frac{1}{2}}$ is contained in the region
$\overline {M}$
bounded by $M$. Therefore, no point on the boundary of $N$ can be connected with the boundary of $P$ by a curve that does not intersect $M$. This implies that the region $\overline{N}$ bounded by $N$
is contained in $\overline{M}$.
\section{Iterating involutes}
Starting with the central equidistant $M=M(0)$ and its involute $N=N(1)$, we can iterate the involute operation. We obtain two sequences of $n$-gons $M(k)$ and $N(k)$ defined by
$M(k)={\mathcal Inv}(N(k))$ and $N(k+1)={\mathcal Inv}(M(k))$. For smooth curves of constant Minkowskian width, it is proved in \cite{Craizer14} that these sequences converge to a constant. We
prove here the corresponding result for polygons.
From Proposition \ref{prop:NsubsetM}, we have
$$
\overline {M(0)}\supset\overline {N(1)}\supset\overline {M(1)}\supset \cdots,
$$
and we denote by $O=O(P)$ the intersection of all these sets.
If we represent a polygon by its vertices, we can embed the space ${\mathcal P}_{n}$ of all $n$-gons in $(\mathbb{R}^2)^{n}$. In ${\mathcal P}_{n}$ we consider the topology induced
by $\mathbb{R}^{2n}$.
\begin{thm}\label{thm:ConvMiNi}
The set $O=O(P)$ consists of a unique point, and the polygons $M(k)$ and $N(k)$ are converging to $O$ in ${\mathcal P}_{n}$.
\mathbf{e}nd{thm}
We shall call $O=O(P)$ the {\it central point} of $P$. A natural question that arises is the following.
\paragraph{Question} Is there a direct method to obtain the central point $O$ from the polygon $P$?
For fixed $c$ and $d$ construct the sequences of convex polygons $P(k,c)$ and $Q(k,d)$ whose vertices are
$$
P_i(k)=M_i(k)+cU_i(k), \ \ Q_{i+\frac{1}{2}}(k)=N_{i+\frac{1}{2}}(k)+dV_{i+\frac{1}{2}}(k)\,,
$$
respectively.
The polygons $P(k,c)$ are of constant $U$-width, while the polygons $Q(k,d)$ are of constant $V$-width.
We can re-state Theorem \ref{thm:ConvMiNi} as follows:
\begin{thm}
The sequences of polygons $P(k,c)$ and $Q(k,d)$ are converging in ${\mathcal P}_{2n}$ to $O+c\partial\mathcal{U}$ and $O+d\partial\mathcal{V}$, respectively.
\mathbf{e}nd{thm}
\begin{figure}[htb]
\centering
\includegraphics[width=0.90\linewidth]{MPFig11.eps}
\caption{ The inner curves are $M=M(0)$, $N=N(1)$ and $M(1)$. One traced curve is an ordinary $V$-equidistant of $N$, and the other one is
an ordinary $U$-equidistant of $M(1)$. }
\label{fig:OctoIterates}
\mathbf{e}nd{figure}
\mathbf{n}oindent
We shall prove now Theorem \ref{thm:ConvMiNi}.
\begin{proof}
Denote the signed areas of $M(k)$ and $N(k)$ by $SA(M(k))$ and $SA(N(k))$, respectively.
By Section \ref{sec:SignedAreas}, $SA(M(k))\geq 0$, $SA(N(k))\geq 0$, and Proposition \ref{prop:signedMN} implies that
$$
SA(M(k))-SA(N(k+1))=\sum_{i=1}^{n} \beta_{i}^2(k)[U_i,U_{i+1}],
$$
$$
SA(N(k))-SA(M(k))=\sum_{i=1}^{n} \mathbf{a}lpha_{i+\frac{1}{2}}^2(k)[V_{i-\frac{1}{2}},V_{i+\frac{1}{2}}],
$$
where $\mathbf{a}lpha_{i+\frac{1}{2}}(k)$ and $\beta_i(k)$ are defined by
\begin{equation*}
\begin{array}{l}
M_{i+1}(k)-M_i(k)=\mathbf{a}lpha_{i+\frac{1}{2}}(k)( U_{i+1}-U_i),\ \\
N_{i+\frac{1}{2}}(k)-N_{i-\frac{1}{2}}(k)=\beta_{i}(k) (V_{i+\frac{1}{2}}-V_{i-\frac{1}{2}}).
\mathbf{e}nd{array}
\mathbf{e}nd{equation*}
We conclude that
\begin{equation}\label{eq:sumsquares}
\sum_{k=1}^{\infty} \sum_{i=1}^{n} \beta_{i}^2(k)[U_i,U_{i+1}] +\sum_{k=0}^{\infty} \sum_{i=1}^{n} \mathbf{a}lpha_{i+\frac{1}{2}}^2(k)[V_{i-\frac{1}{2}},V_{i+\frac{1}{2}}] \leq SA(M(0)).
\mathbf{e}nd{equation}
From the above equation, we obtain that the sequences $\mathbf{a}lpha_{i+\frac{1}{2}}(k)$ and $\beta_i(k)$ are converging to $0$ in $\mathbb{R}^{n}$.
So the diameters of $M(k)$ and $N(k)$ are converging to zero, and thus $O$ is in fact a set consisting of a unique point.
\mathbf{e}nd{proof}
\begin{thebibliography}{n}
\bibitem{Ait-Haddou00} Ait-Haddou, R., Biard, L., Slawinski, M.A.: {\it Minkowski isoperimetric-hodograph curves}. Computer Aided Geometric Design \mathbf{t}extbf{17}, 835-861 (2000).
\bibitem{Ap-Mn} Apostol, T. M., Mnatsakanian, M. A.: \mathbf{e}mph{Tanvolutes: generalized involutes}. Amer. Math. Monthly \mathbf{t}extbf{117}, 701-713 (2010).
\bibitem{Chakerian66} Chakerian, G.D.: {\it Sets of constant width}. Pacific J. Math. \mathbf{t}extbf{19}(1), 13-21, (1966).
\bibitem{Chakerian83} Chakerian, G.D. and Groemer, H.: {\it Convex bodies of constant width}. Convexity and its Applications, Eds. P. M. Gruber and J. M. Wills, Birkh\"auser, Basel, pp. 49-96 (1983).
\bibitem{Craizer13}
Craizer, M., Teixeira, R.C., da Silva, M.A.H.B.: {\it Polygons with parallel opposite sides}, Discrete and Computational Geometry \mathbf{t}extbf{50}(2), 474-490 (2013).
\bibitem{Craizer14} Craizer, M.: {\it Iteration of involutes of constant width curves in the Minkowski plane}. Beitr. Algebra Geom. \mathbf{t}extbf{55}, 479-496 (2014).
\bibitem{Giblin08} Giblin, P.J.: {\it Affinely invariant symmetry sets}. Geometry and Topology of Caustics (Caustics 06), Banach Center Publications \mathbf{t}extbf{82}, 71-84 (2008).
\bibitem{Gray} Gray, A.: \mathbf{e}mph{Modern Differential Geometry of Curves and Surfaces}. Studies in Advanced Mathematics, CRC Press, Boca Raton, 1993.
\bibitem{GAS} Gray, A., Abbena, E., Salamon, S.: \mathbf{e}mph{Modern Differential Geometry of Curves and Surfaces with Mathematica}. Studies in Advanced Mathematics, Chapman \& Hall/CRC,
Boca Raton, 2006.
\bibitem{Gruber} Gruber, P. M.: \mathbf{e}mph{Convex and Discrete Geometry}. Springer, Berlin and Heidelberg, 2007.
\bibitem{He-Ma} Heil, E., Martini. H.: \mathbf{e}mph{Special convex bodies}. In: Handbook for Convex Geometry, Eds. P. M. Gruber and J. M. Wills, North-Holland, Amsterdam, pp. 347-385 (1993).
\bibitem{Ma-Mu} Martini, H., Mustafaev, Z.: {\it On Reuleaux triangles in Minkowski planes.} Beitr. Algebra Geom. \mathbf{t}extbf{48}, 225-235 (2007).
\bibitem{Ma-Sw} Martini, H., Swanepoel, K.J.: {\it The geometry of Minkowski spaces - a survey. Part II.} Expositiones Math. \mathbf{t}extbf{22}, 93-144 (2004).
\bibitem{Ma-Sw-We} Martini, H., Swanepoel, K.J., Weiss, G.: {\it The geometry of Minkowski spaces - a survey. Part I.} Expositiones Math. \mathbf{t}extbf{19}, 97-142 (2001).
\bibitem{Ma-Wu} Martini, H., Wu, Senlin: {\it Classical curve theory in normed planes.} Computer Aided Geometric Design \mathbf{t}extbf{31}, 373-397 (2014).
\bibitem{Petty} Petty, C. M.: {\it On the geometry of the Minkowski plane.} Riv. Mat. Univ. Parma \mathbf{t}extbf{6}, 269-292 (1955).
\bibitem{So} Solov'ev, P. A.: \mathbf{e}mph{Maximum length of the closed involute of a class of curves} (Russian). Ukrain. Geom. Sb. \mathbf{t}extbf{3}, 112-122 (1966).
\bibitem{Tabach97} Tabachnikov, S.: {\it Parameterized plane curves, Minkowski caustics, Minkowski vertices and conservative line fields}. L'Enseign. Math. \mathbf{t}extbf{43}, 3-26 (1997).
\bibitem{Tanno} Tanno, S.: \mathbf{e}mph{$C^\infty$-approximation of continuous ovals of constant width}. J. Math. Soc. Japan, \mathbf{t}extbf{28} 384-395 (1976).
\bibitem{Thompson96} Thompson, A.C.: {\it Minkowski Geometry}. Encyclopedia of Mathematics and its Applications, \mathbf{t}extbf{63}, Cambridge University Press, (1996).
\bibitem{Yost} Yost, D.: \mathbf{e}mph{Irreducible convex sets}. Mathematika \mathbf{t}extbf{38}, 134-155 (1991).
\mathbf{e}nd{thebibliography}
\mathbf{e}nd{document} |
\begin{document}
\title{NSGA-PINN: A Multi-Objective Optimization Method for Physics-Informed Neural Network Training}
\begin{abstract}
This paper presents NSGA-PINN, a multi-objective optimization framework for effective training of Physics-Informed Neural Networks (PINNs). The proposed framework uses the Non-dominated Sorting Genetic Algorithm (NSGA-II) to enable traditional stochastic gradient optimization algorithms (e.g., ADAM) to escape local minima effectively. Additionally, the NSGA-II algorithm enables satisfying the initial and boundary conditions encoded into the loss function during physics-informed training precisely. We demonstrate the effectiveness of our framework by applying NSGA-PINN to several ordinary and partial differential equation problems. In particular, we show that the proposed framework can handle challenging inverse problems with noisy data.
\end{abstract}
\keywords{Machine learning \and Data-driven scientific computing \and Multi-Objective Optimization}
\section{Introduction}
\label{sec:introduction}
\input Introduction
\section{Background}
\label{sec:background-description}
\input background
\section{The NSGA-PINN Framework}
\label{sec:proposed-method}
\input method
\section{Numerical Experiments}
\label{sec:numerical-experiments}
\input experiments
\section{Discussion} \label{sec:discussion}
\input discussion
\section{Conclusion} \label{sec:conclusion}
\input conclusion
\begin{thebibliography}{10}
\bibitem{raissi2019physics}
Maziar Raissi, Paris Perdikaris, and George~E Karniadakis.
\newblock Physics-informed neural networks: A deep learning framework for
solving forward and inverse problems involving nonlinear partial differential
equations.
\newblock {\em Journal of Computational physics}, 378:686--707, 2019.
\bibitem{chen2021physics}
Zhao Chen, Yang Liu, and Hao Sun.
\newblock Physics-informed learning of governing equations from scarce data.
\newblock {\em Nature communications}, 12(1):6136, 2021.
\bibitem{raissi2020hidden}
Maziar Raissi, Alireza Yazdani, and George~Em Karniadakis.
\newblock Hidden fluid mechanics: Learning velocity and pressure fields from
flow visualizations.
\newblock {\em Science}, 367(6481):1026--1030, 2020.
\bibitem{karniadakis2021physics}
George~Em Karniadakis, Ioannis~G Kevrekidis, Lu~Lu, Paris Perdikaris, Sifan
Wang, and Liu Yang.
\newblock Physics-informed machine learning.
\newblock {\em Nature Reviews Physics}, 3(6):422--440, 2021.
\bibitem{mao2020physics}
Zhiping Mao, Ameya~D Jagtap, and George~Em Karniadakis.
\newblock Physics-informed neural networks for high-speed flows.
\newblock {\em Computer Methods in Applied Mechanics and Engineering},
360:112789, 2020.
\bibitem{fernandez2018towards}
Xos{\'e} Fern{\'a}ndez-Fuentes, David Mera, Andr{\'e}s G{\'o}mez, and Ignacio
Vidal-Franco.
\newblock Towards a fast and accurate eit inverse problem solver: A machine
learning approach.
\newblock {\em Electronics}, 7(12):422, 2018.
\bibitem{ruder2016overview}
Sebastian Ruder.
\newblock An overview of gradient descent optimization algorithms.
\newblock {\em arXiv preprint arXiv:1609.04747}, 2016.
\bibitem{cheridito2021non}
Patrick Cheridito, Arnulf Jentzen, and Florian Rossmannek.
\newblock Non-convergence of stochastic gradient descent in the training of
deep neural networks.
\newblock {\em Journal of Complexity}, 64:101540, 2021.
\bibitem{bottou1991stochastic}
L{\'e}on Bottou et~al.
\newblock Stochastic gradient learning in neural networks.
\newblock {\em Proceedings of Neuro-N\^{\i}mes}, 91(8):12, 1991.
\bibitem{jain2017non}
Prateek Jain, Purushottam Kar, et~al.
\newblock Non-convex optimization for machine learning.
\newblock {\em Foundations and Trends{\textregistered} in Machine Learning},
10(3-4):142--363, 2017.
\bibitem{szu1986non}
Harold~H Szu.
\newblock Non-convex optimization.
\newblock In {\em Real-Time Signal Processing IX}, volume 698, pages 59--67.
SPIE, 1986.
\bibitem{krishnapriyan2021characterizing}
Aditi Krishnapriyan, Amir Gholami, Shandian Zhe, Robert Kirby, and Michael~W
Mahoney.
\newblock Characterizing possible failure modes in physics-informed neural
networks.
\newblock {\em Advances in Neural Information Processing Systems},
34:26548--26560, 2021.
\bibitem{konak2006multi}
Abdullah Konak, David~W Coit, and Alice~E Smith.
\newblock Multi-objective optimization using genetic algorithms: A tutorial.
\newblock {\em Reliability engineering \& system safety}, 91(9):992--1007,
2006.
\bibitem{gunantara2018review}
Nyoman Gunantara.
\newblock A review of multi-objective optimization: Methods and its
applications.
\newblock {\em Cogent Engineering}, 5(1):1502242, 2018.
\bibitem{deb2016multi}
Kalyanmoy Deb, Karthik Sindhya, and Jussi Hakanen.
\newblock Multi-objective optimization.
\newblock In {\em Decision Sciences}, pages 161--200. CRC Press, 2016.
\bibitem{lu2021physics}
Lu~Lu, Raphael Pestourie, Wenjie Yao, Zhicheng Wang, Francesc Verdugo, and
Steven~G Johnson.
\newblock Physics-informed neural networks with hard constraints for inverse
design.
\newblock {\em SIAM Journal on Scientific Computing}, 43(6):B1105--B1132, 2021.
\bibitem{996017}
K.~Deb, A.~Pratap, S.~Agarwal, and T.~Meyarivan.
\newblock A fast and elitist multiobjective genetic algorithm: Nsga-ii.
\newblock {\em IEEE Transactions on Evolutionary Computation}, 6(2):182--197,
2002.
\bibitem{de1993structured}
Bart De~Moor.
\newblock Structured total least squares and l2 approximation problems.
\newblock {\em Linear algebra and its applications}, 188:163--205, 1993.
\bibitem{hure2019some}
C{\^o}me Hur{\'e}, Huy{\^e}n Pham, and Xavier Warin.
\newblock Some machine learning schemes for high-dimensional nonlinear pdes.
\newblock 2019.
\bibitem{germain2022approximation}
Maximilien Germain, Huyen Pham, and Xavier Warin.
\newblock Approximation error analysis of some deep backward schemes for
nonlinear pdes.
\newblock {\em SIAM Journal on Scientific Computing}, 44(1):A28--A56, 2022.
\bibitem{zhang2016dnn}
Junbo Zhang, Yu~Zheng, Dekang Qi, Ruiyuan Li, and Xiuwen Yi.
\newblock Dnn-based prediction model for spatio-temporal data.
\newblock In {\em Proceedings of the 24th ACM SIGSPATIAL international
conference on advances in geographic information systems}, pages 1--4, 2016.
\bibitem{jagtap2020adaptive}
Ameya~D Jagtap, Kenji Kawaguchi, and George~Em Karniadakis.
\newblock Adaptive activation functions accelerate convergence in deep and
physics-informed neural networks.
\newblock {\em Journal of Computational Physics}, 404:109136, 2020.
\bibitem{baydin2018automatic}
Atilim~Gunes Baydin, Barak~A Pearlmutter, Alexey~Andreyevich Radul, and
Jeffrey~Mark Siskind.
\newblock Automatic differentiation in machine learning: a survey.
\newblock {\em Journal of Marchine Learning Research}, 18:1--43, 2018.
\bibitem{yu2010introduction}
Xinjie Yu and Mitsuo Gen.
\newblock {\em Introduction to evolutionary algorithms}.
\newblock Springer Science \& Business Media, 2010.
\bibitem{van1998multiobjective}
David~A Van~Veldhuizen and Gary~B Lamont.
\newblock Multiobjective evolutionary algorithm research: A history and
analysis.
\newblock Technical report, Citeseer, 1998.
\end{thebibliography}
\end{document} |
\begin{document}
\title{A generic characterization of \pol{\ensuremath{\mathcal{C}}\xspace}}
\author[1]{Thomas Place}
\author[2]{Marc Zeitoun}
\affil[1]{Bordeaux University, Labri}
\affil[2]{Bordeaux University, Labri}
\maketitle
\section{Introduction}
We investigate the polynomial closure operation ($\ensuremath{\mathcal{C}}\xspace \mapsto \pol{\ensuremath{\mathcal{C}}\xspace}$) defined on classes of regular languages. We present an interesting and useful connection relating the separation problem for the class \ensuremath{\mathcal{C}}\xspace and the membership problem for its polynomial closure \pol{\ensuremath{\mathcal{C}}\xspace}. It was first discovered in~\cite{pzqalt}. This connection is formulated as an algebraic characterization of \pol{\ensuremath{\mathcal{C}}\xspace} which holds when \ensuremath{\mathcal{C}}\xspace is an arbitrary quotienting lattice\xspace of regular languages and whose statement is parameterized by \ensuremath{\mathcal{C}}\xspace-separation. Its main application is an effective reduction from \pol{\ensuremath{\mathcal{C}}\xspace}-membership to \ensuremath{\mathcal{C}}\xspace-separation. Thus, as soon as one designs a \ensuremath{\mathcal{C}}\xspace-separation algorithm, this yields ``for free'' a membership algorithm for the more complex class \pol{\ensuremath{\mathcal{C}}\xspace}.
Additionally, we present a second transfer theorem which applies to a smaller class than \pol{\ensuremath{\mathcal{C}}\xspace}: the intersection class \capol{\ensuremath{\mathcal{C}}\xspace}. This is the class containing all languages $L$ such that both $L$ and its complement belong to \pol{\ensuremath{\mathcal{C}}\xspace}. This second transfer theorem is a simple corollary of the first one and was originally formulated in~\cite{AlmeidaBKK15}. However it is also stronger: it yields a reduction from $\capol{\ensuremath{\mathcal{C}}\xspace}$-membership to \ensuremath{\mathcal{C}}\xspace-{\bf membership}.
\section{Preliminary definitions}
\label{sec:prelims}
In this section, we fix the terminology and introduce several objects that we shall need to formulate and prove the results presented in the paper.
\subsection{Words and languages}
For the whole paper, we fix an arbitrary finite alphabet $A$. We denote by $A^*$ the set of all finite words over $A$, and by $\varepsilon \in A^*$ the empty word. Given two words $u,v \in A^*$, we write $u\cdot v$ (or simply $uv$) their concatenation. A \emph{language (over $A$)} is a subset of $A^*$. Abusing terminology, we denote by $u$ the singleton language $\{u\}$. It is standard to extend the concatenation operation to languages: given $K,L \subseteq A^*$, we write~$KL$ for the language $KL = \{uv \mid u \in K \text{ and } v \in L\}$. Moreover, we also consider marked concatenation, which is less standard. Given $K,L \subseteq A^*$, \emph{a marked concatenation} of $K$ with $L$ is a language of the form $KaL$ for some $a \in A$.
A class of languages \ensuremath{\mathcal{C}}\xspace is simply a set of languages. We say that \ensuremath{\mathcal{C}}\xspace is a \emph{lattice} when $\emptyset\in\ensuremath{\mathcal{C}}\xspace$, $A^* \in \ensuremath{\mathcal{C}}\xspace$ and \ensuremath{\mathcal{C}}\xspace is closed under union and intersection: for any $K,L \in \ensuremath{\mathcal{C}}\xspace$, we have $K \cup L \in \ensuremath{\mathcal{C}}\xspace$ and $K \cap L \in \ensuremath{\mathcal{C}}\xspace$. Moreover, a \emph{Boolean algebra} is a lattice \ensuremath{\mathcal{C}}\xspace which is additionally closed under complement: for any $L \in \ensuremath{\mathcal{C}}\xspace$, we have $A^* \setminus L \in \ensuremath{\mathcal{C}}\xspace$. Finally, a class \ensuremath{\mathcal{C}}\xspace is \emph{quotienting} if it is closed under quotients. That is, for any $L \in \ensuremath{\mathcal{C}}\xspace$ and any word $u \in A^*$, the following properties~hold:
\[
u^{-1}L \stackrel{\text{def}}{=}\{w\in A^*\mid uw\in L\} \text{\quad and\quad} Lu^{-1} \stackrel{\text{def}}{=}\{w\in A^*\mid wu\in L\}\text{\quad both belong to \ensuremath{\mathcal{C}}\xspace}.
\]
All classes that we consider are quotienting Boolean algebra\xspaces of regular languages. These are the languages that can be equivalently defined by nondeterministic finite automata, finite monoids or monadic second-order logic. In the paper, we work with the definition by monoids, which we recall now.
\noindent
{\bf Recognition by a monoid.} A \emph{monoid} is a set $M$ endowed with an associative multiplication $(s,t)\mapsto s\cdot t$ (we often write $st$ for $s\cdot t$) having a neutral element $1_M$, \emph{i.e.}, such that $1_M\cdot s=s\cdot 1_M=s$ for every $s \in M$. An \emph{idempotent} of a monoid $M$ is an element $e \in M$ such that $ee = e$. It is folklore that for any \emph{finite} monoid $M$, there exists a natural number $\omega(M)$ (denoted by $\omega$ when $M$ is understood) such that for any $s \in M$, the element $s^\omega$ is an idempotent.
We may now explain how to recognize languages with monoids. Observe that $A^{*}$ is a monoid whose multiplication is concatenation (the neutral element is $\varepsilon$). Thus, we may consider monoid morphisms $\alpha: A^* \to M$ where $M$ is an arbitrary monoid. Given such a morphism and some language $L \subseteq A^*$, we say that $L$ is \emph{recognized} by $\alpha$ when there exists a set $F \subseteq M$ such that $L = \alpha^{-1}(F)$. It is known that $L$ is regular if and only if it can be recognized by a morphism into a \textbf{finite} monoid.
Moreover, since we consider classes of languages that are not closed under complement (i.e. they are only lattices), we need to work with recognition by ordered monoids. An ordered monoid is a pair $(M,\leq)$ such that ``$\leq$'' is an order relation defined on $M$ which is compatible with its multiplication: given $s_1,s_2,t_1,t_2 \in M$, if $s_1 \leq t_1$ and $s_2 \leq t_2$, then $s_1s_2 \leq t_1t_2$. Furthermore, we say that a subset $F \subseteq M$ is an \emph{upper set} for $\leq$ when given any $s \in F$ and any $t \in M$ such that $s \leq t$, we have $t \in F$ as well. Consider a morphism $\alpha: A^* \to M$ and ``$\leq$'' an order on $M$ such that $(M,\leq)$ is an ordered monoid. We say that some language $L \subseteq A^*$ is \emph{$\leq$-recognized} by $\alpha$ when there exists an \textbf{upper set} $F \subseteq M$ for $\leq$ such that $L = \alpha^{-1}(F)$.
\begin{remark}
The key idea behind the definition is that the set of languages which are recognized by $\alpha: A^* \to M$ is necessarily closed under complement: if $L = \alpha^{-1}(F)$, then $A^* \setminus L = \alpha^{-1}(M \setminus F)$. However, this is not the case for the set of languages which are $\leq$-recognized by $\alpha$: while $F$ is an upper set, this need not be the case for $M \setminus F$.
\end{remark}
Finally, given any regular language $L$, one may define (and compute) a canonical morphism into a finite monoid which recognizes $L$: the syntactic morphism of $L$. Let us briefly recall its definition. One may associate to $L$ an equivalence $\equiv_L$ over $A^*$: the \emph{syntactic congruence of $L$}. Given $u,v \in A^*$, $u \equiv_L v$ if and only if $xuy \in L \Leftrightarrow xvy \in L$ for any $x,y \in A^*$. It is known and simple to verify that ``$\equiv_L$'' is a congruence on $A^*$. Thus, the set of equivalence classes $M_L = {A^*}/{\equiv_L}$ is a monoid and the map $\alpha_L: A^* \to M_L$ which maps any word to its equivalence class is a morphism. The monoid $M_L$ is called the syntactic monoid of $L$ and $\alpha_L$ its syntactic morphism. Finally, we may define a canonical order relation ``$\leq_L$'' (called syntactic order) on the syntactic monoid $M_L$. Given $s,t \in M_L$, we write $s \leq_L t$ when for any $x,y \in M_L$, $xsy \in \alpha_L(L) \Rightarrow xty \in \alpha_L(L)$. It is simple to verify that $(M_L,\leq_L)$ is an ordered monoid and that $L$ is $\leq_L$-recognized by $\alpha_L$.
It is known that $L$ is regular if and only if $M_L$ is finite (\emph{i.e.}, $\equiv_L$ has finite index): this is Myhill-Nerode theorem. In that case, one may compute the syntactic morphism $\alpha_L: A^* \to M_L$ (and the syntactic order on $M_L$) from any representation of $L$ (such as a finite automaton).
\noindent
{\bf Membership and separation.} In the paper, we are interested in two decision problems which we define now. Both are parameterized by some class of languages \ensuremath{\mathcal{C}}\xspace. Given a class of languages \ensuremath{\mathcal{C}}\xspace, the \ensuremath{\mathcal{C}}\xspace-membership problem is as follows:
\begin{tabular}{rl}
{\bf INPUT:} & A regular language $L$. \\
{\bf OUTPUT:} & Does $L$ belong to \ensuremath{\mathcal{C}}\xspace ?
\end{tabular}
Separation is slightly more involved. Given three languages $K,L_1,L_2$, we say that $K$ \emph{separates} $L_1$ from $L_2$ if $L_1 \subseteq K \text{ and } L_2 \cap K = \emptyset$. Given a class of languages \ensuremath{\mathcal{C}}\xspace, we say that $L_1$ is \emph{$\ensuremath{\mathcal{C}}\xspace$-separable} from $L_2$ if some language in \ensuremath{\mathcal{C}}\xspace separates $L_1$ from $L_2$. Observe that when \ensuremath{\mathcal{C}}\xspace is not closed under complement (which is the case for all classes investigated in the paper), the definition is not symmetrical: $L_1$ could be \ensuremath{\mathcal{C}}\xspace-separable from $L_2$ while $L_2$ is not \ensuremath{\mathcal{C}}\xspace-separable from $L_1$. The separation problem associated to a given class \ensuremath{\mathcal{C}}\xspace is as follows:
\begin{tabular}{rl}
{\bf INPUT:} & Two regular languages $L_1$ and $L_2$. \\
{\bf OUTPUT:} & Is $L_1$ $\ensuremath{\mathcal{C}}\xspace$-separable from $L_2$ ?
\end{tabular}
We use membership and separation as a mathematical tools for investigating classes of languages: given a fixed class \ensuremath{\mathcal{C}}\xspace, obtaining a \ensuremath{\mathcal{C}}\xspace-separation algorithm usually requires a solid understanding of~\ensuremath{\mathcal{C}}\xspace.
\subsection{Factorization forest theorem of Simon}
When proving our main theorem, we shall need the factorization forest theorem of Simon which is a combinatorial result about finite monoids. We briefly recall it here. We refer the reader to~\cite{kfacto,bfacto,cfacto} for more details and a proof.
Consider a finite monoid $M$ and a morphism $\alpha: A^* \rightarrow M$. An \emph{$\alpha$-factorization forest} is an ordered unranked tree whose nodes are labeled by words in $A^*$. For any inner node $x$ with label $w \in A^*$, if $w_1,\dots,w_n \in A^*$ are the labels of its children listed from left to right, then $w = w_1\cdots w_n$. Moreover, all nodes $x$ in the forest must be of the three following kinds:
\begin{itemize}
\item \emph{Leaves} which are labeled by either a single letter or
the empty word.
\item \emph{Binary inner nodes} which have exactly two children.
\item \emph{Idempotent inner nodes} which may have an arbitrary number of children. However, the labels $w_1,\dots,w_n$ of these children must satisfy $\alpha(w_1) = \cdots = \alpha(w_n) = e$ where $e$ is an idempotent element of $M$.
\end{itemize}
Note that an idempotent node with exactly two children is also a binary node. This is harmless.
Given a word $w \in A^*$, an \emph{$\alpha$-factorization forest for $w$} is an $\alpha$-factorization forest whose root is labeled by $w$. The \emph{height} of a factorization forest is the largest $h \in \nat$ such that it contains a branch with $h$ inner nodes (a single leaf has height $0$). We turn to the factorization forest theorem of Simon: there exists a bound depending only on $M$ such that any word admits an $\alpha$-factorization forest of height at most this bound.
\begin{theorem}[\cite{simonfacto,kfacto}] \label{thm:facto}
Consider a morphism $\alpha: A^* \rightarrow M$. For all words $w \in A^*$, there exists an $\alpha$-factorization forest for $w$ of height at most $3|M|-1$.
\end{theorem}
\subsection{Finite lattices}
We finish the section with useful tools that we use to manipulate classes that are finite lattices (i.e., classes containing finitely many languages). Consider a finite lattice \ensuremath{\mathcal{C}}\xspace. One may associate a \emph{canonical preorder relation over $A^*$} to \ensuremath{\mathcal{C}}\xspace. The definition is as follows. Given $w,w' \in A^*$, we write $w \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace w'$ if and only if the following holds:
\[
\text{For all $L \in \ensuremath{\mathcal{C}}\xspace$,} \quad w \in L \ \Rightarrow\ w' \in L.
\]
It is immediate from the definition that \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace is transitive and reflexive, making it a preorder. The relation \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace has many applications. We start with an important lemma, which relies on the fact that \ensuremath{\mathcal{C}}\xspace is finite. We say that a language $L \subseteq A^*$ is an \emph{upper set} (for \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace) when for any two words $u,v \in A^*$, if $u \in L$ and $u \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace v$, then $v \in L$.
\begin{lemma} \label{lem:canosatur}
Let $\ensuremath{\mathcal{C}}\xspace$ be a finite lattice. Then, for any $L \subseteq A^*$, we have $L \in \ensuremath{\mathcal{C}}\xspace$ if and only if $L$ is an upper set for \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace. In particular, \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace has finitely many upper sets.
\end{lemma}
\begin{proof}
Assume first that $L \in \ensuremath{\mathcal{C}}\xspace$. Then, for all $w \in L$ and all $w'$ such that $w \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace w'$, we have $w' \in L$ by definition of \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace. Hence, $L$ is an upper set. Assume now that $L$ is an upper set. For any word $w$, we write $\uclos w$ for the upper set $\uclos w = \{u \mid w \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace u\}$. By definition of \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace $\uclos w$ is the intersection of all $L \in \ensuremath{\mathcal{C}}\xspace$ such that $w \in L$. Therefore, $\uclos w \in \ensuremath{\mathcal{C}}\xspace$ since \ensuremath{\mathcal{C}}\xspace is a finite lattice (and is therefore closed under intersection). Finally, since $L$ is an upper set, we have,
\[
L = \bigcup_{w \in L} \uclos w.
\]
Hence, since \ensuremath{\mathcal{C}}\xspace is closed under union and is finite, $L$ belongs to \ensuremath{\mathcal{C}}\xspace.
\end{proof}
We complete this definition with another useful result. When \ensuremath{\mathcal{C}}\xspace is additionally closed under quotients, the canonical preorder \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace is compatible with word concatenation.
\begin{lemma} \label{lem:canoquo}
Let \ensuremath{\mathcal{C}}\xspace be a quotienting lattice\xspace. Then, the associated canonical preorder \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace is compatible with word concatenation. That is, for any words $u,v,u',v'$,
\[
u \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace u' \quad \text{and} \quad v \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace v' \quad \Rightarrow \quad uv \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace u'v'.
\]
\end{lemma}
\begin{proof}
Let $u,u',v,v'$ be four words such that $u \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace u'$ and $v \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace v'$. We have to prove that $uv \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace u'v'$. Let $L \in \ensuremath{\mathcal{C}}\xspace$ and assume that $uv \in L$. We use closure under left quotients to prove that $uv' \in L$ and then closure under right quotients to prove that $u'v' \in L$ which terminates the proof of this direction. Since $uv \in L$, we have $v \in u^{-1} \cdot L$. By closure under left quotients, we have $u^{-1} \cdot L \in \ensuremath{\mathcal{C}}\xspace$, hence, since $v \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace v'$, we obtain that $v'\in u^{-1} \cdot L$ and therefore that $uv' \in L$. It now follows that $u \in L \cdot (v')^{-1}$. Using closure under right quotients, we obtain that $L \cdot (v')^{-1} \in \ensuremath{\mathcal{C}}\xspace$. Therefore, since $u \ensuremath{\leqslant_\ensuremath{\mathcal{C}}\xspace}\xspace u'$, we conclude that $u' \in L \cdot (v')^{-1}$ which means that $u'v' \in L$, as desired.
\end{proof}
\section{Polynomial closure}
\label{secpolc}
In this section, we define the polynomial closure operation on classes of languages. It is the main focus of the paper. We also prove a characteristic property of this operation that will be useful in proofs later.
\subsection{Definition}
Given an arbitrary class \ensuremath{\mathcal{C}}\xspace, the \emph{polynomial closure} of \ensuremath{\mathcal{C}}\xspace, denoted by \pol{\ensuremath{\mathcal{C}}\xspace}, is the smallest class containing \ensuremath{\mathcal{C}}\xspace and closed under marked concatenation and union: for any $H,L \in \pol{\ensuremath{\mathcal{C}}\xspace}$ and $a \in A$, we have $HaL \in \pol{\ensuremath{\mathcal{C}}\xspace}$ and $H \cup L \in \pol{\ensuremath{\mathcal{C}}\xspace}$.
It is not immediate that \pol{\ensuremath{\mathcal{C}}\xspace} has robust closure properties beyond those that are explicitly stated in the definitions. However, it turns out that when \ensuremath{\mathcal{C}}\xspace satisfies robust properties itself, this is the case for \pol{\ensuremath{\mathcal{C}}\xspace} as well. It was shown by Arfi~\cite{arfi91} that when \ensuremath{\mathcal{C}}\xspace is a quotienting lattice\xspace of regular languages, then \pol{\ensuremath{\mathcal{C}}\xspace} is one as well. Note that this result is not immediate (the difficulty is to prove that \pol{\ensuremath{\mathcal{C}}\xspace} is closed under intersection).
\begin{theorem} \label{thm:polclos}
Let \ensuremath{\mathcal{C}}\xspace be a quotienting lattice\xspace of regular languages. Then, \pol{\ensuremath{\mathcal{C}}\xspace} is a quotienting lattice\xspace of regular languages closed under concatenation and marked concatenation.
\end{theorem}
We shall obtain an alternate proof of Theorem~\ref{thm:polclos} as a corollary of our main result (i.e. our algebraic characterization of \pol{\ensuremath{\mathcal{C}}\xspace}).
Finally, we shall consider two additional operations which are defined by building on polynomial closure. Given a class \ensuremath{\mathcal{C}}\xspace, we denote by \copol{\ensuremath{\mathcal{C}}\xspace} the class containing all complements of languages in \pol{\ensuremath{\mathcal{C}}\xspace}: $L \in \copol{\ensuremath{\mathcal{C}}\xspace}$ when $A^* \setminus L \in \pol{\ensuremath{\mathcal{C}}\xspace}$. Finally, we also write \capol{\ensuremath{\mathcal{C}}\xspace} for the class of all languages that belong to both \pol{\ensuremath{\mathcal{C}}\xspace} and \copol{\ensuremath{\mathcal{C}}\xspace}. The following result is an immediate corollary of Theorem~\ref{thm:polclos}.
\begin{corollary}
Let \ensuremath{\mathcal{C}}\xspace be a quotienting lattice\xspace of regular languages. Then, \copol{\ensuremath{\mathcal{C}}\xspace} is a quotienting lattice\xspace of regular languages and \capol{\ensuremath{\mathcal{C}}\xspace} is a quotienting Boolean algebra\xspace of regular languages.
\end{corollary}
\begin{proof}
By Theorem~\ref{thm:polclos}, \pol{\ensuremath{\mathcal{C}}\xspace} is a quotienting lattice\xspace of regular languages. Since quotients commute with Boolean operations, it follows from De Morgan's laws that \copol{\ensuremath{\mathcal{C}}\xspace} is a quotienting lattice\xspace of regular languages as well. Consequently, \capol{\ensuremath{\mathcal{C}}\xspace} is a quotienting lattice\xspace of regular languages and since it must be closed under complement by definition, it is actually a quotienting Boolean algebra\xspace of regular languages.
\end{proof}
\subsection{Characteristic property}
We complete the definitions with a property which applies to the polynomial closure of any \textbf{finite} quotienting lattice\xspace \ensuremath{\mathcal{C}}\xspace. Recall that in this case, we associate a canonical preorder $\leq_{\ensuremath{\mathcal{C}}\xspace}$ over $A^*$ (two words are comparable when any language in \ensuremath{\mathcal{C}}\xspace containing the first word contains the second word as well). Since \ensuremath{\mathcal{C}}\xspace is closed under quotients, $\leq_\ensuremath{\mathcal{C}}\xspace$ must be compatible with word concatenation by Lemma~\ref{lem:canoquo}.
\begin{proposition} \label{prop:upol:mainprop}
Let \ensuremath{\mathcal{C}}\xspace be a finite quotienting lattice\xspace. Consider a language $L \subseteq A^*$ in \pol{\ensuremath{\mathcal{C}}\xspace}. Then, there exist natural numbers $h,p \geq 1$ such that for any $\ell \geq h$ and $u,v,x,y \in A^*$ satisfying $u \leq_\ensuremath{\mathcal{C}}\xspace v$, we have,
\[
xu^{p\ell+1} y \in L \quad \Rightarrow \quad xu^{p\ell} v u^{p\ell} y \in L
\]
\end{proposition}
We now concentrate on proving Proposition~\ref{prop:upol:mainprop}. We fix the finite quotienting lattice\xspace \ensuremath{\mathcal{C}}\xspace for the proof. Consider a language $L \subseteq A^*$ in \pol{\ensuremath{\mathcal{C}}\xspace}. We first need to choose the natural numbers $h,p \geq 1$ depending on $L$ and \ensuremath{\mathcal{C}}\xspace. We start by choosing $p$ with the following fact.
\begin{fact} \label{fct:period}
There exists $p \geq 1$ such that for any $m,m' \geq 1$ and $w \in A^*$, $w^{pm} \leq_\ensuremath{\mathcal{C}}\xspace w^{pm'}$.
\end{fact}
\begin{proof}
Let $\sim$ be the equivalence on $A^*$ generated by $\leq_{\ensuremath{\mathcal{C}}\xspace}$. Since $\leq_\ensuremath{\mathcal{C}}\xspace$ is a preorder with finitely many upper sets which is compatible with concatenation (see Lemmas~\ref{lem:canosatur} and~\ref{lem:canoquo}), $\sim$ must be a congruence of finite index. Therefore, the set ${A^*}/{\sim}$ of $\sim$-classes is a finite monoid. It suffices to choose $p$ as the idempotent power of this finite monoid.
\end{proof}
It remains to choose $h$. Since $L$ belongs to $\pol{\ensuremath{\mathcal{C}}\xspace}$, it is built from languages in \ensuremath{\mathcal{C}}\xspace using only union and marked concatenations. It is simple to verify that marked concatenation distributes over union. Hence, $L$ is a finite union of products having the form:
\[
L_0 a_1L_1 \cdots a_mL_m,
\]
where $a_1,\dots,a_m \in A$ and $L_0,\dots,L_m \in \ensuremath{\mathcal{C}}\xspace$. We define $n \in \nat$ as a natural number such that for any product $L_0 a_1L_1 \cdots a_mL_m$ in the union, we have $m \leq n$. Finally, we let,
\[
h = 2n+1
\]
It remains to show that $h$ and $p$ satisfy the desired property. Let $\ell \geq h$ and $u,v,x,y \in A^*$ satisfying $u \leq_\ensuremath{\mathcal{C}}\xspace v$. We have to show that,
\[
xu^{p\ell+1} y \in L \quad \Rightarrow \quad xu^{p\ell} v u^{p\ell} y \in L
\]
Consequently, we assume that $xu^{p\ell+1} y \in L$. By hypothesis, we know that there exists a product $L_0 a_1L_1 \cdots a_mL_m \subseteq L$ with $a_1,\dots,a_m \in A$, $L_0,\dots,L_m \in \ensuremath{\mathcal{C}}\xspace$ and $m \leq n$ such that $xu^{p\ell+1} y \in L_0 a_1L_1 \cdots a_mL_m$. It follows that $xu^{p\ell+1} y$ admits a decomposition,
\[
xu^{p\ell+1} y = w_0a_1w_1 \cdots a_m w_m
\]
such that $w_i \in L_i$ for all $i \leq m$. Recall that by definition $\ell \geq h = 2n+1 \geq 2m+1$. Therefore, it is immediate from a pigeon-hole principle argument that an infix $u^p$ of $xu^{p\ell+1} y$ must be contained within one of the infixes $w_i$. In other words, we have the following lemma.
\begin{lemma} \label{lem:upol:mainprop}
There exist $i \leq m$, $j_1,j_2 < \ell$ such that $j_1+1+j_2 = \ell$ and $x_1,x_2 \in A^*$ satisfying,
\begin{itemize}
\item $w_i = x_1u^px_2$.
\item $w_0a_1w_1 \cdots a_i x_1 = xu^{pj_1}$.
\item $x_2 a_{i+1} \cdots a_m w_m = u^{pj_2+1} y$.
\end{itemize}
\end{lemma}
We may now finish the proof. By Fact~\ref{fct:period}, we have the following inequality,
\[
u^p \leq_\ensuremath{\mathcal{C}}\xspace u^{p(\ell+1)} = u^{p(j_1+1+j_2+1)} = u^{p(j_2+1)} u u^{p(j_1+1)-1}
\]
Moreover, since $u \leq_\ensuremath{\mathcal{C}}\xspace v$ and $\leq_\ensuremath{\mathcal{C}}\xspace$ is compatible with concatenation this yields that,
\[
u^p \leq_{\ensuremath{\mathcal{C}}\xspace} u^{p(j_2+1)} v u^{p(j_1+1)-1}
\]
Using again compatibility with concatenation we obtain,
\[
w_i = x_1u^px_2 \leq_{\ensuremath{\mathcal{C}}\xspace} x_1u^{p(j_2+1)} v u^{p(j_1+1)-1}x_2
\]
Therefore, since $w_i \in L_i$ which is a language of \ensuremath{\mathcal{C}}\xspace, it follows from the definition of $\leq_\ensuremath{\mathcal{C}}\xspace$ that $x_1u^{p(j_2+1)} v u^{p(j_1+1)-1}x_2 \in L_i$. Therefore, since $w_j \in L_j$ for all $j$,
\[
w_0a_1w_1 \cdots a_i x_1u^{p(j_2+1)} v u^{p(j_1+1)-1}x_2 a_{i+1} \cdots a_m w_m \in L_0 a_1L_1 \cdots a_mL_m
\]
By the last two items in Lemma~\ref{lem:upol:mainprop}, this exactly says that $xu^{p\ell} v u^{p\ell} y \in L_0 a_1L_1 \cdots a_mL_m$. Since we have $L_0 a_1L_1 \cdots a_mL_m \subseteq L$ by definition, this implies that $xu^{p\ell} v u^{p\ell} y \in L$, finishing the proof.
\section{Membership for \pol{\ensuremath{\mathcal{C}}\xspace}}
\label{sec:polc}
In this section, we prove the main theorem of the paper. Given an arbitrary quotienting lattice\xspace of regular languages \ensuremath{\mathcal{C}}\xspace, \pol{\ensuremath{\mathcal{C}}\xspace}-membership reduces to \ensuremath{\mathcal{C}}\xspace-separation. We state this result in the following theorem.
\begin{theorem} \label{thm:trans:polreduc}
Let \ensuremath{\mathcal{C}}\xspace be a quotienting lattice\xspace of regular languages and assume that \ensuremath{\mathcal{C}}\xspace-separation is decidable. Then \pol{\ensuremath{\mathcal{C}}\xspace}-membership is decidable as well.
\end{theorem}
\begin{remark} \label{rem:trans:remtrans}
Theorem~\ref{thm:trans:polreduc} is a generalization of a result from~\cite{pzqalt} which applies only to specific quotienting lattices\xspace \ensuremath{\mathcal{C}}\xspace belonging to a hierarchy of classes called the Straubing-Thérien hierarchy. However, let us point out that the main ideas behind the proof are all captured by the special case presented in~\cite{pzqalt}.
\end{remark}
This section is devoted to proving Theorem~\ref{thm:trans:polreduc}. It is based on an algebraic characterization of \pol{\ensuremath{\mathcal{C}}\xspace}. This characterization is formulated using equations on the syntactic ordered monoid of the language. These equations are parameterized by a relation on the syntactic monoid: the \emph{\ensuremath{\mathcal{C}}\xspace-pairs}. As we shall see, computing this relation requires an algorithm for \ensuremath{\mathcal{C}}\xspace-separation which explains the statement of Theorem~\ref{thm:trans:polreduc}.
We first present the definition of \ensuremath{\mathcal{C}}\xspace-pairs. We then use them to present the algebraic characterization of \pol\ensuremath{\mathcal{C}}\xspace and explain why Theorem~\ref{thm:trans:polreduc} is an immediate corollary. Finally, we then present a proof of this characterization. It relies on Simon's factorization forest theorem (Theorem~\ref{thm:facto}).
\subsection{\texorpdfstring{\ensuremath{\mathcal{C}}\xspace-pairs}{C-pairs}}
Consider a class of languages \ensuremath{\mathcal{C}}\xspace, an alphabet $A$, a finite monoid $M$ and a \emph{surjective} morphism $\alpha: A^* \to M$. We define a relation on $M$: the \ensuremath{\mathcal{C}}\xspace-pairs (for $\alpha$). Consider a pair $(s,t) \in M \times M$. We say that,
\begin{equation} \label{def:trans:cpairs}
\text{$(s,t)$ is a \emph{\ensuremath{\mathcal{C}}\xspace-pair} (for $\alpha$) if and only if $\alpha^{-1}(s)$ is {\bf not} \ensuremath{\mathcal{C}}\xspace-separable from $\alpha^{-1}(t)$}
\end{equation}
\begin{remark}
While we often make this implicit, being a \ensuremath{\mathcal{C}}\xspace-pair depends on the morphism $\alpha$.
\end{remark}
\begin{remark}
While we restrict ourselves to \emph{surjective} morphisms, observe that the definition makes sense for arbitrary ones. We choose to make this restriction to ensure that we get a reflexive relation, which is not the case when $\alpha$ is not surjective (if $s \in M$ has no antecedent, then $(s,s)$ is not a \ensuremath{\mathcal{C}}\xspace-pair). However this restriction is harmless: we use \ensuremath{\mathcal{C}}\xspace-pairs together with syntactic morphisms which are surjective.
\end{remark}
By definition, the set of \ensuremath{\mathcal{C}}\xspace-pairs for $\alpha$ is finite: it is a subset of $M \times M$. Moreover, having a \ensuremath{\mathcal{C}}\xspace-separation algorithm in hand is clearly enough to compute all \ensuremath{\mathcal{C}}\xspace-pairs for any input morphism $\alpha$. While simple, this property is crucial, we state it in the following lemma.
\begin{lemma} \label{lem:trans:septopairs}
Let \ensuremath{\mathcal{C}}\xspace be a class of languages and assume that \ensuremath{\mathcal{C}}\xspace-separation is decidable. Then, given an alphabet $A$, a finite monoid $M$ and a surjective morphism $\alpha: A^* \to M$ as input, one may compute all \ensuremath{\mathcal{C}}\xspace-pairs for $\alpha$.
\end{lemma}
We complete the definition with a few properties of \ensuremath{\mathcal{C}}\xspace-pairs. A simple and useful one is that the \ensuremath{\mathcal{C}}\xspace-pair relation is reflexive (it is not transitive in general).
\begin{lemma} \label{lem:trans:pairsreflex}
Let \ensuremath{\mathcal{C}}\xspace be a class of languages, $A$ an alphabet, $M$ a finite monoid and $\alpha: A^* \to M$ a surjective morphism. Then, the \ensuremath{\mathcal{C}}\xspace-pair relation is reflexive: for any $s \in M$, $(s,s)$ is a \ensuremath{\mathcal{C}}\xspace-pair.
\end{lemma}
\begin{proof}
Given $s \in M$, since $\alpha$ is surjective, we have $\alpha^{-1}(s) \neq \emptyset$. Therefore, $\alpha^{-1}(s) \cap \alpha^{-1}(s) \neq \emptyset$ and we obtain that $\alpha^{-1}(s)$ is not \ensuremath{\mathcal{C}}\xspace-separable from $\alpha^{-1}(s)$. This exactly says that $(s,s)$ is a \ensuremath{\mathcal{C}}\xspace-pair.
\end{proof}
Finally, we prove that when \ensuremath{\mathcal{C}}\xspace is a quotienting lattice\xspace of regular languages (which is the only case that we shall consider), the \ensuremath{\mathcal{C}}\xspace-pair relation is compatible with multiplication.
\begin{lemma} \label{lem:trans:mult}
Let \ensuremath{\mathcal{C}}\xspace be a quotienting lattice\xspace of regular languages, $A$ an alphabet, $M$ a finite monoid and $\alpha: A^* \to M$ a surjective morphism. For any two \ensuremath{\mathcal{C}}\xspace-pairs $(s_1,t_1),(s_2,t_2) \in M \times M$, $(s_1s_2,t_1t_2)$ is a \ensuremath{\mathcal{C}}\xspace-pair as well.
\end{lemma}
\begin{proof}
We prove the contrapositive. Assume that $(s_1s_2,t_1t_2)$ is not a \ensuremath{\mathcal{C}}\xspace-pair. We show that either $(s_1,t_1)$ is not a \ensuremath{\mathcal{C}}\xspace-pair or $(s_2,t_2)$ is not a \ensuremath{\mathcal{C}}\xspace-pair. By hypothesis, we have a separator $K \in \ensuremath{\mathcal{C}}\xspace$ such that $\alpha^{-1}(s_1s_2) \subseteq K$ and $K \cap \alpha^{-1}(t_1t_2) = \emptyset$. We define,
\[
H = \bigcap_{w \in \alpha^{-1}(s_2)} Kw^{-1}
\]
By definition, $H \in \ensuremath{\mathcal{C}}\xspace$ since \ensuremath{\mathcal{C}}\xspace is a quotienting lattice\xspace and contains only regular languages (thus $K$ has finitely many right quotients by the Myhill-Nerode theorem). Moreover, since $\alpha^{-1}(s_1s_2) \subseteq K$, one may verify from the definition that $\alpha^{-1}(s_1) \subseteq H$. There are now two cases. If $\alpha^{-1}(t_1) \cap H = \emptyset$ then $H \in \ensuremath{\mathcal{C}}\xspace$ separates $\alpha^{-1}(s_1)$ from $\alpha^{-1}(t_1)$ and we are finished: $(s_1,t_1)$ is not a \ensuremath{\mathcal{C}}\xspace-pair. Otherwise, there exists a word $u \in \alpha^{-1}(t_1) \cap H$. Let $G = u^{-1} K \in \ensuremath{\mathcal{C}}\xspace$. We claim that $G$ separates $\alpha^{-1}(s_2)$ from $\alpha^{-1}(t_2)$ which concludes the proof: $(s_2,t_2)$ is not a \ensuremath{\mathcal{C}}\xspace-pair. Indeed, given $w \in \alpha^{-1}(s_2)$, we have $u \in H \subseteq Kw^{-1}$ which means that $uw \in K$ and therefore that $w \in G = u^{-1} K$. Moreover, assume by contradiction that there exists $v \in \alpha^{-1}(t_2) \cap G$. Since $G = u^{-1} K$, it follows that $uv \in K$. Finally, since $\alpha(u)=t_1$ and $\alpha(v)= t_2$, it follows that $uv \in \alpha^{-1}(t_1t_2)$. Thus, $uv \in K \cap \alpha^{-1}(t_1t_2)$ which is a contradiction since this language is empty by hypothesis.
\end{proof}
\subsection{Characterization theorem}
We now characterize $\pol{\ensuremath{\mathcal{C}}\xspace}$ when \ensuremath{\mathcal{C}}\xspace is an arbitrary quotienting lattice\xspace by a property of the syntactic morphism of the languages in \pol{\ensuremath{\mathcal{C}}\xspace}. As we announced, the characterization is parametrized by the \ensuremath{\mathcal{C}}\xspace-pair relation that we defined above.
\begin{theorem}\label{thm:trans:caracsig}
Let $\ensuremath{\mathcal{C}}\xspace$ be a quotienting lattice\xspace of regular languages and let $L$ be a regular language. Then, the three following properties are equivalent:
\begin{enumerate}
\item $L \in \pol{\ensuremath{\mathcal{C}}\xspace}$.
\item The syntactic morphism $\alpha_L: A^* \to M_L$ of $L$ satisfies
the following property:
\begin{equation}\label{eq:trans:sig}
s^{\omega+1} \leq_L s^{\omega}ts^{\omega} \quad \text{for all \ensuremath{\mathcal{C}}\xspace-pairs $(s,t) \in M_L^2$}.
\end{equation}
\item The syntactic morphism $\alpha_L: A^* \to M_L$ of $L$ satisfies the following property:
\begin{equation}\label{eq:trans:sig2}
e \leq_L ete \quad \text{for all \ensuremath{\mathcal{C}}\xspace-pairs $(e,t) \in M_L^2$ with $e$ idempotent}.
\end{equation}
\end{enumerate}
\end{theorem}
Theorem~\ref{thm:trans:caracsig} states a reduction from \pol{\ensuremath{\mathcal{C}}\xspace}-membership to \ensuremath{\mathcal{C}}\xspace-separation. Indeed, the syntactic morphism of a regular language can be computed and Equation~\eqref{eq:trans:sig} can be decided as soon as one is able to compute all $\ensuremath{\mathcal{C}}\xspace$-pairs (which is equivalent to deciding \ensuremath{\mathcal{C}}\xspace-separation by Lemma~\ref{lem:trans:septopairs}). Hence, we obtain Theorem~\ref{thm:trans:polreduc} as an immediate corollary. Moreover, Theorem~\ref{thm:polclos} is also a simple corollary of Theorem~\ref{thm:trans:caracsig} (it is straightforward to verify that any class satisfying Item~\eqref{eq:trans:sig} in the theorem has to be a quotienting lattice\xspace).
Moreover, observe that one may also use Theorem~\ref{thm:trans:caracsig} to obtain a symmetrical characterization for the class \copol{\ensuremath{\mathcal{C}}\xspace}. Recall that $\copol{\ensuremath{\mathcal{C}}\xspace}$ contains all languages whose complement is in \pol{\ensuremath{\mathcal{C}}\xspace}. It is straightforward to verify that a language and its complement have the same syntactic monoid but opposite syntactic orders. Therefore, we obtain the following corollary.
\begin{corollary} \label{cor:trans:caracpi}
Let $\ensuremath{\mathcal{C}}\xspace$ be a quotienting lattice\xspace of regular languages and let $L$ be a regular language. Then, the three following properties are equivalent:
\begin{enumerate}
\item $L \in \copol{\ensuremath{\mathcal{C}}\xspace}$.
\item The syntactic morphism $\alpha_L: A^* \to M_L$ of $L$ satisfies the following property:
\begin{equation}\label{eq:trans:pi}
s^{\omega}ts^{\omega} \leq_L s^{\omega+1} \quad \text{for all \ensuremath{\mathcal{C}}\xspace-pairs $(s,t) \in M_L^2$}.
\end{equation}
\item The syntactic morphism $\alpha_L: A^* \to M_L$ of $L$ satisfies the following property:
\begin{equation}\label{eq:trans:pi2}
ete \leq_L e \quad \text{for all \ensuremath{\mathcal{C}}\xspace-pairs $(e,t) \in M_L^2$ with $e$ idempotent}.
\end{equation}
\end{enumerate}
\end{corollary}
This terminates the presentation of the algebraic characterization of \pol{\ensuremath{\mathcal{C}}\xspace}. We now turn to its proof.
\subsection{Proof of Theorem~\ref{thm:trans:caracsig}}
We prove Theorem~\ref{thm:trans:caracsig}. Let \ensuremath{\mathcal{C}}\xspace be a quotienting lattice\xspace of regular languages, and let us fix a regular language $L$. Let $\alpha_L: A^* \to M_L$ be its syntactic morphism. We prove that $1) \Rightarrow 2) \Rightarrow 3) \Rightarrow 1)$. We start with $1) \Rightarrow 2)$: when $L \in \pol{\ensuremath{\mathcal{C}}\xspace}$, $\alpha_L$ satisfies Equation~\eqref{eq:trans:sig}.
\subsubsection*{Direction $1) \Rightarrow 2)$}
Assume that $L \in \pol{\ensuremath{\mathcal{C}}\xspace}$. We have to show that $\alpha_L$ satisfies Equation~\eqref{eq:trans:sig}. Given a \ensuremath{\mathcal{C}}\xspace-pair $(s,t) \in M_L^2$, we have to show that $s^{\omega+1} \leq_L s^{\omega}ts^{\omega}$. We first prove the following simple fact.
\begin{fact} \label{fct:upol:upolstrat}
There exists a finite quotienting lattice\xspace $\ensuremath{\mathcal{D}}\xspace \subseteq \ensuremath{\mathcal{C}}\xspace$ such that $L \in \pol{\ensuremath{\mathcal{D}}\xspace}$.
\end{fact}
\begin{proof}
Since $L \in \pol{\ensuremath{\mathcal{C}}\xspace}$, it is built from finitely many languages in \ensuremath{\mathcal{C}}\xspace using unions and marked concatenations. We let $\ensuremath{\mathcal{F}}\xspace \subseteq \ensuremath{\mathcal{C}}\xspace$ be the finite class containing all basic languages in \ensuremath{\mathcal{C}}\xspace used in the construction. Moreover, we let \ensuremath{\mathcal{D}}\xspace be the smallest quotienting lattice\xspace containing \ensuremath{\mathcal{F}}\xspace. Clearly $\ensuremath{\mathcal{D}}\xspace \subseteq \ensuremath{\mathcal{C}}\xspace$ since \ensuremath{\mathcal{C}}\xspace is a quotienting lattice\xspace itself. Moreover, $L \in \pol{\ensuremath{\mathcal{D}}\xspace}$ since \ensuremath{\mathcal{D}}\xspace contains all languages in \ensuremath{\mathcal{C}}\xspace required to build $L$ by definition. It remains to show that \ensuremath{\mathcal{D}}\xspace is finite. By definition, the languages in \ensuremath{\mathcal{D}}\xspace are built from those in \ensuremath{\mathcal{F}}\xspace by applying quotients, unions and intersections. Therefore, since quotients commute with Boolean operations, any language in \ensuremath{\mathcal{D}}\xspace is built by applying intersections and unions to quotients of languages in \ensuremath{\mathcal{F}}\xspace. Finally, any regular language has finitely many quotients by the Myhill-Nerode theorem. Thus, since \ensuremath{\mathcal{F}}\xspace was finite, this is the case for \ensuremath{\mathcal{D}}\xspace as well.
\end{proof}
We work with the canonical preorder $\leq_\ensuremath{\mathcal{D}}\xspace$ over $A^*$ associated to the finite quotienting lattice\xspace \ensuremath{\mathcal{D}}\xspace. Since $(s,t)$ is a \ensuremath{\mathcal{C}}\xspace-pair, we know that $\alpha^{-1}(s)$ is not \ensuremath{\mathcal{C}}\xspace-separable from $\alpha^{-1}(t)$. Therefore, since $\ensuremath{\mathcal{D}}\xspace \subseteq \ensuremath{\mathcal{C}}\xspace$, it follows that $\alpha^{-1}(s)$ is not \ensuremath{\mathcal{D}}\xspace-separable from $\alpha^{-1}(t)$. Consider the language,
\[
H = \{v \in A^* \mid u \leq_\ensuremath{\mathcal{D}}\xspace v \text{ for some $u \in \alpha^{-1}(s)$}\}
\]
By definition, $H$ is an upper set for $\leq_\ensuremath{\mathcal{D}}\xspace$ and therefore belongs to \ensuremath{\mathcal{D}}\xspace by Lemma~\ref{lem:canosatur}. Moreover, $H$ includes $\alpha^{-1}(s)$ by definition. Consequently, since $\alpha^{-1}(s)$ is not \ensuremath{\mathcal{D}}\xspace-separable from $\alpha^{-1}(t)$, we know that $H$ intersects $\alpha^{-1}(t)$. This yields $u \in \alpha^{-1}(s)$ and $v \in \alpha^{-1}(t)$ such that $u \leq_\ensuremath{\mathcal{D}}\xspace v$. Hence, we may apply Proposition~\ref{prop:upol:mainprop} which yields natural numbers $h,p \geq 1$ such that for any $x,y \in A^*$,
\[
xu^{ph\omega+1} y \in L \quad \Rightarrow \quad xu^{ph\omega}v u^{ph\omega} y \in L
\]
By definition of the syntactic order on $M_L$, it then follows that,
\[
s^{\omega+1} = \alpha(u^{ph\omega+1}) \leq_L \alpha(u^{ph\omega}v u^{ph\omega}) = s^\omega t s^\omega
\]
This concludes the proof for this direction.
\subsubsection*{Direction $2) \Rightarrow 3)$}
Let us assume that the syntactic morphism $\alpha_L: A^* \to M_L$ of $L$ satisfies~\eqref{eq:trans:sig}. We need to prove that it satisfies~\eqref{eq:trans:sig2} as well. Let $(e,t) \in M_L^2$ be a \ensuremath{\mathcal{C}}\xspace-pair with $e$ idempotent. We have to show that $e \leq_L ete$. Since~\eqref{eq:trans:sig} holds, we know that $e^{\omega+1} \leq_L e^{\omega}te^{\omega}$. Moreover, since $e$ is idempotent, we have $e = e^{\omega+1} = e^{\omega}$. Thus, we get $e \leq_L ete$ as desired.
\subsubsection*{Direction $3) \Rightarrow 1)$}
It now remains to prove the harder ``$3) \Rightarrow 1)$'' direction of Theorem~\ref{thm:trans:caracsig}. We use induction to prove that for any finite ordered monoid $(M,\leq)$ and any surjective morphism $\alpha: A^* \to M$ satisfying~\eqref{eq:trans:sig2}, any language $\leq$-recognized by $\alpha$ may be constructed from languages of \ensuremath{\mathcal{C}}\xspace using unions and (marked) concatenations (thus showing that it belongs to \pol{\ensuremath{\mathcal{C}}\xspace}). Since $L$ is $\leq_L$-recognized by its syntactic morphism, this ends the proof.
We fix a surjective morphism $\alpha: A^* \to M$ satisfying~\eqref{eq:trans:sig2}: for any \ensuremath{\mathcal{C}}\xspace-pair $(e,t) \in M^2$ with $e$ idempotent, we have $e \leq ete$. The proof is based on Simon's factorization forest theorem (see Section~\ref{sec:prelims}). We state it in the following proposition.
\begin{proposition} \label{prop:trans:signec2}
For all $h \in \nat$ and all $s \in M$, there exists $H_{s,h} \in \pol{\ensuremath{\mathcal{C}}\xspace}$ such that for all $w \in A^*$:
\begin{itemize}
\item If $w \in H_{s,h}$ then $s \leq \alpha(w)$.
\item If $\alpha(w) = s$ and $w$ admits an $\alpha$-factorization forest of height at most $h$ then $w \in H_{s,h}$.
\end{itemize}
\end{proposition}
Assume for now that Proposition~\ref{prop:trans:signec2} holds. Given $h = 3|M|-1$, for all $s \in M$, consider the language $H_{s,h} \in \pol{\ensuremath{\mathcal{C}}\xspace}$ associated to $s$ and $h$ by Proposition~\ref{prop:trans:signec2}. We know from Simon's Factorization Forest theorem (Theorem~\ref{thm:facto}) that all words in $A^*$ admit an $\alpha$-factorization forest of height at most $3|M| - 1$. Therefore, for all $w \in A^*$ we have,
\begin{enumerate}
\item\label{item:trans:4} If $w \in H_{s,h}$ then $s \leq \alpha(w)$.
\item\label{item:trans:5} If $\alpha(w) = s$ then $w \in H_{s,h}$.
\end{enumerate}
Let $L$ be some language $\leq$-recognized by $\alpha$ and let $F$ be its accepting set. Observe that $L = \bigcup_{s \in F} H_{s,h}$. Indeed, by Item~\ref{item:trans:5} above, we have $L \subseteq \bigcup_{s \in F} H_{s,h}$. Moreover, by definition of $\leq$-recognizability, $F$ has to be an upper set, that is, if $s \in F$ and $s \leq t$ then $t\in F$. Hence, Item~\ref{item:trans:4} above implies that $\bigcup_{s \in F} H_{s,h} \subseteq L$. We conclude that $L \in \pol{\ensuremath{\mathcal{C}}\xspace}$ since it is a union of languages $H_{s,h} \in \pol{\ensuremath{\mathcal{C}}\xspace}$. This finishes the proof of Theorem~\ref{thm:trans:caracsig}. It now remains to prove Proposition~\ref{prop:trans:signec2}.
We begin with a lemma which defines the basic languages in \ensuremath{\mathcal{C}}\xspace that we will use in the construction of our languages in \pol{\ensuremath{\mathcal{C}}\xspace}. Note that this is also where we use the fact that~\eqref{eq:trans:sig2} holds.
\begin{lemma} \label{lem:trans:kisright}
For any idempotent $e \in M$, there exists a language $K_e$ belonging to \ensuremath{\mathcal{C}}\xspace (and therefore to \pol{\ensuremath{\mathcal{C}}\xspace}) which satisfies the two following properties,
\begin{enumerate}
\item For all $u \in K_e$, we have $e \leq e\alpha(u)e$.
\item $\alpha^{-1}(e) \subseteq K_e$.
\end{enumerate}
\end{lemma}
\begin{proof}
Let $T \subseteq M$ be the set of all elements $t \in M$ such that $(e,t)$ is {\bf not} a \ensuremath{\mathcal{C}}\xspace-pair (\emph{i.e.}, $\alpha^{-1}(e)$ is \ensuremath{\mathcal{C}}\xspace-separable from $\alpha^{-1}(t)$). By definition, for all $t \in T$, there exists a language $G_{t} \in \ensuremath{\mathcal{C}}\xspace$ which separates $\alpha^{-1}(e)$ from $\alpha^{-1}(t)$. We let $K_e = \bigcap_{t \in T} G_{t}$. Clearly, $K_e \in \ensuremath{\mathcal{C}}\xspace$ since \ensuremath{\mathcal{C}}\xspace is a quotienting lattice\xspace, and is therefore closed under intersection. Moreover, $\alpha^{-1}(e) \subseteq K_e$ since the inclusion holds for all languages $G_{t}$. Finally, given $u \in K_e$, it is immediate from the definition that $\alpha(u)$ does not belong to $T$ which means that $(e,\alpha(u))$ is a \ensuremath{\mathcal{C}}\xspace-pair. The first item is now immediate from~\eqref{eq:trans:sig2} since $e$ is idempotent.
\end{proof}
We may now start the proof of Proposition~\ref{prop:trans:signec2}. Let $h \in \nat$ and $s \in M$. We construct $H_{s,h} \in \pol{\ensuremath{\mathcal{C}}\xspace}$ by induction on $h$. Assume first that $h = 0$. Note that the nonempty words having an $\alpha$-factorization forest of height at most $0$ are all single letters. We let $B = \{b \in A \mid \alpha(b) = s\}$. Moreover, we use the language $K_{1_{M}}$ as defined in Lemma~\ref{lem:trans:kisright} for the neutral element $1_{M}$ (which is an idempotent). There are two cases depending on whether $s = 1_{M}$ or not. If $s \neq 1_{M}$, we let,
\[
H_{s,0} = \bigcup_{b \in B} K_{1_{M}}bK_{1_{M}}.
\]
Otherwise, when $s = 1_{M}$, we let,
\[
H_{s,0} = K_{1_{M}} \cup \bigcup_{b \in B} K_{1_{M}}bK_{1_{M}}.
\]
Note that $H_{s,0} \in \pol{\ensuremath{\mathcal{C}}\xspace}$ since we only used marked concatenation and unions and $K_{1_{M}} \in \ensuremath{\mathcal{C}}\xspace \subseteq \pol{\ensuremath{\mathcal{C}}\xspace}$ by definition in Lemma~\ref{lem:trans:kisright}. We now prove that this definition satisfies the two conditions in Proposition~\ref{prop:trans:signec2}. We do the proof for the case when $s \neq 1_{M}$ (the other case is similar).
Assume first that $w \in H_{s,0}$, we have to prove that $s \leq \alpha(w)$. By definition $w = ubu'$ with $u,u' \in K_{1_{M}}$ and $b \in B$. Hence, $\alpha(w) = \alpha(u)s\alpha(u')$. Since $u,u' \in K_{1_{M}}$, we obtain from the first item in Lemma~\ref{lem:trans:kisright} that $1_{M} \leq \alpha(u)$ and $1_{M} \leq \alpha(u')$. It follows that $s \leq \alpha(u)s\alpha(u') = \alpha(w)$.
We turn to the second item. Let $w \in A^*$ such that $\alpha(w) = s$ and $w$ admits an $\alpha$-factorization forest of height at most $0$. Since we assumed that $s \neq 1_M$, $w$ cannot be empty. We have to prove that $w \in H_{s,0}$. By hypothesis, $w$ is a one letter word $b \in B$.
Hence, $w \in K_{1_{M}}bK_{1_{M}}$ since $\varepsilon \in K_{1_{M}}$ by the first item in Lemma~\ref{lem:trans:kisright}.
Assume now that $h > 0$. There are two cases depending on whether $s$ is idempotent or not. We treat the idempotent case (the other case is essentially a simpler version of the same proof). Hence, we assume that $s$ is an idempotent, that we denote by $e$. We begin by constructing $H_{e,h}$ and then prove that it satisfies the conditions in the proposition. For all $t \in M$, one can use induction to construct $H_{t,h-1} \in \pol{\ensuremath{\mathcal{C}}\xspace}$ such that for all $w \in A^*$:
\begin{itemize}
\item If $w \in H_{t,h-1}$ then $t \leq \alpha(w)$.
\item If $\alpha(w) = t$ and $w$ is empty or admits an $\alpha$-factorization
forest of height at most $h-1$, then $w \in H_{t,h-1}$.
\end{itemize}
We now define $H_{e,h}$ as the union of three languages. Intuitively, the first one contains the words which are either empty or have an $\alpha$-factorization forest of height at most $h-1$, the second one, words having an $\alpha$-factorization forest of height $h$ and whose root is a binary node, and the third one, words with an $\alpha$-factorization forest of height $h$ and whose root is an idempotent node.
\[
H_{e,h} = H_{e,h-1} \ \cup\ \bigcup_{t_1t_2=e} (H_{t_1,h-1}H_{t_2,h-1})\ \cup\ H_{e,h-1}K_eH_{e,h-1} \quad \text{with $K_e$ as defined in Lemma~\ref{lem:trans:kisright}}
\]
Note that by definition, $H_{e,h}$ is a union of concatenations of languages in \pol{\ensuremath{\mathcal{C}}\xspace} and therefore belongs to \pol{\ensuremath{\mathcal{C}}\xspace} itself. We need to prove that it satisfies the conditions of the proposition. Choose some $w \in A^*$ and assume first that $w \in H_{e,h}$. We need to prove that $e \leq \alpha(w)$.
\begin{itemize}
\item If $w \in H_{e,h-1}$, then this is by definition of $H_{e,h-1}$.
\item If $w \in H_{t_1,h-1}H_{t_2,h-1}$ for $t_1,t_2 \in M$ such that $t_1t_2 = e$, then by definition, $w = w_1w_2$ with $t_1 \leq \alpha(w_1)$ and ${t_2} \leq \alpha(w_2)$. It follows that $e = t_1t_2 \leq \alpha(w_1w_2) = \alpha(w)$.
\item Finally, if $w \in H_{e,h-1}K_eH_{e,h-1}$, we obtain that $w = w_1uw_2$ with $e \leq \alpha(w_1)$, $u \in K_e$ and $e \leq \alpha(w_2)$. In particular, by the second item in Lemma~\ref{lem:trans:kisright}, $e \leq e\alpha(u)e$. Hence, since $e\alpha(u)e \leq \alpha(w_1)\alpha(u)\alpha(w_2) = \alpha(w)$, we conclude that $e \leq \alpha(w)$.
\end{itemize}
Conversely, assume that $\alpha(w) = e$ and that $w$ admits an $\alpha$-factorization forest of height at most $h$. We have to prove that $w \in H_{e,h}$. There are again three cases.
\begin{itemize}
\item First, if $w$ is empty or admits an $\alpha$-factorization forest of height at most
$h-1$, then $w \in H_{e,h-1}$ by definition.
\item Second, if $w$ admits an $\alpha$-factorization forest of height $h$ whose root is a binary node, then $w = w_1w_2$ with $w_1,w_2$ admitting forests of height at most $h-1$. Let $t_1= \alpha(w_1)$ and ${t_2} = \alpha(w_2)$. Observe that $t_1t_2 = \alpha(w) = e$. By definition, we have $w_1 \in H_{t_1,h-1}$ and $w_2 \in H_{t_2,h-1}$. Hence, $w \in H_{t_1,h-1}H_{t_2,h-1} \subseteq H_{e,h}$ and we are finished.
\item Finally, if $w$ admits an $\alpha$-factorization forest of height $h$ whose root is an idempotent node, then $w = w_1uw_2$ with $\alpha(w_1) = \alpha(u) = \alpha(w_2) = e$ and $w_1,w_2$ admitting forests of height at most $h-1$. It follows that $w_1,w_2 \in H_{e,h-1}$ and since $\alpha(u) = e$, it is immediate that $u \in K_e$ by the first item in Lemma~\ref{lem:trans:kisright}. We conclude that $w \in H_{e,h-1}K_eH_{e,h-1} \subseteq H_{e,h}$.
\end{itemize}
This concludes the proof of Proposition~\ref{prop:trans:signec2}.
\section{\texorpdfstring{Membership for \capol{\ensuremath{\mathcal{C}}\xspace}}{Membership for the intersection between Pol(C) and co-Pol(C)}}
\label{sec:capolc}
In this last section, we present a second transfer theorem which applies to the intersection class \capol{\ensuremath{\mathcal{C}}\xspace}. Recall that this denotes the class made of all languages which belong to both \pol{\ensuremath{\mathcal{C}}\xspace} and \copol{\ensuremath{\mathcal{C}}\xspace}.
The membership problem is simpler to handle for \capol{\ensuremath{\mathcal{C}}\xspace} than it is for \pol{\ensuremath{\mathcal{C}}\xspace}. Recall that using the generic characterization of \pol{\ensuremath{\mathcal{C}}\xspace} (i.e. Theorem~\ref{thm:trans:caracsig}) to decide \pol{\ensuremath{\mathcal{C}}\xspace}-membership requires an algorithm for \ensuremath{\mathcal{C}}\xspace-separation. In other words, we reduced \pol{\ensuremath{\mathcal{C}}\xspace}-membership to a stronger problem for \ensuremath{\mathcal{C}}\xspace: separation. It turns out that deciding membership for \capol{\ensuremath{\mathcal{C}}\xspace} only requires an algorithm for \ensuremath{\mathcal{C}}\xspace-{\bf membership}: the same problem is used on both ends of the reduction. Intuitively, this second transfer result is much stronger than the previous one. However, it turns out that the former is a simple corollary of the latter: it is obtained via a few algebraic manipulations on the generic characterization of \pol{\ensuremath{\mathcal{C}}\xspace} (i.e. Theorem~\ref{thm:trans:caracsig}). This was first observed by Almeida, Bartonov{\'{a}}, Kl{\'{\i}}ma and Kunc~\cite{AlmeidaBKK15}.
\begin{theorem}\label{thm:trans:polcopolreduc}
Let \ensuremath{\mathcal{C}}\xspace be a quotienting lattice\xspace of regular languages and assume that \ensuremath{\mathcal{C}}\xspace-membership is decidable. Then $(\capol{\ensuremath{\mathcal{C}}\xspace})$-membership is decidable as well.
\end{theorem}
This section is devoted to proving Theorem~\ref{thm:trans:polcopolreduc}. Similarly to Theorem~\ref{thm:trans:polreduc}, the argument is based on an algebraic characterization of \capol{\ensuremath{\mathcal{C}}\xspace} parametrized by a relation depending on \ensuremath{\mathcal{C}}\xspace. However, unlike the \ensuremath{\mathcal{C}}\xspace-pairs that we used in the \pol{\ensuremath{\mathcal{C}}\xspace}-characterization (i.e. Theorem~\ref{thm:trans:caracsig}), this new relation can be computed as soon as \ensuremath{\mathcal{C}}\xspace-membership is decidable. We speak of \emph{saturated \ensuremath{\mathcal{C}}\xspace-pairs}. We first define this new object and then use it to present the characterization of \capol{\ensuremath{\mathcal{C}}\xspace}.
\subsection{\texorpdfstring{Saturated \ensuremath{\mathcal{C}}\xspace-pairs}{Saturated C-pairs}}
Consider a class of languages \ensuremath{\mathcal{C}}\xspace, an alphabet $A$, a finite monoid $M$ and a \emph{surjective} morphism $\alpha: A^*\to M$. We define a new relation on $M$: the \emph{saturated \ensuremath{\mathcal{C}}\xspace-pairs} (for $\alpha$). Consider a pair $(s,t) \in M \times M$. We say that,
\begin{equation} \label{def:trans:scpairs}\begin{array}{c}
\text{$(s,t)$ is a saturated \emph{\ensuremath{\mathcal{C}}\xspace-pair} (for $\alpha$)} \\
\text{if and only if} \\
\text{{\bf no} language $K \in \ensuremath{\mathcal{C}}\xspace$ {\bf recognized by $\alpha$} separates $\alpha^{-1}(s)$ from $\alpha^{-1}(t)$}
\end{array}
\end{equation}
Clearly, this new notion is closely related to the \ensuremath{\mathcal{C}}\xspace-pairs that we defined in Section~\ref{sec:polc}. When $(s,t)$ is a \ensuremath{\mathcal{C}}\xspace-pair, $\alpha^{-1}(s)$ is not \ensuremath{\mathcal{C}}\xspace-separable from $\alpha^{-1}(t)$. This means that no language $K \in \ensuremath{\mathcal{C}}\xspace$ (including those recognized by $\alpha$) separates $\alpha^{-1}(s)$ from $\alpha^{-1}(t)$. Thus, $(s,t)$ is also a saturated \emph{\ensuremath{\mathcal{C}}\xspace-pair}.
\begin{fact} \label{fct:trans:satunsat}
Consider a class \ensuremath{\mathcal{C}}\xspace, an alphabet $A$, a finite monoid $M$ and a surjective morphism $\alpha: A^* \to M$. Then, any \ensuremath{\mathcal{C}}\xspace-pair $(s,t) \in M \times M$ is also a saturated \ensuremath{\mathcal{C}}\xspace-pair.
\end{fact}
\begin{remark}
The converse of Fact~\ref{fct:trans:satunsat} is false in general: an arbitrary saturated \ensuremath{\mathcal{C}}\xspace-pair need not be a \ensuremath{\mathcal{C}}\xspace-pair. Indeed, we shall later prove that the saturated \ensuremath{\mathcal{C}}\xspace-pair relation is transitive and we already stated that the \ensuremath{\mathcal{C}}\xspace-pair relation is not. In fact, we prove below that the saturated \ensuremath{\mathcal{C}}\xspace-pairs are exactly the transitive closure of the original \ensuremath{\mathcal{C}}\xspace-pairs.
\end{remark}
While very similar to \ensuremath{\mathcal{C}}\xspace-pairs, saturated \ensuremath{\mathcal{C}}\xspace-pairs are also simpler to handle. In particular, having an algorithm for \ensuremath{\mathcal{C}}\xspace-membership suffices to compute all saturated \ensuremath{\mathcal{C}}\xspace-pairs. Indeed, with such a procedure in hand, it is possible to compute all subsets $F \subseteq M$ such that $\alpha^{-1}(F) \in \ensuremath{\mathcal{C}}\xspace$. One may then decide whether $(s,t) \in M \times M$ is a saturated \ensuremath{\mathcal{C}}\xspace-pair by checking whether one of these subsets $F$ satisfies $s \in F$ and $t \not\in F$. We state this in the following lemma.
\begin{lemma} \label{lem:trans:membtopairs}
Let \ensuremath{\mathcal{C}}\xspace be a class of languages and assume that \ensuremath{\mathcal{C}}\xspace-membership is decidable. Then, given an alphabet $A$, a finite monoid $M$ and a surjective morphism $\alpha: A^* \to M$ as input, one may compute all saturated \ensuremath{\mathcal{C}}\xspace-pairs for $\alpha$.
\end{lemma}
Furthermore, saturated \ensuremath{\mathcal{C}}\xspace-pairs satisfy stronger properties than the original \ensuremath{\mathcal{C}}\xspace-pairs: they correspond to a \emph{transitive relation}. Altogether, this means that the saturated \ensuremath{\mathcal{C}}\xspace-pair relation is a preorder for an arbitrary class \ensuremath{\mathcal{C}}\xspace.
\begin{lemma} \label{lem:trans:satquot}
Let \ensuremath{\mathcal{C}}\xspace be a class of languages, $A$ an alphabet, $M$ a finite monoid and $\alpha: A^* \to M$ a surjective morphism. Then, the two following properties hold:
\begin{itemize}
\item The saturated \ensuremath{\mathcal{C}}\xspace-pair relation is reflexive: for any $s \in M$, $(s,s)$ is a saturated \ensuremath{\mathcal{C}}\xspace-pair.
\item The saturated \ensuremath{\mathcal{C}}\xspace-pair relation is transitive: for any $r,s,t \in M$ such that $(r,s)$ and $(s,t)$ are saturated \ensuremath{\mathcal{C}}\xspace-pairs, $(r,t)$ is a saturated \ensuremath{\mathcal{C}}\xspace-pair as well.
\end{itemize}
\end{lemma}
\begin{proof}
For the first item, we know from Lemma~\ref{lem:trans:pairsreflex} that for any $s \in M$, $(s,s)$ is a \ensuremath{\mathcal{C}}\xspace-pair. Therefore, it is also a saturated \ensuremath{\mathcal{C}}\xspace-pair by Fact~\ref{fct:trans:satunsat}.
We turn to the second item. Consider $r,s,t \in M$ such that $(r,s)$ and $(s,t)$ are saturated \ensuremath{\mathcal{C}}\xspace-pairs. We show that $(r,t)$ is a saturated \ensuremath{\mathcal{C}}\xspace-pair as well. That is, we must show that no language of \ensuremath{\mathcal{C}}\xspace recognized by $\alpha$ separates $\alpha^{-1}(r)$ from $\alpha^{-1}(t)$. Thus, consider $L \in \ensuremath{\mathcal{C}}\xspace$ recognized by $\alpha$ such that $\alpha^{-1}(r) \subseteq L$. We have to show that $\alpha^{-1}(t) \cap L = \emptyset$. Since $(r,s)$ is a saturated \ensuremath{\mathcal{C}}\xspace-pair, $L$ cannot separate $\alpha^{-1}(r)$ from $\alpha^{-1}(s)$. Thus, $\alpha^{-1}(s) \cap L = \emptyset$. Moreover, since $L$ is recognized by $\alpha$, this implies that $\alpha^{-1}(s) \subseteq L$. Finally, since $(s,t)$ is a saturated \ensuremath{\mathcal{C}}\xspace-pair, $L$ cannot separate $\alpha^{-1}(s)$ from $\alpha^{-1}(t)$. Thus, $\alpha^{-1}(t) \cap L = \emptyset$ and we are finished.
\end{proof}
Another useful property is that the saturated \ensuremath{\mathcal{C}}\xspace-pairs characterize exactly the languages in \ensuremath{\mathcal{C}}\xspace which are also recognized by the morphism $\alpha$ (provided that \ensuremath{\mathcal{C}}\xspace is a lattice).
\begin{lemma} \label{lem:trans:satpairdef}
Let \ensuremath{\mathcal{C}}\xspace be a lattice, $A$ an alphabet, $M$ a finite monoid and $\alpha: A^* \to M$ a surjective morphism. Then, for any $F \subseteq M$, the two following properties are equivalent:
\begin{enumerate}
\item $\alpha^{-1}(F) \in \ensuremath{\mathcal{C}}\xspace$.
\item $F$ is an upper set for the saturated \ensuremath{\mathcal{C}}\xspace-pair relation: for any $s \in F$ and any $t \in M$ such that $(s,t)$ is a saturated \ensuremath{\mathcal{C}}\xspace-pair, we have $t \in F$.
\end{enumerate}
\end{lemma}
\begin{proof}
We start with the direction $(1) \Rightarrow (2)$. Assume that $\alpha^{-1}(F) \in \ensuremath{\mathcal{C}}\xspace$. Consider $s \in F$ and $t \in M$ such that $(s,t)$ is a saturated \ensuremath{\mathcal{C}}\xspace-pair, we show that $t \in F$. We proceed by contradiction, assume that $t \not\in F$. In that case it is immediate that $\alpha^{-1}(F)$ separates $\alpha^{-1}(s)$ from $\alpha^{-1}(t)$. Since we have $\alpha^{-1}(F) \in \ensuremath{\mathcal{C}}\xspace$, this contradicts the hypothesis that $(s,t)$ is a saturated \ensuremath{\mathcal{C}}\xspace-pair and we are finished.
We turn to the direction $(2) \Rightarrow (1)$. Assume that for any $s \in F$ and any $t \in M$ such that $(s,t)$ is a saturated \ensuremath{\mathcal{C}}\xspace-pair, we have $t \in F$. We show that $\alpha^{-1}(F) \in \ensuremath{\mathcal{C}}\xspace$. Consider $s \in F$ and $r \not\in F$. By hypothesis, we know that $(s,r)$ is {\bf not} a saturated \ensuremath{\mathcal{C}}\xspace-pair. Thus, we have $G_{s,r} \subseteq M$ such that $\alpha^{-1}(G_{s,r}) $ belongs to \ensuremath{\mathcal{C}}\xspace and separates $\alpha^{-1}(s)$ from $\alpha^{-1}(r)$. One may then verify that,
\[
\alpha^{-1}(F) = \bigcup_{s \in F} \bigcap_{r \not\in F} \alpha^{-1}(G_{s,r})
\]
Since $\ensuremath{\mathcal{C}}\xspace$ is a lattice, it follows that $\alpha^{-1}(F) \in \ensuremath{\mathcal{C}}\xspace$. This concludes the proof.
\end{proof}
We may now further connect the saturated \ensuremath{\mathcal{C}}\xspace-pair relation with original \ensuremath{\mathcal{C}}\xspace-pair relation. We show that the former is the transitive closure of the latter.
\begin{lemma} \label{lem:trans:transclos}
Consider a lattice \ensuremath{\mathcal{C}}\xspace, an alphabet $A$, a finite monoid $M$ and a surjective morphism $\alpha: A^* \to M$. Then, for any $(s,t) \in M \times M$, the following properties are equivalent,
\begin{enumerate}
\item $(s,t)$ is a saturated \ensuremath{\mathcal{C}}\xspace-pair.
\item There exist $n \in \nat$ and $r_0,\dots,r_{n+1} \in M$ such that $r_0 = s$, $r_{n+1} = t$ and $(r_{i},r_{i+1})$ is a \ensuremath{\mathcal{C}}\xspace-pair for all $i \leq n$.
\end{enumerate}
\end{lemma}
\begin{proof}
We already proved the direction $(2) \Rightarrow (1)$. Indeed, we know from Fact~\ref{fct:trans:satunsat} that any \ensuremath{\mathcal{C}}\xspace-pair is also a saturated \ensuremath{\mathcal{C}}\xspace-pair. Moreover, we showed in Lemma~\ref{lem:trans:satquot} that the saturated \ensuremath{\mathcal{C}}\xspace-pair relation is transitive. Therefore, we concentrate on the direction $(1) \Rightarrow (2)$. Let $(s,t)$ be a saturated \ensuremath{\mathcal{C}}\xspace-pair. Let $F \subseteq M$ be the smallest subset of $M$ satisfying the two following properties:
\begin{enumerate}
\item $s \in F$.
\item For any \ensuremath{\mathcal{C}}\xspace-pair $(u,v) \in M \times M$, if $u \in F$, then $v \in F$ as well.
\end{enumerate}
We have $s \in F$ by definition. We show that $\alpha^{-1}(F) \in \ensuremath{\mathcal{C}}\xspace$. By Lemma~\ref{lem:trans:satpairdef}, this will imply that $t \in F$ as well since $(s,t)$ is a saturated \ensuremath{\mathcal{C}}\xspace-pair. Thus, $(2)$ holds.
Observe that for any $u \in F$, we may build a language $H_u \in \ensuremath{\mathcal{C}}\xspace$ such that $\alpha^{-1}(u) \subseteq H_u \subseteq \alpha^{-1}(F)$. Indeed, for any $v \not\in F$, we know that $(u,v)$ is not a \ensuremath{\mathcal{C}}\xspace-pair by definition of $F$. Thus, we have $H_{u,v} \in \ensuremath{\mathcal{C}}\xspace$ which separates $\alpha^{-1}(u)$ from $\alpha^{-1}(v)$. We may now define,
\[
H_u = \bigcap_{v \not \in F} H_{u,v}
\]
Clearly $H_u \in \ensuremath{\mathcal{C}}\xspace$ since \ensuremath{\mathcal{C}}\xspace is a lattice. It now suffices to observe that,
\[
\alpha^{-1}(F) = \bigcup_{u \in F} \alpha^{-1}(u) \subseteq \bigcup_{u \in F} H_u \subseteq \alpha^{-1}(F)
\]
Thus, $\alpha^{-1}(F) =\bigcup_{u \in F} H_u$ belongs to \ensuremath{\mathcal{C}}\xspace since \ensuremath{\mathcal{C}}\xspace is a lattice.
\end{proof}
Finally, we prove that when \ensuremath{\mathcal{C}}\xspace is a quotienting lattice\xspace the saturated \ensuremath{\mathcal{C}}\xspace-pair relation is compatible with multiplication.
\begin{lemma} \label{lem:trans:smult}
Let \ensuremath{\mathcal{C}}\xspace be a quotienting lattice\xspace of regular languages, $A$ an alphabet, $M$ a finite monoid and $\alpha: A^* \to M$ a surjective morphism. For any two saturated \ensuremath{\mathcal{C}}\xspace-pairs $(s_1,t_1),(s_2,t_2) \in M \times M$, $(s_1s_2,t_1t_2)$ is a saturated \ensuremath{\mathcal{C}}\xspace-pair as well.
\end{lemma}
\begin{proof}
Immediate from Lemma~\ref{lem:trans:transclos} since we already know that the \ensuremath{\mathcal{C}}\xspace-pair relation is compatible with multiplication by Lemma~\ref{lem:trans:mult}.
\end{proof}
\subsection{Characterization theorem}
We may now present the announced algebraic characterization of \capol{\ensuremath{\mathcal{C}}\xspace} and use it to prove Theorem~\ref{thm:trans:polcopolreduc}.
\begin{theorem} \label{thm:trans:caracint}
Let \ensuremath{\mathcal{C}}\xspace be a quotienting lattice\xspace of regular languages and $L$ a regular language. Then, the three following properties are equivalent:
\begin{enumerate}
\item $L \in \capol{\ensuremath{\mathcal{C}}\xspace}$.
\item The syntactic morphism $\alpha_L: A^* \to M_L$ of $L$ satisfies
the following property:
\begin{equation}
s^{\omega+1} = s^{\omega}ts^{\omega} \quad \text{for all \ensuremath{\mathcal{C}}\xspace-pairs $(s,t) \in M_L^2$} \label{eq:trans:caraint}
\end{equation}
\item The syntactic morphism $\alpha_L: A^* \to M_L$ of $L$ satisfies
the following property:
\begin{equation}
s^{\omega+1} = s^{\omega}ts^{\omega} \quad \text{for all saturated \ensuremath{\mathcal{C}}\xspace-pairs $(s,t) \in M_L^2$} \label{eq:trans:caraint2}
\end{equation}
\end{enumerate}
\end{theorem}
As announced, Theorem~\ref{thm:trans:caracint} states a reduction from $(\capol{\ensuremath{\mathcal{C}}\xspace})$-membership to \ensuremath{\mathcal{C}}\xspace-membership. Indeed, the syntactic morphism of a regular language can be computed and Equation~\eqref{eq:trans:caraint2} can be decided as soon as one is able to compute all saturated $\ensuremath{\mathcal{C}}\xspace$-pairs (as we explained, this amounts to deciding \ensuremath{\mathcal{C}}\xspace-membership). Hence, we obtain Theorem~\ref{thm:trans:polcopolreduc} as an immediate corollary. We turn to the proof of Theorem~\ref{thm:trans:caracint}.
\begin{proof}[Proof of Theorem~\ref{thm:trans:caracint}]
The equivalence $(1) \Leftrightarrow (2)$ follows from Theorem~\ref{thm:trans:caracsig} and Corollary~\ref{cor:trans:caracpi}. Indeed, by definition $L \in \capol{\ensuremath{\mathcal{C}}\xspace}$ if and only if $L \in \pol{\ensuremath{\mathcal{C}}\xspace}$ and $L \in \copol{\ensuremath{\mathcal{C}}\xspace}$. By Theorem~\ref{thm:trans:caracsig} and Corollary~\ref{cor:trans:caracpi} respectively, this is equivalent to $\alpha_L$ satisfying the two following properties:
\[
\begin{array}{lll}
s^{\omega+1} & \leq_L & s^{\omega}ts^{\omega} \quad \text{for all \ensuremath{\mathcal{C}}\xspace-pairs $(s,t) \in M_L^2$} \\
s^{\omega+1} & \geq_L & s^{\omega}ts^{\omega} \quad \text{for all \ensuremath{\mathcal{C}}\xspace-pairs $(s,t) \in M_L^2$}
\end{array}
\]
Clearly, when put together, these two equations are equivalent to~\eqref{eq:trans:caraint}. This concludes the proof of $(1) \Leftrightarrow (2)$.
We now show that $(2) \Leftrightarrow (3)$. The direction $(3) \Rightarrow (2)$ is immediate from Fact~\ref{fct:trans:satunsat}. Indeed, since any \ensuremath{\mathcal{C}}\xspace-pair is also a saturated \ensuremath{\mathcal{C}}\xspace-pair, it is immediate that when~\eqref{eq:trans:caraint2} holds, then~\eqref{eq:trans:caraint} holds as well. Therefore, we concentrate on the direction $(2) \Rightarrow (3)$. We assume that~\eqref{eq:trans:caraint} holds and prove that this is the case for~\eqref{eq:trans:caraint2} as well. Consider a saturated \ensuremath{\mathcal{C}}\xspace-pair $(s,t) \in M_L^2$. We have to show that $s^{\omega+1} = s^{\omega}ts^{\omega}$.
By Lemma~\ref{lem:trans:transclos}, we know that there exist $n \in \nat$ and $r_0,\dots,r_{n+1} \in M$ such that $r_0 = s$, $r_{n+1} = t$ and $(r_{i},r_{i+1})$ is a \ensuremath{\mathcal{C}}\xspace-pair for all $i \leq n$. We prove by induction that for all $1 \leq k \leq n+1$, we have,
\[
s^{\omega+1} = s^{\omega}r_ks^{\omega}
\]
The case $k = n+1$ yields the desired result since $r_{n+1} = t$. When $k = 1$, it is immediate from~\eqref{eq:trans:caraint} that $s^{\omega+1} = s^{\omega}r_1s^{\omega}$ since $(s,r_1)$ is a \ensuremath{\mathcal{C}}\xspace-pair. We now assume that $k > 1$. Using induction, we get that,
\[
s^{\omega+1} = s^{\omega}r_{k-1}s^{\omega}
\]
Therefore, we obtain,
\[
s^{\omega} = (s^{\omega+1})^\omega
= (s^{\omega}r_{k-1}s^{\omega})^\omega
\]
Since $(r_{k-1},r_k)$ is a \ensuremath{\mathcal{C}}\xspace-pair, it is immediate from Lemma~\ref{lem:trans:mult} that $(s^{\omega}r_{k-1}s^{\omega},s^{\omega}r_{k}s^{\omega})$ is a \ensuremath{\mathcal{C}}\xspace-pair as well. Thus, it follows from~\eqref{eq:trans:caraint} that,
\[
(s^{\omega}r_{k-1}s^{\omega})^{\omega+1} = (s^{\omega}r_{k-1}s^{\omega})^{\omega}s^{\omega}r_{k}s^{\omega}(s^{\omega}r_{k-1}s^{\omega})^{\omega}
\]
Since $s^{\omega+1} = s^{\omega}r_{k-1}s^{\omega}$ and $s^{\omega} = (s^{\omega}r_{k-1}s^{\omega})^\omega$, this yields,
\[
s^{\omega+1} = (s^{\omega+1})^{\omega+1} = s^{\omega}s^{\omega}r_{k}s^{\omega}s^{\omega} = s^{\omega}r_{k}s^{\omega}
\]
This concludes the proof.
\end{proof}
\end{document} |
\begin{document}
\title{An example of the difference between quantum and classical random
walks}
\author{Andrew M. Childs,$^1$ Edward Farhi,$^1$ and Sam Gutmann$^2$}
\address{$^1$ Center for Theoretical Physics, Massachusetts Institute of
Technology, Cambridge, MA 02139 \\
$^2$ Department of Mathematics, Northeastern University, Boston,
MA 02115}
\date{5 March 2001}
\maketitle
\begin{abstract}
In this note, we discuss a general definition of quantum random walks on
graphs and illustrate with a simple graph the possibility of very different
behavior between a classical random walk and its quantum analogue. In this
graph, propagation between a particular pair of nodes is exponentially
faster in the quantum case.
[MIT-CTP \#3093]
\end{abstract}
\begin{multicols}{2}
\noindent {\bf Introduction.}
Many classical algorithms are based on random walks, so it is natural to ask
whether quantum random walks might be useful for quantum computation. A
framework for using quantum random walks to solve decision problems was
investigated in~\cite{FG98}. There also, an exponential separation was
found between the classical and quantum times to propagate through a certain
tree.
In this note, we describe a general definition of continuous-time random
walks on graphs and give a simpler example of a graph for which the quantum
time to propagate between a particular pair of nodes is exponentially
shorter than the analogous classical propagation time. We also discuss
advantages of the continuous time formulation over discrete versions.
\noindent {\bf Random walks.}
A continuous time classical random walk on a graph is a Markov process. A
graph is a set of $v$ vertices $\{1, 2, \ldots, v\}$ and a set of edges that
specifies which pairs of vertices are connected in the graph. A step in a
classical random walk on a graph only occurs between two vertices connected
by an edge. Let $\gamma$ denote the jumping rate. Starting at any vertex,
the probability of jumping to any connected vertex in a time $\epsilon$ is
$\gamma \epsilon$ (in the limit $\epsilon \to 0$). This random walk can be
described by the $v \times v$ infinitesimal generator matrix $M$ defined by
\begin{equation}
M_{ab} = \left\{
\begin{array}{ll}
-\gamma & \textrm{$a \ne b$, $a$ and $b$ connected by an edge} \\
0 & \textrm{$a \ne b$, $a$ and $b$ not connected} \\
k\gamma & \textrm{$a=b$, $k$ is the valence of vertex $a$.}
\end{array} \right.
\end{equation}
If $p_a(t)$ denotes the probability of being at vertex $a$ at time $t$, then
\begin{equation}
{{\mathrm d} p_a(t) \over {\mathrm d}t} = - \sum_b M_{ab} \, p_b(t)
\,.
\label{eq:diffeq}
\end{equation}
Consider quantum evolution in a $v$-dimensional Hilbert space according to a
Hamiltonian $H$. In a basis $|1\rangle, |2\rangle, \ldots, |v\rangle$, the Schr\"odinger
equation for $|\psi(t)\rangle$ can be written
\begin{equation}
i {{\mathrm d} \over {\mathrm d}t} \langle a|\psi(t)\rangle
= \sum_b \langle a|H|b\rangle \langle b|\psi(t)\rangle
\,.
\label{eq:schrodinger}
\end{equation}
Note the similarity between (\ref{eq:diffeq}) and (\ref{eq:schrodinger}).
Whereas (\ref{eq:diffeq}) conserves probability in the sense that
\begin{equation}
\sum_a p_a(t) = 1
\,,
\end{equation}
the Schr\"odinger equation preserves probability as the sum of the
amplitudes squared:
\begin{equation}
\sum_a |\langle a|\psi(t)\rangle|^2 = 1
\,.
\end{equation}
In some sense, {\em any} evolution in a finite-dimensional Hilbert space can
be thought of as a ``quantum random walk.'' However, the analogy is
clearest when $H$ has an obvious local structure.
A quantum random walk on a graph is naturally defined in a Hilbert space
spanned by basis elements corresponding to the vertices. To respect the
structure of the graph, we require that for $a \ne b$,
\begin{equation}
\langle a|H|b\rangle \ne 0~\textrm{iff $a$ and $b$ are connected by an edge.}
\end{equation}
This is a very weak requirement, so we can impose more structure on $H$. A
natural quantum analogue to the classical random walk described above is
given by the quantum Hamiltonian with matrix elements~\cite{FG98}
\begin{equation}
\langle a|H|b\rangle = M_{ab}
\,.
\label{eq:ham}
\end{equation}
Note that on a one-dimensional lattice, this results in the Hamiltonian
defined by
\begin{equation}
H|j\rangle = -{1 \over \Delta^2}(|j-1\rangle-2|j\rangle+|j+1\rangle)
\,,
\end{equation}
which is just a discrete approximation to the operator $-{\mathrm
d}^2/{\mathrm d}x^2$ (where $\Delta=\gamma^{-1/2}$ is the lattice spacing).
The difference between the quantum and classical evolution comes from the
$i$ which appears in (\ref{eq:schrodinger}) but not in (\ref{eq:diffeq}).
This can result in radically different behavior, as seen in~\cite{FG98}. A
simpler example is given next.
\noindent {\bf An example.}
Here we define a sequence of graphs $G_n$. The number of vertices in $G_n$
is $2^{n+1}+2^n-2$. In Figure~\ref{fig:graph} we show $G_4$. In general,
$G_n$ consists of two balanced binary trees of depth $n$ with the $2^n$
$n$th-level vertices of the two trees pairwise identified.
\begin{figure}
\begin{center}
\psfig{figure=graph.eps,width=3.25in}
\end{center}
\caption{The graph $G_4$.}
\label{fig:graph}
\end{figure}
For both the classical and quantum random walks, we start at the root of one
tree and want the probability as a function of time of being at the other
root. In other words, we are interested in how long it takes to propagate
from the leftmost vertex to the rightmost vertex as a function of $n$.
Consider the classical case first. The vertices of $G_n$ can be grouped in
columns indexed by $j \in \{0, 1, \ldots, 2n\}$. Column $0$ contains the
root of the left tree, column $1$ contains the two vertices connected to
that root, etc. Note that column $n$ contains the $2^n$ vertices in the
middle of the graph and column $2n$ is the root at the right.
To analyze the classical walk from the left root to the right root, we need
only keep track of the probabilities of being in the columns. In the left
tree, for $0<j<n$, the probability of stepping from column $j$ to column
$j+1$ is twice as great as the probability of stepping from column $j$ to
column $j-1$. However, in the right tree, for $n<j<2n$, the probability of
stepping from column $j$ to column $j+1$ is half as great as the probability
of stepping from column $j$ to column $j-1$. This means that if you start
at the left root, you quickly move to the middle of the graph, but then it
takes a time exponential in $n$ to reach your destination. More precisely,
starting in column $0$, the probability of being in column $2n$ after any
number of steps is less than $2^{-n}$. This implies that the probability of
reaching column $2n$ in a time that is polynomial in $n$ must be
exponentially small as a function of $n$.
We now analyze the quantum walk on $G_n$ starting in the state corresponding
to the left root and evolving with the Hamiltonian given by (\ref{eq:ham}).
With this initial state, the symmetries of $H$ keep the evolution in a
$(2n+1)$-dimensional subspace of the $(2^{n+1}+2^n-2)$-dimensional Hilbert
space. This subspace is spanned by states $|{\mathrm col}~j\rangle$ (where $0
\le j \le 2n$), the uniform superposition over all vertices in column $j$,
that is,
\begin{equation}
|{\mathrm col}~j\rangle
= {1 \over \sqrt{N_j}} \sum_{a \in {\mathrm column}~j} |a\rangle
\,,
\end{equation}
where
\begin{equation}
N_j = \left\{ \begin{array}{ll}
2^j & 0 \le j \le n \\
2^{2n-j} & n \le j \le 2n \,.
\end{array} \right.
\end{equation}
In this basis, the non-zero matrix elements of $H$ are
\begin{eqnarray}
\langle{\mathrm col}~j|H|{\mathrm col}~j\pm1\rangle &=& -\sqrt{2}\gamma \\
\langle{\mathrm col}~j|H|{\mathrm col}~j\rangle &=& \left\{ \begin{array}{ll}
2\gamma & j=0,n,2n \\
3\gamma & {\mathrm otherwise,} \\
\end{array} \right.
\end{eqnarray}
which is depicted in Figure~\ref{fig:line} (for $n=4$) as a quantum random
walk on a line with $2n+1$ vertices.
\begin{figure}
\begin{center}
\psfig{figure=line.eps,width=3.25in}
\end{center}
\caption{The reduction of $G_4$ to a quantum random walk on a line.
Vertices correspond to columns and are labeled with the diagonal matrix
elements of $H/\gamma$, whereas edges are labeled with its matrix elements
between adjacent columns.}
\label{fig:line}
\end{figure}
\begin{figure}
\begin{center}
\psfig{figure=infline.eps,width=3.25in}
\end{center}
\caption{Quantum random walk on an infinite, translationally invariant
line.}
\label{fig:infline}
\end{figure}
Starting at the leftmost vertex of Figure~\ref{fig:line}, there is an
appreciable probability of being at the rightmost vertex after a time
proportional to $n$. To see this, first consider quantum propagation on an
infinite, translationally invariant line of vertices as depicted in
Figure~\ref{fig:infline}. Here it is straightforward to compute the
amplitude to go from vertex $l$ to vertex $m$ in a time $t$ (for example,
see~\cite{FG92}):
\begin{equation}
\langle m|e^{-iHt}|l\rangle = e^{-i 3 \gamma t} i^{m-l} J_{m-l}(2\sqrt2 \gamma t)
\,,
\end{equation}
where $J_{m-l}$ is a Bessel function of order $m-l$. This corresponds to
propagation with speed $2\sqrt2 \gamma$. More precisely, for any
$\epsilon>0$ and $|m-l| \gg 1$, for $t < \left({1 \over 2 \sqrt2
\gamma}-\epsilon\right)|m-l|$, the amplitude is exponentially small in
$|m-l|$, whereas there are values of $t$ between $({1 \over 2 \sqrt2
\gamma})|m-l|$ and $({1 \over 2 \sqrt2 \gamma}+\epsilon)|m-l|$ at which the
amplitude is of order $|m-l|^{-1/2}$.
In the limit of large $n$, the reduced version of $G_n$ is nearly identical
to the infinite, translationally invariant line, so it is plausible that
propagation on $G_n$ will also occur with speed $2\sqrt2\gamma$. To verify
this, we numerically compute the probability $|\langle{\mathrm
col}~j|\psi(t)\rangle|^2$ of being in column $j$ at various times $t$, where
$|\psi(0)\rangle=|{\mathrm col}~0\rangle$ and we choose $\gamma=1$. This is shown in
Figure~\ref{fig:propagation} with $n=500$ for $t=100$, $250$, and $400$.
These plots clearly show a wave packet which propagates with speed
$2\sqrt2$, with the amplitude near the wavefront decreasing like $t^{-1/2}$.
In the first plot, at $t=100$, the leading edge of the distribution is at
column $200\sqrt2 \approx 283$. The packet has not yet encountered the
small defect at the center, so it has a relatively simple shape. At
$t=250$, the wavefront has passed the center, and a small reflection can be
seen propagating backward. However, the leading edge is relatively
undisturbed, having propagated to column $500\sqrt2 \approx 707$. The
wavefront continues to propagate with speed $2\sqrt2$ until it reaches the
right root, where the packet is reflected. The last plot, at $t=400$, shows
the distribution shortly after this first reflection. Even after the
reflection, there is still an appreciable probability of being at the right
root.
\noindent {\bf The limiting distribution.}
In this section, we consider the distribution over the vertices after a long
time. We emphasize that although the mixing times (the characteristic times
to reach the limiting distribution) may be similar in the classical and
quantum cases~\cite{AAKV00}, this is in no way indicative of similar
dynamics, as the limiting distributions may be radically different.
In the classical case, the limiting distribution is defined as
\begin{equation}
\pi_b = \lim_{T \to \infty} p_b(T)
\,,
\end{equation}
which is independent of the starting state. It is easy to see that the
limiting distribution on $G_n$ is uniform over the vertices: this
distribution is the unique eigenvector of $M$ with eigenvalue $0$, so it is
the only component that remains after a long time. Thus $\pi_b =
(2^{n+1}+2^n-2)^{-1}$ for each vertex $b$, which is exponentially small.
In the quantum case, unitarity prevents the walk from reaching a steady
state. However, a sensible definition of the limiting distribution, which
depends on the initial state $|a\rangle$, is given by~\cite{AAKV00}
\begin{equation}
\chi_b = \lim_{T \to \infty} {1 \over T} \int_0^T |\langle b|e^{-iHt}|a\rangle|^2
\, {\mathrm d}t
\,.
\end{equation}
This is the distribution resulting from a measurement done after a time
chosen uniformly in $[0,T]$, in the limit of large $T$. By expanding over
the energy eigenstates $|E_r\rangle$, we find
\begin{eqnarray}
\chi_b &=& \sum_{r,s} \langle b|E_r\rangle\langle E_r|a\rangle\langle a|E_s\rangle\langle E_s|b\rangle \nonumber\\
&& \quad\times \lim_{T \to \infty} {1 \over T}
\int_0^T e^{-i(E_r-E_s)t} \, {\mathrm d}t \\
&=& \sum_r |\langle a|E_r\rangle|^2 \, |\langle b|E_r\rangle|^2
\end{eqnarray}
\begin{figure}
\begin{center}
\psfig{figure=100.eps,width=3.25in}
\psfig{figure=250.eps,width=3.25in}
\psfig{figure=400.eps,width=3.25in}
\end{center}
\caption{Propagation in $G_{500}$ starting at the left root. From top to
bottom, the times are $t=100$, $250$, and $400$.}
\label{fig:propagation}
\end{figure}
\noindent
(note that we have assumed $E_r \ne E_s$ for $r \ne s$, which is true for
$G_n$). In particular, consider the case where $|a\rangle=|{\mathrm col}~0\rangle$
corresponds to the left root and $|b\rangle=|{\mathrm col}~2n\rangle$ corresponds to
the right root. In this case, we may work in the reduced Hilbert space
spanned by the columns, so the number of energy eigenstates is $2n+1$. By
symmetry, $|\langle{\mathrm col}~0|E_r\rangle| = |\langle{\mathrm col}~2n|E_r\rangle|$. The
Cauchy--Schwarz inequality gives
\begin{equation}
\sum_r |\langle{\mathrm col}~0|E_r\rangle|^4 \, \sum_s 1
\ge \left(\sum_r |\langle{\mathrm col}~0|E_r\rangle|^2\right)^2 = 1
\,,
\end{equation}
which implies
\begin{equation}
\sum_r |\langle{\mathrm col}~0|E_r\rangle|^4 \ge {1 \over 2n+1}
\,.
\end{equation}
Thus in the limiting distribution, the probability of being at the right
root, starting at the left root, is
\begin{equation}
\chi_{{\mathrm col}~2n} \ge {1 \over 2n+1}
\,,
\end{equation}
which is much larger than in the classical case.
\noindent {\bf Discussion.}
The model of quantum random walks used in this note applies automatically to
any graph. In particular, the Hamiltonian is determined by the local
structure of the graph and its definition does not require knowledge of any
global properties. It is easy to imagine situations where the local
structure of a graph is readily accessible, but determining some global
property is difficult. For example, a computational problem may involve
searching a graph for a node with a certain property whose presence or
absence from the graph corresponds to the solution of an NP-complete
problem~\cite{FG98}.
The Hamiltonian-based approach to quantum random walks can be contrasted
with discrete time models (for example, see~\cite{AAKV00,ADZ93,NV00})
involving the extra state space of a ``quantum coin.'' This extra label
seems to be necessary in discrete time formulations of quantum random walks
(and is provably necessary in the one dimensional case~\cite{M96}).
However, for general graphs of mixed valence, it is not obvious how to
define the discrete time unitary evolution operator without knowledge of
global properties of the graph.
\noindent {\bf Acknowledgements.}
This work was supported in part by the Department of Energy under
cooperative agreement DE-FC02-94ER40818. AMC is supported by the Fannie and
John Hertz Foundation.
\begin{thebibliography}{9}
\bibitem{FG98}
E. Farhi and S. Gutmann, {\em Quantum computation and decision trees},
Phys. Rev. A {\bf 58}, 915 (1998).
\bibitem{FG92}
E. Farhi and S. Gutmann, {\em The functional integral constructed directly
from the Hamiltonian}, Ann. Phys. {\bf 213}, 182 (1992).
\bibitem{AAKV00}
D. Aharonov, A. Ambainis, J. Kempe, and U. Vazirani, {\em Quantum walks on
graphs}, quant-ph/0012090.
\bibitem{ADZ93}
Y. Aharonov, L. Davidovich, and N. Zagury, {\em Quantum random walks},
Phys. Rev. A {\bf 48}, 1687 (1993).
\bibitem{NV00}
A. Nayak and A. Vishwanath, {\em Quantum walk on the line},
quant-ph/0010117.
\bibitem{M96}
D. A. Meyer, {\em From quantum cellular automata to quantum lattice
gases}, J. Stat. Phys. {\bf 85}, 551 (1996).
\end{thebibliography}
\end{multicols}
\end{document} |
\begin{document}
\title[Zero-Divisor Graphs of $\mathbb{Z}_n$, their products and $D_n$]{Zero-Divisor Graphs of $\mathbb{Z}_n$, their products and $D_n$}
\author{Amrita Acharyya}
\address{Department of Mathematics and Statistics\\
University of Toledo, Main Campus\\
Toledo, OH 43606-3390}
\email{[email protected]}
\author{Robinson Czajkowski}
\address{Department of Mathematics and Statistics\\
University of Toledo, Main Campus\\
Toledo, OH 43606-3390}
\email{ [email protected]}
\subjclass[2010]{68R10, 68R01, 03G10, 13A99}
\keywords{zero-divisor graph, commutative ring, finite products, poset, type graph}
\begin{abstract}
This paper is an endeavor to discuss some properties of zero-divisor graphs of the ring $\mathbb{Z}_n$, the ring of integers modulo $n$. The zero divisor graph of a commutative ring $R$, is an undirected graph whose vertices are the nonzero zero-divisors of $R$, where two distinct vertices are adjacent if their product is zero. The zero divisor graph of $R$ is denoted by $\Gamma(R)$. We discussed $\Gamma(\mathbb{Z}_n)$'s by the attributes of completeness, k-partite structure, complete k-partite structure, regularity, chordality, $\gamma - \beta$ perfectness, simplicial vertices. The clique number for arbitrary $\Gamma(\mathbb{Z}_n)$ was also found. This work also explores related attributes of finite products $\Gamma(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$, seeking to extend certain results to the product rings. We find all $\Gamma(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$ that are perfect. Likewise, a lower bound of clique number of $\Gamma(\mathbb{Z}_m\times\mathbb{Z}_n)$ was found. Later, in this paper we discuss some properties of the zero divisor graph of the poset $D_n$, the set of positive divisors of a positive integer $n$ partially ordered by divisibility.
\end{abstract}
\maketitle
\section{Introduction} \label{s:Intro}
Zero-divisor graphs were first discussed by Beck~\cite{nB66} as a way to color commutative rings. They were further discussed by Livingston and Anderson in ~\cite{jS83} and ~\cite{jW98}. A zero-divisor graph of a ring $R$, denoted by $\Gamma(R)$, is a graph whose vertices are all the zero divisors of $R$. Two distinct vertices $u$ and $v$ are adjacent if $uv = 0$. Beck~\cite{nB66} considered every element of $R$ a vertex, with 0 sharing an edge with all other vertices. Since then, others have chosen to omit 0 from zero-divisor graphs [2, 3, 4, 5]. For our purposes, we omit 0 so that the vertex set of $\Gamma(\mathbb{Z}_n)$ denoted by $ZD(\mathbb{Z}_n)$ will only be the non-zero zero-divisors.\\
In the first section, we explore a concept explored by Smith ~\cite{jK55} called type graphs. In ~\cite{jK55}, type graphs were used to find all perfect $\Gamma(\mathbb{Z}_n)$. We extended the notion of type graphs for $\Gamma(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$ to find all perfect zero-divisor graphs of such products, where $n_1, n_2, \cdots , n_k$ are positive integers and $\Gamma(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$ is the direct product of $Z_{n_i}$s, $1\le i\le k$. We then move on to various properties of $\Gamma(\mathbb{Z}_n)$ and $\Gamma(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$. AbdAlJawad and Al-Ezeh ~\cite{bH77} discussed the domination number of $\Gamma(\mathbb{Z}_n)$. We extend this result to find an upper bound and lower bound for the domination number of finite product $\Gamma(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$ and discussed coefficient of smallest degree of domination polynomial of $\Gamma(\mathbb{Z}_n)$. In the last section, we explore zero divisor graphs of the poset $D_n$, the set of positive divisors of a positive integer $n$ partially ordered by divisibility and we catalog them in a similar way. Zero divisor graph of poset is studied in \cite{jW99}, \cite{jW100}, \cite{jW101}.
\section{Type Graphs}
When we consider zero-divisor graphs of $\Gamma(\mathbb{Z}_n)$, it is useful to consider the type graphs of these rings. A type graph has vertices of $T_a$ where $a$ is a factor of $n$ that is neither 1 nor 0. The set of all $T_i$ forms a partition of the zero divisor graph by $T_a = \{ x \in ZD(\mathbb{Z}_n) | gcf(x, n) = a \}$. This concept was shown by Smith ~\cite{jK55}, where the type graph was used to find all perfect $\Gamma(\mathbb{Z}_n)$. Smith used the notation $\Gamma^T(\mathbb{Z}_n)$ to denote the type graph. In that paper, four key observations were shown to be true regarding the type graphs on $\mathbb{Z}_n$. In this section, we modify the definition of type graph to fit the graph of $\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k}$. Additionally, we show these observations to be true over this type graph as well. We then use analogues of some theorems from ~\cite{jK55} to characterize the perfectness of $\Gamma(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$.\\
The following are two important theorems from ~\cite{jK55}.
\begin{theorem}[Smith's Main Theorem] ~\cite{jK55}
A graph $\Gamma(\mathbb{Z}_n)$ is perfect iff $n$ is of one of the following forms:
\begin{enumerate}
\item[1.] $n = p^a$ for prime $p$ and positive integer $a$.
\item[2.] $n = p^aq^b$ for distinct primes $p, q$ and positive integers $a, b$.
\item[3.] $n = p^aqr$ for distinct primes $p, q, r$ and positive integer $a$.
\item[4.] $n = pqrs$ for distinct primes $p, q, r, s$.
\end{enumerate}
\end{theorem}
\begin{theorem}[Smith's Theorem 4.1] ~\cite{jK55}
$\Gamma(\mathbb{Z}_n)$ is perfect iff its type graph $\Gamma^T(\mathbb{Z}_n)$ is perfect.\\
\end{theorem}
\begin{definition}[Type graph of $\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k}$]
The type graph of $\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k}$ denoted by $\Gamma^T(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$ has a vertex set of the type classes $T(x_1, x_2, \cdots, x_k)$ where $(x_1, x_2, \cdots, x_k) \neq (0, 0, \cdots, 0)$ nor $(1, 1, \cdots, 1)$, and $x_i$ is a divisor of $n_i$, 1, or 0.\\
$T(x_1, x_2, \cdots, x_k) = \{ (a_1, a_2, \cdots, a_k) \mid a_i \in \mathbb{Z}_{n_i}\setminus\{0\}$ and $gcf(a_i, n_i) = x_i$, or $a_i=0$ if $x_i=0$ $\}$. Arbitrary $T(x_1, x_2, \cdots, x_k)$ shares an edge with arbitrary $T(y_1, y_2, \cdots, y_k)$ iff $x_iy_i = 0$ for all $i$.
\end{definition}
Smith ~\cite{jK55} gave the following four observations for the type graph of $\Gamma(\mathbb{Z}_n)$.
\begin{theorem}\label{1.1} Each vertex of $\Gamma(\mathbb{Z}_{n})$ is in exactly one type class.
\end{theorem}
\begin{theorem}\label{1.2} Arbitrary distinct vertices $T_x$ and $T_y$ share an edge in $\Gamma^T(\mathbb{Z}_{n})$ iff each $a\in T_x$ shares an edge with each $b\in T_y$ in $\Gamma(\mathbb{Z}_{n})$.
\end{theorem}
\begin{theorem}\label{1.3} Arbitrary distinct vertices $T_x $ and $T_y $ don't share an edge in $\Gamma^T(\mathbb{Z}_{n})$ iff each $a\in T_x$ doesn't share an edge with each $b\in T_y$ in $\Gamma(\mathbb{Z}_{n})$.
\end{theorem}
\begin{theorem}\label{1.4} In $\Gamma(\mathbb{Z}_{n})$ consider arbitrary $a$ and $b$ in the same type class. An arbitrary vertex $c$ in $\Gamma(\mathbb{Z}_{n})$ shares an edge with $b$ iff it shares an edge with $a$ also.
\end{theorem}
Following are the four analogues to the above results for $\Gamma^T(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$.\\
\begin{theorem}\label{1.1 a} Each vertex of $\Gamma(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$ is in exactly one type class.
\end{theorem}
\begin{proof}
Assume otherwise. Then there is a vertex $v$ that is not in any type class, or $v$ is in multiple type classes.\\
\begin{enumerate}
\item[Case 1:] $v$ is not in any type class.\\
Then $v$ must have an element $a_i$ that is not 0 and whose gcf with $n_i$ is not a number $x_i$ which is clearly not true.\\
\item[Case 2:] $v$ is in multiple type classes.\\
Let $v = (a_1, a_2, \cdots, a_k) \in T(x_1, x_2, \cdots, x_k) \cap T(y_1, y_2, \cdots, y_k)$. Then for all $i \in \{1, 2, \cdots, k\}$ if $a_i = 0$, then $x_i = y_i = 0$ and if $a_i \neq 0$, then $gcf(a_i, n_i) = x_i = y_i$ giving $(x_1, x_2, \cdots, x_k) = (y_1, y_2, \cdots, y_k)$ which is a contradiction.
\end{enumerate}
\end{proof}
\begin{theorem}\label{1.2a} Arbitrary distinct vertices $T_x = T(x_1, x_2, \cdots, x_k)$ and $T_y = T(y_1, y_2, \cdots, y_k)$ share an edge in $\Gamma^T(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$ iff each $a\in T_x$ shares an edge with each $b\in T_y$ in $\Gamma(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$.
\end{theorem}
\begin{proof}
Let $T_x$ share an edge with $T_y$. By the definition, $x_iy_i = 0$ for every $i$. Consider arbitrary $(a_1, \cdots, a_k)\in T_x$ and $(b_1, \cdots, b_k)\in T_y$. Since each $a_i$ is a multiple of $x_i$ and each $b_i$ is a multiple of $y_i$, $a_ib_i$ is a multiple of $x_iy_i$ and therefore equal to 0. Then $(a_1, \cdots, a_k)$ and $(b_1, \cdots, b_k)$ share an edge.\\
Conversely, let every $a\in T_x$ and $b\in T_y$ share an edge. Since $x = (x_1, x_2, \cdots, x_k)$ is an element of $T_x$, and $y = (y_1, y_2, \cdots, y_k)$ is an element of $T_y$, $x$ and $y$ share an edge. Then $T_x$ must share an edge with $T_y$.
\end{proof}
\begin{theorem}\label{1.3a} Arbitrary distinct vertices $T_x = T(x_1, x_2, \cdots, x_k)$ and $T_y = T(y_1, y_2, \cdots, y_k)$ don't share an edge in $\Gamma^T(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$ iff each $a\in T_x$ doesn't share an edge with each $b\in T_y$ in $\Gamma(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$.
\end{theorem}
\begin{proof}
Let $T_x$ does not share an edge with $T_y$. By the definition, $x_iy_i \neq 0$ for some $i$, which means $x_iy_i$ lacks some factor $f$ of $n_i$. Consider arbitrary $(a_1, \cdots, a_i\cdots, a_k)\in T_x$ and $(b_1, \cdots, b_i, \cdots b_k)\in T_y$. Now, $a_i$ is a multiple of $x_i$ and $b_i$ is a multiple of $y_i$, and thus, $a_ib_i$ is a multiple of $x_iy_i$. Since $gcf(a_i, n_i) = x_i$ and $gcf(b_i, n_i) = y_i$, $a_ib_i$ also lacks the factor $f$ from $n_i$ and is therefore non-zero. So $(a_1, \cdots, a_k)$ and $(b_1, \cdots, b_k)$ do not share an edge.\\
Conversely, let each $a\in T_x$ and $b\in T_y$ do not share an edge. Since $x = (x_1, x_2, \cdots, x_k)$ is an element of $T_x$, and $y = (y_1, y_2, \cdots, y_k)$ is an element of $T_y$, $x$ and $y$ don't share an edge. Then $T_x$ must not share an edge with $T_y$.
\end{proof}
\begin{theorem}\label{1.4a} In $\Gamma(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$ consider arbitrary $a = (a_1, a_2, \cdots, a_k)$ and $b = (b_1, b_2, \cdots, b_k)$ in the same type class $T(t_1, t_2, \cdots, t_k)$. An arbitrary vertex $c = (c_1, c_2, \cdots, c_k)$ shares an edge with $b$ iff it shares an edge with $a$ also.
\end{theorem}
\begin{proof}
Follows from Theorems \ref{1.2a} and \ref{1.3a}.
\end{proof}
Next, we have the following theorem:
\begin{theorem}\label{1.8} $\Gamma(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$ is perfect iff its type graph $\Gamma^T(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$ is perfect.
\end{theorem}
To show this, we will use the following three theorems, whose proofs are analogous to the corresponding proofs in ~\cite{jK55}.\\
\begin{theorem}\label{1.5} Given arbitrary hole or antihole $H$ of length greater than $4$ in $\Gamma(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$, every vertex in $H$ belongs to a different type class.
\end{theorem}
\begin{theorem}\label{1.6} Let there be a hole or antihole $H$ length $l>4$ in $\Gamma(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$. Then the type graph $\Gamma^T(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$ must also contain a hole or antihole length $l$.
\end{theorem}
\begin{theorem}\label{1.7}
Let there be a hole or antihole $H$ length $l>4$ in the type class $\Gamma^T(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$. Then the graph $\Gamma(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$ must also contain a hole or antihole length $l$.
\end{theorem}
Using these theorems, now we can establish the following proof of Theorem \ref{1.8}.\\
\begin{proof}
The proof is analogous to the proof in ~\cite{jK55}.
\end{proof}
Now that we know perfectness in the type graph implies perfectness in the zero-divisor graph, it is possible to find all such perfect $\Gamma(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$. As it turns out, for both $\Gamma^T(\mathbb{Z}_n)$ and $\Gamma^T(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$, we can exchange the primes of each $n_i$, and as long as the form of the primes (the amount of primes and the power of each prime) stays the same, the type graph will be isomorphic. To illustrate this, consider $\Gamma^T(\mathbb{Z}_{p^2q}\times\mathbb{Z}_p)$ where $p, q$ are prime. This type graph is isomorphic to $\Gamma^T(\mathbb{Z}_{r^2s}\times\mathbb{Z}_t)$ where $r, s, t$ are prime, even if the value of the primes change. We will use this to find all perfect $\Gamma(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots\times\mathbb{Z}_{n_k})$.
\begin{theorem}\label{1.9} Consider some $\Gamma^T(\mathbb{Z}_n)$ and $\Gamma^T(\mathbb{Z}_m)$ such that $n=p_1^{\alpha_1}p_2^{\alpha_2}\cdots p_k^{\alpha_k}$ and $m=q_1^{\alpha_1}q_2^{\alpha_2}\cdots q_k^{\alpha_k}$. Then $\Gamma^T(\mathbb{Z}_n) \cong \Gamma^T(\mathbb{Z}_m)$.
\end{theorem}
\begin{proof}
Consider arbitrary vertex $u$ in $\Gamma^T(\mathbb{Z}_n)$. $u$ is a factor of $n$, so we can write $u=p_1^{x_1}p_2^{x_2}\cdots p_k^{x_k}$. Note that $0\leq x_i \leq \alpha_i$, $\forall i$. Define a function $f:\Gamma^T(\mathbb{Z}_n)\to \Gamma^T(\mathbb{Z}_m)$ as $f(u)=f(p_1^{x_1}p_2^{x_2}\cdots p_k^{x_k}) = q_1^{x_1}q_2^{x_2}\cdots q_k^{x_k}$. Since $n$ and $m$ both have the same amount of prime factors, and each corresponding prime has the same power $\alpha_i$, the result follows.\\
\end{proof}
\begin{theorem}\label{1.10} Consider $\Gamma^T(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$ and $\Gamma^T(\mathbb{Z}_{m_1}\times\cdots\times\mathbb{Z}_{m_k})$ where the prime factorization of $n_i$ has the same form as $m_i$ for each $i$. That is, $n_i$ and $m_i$ have the same amount of prime factors and the same power for each prime. Then $\Gamma^T(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k}) \cong \Gamma^T(\mathbb{Z}_{m_1}\times\cdots\times\mathbb{Z}_{m_k})$.
\end{theorem}
\begin{proof}
Take arbitrary $n_i$.\\
Denote the prime factorization of $n_i = p_{i, 1}^{\alpha_{i, 1}}\cdots p_{i, j_i}^{\alpha_{i, j_i}}$ where $j_i$ is the number of prime factors of $n_i$. Likewise, $m_i = q_{i, 1}^{\alpha_{i, 1}}\cdots q_{i, j_i}^{\alpha_{i, j_i}}$. Note that the only difference between these factorizations is the value of the primes being used. The powers and amount of primes are the same. Consider arbitrary $(u_1, \cdots, u_k) \in \Gamma^T(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$. Each $u_i$ is a factor of $n_i$ or 0. We can write $u_i = p_{i, 1}^{x_{i, 1}}\cdots p_{i, j_i}^{x_{i, j_i}}$ where $0 \leq x_{i, l} \leq \alpha_{i, l}$. Note that if $u_i$ is 1, each $x_{i, l}$ is 0 and if $u_i$ is 0, $x_{i, l} = \alpha_{i, l}$ for every $l$.\\
Define a function $f: \Gamma^T(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k}) \to \Gamma^T(\mathbb{Z}_{m_1}\times\cdots\times\mathbb{Z}_{m_k})$ as $f(u_1, \cdots, u_k) = f(p_{1, 1}^{x_{1, 1}}\cdots p_{1, j_1}^{x_{1, j_1}}, \cdots, p_{k, 1}^{x_{k, 1}}\cdots p_{k, j_k}^{x_{k, j_k}})$\\
$= (q_{1, 1}^{x_{1, 1}}\cdots q_{1, j_1}^{x_{1, j_1}}, \cdots, q_{k, 1}^{x_{k, 1}}\cdots q_{k, j_k}^{x_{k, j_k}}) = (v_1, \cdots, v_k)$. Note that all we did was only replaced the primes. Hence the result follows as the previous one.
\end{proof}
\begin{theorem}\label{1.11} $\Gamma^T(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$ is isomorphic to $\Gamma^T(\mathbb{Z}_{n_1\cdots n_k})$ if all $n_i$'s are mutually co-prime.
\end{theorem}
\begin{proof}
The proof follows by the Chinese Remainder Theorem.
\end{proof}
The next theorem will show how we can characterize perfectness of $\Gamma(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$. By the above three theorems, without loss of generality we can simply choose primes that will make each $n_i$ co-prime. Then we know the type graph will be isomorphic to $\Gamma(\mathbb{Z}_n)$ where $n$ is the product of all such co-prime $n_i$. So $n$ will have a prime factorization with the total amount of primes in all $n_i$ and they will have corresponding powers. So, we have the following theorem.
\begin{theorem}\label{1.12} $\Gamma(\mathbb{Z}_{n_{1}}\times\mathbb{Z}_{n_{2}}\times\cdots\times \mathbb{Z}_{n_{k}})$ is perfect iff it is possible to find mutually co-prime positive integers $m_1, m_2, \cdots, m_k$, so that each $m_{i}$ has the same amount of prime factors with the same exponent in its prime factorization as that in $n_i$ and $\Gamma(\mathbb{Z}_{m_{1}m_{2}\cdots m_{k}})$ is perfect.
\end{theorem}
\begin{example}
For example, $\Gamma(\mathbb{Z}_{p^2q}\times\mathbb{Z}_{p})$ is perfect because $\Gamma(\mathbb{Z}_{a^2bc})$ is perfect as shown by ~\cite{jK55}. Also note, no product with a dimension greater than four can be perfect. $\Gamma(\mathbb{Z}_{p_1}\times\cdots\times\mathbb{Z}_{p_5})$ is not perfect since no $\Gamma(\mathbb{Z}_{p_1\cdots p_5})$ is perfect as shown by ~\cite{jK55}.
\end{example}
\section{Some properties of $\Gamma(\mathbb{Z}_n)$}
In this section we characterize $\Gamma(\mathbb{Z}_n)$ by various qualities such as completeness, chordality and clique number. A helpful construction used is the strong type graph. We define the strong type graph as the type graph with self loops. We normally do not consider self-loops, in zero-divisor graphs and type graphs, but in the strong type graph, a vertex has a loop at it if it annihilates itself. We denote the strong type graph of $\Gamma(\mathbb{Z}_n)$ as $\Gamma^S(\mathbb{Z}_n)$.\\
Another construction used commonly in this section is $n^*$. Consider some $\Gamma(\mathbb{Z}_n)$. Let $n = p_1^{\alpha_1}p_2^{\alpha_2}\cdots p_m^{\alpha_m}$, then $n^*=p_1^{\beta_1}p_2^{\beta_2}\cdots p_m^{\beta_m}$ where $\beta_i$ is half of $\alpha_i$ rounded up. This construction is very useful, as some properties of vertices can be associated with whether or not the vertex is a multiple of $n^*$.
\begin{lemma}\label{2.0} Two arbitrary vertices $u$ and $v$ in $\Gamma(\mathbb{Z}_n)$ that are both in the same type class $T_i$ share an edge iff $T_i$ has a self-loop in the strong type graph.
\end{lemma}
\begin{proof}
Let $T_i$ have a self-loop. Then $i^2 = 0$. Since every $u, v\in T_i$ are multiples of $i$, $u$ and $v$ will share an edge.\\
Conversely, suppose $T_i$ does not have a self-loop. Take arbitrary $u$ and $v$ in $T_i$. According to the definition of type class, $u$ and $v$ are some multiple of $i$ where $gcf(u, n)=i$ and likewise for $v$. We can write $u=ai$ and $v=bi$ where $gcf(a, n/i)=1$ and $gcf(b, n/i)=1$. Assume $u$ and $v$ share an edge. Then $uv=cn$, $abi^2=cn$ where $c$ is a natural number. So $\frac{abi^2}{n}=c$. Since $T_i$ does not have a self-loop, $i^2\neq 0$ which means $n$ contains a factor not contained by $i^2$. Let this factor be called $d$. Let $\frac{g}{d}$ represent the simplified form of the fraction $\frac{i^2}{n}$ where $d$ is guaranteed to not be 1. By substitution, $\frac{abg}{d} = c$. But this is a contradiction since $a$, $b$ and $g$ do not share a factor with $n/i$, so cannot cancel the $d$ out of the denominator. Therefore, the expression cannot be equal to $c$, a natural number. $u$ and $v$ do not share an edge.
\end{proof}
\begin{theorem}\label{2.1} $\Gamma(\mathbb{Z}_{p^2})$ is complete where $p$ is prime.
\end{theorem}
\begin{proof}
Take arbitrary zero divisors of $\mathbb{Z}_n$, $u$ and $v$. $u$ and $v$ must both share a common factor with $n$, and the only possible factor is $p$ since $p^2$ is zero. So both $u$ and $v$ have a factor of $p$. Then $u$ and $v$ share an edge. $\Gamma(\mathbb{Z}_{p^2})$ is complete.
\end{proof}
\begin{theorem}\label{2.2} $\Gamma(\mathbb{Z}_{p^x})$ where $p$ is prime and $x \geq 3$ is not complete.
\begin{proof}
Let $x\geq 3$.
\begin{enumerate}
\item[Case 1:] $p=2$: $p$ and $3p$ are distinct non-zero zero-divisors that are not connected.
\item[Case 2:] $p\ne 2$: $p$ and $2p$ are distinct non-zero zero-divisors that are not connected.
\end{enumerate}
\end{proof}
\end{theorem}
\begin{theorem}\label{2.3} $\Gamma(\mathbb{Z}_n)$, where $n \geq 2$ is complete iff $n=p^2$.
\end{theorem}
\begin{proof}
Let $\Gamma(\mathbb{Z}_n)$ be complete. Assume two or more distinct prime factors of $n$ exist. Label the smallest such factor by $p$. Now choose another distinct prime factor of $n$ as $q$. $p$ is a zero divisor and shares an edge with $n/p$. Since $p$ and $q$ are both prime factors of $n$, $pq\leq n$. Also, since $p<q$, $p^2<pq$. So $p^2<pq\leq n$ which means $p^2$ is non-zero and distinct from $p$. $p^2$ shares an edge with $n/p$ so $p^2$ is a distinct zero-divisor that does not share an edge with $p$, making $\Gamma(\mathbb{Z}_n)$ not complete. So $n$ must only have one prime factor. Then, by Theorem \ref{2.2}, $\Gamma(\mathbb{Z}_{p^x})$ is not complete if $x\geq 3$. So $x=2$. So when $\Gamma(\mathbb{Z}_n)$ is complete, $n=p^2$.
The converse follows by Theorem \ref{2.1}.
\end{proof}
\begin{theorem}\label{2.4} $\Gamma(\mathbb{Z}_n)$ is k-partite if $\Gamma^S(\mathbb{Z}_n)$ is k-partite.
\end{theorem}
\begin{proof}
Let $\Gamma^S(\mathbb{Z}_n)$ be k-partite. Then $\Gamma^S(\mathbb{Z}_n)$ can be partitioned into $k$ disjoint subsets $S_1, S_2, \cdots, S_k$ such that no two vertices in the same set share an edge. Partition $\Gamma(\mathbb{Z}_n)$ into a similar grouping $Q_1, Q_2, \cdots, Q_k$ where $u\in Q_i$ iff $u\in T_u \in S_i$. Consider arbitrary $u$ and $v$, vertices of $\Gamma(\mathbb{Z}_n)$ that are in the same partitioned set $Q_i$.\\
\begin{enumerate}
\item[Case 1:] $u$ and $v$ are in different type classes.\\
Call such classes $T_u$ and $T_v$. Then since $u$ and $v$ are both in $Q_i$, $T_u$ and $T_v$ are both in $S_i$ which means $T_u$ does not share an edge with $T_v$. So, by ~\cite{jK55} $u$ and $v$ do not share an edge.\\
\item[Case 2:] $u$ and $v$ are in the same type class.\\
Call this class $T_u$. Then since $\Gamma^S(\mathbb{Z}_n)$ is k-partite, $T_u$ does not form a loop with itself. Hence, by Lemma \ref{2.0}, $u$ and $v$ do not share an edge.
\end{enumerate}
\end{proof}
\begin{theorem}\label{2.5} $\Gamma(\mathbb{Z}_n)$ is complete k-partite if $\Gamma^S(\mathbb{Z}_n)$ is complete k-partite.
\begin{proof}
Let $\Gamma^S(\mathbb{Z}_n)$ be complete k-partite. Then by Theorem 2.4, $\Gamma(\mathbb{Z}_n)$ is k-partite.
Using the partition used in Theorem \ref{2.4}, if we let $\Gamma^S(\mathbb{Z}_n)$ be partitioned into $k$ disjoint subsets $S_1, S_2, \cdots, S_k$, then $\Gamma(\mathbb{Z}_n)$ can be partitioned into $k$ disjoint subsets $Q_1, Q_2, \cdots, Q_k$, where arbitrary vertex of $\Gamma(\mathbb{Z}_n)$ is in $Q_i$ if its type class is in $S_i$. Consider arbitrary vertices in $\Gamma(\mathbb{Z}_n)$, $u$ and $v$, that are not in the same $Q_i$. Then $u$ and $v$ must be in different type classes in two different $S_i$'s. Call these classes $T_u$ and $T_v$. Since $\Gamma^S(\mathbb{Z}_n)$ is complete k-partite, $T_u$ and $T_v$ share an edge. Then $u$ and $v$ share an edge by ~\cite{jK55}.
\end{proof}
\end{theorem}
\begin{remark}The converse of Theorem \ref{2.4} and \ref{2.5} is not always true. If the zero-divisor graph is k-partite, but has a self-annihilating vertex, the strong type graph will have a self-loop, which prevents it from being k-partite.
\end{remark}
\begin{theorem}\label{2.6} If $n$ is square free, $\Gamma(\mathbb{Z}_{n})$ is k-partite, where $k$ is the number of distinct prime factors of $n$.
\end{theorem}
\begin{proof}
Consider the strong type graph $\Gamma^S(\mathbb{Z}_{n})$. Let, $n = p_1p_2\cdots p_k$. Partition the graph into $k$ sets $S_1, S_2, \cdots, S_k$. A vertex $T_a$ in the strong type graph is in $S_i$ if $gcf(a, p_i) = 1$ and $gcf(a, p_h) > 1$ for all $h<i$.\\
We now claim that $S_1, S_2, \cdots, S_k$ covers all the vertices of $\Gamma^S(\mathbb{Z}_{n})$.\\
Assume there is a $T_a$ that is not in any $S_i$. Since $T_a$ is a vertex, $a$ must be a factor of $n$ that is also less than $n$. So $a$ must omit at least one $p_i$. So $gcf(a, p_i) = 1$. Since $T_a$ is not in any $S_i$, there must exist some $h<i$ such that $gcf(a, p_h) = 1$. Choose the smallest index $h$ of such $p_h$. Then $T_a$ must be in $S_h$ which is a contradiction.\\
Our next claim is any two vertices $u$ and $v$ in the same partition do not share an edge.\\
Consider arbitrary $u$ and $v$ in $S_i$. Both $u$ and $v$ do not contain $p_i$ so they do not share an edge. So the strong type graph is k-partite.\\
By Theorem \ref{2.4}, $\Gamma(\mathbb{Z}_{p_1p_2\cdots p_k})$ is k-partite.
\end{proof}
\begin{lemma}\label{2.7} Arbitrary type class $T_a$ in $\Gamma^T(\mathbb{Z}_n)$ contains only one element iff $a=\frac{n}{2}$.
\end{lemma}
\begin{proof}
Let $T_a$ be a type class in $\Gamma^T(\mathbb{Z}_n)$ that has only one element. Assume $a\neq \frac{n}{2}$. Since $a$ is a factor of $n$, $\frac{n}{a}=f$ is also a factor of $n$. Note that $f \geq 3$.\\
Consider the vertex $a(f-1)$. The quantity $(f-1)$ does not share any factors with $f$. Since $af = n$, $gcf(a(f-1), n) = a$. So $a(f-1)\in T_a$. Also note that $a < a(f-1) < n$. So $a(f-1)$ is a distinct vertex in $T_a$ which is a contradiction.
So $a= \frac{n}{2}$.\\
\\
Let $a= \frac{n}{2}$. Then $a$ is the only element in $T_a$ since $2a = n$.
\end{proof}
\begin{corollary}
Analogous to above, $T_{n/p}$ in $\Gamma^T(\mathbb{Z}_n)$ contains exactly $p - 1$ elements if $p$ is the smallest prime factor of $n$.
\end{corollary}
\begin{lemma}\label{2.8} There is at most one type class with only one element.
\begin{proof}
Assume there are two or more distinct type classes that have only one element. Call two of these classes $T_u$ and $T_v$. By Lemma \ref{2.7}, $u=v=\frac{n}{2}$ which is a contradiction.
\end{proof}
\end{lemma}
\begin{theorem}\label{2.9} $\Gamma(\mathbb{Z}_n)$ is k-partite if $\Gamma^S(\mathbb{Z}_n)$ is k-partite or $\Gamma^T(\mathbb{Z}_n)$ is k-partite and the only self-connected vertex of $\Gamma(\mathbb{Z}_n)$ is $T_{\frac{n}{2}}$.
\end{theorem}
\begin{proof}
Let $\Gamma^S(\mathbb{Z}_n)$ be k-partite. By Theorem \ref{2.4}, $\Gamma(\mathbb{Z}_n)$ is k-partite. Let $\Gamma^T(\mathbb{Z}_n)$ be k-partite and let $\Gamma^S(\mathbb{Z}_n)$ have only one self-connected vertex, $T_{\frac{n}{2}}$. Consider arbitrary distinct $u$ and $v$, zero divisors of $\Gamma(\mathbb{Z}_n)$, that are in the same partition.\\
\begin{enumerate}
\item[Case 1:] $u$ and $v$ are in the same type class.\\
By Lemma \ref{1.10}, $T_{\frac{n}{2}}$ has only one element, so if $u$ and $v$ are distinct, they cannot be in $T_{\frac{n}{2}}$. Then the type class they are in is not self-connected so $u$ and $v$ do not share an edge.\\
\item[Case 2:] $u$ and $v$ are in different type classes.\\
Since $u$ and $v$ are in the same partition, their type classes are in the same partition and do not share an edge. Thus, $u$ and $v$ do not share an edge.\\
\end{enumerate}
\end{proof}
\begin{lemma}\label{2.10} A vertex in $\Gamma(\mathbb{Z}_n)$ annihilates itself iff it is a multiple of $n^*$.
\end{lemma}
\begin{lemma}\label{2.11} Consider two arbitrary vertices in $\Gamma(\mathbb{Z}_n)$ $u$ and $v$ such that $u$ is a factor of $v$. The largest clique containing $v$, $M_v$ has a magnitude greater than or equal to the $M_u$, the largest clique containing $u$.
\end{lemma}
\begin{proof}
Take arbitrary vertices $u$ and $v$ in $\Gamma(\mathbb{Z}_n)$. Let $u$ be a factor of $v$. Assume the opposite, that $M_u$ has a larger magnitude than that of $M_v$. Every element $e$ in $M_u\setminus \{u\}$ has the property $eu=0$. Then $\forall e\in M_u$, $ev=0$. So a clique $C$ exists with $v$ and each $e$ in $M_u\setminus \{u\}$. $C$ has a magnitude equal to the magnitude of $M_u$ which is a contradiction since $M_v$ is the largest clique containing $v$.
\end{proof}
\begin{theorem}\label{2.12} $cl(\Gamma(\mathbb{Z}_n)) \geq \frac{n}{n^*} + k - 1$ where $k$ is the number of odd-power primes in the prime factorization of $n$.
\end{theorem}
\begin{proof}
We claim that any two multiples of $n^*$ share an edge.\\ Take two arbitrary multiples of $n^*$, $an^*$ and $bn^*$. Since $(n^*)^2 \geq n$ these two vertices will share an edge. So the multiples of $n^*$ form a clique. Call it $C$. An arbitrary vertex of $C$ will be of the form $an^*$ for $1\leq a<\frac{n}{n^*}$. The amount of elements in this clique is $\frac{n}{n^*} - 1$, so the clique number of the graph is at least $\frac{n}{n^*} - 1$. Now consider all vertices of the form $n^*/q$ where $q$ is an arbitrary odd-power prime in the prime factorization of $n$. Because $n^*$ has a factor of $q$ with power of half rounded up, and $n^*/q$ has a power of half rounded down, arbitrary $n^*/q$ shares an edge with each $an^*$ in $C$. Also, each $n^*/q_1$ shares an edge to each other $n^*/q_2$. This is because the power of $q_1$ in $n^*/q_1$ is half rounded down and in $n^*/q_2$ it is half rounded up, and likewise for $q_2$. Since $k$ is the number of distinct odd powered primes in the prime factorization of $n$, $cl(\Gamma(\mathbb{Z}_n)) \geq \frac{n}{n^*} + k - 1$.
\end{proof}
\begin{theorem}\label{2.13} $cl(\Gamma(\mathbb{Z}_n)) \leq \frac{n}{n^*} + k - 1$ where $k$ is the number of odd-power primes in the prime factorization of $n$.
\end{theorem}
\begin{proof}
Consider arbitrary clique $C$. Partition $C$ into sets $L$ and $N$ where $L$ is the set of vertices of $C$ that are not multiples of $n^*$ and $N$ is the set of vertices of $C$ that are multiples of $n^*$. Consider arbitrary vertex $l_1$ in $L$. Since $l_1$ is not a multiple of $n^*$, there must be some prime factor $p_1$ of $n$ whose power in $l_1$ is less than half of its power in $n$ (since if every prime factor were greater than or equal to half, $l_1$ would be a multiple of $n^*$). Every other $l_i$ in $L$ must have its $p_1$ factor with a power greater than or equal to half its power in $n$ for it to share an edge with $l_1$. Consider another vertex $l_2$ in $L$. $l_2$ must also have a prime factor whose power is less than half its power in $n$, but it cannot be $p_1$. Call it $p_2$. So each $l_i$ in $L$ must have a distinct prime factor $p_i$ that has a power less than half its power in $n$. Let $m$ be the number of distinct prime factors of $n$. Then there can be a maximum of $m$ many $l_i$ in $L$. $N$ has a maximum size of $\frac{n}{n^*}-1$, so the clique number is at most $\frac{n}{n^*}+m-1$.\\
Consider some $e_1$, a vertex in $L$ whose corresponding $p_1$ has an even power in $n$. $e_1$ does not share an edge with $n^*$. This means the clique number is one less if $n$ has an even-powered prime. Consider another $e_2$ that has an even $p_2$ whose power is less than half. Then $e_2$ does not share an edge with $p_1 n^*$. In general, a vertex $e_i$ whose corresponding $p_i$ has an even power does not share an edge with distinct vertices $p_1 p_2 \cdots p_{i-1} n^*$. So the size of $C$ is reduced by the number of even powered-primes of $n$. This value can be represented by $m-k$ where $k$ is the number of odd-powered primes of $n$. Hence, since $C$ is arbitrary, $cl(\Gamma(\mathbb{Z}_n)) \leq \frac{n}{n^*} + m - (m - k) - 1$. $cl(\Gamma(\mathbb{Z}_n)) \leq \frac{n}{n^*} + k - 1$.
\end{proof}
\begin{theorem}\label{2.14} $cl(\Gamma(\mathbb{Z}_n)) = \frac{n}{n^*} + k - 1$.
\end{theorem}
\begin{proof}
The proof follows by Theorem \ref{2.12} and Theorem \ref{2.13}.
\end{proof}
\begin{theorem}\label{2.15} There are no non-empty, non-complete, regular $\Gamma(\mathbb{Z}_n)$.
\end{theorem}
\begin{proof}
Consider all $\Gamma(\mathbb{Z}_n)$ that are non-empty and not complete. Assume $\exists$ some regular graph among these graphs.\\
\begin{enumerate}
\item[Case 1:] $n=p^x$ where $p$ is prime\\
If $x=1$, the graph is empty, and if $x=2$, the graph is complete, so $x\geq 3$.
Then $p$ is a vertex that shares an edge with $p-1$ many other vertices, and $p^2$ is a vertex that shares an edge with $p^2-1$ many other vertices. Since the graph is regular, $p-1=p^2-1$, thus $p=p^2$, which means $p=1$, a contradiction.\\
\item[Case 2:] $n=p_1^{\alpha_1}p_2^{\alpha_2}\cdots p_m^{\alpha_m}$, $m \geq 2$ and $p_i$ are all prime\\
Vertex $p_1$ shares an edge with $p_1-1$ many other vertices, and the vertex $p_2$ shares an edge with $p_2-1$ many other vertices. Since the graph is regular, $p_1-1=p_2-1$, thus $p_1=p_2$ which is a contradiction since $p_1$ and $p_2$ are distinct.\\
So the only non-empty regular graphs are complete.
\end{enumerate}
\end{proof}
\begin{theorem}\label{2.16} $\Gamma(\mathbb{Z}_n)$ is chordal iff $n=p^x, 2p$ or $2p^2$, where $p$ is prime and $x$ is a positive integer.
\end{theorem}
\begin{proof}
Let $n=p^x$. Assume that $\Gamma(\mathbb{Z}_{p^x})$ is not chordal. Then $\exists$ a cycle $C$ of length $>$ 3, that has no chord. Let $y$ be a vertex of $C$ that is not a multiple of $n^*$. Then, since the power of $p$ in $y$ is strictly less than $\frac{x}{2}$, each neighbor must be a multiple of $n^*$. Then the two neighbors of $y$ in $C$ share an edge which is a chord. So all vertices in $C$ must be a multiple of $n^*$ which also causes a chord. So $\Gamma(\mathbb{Z}_{p^x})$ is chordal.\\
Let $n=2p$. $\Gamma(\mathbb{Z}_{2p})$ is a star because it is a line segment only. Then, $\Gamma(\mathbb{Z}_{2p})$ is chordal.\\
Let $n=2p^2$. Assume $\Gamma(\mathbb{Z}_{2p^2})$ is non-chordal. Then $\exists$ a cycle $C$ of length $>$ 3 that has no chord.\\
Let $a$ be a vertex of $C$ in the type class $T_p$. Each neighbor of $a$ must be a multiple of $2p$, and therefore, is in the type class $T_{2p}$. Each multiple of $2p$ shares an edge, so there exists a chord in $C$. So there can be no vertices in the type class $T_p$ in $C$.\\
Let $b$ be a vertex of $C$ that is in the type class $T_2$. Every neighbor of $b$ must be in the type class $T_{p^2}$. But there is only one element in $T_{p^2}$ so $b$ cannot have two distinct neighbors. So $b$ is not a vertex of $C$.\\
So each vertex of $C$ must be in either $T_{p^2}$ or $T_{2p}$. Then since there is only one element of $T_{p^2}$, and the magnitude of $C$ is at least 4, there are at least 3 elements of $T_{2p}$ in $C$. Those 3 elements form a triangle since each multiple of $2p$ annihilates each other multiple of $2p$. But $C$ can't have a triangle since it is chord-less. This is a contradiction.\\
\\
Let $n$ not be $p^x$, $2p$ or $2p^2$.\\
\begin{enumerate}
\item[Case 1:] $n=2^xp^y$ where $y\geq 3$, $x \geq 1$ and $p$ is an odd prime.\\
Then $2^xp-p^y-2^{x+1}p-p^{y-1}$ is a chord-less cycle.\\
\item[Case 2:] $n=2^xp^y$ where $x\geq 2$, $y \geq 1$ and $p$ is an odd prime.\\
Then $2p^y-2^x-p^y-2^{x+1}$ is a chord-less cycle.\\
\item[Case 3:] $n=p^xq^y$ where $p, q\geq 3$ where $p \neq q$ are primes and $x, y$ are non-zero.\\
Then $p^x-q^y-2p^x-2q^y$ is a chord-less cycle.\\
\item[Case 4:] $n=p_1^{\alpha_1}p_2^{\alpha_2}\cdots p_k^{\alpha_k}$ where $k\geq 3$ and $\alpha_i$ is non-zero.\\ Since $k\geq 3, n$ has an odd prime factor $p_1$.
Then $p_1^{\alpha_1}-n/p_1^{\alpha_1}-2p_1^{\alpha_1}-2n/p_1^{\alpha_1}$ is a chord-less cycle.\\
So $\Gamma(\mathbb{Z}_n)$ is non-chordal if $n$ is not $p^x$, $2p$ or $2p^2$.
\end{enumerate}
\end{proof}
\begin{lemma}\label{2.17} If $n^* \neq n$, $\Gamma(\mathbb{Z}_n)$ has a simplicial vertex.
\end{lemma}
\begin{proof}
Let $n^* \neq n$. Then $n/n^*$ is a vertex since $n/n^*$ shares an edge with $n^*$ which is not a multiple of $n$. Since every neighbor of $n/n^*$ is a multiple of $n^*$ and every multiple of $n^*$ shares an edge, $n/n^*$ is a simplicial vertex. So $\Gamma(\mathbb{Z}_n)$ has a simplicial vertex.
\end{proof}
Another construction $n_*$ can be useful. It is similar to $n^*$, but for the odd powered primes, round down instead of up. Consider $\Gamma(\mathbb{Z}_n)$ where $n=p_1^{\alpha_1}p_2^{\alpha_2}\cdots p_k^{\alpha_k}$. Define $n_*$ as $n_* = p_1^{\beta_1}p_2^{\beta_2}\cdots p_k^{\beta_k}$ and $\beta_i = \alpha_i/2$ if $\alpha_i$ is even and $\beta_i = (\alpha_i-1)/2$ if $\alpha_i$ is odd.\\
\\
Note that $n_*n^* = 0$ and if $n$ is square-free, $n_* = 1$.
\begin{lemma}\label{2.18} Arbitrary vertex $v$ in $\Gamma(\mathbb{Z}_n)$ is a simplicial vertex iff $v\in T_2$ or $v\in T_g$ where $g$ is a factor of $n_*$.
\end{lemma}
\begin{proof}
Take arbitrary $v$ in $\Gamma(\mathbb{Z}_n)$. Let $v\in T_2$. Then $v$ only shares an edge with vertices in $T_{n/2}$. By Lemma \ref{2.7}, $T_{n/2}$ has only one element, which makes a clique. So $v$ is simplicial.\\
Let $v\in T_g$ where $g$ is a factor of $n_*$. So $n_* = ag$ where $a$ is a positive integer. Consider some vertex $h$ in $T_j$ that shares an edge with $v$. Then $jg = bn$ for positive integer $b$. $\frac{jn_*}{a} = bn_*n^*$. $\frac{j}{a} = bn^*$. Then $j=abn^*$. So $j$ is a multiple of $n^*$ and therefore, $h$ is a multiple of $n^*$. Since every multiple of $n^*$ shares an edge with every other such multiple, $v$ is a simplicial vertex.\\
Conversely, let $v$ be neither in $T_2$ nor in any $T_g$ where $g$ is a factor of $n_*$. Then, since $v$ is not in any $T_g$, $v$ has some prime with a power greater than half of that in $n$. Call that prime $p_x$ and its power in $v$, $\alpha_x$. Let the type class of $v$ be called $T_w$. Consider the type class $T_{n/w}$. Each vertex in $T_{n/w}$ shares an edge with $v$. Since $v\notin T_2$, $T_{n/w} \neq T_{n/2}$. So by Lemma \ref{2.7}, $T_{n/w}$ has more than one element. Since $n/w$ has a power of $p_x$ less than that of half in $n$, none of the vertices in $T_{n/w}$ share an edge with each other. So the neighbors of $v$ do not form a clique. Hence, $v$ is not simplicial.
\end{proof}
\begin{theorem}\label{2.19} $\Gamma(\mathbb{Z}_n)$ has a simplicial vertex iff the prime factorization of $n$ is not square free or $n$ is even.
\end{theorem}
\begin{proof}
Let $n$ not be square free. Then, $n^* \neq n$. So by Lemma \ref{2.17}, $\Gamma(\mathbb{Z}_n)$ has a simplicial vertex.\\
\\
Let $n$ be even. Then, 2 is a zero divisor. Every neighbor of 2 must be a multiple of $n/2$ which there is only one of, so 2 is a simplicial vertex.\\
\\
Let $n$ be square free and odd. 2 is therefore not a factor of $n$. Then consider arbitrary vertex $x$. $x$ shares an edge with both $n/x$ and $2n/x$. $2n/x$ is non-zero since $x$ is necessarily odd, and $n/x$ and $2n/x$ do not share an edge since $n$ is odd. For if $\frac{n}{x}\frac{2n}{x} = ny$, $2n = yx^2$ and $n = \frac{yx^2}{2}$ which is a contradiction. So there are no simplicial vertices of $\Gamma(\mathbb{Z}_n)$.
\end{proof}
Note: It follows by~\cite{jK55}, (observation 3.2), if in $\Gamma(\mathbb{Z}_n)$ a vertex $u$ is simplicial then $T_u$ is simplicial in $\Gamma^{T}(\mathbb{Z}_n)$. But, not conversely. For example, in $\Gamma^{T}(\mathbb{Z}_{12})$, $T_3$ is simplicial, whereas $3$ is not so in $\Gamma(\mathbb{Z}_{12})$.
\begin{lemma}\label{2.20} If $n$ has three or more distinct prime factors, $\Gamma(\mathbb{Z}_n)$ is not $\gamma-\beta$ perfect.
\end{lemma}
\begin{proof}
Let $n=p_1^{\alpha_1}p_2^{\alpha_2}\cdots p_k^{\alpha_k}$ where $k\geq 3$. By ~\cite{bH77}, the domination number is $k$. Construct some vertex map $V$ whose size is $k$.\\
We claim that $V$ must contain the vertex $n/p_x$ for every $p_x$ prime factor of $n$.\\
Consider the vertex $n/p_x$ for some $p_x$ prime factor of $n$. Let $n/p_x$ not be in $V$. Construct set $C = \{ p_xp_i | 1\leq i\leq k \}$. $n/p_x$ shares an edge with every vertex in $C$. Since $n/p_x \notin V$, every element of $C$ is in $V$. $C$ has $k$ many vertices, so $V$ has at least $k$ many vertices. Consider vertex $p_x$. $p_x$ shares an edge with $n/p_x$ which is not covered by $V$, so $V$ has at least $k+1$ vertices. That is a contradiction since the size of $V$ is $k$. So each $n/p_x$ is in $V$.\\
\\
Consider the type classes $T_{n/p_1}$, $T_{n/p_2}$ and $T_{n/p_3}$. By Lemma \ref{2.8}, there can be at most one type class with only one element. At least two of these type classes have more than one element. Without loss of generality, let them be $T_{n/p_1}$ and $T_{n/p_2}$. Since $n/p_1$ and $n/p_2$ are both in $V$, choose different vertices $u$ and $v$ in these type classes. $u$ and $v$ share an edge since they are multiples of $n/p_1$ and $n/p_2$ respectively, but are not in $V$. Then $V$ must contain at least one other element making the size of $V$ at least $k+1$. This is a contradiction. We cannot construct a vertex map size $k$. So $\Gamma(\mathbb{Z}_n)$ is not $\gamma-\beta$ perfect.
\end{proof}
\begin{theorem}\label{2.21} The only $\gamma-\beta$ perfect $\Gamma(\mathbb{Z}_n)$ are $n=2^3, 3^2, p, 2p$ and $3p$.
\end{theorem}
\begin{proof}
Let $n=2^3$. The domination number clearly equals the smallest vertex map.\\
\begin{tikzpicture}
\node (2) at (1, 1) {2};
\node (4) at (2, 2) {4};
\node (6) at (3, 1) {6};
\foreach \x/\y in {2/4, 4/6}
\draw (\x) -- (\y);
\end{tikzpicture}\\
Let $n=3^2$. The domination number clearly equals the smallest vertex map.\\
\begin{tikzpicture}
\node (3) at (1, 1) {3};
\node (6) at (2, 1) {6};
\foreach \x/\y in {3/6}
\draw (\x) -- (\y);
\end{tikzpicture}\\
Let $n=2p$. Then the graph is a star, so the domination number and the smallest vertex map are both 1.\\
Let $n=3p$. Then $V = \{p, 2p\}$ is both a minimal dominating set and a minimal vertex map.\\
Let $n=p$. Then both the domination number and the smallest vertex map is 0 since the graph is empty.\\
\\
Now, we will show that all other $\Gamma(\mathbb{Z}_n)$ are not $\gamma-\beta$ perfect.\\\\
Let $n=2^2$. The empty set is a vertex map since there are no edges in this map, so smallest vertex map and the domination number are not the same.\\
Let $n=2^x$, $x\geq 4$. Then $2^{x-1}-2^{x-2}-3\cdot2^{x-2}$ is a triangle. Triangles prevent vertex maps of size 1, and by ~\cite{bH77} the domination number is 1, so the values do not match.\\
Let $n=3^x$, $x\geq 3$. Then $3^{x-1}-2\cdot3^{x-1}-3^{x-2}$ is a triangle that prevents vertex maps of size 1.\\
Let $n=p^x$, $p\geq 5$, $x\geq 2$. Then $p^{x-1}-2\cdot p^{x-1}-3\cdot p^{x-1}$ is a triangle.\\
Let $n=pq$, $q>p\geq 5$. The domination number is 2 by~\cite{bH77}. $p-q-2p-2q-3p-3q$ is a hole size 6. There cannot be a vertex map that covers a hole of that size, so the smallest vertex map is not 2.\\
Let $n=p^xq$, $x\geq 2$. The domination number is 2.\\
\begin{enumerate}
\item[Case 1:] $p=2$.\\
Then $p^{x-1}q-p^x-q-p^{x+1}-pq$ is a non-induced sub-graph that cannot be covered by a vertex map size 2.\\
\item[Case 2:] $p\neq 2$.\\
Then $p^x-p^{x-1}q-p-2p^{x-1}q-2p$ is a non-induced sub-graph that cannot be covered by a vertex map size 2.
The smallest vertex map is larger than 2 making the graph not $\gamma-\beta$ perfect.\\
Let $n=p^xq^y$, $x, y \geq 2$. The domination number is 2 by~\cite{bH77}. Assume there is a vertex map $V$ size 2. Consider the edges $p-p^{x-1}q^y$ and $q-p^xq^{y-1}$. $V$ must contain at least one vertex of each edge. By Lemma \ref{2.8} only one type class can have only one vertex. Consider the type classes $T_{p^xq^{y-1}}$ and $T_{p^{x-1}q^y}$. At least one of them must contain more than one vertex. Without loss of generality let that be $T_{p^{x-1}q^y}$. Then there exists some $u\in T_{p^{x-1}q^y}$ that is not in $V$. The edge $p-u$ is not covered by $V$, so the size of $V$ is at least one more than 2 which is a contradiction.\\
Let $n=p_1^{\alpha_1}p_2^{\alpha_2}\cdots p_k^{\alpha_k}$, $k\geq 3$. Then by Lemma \ref{2.20}, the graph is not $\gamma-\beta$ perfect.\\
So the only $\gamma-\beta$ graphs $\Gamma(\mathbb{Z}_n)$ are $2^3, 3^2, p, 2p$ and $3p$.
\end{enumerate}
\end{proof}
\section{Some properties of $\Gamma(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$}
In this section, we discuss some facts about $\Gamma(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$. It is often possible to relate some properties of the individual $\Gamma(\mathbb{Z}_{n_i})$ to the graph of the product. One example is that the domination number of $\Gamma(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$ has an upper and lower bound corresponding to the domination number of each $\Gamma(\mathbb{Z}_{n_i})$.
\begin{theorem}\label{3.0} Consider two arbitrary commutative rings with unity, $R$ and $S$. $\Gamma(R\times S)$ is complete iff $|R|=|S|=2$.
\end{theorem}
\begin{proof}
Consider some $R$ and $S$ such that $|R|=|S|=2$. Since both $R$ and $S$ have $1$, the only elements of $R$ and $S$ are $0$ and $1$, where by $1$ we denote the unity of the respective ring. Then the zero divisor graph is $(0, 1) - (1, 0)$ which is complete.\\
Conversely, let $R$ or $S$ have more than 2 elements. Without loss of generality, let $R$ have more than 2 elements. Then $R$ has some element $a$ that is neither $1$ nor $0$. The graph $\Gamma(R\times S)$ has vertices $(1, 0)$ and $(a, 0)$. These vertices do not share an edge because $1\cdot a = a$ which is not zero. So $\Gamma(R \times S)$ is not complete.\\
\end{proof}
\begin{theorem}\label{3.1} $\Gamma(R_1\times \cdots\times R_k)$ where $k \geq 2$ and each $R_i$ is a commutative ring with $1$. This graph is complete iff $k=2$ and $|R_i| = 2$ for all $i$.
\end{theorem}
\begin{proof}
Consider some $\Gamma(R_1\times\cdots\times R_k)$ where $k=2$ and all $|R_i| = 2$. Then by Theorem 3.0, $\Gamma(R_1\times\cdots\times R_k)$ is complete.\\
Consider some $\Gamma(R_1\times \cdots\times R_k)$ that does not meet these criteria. If $k \geq 3$, then $(1, 0, 1)$ and $(1, 1, 0)$ are two vertices that do not share an edge. If any $|R_i| > 2$, then $R_i$ has an element $a$ that is not 0 or 1. Then $(\cdots, a, \cdots)$ does not share an edge with $(\cdots, 1, \cdots)$, where $a$ and $1$ are placed in the $i$-th entry of the respective elements. So $\Gamma(R_1\times \cdots\times R_k)$ is not complete.
\end{proof}
\begin{theorem}\label{3.2} $\Gamma(\mathbb{Z}_n\times\mathbb{Z}_m)$ where $n, m \geq 2$ is complete-bipartite iff $n$ and $m$ are prime.
\begin{proof}
Let $m$ and $n$ be prime. Then partition $\Gamma(\mathbb{Z}_n\times\mathbb{Z}_m)$ into sets $S_n$ and $S_m$ such that
$S_n=\{(x, 0)|0<x<n\}$ and $S_m=\{(0, y)|0<y<m\}$.\\
We claim that $S_n\cup S_m = \Gamma(\mathbb{Z}_n\times\mathbb{Z}_m)$.\\
Assume, $\exists$ a zero divisor $a=(a_1, a_2)$ that is not in $S_n\cup S_m$. Both $a_1$ and $a_2$ are non-zero as $m$ and $n$ are prime. Since $a$ is a zero-divisor, there must be some $b = (b_1, b_2)$ that shares an edge with $a$. So $a_1 b_1 = 0$. Since $\mathbb{Z}_n$ has no non-zero divisors, and $a_1$ is not zero, $b_1 = 0$. In the same way we find that $b_2$ is zero. This means $a$ is not a zero-divisor because it only shares an edge with 0. So $S_n\cup S_m = \Gamma(\mathbb{Z}_n\times\mathbb{Z}_m)$.\\
Take arbitrary $u, v\in S_n$. Then $u=(u_1, 0)$ and $v=(v_1, 0)$. Since $u_1 v_1 \neq 0$, $uv\neq (0, 0)$ which means $u$ and $v$ do not share an edge. In the same way $u$ and $v$ do not share an edge if they are both in $S_m$. So $u$ and $v$ do not share an edge if they are in the same partition which is the definition of bipartite.\\
Thus, it follows from the construction of $S_n$ and $S_m$, that $\Gamma(\mathbb{Z}_n\times\mathbb{Z}_m)$ is complete bipartite.\\
Conversely, let $\Gamma(\mathbb{Z}_n\times\mathbb{Z}_m)$ be complete bipartite. Assume one or both of $n$ and $m$ are not prime. Let the non-prime be $n$. Then, there is a non-zero zero divisor of $\mathbb{Z}_n$. Call it $k$. Since $\Gamma(\mathbb{Z}_n\times\mathbb{Z}_m)$ is complete-bipartite, the vertices of $\Gamma(\mathbb{Z}_n\times\mathbb{Z}_m)$ can be partitioned into 2 disjoint subsets such that no edges exist between two vertices in the same partition, and every pair of vertices in different partitions share an edge. $(1, 0)$ is a zero divisor since it shares an edge with $(0, 1)$. $(k, 0)$ is also a zero divisor since it also shares an edge with $(0, 1)$. Since $(k, 0)$ does not share an edge with $(1, 0)$, they must be in the same partition. Call it $S_1$ and let the other partition be $S_2$. Since $k$ is a zero-divisor of $\mathbb{Z}_n$, $\exists k'$ not necessarily distinct such that $k\cdot k' = 0$. Then $(k', 1)$ shares an edge with $(k, 0)$ which means $(k', 1)\in S_2$. Since $\Gamma(\mathbb{Z}_n\times\mathbb{Z}_m)$ is complete-bipartite, $(1, 0)$ must share an edge with $(k', 1)$ since they are in opposite partitions, but their product is not $0$, which is a contradiction. So both $n$ and $m$ must be prime.
\end{proof}
\end{theorem}
\begin{corollary} From this theorem it follows that $\Gamma(\mathbb{Z}_n\times\mathbb{Z}_m)$ has a complete bipartite sub-graph.
\end{corollary}
This is formed by $S_n \cup S_m$. If one of them is not a prime, we can delete all vertices that have at least one entry dividing either $n$ or $m$ respectively, to get a complete bipartite subgraph.
\begin{theorem}\label{3.3} $\Gamma(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$ where $\forall n_i \geq 2$ and $k\geq 2$ is bipartite iff $k=2$ and both $n_i$ are prime, or one $n_x$ is prime and the other is $4$.
\end{theorem}
\begin{proof}
Let $k=2$ and both $n_1$ and $n_2$ be prime. By Theorem \ref{3.2}, $\Gamma(\mathbb{Z}_{n_1}\times \mathbb{Z}_{n_2})$ is bipartite.\\
Let $k=2$ and let one of $n_i$ be 4 and the other be prime. Without loss of generality, let $n_1=4$. Then $n_2$ is prime. Partition the vertices into set $A$ and $B$ where $A$ is the set of all vertices of the form $(a, 0)$ where $a\in\mathbb{Z}_{n_1}\setminus\{0\}$ and $B$ is everything else. Consider arbitrary, distinct elements of $A$, $(a_1, 0)$ and $(a_2, 0)$. They do not share an edge, since there are no two distinct $a_1$ and $a_2$ that share an edge in $\Gamma(\mathbb{Z}_{n_1})$. Consider all vertices in $B$. Assume $\exists u, v \in B$ such that $u$ shares an edge with $v$. Then, $u = (u_1, u_2)$ and $v = (v_1, v_2)$. Note that $u_2, v_2 \neq 0$. But $u_2 v_2 = 0$, which means $u_2$ and $v_2$ are zero divisors in $\mathbb{Z}_{n_2}$. This is impossible since there are no zero divisors in $\mathbb{Z}_{n_2}$. So $\Gamma(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$ is bipartite.\\
Conversely, let $\Gamma(\mathbb{Z}_{n_1}\times\cdots\times\mathbb{Z}_{n_k})$ be bipartite.\\
We first claim that $k=2$.\\
Assume $k\geq 3$. Then, $(1, 0, 0, \cdots, 0) - (0, 1, 0, \cdots, 0) - (0, 0, 1, \cdots, 0)$ is a triangle which cannot exist in a bipartite graph. So $k < 3$. By our definition, $k\geq 2$, so $k=2$.\\
We now claim no $\Gamma(\mathbb{Z}_{n_i})$ can have two or more distinct zero divisors.\\
Assume otherwise. Call two such divisors $u$ and $v$ that share an edge in $\Gamma(\mathbb{Z}_{n_i})$. Without loss of generality, let $u$ and $v$ be in the first slot (so $i=1$). Then $(u, 0) - (v, 0) - (0, 1)$ is a triangle which cannot exist in a bipartite graph. The only $\Gamma(\mathbb{Z}_{n_i})$ that has one element is $\Gamma(\mathbb{Z}_4)$. So all $n_i$ must be either 4 or prime.\\
Our final claim is it is not possible for both $n_i$ to be 4.\\
Assume otherwise. Then $(2, 0) - (2, 2) - (0, 2)$ is a triangle which cannot exist in a bipartite graph.
So, because $\Gamma(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\cdots \times\mathbb{Z}_{n_k})$ is bipartite, $k=2$ and either both $n_i$ are prime, or one is 4 and the other is prime.
\end{proof}
\begin{theorem}\label{3.4} $\Gamma(R_1\times\cdots\times R_k)$ where each $R_i$ is a commutative ring with 1 is not perfect if some $\Gamma(R_i)$ is not perfect.
\end{theorem}
\begin{proof}
Let some $\Gamma(R_i)$ be non-perfect. Then by the Strong Perfect Graph theorem, there exists an odd hole or anti-hole $H$ of length 5 or greater. Let $H$ have a length $l$. Then we write it as, $v_1-v_2-\cdots-v_{l-1}-v_l-v_1$. Then a hole exists in $\Gamma(R_1\times\cdots\times R_k)$. Fill in the $i$th position with the vertices of $H$, and fill the rest in with zeros. The hole is $(0, \cdots, 0, v_1, 0, \cdots, 0)-(0, \cdots, 0, v_2, 0, \cdots, 0)-\cdots-(0, \cdots, 0, v_{l-1}, 0, \cdots, 0)-(0, \cdots, 0, v_l, 0, \cdots, 0)-(0, \cdots, 0, v_1, 0, \cdots, 0)$.\\
The same proof can be used for an anti-hole. So if any $\Gamma(R_i)$ are non-perfect, $\Gamma(R_1\times\cdots\times R_k)$ will also be non-perfect.
\end{proof}
\begin{note}
The converse of Theorem 3.4 is not true. In the graph $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_2\times\mathbb{Z}_2\times\mathbb{Z}_2\times\mathbb{Z}_2)$, every $\Gamma(\mathbb{Z}_2)$ is perfect, but we find the hole $(1, 1, 0, 0, 0)-(0, 0, 1, 1, 0)-(1, 0, 0, 0, 1)-(0, 1, 1, 0, 0)-(0, 0, 0, 1, 1)$.
\end{note}
\begin{theorem}\label{3.5} $\Gamma(R_1\times\cdots\times R_x)$ where each $R_i$ is a commutative ring with 1 is not regular if any $\Gamma(R_i)$ is not empty.
\end{theorem}
\begin{proof}
Take $\Gamma(R_1\times\cdots\times R_x)$. Let some $\Gamma(R_i)$ be non-empty. Consider the vertex $g=(0, \cdots, 0, 1, 0, \cdots, 0)$ that has a 1 at the $i^{th}$ index and 0 filled in all other indices. All neighbors of $g$ must be of the form $(a_1, a_2, \cdots, a_{i-1}, 0, a_{i+1}, \cdots, a_{x-1}, a_x)$, with a zero at the $i^{th}$ index and any value in the other indices, not all zero. Suppose there are $f$ many such vertices.
Since $\Gamma(R_i)$ is non-empty, $\exists k\in \Gamma(R_i)$. Since $k$ is a zero divisor, there must be some $k'\in \Gamma(R_i)$, not necessarily distinct, such that $k\cdot k'=0$. Consider the vertex $h=(0, \cdots, 0, k, 0, \cdots, 0)$ with $k$ in the $i^{th}$ index and the rest filled in with 0. This vertex shares an edge with all vertices that share an edge with $g$. So $h$ shares an edge with at least $f$ vertices. But it also shares an edge with $(1, \cdots, 1, k', 1\cdots, 1)$ which means $h$ shares an edge with at least $f+1$ vertices. This means $g$ and $h$ have a different number of neighbors, so $\Gamma(R_1\times\cdots\times R_x)$ is not regular.
\end{proof}
\begin{theorem}\label{3.6} For arbitrary rings $R$ and $S$, $cl(\Gamma(R\times S)) \geq cl(\Gamma(R)) + cl(\Gamma(S)) + |R'||S'|$ where $R'$ and $S'$ are any set of self-annihilating vertices in a maximal clique of $\Gamma(R)$ and $\Gamma(S)$.
\end{theorem}
\begin{proof}
Let $C$ be a maximal clique in $\Gamma(R)$ and $D$ be a maximal clique in $\Gamma(S)$. Construct an induced subgraph $X = \{(c, 0) \mid c\in C\}\cup\{(0, d) \mid d\in D\}$. Take two arbitrary, distinct vertices in $X$, call them $u$ and $v$.\\
\begin{enumerate}
\item[Case 1:] $u=(c_1, 0), v=(c_2, 0)$\\
Since $c_1$ shares an edge with $c_2$, $u$ and $v$ share an edge.\\
\item[Case 2:] $u=(0, d_1), v=(0, d_2)$\\
since $d_1$ shares an edge with $d_2$, $u$ and $v$ share an edge.\\
\item[Case 3:] $u$ and $v$ are not of the same form.\\
Then, without loss of generality, let $u = (c, 0)$ and $v = (0, d)$. $u$ shares an edge with $v$.\\
So $X$ is a clique in $\Gamma(R\times S)$ with size $cl(\Gamma(R)) + cl(\Gamma(S))$.\\
Now consider $R'$, the set of all self-annihilating vertices in $C$. Each vertex in $R'$ shares an edge with each other vertex in $R'$ because it is an induced sub-graph of a clique. It also shares an edge with every vertex in $C$. Likewise, every vertex in $S'$, the set of all self-annihilating vertices in $D$, shares an edge with every other vertex in $S'$ and every vertex in $D$. Define the induced sub-graph $Y = \{ (r, s) | r\in R', s\in S' \}$. Every vertex $(r, s) \in Y$ shares an edge with every other vertex in $Y$ and every vertex in $X$, so $X\cup Y$ forms a clique size $cl(\Gamma(R)) + cl(\Gamma(S)) + |R'||S'|$.
\end{enumerate}
\end{proof}
\begin{corollary}
Consider $n$ many arbitrary rings $R_1, R_2, \cdots R_n$. Then,
$cl(\Gamma(R_1\times R_2\cdots R_n)) \geq \sum_{i=1}^{n}cl(\Gamma(R_i))+\sum_{i\neq j, i, j \in \{1, 2, \cdots n\}}|R_{i}'||R_{j}'|+\sum_{i\neq j\neq k; i, j, k \in \{1, 2, \cdots n\}}|R_{i}'||R_{j}'||R_{k}'|+\cdots + |R_{1}'||R_{2}'|\cdots|R_{k}'| $, where each $R_{i}'$ is any set of self-annihilating vertices in a maximal clique in $\Gamma(R_{i})$.
\end{corollary}
\begin{proof}
Extending a similar type of construction in the proof of the above theorem, we can consider $C_1, C_2, \cdots C_n$, a collection of maximal cliques in $\Gamma(R_1), \Gamma(R_2), \cdots, \Gamma(R_n)$ respectively. Construct an induced subgraph $X_{i} = \{(0, 0, \cdots, c_{i}, \cdots 0)\mid c_{i}\in C_{i}\}, X =\bigcup_{i=1}^{n} {X_{i}}$, where we place the $c_{i}$ in the $i$-th coordinate. Then $X$ forms a clique of cardinality $\sum_{i=1}^{n}cl(\Gamma(R_i))$. Then consider, $X_{ij} = \{(0, 0, \cdots, c_{i}, \cdots c_j \cdots 0)\mid c_{i}\in R'_{i}, c_{j} \in R'_{j}\}$, where $R'_{i}, R'_{j}$ are any set of self-annihilating vertices in maximal cliques in $\Gamma(R_{i})$ and $\Gamma(R_{j})$, and where we place the $c_{i}$ and $c_{j}$ in the $i$-th and $j$-th entries respectively. Set $Y = \bigcup_{i\neq j; i, j \in \{1, 2, \cdots n\}} X_{ij}$. Then $Y$ forms a clique of cardinality $\sum_{i\neq j; i, j \in \{1, 2, \cdots n\}}|R_{i}'||R_{j}'|$ that is disjoint from $X$. In a similar fashion we can construct $X_{ijk}$ for each distinct triplet $i, j, k \in \{1, 2, \cdots n\}$ and call their union $Z$; then $Z$ gives a clique of cardinality $\sum_{i\neq j\neq k; i, j, k \in \{1, 2, \cdots n\}}|R_{i}'||R_{j}'||R_{k}'|$. Proceeding in this way the result follows.
\end{proof}
\begin{lemma}\label{3.7} Consider $\Gamma(\mathbb{Z}_n)$ for arbitrary $n$. There is a maximal clique $M$ that contains all self-annihilating vertices.
\end{lemma}
\begin{proof}
Follows from Theorem \ref{2.12} and Lemma \ref{2.10}.
\end{proof}
\begin{theorem}\label{3.8} The clique number of $\Gamma(\mathbb{Z}_n\times\mathbb{Z}_m)$ has a lower bound of $cl(\Gamma(\mathbb{Z}_n))+cl(\Gamma(\mathbb{Z}_m)) + (\frac{n}{n^*}-1)(\frac{m}{m^*}-1)$.
\end{theorem}
\begin{proof}
Follows from Theorem \ref{3.6} and the proof of Theorem \ref{2.12} and Lemma \ref{2.10}.
\end{proof}
\begin{theorem}\label{3.11} $\Gamma(R_1\times\cdots\times R_k)$ where $k \geq 2$ and $R_i$ is a commutative ring with 1 has a simplicial vertex iff some $\Gamma(R_i)$ has a simplicial vertex or some $|R_i| = 2$.
\end{theorem}
\begin{proof}
Take arbitrary $\Gamma(R_1\times\cdots\times R_k)$. Let some $\Gamma(R_i)$ have a simplicial vertex $c$. Then consider the vertex $(1, \cdots, 1, c, 1, \cdots, 1)$ where $c$ is in the $i$th slot. Each neighbor of $(1, \cdots, 1, c, 1, \cdots, 1)$ must have 0 in every slot except the $i$th slot, and the value of the $i$th slot must be a neighbor of $c$ in $\Gamma(R_i)$. Since each neighbor of $c$ shares an edge and each other slot is 0, all such neighbors of $(1, \cdots, 1, c, 1, \cdots, 1)$ form a clique. So $\Gamma(R_1\times\cdots\times R_k)$ has a simplicial vertex.\\
\\
Let some $|R_i| = 2$. Then $(1, \cdots, 1, 0, 1, \cdots, 1)$ only shares an edge with $(0, \cdots, 0, 1, 0, \cdots, 0)$ making $(1, \cdots, 1, 0, 1, \cdots, 1)$ simplicial.\\
\\
Let $\Gamma(R_1\times\cdots\times R_k)$ have a simplicial vertex $v$. Also,
assume all $|R_i| > 2$ and no $\Gamma(R_i)$ have any simplicial vertices. Consider arbitrary $v$ in $\Gamma(R_1\times\cdots\times R_k)$. Let $v$ have 0 at some index, $v = (\cdots, 0, \cdots)$. Then since no $|R_i| = 2$, there exists some vertex $a\in R_i$ that is not 0 or 1. $v$ then shares an edge with $(0, \cdots, 0, 1, 0, \cdots, 0)$ and $(0, \cdots, 0, a, 0, \cdots, 0)$ which do not share an edge. So for $v$ to be simplicial, it cannot contain any 0. Let $v$ have $a$ at some index, where $a$ is a zero divisor in its respective $\Gamma(R_i)$. $v = (\cdots, a, \cdots)$. Then $v$ shares an edge with every $(0, \cdots, 0, a', 0, \cdots, 0)$ where $a\cdot a' = 0$ in $\Gamma(R_i)$. $a$ is not simplicial since no $\Gamma(R_i)$ have any simplicial vertex, so some neighbor $(0, \cdots, 0, a', 0, \cdots, 0)$ will not share an edge with another neighbor of the same form. So $v$ is not simplicial if it has any zero-divisors in its slots. For $v$ to be simplicial, every slot must be a non-zero, non-zero-divisor. However, elements of that form are not vertices. So $\Gamma(R_1\times\cdots\times R_k)$ has no simplicial vertices, which is a contradiction. The assumption that all $|R_i| > 2$ and no $\Gamma(R_i)$ have any simplicial vertices is false. So some $|R_i| = 2$ or some $\Gamma(R_i)$ has a simplicial vertex.
\end{proof}
\begin{theorem}\label{3.12} $\Gamma(R_1\times\cdots\times R_k)$ where $R_i$ is a commutative ring with 1 is non-chordal if any $\Gamma(R_i)$ is non-chordal.
\end{theorem}
\begin{proof}
Consider arbitrary $\Gamma(R_1\times\cdots\times R_k)$. Then let some $\Gamma(R_i)$ be non-chordal. So there exists a cycle $a_1-a_2-\cdots-a_k-a_1$ greater than 3 with no chords. Then in $\Gamma(R_1\times\cdots\times R_k)$, there is a cycle $(0, .., a_1, \cdots, 0)-(0, \cdots, a_2, \cdots, 0)-\cdots-(0, \cdots, a_k, \cdots, 0)-(0, \cdots, a_1, \cdots, 0)$, which makes it non-chordal.
\end{proof}
\begin{lemma}\label{3.13} $\Gamma(R_1\times\cdots\times R_k)$ where $R_i$ is a commutative ring with 1 and $k\geq 2$ is non-chordal if more than one $|R_i| \geq 3$.
\end{lemma}
\begin{proof}
In $\Gamma(R_1\times\cdots\times R_k)$, let two or more $|R_i| \geq 3$. Without loss of generality, let the first two slots be the $R_i$ with a magnitude greater than or equal to 3. Then $(1, 0, \cdots, 0)-(0, 1, \cdots, 0)-(a, 0, \cdots, 0)-(0, b, \cdots, 0)$ where $a$ is a non-trivial element of $R_1$ and $b$ is a non-trivial element of $R_2$, is a cycle with no chord. So $\Gamma(R_1\times\cdots\times R_k)$ is non-chordal.
\end{proof}
\begin{lemma}\label{3.14} $\Gamma(R_1\times\cdots\times R_k)$ where $R_i$ is a commutative ring with 1 is non-chordal if $k \geq 4$.
\end{lemma}
\begin{proof}
Let $k\geq 4$. Then $(1, 1, 0, 0, \cdots, 0)-(0, 0, 1, 1, \cdots, 0)-(1, 0, 0, 0, \cdots, 0)-(0, 0, 0, 1, \cdots, 0)$ is a chord-less cycle. So $\Gamma(R_1\times\cdots\times R_k)$ is non-chordal.
\end{proof}
\begin{lemma}\label{3.15} $\Gamma(\mathbb{Z}_{n_1}\times\mathbb{Z}_{n_2}\times\mathbb{Z}_{n_3})$ where at least one $n_i>2$ is non-chordal.
\end{lemma}
\begin{proof}
Without loss of generality, let $n_3 > 2$. Then, \\
$(1, 0, 0)-(0, 0, 2)-(1, 1, 0)-(0, 0, 1)$ is a chord-less cycle.
\end{proof}
\begin{theorem}\label{3.16} The only chordal $\Gamma(\mathbb{Z}_{n_1}\times \mathbb{Z}_{n_2}\times \cdots\times\mathbb{Z}_{n_k})$ where $n_i \geq 2$ and $k\geq 2$ are $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_p)$, $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_{p^2})$ and $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_2\times\mathbb{Z}_2)$.
\end{theorem}
\begin{proof}
Consider $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_p)$. Since $\Gamma(\mathbb{Z}_p)$ has no vertices, the only vertices of $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_p)$ are $(1, 0)$ or of the form $(0, x)$ where $0<x<p$. So the graph is a star making it chordal. \\
Consider $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_{p^2})$. Assume that $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_{p^2})$ is non-chordal. Then there exists a cycle $C$ of length greater than $3$ that has no chord. Let $v$ be an arbitrary vertex in $C$.\\
Let $v$ have a multiple of $p$ as its second element, $v=(a, bp)$. Then every vertex that is not a neighbor of $v$ in $C$ must have a non-zero non-multiple of $p$ as its second element. Therefore, both neighbors of $v$ must have 0 as their second element so that they share an edge with their other neighbor. So both neighbors of $v$ are $(1, 0)$. We cannot repeat vertices so $v$ cannot have a multiple of $p$ as its second element. That means the only possible vertices in $C$ are $(1, 0)$ and $(0, b)$ where $b$ is a non-zero non-multiple of $p$. A cycle of size $4$ or greater cannot be constructed out of these vertices since we cannot write $(1, 0)$ twice and $(0, b)$ does not share an edge with itself. $C$ cannot be constructed, so $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_{p^2})$ is chordal. \\
Consider $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_2\times\mathbb{Z}_2)$. The graph of $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_2\times\mathbb{Z}_2)$ is shown below and is chordal. \\
\begin{tikzpicture}
\node (a) at (1, 1) {(1, 0, 1)};
\node (b) at (2, 2) {(0, 1, 0)};
\node (c) at (4, 2) {(0, 0, 1)};
\node (d) at (3, 3) {(1, 0, 0)};
\node (e) at (3, 4) {(0, 1, 1)};
\node (f) at (5, 1) {(1, 1, 0)};
\foreach \u/\v in {b/c, c/d, b/d, a/b, c/f, d/e}
\draw (\u) -- (\v);
\end{tikzpicture}\\
To prove the converse, let's assume the opposite. Let there be a chordal $\Gamma(\mathbb{Z}_{n_1}\times \mathbb{Z}_{n_2}\times \cdots\times\mathbb{Z}_{n_k})$ not listed. By Lemma \ref{3.13}, only one $n_i$ can be greater than 2. By Lemma \ref{3.14}, $k \leq 3$. By Theorem \ref{3.12}, if any $\Gamma(\mathbb{Z}_{n_i})$ is non-chordal, $\Gamma(\mathbb{Z}_{n_1}\times \mathbb{Z}_{n_2}\times \cdots\times\mathbb{Z}_{n_k})$ will be non-chordal. So every $n_i$ must be $p^x$, $2p$, or $2p^2$, which was shown by Theorem \ref{3.15}.
So the only possible $\Gamma(\mathbb{Z}_{n_1}\times \mathbb{Z}_{n_2}\times \cdots\times\mathbb{Z}_{n_k})$ are $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_{p^x})$, $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_{2p})$, $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_{2p^2})$, $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_2\times\mathbb{Z}_{p^x})$, $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_2\times\mathbb{Z}_{2p})$ and $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_2\times\mathbb{Z}_{2p^2})$.
In $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_{p^x})$ where $x \geq 3$ and $p$ is prime, $(1, p^{x-1})-(0, (p-1)p)-(1, 0)-(0, p)$ is a chord-less cycle.\\
In $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_{2p})$ where $p \geq 3$ is a prime, $(1, 0)-(0, 4)-(1, p)-(0, 2)$ is a chord-less cycle.\\
In $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_{2p^2})$ where $p \geq 3$ is a prime, $(1, 2p)-(0, p)-(1, 4p)-(0, p^2)$ is a chord-less cycle.\\
By Lemma \ref{3.15}, $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_2\times\mathbb{Z}_{p^x})$, $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_2\times\mathbb{Z}_{2p})$ and $\Gamma(\mathbb{Z}_2\times\mathbb{Z}_2\times\mathbb{Z}_{2p^2})$ are all non-chordal where $p \geq 3$.\\
So there are no other chordal $\Gamma(\mathbb{Z}_{n_1}\times \mathbb{Z}_{n_2}\times \cdots\times\mathbb{Z}_{n_k})$.
\end{proof}
\begin{lemma}\label{3.17} $D(\Gamma(\mathbb{Z}_{n_1}\times \mathbb{Z}_{n_2}\times \cdots\times\mathbb{Z}_{n_k}))$ has an upper bound of $2[D(\Gamma(\mathbb{Z}_{n_1})) + D(\Gamma(\mathbb{Z}_{n_2})) + \cdots + D(\Gamma(\mathbb{Z}_{n_k}))]$.
\end{lemma}
\begin{proof}
Let $d_i$ be the domination number of $\Gamma(\mathbb{Z}_{n_i})$. Then each $\Gamma(\mathbb{Z}_{n_i})$ has a dominating set $D_i$ of size $d_i$. Then consider the sets\\
$A_i = \{ (0, \cdots, 0, v, 0, \cdots, 0) \mid v\in D_i\}$ where the $i$-th slot is filled with an arbitrary vertex in $D_i$ and the rest are 0. Also consider $B_i$, the set of neighbors of each vertex in $A_i$, with only one neighbor for each vertex. Now consider $\cup_{i=1}^k (A_i\cup B_i)$ and an arbitrary vertex $v \in \Gamma(\mathbb{Z}_{n_1}\times \mathbb{Z}_{n_2}\times \cdots\times\mathbb{Z}_{n_k})$.\\
\begin{enumerate}
\item[Case 1:] $v$ has an element $e$ in some $i$th slot that is a vertex of $\Gamma(\mathbb{Z}_{n_i})$.\\
If $e\in D_i$, then $v$ shares an edge with the corresponding vertex in $B_i$, and if $e\notin D_i$, then $v$ shares an edge with some vertex in $D_i$.\\
\item[Case 2:] $v$ has 0 in some $i$th slot.\\
Then $v$ shares an edge with some vertex in $A_i$.\\
So if neither of these cases is true, none of the elements of $v$ can be zero or a vertex in its corresponding $\Gamma(\mathbb{Z}_{n_i})$, so the only neighbor of $v$ is $(0, .., 0)$ which means $v$ is not a vertex. Then $\cup_{i=1}^k (A_i\cup B_i)$ is a dominating set. $A_i$ and $B_i$ both have size $D(\Gamma(\mathbb{Z}_{n_i}))$ since it only has one vertex for each vertex in its corresponding $D_i$. So the size of $\cup_{i=1}^k (A_i\cup B_i)$ is $2(D(\Gamma(\mathbb{Z}_{n_1})) + D(\Gamma(\mathbb{Z}_{n_2})) + \cdots + D(\Gamma(\mathbb{Z}_{n_k})))$ which is an upper bound of the domination number.
\end{enumerate}
\end{proof}
\begin{lemma}\label{3.18} $D(\Gamma(\mathbb{Z}_{n_1}\times \mathbb{Z}_{n_2}\times \cdots\times\mathbb{Z}_{n_k}))$ has a lower bound of $D(\Gamma(\mathbb{Z}_{n_1}))+D(\Gamma(\mathbb{Z}_{n_2}))\\
+\cdots+D(\Gamma(\mathbb{Z}_{n_k}))$.
\end{lemma}
\begin{proof}
Let $D$ be an arbitrary dominating set of $\Gamma(\mathbb{Z}_{n_1}\times \mathbb{Z}_{n_2}\times \cdots\times\mathbb{Z}_{n_k})$. Consider the vertex $v=(1, \cdots, 1, a, 1, \cdots, 1)$ where $a$ in the $i$th slot is a vertex of $\Gamma(\mathbb{Z}_{n_i})$. The only possible neighbors of $v$ are of the form $(0, \cdots, 0, b, 0, \cdots, 0)$ where $b$ is a neighbor of $a$ in $\Gamma(\mathbb{Z}_{n_i})$. Construct a subset $A_i$ that is all vertices in $D$ of the form $(0, .., 0, b, 0, \cdots, 0)$ or $(1, \cdots, 1, b, 1, \cdots, 1)$ where $b$ is a vertex in $\Gamma(\mathbb{Z}_{n_i})$.\\
We claim that arbitrary $A_i$ has a size of at least $d_i$, where $d_i$ is the domination number of $\Gamma(\mathbb{Z}_{n_i})$.\\
Assume otherwise. Then there are less than $d_i$ vertices of the form $(0, \cdots, 0, b, 0, \cdots, 0)$ and $(1, \cdots, 1, b, 1, \cdots, 1)$. Take some $a$ in $\Gamma(\mathbb{Z}_{n_i})$. Since some vertex in $D$ shares an edge with every vertex not in $D$, arbitrary $v=(1, \cdots, 1, a, 1, \cdots, 1)$ either shares an edge with some $(0, \cdots, 0, b, 0, \cdots, 0)$ or is itself in $D$ and therefore in $A_i$. If $v$ is not in $D$, then $v$ shares an edge with some $(0, \cdots, 0, b, 0, \cdots, 0)$ which means $a$ shares an edge with some $b$ in $\Gamma(\mathbb{Z}_{n_i})$. If $v$ is in $D$, then $(1, \cdots, 1, a, 1, \cdots, 1)$ is in $A_i$. Construct a set $H$ that contains all $a$ in the $i$th slot of all such $v$. $H$ forms a dominating set of $\Gamma(\mathbb{Z}_{n_i})$ of size less than $d_i$ which is a contradiction since $D(\Gamma(\mathbb{Z}_{n_i})) = d_i$. So $A_i$ has a size of at least $d_i$.\\
Next, each $A_i$ is disjoint from each other since $b$ in the $i$th slot can never be 0 or 1, which means there will be no duplicate vertices. So the sum of the sizes of each $A_i\subseteq D$ will be greater than or equal to the sum of the domination number of each $\Gamma(\mathbb{Z}_{n_i})$. This is a lower bound of the domination number.
\end{proof}
Combining Lemma \ref{3.17} and \ref{3.18} we get the following.
\begin{theorem}\label{3.19} $D(\Gamma(\mathbb{Z}_{n_1}))+D(\Gamma(\mathbb{Z}_{n_2}))+\cdots+D(\Gamma(\mathbb{Z}_{n_k}))$ $\leq$ $D(\Gamma(\mathbb{Z}_{n_1}\times \mathbb{Z}_{n_2}\times \cdots\times\mathbb{Z}_{n_k}))$ $\leq$ $2[D(\Gamma(\mathbb{Z}_{n_1})) + D(\Gamma(\mathbb{Z}_{n_2})) + \cdots + D(\Gamma(\mathbb{Z}_{n_k}))]$.\\
\end{theorem}
The next theorem talks about the coefficients of a Domination Polynomial.\\
\begin{theorem}\label{3.20} In arbitrary $\Gamma(\mathbb{Z}_{p_1^{\alpha_1}p_2^{\alpha_2}\cdots p_k^{\alpha_k}})$, $k \geq 3$, and each $p_i$ is a distinct prime number, the coefficient $c$ of the smallest degree of the domination polynomial is $(p_1-1)(p_2-1)\cdots(p_k-1)$.
\end{theorem}
\begin{proof}
Consider $\Gamma(\mathbb{Z}_{p_1^{\alpha_1}p_2^{\alpha_2}\cdots p_k^{\alpha_k}})$, $k \geq 3$. Let $n = p_1^{\alpha_1}p_2^{\alpha_2}\cdots p_k^{\alpha_k}$. Construct set $D$ that has exactly one element from each type class $T_{n/p_i}$. Since every vertex of $\Gamma(\mathbb{Z}_{p_1^{\alpha_1}p_2^{\alpha_2}\cdots p_k^{\alpha_k}})$ must be a multiple of some $p_i$, every vertex shares an edge with some vertex in some $T_{n/p_i}$ and therefore, $D$. So $D$ is a dominating set.\\
We claim that for arbitrary minimal dominating set $D$, exactly one vertex must be present from each type class $T_{n/p_i}$.\\
Assume the opposite. Then there exists a dominating set $D$ that either doesn't have a vertex from some type class $T_{n/p_x}$ or has an extra vertex not in any type class $T_{n/p_x}$. Let $D$ not have any vertices from $T_{n/p_x}$. Since the only neighbors of vertices in $T_{p_x}$ are in $T_{n/p_x}$, every vertex in $T_{p_x}$ must be in $D$. $p_x \neq n/2$ because otherwise $2p_x=n$, $2p_x=p_1^{\alpha_1}p_2^{\alpha_2}\cdots p_k^{\alpha_k}$ and $2 = \frac{p_1^{\alpha_1}p_2^{\alpha_2}\cdots p_k^{\alpha_k}}{p_x}$ which is not possible. So by Lemma 1.10, $T_{p_x}$ has more than one element. If $D$ contains at least one vertex from all $T_{n/p_x}$ except $T_{p_x}$, then the size of $D$ is larger than that of the dominating set constructed above. So $D$ is not a minimal dominating set. If $D$ lacks any vertices from other $T_{n/p_i}$, then for each vertex missing, two or more must be added from $T_{p_x}$. In which case $D$ would also not be minimal. So $D$ must contain at least one vertex from each $T_{n/p_i}$. There also cannot be any additional vertices, either from type classes not of the form $T_{n/p_x}$, nor multiple from the same type class. Otherwise $D$ would not be minimal.\\
Since there are $p_i - 1$ vertices in $T_{n/p_i}$, the total amount of possible minimal dominating sets $D$ is $(p_1-1)(p_2-1)\cdots(p_k-1)$.
\end{proof}
\begin{theorem}\label{3.21} $\Gamma(\mathbb{Z}_{p_1}\times\mathbb{Z}_{p_2}\times\cdots\times\mathbb{Z}_{p_k})$ is k-partite where every $p_i$ is prime.
\end{theorem}
\begin{proof}
Consider some graph $\Gamma(\mathbb{Z}_{p_1}\times\mathbb{Z}_{p_2}\times\cdots\times\mathbb{Z}_{p_k})$. Construct a collection of subsets $S_i$ which is the set of all vertices with a non-zero term in the $i$th slot and zero in any slot less than $i$.\\
$S_1 = \{ (a, \cdots) \mid a\in \mathbb{Z}_{p_1}, a\neq 0 \}$\\
$S_2 = \{ (0, a, \cdots) | a\in \mathbb{Z}_{p_2}, a\neq 0 \}$\\
$\cdots$\\
$S_k = \{ (0, 0, \cdots, 0, a) | a\in \mathbb{Z}_{p_k}, a\neq 0 \}$\\
We claim that no two vertices $u, v$ from the same subset $S_x$ share an edge.\\
Consider arbitrary vertices $u$ and $v$ in some $S_x$. By the definition, the $x$th slot of $u$ and $v$ has a non-zero term from $\mathbb{Z}_{p_x}$. Since $\mathbb{Z}_{p_x}$ has no non-zero, zero divisors, the terms in the $x$th slot will not multiply to get 0, so $u$ and $v$ do not share an edge.\\
Now we claim that all the $S_i$ form a partition of $\Gamma(\mathbb{Z}_{p_1}\times\mathbb{Z}_{p_2}\times\cdots\times\mathbb{Z}_{p_k})$.\\
Assume there is a vertex $v$ not in any $S_i$. Let $v$ have a non-zero element $a$ in the $x$th slot. Then by definition it is in $S_x$, or in some $S_i$, $i<x$ if some $i$th slot also has a non-zero element. So $v$ cannot have any non-zero elements and thus, $v=0$ which is not a vertex. So $\cup S_i$ is the vertex set of $\Gamma(\mathbb{Z}_{p_1}\times\mathbb{Z}_{p_2}\times\cdots\times\mathbb{Z}_{p_k})$.\\
Assume there is a $v$ in multiple $S_i$, say $S_x$ and $S_y$. Without loss of generality, let $x<y$. Then the $x$th slot of $v$ has a non-zero term $a$ because it is in $S_x$. But the $x$th slot must be zero because $v$ is in $S_y$. That is a contradiction, so there are no overlaps in the partition.\\
$S_i$ is a k-partite partition, so $\Gamma(\mathbb{Z}_{p_1}\times\mathbb{Z}_{p_2}\times\cdots\times\mathbb{Z}_{p_k})$ is k-partite.
\end{proof}
\section{Zero divisor graph of the poset $D_n$}
Zero divisor graph of a poset has been studied in ~\cite{jW99}, ~\cite{jW100}, ~\cite{jW101}. We always have that the clique number of the zero divisor graph of a ring does not exceed its chromatic number. Beck conjectured that for an arbitrary ring $R$, they are the same. But Anderson and Naseer~\cite{jW102} have shown that this is not the case in general; namely, they presented an example of a commutative local ring $R$ with $32$ elements for which the chromatic number is strictly bigger than the clique number. In ~\cite{jW102} Nimbhorkar, Wasadikar and DeMeyer have shown that Beck's conjecture holds for meet-semilattices with $0$, i.e., commutative semigroups with $0$ in which each element is idempotent. In fact, it is valid for a much wider class of relational structures, namely for partially ordered sets (posets, briefly) with $0$. Now, to any poset $(P, \leq)$ with a least element $0$ we can assign the graph $G$ as follows: its vertices are the nonzero zero divisors of $P$, where a nonzero $x \in P$ is called a zero divisor if there exists a nonzero $y\in P$ so that $L(x, y)=\{0\}$, where $L(x, y)=\{z\in P\mid z\leq x, z\leq y\}$. And $x, y$ are connected by an edge if $L(x, y)=\{0\}$.
We discuss here some properties of the zero divisor graph of a specific poset $D_n$. Very often we used the prime factorization of the positive integer $n$. By abuse of notation, let us call $D_n$ as the zero divisor graph of the poset $D_n$. Note that, the vertex set of $D_n$ is the set of all factors of $n$ that are not divisible by some prime factor of $n$. Also, note that, two vertices in $D_n$ are connected by an edge if and only if they are mutually co-prime.\\
\begin{remark}[Properties of $D_n$]
$\phantom e$\\
\begin{enumerate}
\item[i.] If $n =p^{m}$ for some prime $p$ and positive integer $m$, then $D_{n}$ is trivial.\\
So from now on consider $D_n$ where $n \neq p^{m}$ where $p$ and $m$ are as mentioned.\\
\item [ii.] The diameter of $D_n$ is 3 iff $n$ has three distinct prime factors namely $p$, $q$, $r$. This is shown by the path $pq - r - p - qr$. Otherwise, the diameter is 1 or 2, as $D_{p^mq^n}$ is complete bipartite which has diameter 2 or in the case of $m = n = 1$ has diameter 1. \cite{jW103} shows zero divisors of a poset have diameter of 1, 2, or 3.
\item[iii.] $D_n$ is complete only when $n=pq$, where $p$ and $q$ are two distinct primes. $D_n$ is complete bipartite iff $n = p^{m}q^{s}$ where $m$ and $s$ are two positive integers.\\
\item[iv.]We have the clique number of $D_n$ and a few coefficients of the clique polynomial. The clique number of $D_n$ is the number of distinct prime factors of $n$. If $n= p_{1}^{\alpha_{1}}p_{2}^{\alpha_{2}}p_{3}^{\alpha_{3}}\cdots p_{r}^{\alpha_{r}}$ where $p_{i}$'s are distinct primes $\forall i$, any set of vertices $\{ p_{1}^{\beta_{1}}, p_{2}^{\beta_{2}}, p_{3}^{\beta_{3}}\cdots p_{r}^{\beta_{r}}\}$, where $1\leq \beta_{i}\leq \alpha_{i}$ $\forall i$ forms a maximal clique. This is a clique because no two vertices in a clique can have a common prime factor. Also, if any vertex of a clique has more than one prime factor, the clique will not be maximal. Hence the clique number is $r$, the number of distinct primes of $n$. And the leading coefficient in the clique polynomial is $\alpha_{1}\alpha_{2}\cdots\alpha_{r}$. The coefficient of $x^{r-1}$ is $\sum_{i=1}^{r}(\alpha_1\alpha_2\cdots \alpha_{i-1}\alpha_{i+1}\cdots \alpha_{r})+\binom {r}{2}\alpha_1\alpha_2\cdots\alpha_r.$ Reason: Consider a clique of size $r-1$. If all the vertices have single prime factors then, there are $\sum_{i=1}^{r}(\alpha_1\alpha_2\cdots \alpha_{i-1}\alpha_{i+1}\cdots \alpha_{r})$ many of this type, as a typical clique of this type is a set of the form $\{p_{1}^{\beta_{1}}, p_{2}^{\beta_{2}}, \cdots p_{i-1}^{\beta_{i-1}}, p_{i+1}^{\beta_{i+1}}, \cdots p_{r}^{\beta_{r}} \}$, where $1\leq \beta_{j}\leq \alpha_{j}$ $\forall j \in \{1, 2, \cdots r\}$. Otherwise, exactly one vertex will contain two primes. And in that case we will obtain $\binom {r}{2}\alpha_1\alpha_2\cdots\alpha_r$ many such clique sets with cardinality $r-1$. No element in a clique set can have three distinct primes in its prime factorization. Hence the result follows.\\
\item[v.]The domination number of $D_n$ is the number of distinct prime factors of $n$, same as the clique number, as any dominating set must not omit a prime factor of $n$. If some $p_{i}$ is missing from a set of vertices $V$, then the vertex $p_{1}p_{2}\cdots p_{i-1}p_{i+1}\cdots p_{r}$ is not adjacent to any vertex in $V$. Furthermore, if we let $V$ be the set of all distinct primes of $n$, each vertex in $D_n$ must share an edge with at least one vertex in $V$ because each vertex in $D_n$ must omit at least one prime of $n$ from its prime factorization.\\
\item[vi.]$D_{n}$ is regular iff $n= (pq)^{m}$ for some positive integer $m$. If $n = p^{m}q^{r}, m\neq r$, then $D_{n}= K_{m, r}$ which is not regular. Then, if $n$ has more than two distinct primes in its prime factorization, $p$ and $pq$ are vertices with different degrees. Every vertex that shares an edge with $pq$ shares an edge with $p$, but $p$ shares an edge with $q$ while $pq$ does not, making the graph non-regular.\\
\item[vii.]In \cite{jW100}, it is discussed that the girth of the zero divisor graph of any poset is 3, 4, or $\infty$. The girth of $D_{n}$ is $\infty$ iff $n= p^{m}q$, where $p$ and $q$ are two distinct primes and $m$ is a positive integer bigger than $1$. The girth of $D_{n}$ is $4$, if and only if $n= p^{m}q^{r}$, where $p$ and $q$ are two distinct primes and $m$ and $r$ are both positive integers bigger than $1$. Otherwise, the girth of $D_{n}$ is $3$, because if $n$ has at least $3$ different prime factors $p$, $q$ and $r$, then $p-q-r-p$ is a triangle in $D_{n}$.\\
\item[viii.] $D_n$ is not perfect if $n$ is the product of at least five different primes $p, q, r, s, t$ in its prime factorization; then $ps-qt-pr-qs-tr-ps$ is a cycle of length five in $D_n$. Hence by the Strong perfect graph theorem $D_n$ is not perfect.\\
Suppose $n$ has 4 distinct prime factors $p$, $q$, $r$ and $s$. Assume there is an odd cycle of length 5 or greater that contains a vertex $v$ that is the product of two such primes. Let $v = p^xq^y$. Then the two neighbors of $v$ cannot be a multiple of $p$ or $q$. Suppose the neighbors both consist of $r^a$ for some positive integer $a$. Then, we get part of the cycle as $r^a - p^xq^y - r^b$ for another positive integer $b$. Then, $r^a$ will necessarily share an edge with the other neighbors of $r^b$ making the cycle length 4. So the neighbors of $v$ must have both $r$ and $s$. Additionally, this part of the cycle must be of the form $r^a - p^xq^y - r^ws^z$, otherwise we get a cycle of length $4$ again. But any vertex that shares an edge with $r^ws^z$ must also share an edge with $r^a$ making such a cycle impossible. This means any odd cycle of length greater than 5 cannot contain a vertex with two or more prime factors, making an odd cycle of length greater than 4 impossible. The other two situations, when $v$ consists of only one prime, or three primes, also give a contradiction. Thus $D_n$ is perfect iff $n$ has 4 or fewer prime factors (the case of fewer than $4$ prime factors follows similarly).\\
\item[ix.] $D_n$ is chordal iff $n = p^mq$ or $n = pqr$ where $p$, $q$ and $r$ are distinct primes and $m \geq 1$. For if $n$ is not of that form, $p-q-p^{2}-q^{2}-p$ or $p - q - p^2 - qr - p$ or $p - r - pq - rs - p$ will give holes of length greater than $3$ in respective $D_n$'s.\\
\item[x.]Let $n$ be a square free positive integer. Then, its simplicial vertices are precisely those factors of $n$ which miss exactly one prime in its prime factorization. Now, suppose $n$ is not square free. Then, if all primes in its prime factorization are not square free, it has no simplicial vertex. Otherwise, the simplicial vertices are precisely those which miss exactly one square free prime factor. For example, if $n = p^2q^2r$, $pq$, $p^2q$, $pq^2$ and $p^2q^2$ are the only simplicial vertices because $r$ is the only square free prime factor.\\
\item[xi.] The only planar $D_n$ has $n$ of the form $n = p^mq$, $p^mq^2$, $pqr$ or $p^2qr$. First, let $n$ have only 2 prime factors. We will first examine this case. If $n = p^mq^l$ where $l \geq 3$ and $m \geq 3$, then $K_{3, 3}$ is a subgraph of $D_n$ and therefore a minor of $D_n$. Then by Wagner's theorem, $D_n$ is non planar. But in the case of $p^mq$, $D_n$ is a star, so it is planar. And in the case of $p^mq^2$, the graph can be drawn without any crossing edges. Next, let's examine $n$ with 3 prime factors. If $n = pqr$ or $n = p^2qr$ the graph is clearly planar if drawn. But, if $n = p^mqr$ where $m \geq 3$, The subgraph consisting of $p$, $p^2$, $p^3$, $q$, $r$ and $qr$ form $K_{3, 3}$ if we delete the edge between $q$ and $r$. Then by Wagner's theorem the graph is non-planar since $K_{3, 3}$ is a minor. Next, if $n = p^mq^lr$, where $m \geq 2$ and $l \geq 2$ the set of vertices $q$, $q^2$, $p$, $p^2$, $r$, $pr$ and $qr$ is a subdivision of $K_5$. Then, by Kuratowski's theorem, the graph is non-planar. So the only planar $D_n$ with only 3 primes in $n$ are $pqr$ and $p^2qr$. Lastly, consider the case where $n$ has 4 primes in its prime factorization, $n = pqrs$. Then, the vertex set of $p$, $q$, $r$, $s$, $pq$ and $rs$ can be made isomorphic to $K_5$ by contracting the edge between $pq$ and $rs$ to make a single vertex. Therefore, $K_5$ is a minor of $D_n$ for this case, and by Wagner's theorem the graph is non-planar.\\
\item[xii.] $D_n$ is Eulerian iff the power of each prime in the prime factorization of $n$ is even.
For, if $n$ has a prime power $p^{\alpha}$ that appears in its prime factorization where $\alpha$ is odd, then the vertex $\frac{n}{p^\alpha}$ has odd degree; otherwise every vertex has even degree.\\
\item[xiii.] If $n$ is square free, then we have the edge cardinality of $D_n$ as $\sum_{i=1}^{r-1}2^{r-i-1}\binom{r}{i}-2^{r-1}+1$, where $r$ is the number of distinct primes of $n$.
For, if we consider $n= p_{1}p_{2}\cdots p_{r}$, where $p_{i}$'s are distinct primes, then the degree of each vertex $p_{i}$ is $\sum_{j=1}^{r-1}\binom{r-1}{j}= 2^{r-1}-1$ giving $r(2^{r-1}-1)$ to the degree sum of the vertices. Similarly each vertex $p_{i}p_{j}$ is adjacent to $\sum_{l=1}^{r-2}\binom{r-2}{l} =2^{r-2}-1$
many vertices, giving $\binom{r}{2}(2^{r-2}-1)$ in the degree sum. Proceeding in this way, we obtain that the sum of the vertex degrees is $\sum_{i=1}^{r-1}\binom{r}{i}(2^{r-i}-1) = \sum_{i=1}^{r-1}\binom{r}{i}2^{r-i}-2^{r}+2$. Then, as the sum of vertex degrees is twice the edge cardinality, the result follows.\\
\item[xiv.] We have a lower bound for Independence number of $D_n$. Let,
$n= p_{1}^{\alpha_{1}}p_{2}^{\alpha_{2}} \cdots p_{r}^{\alpha_{r}}$, where $p_{i}$'s are distinct primes. Then if $I$ is the independence number of $D_n$, \\$ I\geq \max_{1\leq i\leq r}[ \alpha_{i}\{1+\sum \alpha_{i_1}\alpha_{i_2}\cdots \alpha_{i_l} |\alpha_{i}\neq \alpha_{i_j}\neq \alpha_{i_k}, j, k \in\{1, 2, \cdots l\}\}]$, where $\{\alpha_{i_1}, \alpha_{i_2}, \cdots, \alpha_{i_l}\}$ varies over all non-empty proper subsets of $\{\alpha_1, \alpha_2\cdots \alpha_{i-1}, \alpha_{i+1}\cdots \alpha_r\}$\\
\begin{proof} Consider any independent set containing $p_{i}$ from the list of primes in the prime factorization of $n$. Then, the largest possible independent set containing $p_{i}$ will have $p_{i}$ as a factor in all its vertices. So, that must contain $p_i, p_{i}^{2}, \cdots, p_{i}^{\alpha_{i}}$, giving $\alpha_{i}$ many vertices in the independent set. In order to maximize the cardinality of the set, we need to consider all possible factors of $n$ that have $p_{i}$ as a factor and that miss at least one prime in the prime factorization of $n$. Thus we get\\
$p_{i}p_{1}, p_{i}p_{1}^{2}, \cdots p_{i}p_{1}^{\alpha_{1}}, p_{i}p_{2}, p_{i}p_{2}^{2}, \cdots p_{i}p_{2}^{\alpha_{2}}, \cdots p_{i}p_{r}, p_{i}p_{r}^{2}, \cdots p_{i}p_{r}^{\alpha_{r}}$ are inside the independent set, giving $\alpha_{i}(\alpha_{1}+\alpha_{2}+\cdots \alpha_{i-1}+\alpha_{i+1}+\cdots \alpha_{r})=\alpha_{i}\sum_{j=1, j\neq i}^{r}\alpha_{j}$ many vertices. Similarly, we get $\alpha_{i}\sum_{j=1, i\neq j\neq k}^{r}\alpha_{j}\alpha_{k}$ more vertices from the factors of $n$ that contain $p_{i}$ and that are products of three primes. Proceeding in this way, we get the necessary result.
\end{proof}
\item [xv.] Let, $n$ be square free. Then, a lower bound of the Independence number of $D_n$ is $ 2^{r-1}-r$, where $r$ is the number of prime factors of $n$. If,
$n= p_{1}p_{2} \cdots p_{r}$, where $p_{i}$'s are distinct primes, then whenever $I$ is the independence number of $D_n$, $I \geq 2^{r-1}-r$.
\begin{proof} Consider any independent set in $D_n$. If we pick any element from that set that is divisible by some $p_{i}$, then all proper divisors of $n$ that have $p_{i}$ as a factor form an independent set of $D_n$, and the cardinality of that set is $2^{r-1}-r$. Hence the result follows.
\end{proof}
\end{enumerate}
\end{remark}
\section{Acknowledgment}
The authors acknowledge Dr. Lisa DeMeyer for introducing this topic to us by an excellent presentation, which motivated us to work in this area.
\end{document} |
\begin{document}
\begin{frontmatter}{}
\title{Strategic Decompositions of Normal Form Games: Zero-sum Games and Potential Games\tnoteref{t1}}
\tnotetext[t1]{\today.
\setstretch{0.8}
The research of S.-H. H. was supported by
the Ministry of Education of the Republic of Korea and the National Research Foundation of Korea (NRF-2016S1A3A2924944). The research of L. R.-B. was supported
by the US National Science Foundation (DMS-1109316). We greatly appreciate comments by the advisory editor and two anonymous referees. Especially, we would like to express special thanks to the anonymous referee who gave detailed suggestions on the revision. We also would like
to thank Murali Agastya, Samuel Bowles, Yves Gu\'{e}ron, Nayoung Kim,
Suresh Naidu, Jonathan Newton, David Nielsen and Seung-Yun Oh for their helpful
comments. Special thanks to William Sandholm who read the early version
(\url{http://arxiv.org/abs/1106.3552}) of this paper carefully and
gave us many helpful suggestions.}
\author[A1]{Sung-Ha Hwang\corref{cor1}}
\ead{[email protected]}
\author[A2]{Luc Rey-Bellet}
\ead{[email protected] }
\cortext[cor1]{Corresponding author. }
\address[A1]{Korea Advanced Institute of Science and Technology (KAIST), Seoul, Korea}
\address[A2]{Department of Mathematics and Statistics, University of Massachusetts
Amherst, MA, U.S.A.}
\begin{abstract}
\setstretch{0.75}
We introduce new classes of games, called \emph{zero-sum equivalent games} and \emph{zero-sum equivalent potential games}, and prove decomposition theorems involving these classes of games. We say that two games are ``strategically equivalent'' if, for every player, the payoff differences between two strategies (holding other players' strategies fixed) are identical. A zero-sum equivalent game is a game that is strategically equivalent to a zero-sum game; a zero-sum equivalent potential game is a potential game that is strategically equivalent to a zero-sum game. We also call a game ``normalized'' if the sum of one player's payoffs, given the other players' strategies, is zero. We present decomposition results involving these games as component games and study the equilibrium properties of these new games. One of our main results shows that any normal form game, whether the strategy set is finite or continuous, can be uniquely decomposed into a zero-sum normalized game, a zero-sum equivalent potential game, and an identical interest normalized game, each with distinctive equilibrium properties. We also show that two-player zero-sum equivalent games with finite strategy sets generically have a unique Nash equilibrium and that two-player zero-sum equivalent potential games with finite strategy sets generically have a strictly dominant Nash equilibrium.
\end{abstract}
\begin{keyword}
decomposition, zero-sum games, potential games.\\
\textbf{JEL Classification Numbers:} C72, C73
\end{keyword}
\end{frontmatter}
\thispagestyle{empty}
\tocless \section{Introduction\setcounter{page}{1}}
When two people start a joint venture, their interests are aligned.
In the division of a pie or unclaimed surpluses, however, someone's gain always comes at the expense of somebody else. So-called identical interest games---games in which all players have the same payoff function---and zero-sum games serve as polar models for studying these social interactions. Two games can be regarded as ``strategically equivalent'' if, for every player, the payoff differences between two strategies (holding other players' strategies fixed) are identical. That is, in two strategically equivalent games, strategic variables such as best responses
of players are the same.\footnote{See Definition \ref{def:st-eq} and \citet{Monderer96}
and \citet{Weibull95}; see also \citet{Morris04} for best response
equivalence.} Potential games---a much-studied class of games in the literature---are precisely those games that are strategically equivalent to identical interest games. We also introduce a class of games, called ``normalized'' games, in which
the sum of one player's payoffs, given the other players' strategies,
is always zero.
We are interested in zero-sum games and their variants---(i) games that are strategically equivalent to a zero-sum game, accordingly named \emph{zero-sum equivalent games}, (ii) potential games that are strategically equivalent to a zero-sum game, named \emph{zero-sum equivalent potential games} and (iii) \emph{zero-sum normalized games}. Our interest in zero-sum equivalent games is motivated by their definition being analogous to the definition of potential games. Potential games retain all the attractive properties of identical interest games (e.g., the existence of a potential function) because they are strategically equivalent to identical interest games. Thus, zero-sum equivalent games are expected to possess similar desirable properties of zero-sum games as well. It is well-known that two-player zero-sum games with a finite number of strategies have a mini-max solution which is the same as a Nash equilibrium.\footnote{Recently, there have been generalizations of these properties and characterizations for a special class of $n$-player zero-sum games (\citet{Bregman88, Cai15}).}
To examine these classes of zero-sum related games more systematically, we develop decomposition methods of normal form games and obtain several constituent components belonging to these classes. We also study the equilibrium properties of these new games such as the uniqueness/convexity of Nash equilibria and the existence of a dominant equilibrium. Based on these, we provide two specific applications: (i) equilibrium analysis of two-player finite strategy games and (ii) equilibrium analysis of contest games. In the first application, we illustrate that decomposition can isolate the effect of component games on the Nash equilibrium of the original game (see Figures \ref{fig:dep-eq} and \ref{fig:perturb}) and show that the total number of Nash equilibria of a given game, depending on some conditions in terms of its decomposition component games, can be maximal or minimal. In the second application, the uniqueness of Nash equilibria for rent-seeking games is shown by using the special property of zero-sum equivalent games. This highlights that identifying a zero-sum equivalent game via decomposition facilitates equilibrium analysis.
\begin{table}
\begin{tabular}{|c|c|c|}
\hline
4,4$ $ & -1,1 & 1,-1$ $\tabularnewline
\hline
1,-1 & 2,2 & -2,0$ $\tabularnewline
\hline
-1,1 & 0,-2 & 2,2\tabularnewline
\hline
\end{tabular}
=$\underbrace{
\begin{tabular}{|c|c|c|}
\hline
2,2$ $ & -1,-1$ $ & -1,-1$ $\tabularnewline
\hline
-1,-1 & 2,2 & -1,-1\tabularnewline
\hline
-1,-1 & -1,-1 & 2,2\tabularnewline
\hline
\end{tabular}}_{=I}$
+$\underbrace{
\begin{tabular}{|c|c|c|}
\hline
0,0 & -1,1 & 1,-1\tabularnewline
\hline
1,-1 & 0,0 & -1,1\tabularnewline
\hline
-1,1 & 1,-1$ $ & 0,0\tabularnewline
\hline
\end{tabular}}_{=Z}$
+$\underbrace{
\begin{tabular}{|c|c|c|}
\hline
1,1 & 1,0 & 1,0\tabularnewline
\hline
0,1 & 0,0 & 0,0\tabularnewline
\hline
0,1 & 0,0 & 0,0\tabularnewline
\hline
\end{tabular}}_{=B}$
+$\underbrace{
\begin{tabular}{|c|c|c|}
\hline
1,1 & 0,1 & 0,1\tabularnewline
\hline
1,0 & 0,0 & 0,0\tabularnewline
\hline
1,0 & 0,0 & 0,0\tabularnewline
\hline
\end{tabular}}_{=E}$
\protect\protect\caption{\textbf{Illustration of a decomposition.} This example
illustrates one of our main results. Here, $I$ is an identical interest game,
$Z$ is a zero-sum game (the Rock-Paper-Scissors game), $B$ is a
game in which the first strategy is the dominant strategy and $E$
is called a ``non-strategic'' game, in which, for every player, the payoff
differences between two strategies (holding other players' strategies
fixed) are identical. Observe that $I$ and $Z$
are ``normalized'' in the sense that the column sums and row
sums of the payoffs are zeros. }
\label{tab:1}
\end{table}
One of our main decomposition results (Theorem \ref{thm:main}) shows that any normal form game, whether the strategy set is finite or continuous, can be decomposed as follows: (i) an identical interest ``normalized''
component ($I$ in Table \ref{tab:1}), (ii) a zero-sum ``normalized''
component ($Z$ in Table \ref{tab:1}), (iii) a zero-sum equivalent potential component---component equivalent to both a zero-sum and identical interest game ($B$ in Table \ref{tab:1}) and (iv) a nonstrategic component ($E$ in Table \ref{tab:1}). Most popular zero-sum games, such as Rock-Paper-Scissors
games and Matching Pennies games, belong to the class of
zero-sum normalized games (see also Cyclic games in \citet{Hofbauer00}).
This study makes the following contributions.
First, we develop a more general way of decomposing normal form games than existing methods. Existing decomposition methods of normal form games, such as in \citet{Sandholm10}, \citet{Candogan2011} and \citet{HandR11}, are limited to finite strategy set games, relying on decomposition methods of tensors (or matrices) or graphs. Our new insights lie in viewing the set of all games as an abstract space---a vector space.
In the vector space of games, we decompose a game into a constituent game and its algebraic complement, and our decompositions correspond to direct sum representations of the vector space (Theorem \ref{thm:main}). In this space, commuting projection mappings are used to single out subspaces and their algebraic complementary subspaces.\footnote{ \citet{Sandholm10} also uses orthogonal projections to obtain decomposition components; however, the orthogonal projections in that paper cannot extract potential games and zero-sum equivalent games.
The decomposition by \citet{Candogan2011} relies on the Helmholtz decomposition of flows on the graph for games with finite strategy sets and uses Moore--Penrose inverses of matrix operators. They also provide orthogonal projections onto the potential component space and harmonic component space. However, their definition of potential component games is different from the definition by \citet{Monderer96} (see Appendix \ref{sec:existing}). Besides, decomposition results of these two existing studies are limited to finite strategy games.} This approach allows us to obtain decomposition results which hold for an arbitrary normal form game, whether finite or continuous (Theorem \ref{thm:main}). In this way, our method shows a unified and transparent mechanism of decompositions of games, which may be modified to decompose other classes of games.
\com{
In the Hilbert space of games, we decompose games into a constituent game and its orthogonal complement (Theorem \ref{thm:main} (iii), (iv)). A representation of a Hilbert space as an orthogonal sum is useful in the sense that we can naturally characterize a class of games by examining their orthogonal complements. For example, the sufficiency
and necessity of the well-known Monderer and Shapley cycle condition for potential games (Theorem 2.8 in \citet{Monderer96}) can be proved by showing that this condition requires that potential games are orthogonal to all zero-sum normalized games \citep{HandR11}. In addition, the orthogonal structure also allows us to extend the vector space decomposition result, Theorem \ref{thm:main} (ii), to the class of $n>2$ player games with finite strategy games. Our Hilbert space decomposition results also hold for continuous payoff function games (Theorem \ref{thm:main} (iv)). The extension of these Hilbert space results to discontinuous payoff function games involves subtle issues related with the identification of payoff functions that are equal almost everywhere, and we discuss issues in more detail in Section \ref{sec:con}.
}
Second, we provide explicit expressions for projections whose ranges and kernels are subspaces of potential games, zero-sum normalized games, identical interest normalized games, zero-sum equivalent games, and zero-sum equivalent potential games. These explicit formulas provide algorithms to extract the component games from a given game as well as yield tests for potential games and zero-sum equivalent games. Then, the extracted components game can be used to understand the equilibrium properties of the original games (see Section \ref{sec:app}). \com{In this way, our decompositions systematically reveal the underlying equilibrium structure of games of interest. near potential games, explicit formula}
Third, to derive explicit expressions for projections, we find useful characterizations of various games. In particular, we provide \emph{new} characterizations (to our knowledge) of potential games (Proposition \ref{prop:pot}, equations \eqref{eq:pot-con2}, \eqref{eq:potent-char}). These new characterizations, along with existing tests and characterizations, may be used to shed light on the structures of potential games---the topic that we leave for future research.
The remainder of this paper is organized as follows. In Section \ref{sec:main-thm}, we present a basic setup and the main decomposition results, along with an example illustrating strategic equivalence.
Section \ref{sec:zero-eq} presents our results on the equilibrium properties of component games. In Section \ref{sec:app}, we provide the two applications and Section \ref{sec:con} concludes the paper. To streamline the presentation, we relegate most of the proofs to the appendix. Also, in Appendix \ref{sec:existing}, we compare our results to existing decomposition results.
\tocless \section{Decomposition Theorems \label{sec:main-thm}}
\tocless \subsection{Basic setup: payoff function space \label{subsec:semi-norm}}
Consider a collection of measurable spaces $S_i$ with a $\sigma$-algebra for $i=1, \cdots, n$. Let the product measurable space with the product $\sigma$-algebra be $S:= \prod_{i=1}^{n} S_i$. For each $i$, $f^{(i)}:S\to\mathbb{R}$ is a real-valued measurable function and let $f:=(f^{(1)},$ $f^{(2)},\cdots,f^{(n)})$. An $n$-player game $(N,S,f)$ is specified by a set of players, $N=\{1,2,\cdots,n\}$; a set of strategy profiles, $S$; and a
payoff function, $f$. Thus, given $N$ and $S$, a game is uniquely identified by a vector-valued
payoff function $f$. For each $i$, let $m_i$ be a measure on $S_i$ and $m$ be the product measure, $dm=\prod_{i=1}^n m_i$. We succinctly write an $n$-fold integration as follows:
\[
\int f^{(i)} dm = \int\cdots\int f^{(i)}(s_{1},\cdots,s_{n}) dm_{1}(s_{1})\cdots dm_{n}(s_{n}).
\]
We will consider a set of games with integrable payoff functions and thus define a semi-norm of a payoff function,$\left \Vert \cdot \right \Vert$, as follows
\[
\left \Vert f \right \Vert := \sum_{i=1}^n \int |f^{(i)}| dm
\]
and consider the following vector space of games:
\begin{equation} \label{eq:L}
\mathcal{L}:=\{f:S\to\mathbb{R}^{n}\textnormal{ measurable and }\left\Vert f\right\Vert <\infty\}.
\end{equation}
\com{
Since we are interested in square-integrable payoff functions, we introduce a semi-norm of a payoff function and a scalar product of payoff functions as follows:
\begin{equation}\label{eq:semi-norm}
\left\Vert f\right\Vert :=(\sum_{i=1}^{n}\int\left\vert f^{(i)}\right\vert ^{2}dm)^{1/2},\,\,\,\,\,\left\langle f,g\right\rangle :=\sum_{i}\int f^{(i)}g^{(i)}dm.
\end{equation}
and becomes a Hilbert space under identification of $m$-almost everywhere same functions. In Section \ref{subsec:semi-norm}, we view $\mathcal{L}$ as a vector space.}
\begin{table}
\center
\scalefont{0.95}
\begin{tabular}{c|l}
\hline
Notation & Name \tabularnewline
\hline
$\mathcal{I}$ & \textbf{I}dentical interest games \tabularnewline
$\mathcal{Z}$ & \textbf{Z}ero-sum games \tabularnewline
$\mathcal{N}$ & \textbf{N}ormalized games \tabularnewline
$\mathcal{E}$ & non-strategic games (define \textbf{E}quivalence relation) \tabularnewline
\hline
$\mathcal{Z}+\mathcal{E}$ & \textbf{Z}ero-sum \textbf{E}quivalent games \tabularnewline
$\mathcal{I}+\mathcal{E}$ & potential games (\textbf{I}dentical interest \textbf{E}quivalent games) \tabularnewline
\hline
$\mathcal{B} =$ & (\textbf{B}oth) zero-sum equivalent (and) potential games \tabularnewline
$(\mathcal{Z}+\mathcal{E})\cap(\mathcal{I}+\mathcal{E})$ \tabularnewline
\hline
$\mathcal{Z\cap N}$ & \textbf{Z}ero-sum \textbf{N}ormalized games \tabularnewline
$\mathcal{I \cap N}$ & \textbf{I}dentical interest \textbf{N}ormalized games \tabularnewline
\hline
\end{tabular}
\caption{\textbf{Notation}}
\label{tab:notation}
\end{table}
We assume that $S_i$ is either a finite set or a subset of $\mathbb{R}$ and associate a specific measure with each case as follows. If $S_i$ is a finite set, we suppose that $m_{i}$ is the counting measure with the natural $\sigma$-algebra of all subsets of $S_i$. If $S_{i}$ is a subset of $\mathbb{R}$, we assume that $S_{i}$ is bounded and choose $m_{i}$ to be the Lebesgue measure restriction on the Borel $\sigma$-algebra of $S_i$. We call a game with finite numbers of strategies a \emph{finite game} and a game with continuous strategy spaces a \emph{continuous game}.
We next introduce several classes of games of interest. Following the standard convention, $s_{-i}$ denotes the strategy profile of all players except player $i$.
\begin{defn} \label{def:space}
We define the following subspaces of $\mathcal{L}$: \\
(i) The space of {\em identical interest games}, $\mathcal{I}$, is defined
by
\setstretch{0.7}
\[
\mathcal{I}:=\{ f\in\mathcal{L}:f^{(i)}(s)=f^{(j)}(s) \text{ for all }i,j \text{ and for all } s \}\footnote{A common interest game is sometimes used to refer to an identical interest game (see \citet{Sandholm10Book}). However, \citet{Aumann89} define a common interest game as a game that has a single payoff pair that strongly Pareto dominates all other payoff pairs. We appreciate an anonymous referee for pointing out this.} \,.
\]
(ii) The space of {\em zero-sum games}, $\mathcal{Z}$, is defined by
\[
\mathcal{Z}:= \{ f\in\mathcal{L}:\sum_{l=1}^{n}f^{(l)}(s)=0 \text{ for all } s \} \,.
\]
(iii) The space of {\em normalized games}, $\mathcal{N}$, is defined by
\begin{align}\label{eq:normalize}
\mathcal{N}:=\{ f\in\mathcal{L}:\int f^{(i)}(t_{i},s_{-i})dm_{i}(t_{i})=0 \text{ for all } s_{-i}, \text{ for all }i \} \,.
\end{align}
(iv) The space of {\em nonstrategic games}, $\mathcal{E}$, is defined by
\begin{align}\label{eq:passive}
\mathcal{E}:= \{ f \in\mathcal{L} : f^{(i)}(s_i,s_{-i}) = f^{(i)}(s'_i,s_{-i}) \text{ for all } s_i, s'_i, s_{-i}, \text{ for all } i \} \,.
\end{align}
\end{defn}
\noindent Identical interest games in $\mathcal{I}$ and zero-sum games in $\mathcal{Z}$ are familiar games, with cooperative and competitive interactions, respectively.\com{ and tractable\com{straightforward}
equilibrium analysis is possible in both classes of games via potential functions and mini-max solutions.} A normalized game is a game in which the sum of one player's payoffs, given the other players' strategies,
is always zero. A non-strategic game in $\mathcal{E}$ (also sometimes called a passive game) is a game in which each player's payoff does not depend on his own strategy choice (\citet{Sandholm10, Candogan2011}).
Thus, each player's strategy choice plays no role in determining her payoff. Because of this property, the players' strategic relations remain unchanged if we add the payoff of a non-strategic game to that of another game. This leads us to
the definition of strategic equivalence (and thus notation, $\mathcal{E}$).\footnote{One can study different strategic equivalences. For example, \citet{Monderer96} introduce the
concept of $w-$ potential games in which the payoff changes are proportional for each player. \citet{Morris04}
also study the best response equivalence of games in which players have the same best-responses. We choose
our definition of strategic equivalence since it is most natural with the linear structure of the space of all games.}
\begin{defn} \label{def:st-eq}
\setstretch{0.8}
We say that game $g$ is \emph{strategically equivalent} to game $f$
if
\[
g=f+h\textnormal{ for some }h\in\mathcal{E}
\]
We write this relation as $g\sim f$.
\end{defn}
Note the following simple, but useful, characterization for non-strategic games: a function does not depend on a variable if and only if the value of the integral of the
function with respect to that variable gives the same value of the function. We write $|S_i|:=m_{i}(S_{i})$.
\begin{lem} \label{lem:non-st}
A game $f$ is a non-strategic game if and only if
\begin{equation}\label{eq:pass}
f^{(i)}(s)=\frac{1}{|S_{i}|}\int f^{(i)}(t_{i},s_{-i})dm_{i}(t_{i}) \text{ for all }i, \text{ for all } s \,.
\end{equation}
\end{lem}
\begin{proof}
Suppose that $f$ satisfies \eqref{eq:pass}. Then, clearly, $f^{(i)}$ does not depend on $s_i$ for all $i$. Now let $f \in \mathcal{E}$. Then there exist $\zeta$ such that $f^{(i)}(s)=\zeta^{(i)}(s_{-i})$ for all $s$, which does not depend on $s_i$, for all $i$. Thus, by integrating, we see that \eqref{eq:pass} holds.
\end{proof}
As mentioned, it is well-known that two-player zero-sum games have desirable properties and thus, in addition to potential games, we also consider a class of zero-sum equivalent games---games that are strategically equivalent to zero-sum games and a class of zero-sum equivalent potential games---potential games that are strategically equivalent to zero-sum games. Given two subspaces $\mathcal{A}$ and $\mathcal{A}'$, the sum of two subspaces is defined to be
\[
\mathcal{A} + \mathcal{A}':= \{ f+f' :\,\,f \in \mathcal{A},\,\, f' \in \mathcal{A}' \}.
\]
\begin{defn}\label{def:pot-zero}
We have the following definitions: \\
(i) The space of {\em potential games} (identical interest equivalent games) is defined by
\setstretch{0.9}
\begin{align}
\mathcal{I}+\mathcal{E}.
\end{align}
(ii) The space of {\em zero-sum equivalent games} is defined by
\begin{align}
\mathcal{Z}+\mathcal{E}
\end{align}
(iii) The space of games that is strategically equivalent
to both an identical interest game and a zero-sum game, called {\em zero-sum equivalent potential games}, is denoted by $\mathcal{B}$:
\[
\mathcal{B} = (\mathcal{I} + \mathcal{E}) \cap (\mathcal{Z} + \mathcal{E})
\]
\end{defn}
The following example illustrates strategic equivalence.
\noindent \textbf{Example (Strategic equivalence: Cournot oligopoly).}
\noindent Consider a quasi-Cournot oligopoly game with a linear demand function for which the payoff function for the $i$-th player, for $i=1,\cdots,n$, is given by
\[
f^{(i)}(s)=(\alpha-\beta\sum_{j=1}^{n}s_{j})s_{i}-c_{i}(s_{i})\,,
\]
where $\alpha, \beta > 0$, $c_i(s_i) \geq 0$ for all $s_i \in [0, \bar s]$ for all $i$ and for some sufficiently large $\bar s$.\footnote{Here, quasi-Cournot games allow the negativity of the price \citep{Monderer96}. Further, we can choose $\bar s$ to ensure that the unique Nash equilibrium lies in the interior $[0, \bar s]$ as follows. Suppose that $c_i(s_i)$ is linear; that is, $c_i(s_i) = c_i s_i$ for all $i$. We assume that $\alpha > n \min_i c_i - (n-1) \max_i c_i$, which ensures that the Nash equilibrium, $s^*$, is positive. If we choose $\bar s$ such that $(n+1) \beta \bar s > \alpha - n \min_i c_i + (n-1) \max_i c_i$, then $s^*_i \in (0, \bar s)$ for all $i$.
\com{Should I discuss the meaning of this condition and existence?} }
It is well-known that this game is a potential game (\citealt{Monderer96}); i.e., it is strategically equivalent to an identical interest game. But it is also strategically equivalent to a zero sum game (if $n \ge 3$) as follows. To show this, when $n=3$, we write the payoff function as
\begin{equation}\label{eq:cournot-1}
\begin{pmatrix}f^{(1)}\\
f^{(2)}\\
f^{(3)}
\end{pmatrix}=\underbrace{\begin{pmatrix}
(\alpha-\beta s_{1})s_{1}-c_{1}(s_{1})\\
(\alpha-\beta s_{2})s_{2}-c_{2}(s_{2})\\
(\alpha-\beta s_{3})s_{3}-c_{3}(s_{3})
\end{pmatrix}}_{\text{Self-interaction}}-\underbrace{\begin{pmatrix}\beta s_{1}s_{2}\\
\beta s_{1}s_{2}\\
0
\end{pmatrix}}_{\substack{\text{Interactions}\\
\text{between players 1 and 2}
}
}-\underbrace{\begin{pmatrix}\beta s_{1}s_{3}\\
0\\
\beta s_{1}s_{3}
\end{pmatrix}}_{\substack{\text{Interactions}\\
\text{between players 1 and 3}
}
}-\underbrace{\begin{pmatrix}0\\
\beta s_{2}s_{3}\\
\beta s_{2}s_{3}
\end{pmatrix}}_{\substack{\text{Interactions}\\
\text{between players 2 and 3}
}
}
\end{equation}
The self-interaction term is strategically equivalent to both an identical interest game and a zero-sum game, as the following two payoffs show
\begin{equation} \label{eq:cournot-eq1}
\begin{pmatrix}
&\boldsymbol{(\alpha-\beta s_{1})s_{1}-c_{1}(s_{1})}&+(\alpha-\beta s_{2})s_{2}-c_{2}(s_{2})&+(\alpha-\beta s_{3})s_{3}-c_{3}(s_{3})\\
&(\alpha-\beta s_{1})s_{1}-c_{1}(s_{1})&+\boldsymbol{(\alpha-\beta s_{2})s_{2}-c_{2}(s_{2})}&+(\alpha-\beta s_{3})s_{3}-c_{3}(s_{3})\\
&(\alpha-\beta s_{1})s_{1}-c_{1}(s_{1})&+(\alpha-\beta s_{2})s_{2}-c_{2}(s_{2})&+\boldsymbol{(\alpha-\beta s_{3})s_{3}-c_{3}(s_{3})}
\end{pmatrix}
\end{equation}
and
\begin{equation} \label{eq:cournot-eq2}
\begin{pmatrix}\boldsymbol{(\alpha-\beta s_{1})s_{1}-c_{1}(s_{1})}-[(\alpha-\beta s_{2})s_{2}-c_{2}(s_{2})]\\
\boldsymbol{(\alpha-\beta s_{2})s_{2}-c_{2}(s_{2})}-[(\alpha-\beta s_{3})s_{3}-c_{3}(s_{3})]\\
\boldsymbol{(\alpha-\beta s_{3})s_{3}-c_{3}(s_{3})}-[(\alpha-\beta s_{1})s_{1}-c_{1}(s_{1})]
\end{pmatrix}.
\end{equation}
The payoffs in \eqref{eq:cournot-eq1} and \eqref{eq:cournot-eq2} are payoffs for an identical interest game and a zero-sum game, respectively.
They are obtained from the self-interaction term by adding payoffs that do not depend on the player's own strategy, and thus are strategically equivalent (see Definition \ref{def:st-eq}).
In a similar way, the payoff component describing
the interactions between players $1$ and $2$ is strategically equivalent
to the payoff for an identical interest game and the payoff for a zero-sum
game. For example,
\begin{equation} \label{eq:cournot-eq3}
\begin{pmatrix}
\boldsymbol{\beta s_{1}s_{2}}\\
\boldsymbol{\beta s_{1}s_{2}}\\
0
\end{pmatrix},\,\,
\begin{pmatrix}
\boldsymbol{\beta s_{1}s_{2}}\\
\boldsymbol{\beta s_{1}s_{2}}\\
\beta s_{1}s_{2}
\end{pmatrix},\,\,
\text{ and }
\begin{pmatrix}
\boldsymbol{\beta s_{1}s_{2}}\\
\boldsymbol{\beta s_{1}s_{2}}\\
-2\beta s_{1}s_{2}
\end{pmatrix}
\end{equation}
are all strategically equivalent. A similar computation holds for the last two terms in equation \eqref{eq:cournot-1} involving the interactions between players $1$ and $3$ as well as between players $2$ and $3$. As a consequence, the quasi-Cournot oligopoly game is strategically equivalent
to both an identical interest game and a zero-sum game.
\tocless \subsection{Decomposition results \label{subsec:vec-dec}}
In this section, we present our main decomposition results. For the convenience of readers, we also present some basic properties of projection operators in a vector space in Appendix \ref{appen:decomp_games}. In the context of game theory, the following two kinds of decompositions receive much attention in the literature: (i) identical interest games versus zero-sum games (\citet{Kalai10}) and (ii) normalized games versus non-strategic games (\citet{HandR11, Candogan2011}):
\begin{equation}\label{eq:1st-two-d1}
\text{(i)}\,\,\, \mathcal{L}=\mathcal{I}\ensuremath{\oplus}\mathcal{Z}, \qquad \text{(ii)}\,\,\, \mathcal{L}=\mathcal{N}\oplus\mathcal{E}
\end{equation}
\com{(see the appendix Corollary in \ref{cor:pre-decomp}.)
Thus, zero-sum games (normalized games) are algebraic complements of identical interest games (non-strategic games) and vice versa.}
where $\oplus$ denotes the direct sum in which every element in $\mathcal{L}$ can be \emph{uniquely} written as the sum of one element in $\mathcal{I}$ (or $\mathcal{N}$) and another element in $\mathcal{Z}$ (or $\mathcal{E}$).
To explain how projections on $\mathcal{L}$ induce decompositions in \eqref{eq:1st-two-d1}, we first introduce an operator which averages one player's payoffs with equal weights, given all other players' strategies. More precisely, for a scalar valued function $u: S \rightarrow \mathbb{R}$, we introduce operator $T_i$ which acts on scalar valued functions:
\begin{equation}\label{eq:def-t}
(T_i u)(s) = \frac{1}{|S_i|} \int u(t_i, s_{-i}) dm_i(t_i)
\end{equation}
for each $i$ (see Lemma \ref{lem:non-st}). Then $T_i u$ does not depend on $s_i$.
Note that if we define the following operators on $\mathcal{L}$, which act on vector valued functions,
\begin{equation}\label{eq:symm_opt}
\mathbf{S} f := ( \frac{1}{n} \sum_{i=1}^n f^{(i)}, \cdots, \frac{1}{n} \sum_{i=1}^n f^{(i)}), \quad \,\,\,\,\mathbf{P} f := (T_1 f^{(1)}, \cdots, T_n f^{(n)})
\end{equation}
then $\mathbf{S}$ and $\mathbf{P}$ are projections on $\mathcal{L}$ (see Lemmas \ref{lem:ti} and \ref{lem:proj-space}). Then, the decompositions in \eqref{eq:1st-two-d1} can be expressed as the ranges and kernels of projections as follows:
\begin{equation}\label{eq:two-d1-oper}
\mathcal{L} = R(\mathbf{S}) \oplus K(\mathbf{S}), \qquad \mathcal{L} = K(\mathbf{P}) \oplus R(\mathbf{P})
\end{equation}
where $R$ and $K$ denote the range and kernel of the operators, respectively. That is, the spaces of identical interest games and zero-sum games are the range and kernel of the operator $\mathbf{S}$, while the spaces of non-strategic games and normalized games are the range and kernel of the operator $\mathbf{P}$ (by Lemma \ref{lem:non-st}). This shows how a given projection induces a decomposition of the space of games into the range and kernel of the projection.
We would like to extend decompositions in \eqref{eq:1st-two-d1} (and \eqref{eq:two-d1-oper}) to decompositions involving the subspaces of various games defined in Section \ref{subsec:semi-norm} and Table \ref{tab:notation}. Hence, our first task is to find projections onto these subspaces, and then the decomposition results are induced by these projections. To streamline the presentation, we first state our decomposition results and then provide explicit expressions for projections onto the subspaces of various games in the subsequent propositions.
\begin{thm} \label{thm:main}We have the following decomposition results:
\begin{align*}
\hypertarget{D1}{\textbf{D1}:}\quad & \mathcal{L} =(\mathcal{I}\cap\mathcal{N})\oplus(\mathcal{Z}+\mathcal{E}) \\
\hypertarget{D2}{\textbf{D2}:}\quad & \mathcal{L}= (\mathcal{I}+\mathcal{E}) \oplus (\mathcal{Z} \cap \mathcal{N}) \\
\hypertarget{D3}{\textbf{D3}:} \quad & \mathcal{L}=(\mathcal{I}\cap\mathcal{N})\oplus(\mathcal{Z}\cap\mathcal{ N})\oplus \mathcal{B}
\end{align*}
where we recall $\mathcal{B}=(\mathcal{I}+\mathcal{E})\cap(\mathcal{Z}+\mathcal{E})$.
\end{thm}
\begin{proof}
See Appendix \ref{appen:decomp_games}.
\end{proof}
To explain the idea of decompositions in Theorem \ref{thm:main}, we start with a natural way to represent the payoff function into a sum of payoffs, which aggregates strategic interactions among players in the various subset of $N$.\footnote{For example, \citet{Sandholm10} decomposes an $n$-player finite strategy game into $2^n$ component games in which each subset of players is ``active''. \citet{Ui00} also expresses a potential game as a sum of component games in which payoffs depend only on the subsets of players.} Note the following partition of identity $I$:
\begin{equation}\label{eq:par-id}
I = \prod_{l=1}^{n} (T_l + (I-T_l))= \sum_{M \subset N} \prod_{l \not \in M} T_l \prod_{k \in M} (I- T_k),
\end{equation}
which is obtained by expanding the product and using the commutative property of $T_i$'s in \eqref{eq:def-t}.
Using \eqref{eq:par-id}, $u:S \rightarrow \mathbb{R}$ can be written as
\begin{equation} \label{eq:u_M}
u= \sum_{M \subset N} u_M \text{ where } u_M = \prod_{l \not \in M}T_l \prod_{k \in M}(I-T_k)u.
\end{equation}
(see also Proposition 2.7 in \citet{Sandholm10}).
Observe that $u_M$ is normalized with respect to $s_k$ for all $k \in M$ and $u_M$ does not depend on $s_l$ for all $l \not \in M$. That is, $u_M$ normalizes the payoff function $u$ with respect to the strategies of players in $M$ and renders the payoff function $u$ independent of the strategies of players outside of $M$ by integrating out those strategies. Also note that for $u:S \rightarrow \mathbb{R}$,
\begin{equation} \label{eq:lab}
\sum_{M \ni i } u_M= (I-T_i) u, \qquad \sum_{M \not \ni i } u_M= T_i u.
\end{equation}
where $\sum_{M \ni i}$ (or $\sum_{M \not \ni i }$) is the summation which runs over all subsets of $N$ containing $i$ (or all subsets of $N$ not containing $i$, respectively).
First, consider the subspace of identical interest normalized games $\mathcal{I} \cap \mathcal{N}$ in Theorem \ref{thm:main} \textbf{D1}. From \eqref{eq:1st-two-d1} and \eqref{eq:two-d1-oper}, we have $\mathcal{I} \cap \mathcal{N} = R(\mathbf{S}) \cap R(\mathbf{P})$. If $\mathbf{S}$ and $\mathbf{P}$ were commuting, $\mathbf{SP}$ would be a projection and thus $\mathcal{I} \cap \mathcal{N}=R(\mathbf{S}) \cap R(\mathbf{P})=R(\mathbf{SP})$ would hold. However, since $\mathbf{S}$ and $\mathbf{P}$ do not commute,
$\mathbf{SP}$ is not a projection onto $\mathcal{I} \cap \mathcal{N}$. Fortunately, it turns out that the following projection $\mathbf{G}$ can be used to define a projection onto the subspace of identical interest normalized games:
\begin{equation} \label{eq:orth-proj}
\mathbf{G} f := (\prod_{l=1}^n (I - T_l) f^{(1)},\cdots, \prod_{l=1}^n (I - T_l) f^{(n)}).
\end{equation}
The projection $\mathbf{G}$ normalizes each player's payoff function with respect to \emph{all} players' strategies. Also, it is easy to check that $\mathbf{G}$ commutes with $\mathbf{S}$, hence $\mathbf{SG}$ becomes a projection on $\mathcal{L}$, and we also show that the range and kernel of $\mathbf{SG}$ are the subspaces of identical interest normalized games and zero-sum equivalent games, summarized in the following proposition.
\begin{prop}[\textbf{Decomposition D1}] \label{prop:i-normalized}
We have the following results. \\
(i) $\mathbf{SG}$ is a projection. \\
(ii) ${\displaystyle \mathcal{I} \cap \mathcal{N}= R(\mathbf{S}) \cap R(\mathbf{P}) = R(\mathbf{SG})}$ and ${\displaystyle \mathcal{Z} + \mathcal{E}= K(\mathbf{S}) + K(\mathbf{P}) = K(\mathbf{SG})}$.
\end{prop}
\begin{proof}
See Lemma \ref{lem:proj-space}, Proposition \ref{appen-prop:iinorm} and Proposition \ref{prop:zero-equiv}.
\end{proof}
\noindent The characterizations in Proposition \ref{prop:i-normalized} induce Theorem \ref{thm:main} \textbf{D1}, decomposing $\mathcal{L}$ as the direct sum of the range and kernel of the projection, $\mathbf{SG}$.
Next, consider the subspace of potential games, $\mathcal{I}+ \mathcal{E}$. Again, note that $ \mathcal{I}+\mathcal{E} = R(\mathbf{S})+R(\mathbf{P})$. Thus if $\mathbf{S}$ and $\mathbf{P}$ were commuting and $\mathbf{SP=0}$ held, $\mathbf{S+P}$ would be the projection onto $\mathcal{I}+\mathcal{E}$ and $\mathcal{I}+\mathcal{E} = R(\mathbf{S})+R(\mathbf{P}) = R(\mathbf{S}+\mathbf{P})$ would hold (see the condition in Lemma \ref{lem:vec-proj}).
Instead, we managed to find new characterizations for potential games, which can be used to derive an explicit expression for the projection onto the potential game subspace (equations \eqref{eq:pot-con2}, \eqref{eq:potent-char}). In Proposition \ref{prop:pot}, we establish equivalence between these new characterizations and the existing test for potential games in \citet{HandRTest15} (equation \eqref{eq:text-pot-con}).
\begin{prop}[\textbf{Potential games}] \label{prop:pot}
The following statements are equivalent. \\
(i) $\,\,$ $f$ is a potential game. \\
(ii)
\begin{equation}\label{eq:text-pot-con}
(I-T_i)(I-T_j) f^{(i)} =(I-T_i)(I-T_j) f^{(j)} \textrm{ for all } i,j \,.
\end{equation}
(iii)
\begin{equation}\label{eq:pot-con2}
f^{(i)}_M = \frac{1}{|M|} \sum_{j \in M} f^{(j)}_M \textrm{ for all } i \in M, \textrm{ for all non-empty } M \subset N \,.
\end{equation}
(iv) For all $i$,
\begin{equation}\label{eq:potent-char}
f^{(i)} =\sum_{M \ni i} \frac{1}{|M|} \sum_{j \in M} f_M^{(j)} + \sum_{M \not \ni i} f_M^{(i)} =\sum_{M \ni i} \frac{1}{|M|} \sum_{j \in M} \prod_{l \not \in M} T_l \prod_{k \in M} (I- T_k) f^{(j)} + T_i f^{(i)}
\end{equation}
\end{prop}
\begin{proof}
See Proposition \ref{appen-prop:pot-char}.
\end{proof}
\noindent Implicit in Proposition \ref{prop:pot} (see equation \eqref{eq:potent-char}) is the following operator $\mathbf{V}$ on $\mathcal{L}$,
\begin{equation}\label{eq:v-proj}
\mathbf{V} f := (\sum_{M \ni 1} \frac{1}{|M|} \sum_{j \in M} f_M^{(j)} , \cdots, \sum_{M \ni n} \frac{1}{|M|} \sum_{j \in M} f_M^{(j)})
\end{equation}
and it is straightforward to check that $\mathbf{V}$ is a projection (i.e., $\mathbf{V}^2 = \mathbf{V}$), that $\mathbf{V}$ and $\mathbf{P}$ commute and that $\mathbf{V}\mathbf{P}=0$ (see Lemma \ref{lem:proj-space}). Then, Proposition \ref{prop:pot} implies that $\mathbf{V} + \mathbf{P}$ is a projection onto the subspace of potential games, and
we also show that $K(\mathbf V+\mathbf P)$ is equal to the subspace of zero-sum normalized games:
\begin{table}
\centering
\begin{tabular}{c|c|c}
\hline
Identical interest normalized games & $\mathcal{I} \cap \mathcal{N}$ & $R(\mathbf{SG})$ \\
Zero-sum normalized games & $\mathcal{Z} \cap \mathcal{N}$ & $K(\mathbf{V+P})$ \\
\hline
Potential games & $\mathcal{I}+\mathcal{E}$ & $R(\mathbf{V+P})$ \\
Zero-sum equivalent games & $\mathcal{Z}+\mathcal{E}$ & $K(\mathbf{SG})$ \\
Zero-sum equivalent potential games & $\mathcal{B}$ & $K(\mathbf{SG})\cap R(\mathbf{V+P})$ \\
\hline
\end{tabular}
\caption{Summary of projection mappings}\label{tab:projs}
\end{table}
\begin{prop}[\textbf{Decomposition D2}] \label{prop:d2}
We have the following results. \\
(i) $\mathbf{V}+\mathbf{P}$ is a projection. \\
(ii) ${\displaystyle \mathcal{I} + \mathcal{E}= R(\mathbf{S}) + R(\mathbf{P}) = R(\mathbf{V+P})}$ and ${\displaystyle \mathcal{Z} \cap \mathcal{N}= K(\mathbf{S}) \cap K(\mathbf{P}) = K(\mathbf{V+P})}$.
\end{prop}
\begin{proof}
See Lemma \ref{lem:proj-space}, Proposition \ref{appen-prop:pot-char}, and Proposition \ref{prop:zero-norm}.
\end{proof}
\noindent Again, from Proposition \ref{prop:d2} we obtain the decomposition in Theorem \ref{thm:main}, \textbf{D2}.
Finally, to obtain decomposition $\mathbf{D3}$, we show that $\mathbf{SG}$ and $\mathbf{I-(V+P)}$ commute and $\mathbf{SG(I-(V+P)) =0}$. This implies that the range of $\mathbf{SG} + \mathbf{I-(V+P)}$ is equal to the direct sum of the identical interest normalized game subspace and the zero-sum normalized game. We also show that the kernel of $\mathbf{SG} + \mathbf{I-(V+P)}$ is the subspace of zero-sum equivalent potential games:
\begin{prop}[\textbf{Decomposition D3}] \label{prop:d3}
We have the following results. \\
(i) $\mathbf{SG}$ and $\mathbf{I-(V+P)}$ commute and $\mathbf{SG (I-(V+P)) = 0}$. \\
(ii) ${\displaystyle\,\,\,\,\,\,\,\,\,\, (\mathcal{I} \cap \mathcal{N}) \oplus (\mathcal{Z} \cap \mathcal{N}) = R(\mathbf{SG}) \oplus K(\mathbf{V+P}) = R(\mathbf{SG + I - (V+P)})}$. \\
${\displaystyle \,\,\,\,\quad \mathcal{B}= (\mathcal{I} + \mathcal{E}) \cap (\mathcal{Z} + \mathcal{E})
=K(\mathbf{SG}) \cap R(\mathbf{V+P}) = K(\mathbf{SG + I - (V+P)})}$.
\end{prop}
\begin{proof}
See Proposition \ref{appen:vec-decomp} and Lemma \ref{lem:proj-space}.
\end{proof}
\noindent Then, decomposition \textbf{D3} again follows from the range and kernel decomposition of projection $\mathbf{SG + I - (V+P)}$. We summarize the ranges and kernels of these operators as follows in Table \ref{tab:projs}.
\begin{table}
\centering
\setstretch{1.2}
\scalefont{1}
\begin{tabular}{c|c|c}
\hline
$\frac{1}{n}\sum_{l=1}^{n}f^{(l)}_N$ & $\sum_{\substack{M \ni i \\ M \neq N}} \frac{1}{|M|} \sum_{j \in M} f_M^{(j)} + T_i f^{(i)}$ & $(I-T_i)f^{(i)} - \sum_{M \ni i} \frac{1}{|M|} \sum_{j \in M} f_M^{(j)} $ \\
\hline
$=f_{\mathcal{I} \cap \mathcal{N}}$ & $=f_{\mathcal{B}}$ & $=f_{\mathcal{Z} \cap \mathcal{N}}$\\
\hline
\multicolumn{2}{c|}{ $= f_{\mathcal{I}+\mathcal{E}}$} & \\
\hline
& \multicolumn{2}{|c} {$= f_{\mathcal{Z}+\mathcal{E}}$} \\
\hline
\end{tabular}
\caption{\textbf{Summary of component games}. In the first row, $f^{(i)}$ is decomposed into three components, $f_{\mathcal{I} \cap \mathcal{N}}$, $f_{\mathcal{B}}$ and $f_{\mathcal{Z} \cap \mathcal{N}}$. Then $f_{\mathcal{I}+ \mathcal{E}}$ is obtained by adding the first two components and $f_{\mathcal{Z}+\mathcal{E}}$ is obtained by adding the last two components. }\label{tab:component}
\end{table}
Decompositions in Theorem \ref{thm:main} are of great importance in practice since they provide algorithms to extract the component games from a given game. In other words, the component games can be interpreted as ``closest'' potential, zero-sum equivalent, zero-sum equivalent potential, identical interest normalized, and zero-sum normalized games to the original games.
Concretely, by applying projections, we obtain each component game as follows:
\[
f= \underbrace{\mathbf{SG}f}_{\in \mathcal{I} \cap \mathcal{N}} + \underbrace{(\mathbf{I-(V+P)})f}_{\in \mathcal{Z} \cap \mathcal{N}} + \underbrace{(\mathbf{V+P - SG}) f}_{\in \mathcal{B}}.
\]
Table \ref{tab:component} summarizes how to find each component game.
\begin{rem} \normalfont
In the special case of $2$-player games, $n=2$, we have the identity
\[
\mathbf V + \mathbf P = \mathbf I - (\mathbf I-\mathbf S) \mathbf G
\]
and all subspaces can be described by using the projections $\mathbf{S, G}$, yielding much simpler characterizations for \textbf{D2} and \textbf{D3}:
\begin{align*}
\textbf{D2}' \quad & \mathcal{L} =
(\mathcal{I}+\mathcal{E})
\oplus
(\mathcal{Z} \cap \mathcal{N}) \,\,\,\,\,\,\,\,\,\,\,\,\,= K(\mathbf{(I-S)G}) \oplus R(\mathbf{(I-S)G})\\
\textbf{D3}' \quad & \mathcal{L} = (\mathcal{I}\cap\mathcal{N})
\oplus
(\mathcal{Z}\cap\mathcal{N}) \oplus \mathcal{B} = R(\mathbf{SG}) \oplus R(\mathbf{(I-S)G}) \oplus K(\mathbf{G}).
\end{align*}
$\square$
\end{rem}
\begin{rem} \normalfont
If we introduce the following scalar product of payoff functions
\begin{equation}\label{eq:semi-norm}
\left\langle f,g\right\rangle :=\sum_{i}\int f^{(i)}g^{(i)}dm,
\end{equation}
then the decompositions, $\textbf{D1}, \textbf{D2}$ and $\textbf{D3}$, become orthogonal. Orthogonal decompositions are useful in the sense that we can naturally characterize a class of games by examining their orthogonal complements. For example, the sufficiency
and necessity of the well-known Monderer and Shapley cycle condition for potential games (Theorem 2.8 in \citet{Monderer96}) can be proved by showing that this condition requires that potential games are orthogonal to all zero-sum normalized games (see Section \ref{subsec:finite} and \citet{HandR11}).
$\square$
\end{rem}
\tocless \section{Equilibrium properties of component games \label{sec:zero-eq}}
\tocless \subsection{Zero-sum equivalent games}
In this section, we discuss the equilibrium properties of component games in Definitions \ref{def:space} and \ref{def:pot-zero}. When we study Nash equilibria of finite games, we will consider both pure and mixed strategies. To this purpose, for finite games we let $\Delta_i \,=\, \{\sigma_i \in\mathbb{R}^{|S_i|}: \sum_{s_i \in S_i} \sigma_{i}(s_i)=1,\,\sigma_{i}(s_i) \geq 0\text{\,\,\ for all } s_i \}$ with $\sigma_i(s_i)$ being the probability that player $i$ uses strategy $s_i$. We also follow the usual convention of extending the domain of the payoff $f$ from $S$ to $\Delta = \prod_{i=1}^n
\Delta_i$ by defining
\begin{equation}
f^{(i)}(\sigma):=\sum_{s\in S} f^{(i)}(s) \prod_{k} \sigma_k(s_k). \label{eq:game}
\end{equation}
For continuous strategy games, we consider mainly the set of Nash equilibria in pure strategies (except for Proposition \ref{prop:norm-zero-ci}).\footnote{There are existing results for the sufficient conditions ensuring the existence of a pure strategy Nash equilibrium (\citet{Debreu52}, \citet{Glicksberg52}, \citet{Fan52}, \citet{Dasgupta86}, \citet{Reny03New} and \citet{Duggan07}). Rather than imposing specific conditions, we simply require that a continuous game possess a pure strategy Nash equilibrium if necessary.} Hereafter, for a continuous game we denote by $s$ a pure strategy profile, while for a finite game we denote by $s=\sigma$ a mixed strategy profile by abuse of notation.
To study the equilibrium properties of zero-sum equivalent games, for a given zero-sum equivalent game $f = w+h$ where $w \in \mathcal{Z}$ and $h \in \mathcal{E}$, we introduce
\begin{equation}\label{eq:phi}
\Phi_f(s):= \max_{t \in S} \sum_{i=1}^{n} w^{(i)}(t_{i}, s_{-i}) = \sum_{i=1}^{n}\max_{t_i \in S_i} w^{(i)}(t_{i}, s_{-i}).
\end{equation}
The function in \eqref{eq:phi} has been used by various authors to examine the existence of a Nash equilibrium for a game.\footnote{See \citet{Nikaido55}, \citet{Rosen65}, \citet{Bregman88}, \citet{Myerson97}, \citet{Barron08}, \citet{Cai15}} We will show that $\Phi_f$ provides some useful characterizations for the class of zero-sum equivalent games. Note that
\begin{equation} \label{eq:char}
\Phi_f(s^*) = \min_s \Phi_f(s)=0 \iff s^* \text{ is a Nash equilibrium for }f=w+h.
\end{equation}
Using \eqref{eq:char}, we will study the conditions under which a zero-sum equivalent game admits a unique Nash equilibrium or a convex set of Nash equilibria. To do this, we will show that the (strict) convexity of $w^{(i)}(s_i, s_{-i})$ in $s_{-i}$ for all $i$ implies the (strict) convexity of $\Phi_f(s)$ in \eqref{eq:phi}. This follows from the simple fact that the value function of a maximization problem (in \eqref{eq:phi}) is convex in a parameter if the objective function itself is convex in that parameter. The same principle yields the convexity of the profit function in the basic microeconomics context since the objective profit function is convex in prices. Then, since the set of optimizers of a (strictly) convex function is convex (singleton), the relationship in \eqref{eq:char} shows that a sufficient condition for the convexity or uniqueness of Nash equilibria is the convexity or strict convexity of $w^{(i)}(s_i, s_{-i})$ in $s_{-i}$ for all $i$.
\begin{prop}[Nash equilibria for zero-sum equivalent games]\label{prop:zero-convex}
Suppose that $f$ is a zero-sum equivalent game, where $f=w+h$; $w$ is a zero-sum game and $h$ is a non-strategic game. Suppose that $f$ has a Nash equilibrium. \\
(i) If $w^{(i)}(s_{i},s_{-i})$ is convex in $s_{-i}$
for all $s_{i}$ for all $i$, the set of Nash equilibria is convex.\\
(ii) If $w^{(i)}(s_{i},s_{-i})$ is strictly convex
in $s_{-i}$ for all $s_{i}$ for all $i$, there exists a unique Nash equilibrium
for $f$.\end{prop}
\begin{proof}
See Appendix \ref{proof:zero-convex}.
\end{proof}
\begin{table}
\centering\scalefont{0.8}
\begin{tabular}{c|c|c}
\hline
& Properties & Examples\tabularnewline
\hline
Zero-sum equivalent & Convexity/ uniqueness of NE & contest games \tabularnewline
games & under some conditions & quasi-Cournot games \tabularnewline
\hline
Zero-sum equivalent & Two-player games: dominant strategy NE & Prisoner's Dilemma \tabularnewline
potential games & & quasi-Cournot games\tabularnewline
\hline
Zero-sum normalized & Unique uniform mixed & Rock-Paper-Scissors games \tabularnewline
games & strategy NE &
Matching Pennies games \tabularnewline
\hline
Identical interest & Uniform mixed strategy NE &
Coordination games
\tabularnewline
normalized games & & \tabularnewline
\hline
\end{tabular}
\caption{\textbf{Summary of equilibrium characterizations for game.} In the table, (C) and (F) mean continuous strategy games and finite games, respectively.}
\label{tab:eq-char}
\end{table}
We next further explore the consequences of Proposition \ref{prop:zero-convex} for two-player finite games.
Though a class of two-player finite games, often called bi-matrix games, is one of the simplest classes, in general, it is acknowledged that even bi-matrix games are hard to solve \citep{Savani06}. We also focus on a class of non-degenerate games. There are several notions of non-degeneracy for finite games, depending on contexts and problems---such as equilibrium characterizations and
classifications of dynamics.\footnote{\citet{WuandJiang52} define an essential game---a game whose Nash equilibria all change only slightly against a small perturbation to the game---and show that almost all finite games are essential; i.e., the set of all essential games is an open and dense subset of the space of
games. \citet{Wilson71} introduces a non-degeneracy assumption regarding payoff matrices (more
precisely tensors) and shows that almost all games have an odd (hence finite) number of Nash equilibria. In the
context of evolutionary game theory, \citet{Zeeman80} also defines a stable game whose dynamic remains
structurally unchanged against a small perturbation. } Since we wish to study the equilibrium properties of two-player finite games, we adopt the following non-degeneracy assumption introduced by \citet{Quint&Shubik97} (see also \citet{Shapley74}). Let $|supp(\sigma_i)|$ be the size of the support of a mixed strategy $\sigma_i=s_i$ for finite games.
\begin{con*}[\hypertarget{con-N}{\textbf{N}}] Suppose that $f$ is a two-player finite game. If $|supp(\sigma_1)| =k$, then there are no more than $k$ pure strategy best responses for player 2 against $\sigma_1$. Similarly, if $|supp(\sigma_2)|=k$, there are no more than $k$ pure strategy best responses for player 1 against $\sigma_2$.
\end{con*}
\noindent Then Lemma 2.2 in \citet{Quint&Shubik97} shows that a two-player finite game has a finite number of Nash equilibria under Condition \hyperlink{con-N}{\textbf{(N)}}. A straightforward consequence of Proposition \ref{prop:zero-convex} is that, generically, two-player finite zero-sum equivalent games have a \emph{unique} Nash equilibrium.
\begin{cor}[two-player finite zero-sum equivalent games]
\label{cor:2p-zero-finite}Suppose that $f$ is a two-player finite zero-sum
equivalent game. Then the set of Nash equilibria for $f$
is convex. If $f$ satisfies Condition \hyperlink{con-N}{\textbf{(N)}},
the Nash equilibrium is unique. \end{cor}
\begin{proof}
See Appendix \ref{appen:other-proofs}.
\end{proof}
\tocless \subsection{Zero-sum equivalent potential games and normalized games} \label{subsec:con-zero-equi}
We denote by $\zeta_{l} : S \rightarrow \mathbb R$ a function that does not depend on $s_{l}$, that is, $\zeta_l :=T_l g$ for some $g$ (recall $T_l$ is defined in \eqref{eq:def-t}). In Section \ref{sec:main-thm}, we show that the quasi-Cournot model is a potential game which is also strategically equivalent to a zero-sum game (see equations \eqref{eq:cournot-eq1}, \eqref{eq:cournot-eq2} and \eqref{eq:cournot-eq3}). Note that the payoff function in \eqref{eq:cournot-1} can be written as
\footnote{Indeed, we can choose
$\zeta_1 = -\beta s_2 s_3 + (\alpha - \beta s_3) s_3 - c_3(s_3)$, $\zeta_2=-\beta s_1 s_3 + (\alpha - \beta s_1) s_1 - c_1(s_1)$, $\zeta_3 = -\beta s_1 s_2 + (\alpha - \beta s_2) s_2 - c_2(s_2)$.
}
\begin{equation}\label{eq:rep}
(f^{(1)}, f^{(2)}, f^{(3)} ) \sim (\zeta_2+ \zeta_3, \zeta_1 + \zeta_3, \zeta_1 + \zeta_2 )
\end{equation}
From equation \eqref{eq:rep}, we find an identical interest game which is strategically equivalent to $f$ as follows:
\[
(f^{(1)}, f^{(2)}, f^{(3)} ) \sim (\zeta_1+\zeta_2+ \zeta_3, \zeta_1 + \zeta_2 +\zeta_3, \zeta_1 + \zeta_2+\zeta_3 )
\]
since $\zeta_l$ does not depend on $s_l$. Similarly, we can find a zero-sum game which is strategically equivalent to $f$:
\begin{align*}
(f^{(1)}, f^{(2)}, f^{(3)} ) & \sim (\zeta_2 + \zeta_3 - 2 \zeta_1, \zeta_1 + \zeta_3 - 2 \zeta_2, \zeta_1 + \zeta_2 - 2 \zeta_3 ) \\
& =(-\zeta_1 + \zeta_2, \zeta_1-\zeta_2, 0) + (-\zeta_1 + \zeta_3, 0, \zeta_1 - \zeta_3)+(0, -\zeta_2 + \zeta_3, \zeta_2 - \zeta_3).
\end{align*}
The following statement makes these observations more general and precise.
\begin{prop}[\textbf{$n$-player zero-sum equivalent potential games}]
\label{prop:n-p-pop} An $n$-player game $f$ is a zero-sum equivalent potential game if and only if
\begin{align}
(f^{(1)},f^{(2)},\cdots,f^{(n)}) & \sim\sum_{l=1}^{n}(\zeta_{l},\zeta_{l},\cdots,\zeta_{l}) \label{eq:pot-zero1} \\
 & \sim\sum_{i<j}(0,\cdots,0,\underbrace{-\zeta_{i}+\zeta_{j}}_{i-\text{th}},0,\cdots,0,\underbrace{\zeta_{i}-\zeta_{j}}_{j-\text{th}},0,\cdots,0), \label{eq:pot-zero2}
\end{align}
where $\zeta_{l}(\cdot)$ does not depend on $s_{l}$.
\end{prop}
\begin{proof}
See Appendix \ref{proof:prop:n-p-pop}.
\end{proof}
\noindent A similar expression to \eqref{eq:pot-zero1} for potential functions is in \citet{Ui00} (see the potential function in Theorem 3 in the cited paper; Appendix D).
The immediate consequences of Proposition \ref{prop:n-p-pop} for two-player games are as follows.
\begin{cor}[\textbf{Two-player zero-sum equivalent potential games}]
\label{prop:2p-both} We have the following results: \\
(i) Consider a two-player zero-sum equivalent potential game with
\[
f=(f^{(1)},f^{(2)}) \sim \sum_{l=1}^2 (\zeta_l, \zeta_l).
\] If $ (s^*_1, s^*_2) \in (\arg \max_{s_1}\zeta_2(s_1), \arg \max_{s_2}\zeta_1(s_2))$ exists, then $(s^*_1, s^*_2)$ is a Nash equilibrium. \\
(ii) Suppose that a two-player finite zero-sum equivalent potential game satisfies Condition \hyperlink{con-N}{\textbf{(N)}}. Then the game has a strictly dominant
strategy Nash equilibrium.
\end{cor}
\begin{proof}
See Appendix \ref{proof:prop:n-p-pop}.
\end{proof}
\noindent Intuitively, when two players have both identical and conflicting interests, the strategic interdependence effects completely offset each other as in the Prisoner's Dilemma game.
Interestingly, \citet{Alger13}, in their study on preference evolution,
identify a set of games in which ``the right thing to do'', is simply to choose a strategy that maximizes one's own payoff (p.2281 in \citet{Alger13}), and games in this set are strategically equivalent to those games in Corollary \ref{prop:2p-both} (i). In this way, our decompositions
can also be used to provide helpful characterizations for a class of interesting
games in applications.
Finally, we show that every zero-sum normalized game and identical interest normalized game possess a uniform mixed strategy Nash equilibrium. When an identical interest game is normalized, this game is normalized with respect to all players' strategies and player $i$'s interim payoff becomes zero against all other players' uniform mixed strategies. Similarly, when a zero-sum game is normalized, one player's payoff can be expressed as the sum of all the other players' payoffs, each normalized with respect to her own strategy, and this again causes player $i$'s interim payoff to be zero.
\begin{prop}[\textbf{Zero-sum normalized games and identical interest normalized games}]
\label{prop:norm-zero-ci}Suppose that a game is a zero-sum normalized game or an identical interest normalized game. Then the uniform mixed strategy profile is always a Nash equilibrium.\end{prop}
\begin{proof}
See Appendix \ref{proof:norm-zero-ci}.
\end{proof}
\tocless \section{Applications \label{sec:app}}
\tocless \subsection{Two-player finite strategy games \label{subsec:finite}}
In this section, we present applications of the decomposition results in Section \ref{sec:main-thm} and equilibrium characterizations in Section \ref{sec:zero-eq}. We first show that a two-player finite game can be uniquely decomposed into component games with distinctive equilibrium properties. Then we show that the total number of Nash equilibria for a given game, depending on some conditions in terms of its decomposition component games, can be maximal or minimal.\footnote{For the maximum number of Nash equilibria of finite games, see \citet{Quint&Shubik97, Quint&Shubik02}, \citet{Savani06}, \citet{MacLennan&Park99}.}
The following statement shows that a given game can be decomposed into three components: the first one with pure strategy Nash equilibria, the second one with a unique uniform mixed Nash equilibrium and the third one with a dominant Nash equilibrium (see Figure \ref{fig:dep-eq} for an illustration of Theorem \ref{thm:equi-decomp}).
\begin{thm}[Two-player finite strategy games; Nash equilibria]\label{thm:equi-decomp}
\begin{figure}
\caption{\textbf{Decomposition of a game into components with distinctive Nash equilibria.}}
\label{fig:dep-eq}
\end{figure}
Suppose that $f$ is a two-player finite strategy game. Then, $f$
can be uniquely decomposed into three components:
\[
f= f_{\mathcal{I}\cap \mathcal{N}}+f_{\mathcal{Z} \cap \mathcal{N}}+f_{\mathcal{B}}
\]
where $f_{\mathcal{I}\cap \mathcal{N}}$ is an identical interest normalized game, $f_{\mathcal{Z} \cap \mathcal{N}}$ is a zero-sum normalized game and $f_{\mathcal{B}}$ is a zero-sum equivalent potential game.
Suppose that all three component games satisfy Condition \hyperlink{con-N}{\textbf{(N)}}. Then, $f_{\mathcal{I}\cap \mathcal{N}}$ has a finite number of Nash equilibria with a uniform mixed strategy, $f_{\mathcal{Z} \cap \mathcal{N}}$ has a unique uniform mixed strategy Nash equilibrium and $f_{\mathcal{B}}$ has a strictly dominant strategy Nash equilibrium.\end{thm}
\begin{proof}
This follows from the decomposition theorem, Theorem \ref{thm:main}, Corollary \ref{cor:2p-zero-finite}, Corollary \ref{prop:2p-both} and Proposition \ref{prop:norm-zero-ci}.
\end{proof}
\begin{figure}\label{fig:perturb}
\end{figure}
The bottom line of Figure \ref{fig:dep-eq} presents the decomposition of the symmetric game in Table \ref{tab:1}. In the middle line of Figure \ref{fig:dep-eq}, we show the Nash equilibria of the original game and the component games in the simplexes. In the top line we show the potential function for the identical interest normalized game in $\mathcal{I} \cap \mathcal{N}$, the function $\Phi$ for the zero-sum normalized game in $\mathcal{Z} \cap \mathcal{N}$ and the function $\Phi$ for the zero-sum equivalent potential game in $\mathcal{B}$. The decomposition of the game illustrates how each of the Nash equilibria of the original game is related to the Nash equilibria of component games. For example, the existence of the completely mixed strategy Nash equilibrium for the original game is related to the existence of the zero-sum normalized or identical interest games. Similarly, the existence of the pure strategy Nash equilibria, $(0,1,0)$ and $(0,0,1)$, is due to the existence of the identical interest normalized component.
To illustrate this relationship more explicitly, we present Figure \ref{fig:perturb} in which Nash equilibria are computed under various values of $\zeta$ and $\delta$ for a symmetric game, defined by
\begin{equation} \label{eq:pertub}
\begin{pmatrix}
2 & -1 & -1 \\
-1 & 2 & -1 \\
-1 & -1 & 2
\end{pmatrix}
+ \zeta \begin{pmatrix}
0 & -1 & 1 \\
1 & 0 & -1 \\
-1 & 1 & 0
\end{pmatrix}
+ \delta \begin{pmatrix}
1 & 1 & 1 \\
0 & 0 & 0 \\
0 & 0 & 0
\end{pmatrix}.
\end{equation}
Here, a two-player symmetric game $f=(f^{(1)},f^{(2)})$ satisfies $f^{(1)}(x,y) = f^{(2)}(y,x)$ and a matrix can specify a bi-matrix game in Figure \ref{fig:dep-eq}. Note that when $\zeta=1$ and $\delta=1$, the game in \eqref{eq:pertub} becomes the finite game presented in Figure \ref{fig:dep-eq}. Figure \ref{fig:perturb} shows that when a game is close to the identical interest normalized game, equilibrium properties---the numbers of pure strategy and mixed strategy Nash equilibria---of this game are the same as the identical interest normalized game (Region I). Also, when the effect of the zero-sum normalized game, $\zeta$, is sufficiently strong, the corresponding game admits a unique uniform mixed strategy (the property of zero-sum normalized games; Region VI). Similarly, in the case where the effect of a zero-sum equivalent potential game is prevalent, the corresponding game has a unique dominant strategy (the property of zero-sum equivalent potential games; Region V).
Next, motivated by Figure \ref{fig:perturb}, we establish a more precise relationship between Nash equilibria of a given game and those of its component games. If we consider two-player symmetric games with $l$ strategies, then a game can be succinctly identified with an $l \times l$ matrix as in \eqref{eq:pertub}. Then we can explicitly find basis games for subspaces of $\mathcal{I} \cap \mathcal{N}$, $\mathcal{Z} \cap \mathcal{N}$ and $\mathcal{B}$ as follows.
We first define games $\{ S^{(ij)} \}_{ij}$ for identical interest normalized games and games $\{ Z^{(ij)} \}_{ij}$ for zero-sum normalized games:
\[
S^{(ij)}_{k k'} =
\begin{cases}
1 & \text{ if } (k,k')=(i,i) \text{ or } (j,j)\\
-1 & \text{ if } (k,k')=(i,j)\text{ or } (j,i) \\
0 & \text{ otherwise},
\end{cases}
\qquad
Z^{(ij)}_{k k'} = \begin{cases}
-1 & \mbox{if } (k,k')=(1,i), (i,j),\text{ or } (j,1) \\
1 & \mbox{if } (k,k')=(1,j), (i,1),\text{ or } (j,i) \\
0 & \mbox{otherwise}.
\end{cases}
\]
Note that $Z^{(ij)}$ is a Rock-Paper-Scissors game involving strategies 1, $i$ and $j$ (see Table \ref{tab:basis}) and, as mentioned, the condition for a two player symmetric game to be a potential game can be obtained by the requirement that a game is orthogonal to all $Z^{(ij)}$'s (see \citet{HandR11}).
We also define the games, $D^{(i)}$ and $E^{(i)}$, for $\mathcal{B}$:
\[
D^{(i)}_{kk'} = \begin{cases}
1, & \mbox{if } k=i \\
0, & \mbox{otherwise}
\end{cases}
,\qquad
E^{(i)}_{kk'} = \begin{cases}
1, & \mbox{if } k'=i \\
0, & \mbox{otherwise}.
\end{cases}
\]
Then, we obtain the following bases for each subspace.
\begin{lem} \label{lem:basis}
We have the following results:\\
(i) The set of games $\{S^{(ij)}\}_{i=1,\cdots, l, j>i}$ forms a basis for $\mathcal{I} \cap \mathcal{N}$. \\
(ii) The set of games $\{Z^{(ij)}\}_{i=2,\cdots, l, j>i}$ forms a basis for $\mathcal{Z} \cap \mathcal{N}$. \\
(iii) The set of games $\{D^{(i)}\}_{i=1,\cdots, l-1}, \{E^{(i)}\}_{i=1,\cdots, l}$ forms a basis for $\mathcal{B}$.
\end{lem}
\begin{proof}
See Lemma \ref{appen:lem-c1}.
\end{proof}
The proof of Lemma \ref{lem:basis} involves counting the dimension of each subspace and checking whether basis games are independent. Thus, for a given $G$, by our decomposition results, there exists a unique set of coefficients $\{\gamma_{ij} \}, \{\zeta_{ij}\}, \{\delta_i \}$ and $\{\eta_i\}$ such that
\begin{equation}\label{eq:decomp-rep}
G= \underbrace{\sum_{i=1}^l \sum_{j=i+1}^l \gamma_{ij} S^{(ij)} }_{=: S}+ \underbrace{\sum_{i=2}^l \sum_{j=i+1}^l \zeta_{ij} Z^{(ij)}}_{=: Z} + \underbrace{\sum_{i=1}^{l-1}\delta_{i} D^{(i)}}_{=: D}+ \sum_{i=1}^{l}\eta_{i} E^{(i)}
\end{equation}
and $G$ is strategically equivalent to $S+Z+D$: $G \sim S + Z + D$.
Denote by $\#(G)$ the number of Nash equilibria of $G$. First note that if $G$ is a symmetric game, then $\#(G) \leq 2^l-1$ (see Lemma 2 (d) and the Theorem in Quint \& Shubik (2002)).
Let
\[
\underline \gamma := \min_k \min_{j \neq k} \gamma_{kj}, \,\,\, \bar \delta := \max_i \delta_i , \,\,\, \bar \zeta := \frac{(l-1)(l-2)}{2} \max_{j>i} |\zeta_{ij}|\,.
\]
The following proposition identifies conditions under which the equilibrium property of an identical interest normalized component game determines the equilibrium property of the original game.
\begin{prop} \label{prop:number}
Consider the decomposition in \eqref{eq:decomp-rep}. \\
(i) Suppose that $\gamma_{ij}<0$ for all $i,j$. Then $\#(G) = 1$. \com{In addition, if $D=\mathbf{0}$, then the uniform mixed strategy is a unique NE.} \\
(ii) Suppose that $\gamma_{ij}>0$ for all $i,j$. Suppose that $\delta_i\geq 0$ for all $i$ and $\underline \gamma > \bar \delta + \bar \zeta$. Then $\#(G) = 2^l-1$.
\end{prop}
\begin{proof}
See Proposition \ref{appen:prop:app1}.
\end{proof}
The result (i) in Proposition \ref{prop:number} is partially known in the literature. \citet{Hofbauer09} define a class of games, strictly stable games, and show that a strictly stable game possesses a unique mixed strategy Nash equilibrium (for the definition of strictly stable games, see Appendix \ref{appen:app}). In addition, Proposition \ref{prop:number} (i) shows that a sufficient condition for the strict stability of a game is given by the negative coefficients assigned to identical interest normalized basis games in decomposition. Proposition \ref{prop:number} (ii) also identifies the conditions under which the equilibrium properties of the identical interest normalized component game determine the equilibrium property of the original game. \citet{Kandori98} shows that a class of coordination games---games satisfying the total bandwagon property---possesses $2^l-1$ Nash equilibria. Proposition \ref{prop:number} (ii) shows that (1) the identical interest normalized games with the positive coefficients assigned to basis games satisfy the total bandwagon property and (2) a game sufficiently close to an identical interest normalized game also satisfies the total bandwagon property. In other words, Proposition \ref{prop:number} (ii) shows how decomposition can be used to find a class of coordination games which satisfies the total bandwagon property.
\com{Finally, we point out that the coefficients, $\gamma_{ij}$, $\zeta_{ij}$, are easily computed by the orthogonal projections in \eqref{eq:orth-proj}, using the relationship
\[
(\mathcal{ G S} G)_{ij} = \gamma_{ij},\,\, (\mathcal{ G A} G)_{ij} = - \zeta_{ij}
\]
for $i \neq j$ (see Appendix OOO).
}
\tocless \subsection{Contest games}
In this section, we study a class of games, called contest games, which include Tullock contests and all-pay auctions as special cases \citep{Konrad07}. A contest game is an $n$-player game in which payoff functions are given by
\begin{equation}
f^{(i)}(s)=p^{(i)}(s_{i},s_{-i})v-c_{i}(s_{i})\,\,\text{for \ensuremath{i=1,\cdots,n}},\label{eq:contest-model}
\end{equation}
where $\sum_{i}p^{(i)}(s)=1$ and $p^{(i)}(s)\ge0$ for all $s \geq 0$,
$v>0$, $c_{i}(0)=0$ and $c_{i}(\cdot)$ is continuous, increasing and convex. Then, it is easy to verify that a contest game is a zero-sum equivalent game, since
\begin{equation}\label{eq:w-fun}
f^{(i)} \sim (p^{(i)}(s_{i},s_{-i})-\frac{1}{n})v-\frac{1}{n-1}\sum_{j\ne i}(c_{i}(s_{i})-c_{j}(s_{j})) = : w^{(i)}(s_i, s_{-i}).
\end{equation}
Thus, Proposition \ref{prop:zero-convex} may be applicable for equilibrium analysis.
The existing literature extensively studies the uniqueness of Nash equilibria under specific assumptions about $p^{(i)}$ (e.g., \citet{HillmanRiley89}, \citet{SO-contest97}). When $p^{(i)}(s)$ is given by
\begin{equation}\label{eq:r-s-games}
p^{(i)}(s) = \begin{cases}
\frac{s_i}{\sum_l s_l}, & \mbox{if } s_l > 0 \text{ for some } l \\
\frac{1}{n}, & \mbox{if } s_l = 0 \text{ for all } l,
\end{cases}
\end{equation}
the contest game is called a rent-seeking game. Using Proposition \ref{prop:zero-convex}, we present a completely different way of showing uniqueness of Nash equilibria for rent-seeking games from the existing literature (\citet{SO-contest97}, \citet{Cornes05}). While the existing literature shows the uniqueness of Nash equilibria by using the property of aggregative games, we establish the uniqueness by using the strategic equivalence of the rent-seeking game to a zero-sum game ($w$) and the convexity of $w$. Thus, our approach shows that recognizing zero-sum equivalent games via decomposition facilitates equilibrium analysis.
Though we adopt a rather simplifying assumption that $c_i(s_i)$ is linear, i.e.,
\[
c_i(s_i) = c_i s_i,
\]
where $c_i >0$ for all $i$, we believe that our method can be extended to the case where $c_i(\cdot)$ is convex (or non-linear) or to other classes of zero-sum equivalent games. We also set $v=1$ for simplicity.
The idea of showing the uniqueness of Nash equilibria is as follows. First, we show that $w^{(i)}(s_i, s_{-i})$ in \eqref{eq:w-fun} is convex in $s_{-i}$ and, thus, the set of Nash equilibria is convex (Lemma \ref{lem:contest1}). Second, we show that when we study the Nash equilibrium of the contest games using the $\Phi_f$ function, it is enough to examine the $\Phi_f$ function defined over the set of players whose action levels are strictly positive, namely the active player set, denoted by $P$, where $P \subset \{1,\cdots, n \}$ and $|P| \geq 2$ (Lemma \ref{lem:contest2}). Then we show that the $\Phi_f$ function defined over $P$ is strictly convex (Lemma \ref{lem:contest3}). This implies that each $P$ set admits at most one Nash equilibrium (Lemma \ref{lem:contest4}). That is, if we define $S(P)$ by
\[
S(P):= \{ (s_1, \cdots, s_n) : s_i >0 \text{ for } i \in P \text{ and } s_j =0 \text{ for } j \not \in P \},
\]
then each $S(P)$ contains at most one Nash equilibrium. Finally, if there exist two different Nash equilibria, $s^*$ and $t^*$, such that $s^* \in S(P)$ and $t^* \in S(P')$ and $P \neq P'$, then the convexity of Nash equilibria implies that there are infinitely many Nash equilibria, which contradicts the fact that each distinctive $P$ can admit at most one Nash equilibrium. Thus, there exists a unique Nash equilibrium for rent-seeking games defined by \eqref{eq:r-s-games} (Proposition \ref{prop:contest}). We provide all detailed steps and lemmas in Appendix \ref{appen:app}.
\begin{prop} \label{prop:contest}
The Nash equilibrium for a rent-seeking game defined in \eqref{eq:r-s-games} is unique.
\end{prop}
\begin{proof}
See Proposition \ref{appen:prop-app2}.
\end{proof}
\tocless \section{Conclusion \label{sec:con}}
In this study, we developed decomposition methods for classes
of games such as zero-sum equivalent games, zero-sum equivalent potential games, zero-sum normalized games, and identical interest normalized games. Our methods rely on the properties of commuting projections in the vector space of games, and identifications of subspaces of games by the ranges and kernels of these projections. The identifications are based on the characterizations for various classes of games.
Next, we showed that two-player finite zero-sum equivalent games have a unique Nash equilibrium. We then studied the class of zero-sum equivalent potential games and showed that two-player finite zero-sum equivalent potential games have generically a unique strictly dominant Nash equilibrium. We also showed that identical interest normalized games and zero-sum normalized games have a uniform mixed strategy Nash equilibrium. Based on these, we provide two specific applications. In the first application, we demonstrate that decomposition can single out the effect of component games on the Nash equilibrium of the original game. In the second application, the uniqueness of Nash equilibria for rent-seeking games is shown based on the special property of zero-sum equivalent games.
\appendix
\section*{\large{Appendix: for publication}}
\com{
\tableofcontents
\com{
\noindent\textbf{\Large{}Appendix}{\Large \par}
}
}
\renewcommand{\thesection}{A}
\section{Decomposition results\label{appen:decomp_games} }
\subsection{Decomposition via projections\label{appen:projection_games} }
Let $V$ be a vector space. We say that $V$ is the direct sum of $V_1$ and $V_2$ and write $V=V_1 \oplus V_2$ if any $x \in V$ can be written uniquely as $x=x_1+x_2$ with $x_i\in V_i$, $i=1,2$.
Recall that a linear map $P:V \rightarrow V$ is a projection if $P^2 = P$. Then $I-P$ is also a projection and from writing $x =Px + (I- P)x$ we obtain the direct sum decomposition:
\begin{equation}\label{eq:direct-sum}
V=R(P) \oplus K(P) = R(P) \oplus R(I-P) = K(I-P) \oplus K(P)\,,
\end{equation}
where $R(P)$ and $K(P)$ are the range and kernel of the map $P$, respectively.
Note the following elementary properties for projections.
\begin{lem} \label{lem:vec-proj} Let $P_1$ and $P_2$ be two commuting projections on the vector space $V$. \\
(i) Then $P_1P_2$ is a projection
and
\begin{eqnarray}
R(P_1 P_2) &=& R(P_1) \cap R(P_2)\,, \label{P12R} \\
K(P_1 P_2) &=& K(P_1) + K(P_2) \,. \label{P12K}
\end{eqnarray}
(ii) $P_1 + P_2$ is a projection if and only if $P_1P_2=0$, in which case we have
\begin{eqnarray}
R(P_1+P_2)=R(P_1) \oplus R(P_2) \label{P1+2R} \,, \\
K(P_1+P_2)=K(P_1)\cap K(P_2) \,. \label{P1+2K}
\end{eqnarray}
\end{lem}
\begin{proof}
Some of these results are presented, for example, in Chapter 9 in \citet{Kreyszig89}. For completeness, we present the complete proof of all statements. \\
(i) We have $(P_1 P_2)^2=P_1^2 P_2^2 =P_1 P_2$ so $P_1P_2$ is a projection.
\noindent
To prove \eqref{P12R} note that if $x \in R(P_1P_2)$ then $x = P_1P_2y = P_2P_1 y$ and thus $x \in R(P_1)\cap R(P_2)$. Conversely if $x \in R(P_1) \cap R(P_2)$ then $x = P_1 y =P_2z$ and thus $x =P_1y =
P_1 P_2y + P_1(I-P_2)y = P_1P_2 y + (I-P_2)P_2 z = P_1P_2y$ and thus $x \in R(P_1P_2)$.
\noindent
To prove \eqref{P12K} note that if $x \in K(P_1P_2)$ then $P_1 P_2x=0$ and then $x = P_2x +(I-P_2)x \in K(P_1) + K(P_2)$ since $P_1 P_2x=0$ and $P_2(I-P_2)x=0$.
Conversely if $x \in K(P_1) + K(P_2)$ then $x = y+z$ with $P_1y=P_2z=0$
and thus $P_1 P_2x = P_2 P_1 y + P_1 P_2 z=0$.
\noindent
(ii) Since $(P_1+P_2)^2 = P_1 + P_2 + 2P_1P_2$, $P_1+P_2$ is a projection if and only if $P_1 P_2=0$. The statements in (ii) follow by applying (i) to the projections $I-P_1$ and $I-P_2$ since $P_1P_2=0$ implies that $(I-P_1)(I-P_2)= I-(P_1+P_2)$. More specifically, to prove \eqref{P1+2R} we use \eqref{P12K} to obtain
\begin{eqnarray}
R(P_1+P_2)&=&K\left( (I-P_1)(I-P_2)\right)
= K( I-P_1)+ K( I-P_2 )
\nonumber \\
&=& R(P_1)+ R(P_2) = R(P_1) \oplus R(P_2) \,,
\end{eqnarray}
where in the last equality we used that by \eqref{P12R} $R(P_1)\cap R(P_2)=R(P_1P_2)= \{0\}$. To prove \eqref{P1+2K} we use \eqref{P12R} and obtain
\begin{eqnarray}
K(P_1+P_2)&=& R\left( (I-P_1)(I-P_2)\right)=R(I-P_1) \cap R(I-P_2) \nonumber \\
&=& K( P_1) \cap K( P_2 ) \,.
\end{eqnarray}
\end{proof}
From Lemma \ref{lem:vec-proj} we obtain the following vector decompositions.
\begin{prop} \label{appen:vec-decomp}
Let $P_1$ and $P_2$ be commuting projections such that $P_1(I- P_2) =0$.
Then we have
\begin{align}
\textbf{D1} \quad V = & R(P_1) \oplus K(P_1) \label{eq:vec-d1}\,,\\
\textbf{D2} \quad V = & R(P_2) \oplus K(P_2) \label{eq:vec-d2} \,,\\
\textbf{D3} \quad V = & R(P_1) \oplus K(P_2) \oplus (K(P_1) \cap R(P_2)) \label{eq:vec-d3}\,.
\end{align}
\end{prop}
\begin{proof}
The decompositions \eqref{eq:vec-d1} and \eqref{eq:vec-d2} are immediate from \eqref{eq:direct-sum}. For \eqref{eq:vec-d3} note that $P_1$ commutes with $I-P_2$ and thus by Lemma \ref{lem:vec-proj} $P_1 + I-P_2$ is a projection. From \eqref{P1+2R} and \eqref{P1+2K} we obtain
\begin{align*}
& K(P_1+ I - P_2) = K(P_1) \cap K(I-P_2) = K(P_1) \cap R(P_2)\\
& R(P_1 + I - P_2) = R(P_1) \oplus R(I-P_2) = R(P_1) \oplus K(P_2)
\end{align*}
from Lemma \ref{lem:vec-proj} (ii) and thus \eqref{eq:vec-d3} by \eqref{eq:direct-sum}.
\end{proof}
\subsection{Game-Theoretic Applications}
Let $f:=(f^{(1)}, \cdots, f^{(n)}): S \rightarrow \mathbb{R}^n$
where $f^{(i)}:S \rightarrow \mathbb{R}$ for $i=1,\cdots,n$.
We let
\begin{equation}
\left\Vert f^{(i)} \right\Vert_1 :=\int\left\vert f^{(i)}\right\vert dm, \,\,\, \quad
\left\Vert f\right\Vert :=\sum_{i=1}^{n}\int\left\vert f^{(i)}\right\vert dm\,. \,\,\,
\end{equation}
We let $L(S,\mathbb{R};m)=\left\{ u:S\to\mathbb{R}\,;\,u\mbox{ is measurable and }\|u\|_1 < \infty\right\}$
and consider the space of game payoffs given by the following vector space:
\begin{align*}
\mathcal{L}:= & L(S,\mathbb{R}^{n};m)=\left\{ f:S\to\mathbb{R}^{n}\,;\,f\mbox{ is measurable and }\|f\| < \infty\right\} \,.
\end{align*}
Note that $f\in\mathcal{L}$ if and only if $f^{(i)}\in L(S,\mathbb{R};m)$, for each $i=1,2,\cdots,n$.
Recall the operator $T_i u= \frac{1}{|S_i|}\int_{S_i} u(s) \, dm_i(s_i)$ given in equation \eqref{eq:def-t}.
\begin{lem} \label{lem:ti}
We have the following results: \\
(i) The operators $T_{i}$ are projections on $L(S,\mathbb{R};m)$. \\
(ii) The projections $T_{i}$ and $T_{j}$ commute for any $i,j$ and any product of the form $T_{i_{1}} \cdots T_{i_{k}} (I-T_{j_1}) \cdots (I-T_{j_l})$
is a projection on $L(S,\mathbb{R};m)$.
\end{lem}
\begin{proof}
(i) If $u \in L(S, \mathbb{R};m)$ then by Fubini's theorem $u(s)=u(s_i, s_{-i})$ is integrable with respect to $m_i(s_i)$ (for almost every $s_{-i}$) so that $T_i u$ is well defined and, by Fubini's theorem again, $T_i u$ is integrable with respect to $\prod_{l \not =i} m_l(s_l)$. Since $T_i u$ does not depend on $s_i$, $T_i u$ is integrable with respect to $m_i$ and we have $T_i^2 u = T_i u$ and $|T_iu| \le T_i|u|$. By Fubini's theorem again
\[
\|T_iu\|_1=\int |T_i u| dm = \int |T_i u| dm_i(s_i) \prod_{l \not =i} dm_l(s_l) \le \int T_i |u| dm_i(s_i) \prod_{l \not =i} dm_l(s_l) \,=\, \|u\|_1\,.
\]
and thus, $T_i$ is bounded.
(ii) Projections $T_i$ and $T_j$ commute by Fubini's theorem and thus by Lemma \ref{lem:vec-proj} any product of $T_i$'s and $I-T_j$'s is again a projection.
\end{proof}
In the next lemma we prove the basic properties of the projections introduced in Section 2, see the operators $\mathbf{S}$ and $\mathbf{P}$
defined in \eqref{eq:symm_opt}, $\mathbf{G}$ defined in \eqref{eq:orth-proj} and $\mathbf{V}$ defined in \eqref{eq:v-proj}. Also recall the convenient notation from \eqref{eq:u_M}: for any $M \subset N \,=\,\{1,\cdots,n \}$ we set $u_M = \prod_{l \not \in M}T_l \prod_{k \in M}(I-T_k)u$. By Lemma \ref{lem:ti} the map $u \mapsto u_M$ is a projection.
\begin{lem} \label{lem:proj-space}
We have the following results: \\
(i) $\mathbf{S}$, $\mathbf{P}$, and $\mathbf{G}$ are projections. \\
(ii) $\mathbf{S}$ and $\mathbf{G}$ commute and hence $\mathbf{SG}$ is a projection. \\
(iii) $\mathbf{V}$ is a projection. \\
(iv) $\mathbf{V}$ and $\mathbf{P}$ commute and $\mathbf{VP}=0$, hence $\mathbf{V+P}$ is a projection. \\
(v) $\mathbf{SG}$ and $\mathbf{I-V-P}$ commute and $\mathbf{SG} (\mathbf{I-V-P})=0$
\end{lem}
\begin{proof}
(i) It is easy to check that $\mathbf{S}$ is a projection and for $\mathbf{P}$ and $\mathbf{G}$ this follows from Lemma \ref{lem:ti} since $(\mathbf{P}f)^{(i)}= T_if^{(i)}$ and $(\mathbf{G}f)^{(i)}=f_N^{(i)}$. \\
(ii) We have
\[
(\mathbf{SG} f)^{(i)}= \frac{1}{n} \sum_{j=1}^{n} \prod_{l=1}^{n} (I-T_l)f^{(j)} =\prod_{l=1}^{n} (I-T_l)\frac{1}{n} \sum_{j=1}^{n} f^{(j)} =(\mathbf{GS}f)^{(i)}\,.
\]
(iii) First observe that
\begin{equation}\label{eq:con}
(u_M)_{M'} = \begin{cases}
0, & \mbox{if } M \neq M' \\
u_M, & \mbox{if } M = M'
\end{cases} \,.
\end{equation}
Equation \eqref{eq:con} implies that
if
\[
(h^{(1)}, \cdots, h^{(n)}) = \mathbf{V} f = (\sum_{M \ni 1} \frac{1}{|M|} \sum_{l \in M} f_M^{(l)}, \cdots, \sum_{M \ni n} \frac{1}{|M|} \sum_{l \in M} f_M^{(l)})
\]
then for any $M$ and $i,j \in M$ we have $h^{(i)}_M = h^{(j)}_M = \frac{1}{|M|}\sum_{l \in M} f^{(l)}_M$ which does not depend on $i,j$. Therefore
\[
\frac{1}{|M|} \sum_{ l \in M} h^{(l)}_M = \frac{1}{|M|}\sum_{l \in M} f^{(l)}_M \,.
\]
This implies that $\mathbf{V}^2=\mathbf{V}$ and thus $\mathbf V$ is a projection. \\
(iv) Note that if $i \in M$ we have
\[ T_i u_M = T_i \prod_{l \notin M} T_l \prod_{k \in M} (I-T_k) u =
\prod_{l \notin M} T_l \prod_{k \in M} (I-T_k) (T_i u) = (T_i u)_M =0
\]
where the last equality follows from $\displaystyle T_i \prod_{k \in M} (I-T_k) =
T_i(I-T_i) \prod_{\substack {k \in M \\ k \not=i}} (I-T_k)=0$.
Therefore
\begin{align}
(\mathbf{PV} f)^{(i)} & = T_i (\mathbf{V}f)^{(i)} = T_i \sum_{M \ni i} \frac{1}{|M|} \sum_{j \in M} f_M^{(j)} =0 \\
(\mathbf{VP}f)^{(i)}&= \sum_{M \ni i} \frac{1}{|M|} \sum_{j \in M} (T_jf^{(j)})_M = 0
\end{align}
Thus $\mathbf{PV}=\mathbf{VP}=0$ and by Lemma \ref{lem:vec-proj} $\mathbf{V} + \mathbf{P}$ is a projection.
\\
(v) Note first that the fact $\mathbf{SGP} = \mathbf{PSG}=0$ is proved exactly as in (iv) since $(\mathbf{SG}f)^{(i)} = \frac{1}{|N|}\sum_{j \in N} f^{(j)}_N$.
Moreover, if we let
\[
(\mathbf{D} f)^{(i)}= \sum_{\substack{M \ni i \\ M \neq N} } \frac{1}{|M|} \sum_{j \in M} f_M^{(j)}
\]
we have $\mathbf{V} = \mathbf{SG} + \mathbf{D}$. Therefore we have
\begin{equation}\label{eq:sgd}
\mathbf{SG}( \mathbf{I} - \mathbf{V} - \mathbf{P})= -\mathbf{SG} \mathbf{D} \quad \textrm{ and }
( \mathbf{I} - \mathbf{V} - \mathbf{P})\mathbf{SG} = -\mathbf{D} \mathbf{SG}
\end{equation}
To conclude, recall that $(u_M)_{M'}=0$ if $M \not= M'$ in \eqref{eq:con}. From the
definition of $\mathbf{D}$ we note that $((\mathbf{D}f)^{(i)})_N=0$ and thus
\[
(\mathbf{SG} \mathbf{D} f)^{(i)} = \frac{1}{|N|} \sum_{j \in N} ((\mathbf{D}f)^{(j)})_N \,=\,0\,.
\]
On the other hand we have $((\mathbf{SG}f)^{(i)})_M =0$ if $M \not= N$
and thus
\[
( \mathbf{D} \mathbf{SG} f)^{(i)} \,=\, \sum_{\substack{M \ni i \\ M \neq N} } \frac{1}{|M|} \sum_{j \in M} ((\mathbf{SG}f)^{(j)})_M =0
\]
This proves that $\mathbf{SG} \mathbf{D}= \mathbf{D}\mathbf{SG} =0$ and therefore by \eqref{eq:sgd} (v) holds.
\end{proof}
\noindent \textbf{Proof of Theorem \ref{thm:main}.}
We let
\[
P_1 = \mathbf{SG}, \,\,\, P_2 = \mathbf{V + P}
\]
in \eqref{eq:vec-d1}, \eqref{eq:vec-d2}, and \eqref{eq:vec-d3}. From Lemma \ref{lem:proj-space} and Proposition \ref{appen:vec-decomp}, we have the following decomposition:
\begin{alignat}{2}
\textbf{D1} \quad \mathcal{L} = & R(\mathbf{SG}) \oplus K(\mathbf{SG}) \label{eq:d1} \\
\textbf{D2} \quad \mathcal{L} = & R(\mathbf{V+P}) \oplus K(\mathbf{V+P}) \label{eq:d2} \\
\textbf{D3} \quad \mathcal{L} = & R(\mathbf{SG}) \oplus K(\mathbf{V+P}) \oplus (K(\mathbf{SG}) \cap R(\mathbf{V+P})) \label{eq:d3}
\end{alignat}
Then from Propositions \ref{appen-prop:iinorm}, \ref{prop:zero-equiv}, \ref{appen-prop:pot-char}, and \ref{prop:zero-norm}, proven below, we obtain the decompositions in Theorem \ref{thm:main}.
$\square$
Note that we also have
\begin{equation}\label{eq:char-b}
K(\mathbf{SG}) \cap R(\mathbf{V+P})= K(\mathbf{SG}) \cap K(\mathbf{I-(V+P)})=K(\mathbf{I-(D+P)})= R(\mathbf{D+P}).
\end{equation}
Next, we will show that each of these ranges and kernels in \eqref{eq:d1}, \eqref{eq:d2}, and \eqref{eq:d3} are indeed equivalent to the corresponding subspaces presented in Theorem \ref{thm:main}.
\begin{prop}[\textbf{Identical interest normalized games}] \label{appen-prop:iinorm}
We have
\[
R(\mathbf{SG}) = \mathcal{I} \cap \mathcal{N}.
\]
\end{prop}
\begin{proof}
Let $f \in R(\mathbf{SG})$. Then $f = \mathbf{SG}f=\mathbf{GS} f$. Thus $f \in \mathcal{I}\cap \mathcal{N}$. Suppose that $f \in \mathcal{I}\cap \mathcal{N}$. Then $f=(h,\cdots, h)$ for some scalar valued function $h$ such that
\[
\int_{S_i} h(s) d m_i(s_i) =0 \,\,\text{ for all } i.
\]
Thus, $ \mathbf{S} f = f$. Also since $(I-{T}_i) h = h - \int_{S_i} h \, dm_i = h $ for all $i$, we have $\mathbf{G} f= f$. Thus $\mathbf{SG} f = \mathbf{G} f = f$ and we have $f \in R(\mathbf{SG})$. \end{proof}
\begin{prop}[\textbf{Zero-sum equivalent games}] \label{prop:zero-equiv}
We have
\[
K(\mathbf{SG}) = \mathcal{Z}+\mathcal{E}
\]
\end{prop}
\begin{proof}
We first have the following equivalence: $ f \in K(\mathbf{SG}) \iff \mathbf{SG} f =0$. Thus we will show that $ \mathbf{SG} f =0 \iff f \in \mathcal{Z} + \mathcal{E}$ or equivalently,
\[
\sum_{i=1}^{n}f^{(i)}\in K(\prod_{l=1}^{n}(I-T_{l})) \iff f\in\mathcal{Z}+\mathcal{E}
\]
\noindent $(\impliedby)$ If $f\in\mathcal{Z}+\mathcal{E}$,
then $f^{(i)}=g^{(i)}+h^{(i)}$, where $\sum_{i=1}^{n}g^{(i)}=0$
and $h^{(i)}\in R(T_{i})$. Therefore, we have
\[
\sum_{i=1}^{n}f^{(i)}=\sum_{i=1}^{n}T_{i}q^{(i)},
\]
for some $q^{(1)},\cdots,q^{(n)}$, and clearly, we have $\left(\prod_{l=1}^{n}(I-T_{l})\right)(\sum_{i=1}^{n}T_{i}q^{(i)})=0$.
\noindent$(\implies)$ Conversely, suppose that $\sum_{i=1}^{n}f^{(i)}\in K(\prod_{l=1}^{n}(I-T_{l}))$.
Then, for each $i$,
\[
f^{(i)}= \prod_{l=1}^{n}(I-T_{l}) f^{(i)} + (I-\prod_{l=1}^{n} (I-T_{l})) f^{(i)} =:m^{(i)}+n^{(i)}.
\]
Then $ \sum_{i=1}^{n} m^{(i)} = \sum_{i=1}^{n} \prod_{l=1}^n (I-T_l) f^{(i)} =0 $ because $\sum_{i=1}^{n}f^{(i)}\in K(\prod_{l=1}^{n}(I-T_{l}))$. Also since $n^{(i)}\in K(\prod_{l=1}^{n}(I-T_{l}))=R(T_{1})+\cdots+R(T_{n})$,
we have, for each $i$,
\[
n^{(i)}\,=\,\sum_{j=1}^{n}T_{j}n_{j}^{(i)},
\]
for some $\{n_{j}^{(i)}\}_{j=1}^{n}$. In this way, we find $\{n_{j}^{(i)}\}_{i,j}$.
For each $i$, we write
\[
n^{(i)}\,=\,\left(\sum_{j=1}^{n}T_{j}n_{j}^{(i)}-\sum_{j=1}^{n}T_{i}n_{i}^{(j)}\right)+\underbrace{\sum_{j=1}^{n}T_{i}n_{i}^{(j)}}_{\in R{(T_{i})}}\,.
\]
Then, we have
\[
\sum_{i=1}^{n}(\sum_{j=1}^{n}T_{j}n_{j}^{(i)}-\sum_{j=1}^{n}T_{i}n_{i}^{(j)})\,=\,0.
\]
Thus $f=(f^{(1)},\cdots, f^{(n)})$ can be written as
\begin{align*}
= & \underbrace{(m^{(1)},\cdots, m^{(n)}) + (\sum_{j=1}^{n}T_{j}n_{j}^{(1)}-\sum_{j=1}^{n}T_{1}n_{1}^{(j)}, \cdots, \sum_{j=1}^{n}T_{j}n_{j}^{(n)}-\sum_{j=1}^{n}T_{n}n_{n}^{(j)})}_{\in \mathcal{Z}} \\ + & \underbrace{(\sum_{j=1}^{n}T_{1}n_{1}^{(j)}, \cdots, \sum_{j=1}^{n}T_{n}n_{n}^{(j)})}_{\in \mathcal{E}}
\end{align*}
This shows that $f\in\mathcal{Z}+\mathcal{E}$, and concludes the
proof of the claim. \\
\end{proof}
The following proposition gives detailed characterizations for potential games.
\begin{prop}[\textbf{Potential games}] \label{appen-prop:pot-char}
The following statements are equivalent. \\
(i) $f$ is a potential game. \\
(ii)
\begin{equation}\label{eq:pot-con}
(I-T_i)(I-T_j) f^{(i)} =(I-T_i)(I-T_j) f^{(j)} \textrm{ for all } i,j \,.
\end{equation}
(iii)
\begin{equation}\label{eq:appen-pot-con2}
f^{(i)}_M = \frac{1}{|M|} \sum_{j \in M} f^{(j)}_M \textrm{ for all } i \in M \textrm{ and for all non-empty } M .
\end{equation}
(iv)
\begin{equation}\label{eq:pot-con3}
f^{(i)} = T_i f^{(i)} + \sum_{M \ni i} \frac{1}{|M|} \sum_{j \in M }f^{(j)}_M \textrm { for all } i.
\end{equation}
\noindent Thus we have
\[
R(\mathbf{V+P})=\mathcal{I}+\mathcal{E}.
\]
\end{prop}
\begin{proof}
((i) $\implies$ (ii)) Suppose that $f$ is a potential game. Then for all $i$, $f^{(i)}(s)= \phi + T_i h^{(i)}$ for some $h^{(i)}$. Thus we have
\[
(I-T_i)(I-T_j)(f^{(i)}(s)-f^{(j)}(s)) = (I-T_i)(I-T_j)(T_i h^{(i)}(s_{-i})- T_j h^{(j)}(s_{-j}))=0
\]
\noindent ((ii) $\implies$ (iii)) Suppose that condition \eqref{eq:pot-con} holds and let $M$ be such that $i,j \in M$. Then
\begin{align}
f_M^{(i)}=& \prod_{l \notin M} T_l \prod_{k \in M}(I-T_k) f^{(i)} \nonumber \\
=& \prod_{l \notin M} T_l \prod_{\substack{k \in M
\\ k \neq i,j} }(I-T_k) (I-T_i)(I-T_j) f^{(i)} \nonumber\\
=& \prod_{l \notin M} T_l \prod_{\substack{k \in M
\\ k \neq i,j} }(I-T_k) (I-T_i)(I-T_j) f^{(j)}
= f_M^{(j)}\nonumber
\end{align}
and therefore $f_M^{(i)}$ is independent of $i$ if $i \in M$ and thus \eqref{eq:appen-pot-con2} holds.\\
\noindent ((iii) $\implies$ (iv)) If \eqref{eq:appen-pot-con2} holds, we have
\begin{align}
f^{(i)} =& T_i f^{(i)} + (I-T_i)f^{(i)} \nonumber\\
=& T_i f^{(i)} + \sum_{M \ni i} f_M^{(i)} \nonumber\\
=& T_i f^{(i)} + \sum_{M \ni i} \frac{1}{|M|} \sum_{j\in M} f_M^{(j)} \,. \nonumber
\end{align}
which establishes \eqref{eq:pot-con3}. \\
\noindent ((iv) $\implies$ (i)) Suppose that \eqref{eq:pot-con3} holds. Then
\[
f^{(i)} = \sum_{M \ni i} \frac{1}{|M|}\sum_{j \in M }f^{(j)}_M +T_i f^{(i)} = \underbrace{\sum_{\substack{M \subset N \\ M \neq \emptyset}} \frac{1}{|M|} \sum_{j \in M }f^{(j)}_M}_{=: \phi} \underbrace{ -\sum_{\substack{M \not \ni i \\ M \neq \emptyset}} \frac{1}{|M|} \sum_{j \in M }f^{(j)}_M +T_i f^{(i)}}_{=: h^{(i)}}
\]
where the first term $\phi$ does not depend on $i$
and the second term $h^{(i)}$ satisfies $h^{(i)}=T_ih^{(i)}$ since $f_M^{(j)}=T_i f_M^{(j)}$ if $ i \notin M$. This shows that $f$ is a potential game.
\end{proof}
\begin{prop}[\textbf{Zero-sum normalized games}] \label{prop:zero-norm}
We have
\[
K(\mathbf{V+P}) = \mathcal{Z} \cap \mathcal{N}
\]
\end{prop}
\begin{proof}
We will show that
$f$ is a zero-sum normalized game if and only if
\begin{equation}\label{eq:char-zn}
\sum_{M \ni i } \frac{1}{|M|} \sum_{j \in M} f_M^{(j)} + T_i f^{(i)} =0
\end{equation}
for all $i$.
Suppose that $f$ is a zero-sum normalized game. Let $i$ be fixed. Then since $f$ is normalized, $T_i f^{(i)}=0$. Also if $j \not \in M$, then $f^{(j)}_M=0$, again since $f$ is normalized. Thus if $f$ is normalized, then
\[
\sum_{j \not \in M} f_M^{(j)} = 0.
\]
Thus we find that
\[
\sum_{M \ni i } \frac{1}{|M|} \sum_{j \in M} f_M^{(j)} = \sum_{M \ni i } \frac{1}{|M|} \sum_{j =1}^n f_M^{(j)} =0
\]
since $f$ is a zero-sum game. \\
Now suppose that \eqref{eq:char-zn} holds. Let $i$ be fixed and $M' \ni i$. Applying $\prod_{l \not \in M'} T_l \prod_{k \in M'} (I-T_k) $ at \eqref{eq:char-zn}, from \eqref{eq:con}, we find that
\[
\frac{1}{|M'|} \sum_{j \in M'} f_{M'}^{(j)} =0
\]
Thus for all $M' \ni i$, we have $\frac{1}{|M'|} \sum_{j \in M'} f_{M'}^{(j)} =0$. Thus $T_i f^{(i)}=0$. That is, $f$ is normalized. By varying $i$, we also find that $\sum_{j \in M} f_M^{(j)}=0$ for all $M \neq \emptyset $. Also note that since $f$ is normalized, $(\sum_{j=1}^{n} f^{(j)})_{\emptyset} =0$ and $\sum_{j \notin M} f_M^{(j)} =0$.
Thus, we find that
\[
\sum_{M \subset N}(\sum_{j=1}^{n} f^{(j)})_M=\sum_{\substack{M \subset N \\ M \neq \emptyset} }(\sum_{j=1}^{n} f^{(j)}_M) =\sum_{\substack{M \subset N \\ M \neq \emptyset} }(\sum_{j \in M } f^{(j)}_M) =0
\]
Thus we have $\sum_{j=1}^{n} f^{(j)}=0$.
\end{proof}
\com{
Also it is easy to check that
\[
range(\mathcal{GS})=(\mathcal{I} \cap \mathcal{N}), \,\, range(\mathcal{G}(I-\mathcal{S}))=(\mathcal{Z} \cap \mathcal{N} ), \,\,\, range(I- \mathcal{G})= \mathcal{B}
\]
}
\section*{\large{Appendix: Only for online publication}}
\renewcommand{\thesection}{B}
\section{Other proofs}\label{appen:other-proofs}
\begin{proof}[\textbf{Proof of Proposition \ref{prop:zero-convex}.}]\label{proof:zero-convex}
We show (ii) ((i) follows similarly).
Let $i$ and $s_{i}$ be fixed. Then from the discussion before the proposition, $w^{(i)}$ is concave in $s_i$ for all $i$. Thus there exists a Nash equilibrium. We next show that $\Phi(s)=\sum_{i}\max_{s_{i}\in S_{i}}w^{(i)}(s_{i},s_{-i})$
is strictly convex. Let $t'$, $t''\in S$ be given. Then let $u'$, $u''\in S$
be such that $w^{(i)}(u_{i}',t_{-i}')=\max_{s_{i}\in S_{i}}w^{(i)}(s_{i},t_{-i}')$
and $w^{(i)}(u_{i}'',t_{-i}'')=\max_{s_{i}\in S_{i}}w^{(i)}(s_{i},t_{-i}'')$
for all $i$. Let $\alpha\in(0,1)$ and $t^{*}$ be such that $w^{(i)}(t_{i}^{*},((1-\alpha)t'+\alpha t'')_{-i})=\max_{s_{i}\in S_{i}}w^{(i)}(s_{i},((1-\alpha) t'+ \alpha t'')_{-i})$
for all $i$. Then we have
\begin{align*}
& (1-\alpha)\Phi(t')+\alpha \Phi(t'') =(1-\alpha)\sum_{i}w^{(i)}(u_{i}',t_{-i}')+\alpha \sum_{i}w^{(i)}(u_{i}'',t_{-i}'')\\
& \ge(1-\alpha)\sum_{i}w^{(i)}(t_{i}^{*},t_{-i}')+\alpha \sum_{i}w^{(i)}(t_{i}^{*},t_{-i}'') >\sum_{i}w^{(i)}(t_{i}^{*},(1-\alpha)t'_{-i}+\alpha t''_{-i})\\
& =\Phi((1-\alpha)t'+\alpha t'').
\end{align*}
Thus $\Phi(s)$ is strictly convex and the minimizer of $\Phi$ is unique. Since the Nash equilibrium is a minimizer of $\Phi$, the Nash equilibrium is unique.
\end{proof}
\begin{proof}[\textbf{Proof of Corollary \ref{cor:2p-zero-finite}.}]\label{proof:2p-zero-finite}
Let $f=w+h$, where $h$ is a non-strategic game. Then, $w^{(1)}(\sigma_{1},\sigma_{2})$
is convex in $\sigma_{2}$ and $w^{(2)}(\sigma_{1},\sigma_{2})$ is
convex in $\sigma_{1}.$ By Proposition \ref{prop:zero-convex}, the set of Nash equilibria is convex.
Suppose that $f$ has two distinct Nash equilibria, $\rho^{*}$ and $\sigma^{*}$, where
$\rho^{*}\ne\sigma^{*}.$ Then, for all $t\in(0,1)$, $(1-t)\rho^{*}+t\sigma^{*}$ is a Nash equilibrium
since the set of Nash equilibria is convex. This contradicts Condition \hyperlink{con-N}{\textbf{(N)}} because of Lemma 2.2 in \citet{Quint&Shubik97}.
\end{proof}
\begin{proof}[\textbf{Proof of Proposition \ref{prop:n-p-pop}}]\label{proof:prop:n-p-pop}
We let
\[
\mathcal{D} := \{ f \in \mathcal{L} : f^{(i)}(s) := \sum_{l \neq i} \zeta_l (s_{-l}) \text{ for all } i \}
\]
We first show that
\[
\mathcal{B}=(\mathcal{Z}+\mathcal{E})\cap(\mathcal{I}+\mathcal{E})=\mathbf{S}(\mathbf{P}(\mathcal{L}))+\mathcal{E}.
\]
Let $f\in(\mathcal{Z}+\mathcal{E})\cap(\mathcal{I}+\mathcal{E})$.
Then, $f=g_{1}+h_{1}$, for $g_{1}\in\mathcal{Z}$ and$\ h_{1}\in\mathcal{E}$,
and $f=g_{2}+h_{2}$, for $g_{2}\in\mathcal{I}$ and $h_{2}\in\mathcal{E}.$
Thus, we have
\begin{equation}
g_{1}+h_{1}=g_{2}+h_{2},\label{eq:pr}
\end{equation}
and applying $\mathbf{S}$ to \eqref{eq:pr}, we obtain
\[
f=\mathbf{S}(h_{1}-h_{2})+h_{2}.
\]
Thus, since $h_{1}-h_{2}\in \mathbf{P} (\mathcal{L})$, $f\in \mathbf{S}(\mathbf{P}(\mathcal{L}))+\mathcal{E}$.
Conversely, let $f\in \mathbf{S}(\mathbf{P}(\mathcal{L}))+\mathcal{E}$. Obviously,
$f\in\mathcal{I}+\mathcal{E}$. In addition, $f=\mathbf{S}(\mathbf{P}(g))+h_{1}$,
for $\ g\in\mathcal{L}$ and $h_{1}\in\mathcal{E}.$ Thus,
\[
f=\mathbf{S}(\mathbf{P}(g))+h_{1}=-(\mathbf{I}-\mathbf{S})(\mathbf P(g))+\mathbf P(g)+h_{1}\in\mathcal{Z}+\mathcal{E}\text{.}
\]
This shows that
\[
(\mathcal{Z}+\mathcal{E})\cap(\mathcal{I}+\mathcal{E})=\mathbf{S}(\mathbf P(\mathcal{L}))+\mathcal{E}\text{.}
\]
Note that
\begin{align*}
\mathbf{S}(\mathbf{P}(\mathcal{L}))+\mathcal{E} & =\{f:f^{(i)}=\sum_{l=1}^{n} \zeta_{l}(s_{-l})\text{ for some }\{\zeta_{l}\}_{l=1}^{n}\text{ and for all }i\}+\mathcal{E}\\
& =\{f:f^{(i)}=\sum_{l\neq i}\zeta_{l}(s_{-l})\text{ for some }\{\zeta_{l}\}_{l=1}^{n}\text{ and for all }i\}+\mathcal{E}\\
& =\mathcal{D}+\mathcal{E}.
\end{align*}
Now observe that
\begin{align*}
& (\sum_{l\neq1}\zeta_{l}(s_{-l}),\sum_{l\neq2}\zeta_{l}(s_{-l}),\cdots,\sum_{l\neq n}\zeta_{l}(s_{-l}))\\
& \sim(\sum_{l=1}^{n}\zeta_{l}(s_{-l}),\sum_{l=2}^{n}\zeta_{l}(s_{-l}),\cdots,\sum_{l=1}^{n}\zeta_{l}(s_{-l})).
\end{align*}
Hence, the first result follows from $\mathcal{D}+\mathcal{E}=(\mathcal{I}+\mathcal{E})\cap(\mathcal{Z}+\mathcal{E})$.
For the second result, observe that
\begin{align*}
(\sum_{l\neq1}\zeta_{l},\sum_{l\neq2}\zeta_{l},\cdots,\sum_{l\neq n}\zeta_{l}) & \sim(\sum_{l\neq1}\zeta_{l}-(n-1)\zeta_{1},\sum_{l\neq2}\zeta_{l}-(n-1)\zeta_{2},\cdots,\sum_{l\neq n}\zeta_{l}-(n-1)\zeta_{n})\\
& =(\sum_{l\neq1}(\zeta_{l}-\zeta_{1}),\sum_{l\neq2}(\zeta_{l}-\zeta_{2}),\cdots,\sum_{l\neq n}(\zeta_{l}-\zeta_{n}))\\
& =(\sum_{l>1}(\zeta_{l}-\zeta_{1}),\zeta_{1}-\zeta_{2},\zeta_{1}-\zeta_{3},\cdots,\zeta_{1}-\zeta_{n})\\
& +(0,\sum_{l>2}(\zeta_{l}-\zeta_{2}),\zeta_{2}-\zeta_{3},\cdots, \zeta_{2}-\zeta_{n})+\cdots\\
& +(0,0,\cdots,\sum_{l>n-1}(\zeta_{l}-\zeta_{n-1}),\zeta_{n-1}-\zeta_{n})\\
& =\sum_{i=1}^{n}\sum_{j>i}(0,\cdots,0,\underbrace{-\zeta_{i}+\zeta_{j}}_{i-\text{th}},0,\cdots,0,\underbrace{\zeta_{i}-\zeta_{j}}_{j-\text{th}},0,\cdots,0) \\
& =\sum_{i<j}(0,\cdots,0,\underbrace{-\zeta_{i}+\zeta_{j}}_{i-\text{th}},0,\cdots,0,\underbrace{\zeta_{i}-\zeta_{j}}_{j-\text{th}},0,\cdots,0).
\end{align*}
\end{proof}
\begin{proof}[\textbf{Proof of Corollary \ref{prop:2p-both}}]\label{proof:2p-both}
(i) This immediately follows from Proposition \ref{prop:n-p-pop}. (ii) From the second part of Corollary \ref{prop:2p-both}, $ (s^*_1, s^*_2) \in (\arg \max_{s_1} \zeta_2(s_1), \arg \max_{s_2} \zeta_1(s_2))$ is a Nash equilibrium.
If there are two distinct maximizers, then since the set of maximizers is convex, there exist infinitely many Nash equilibria, contradicting Condition \hyperlink{con-N}{\textbf{(N)}} again by Lemma 2.2 in \citet{Quint&Shubik97}. Thus, the maximizer is unique and constitutes the strictly dominant Nash equilibrium.
\end{proof}
\begin{proof}[\textbf{Proof of Proposition \ref{prop:norm-zero-ci}}]\label{proof:norm-zero-ci}
Let $ d \sigma_i(s_i) = \frac{1}{m(S_i)} d m_i (s_i)$ be player $i$'s uniform mixed strategy. We define a uniform mixed strategy profile as a product measure of uniform mixed strategies: i.e.,
\[
d \sigma (s) = \prod_i d \sigma_i(s_i).
\]
Let $i$ and $s_{i}$ be fixed. We show that
\[
f^{(i)}(s_{i},\sigma_{-i})=0.
\]
Then, the desired result follows since $f^{(i)}(s_{i},\sigma_{-i})=0=f^{(i)}(\sigma_{i},\sigma_{-i})$
for all $i$ and $s_{i}$; hence, $f^{(i)}(\sigma_{i},\sigma_{-i})= \max_{s_i} f^{(i)}(s_i, \sigma_{-i})$ for all $i$. First, by the definition of the mixed strategy extension,
\[
f^{(i)}(s_{i},\sigma_{-i})=\int_{s_{-i}\in S_{-i}} f^{(i)}(s_{i},s_{-i})\prod_{l\neq i} d \sigma_l (s_l).
\]
If $f$ is a zero-sum normalized game, then
\[
f^{(i)}(s_{i},\sigma_{-i})= - \int_{s_{-i}\in S_{-i}} \sum_{j \neq i} f^{(j)}(s_{j},s_{-j}) \prod_{l\neq i} d \sigma_l (s_l) = - \sum_{j \neq i} \int_{s_{-i}\in S_{-i}} f^{(j)}(s_{j},s_{-j}) \prod_{l\neq i} d \sigma_l (s_l) =0
\]
where the last equality follows from the normalization, $\int_{s_l \in S_l} f^{(l)}(s_l, s_{-l}) d \sigma_l (s_l) =0$ for all $l$ and Fubini's Theorem. If $f$ is an identical interest game, then similarly
\[
f^{(i)}(s_{i},\sigma_{-i})=\int_{s_{-i}\in S_{-i}} v(s_{i},s_{-i})\prod_{l\neq i} d \sigma_l (s_l) =0
\]
where the last equality again follows from the normalization, $\int_{s_l \in S_l} v(s_l, s_{-l}) d \sigma_l (s_l) =0$ for all $l$.
Thus, we obtain the desired result.
\end{proof}
\renewcommand{D}{C}
\section{Details for Section \ref{sec:app} \label{appen:app}}
\subsection{Finite strategy games}
\begin{table}[t]
\centering
\scalefont{0.9}
\begin{tabular}{c|c|c|c}
\hline
& Identity payoff & Zero-sum & Both Potential \\
& Normalized & Normalized & and Zero-sum \\
& $\mathcal{I}\cap\mathcal{N}$ & $\mathcal{Z}\cap\mathcal{N}$ & $\mathcal{B}$ \\
\hline
Dimensions & $\frac{(l-1)l}{2}$ & $\frac{(l-2)(l-1)}{2}$ & $2 l-1$ \\
\hline
Basis Games
& $\begin{pmatrix}
1 & -1\\
-1 & 1
\end{pmatrix}$
& $\begin{pmatrix}
0 & -1 & 1\\
1 & 0 & -1\\
-1 & 1 & 0
\end{pmatrix}$
& $\begin{pmatrix}
1 & 1\\
0 & 0
\end{pmatrix}$
$\begin{pmatrix}
1 & 0\\
1 & 0
\end{pmatrix}$
$\begin{pmatrix}
0 & 1\\
0 & 1
\end{pmatrix}$
\\
\hline
\end{tabular}
\caption{\textbf{Dimensions of subspaces and basis games for two-player symmetric games}}
\label{tab:basis}
\end{table}
\begin{lem} \label{appen:lem-c1}
We have the following results:\\
(i) The set of games $\{S^{(ij)}\}_{i=1,\cdots, l, j>i}$ forms a basis set for $\mathcal{I} \cap \mathcal{N}$ \\
(ii) The set of games $\{Z^{(ij)}\}_{i=2,\cdots, l, j>i}$ forms a basis set for $\mathcal{Z} \cap \mathcal{N}$ \\
(iii) The set of games $\{D^{(i)}\}_{i=1,\cdots, l-1}, \{E^{(i)}\}_{i=1,\cdots, l}$ forms a basis set for $\mathcal{B}$
\end{lem}
\begin{proof}
(i) We note that there are precisely $\frac{l(l-1)}{2}$ number of different $S^{(ij)}$'s. Thus, we only need to show that these $S^{(ij)}$'s are independent. Let $S$ be
\[
S:= \sum_{i=1}^{l} \sum_{j=i+1}^l \alpha^{(ij)} S^{(ij)}.
\]
Then it is easy to check that $S_{ij}=\alpha^{(ij)}$. Thus, if $S=\mathbf{O}$, then $\alpha^{(ij)}=0$ for all $i, j$. \\
(ii) Again we note that there are precisely $\frac{(l-2)(l-1)}{2}$ number of different $Z^{(ij)}$'s. Let $Z$ be
\[
Z:= \sum_{i=2}^{l} \sum_{j=i+1}^l \zeta^{(ij)} Z^{(ij)}.
\]
Then it is also easy to check that $Z_{ij}=-\zeta^{(ij)}$. Thus, if $Z=\mathbf{O}$, then $\zeta^{(ij)}=0$ for all $i, j$. \\
(iii) Again we note that there are precisely $l-1$ number of different $D^{(i)}$'s and $l$ number of different $E^{(i)}$'s. Let $K$ be
\[
K= \sum_{i=1}^{l-1}\delta_{i} D^{(i)}+ \sum_{i=1}^{l}\eta_{i} E^{(i)}
\]
Then if $K= \mathbf{O}$, then $\eta_i =0$ for all $i$ (because the last row of $K$ is given by $(\eta_1, \cdots, \eta_l)$) and this, in turn, implies that $\delta_i=0$ for all $i$.
\end{proof}
We have the following results.
\begin{prop} \label{appen:prop:app1}
We have the following results: \\
(i) Suppose that $\gamma_{ij}<0$ for all $i,j$. Then $\#(G) = 1$. \com{In addition, if $D=\mathbf{0}$, then the uniform mixed strategy is a unique NE.} \\
(ii) Suppose that $\gamma_{ij}>0$ for all $i,j$. Suppose that $\delta_i\geq 0$ for all $i$ and $\underline \gamma > \bar \delta + \bar \zeta$. Then $\#(G) = 2^l-1$.
\end{prop}
\begin{proof}[\textbf{Proof of Proposition \ref{appen:prop:app1} (ii)}]
We will show that $G$ satisfies the total band wagon property defined by \citet{Kandori98}. Then for any $A \subset \{1, 2, \cdots, l \}$, there exists a unique Nash equilibrium, which is completely mixed in $A$ and thus there exist precisely $2^l-1$ Nash equilibria (See \citet{Kandori98}). Thus, we will show that for all $q \in \Delta$, $BR(q) \subset \Sigma_q$, where $BR(q)$ is the set of all pure strategy best responses for $q$. Suppose that there exists $q \in \Delta$ such that $BR(q) \not \subset \Sigma_q$. Then we must have $\Sigma_q \neq \{1, \cdots, l\}$ (the set of all pure strategies) and there exists $k \not \in \Sigma_q$ such that
\[
e_k \cdot G q \geq q \cdot G q
\]
We define $\gamma_{ji}=\gamma_{ij}$ for $j > i$. First observe that we have
\[
q \cdot S q = \sum_{i <j } \gamma_{ij}(q_i - q_j)^2, \,\,\,\, e_k \cdot S q = \sum_{j<k} \gamma_{jk} (q_k - q_j) + \sum_{j>k} \gamma_{kj}(q_k - q_j) = \sum_{j \neq k} \gamma_{kj}(q_k -q_j)
\]
Next we define $\zeta_{ji} = - \zeta_{ij} \text{ for } j > i$. Again observe that we have
\[
q \cdot Z q = 0, \,\,\, \,\, e_1 \cdot Z q = \sum_{i<j} \zeta_{ij} (q_j - q_i), \,\,\, \,\,e_k \cdot Z q = \sum_{j \neq k } \zeta_{kj} (q_k - q_j)
\]
Thus
\begin{align*}
& e_1 \cdot Z q \leq \sum_{i<j} \max_{i<j} |\zeta_{ij}| |q_j - q_i| \leq \max_{i<j} |\zeta_{ij}| \sum_{i<j} |q_j - q_i| \leq \bar \zeta \\
& e_k \cdot Z q \leq \sum_{j \neq k} \max_{i<j} |\zeta_{ij}| |q_k - q_j| \leq \max_{i<j} |\zeta_{ij}| \sum_{j \neq k} |q_k - q_j| \leq \bar \zeta
\end{align*}
Next we let $d=(\delta_1, \cdots, \delta_l)^T$ and find that
\[
q \cdot D q = q \cdot d \geq 0, \,\,\,\, e_k \cdot D q = \delta _k
\]
since $\delta_i \geq 0$ for all $i$.
Then since $k \not \in \Sigma_q$ so $q_k=0$. Thus
\begin{align*}
& 0\leq q \cdot S q + q \cdot Z q + q \cdot D q = q \cdot G q \leq e_k \cdot G q \leq \sum_{j \neq k} \gamma_{kj}(q_k -q_j)+\bar \zeta + \bar \delta \\
& =- \sum_{j \neq k} \gamma_{kj} q_j + \bar \zeta + \bar \delta \leq -\sum_{j \in \Sigma_q}\gamma_{kj} q_j + \bar \zeta + \bar \delta \leq - \underline \gamma + \bar \zeta + \bar \delta <0
\end{align*}
which is a contradiction. The last inequality in the above follows from
\[
\sum_{ j \in \Sigma_q } \gamma_{kj} q_j \geq \sum_{ j \in \Sigma_q } \min_{ j \in \Sigma_q} \gamma_{kj} q_j \geq \sum_{ j \in \Sigma_q } \min_{ j \neq k} \gamma_{kj} q_j \geq \min_{j \neq k} \gamma_{kj} \sum_{j \in \Sigma_q} q_j = \min_{j \neq k} \gamma_{kj} \geq \underline \gamma
\]
\end{proof}
To show (i) of Proposition \ref{appen:prop:app1}, we recall the following definitions (from \citet{Hofbauer09}).
\begin{defn}
We say that \\
(i) a symmetric game $G$ is stable if $(q-p) \cdot G (q-p) \leq 0 \text{ for all } p, q \in \Delta$ \\
(ii) a symmetric game $G$ is strictly stable if $(q-p) \cdot G (q-p) < 0 \text{ for all } p \neq q \in \Delta$ \\
(iii) a symmetric game $G$ is null-stable if $(q-p) \cdot G (q-p) = 0 \text{ for all } p, q \in \Delta$
\end{defn}
Next we have the following well-known observation.
\begin{lem}
If $p$ satisfies
\[
(q-p) \cdot G q <0 \text{ for all } q \neq p \in \Delta
\]
then $p$ is a unique Nash equilibrium for a symmetric game, $G$.
\end{lem}
\begin{proof}
Since $G$ is finite, there exist a Nash equilibrium, say $p'$. We will show that $p' =p$. Suppose that $p' \neq p$. Then we find
\[
p \cdot G p' > p' \cdot G p'
\]
which shows that $p'$ is not a Nash equilibrium, a contradiction. Thus we must have $p'=p$. And this also shows that there cannot exist any other Nash equilibrium.
\end{proof}
We have the following characterization for the strict stability of $G$.
\begin{lem}
Suppose that $G$ is given by \eqref{eq:decomp-rep}. \\
(i) $G$ is strictly stable if $\gamma_{ij} < 0$ for all $i < j$. \\
(ii) $G$ is null stable if $\gamma_{ij} = 0$ for all $i < j$.
\end{lem}
\begin{proof}
(i) Let $T\Delta$ be the tangent space of $\Delta$ and $ z \neq 0$ and $z \in T \Delta$. Then since $G=S+Z+B$, $Bz=\mathbf{0}$,and $z\cdot Z z = 0$, we have
\[
z \cdot G z = z \cdot S z = z \cdot \sum_{i<j} \gamma_{ij} S^{(ij)} z = \sum_{i<j} \gamma_{ij}\, z \cdot S^{(ij)} z = \sum_{i<j} \gamma_{ij}(z_i - z_j)^2 \leq 0
\]
because $\gamma_{ij} <0$ for all $i< j$.
If $\sum_{i<j} \gamma_{ij}(z_i - z_j)^2 = 0$, then $\gamma_{ij}(z_i - z_j)^2=0$ for all $i<j$ and thus $z_i - z_j=0$ for all $i<j$, which is a contradiction to $z \neq 0$. Thus we have $z \cdot G z <0$. (ii) Let $z \in T\Delta$. If $\gamma_{ij}=0$ for all $i<j$, then we again have
\[
z \cdot G z = \sum_{i<j} \gamma_{ij}(z_i - z_j)^2 =0
\]
\end{proof}
\begin{proof}[\textbf{Proof of Proposition \ref{appen:prop:app1} (i)}]
Suppose that $\gamma_{ij} <0$ for all $i<j$. Since $G$ is a finite game, there exists a NE, $p^*$, for $G$. Since $G$ is strictly stable, for all $q \neq p^*$, we have
\[
(q-p^*) \cdot G (q-p^*) < 0 \text{ for all } q \neq p^* \in \Delta
\]
Thus
\[
(q - p^*) \cdot Gq < (q - p^*) \cdot G p^* \leq 0
\]
where the last inequality follows from $p^*$ is a NE. Thus we find that $\#(G)=1$.
\end{proof}
\subsection{Contest games}
First note that $s=(0,0, \cdots, 0)$ cannot be a Nash equilibrium since any player $i$ can deviate to $s_i>0$. Thus we let
\[
S = \{ (s_1, \cdots, s_n): s_i \geq 0 \text{ for all } i,\text{ and } s_j > 0 \text{ for some } j \}.
\]
\begin{lem} \label{lem:contest1}
Let $i$ be fixed and $s_i \geq 0$. Then $w^{(i)}(s_i, \cdot) : S_{-i} \rightarrow \mathbb{R}$ is convex.
\end{lem}
\begin{proof}
We will show that $p^{(i)}(s_i, \cdot) : S_{-i} \rightarrow \mathbb{R}$ is convex and then the desired result follows. Suppose that $s_i > 0$. Define $g: s_{-i} \mapsto \sum_{l \neq i} s_l$ and $h: t \mapsto \frac{s_i}{s_i +t}$. Then $g$ is convex, $h$ is convex and decreasing, thus $p^{(i)}(s_i, \cdot)$ is convex. If $s_i=0$, then
\[
p^{(i)} (0, s_{-i}) = \begin{cases}
\frac{1}{n}, & \mbox{if } s_{-i} = 0 \\
0, & \mbox{otherwise}.
\end{cases}
\]
Thus $p^{(i)} (0, \cdot)$ is convex for all $s_{-i} \neq 0$. Thus we obtain the desired result.
\end{proof}
Since it is known that the rent-seeking game admits a Nash equilibrium, Proposition \ref{prop:zero-convex} and Lemma \ref{lem:contest1} show that the set of Nash equilibrium for the rent-seeking game is convex.
Let $b_i^\circ (s_{-i})$ be the best response when an interior solution occurs. That is, $b_i^\circ (s_{-i})$ satisfies
\[
c_i (b_i^\circ(s_{-i}) + \sum_{l \neq i } s_l )^2 = \sum_{l \neq i } s_l.
\]
Then
\[
\Phi_f(s) = \sum_{i=1}^{n} w^{(i)} (\max \{ b_i^\circ (s_{-i}), 0 \}, s_{-i})
\]
For $P \subset \{1, \cdots, n\}$ such that $|P|\geq 2$, we define
\[
w_P^{(i)}(s_i, s_{-i}) =(p^{(i)}(s_i, s_{-i}) - \frac{1}{|P|} ) - \frac{1}{|P|-1} \sum_{j \neq i, j \in P} (c_i s_i - c_j s_j)
\]
for $s \in \prod_{i=1}^{n} S_i$.
\begin{lem} \label{lem:contest2}
Suppose that $s^*$ is a Nash equilibrium and $s_i ^* >0$ for all $i \in P$ and $s_i^*=0$ for all $i \not \in P$ where $P \subset \{1, \cdots, n \}$. Let $s_P^* = (s_i^*)_{i \in P}$. Then we have
\[
\Phi_f(s^*) = \sum_{i \in P} w^{(i)}_P( b_i^\circ(s^*_{P, -i}), s_{P,-i}^*)
\]
\end{lem}
\begin{proof}
Let $P \subset \{1, \cdots, n \}$ such that for all $i \in P$, $s_i >0, b_i(s_{-i})>0$ and for all $i \not \in P$, $s_i = 0, b_i(s_{-i})=0$. Then we have
\begin{align*}
\sum_{i=1}^n w^{(i)}(b_i(s_{-i}), s_{-i})= & \sum_{i \in P} w^{(i)}(b^0_i(s_{-i}), s_{-i}) + \sum_{i \not \in P} w^{(i)}(0, s_{-i})\\
= & \sum_{i \in P} [ p^{(i)}(b_i^\circ (s_{-i}), s_{-i}) -c_i b^\circ_i(s_{-i})) + \frac{1}{n-1} \sum_{j \neq i, j \in P} c_j s_j] \\
+ & \sum_{i \not \in P} \frac{1}{n-1} \sum_{j \neq i, j \in P } c_j s_j -1 \\
= & \sum_{i \in P} w^{(i)}_P( b_i^\circ(s_{P, -i}), s_{P,-i})
\end{align*}
where we use $b^\circ_i(s_{-i}) =b^\circ_i(s_{P, -i})$, since $b^\circ_i(s_{-i})$ depends only on $\sum_{l \neq i}s_l$.
Using this, we obtain the desired result.
\end{proof}
Lemma \ref{lem:contest2} leads us to define
\begin{align}
\Phi^\circ_P(s) := & \sum_{i \in P} w^{(i)}_P( b_i^\circ(s_{-i}), s_{-i}) \notag \\
= & \frac{1}{|P|-1}\sum_{i \in P} c_i (\sum_{i \in P } \sum_{l \neq i} s_l) - 2 \sum_{i \in P } \sqrt{c_i} \sqrt{ \sum_{ l \neq i } s_l } + |P|-1 \label{eq:phi-circ-p}
\end{align}
for $s \in S(P)$.
\begin{lem} \label{lem:contest3}
$\Phi^\circ_P(s): S(P) \rightarrow \mathbb{R}$ is strictly convex.
\end{lem}
\begin{proof}
From \eqref{eq:phi-circ-p}, it is enough to consider the following function:
\[
\Psi(s):= \sum_{i=1}^{n} \alpha_i h( \sum_{l \neq i} s_l)
\]
where $\alpha_i >0$ and $h$ is strictly convex. We will show that $\Psi$ is strictly convex. Let $s, t \in S_+$ and $s \neq t$ and $\rho \in (0, 1)$. Then for some $k$, $\sum_{l \neq k} s_l \neq \sum_{l \neq k} t_l$. Otherwise, if $\sum_{l \neq i} s_l =\sum_{l \neq i} t_l$ for all $i$, then $\sum_{l} s_l = \sum_l t_l$, which again implies $s_i=t_i$ for all $i$, a contradiction. Thus from the strict convexity of $h$, we have
\[
h( (1-\rho) \sum_{l\neq k}s_l + \rho \sum_{l\neq k} t_l) > (1-\rho) h(\sum_{l\neq k}s_l) + \rho h(\sum_{l\neq k} t_l)
\]
and
\[
\Psi( (1-\rho) s + \rho t) = \sum_{i=1}^{n} \alpha_i h\Big( \sum_{l \neq i} \big((1-\rho) s_l + \rho t_l\big)\Big) > \sum_{i=1}^{n} \alpha_i \Big( (1-\rho) h\big(\sum_{l\neq i}s_l\big) + \rho h\big(\sum_{l\neq i} t_l\big)\Big) =(1-\rho)\Psi(s) + \rho \Psi(t)
\]
\end{proof}
\begin{lem} \label{lem:contest4}
Suppose that $s^*$ and $t^*$ are Nash equilibria for a rent-seeking game defined in \eqref{eq:r-s-games} such that $s_i^*, t_i^* >0$ for all $i \in P$ and $s_i^*, t_i^* =0$ for all $ i \not \in P$ for some $P \subset \{1,\cdots, n\}$. Then $s^*=t^*$.
\end{lem}
\begin{proof}
Suppose that $s^*$ and $t^*$ are Nash equilibria for $\Gamma^{(n)}$ such that $s_i^*, t_i^* >0$ for all $i \in P$ and $s_i^*, t_i^* =0$ for all $ i \not \in P$. Let $s_P^*:= (s_i^*)_{i \in P}$ and $t_P^*:= (t_i^*)_{i \in P}$.
Then we have
\[
0 = \Phi(s^*) = \Phi^\circ_P(s_P^*) \text{ and } 0 = \Phi(t^*) = \Phi^\circ_P(t_P^*).
\]
Since $\Phi^\circ_P(s_P) \geq 0$ for all $s_P \in \prod_{i \in P} S_i$, both $s_P^*$ and $t_P^*$ are minimizers of $\Phi^\circ_P$, and the strict convexity of $\Phi_P^\circ$ (Lemma \ref{lem:contest3}) implies that the minimizer is unique. Hence $s_P^* = t_P^*$, and thus $s^* = t^*$.
\end{proof}
\begin{prop} \label{appen:prop-app2}
The Nash equilibrium for the rent-seeking game defined in \eqref{eq:r-s-games} is unique.
\end{prop}
\begin{proof}
Suppose that $s^*$ and $t^*$ such that $s^* \neq t^*$ are Nash equilibria. Let $P':=\{i: s_i^*>0 \}$ and $P'':= \{i: t_i^*>0 \}$. Then from Lemma \ref{lem:contest4}, we must have $P' \neq P''$. Since the set of Nash equilibria is convex by Proposition \ref{prop:zero-convex} and Lemma \ref{lem:contest1}, $\rho s^* + (1-\rho) t^*$ is a Nash equilibrium for all $\rho \in [0, 1]$. Then for $0 < \rho < 1$, $(\rho s^* + (1-\rho) t^*)_i >0 $ if $ i \in P'$ and $(\rho s^* + (1-\rho) t^*)_j >0 $ if $ j \in P''$. Thus there are infinitely many Nash equilibria with support $P' \cup P''$, which is a contradiction to Lemma \ref{lem:contest4}.
\end{proof}
\renewcommand{D}{D}
\section{Existing decomposition results}\label{sec:existing}
Our decomposition methods extend two kinds of existing results: (i) \citet{Kalai10}, (ii) \citet{HandR11, Candogan2011}. First, \citet{Kalai10} decompose normal form games with incomplete information and study the implications for Bayesian mechanism designs. Their decomposition is based on the orthogonal decomposition $\mathcal{L}=\mathcal{I} \oplus\mathcal{Z}$ in equation (\ref{eq:1st-two-d1}).
Second, \citet{HandR11} similarly provide decomposition results based on the orthogonality between identical interest and zero-sum games and between normalized and non-strategic games, mainly focusing on finite games. \citet{Candogan2011} decompose finite strategy games into three components: a potential component, a nonstrategic component, and a harmonic component. When the numbers of strategies are the same for all players, harmonic components are the same as zero-sum normalized games, and their harmonic games, in this case, refer to games that are strategically equivalent to zero-sum normalized games. Also, their potential component is obtained
by removing the non-strategic component from the potential part ($\mathcal{I}+\mathcal{E}$)
of the games. Note that we can change our definition of zero-sum normalized games to their definition of harmonic games, with all the decomposition results remaining unchanged. Thus, their three-component decomposition of finite strategy
games follows from Theorem \ref{thm:main}, $\mathcal{L}=(\mathcal{I}+\mathcal{E})\oplus(\mathcal{Z}\cap\mathcal{N})$
(see the proof of Corollary \ref{cor: Can} for more detail).
\begin{cor}
\label{cor: Can}We have the following decomposition.
\[
\mathcal{L}=\underbrace{((\mathcal{I}+\mathcal{E})\cap\mathcal{N})}_{\text{Potential Component}}\oplus\underbrace{\mathcal{E}}_{\substack{\text{Nonstrategic}\\
\text{Component}
}
}\oplus\underbrace{(\mathcal{Z}\cap\mathcal{N})}_{\substack{\text{Harmonic}\\
\text{Component}
}
}
\]
\end{cor}
\begin{proof}
This proof follows from Theorem \ref{thm:main} by showing that $((\mathcal{I}+\mathcal{E})\cap\mathcal{N})\oplus\mathcal{E}=\mathcal{I}+\mathcal{E}$.
First, observe that $(\mathcal{I}+\mathcal{E})\cap\mathcal{N}\subset\mathcal{I}+\mathcal{E}$,
which implies that $((\mathcal{I}+\mathcal{E})\cap\mathcal{N})\oplus\mathcal{E}\subset\mathcal{I}+\mathcal{E}$.
Now, let $f\in\mathcal{I}+\mathcal{E}$. Then, $f=g+h,$ where $g\in\mathcal{I}$,
$h\in\mathcal{E}$, and $g=(v,v,\cdots,v).$ Then, by applying the
map, $\mathbf{P}$, we find that $f=\mathbf{P}(f)+(I-\mathbf{P})(f)$. Obviously,
$\mathbf{P}(f)\in\mathcal{E}$. In addition, $(I-\mathbf{P})(f)=(I-\mathbf{P})(g)=(v-T_{1}v,\,v-T_{2}v,\cdots,\,v-T_{n}v)\in\mathcal{I}+\mathcal{E}.$
Thus, $(I-\mathbf{P})(f)\in(\mathcal{I}+\mathcal{E})\cap\mathcal{N}$.
\end{proof}
\com{\noindent Note that Corollary \ref{cor: Can} not only reproduces the result of \citet{Candogan2011}, when the number of strategies of the players is the same, but also extends it to the space of games with continuous strategy sets.
}
\citet{Ui00} provides the following characterization for potential
games:
\begin{equation} \label{eq:Ui-con}
f\text{ is a potential game if and only if }f^{(i)}=\sum_{\substack{M\subset N\\
M\ni i
}
}\xi_{M}\textnormal{ for some\,}\{\xi_{M}\}_{M\subset N}\textnormal{ for all }i
\end{equation}
where $\xi_{M}$ depends only on $s_{l}$, with $l\in M$. Let
\[
\mathcal{D} := \{ f \in \mathcal{L} : f^{(i)}(s) := \sum_{l \neq i} \zeta_l (s_{-l}) \text{ for all } i \}.
\]
\noindent From our decomposition results, we have $\mathcal{D} \subset \mathcal{I}+\mathcal{E}$ and $\mathcal{E} \subset \mathcal{I} + \mathcal{D}$. In particular, the second inclusion holds because
\[
\zeta_{i}(s_{-i}) =\sum_{l=1}^{n} \zeta_l (s_{-l}) - \sum_{l \neq i} \zeta_{l}(s_{-l}).
\]
Thus, $\mathcal{D} \subset \mathcal{I}+\mathcal{E}$ implies that $\mathcal{I}+\mathcal{D}+\mathcal{E} \subset \mathcal{I}+\mathcal{E}$ and $\mathcal{E} \subset \mathcal{I} + \mathcal{D}$ implies $\mathcal{I}+\mathcal{D}+\mathcal{E} \subset \mathcal{I}+\mathcal{D}$. From this, we find
\begin{equation}\label{eq:ui-con-show}
\mathcal{I}+\mathcal{D}= \mathcal{I}+\mathcal{D}+\mathcal{E}= \mathcal{I}+\mathcal{E}
\end{equation}
Note that all games in $\mathcal{I}$ and in $\mathcal{D}$ satisfy Ui's condition in \eqref{eq:Ui-con}; hence, games in $\mathcal{I} + \mathcal{D}$ satisfy Ui's condition. Then, equalities in \eqref{eq:ui-con-show} show that the condition in \eqref{eq:Ui-con} is a necessary condition for potential games. The sufficiency of Ui's
condition is deduced by adding the non-strategic game
\[
(\sum_{\substack{M\subset N\\
M\not\ni1
}
}\xi_{M},\sum_{\substack{M\subset N\\
M\not\ni2
}
}\xi_{M},\cdots,\sum_{\substack{M\subset N\\
M\not\ni n
}
}\xi_{M})
\]
to game $f$ satisfying Ui's condition.
As explained in the main text, \citet{Sandholm10} decomposes $n$-player finite strategy games into
$2^{n}$ components using an orthogonal projection. When the set of games consists of symmetric games with $l$ strategies, the orthogonal projection is given by $\Gamma:=I-\frac{1}{l}\mathbf{1}\mathbf{1}^{T}$
, where $I$ is the $l \times l$ identity matrix and $\mbox{\textbf{1}}$ is the column vector consisting of all
1's. Using $\Gamma$, we can,
for example, write a given symmetric game, $A$, as
\begin{equation}
A=\underset{=(\mathcal{I}\cap\mathcal{N})\oplus(\mathcal{Z}\cap\mathcal{N})}{\underbrace{\Gamma A\Gamma}}+\text{ }\underbrace{(I-\Gamma)A\Gamma+\Gamma A(I-\Gamma)+(I-\Gamma)A(I-\Gamma)}_{=\mathcal{B}}.\footnote{In fact, for two player symmetric game, using Table \ref{tab:component} we can verify that
\[
f_{\mathcal B}=(I - (I-T_1)(I-T_2))f^{(1)}, \quad f_{\mathcal{I}\cap\mathcal{N}}+ f_{\mathcal{Z} \cap \mathcal{N}}=(I-T_1)(I-T_2) f^{(1)}.
\]}\label{eq:sand_decomp}
\end{equation}
Thus, our decompositions show that $\Gamma A\Gamma$ can be decomposed
further into games with different properties---identical interest normalized
games and zero-sum normalized games---and every game belonging to the
second component in (\ref{eq:sand_decomp}) is strategically equivalent to both an identical interest game and a zero-sum game. \citet{Sandholm10}
also shows that a two-player game, ($A,B)$, is potential if and only
if $\Gamma A\Gamma=\Gamma B\Gamma.$ If $P=(P^{(1)},P^{(2)})$ is a non-strategic
game, it is easy to see that $\Gamma P^{(1)}=O$ and $P^{(2)}\Gamma=O$,
where $O$ is a zero matrix. Thus,
the necessity of the condition $\Gamma A\Gamma=\Gamma B\Gamma$ for potential
games is obtained. Conversely, if $\Gamma A\Gamma=\Gamma B\Gamma$,
then game $(A,B)$ does not have a component belonging to $\mathcal{Z}\cap\mathcal{N}$
because $(\Gamma A\Gamma,\Gamma B\Gamma)\in(\mathcal{I}\cap\mathcal{N})\oplus(\mathcal{Z}\cap\mathcal{N}).$
Thus, ($A$,$B$) is a potential game.
\end{document} |
\begin{document}
\title{Spectral gap for the growth-fragmentation equation via Harris's Theorem}
\begin{abstract}
We study the long-time behaviour of the growth-fragmentation
equation, a nonlocal linear evolution equation describing a wide
range of phenomena in structured population dynamics. We show the
existence of a spectral gap under conditions that generalise those
in the literature by using a method based on Harris's theorem, a
result coming from the study of equilibration of Markov
processes. The difficulty posed by the non-conservativeness of the
equation is overcome by performing an $h$-transform, after solving
the dual Perron eigenvalue problem. The existence of the direct
Perron eigenvector is then a consequence of our methods, which prove
exponential contraction of the evolution equation.
Moreover the rate of convergence is explicitly quantifiable in terms of
the dual eigenfunction and the coefficients of the equation.
\end{abstract}
\tableofcontents
\section{Introduction and main result}
\label{sec:intro}
The growth-fragmentation equation is a linear, partial
integro-differential equation which is commonly used in structured population
dynamics for modelling various phenomena including the time evolution
of cell populations in biology such as in \cite{APM03, B68, BA67,
BCP08, DHT84, HW89, DM14, P06, RTK17}, single species populations
\cite{SS71}, or carbon content in a forest \cite{BGP19}; some
aggregation and growth phenomena in physics or biophysics as in
\cite{BLL19,CLODLMP,EPW,G15,LW17,McGZ87}; neuroscience in
\cite{CY19,PPS14} and even TCP/IP communication protocols such as in
\cite{BMR02, BCGMZ13,CMP10}. The general form of the growth-fragmentation
equation is given by:
\begin{equation}
\begin{aligned} \label{eq:gf}
\frac{\partial }{\partial t}n(t,x) + \frac{\partial }{\partial x} (g(x) n(t,x)) + B(x)n(t,x) &= \int_{x}^{+ \infty} \kappa (y,x) n(t,y) \,\mathrm{d} y, &&t,x > 0,\\
n(t,0) &= 0, &&t \geq 0,\\
n(0,x) &= n_0(x), &&x> 0,
\end{aligned}
\end{equation}
where $n(t,x)$ represents the population density of individuals
structured by a variable $x > 0$ at a time $t \geq 0$. The structuring
variable $x$ could be \emph{age, size, length, weight, DNA content,
biochemical composition} etc. depending on the modelling
context. Here we refer to it as \emph{`size'} for simplicity. Equation
\eqref{eq:gf} is coupled with an initial condition $n_0(x)$ at time
$t=0$ and a Dirichlet boundary condition which models the fact that no
individuals are newly created at size $0$. The function $g$ is the
\emph{growth rate} and $B$ is the \emph{total division/fragmentation
rate} of individuals of size $x \geq 0$. The fragmentation kernel
$\kappa(y,x)$ is the rate at which individuals of size $x$ are
obtained as the result of a fragmentation event of an individual of
size $y$. When fixing $x$, $\kappa(x, \cdot)$ is a nonnegative measure on
$(0,x]$. The \emph{total fragmentation rate} $B$ is always obtained as
\begin{equation}\label{eq:kappaB}
B(x) = \int_0^x \frac{y}{x} \kappa(x,y) \,\mathrm{d} y,
\qquad x > 0.
\end{equation}
Important particular cases are
\begin{equation*}
\kappa(x,y) = B(x) \frac{2}{x} \delta_{\{y=\frac{x}{2}\}},
\end{equation*}
which corresponds to the \emph{mitosis} process, suitable for
modelling of biological cells, where individuals can only break into
two equal fragments; and
\begin{equation*}
\kappa(x,y) = B(x) \frac{2}{x},
\end{equation*}
which is the case with \emph{uniform fragment distribution}, where
fragmentation gives fragments of any size less than the original one
with equal probability. This case is used for example in modelling the dynamics of polymer chains, as in \cite{EPW}.
Two opposing dynamics, growth and fragmentation, are balanced through
Equation~\eqref{eq:gf}. The growth term tends to increase the average
size of the population and the fragmentation term increases the total
number of individuals but breaks the population into smaller sizes. If
the growth rate $g(x)$ vanishes, then only fragmentation takes place
and the equation is known as the \emph{pure fragmentation
equation}. Similarly when $B$ and $\kappa$ are both $0$, Equation \eqref{eq:gf} is the \emph{pure growth equation}.
We are concerned here with the mathematical theory of this equation,
and more precisely with its long-time behaviour as $t \to
+\infty$. Under suitable conditions on the coefficients $\kappa$ and
$g$, the typical behaviour is that the total population tends to grow
exponentially at a rate $e^{\lambda t}$, for some $\lambda > 0$, and
the normalised population distribution tends to approach a
\emph{universal profile} for large times, independently of the initial
condition. This has been investigated in a large amount of previous
works, of which we give a short summary. The first mathematical study
of this type of equation was done in \cite{DHT84} for the mitosis
case, in a work inspired by some biophysical papers \cite{B68,
BA67,SS71}. In \cite{DHT84}, the authors considered the mitosis
kernel with the size variable in a bounded interval and proved
exponential growth at a rate $\lambda$, and exponentially fast
approach to the universal profile. In \cite{MMP05}, the authors
considered the size variable in $(0,+\infty)$ and introduced the
\emph{general relative entropy} method for several linear PDEs
including the growth-fragmentation equation. They proved relaxation to
equilibrium in $L^p$ spaces without an explicit rate. Following
\cite{PR05} and \cite{LP09}, providing an explicit rate of convergence
to a universal profile under reasonable assumptions became a topic of
research for many other works. New functional inequalities were proved
in \cite{CCM11, CCM11-2} in order to obtain explicit rates of
convergence, see also~\cite{GS14}. Some authors used a
semigroup approach \cite{O92, BA06, BPR12, BG18, EN01, GN88,MS16}
or a probabilistic approach \cite{BCGMZ13,B03, B19, BW16, BW18, BW20, B18, BGP19,C21,C20,CMP10,C17},
and some authors provided explicit
solutions as in~\cite{ZvBW152}. In this paper we are
able to give more general results regarding the speed of convergence
to equilibrium: we obtain constructive results which cover a wide
range of bounded and unbounded fragmentation rates, and which apply
both in mitosis and uniform fragmentation situations.
When the equal mitosis kernel is considered, there is a special case
with a linear growth rate where the solutions exhibit oscillatory
behaviour in long time. This property was first proved mathematically
in \cite{GN88} when the equation is posed in a compact set. Recently,
this result was extended to $(0,+\infty)$ by the general relative
entropy argument in suitable weighted $L^2$ or measure spaces in
\cite{BDG18,GM19} and by means of Mellin transform in $L^1$ space by~\cite{vBALZ}.
An important tool when studying the asymptotic behaviour of
\eqref{eq:gf} is the Perron eigenvalue problem: finding a positive
eigenfunction for the operator which defines the equation, associated
to a simple, real eigenvalue which is also equal to the spectral
radius; see \cite{M06,DG09} for general existence results. In
\cite{BCG13}, the authors gave some estimates on the principal
eigenfunctions of the growth-fragmentation operator, giving their
first order behaviour close to $0$ and $+\infty$. Then they proved a
spectral gap result by means of entropy–entropy dissipation
inequalities, with tools similar to those of~\cite{CCM11,
CCM11-2}. They assumed that the growth and the fragmentation coefficients
behave asymptotically like power laws.
In this paper we use a probabilistic approach, namely
\emph{Harris's theorem}, for showing the spectral gap
property. We give a novel approach based on
estimating solutions to the PDE, and obtain results which
can be applied to general growth and fragmentation rates
including mitosis and uniform fragmentation cases. Detailed
hypotheses and results are given later in this
introduction. The method is also completely constructive and
gives explicit estimates. However, in some cases these
estimates depend on estimates on the first dual
eigenfunction, which may be not easy to obtain, but
constitute a separate question. After stating our results we
also give a brief comparison to other spectral gap results
in the literature.
Applications of this type of argument into biological and
kinetic models which can be described as \emph{Markov
processes} is becoming a subject of many works recently, and has been extended to models which are not
Markov processes but share similar properties. The
predecessor of Harris's theorem, namely \emph{Doeblin's
argument} is used in \cite{G18} for proving exponential
relaxation of solutions to the equilibrium for the
conservative renewal equation. In \cite{CY19} and \cite{DG17},
the authors study population models which describe the
dynamics of interacting neurons, structured by elapsed-time
in~\cite{CY19} or by voltage in~\cite{DG17}, and existence of
a spectral gap property in the \textit{`no-connectivity'}
setting is proved by Doeblin's Theorem. Moreover, there are
some recent works for the extension of this method into the
non-conservative setting. In \cite{BCG17}, the authors
consider several types of linear PDEs including a
growth-diffusion model with time-space varying environment and
some renewal equations with time-fluctuating ({\it e.g.}
periodic) coefficients. They provide quantitative estimates in
total variation distance for the associated non-conservative
and non-homogeneous semigroups by means of generalized
Doeblin's conditions. The full Harris's theorem is used in
\cite{B18,BGP19} for deriving exponential convergence to the
equilibrium in the conservative form of the
growth-fragmentation equation. In the present work, we are
interested in the long time behaviour of the more challenging
non-conservative case, namely when no quantity is preserved
along time. Our method is in the spirit of~\cite{BCGM19},
where a non-conservative version of Harris's Theorem is
proposed and applied to the growth-fragmentation equation with
constant growth rate $g$ and increasing total division rate
$B$, see also~\cite{CG20} for an application to a
mutation-selection model which is similar to growth-fragmentation.
The difference here is that we first build a solution to
the dual Perron eigenproblem by using Krein-Rutman's theorem
and a maximum principle. Then we take advantage of the dual
eigenfunction to perform a so-called (Doob) $h$-transform
\cite{D57}, similarly as in~\cite{BPR12,C17}, in order to
apply Harris's theorem. It allows us to consider very general
growth and fragmentation rates. The drawback is that the
spectral gap is given explicitly in terms of the dual
eigenfunction, for which quantitative estimates are in general
hard to obtain. However, for certain specific coefficients
that are worthy of interest, the dual eigenfunction is known
explicitly. It is the case of the so-called
\emph{self-similar fragmentation equation}, widely studied in
the literature, for which we provide new quantitative
estimates on the spectral gap.
\
Let us now make precise the functional analytic
setting of our work and what we mean by solutions to
Equation~\eqref{eq:gf}. We are interested in measure
solutions to this equation, which is a relevant notion in
population dynamics, see {\it e.g.}~\cite{CCC13,G18}. We
say that a family $(n(t,\cdot))_{t\geq0}$ of positive
measures on $(0,+\infty)$ is a solution to
Equation~\eqref{eq:gf} if for all $f\in C^1_c([0,+\infty))$
the function $t\mapsto\langle n(t,\cdot),f\rangle$ is
continuously differentiable and for all $t\geq0$
\begin{equation}\label{eq:gf_def}\frac{\,\mathrm{d}}{\,\mathrm{d} t}\langle n(t,\cdot),f\rangle=\langle n(t,\cdot),\mathcal L^*[f]\rangle,\end{equation}
where
\[\mathcal{L}^*[f](x) := g(x) \frac{\partial}{\partial x}f (x) +
\int_{0}^{x} \kappa (x,y)f(y) \,\mathrm{d} y -B(x) f(x)\]
is the dual operator of the growth-fragmentation operator
\[\mathcal{L} [n] (x) := - \frac{\partial}{\partial x} (g(x)n(x)) - B(x) n(x) + \int_{x}^{+\infty} \kappa (y,x) n(y) \,\mathrm{d} y, \]
which appears in Equation~\eqref{eq:gf}. We refer
to~\cite{BCGM19,GM19} for (the method of) proof that
Equation~\eqref{eq:gf} is well-posed in the set of positive
(or signed Radon) measures $\mu$ such that the weighted total
variation norm
\begin{equation}\label{eq:wtv}\left\| \mu \right\|_{V} = \int_{0}^{+\infty} V(x) |\mu|(\mathrm{d}x)\end{equation}
is finite, when $V(x)=x^k+x^K$ with $k\leq0$ and $K>1$.
\
The Perron eigenvalue problem consists of finding suitable
eigenelements $(\lambda, N, \phi )$ with $\lambda > 0$ and
$N, \phi \colon (0,+\infty) \to [0,+\infty)$, $N,\phi\not\equiv0$, satisfying the following:
\begin{equation} \label{eq:eigenfunction}
\mathcal{L}[N]=\lambda N,\quad(gN)(0) = 0,
\end{equation}
\begin{equation} \label{eq:dualeigenfunction}
\mathcal{L}^*[\phi]=\lambda\phi.
\end{equation}
If such a triple exists then $\lambda$ is actually the
dominant eigenvalue of Equation \eqref{eq:gf}, and the solution is expected to converge to a universal profile whose shape is given by the eigenfunction $N(x)$. The
convergence rate is given by the gap between the dominant eigenvalue
$\lambda >0$ and the rest of the spectrum. If we scale the
equation by defining $m(t,x) := n(t,x) e^{-\lambda t}$ we obtain:
\begin{equation}
\begin{aligned}
\label{eq:gfscaled}
\frac{\partial }{\partial t}m(t,x) + \frac{\partial }{\partial x} (g(x) m(t,x)) + (B(x) + \lambda) m(t,x)&= \int_{x}^{+ \infty} \kappa (y,x) m(t,y) \,\mathrm{d} y, &&t,x \geq 0,\\
m(t,0) &= 0, \qquad \qquad &&t > 0,\\
m(0,x) &= n_0(x), \qquad &&x > 0.
\end{aligned}
\end{equation}
We remark that $N(x)$ is the stationary solution of Equation \eqref{eq:gfscaled}
and $\phi(x)$ provides a conservation law for \eqref{eq:gfscaled} since
\[\frac{\,\mathrm{d}}{\,\mathrm{d} t}\int_{0}^{+\infty}\phi(x) m(t,x) \,\mathrm{d} x =0. \]
Since the existence and
uniqueness of the eigenelements provide useful information about the long
time behaviour of the growth-fragmentation equation \eqref{eq:gf}, it
has been a popular topic of research. We refer to \cite{DG09}
for a general recent result. From now on we consider
Equation \eqref{eq:gfscaled} instead of Equation \eqref{eq:gf} since it is more
convenient to study the long-time behaviour of the former and we
can easily recover the nature of the latter.
\
We now list all the assumptions we need throughout the paper.
\
As we will explain in Section~\ref{sec:harris},
Harris's method relies on a local Doeblin's minorisation
condition. The computations for checking this condition
strongly depend on the fragmentation kernel. In~\cite{CY19}
a global Doeblin condition is proved (for a similar
equation) for kernels $\kappa$ which satisfy, for some
$\epsilon,\eta,x_*>0$, the condition that
$\kappa(x,y)\geq\epsilon$ for all $x\in[0,\eta]$ and
$y\geq x_*$. Here we rather consider kernels that are of
self-similar form, which is commonly assumed in the
literature about spectral gaps for the growth-fragmentation
equation~\cite{BCG13,BCGM19,BG18,C21,CCM11,MS16} and
includes the classical kernels appearing in applications (in
particular equal or unequal mitosis and uniform fragment
distribution, see below).
\begin{hyp}\label{asmp:k1}
We assume that $\kappa(x,y)$, the fragmentation kernel, is of the
self-similar form such that
\begin{equation*} \label{hyp:kappa}
\kappa (x,y) = \frac{1}{x}p\Big(\frac{y}{x} \Big) B(x), \quad \text{ for } y > x > 0,
\end{equation*}
where $p$, the ``fragment distribution'', is a nonnegative measure on $(0,1]$ such that $z p(z)$ is a probability measure; that is,
\begin{equation*}
\int_{(0,1]} z p(z) \,\mathrm{d} z = 1.
\end{equation*}
\end{hyp}
\begin{rem}
It is useful to define $p_k$, for $k\in\mathbb{R}$, as the $k$-th moment of
$p$:
\[p_k := \int_{0}^{1} z^k p(z) \,\mathrm{d} z.\] With this notation,
Hypothesis~\ref{asmp:k1} ensures that $p_1 = 1$, so the
relation~\eqref{eq:kappaB} is guaranteed.
\end{rem}
Our next hypothesis states that we consider only the two
extreme cases of the fragment distribution, namely the very singular
equal mitosis case and the very smooth uniform fragment
distribution. One can find conditions for our methods to work in
intermediate cases, but we have preferred to give simple proofs that
show both singular and smooth cases can be treated:
\begin{hyp} \label{asmp:p}
We assume that the fragment distribution $p$ is either the one
corresponding to the equal mitosis:
\begin{equation}\label{eq:mitosis}
p(\,\mathrm{d} z)=2\delta_{\frac12}(\,\mathrm{d} z)
\end{equation}
or the uniform fragment distribution:
\begin{equation}\label{eq:uniform}
p(\,\mathrm{d} z)=2\,\mathrm{d} z.
\end{equation}
\end{hyp}
\begin{rem}\label{rk:kernel}
We restrict to these two particular fragmentation kernels
because they naturally appear in the modelling of natural
phenomena. They are also good representatives of two
opposite mathematical situations: a very regular, strictly
positive case and a singular case which is positive only at
$z=1/2$. However, the results which we prove to be valid for
the uniform kernel can be readily extended to self-similar
kernels with $p$ satisfying
\begin{equation}\label{asmp:plowerbound}
p(z)\geq c>0 \qquad\text{for all
$z$ in some interval $(z_1, z_2) \subseteq
(0,1)$}
\end{equation}
and either
\[p_0<+\infty\qquad \text{if}\qquad\int_0^1\frac{1}{g(x)}\,\mathrm{d} x<+\infty,\]
or
\[\exists k<0 \ \text{ with }\ p_k< +\infty\qquad \text{if}\qquad\int_0^1\frac{1}{g(x)}\,\mathrm{d} x=+\infty.\]
In the particular case of the linear growth rate, $g(x) = x$, it is enough to assume that
\[\exists k<1 \ \text{ with }\ p_k< +\infty.\]
Notice that under condition~\eqref{asmp:plowerbound}, similarly as
for~\eqref{eq:mitosis} and~\eqref{eq:uniform}, the function
$k\mapsto p_k$ is strictly decreasing on the interval where it takes
finite values. (The only case in which $p_k$ is not strictly
decreasing is that of $p(z)$ concentrated at $z=1$, which actually
means no fragmentation at all is happening.)
In the case of constant growth rate, a more
general condition than~\eqref{asmp:plowerbound} is assumed
in~\cite{BCGM19} that covers the unequal mitosis kernels
$p(\,\mathrm{d} z)=\delta_\alpha(\,\mathrm{d} z)+\delta_{1-\alpha}(\,\mathrm{d} z)$ with
$0<\alpha<1$. In our proofs we can also consider this
generalisation with straightforward modifications when the
growth rate $g$ satisfies forthcoming
Hypothesis~\ref{asmp:gp}.
Regarding non self-similar kernels, there are results of
exponential convergence to the stationary distribution in
the literature, but only for bounded fragmentation rates;
see~\cite{LP09,PPS14} for PDE-based arguments and
\cite{B19,BW18, C21} for a probabilistic point of view. We
also point out that an optimal condition on the fragment
distribution is given in \cite{B19} for a spectral gap to
exist (for bounded fragmentation rates).
\end{rem}
Next we have a general assumption on the growth rate $g$ and the total
fragmentation rate~$B$:
\begin{hyp}\label{asmp:gB}
We assume that $g \colon (0,+\infty) \to (0,+\infty)$ is a locally
Lipschitz function such that $g(x) =\mathcal{O}(x)$ as
$x\to+\infty$ and $g(x)=\mathcal{O}(x^{-\xi})$ as $x\to0$ for some
$\xi\geq 0$. The total fragmentation rate
$B\colon [0,+\infty) \to [0,+\infty)$ is a continuous function and the
following holds
\begin{equation}
\label{eq:gB}
\int_0^1\frac{B(x)}{g(x)}\,\mathrm{d} x<+\infty,
\qquad\frac{xB(x)}{g(x)}\underset{x \to 0}{\longrightarrow } 0,
\qquad \frac{xB(x)}{g(x)} \underset{x \to +\infty}{\longrightarrow } + \infty.
\end{equation}
\end{hyp}
This assumption is very mild, and is always present in the previous works
to ensure the existence of an equilibrium and a dual eigenfunction. If
$B$ behaves like a power of exponent $b$ and $g$ behaves like a
power of exponent $a$, conditions \eqref{eq:gB} are equivalent to
the more familiar $b - a + 1 > 0$. The condition
$g = \mathcal{O}(x)$ for large $x$ ensures that the characteristics
corresponding to the growth part are defined for all times (i.e.,
clusters do not grow to infinite size in finite time). A stronger
assumption which is implicit in Hypothesis \ref{asmp:gB} is that $B$ is bounded
above on intervals of the form $[0,R]$ (since it is continuous there),
so we do not allow fragmentation rates $B$ which blow up at $0$. This
is used in the proof of Lemma \ref{lem:doeblinuniformfrag}.
As a consequence of Hypothesis \ref{asmp:gB}, we will later need
the following:
\begin{equation}
\label{eq:tB}
\parbox{.8\linewidth}{There exists $t_B > 0$ such that $B$ is bounded below by a
positive quantity on any interval of the form $[t_B,\theta]$ with
$\theta > t_B$.}
\end{equation}
One sees this from the last limit in \eqref{eq:gB}, which implies that
for large enough $t_B$ we have
\begin{equation*}
B(x) \geq \frac{g(x)}{x}.
\end{equation*}
This easily implies \eqref{eq:tB}, since $g(x)/x$ is continuous and
strictly positive, so bounded below by some positive quantity on any
compact interval.
\
Our last assumption gives a stronger requirement on the growth rate
$g$ when the mitosis kernel is considered. In this case, some additional requirement
is necessary, since when the linear growth rate with equal
mitosis is considered, it is known that there is no spectral gap
\citep{BDG18,GM19,vBALZ}. We point out that the sharp assumption of ``there
exists a point $x > 0$ with $g(2x)\neq 2g(x)$'' is enough to show
convergence to the profile $N$, without a rate and only in particular cases,
as proved in \cite[Section 6.3.3]{RTK17}. Our assumption is stronger
than this, but also leads to a stronger result:
\begin{hyp}\label{asmp:gp}
When $p$ is the equal mitosis kernel~\eqref{eq:mitosis}, we assume
that the growth rate $g$ satisfies
\begin{gather*}
\omega g(x) < g(\omega x)
\qquad \text{for all $x > 0$ and $\omega \in (0,1)$,}
\\
H(z) := \int_0^z \frac{1}{g(x)} \,\mathrm{d} x < +\infty
\qquad \text{for all $z > 0$,}
\end{gather*}
and also $H^{-1}$ (the inverse of $H$) does not grow too fast, in
the sense that for all $r > 0$ we have
\begin{equation}
\label{eq:H-1-power}
\lim_{z \to +\infty} \frac{H^{-1}(z + r)}{H^{-1}(z)} = 1.
\end{equation}
\end{hyp}
If we consider just powers, examples of growth and fragmentation
rates which satisfy all of the above are
\begin{gather*}
B(x) = x^b,\qquad g(x) = x^a
\end{gather*}
with:
\begin{itemize}
\item any $b \geq 0$, $-\infty < a \leq 1$ in the uniform
fragment distribution case, excluding the case
$(b, a) = (0, 1)$.
\item any $b \geq 0$, $-\infty < a < 1$ in the mitosis case.
\end{itemize}
Under Hypothesis \ref{asmp:k1}, the rescaled
growth-fragmentation equation \eqref{eq:gfscaled} takes the
form:
\begin{align} \label{eq:gfscaledgeneral}
\begin{split}
\frac{\partial }{\partial t}m(t,x) + \frac{\partial }{\partial x} (g(x)m(t,x)) + c(x ) m(t,x) &= \mathcal{A}(t,x) , \hspace{5pt} t,x \geq 0,\\
m(t,0)&= 0, \qquad \qquad t > 0,\\
m(0,x) &= n_0(x), \qquad x > 0.
\end{split}
\end{align} where
\[c(x) := B(x) + \lambda \] and
\[\mathcal{A}(t,x) := \int_{x}^{+\infty} \frac{B(y)}{y} p
\left(\frac{x}{y}\right) m(t,y) \,\mathrm{d} y.
\]
According to Hypothesis \ref{asmp:p}, we only allow $p(z) = 2$ or
$p(z) = 2 \delta_{\frac 1 2} (z)$.
\
Our main result is given by the following theorem:
\begin{thm}
\label{thm:main}
Assume that Hypotheses \ref{asmp:k1}, \ref{asmp:p},
\ref{asmp:gB}, and \ref{asmp:gp} are satisfied. Then there
exists a solution $(\lambda,N,\phi)$ to the Perron
eigenvalue
problem~\eqref{eq:eigenfunction}-\eqref{eq:dualeigenfunction}
with the normalization $\int N=\int\phi N=1$, $\lambda >0$,
and there exist $C,\rho > 0$ such that the solution
$n = n(t,x) \equiv n_t(x)$ to Equation \eqref{eq:gf} with initial
data given by a nonnegative finite measure $n_0$ with
$\|n_0\|_V < +\infty$ satisfies
\begin{equation}\label{eq:convergence}
\left\| e^{-\lambda t} n_t - \Big(\int\phi n_0\Big)N \right\|_{V}
\leq
C e^{-\rho t} \left\| n_0 - \Big(\int\phi n_0\Big)N\right\|_{V}
\qquad \text{for all } t \geq 0,
\end{equation}
where the weight $V$ of the total variation norm $\|\cdot\|_V$ defined in~\eqref{eq:wtv} is given by
\begin{equation*}
\begin{aligned}
V(x)&=1+x^K,\ 1+\xi <K\qquad
&&\text{if}\qquad\int_0^1\frac{1}{g(x)}\,\mathrm{d} x<+\infty,
\\
V(x)&=x^k+x^K,\ -1<k<0, 1+\xi <K\qquad
&&\text{if}\qquad\int_0^1\frac{1}{g(x)}\,\mathrm{d} x=+\infty.
\end{aligned}
\end{equation*}
In the specific case of $g(x)=x$, the weight $V(x)$ can be taken to be
\begin{equation*}
V(x) = x^k+x^K,\ -1<k<1<K.
\end{equation*}
\end{thm}
It is worth noticing that we obtain a spectral gap in spaces
with essentially optimal weights. Indeed it was proved
in~\cite{BG17} that there is no spectral gap in weighted $L^1$ space
with the dual eigenfunction $\phi$ when $B$ is bounded (see
the estimates in Theorem~\ref{thm:dualeigenfunction} below).
To the best of our knowledge, even the existence of the Perron eigenelements in
such generality is new (allowing a total fragmentation rate with any
growth at infinity, and with no required connectivity condition on its
support), and hence so is the existence of a spectral gap. However,
since our approach for the existence of the principal eigenfunction
$N$ is a byproduct of the contraction result provided by Harris's
theorem, this precludes the case of self-similar fragmentation with
equal mitosis and growth rate $g(x)=x$, for which convergence to a
universal profile does not hold, as we already mentioned. In that case
the existence of a Perron eigenfunction has to be tackled with other
spectral methods, as in~\cite{DG09,HW90,H85,M06}.
Note also that our result is valid for the measure solutions
of Equation~\eqref{eq:gf}, thus improving the result
in~\cite{DDGW18} where the general relative entropy method is
extended to measure solutions, providing convergence to
Malthusian behaviour but without a rate and under restrictive
assumptions on the coefficients.
Regarding the assumptions on the coefficients,
the only existing spectral gap results that consider general growth rates are the ones in~\cite{BCG13} and~\cite{BG18}.
In these papers, the fragmentation rate is assumed to behave like a power law,
which we relax here by only requiring Hypothesis~\ref{asmp:gB} on $B$.
The other results in the literature focus on constant or linear growth rates and,
except in~\cite{BCGM19}, they also consider division rates that grow like power laws.
Finally, when explicit estimates are available for
$\phi$, our method allows us to derive quantitative estimates
on the spectral gap. It is the case for instance when $g(x)=x$
since then $\phi(x)=x$. An important particular case is to
consider additionally that $B(x)=x^b$ for some $b>0$. This
corresponds to the so-called self-similar fragmentation
equation, which appears as a rescaling of the pure fragmentation
equation, see {\it e.g.}~\cite{DE16,EMRR05}. To illustrate the
quantification of the spectral gap, we prove that for the
homogeneous fragmentation kernel and the choice $V(x)=1+x^2$,
the inequality~\eqref{eq:convergence} holds true for
\begin{equation}\label{rho_num}
\rho=\frac{-\log\Big(1-\dfrac{\alpha}{2(1+2\alpha)}\Big)}{2\log2}
\end{equation}
where
\[\alpha=2\log2\,R^{b+3}e^{-2(4R)^b/b}\quad \text{with}\quad R=80\Big(\frac{15}{2}\Big)^{\frac1b+\frac b2}.\]
This seems to simplify the computable bound in~\cite[Proposition
6.7]{MS16}. It can also be compared to~\cite{GS14} where the
spectral gap in $L^2(x\,\mathrm{d} x)$ is proved to be at least $\frac12$,
but only for $b\geq2$. Similarly as
in~\cite{MS16}, our method also allows for deriving explicit
estimates for more general fragmentation kernels since it does
not change the function $\phi$.
Historically, the first explicit spectral gap was obtained for
constant growth and division rates and the equal mitosis
kernel in~\cite{PR05}, and then in~\cite{CMP10,BCGMZ13}. The
conditions were relaxed~\cite{LP09} and in particular general
fragmentation kernels were considered. Our method also allows
to get explicit spectral gap in the case of constant growth
rates, when the division rate is affine and the fragmentation
kernel is self-similar. Indeed if $g(x)=1$ and $B(x)=ax+b$,
then we easily check that $\phi(x)=\alpha x+1$ with
$\alpha=\frac{(p_0-1)b}{2}\big[\sqrt{1+\frac{4a}{(p_0-1)b^2}}-1\big]$,
where we recall that $p_0$ is the mass of the self-similar
kernel $p$, and the Perron eigenvalue is given by
$\lambda=\frac{(p_0-1)b}{2}\big[\sqrt{1+\frac{4a}{(p_0-1)b^2}}+1\big]$.
It is a particular case of the one treated in~\cite{BCGM19}
where $B$ is only assumed to be non-increasing, but it extends
the historical case of constant division rate.
\
This paper is organized as follows:
We devote Section \ref{sec:phi} to showing existence of the dual
eigenfunction and some bounds on it. In Section \ref{sec:harris}, we
recall some introductory concepts from the theory of Markov processes
and state Harris's Theorem \ref{thm:Harris} based on
the previous literature. Eventually for the proof of Theorem
\ref{thm:main} which is given by applying Harris's theorem, we need to
have Hypotheses \ref{hyp:Lyapunov} and \ref{hyp:localDoeblin}
satisfied for Equation \eqref{eq:gfscaledgeneral}. In Sections
\ref{sec:lyapunov} and \ref{sec:LowerBounds}, we prove that Hypotheses
\ref{hyp:Lyapunov} and \ref{hyp:localDoeblin} are verified for Equation \eqref{eq:gfscaledgeneral}, respectively.
Finally in Section~\ref{sec:proof-main} we give the proof of Theorem~\ref{thm:main} and the computations leading to~\eqref{rho_num}.
\section{Existence of the dual eigenfunction}
\label{sec:phi}
In this section, we prove the following theorem which implies
existence and boundedness of the dual Perron eigenfunction $\phi$, a
solution to the dual eigenproblem \eqref{eq:dualeigenfunction}:
\begin{thm}[Existence and bounds on the eigenfunction $\phi$]
\label{thm:dualeigenfunction}
We assume that Hypotheses \ref{asmp:k1} and \ref{asmp:gB} hold true
and assume also that $p_0<+\infty$. Then there exist a continuous
function $\phi$ which is a solution to Equation \eqref{eq:dualeigenfunction}
and $C>0$ such that for any $k>1$,
\[ 0 < \phi(x) \leq C(1+ x^k) \qquad \text{ for all } x>0. \]
Additionally we have $\phi(0)>0$ when $\int_0^1\frac1g<+\infty$ and
$\phi(0)=0$ when $\int_0^1\frac1g=+\infty$.
\end{thm}
Notice that our only assumption on $p$ is that $p_0 < +\infty$ (see
Remark~\ref{rk:kernel}). We prove this theorem at the end of the
section.
Following the idea introduced in~\cite{PR05} and also used
in~\cite{BCG13,DG09}, we begin with defining a truncated version of
the dual Perron eigenproblem \eqref{eq:dualeigenfunction} in an
interval $[0,R]$ for some $R>0$:
\begin{equation}
\begin{split}
\label{eq:trdualeigenfunction1}
-g(x) \frac{\partial}{\partial x } \phi_R(x) + (B(x) + \lambda_R ) \phi_R(x)
= \frac{B(x)}{x} \int_{0}^{R} p\left(\frac{y}{x}\right)\phi_R(y) \,\mathrm{d} y,
\\
\phi_R(x) >0\quad\text{for}\ x\in(0,R),
\qquad
\phi_R(R) = 0.
\end{split}
\end{equation}
\
Now we give some lemmas which will be used in the proof of Theorem
\ref{thm:dualeigenfunction}. The existence of a solution to Equation \eqref{eq:trdualeigenfunction1} is a consequence of the Krein-Rutman
theorem (see the appendices in~\cite{DG09} and~\cite{BCG13}). Moreover
in \cite{BCG13}, the authors proved that there exists $R_0>0$ large
enough such that for all $R>R_0$ we have $\lambda_R >0$. We thus have
the following result:
\begin{lem}\label{lem:existencetruncated}
For any $R>0$, the truncated dual Perron eigenproblem
\eqref{eq:trdualeigenfunction1} admits a solution
$(\lambda_R,\phi_R)$ with $\phi_R$ a Lipschitz function. Moreover
there exists $R_0>0$ such that $\lambda_R>0$ for all $R>R_0$.
\end{lem}
Before proving uniform estimates on $(\lambda_R,\phi_R)$, we first
recall a maximum principle. We begin by defining an operator
$\mathcal{L}^*_R$, acting on once-differentiable functions
$\varphi \in \mathcal{C}^{1}([0,R])$:
\begin{equation*} \label{eq:max}
\mathcal{L}^*_R \varphi(x):= - g(x)\varphi'(x) + \left( \lambda_R + B(x) \right)\varphi(x) - \frac{B(x)}{x}\int_{0}^{x} p \left(\frac{y}{x} \right) \varphi(y) \,\mathrm{d} y.
\end{equation*}
We have the following maximum principle, see \cite[Appendix C]{DG09} or \cite[Section 3.2]{BCG13}:
\begin{lem}
\label{lem:maxprinciple}
Suppose that $ \varphi(x) \geq 0$ for $x \in [0,A]$ for some $A \in (0,R)$ with $\varphi(R) \geq 0$ and $\mathcal{L}^*_R \varphi(x) >0$ on $[A,R]$. Then $\varphi (x) \geq 0 $ on $[0,R]$.
\end{lem}
This maximum principle allows us to get a uniform upper bound on
$\phi_R$, for a suitable normalization.
\begin{lem}\label{lem:bounonphi}
Consider that Hypotheses \ref{asmp:k1} and \ref{asmp:gB} are
satisfied, and that $p_0<+\infty$. For any $k>1$, there exists
$A>0$ such that if $\phi_R$ is normalized such that
\begin{equation} \label{norm}
\underset{x \in [0,A]}{ \sup } \, \, \phi_R (x)= 1,
\end{equation}
then for all $R>\max\{A,R_0\}$ and for all $x \in (0,R]$ we have
\[0 < \phi_R(x)\leq 1 +x^k.\]
Additionally, $\phi_R(0)>0$ when $\int_0^1\frac1g<+\infty$ and $\phi_R(0)=0$ when $\int_0^1\frac1g=+\infty$.
\end{lem}
\begin{proof}
For the bound from above we want to use the maximum principle in Lemma~\ref{lem:maxprinciple}. Therefore we want to prove that $\mathcal{L}^*_R \varphi(x) >0$ for $ x \in (A,R)$ with $A \in (0,R)$ as in Lemma~\ref{lem:maxprinciple}.
We take $\varphi(x) = 1 + x^k$ for some $k>1$. Then for $R \geq R_0$ we have
\begin{align*} \label{psi}
\begin{split}
\mathcal{L}^*_R \varphi(x) &= \lambda_R (1+x^k) - kg(x) x^{k-1} + B(x) \left( (1 + x^k) - \frac{1}{x}\int_{0}^{x} (1 + y^k) p \left(\frac{y}{x}\right) \,\mathrm{d} y \right)
\\ &= \lambda_R (1+x^k) - kg(x) x^{k-1} + B(x) (1 + x^k - p_0 - x^{k}p_k)
\\ &> x^{k-1} \left( -k g(x) - B(x) x^{1-k} + (1-p_k) B(x)x \right) := \varrho (x)
\end{split}
\end{align*}
since $p_0 = 2$ and $0< p_k < 1=p_1$ for $k>1$.
Moreover assuming \eqref{eq:gB} gives that the behaviour of $\varrho$ will be dominated by the positive term $(1-p_k) B(x)x^k > 0$.
Therefore, we can find $A(k)>0$ such that for all $A(k) <x <R$, we have $\mathcal{L}^*_R \varphi(x) >0$.
We fix such an $A> 0$ and normalize $\phi_R$ by~\eqref{norm}.
Then by the maximum principle in Lemma~\ref{lem:maxprinciple} we obtain that $\phi_R(x)\leq1+x^k$.
The positivity or nullity of $\phi_R(0)$ is a
direct consequence of~\cite[Theorem 1.10]{BCG13}.
\end{proof}
\begin{lem}
\label{lem:boundonlambda}
Under Hypotheses \ref{asmp:k1} and \ref{asmp:gB} with $p_0<+\infty$,
there exists a constant $C>0$ such that $\lambda_R \leq C$ for all
$R >R_0$.
\end{lem}
\begin{proof}
Since $\phi_R$ is continuous and by \eqref{norm}, there exists
$x_R\in[0,A]$ such that $\phi_R(x_R)=1.$ Notice that necessarily
$x_R>0$ when $\int_0^1\frac1g=+\infty$, since $\phi_R(0)=0$ in that
case. Moreover, the equation $\mathcal{L}^*_R \phi_R=0$ ensures that
for all $x>0$ we have
\begin{multline*}
\left(\phi_R(x) \exp \left( -\int_{x_R}^x \frac{\lambda_R+B(s)}{g(s)} \,\mathrm{d} s\right) \right)'
\\ = - \frac{B(x)}{x g(x)} \exp \left( -\int_{x_R}^x \frac{\lambda_R+B(s)}{g(s)} \,\mathrm{d} s\right) \int_{0}^{x} p \left(\frac{y}{x}\right) \phi_R(y) \,\mathrm{d} y.
\end{multline*} By integrating this from $x_R$ to $x\geq x_R$ we get
\begin{align*}
\phi_R(x) &\exp \left( -\int_{x_R}^x \frac{\lambda_R+B(s)}{g(s)} \,\mathrm{d} s\right) - 1\\
&= - \int_{x_R}^{x} \frac{B(y)}{y g(y)} \exp \left( -\int_{x_R}^y \frac{\lambda_R+B(s)}{g(s)} \,\mathrm{d} s\right) \int_{0}^{y} p \left(\frac{z}{y}\right) \phi_R(z) \,\mathrm{d} z \,\mathrm{d} y.
\end{align*}
By using the upper bound on $\phi_R$ we obtain, for $R> R_0$,
\begin{align*}
\phi_R(x)&\exp \left({-\int_{x_R}^x \frac{\lambda_R+B(s)}{g(s)} \,\mathrm{d} s} \right)
\\ &\geq 1 - \int_{x_R}^{x} \frac{B(y)}{y g(y)} \exp \left( -\int_{x_R}^{y} \frac{\lambda_R+B(s)}{g(s)} \,\mathrm{d} s \right) \int_{0}^{y} p \left(\frac{z}{y}\right) (1+z^k) \,\mathrm{d} z \,\mathrm{d} y
\\ &\geq 1 - \int_{x_R}^{x} \frac{B(y)}{g(y)} \exp \left(-\int_{x_R}^{y} \frac{\lambda_R+B(s)}{g(s)} \,\mathrm{d} s \right) \left( p_0 + p_k y^k\right) \,\mathrm{d} y.
\end{align*}
Since $\phi_R(R)=0$ we deduce that for all $R>R_0$,
\begin{equation}\label{eq:lowerbound} \int_{x_R}^{R} \frac{B(y)}{g(y)} \exp \left(-\int_{x_R}^{y} \frac{\lambda_R+B(s)}{g(s)} \,\mathrm{d} s \right) \left( p_0 + p_k y^k\right) \,\mathrm{d} y\geq 1,\end{equation}
and this enforces $\lambda_R$ to be bounded from above.
Indeed, otherwise, there would exist a sequence $(R_n)_{n\geq0}$ and $x_\infty\in[0,A]$ such that
\[R_n\to+\infty,\qquad \lambda_{R_n}\to+\infty,\qquad x_{R_n}\to x_\infty.\]
But in that case, since
\begin{align*}
\mathds{1}_{[x_{R_n},R_n]}(y)\frac{B(y)}{g(y)}& \exp \left(-\int_{x_{R_n}}^{y} \frac{\lambda_{R_n}+B(s)}{g(s)} \,\mathrm{d} s \right) \left( p_0 + p_k y^k\right)\\
&\leq\frac{B(y)}{g(y)} \exp \left(-\int_{A}^{y} \frac{B(s)}{g(s)} \,\mathrm{d} s \right) \left( p_0 + p_k y^k\right)
\end{align*}
and the latter function is integrable on $[0,+ \infty)$ (carry out
an integration by parts and use~\eqref{eq:gB}), the dominated
convergence theorem ensures that
\[\int_{x_{R_n}}^{R_n} \frac{B(y)}{g(y)} \exp \left(-\int_{x_{R_n}}^{y} \frac{\lambda_{R_n}+B(s)}{g(s)} \,\mathrm{d} s \right) \left( p_0 + p_k y^k\right) \,\mathrm{d} y\to0,\]
which contradicts~\eqref{eq:lowerbound}.
\end{proof}
\begin{lem}
\label{lem:boundonphiR}
Under Hypotheses \ref{asmp:k1} and \ref{asmp:gB} with $p_0<+\infty$,
$|\phi_R'(x)|$ is uniformly bounded on compact intervals for all
$R> R_0$.
\end{lem}
\begin{proof}
By the equation $\mathcal{L}^*_R\phi_R(x)=0$ and bounds on $\phi_R(x)$
and $\lambda_R$ we obtain
\begin{align*}
| \phi_R'(x) | &\leq \frac{\lambda_R \phi_R(x)}{g(x)} + \frac{B(x)}{g(x)} \, \left | \phi_R(x) - \frac{1}{x} \int_{0}^{x} p \left(\frac{y}{x} \right) \phi_R(y) \,\mathrm{d} y \right |
\\ &\leq \frac{\lambda_R }{g(x)} (1+x^k)+ \frac{B(x)}{g(x)} \, \left| 1+ x^k- \frac{1}{x} (1+x^k)\int_{0}^{x} p \left(\frac{y}{x} \right) \,\mathrm{d} y\right |
\\ &\leq \frac{\lambda_R }{g(x)} (1+x^k)+ \frac{B(x)}{g(x)} \, \left| 1+ x^k- \frac{1}{x} (1+x^k)x p_0\right |
\\ &\leq \frac{\lambda_R }{g(x)} (1+x^k)+ \frac{B(x)}{g(x)} \, |1- p_0 |,
\end{align*}
which gives a bound on $\phi_R' (x)$ for all $R > R_0$, taking
into account that $\lambda_R$ is uniformly bounded for all
$R > R_0$ thanks to Lemma \ref{lem:boundonlambda}.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:dualeigenfunction}]
Lemmas \ref{lem:existencetruncated}, \ref{lem:bounonphi},
\ref{lem:boundonlambda} and \ref{lem:boundonphiR} give the proof.
Since there exists a solution to the truncated dual Perron
eigenproblem \eqref{eq:trdualeigenfunction1} for any $R >0$ by Lemma~\ref{lem:existencetruncated}, it only remains to prove that the
terms are bounded in order to pass to the limit as $R \to + \infty$.
We provide the bounds on $\phi_R$, $\lambda_R$ and $\phi_R'$ by Lemmas
\ref{lem:bounonphi}, \ref{lem:boundonlambda}, \ref{lem:boundonphiR}
respectively.
These bounds ensure that we can extract a subsequence of
$(\lambda_R)$ which converges to $\lambda>0$ and a subsequence of
$(\phi_R)$ which converges locally uniformly to a limit $\phi$ which
satisfies $0 < \phi(x)\leq 1+x^k.$ Clearly $(\lambda,\phi)$ is the
solution to the dual Perron eigenproblem
\eqref{eq:dualeigenfunction}, and $\phi\not\equiv0$ since
$\sup_{x \in [0,A]} \phi (x)= 1.$
Similarly, the proof of the positivity or nullity of $\phi(0)$ is a
direct consequence of~\cite[Theorem 1.10]{BCG13}.
\end{proof}
\section{Harris's Theorem}
\label{sec:harris}
In this section, we state Harris's theorem based on \cite{H16} and
\cite{HM11}. The original idea comes from the study of discrete-time
Markov processes and dates back to Doeblin and to Harris~\citep{H56}, where
conditions for the existence and uniqueness of an equilibrium (or an
\emph{invariant measure}) for a Markov process are investigated. It is
a probabilistic method which relies on both a minorisation property
and a drift condition (also called Foster-Lyapunov condition), which
we describe below.
We use Harris's theorem applied to continuous-time Markov processes in
order to show that solutions to rescaled growth-fragmentation
equation \eqref{eq:gfscaledgeneral}, under suitable assumptions,
converge towards a universal profile at an exponential rate.
\
We assume that $\Omega$ is a Polish space and $(\Omega, \mathbb{S}igma)$ is a
measurable space together with its Borel $\sigma$-algebra $\mathbb{S}igma$, so
that $\Omega $ endowed with any probability measure is a Lebesgue
space. Moreover we denote the space of finite measures on $\Omega$ by
$\mathcal{M}(\Omega)$ and the space of probability measures on
$\Omega$ by $\mathcal{P}(\Omega)$.
A discrete-time Markov process $x$ is defined through a
\emph{transition probability function}. A function
$S \colon \Omega \times \Sigma \to [0,1]$ is a
transition probability function if $S(x, \cdot)$ is a probability
measure for every $x$ and $x \mapsto S(x, A)$ is a measurable
function for every $A \in \Sigma$. By using the transition probability
function we can define the associated Markov operator $\mathcal{S}$
acting on the space of signed measures on $\Omega$ and its adjoint
$\mathcal{S}^*$ acting on the space of bounded measurable functions
$\varphi \colon \Omega \to [0, +\infty)$ in the following way:
\begin{equation*}
(\mathcal{S} \mu) (A)
= \int_{\Omega} S(x,A) \mu (\,\mathrm{d} x),
\qquad (\mathcal{S}^* \varphi) (x) = \int_{\Omega} \varphi(y) S(x, \,\mathrm{d} y).
\end{equation*}
On the other hand, a continuous-time Markov process is no longer
described by a single transition function, but by a family of
transition probability functions $S_t$ defined for each time $t
>0$, with the property that the associated operators $\mathcal{S}_t$
satisfy
\begin{itemize}
\item the semigroup property:
$\mathcal{S}_{s+t} = \mathcal{S}_s \mathcal{S}_t$,
\item and $\mathcal{S}_0$ is the identity, or equivalently,
$S_0(x, \cdot) = \delta_x$ for all $x \in \Omega$.
\end{itemize}
We notice that $\mathcal{S}_{t}$ is \emph{linear, mass preserving} and
\emph{positivity preserving}. An \emph{invariant measure} of a
continuous-time Markov process $(\mathcal{S}_t)_{t \geq 0}$ is a
probability measure $\mu$ on $\Omega$ such that $\mathcal{S}_t \mu = \mu $
for every $t \geq 0$, and it is the main concept we need to
investigate when studying the asymptotic behaviour of a Markov process.
\
Let us state Doeblin's and Harris's theorems along with some
hypotheses. We always assume $(\mathcal{S}_t)_{t \geq 0}$ is a
continuous-time Markov semigroup. For their proofs we refer to
\cite{MT93} or \cite{HM11,H16}.
\begin{hyp}[Doeblin's condition]
\label{hyp:Doeblin}
There exists a time $t_0 >0$, a probability distribution $\nu$ and a
constant $\alpha \in (0,1)$ such that for any initial condition
$x_0$ in the domain we have:
\[ \mathcal{S}_{t_0} \delta_{x_0} \geq \alpha \nu .\]
\end{hyp}
Using this we prove the following theorem:
\begin{thm}[Doeblin's Theorem] \label{thm:Doeblin}
If we have a Markov semigroup $(\mathcal{S}_t)_{t \geq 0}$
satisfying Doeblin's condition (Hypothesis \ref{hyp:Doeblin}) then
for any two finite measures $\mu_1$ and $\mu_2$ and any integer
$n \geq 0$ we have that
\begin{equation*}
\label{eqn:Doeblin1}
\left\| \mathcal{S}^{n}_{t_0} (\mu_1 - \mu_2)\right\|_{\mathrm{TV}} \leq (1-\alpha) ^n\left\| \mu_1 - \mu_2 \right\|_{\mathrm{TV}}.
\end{equation*}
As a consequence, the semigroup has a unique invariant probability
measure $\mu_*$, and for all probability measures $\mu$:
\begin{equation*}
\label{eqn:Doeblin2}
\left\| \mathcal{S}_{t} ( \mu - \mu_*) \right\|_{\mathrm{TV}} \leq C e^{-\rho t} \left\| \mu - \mu_*\right\|_{\mathrm{TV}}, \quad t \geq 0,
\end{equation*}
where
\[ C := \frac{1}{1-\alpha} >0,
\qquad \rho := \frac{-\log(1-\alpha)}{t_0} >0 .\]
\end{thm}
\
Harris's theorem is an extension of Doeblin's theorem to situations in
which one cannot prove a uniform minorisation condition as in
Hypothesis \ref{hyp:Doeblin}. This is often the case when the state
space is unbounded. Instead, we use Doeblin's condition only in a
given region, and then show that the stochastic process will return to
that region often enough. This is established by finding a so-called
Lyapunov, or Foster-Lyapunov function. Both conditions then imply the
existence of a spectral gap in a weighted total variation
norm. Precisely, we need the following two hypotheses to be satisfied:
\begin{hyp}[Foster-Lyapunov condition]
\label{hyp:Lyapunov}
There exist $\gamma \in (0,1)$, $K \geq 0$, some time $t_0 >0$
and a measurable function $V \colon [0, + \infty) \to [1, + \infty)$
such that
\begin{equation} \label{eq:hyp2}
(\mathcal{S}_{t_0}^* V)(x) \leq \gamma V(x) + K,
\end{equation}
for all $x$.
\end{hyp}
\begin{rem}
When our continuous-time Markov process is obtained by
solving a particular PDE we often denote
\begin{equation*}
(\mathcal{S}_t m_0)(x) \equiv m(t,x),
\end{equation*}
where $m$ is the solution to the PDE with initial condition
$m_0$. Then the previous condition is equivalent to
\begin{equation*} \label{hyp:conf1}
\int_\Omega m(t_0,x )V (x) \,\mathrm{d} x \leq \gamma \int_\Omega m_0(x) V(x) \,\mathrm{d} x + K ,
\end{equation*}
to be satisfied for all $m_0 \in \mathcal{P} (\Omega)$. One can
verify this by proving the inequality
\begin{equation*} \label{hyp:conf2}
\frac{\,\mathrm{d}}{\,\mathrm{d} t} \int_\Omega m(t,x) V (x) \,\mathrm{d} x
\leq -\lambda \int_\Omega m(t,x) V(x) \,\mathrm{d} x + D
\end{equation*}
for some positive constants $D$ and $\lambda$, which then implies
\eqref{eq:hyp2} with $\gamma = e^{-\lambda t_0}$ and
$K = D/\lambda$.
\end{rem}
\
The next hypothesis is a minorisation condition like Hypothesis
\ref{hyp:Doeblin}, but only on a sufficiently large region:
\begin{hyp}[Small set condition]
\label{hyp:localDoeblin}
There exist a probability measure $\nu$, a constant
$\alpha \in (0,1)$ and some time $t_0 >0$ such that
\[ \mathcal{S}_{t_0} \delta_{x_0} \geq \alpha \nu, \] for all
$x_0 \in \mathcal{C}$, where
\[ \mathcal{C} = \left \{ x : V(x) \leq R \right \} \] for some
$R > 2K / (1-\gamma) $ where $K, \gamma$ are as in \emph{Hypothesis
\ref{hyp:Lyapunov}}.
\end{hyp}
Finally we state \emph{Harris's theorem} under these hypotheses:
\begin{thm}[Harris's Theorem] \label{thm:Harris}
If we have a Markov semigroup $(\mathcal{S}_t)_{t \geq 0}$ satisfying Hypotheses \ref{hyp:Lyapunov} and \ref{hyp:localDoeblin} then there exist $\beta >0$ and $\bar{\alpha} \in (0,1)$ such that
\begin{equation*}
\left\| \mathcal{S}_{t_0} \mu_1 - \mathcal{S}_{t_0} \mu_2\right\|_{V,\beta} \leq \bar{\alpha} \left\| \mu_1 - \mu_2 \right\|_{V,\beta}.
\end{equation*} for all nonnegative measures $\mu_1, \mu_2$ with $\int \mu_1 = \int \mu_2$, where the norm $\| \cdot \|_{V,\beta}$ is defined by
\[\left\| \mu_1 - \mu_2 \right\|_{V,\beta} := \int (1+\beta V(x)) |\mu_1 - \mu_2 |\,\mathrm{d} x.\]
Moreover, the semigroup has a unique invariant probability measure $\mu_*$ and there exist $C>0$ and $\rho >0$ (depending only on $t_0, \alpha, \gamma, K, R$ and $\beta$) such that
\begin{equation*}
\left\|\mathcal{S}_t (\mu -\mu_*)\right\|_{V,\beta} \leq C e^{-\rho t} \left\|\mu -\mu_*\right\|_{V,\beta} \text{ for all } t \geq 0.
\end{equation*}
Explicitly, if we set $ \gamma_0 \in \left[\gamma + 2K /R, 1\right)$, then for any $\alpha_0 \in (0, \alpha)$ we can choose $ \beta = \alpha_0/K$ and
$\bar{ \alpha } = \max \left \{ 1-\alpha + \alpha_0, (2+ R\beta \gamma_0) / (2+ R\beta) \right \}$. Then we have $C = 1/ \bar{\alpha}$ and $\rho = -(\log \bar{\alpha}) / t_0$.
\end{thm}
Proofs of Theorems \ref{thm:Doeblin} and \ref{thm:Harris} can be found
for example in \cite{ H16, HM11, MT93, S13}.
\section{Foster-Lyapunov condition}
\label{sec:lyapunov}
In this section we prove that Hypothesis \ref{hyp:Lyapunov} is
verified for the semigroup generated by rescaled
growth-fragmentation equation \eqref{eq:gfscaledgeneral}, when we
consider the evolution of $f(t,x) := \phi(x) m(t,x)$.
We divide the proof of Hypothesis \ref{hyp:Lyapunov} into
three cases which require slightly different calculations.
\subsection{Linear growth rate}
\label{subseq:lingr}
First we treat the linear growth case $g(x)=x$ with a constant
fragmentation kernel. (As remarked before, we do not consider the
mitosis kernel when $g(x)=x$ since there is no spectral gap in that
case). In this case the Perron eigenvalue and the corresponding dual
eigenfunction are known ($\lambda =1$ and $\phi(x)=x$), and the rescaled
growth-fragmentation equation is given by
\begin{equation}
\label{g=xp=2}
\frac{\partial }{\partial t}m(t,x) + \frac{\partial }{\partial x} (x m(t,x))
= 2\int_{x}^{+\infty} \frac{B(y)}{y} m(t,y) \,\mathrm{d} y - (B(x) +1) m(t,x),
\end{equation}
coupled with the usual initial and boundary conditions.
\begin{lem}
\label{lyapunovBlin}
We consider Equation \eqref{g=xp=2} under Hypotheses \ref{asmp:k1}, \ref{asmp:gB} with a growth rate $g(x) = x$ and the constant fragment distribution $p(z) = 2$ for $z \in (0,1]$. Then the following holds true for any $K>1>k>-1$, for some $C_1, \bar{C}>0$, and any nonnegative measure solution $m = m(t,x)$:
\begin{align}
\label{eq:lyapunovBlin}
\int_{0}^{+\infty} V(x) f(t,x) \,\mathrm{d} x \leq e^{-C_1t} \int_{0}^{+\infty} V(x) f_0(x) \,\mathrm{d} x + \bar{C } \int_{0}^{+\infty} f_0(x) \,\mathrm{d} x
\end{align}
for all $t \geq 0$, where $f(t,x):=xm(t,x)$, $f_0(x) = x m_0(x)$, $\|f_0\|_V < +\infty$ and $V(x) = x^{k-1} + x^{K-1}$.
\end{lem}
\begin{proof}
Let $\varphi:\mathbb{R}\to[0,1]$ be a non-increasing $C^1$ function such that $\varphi(x)=1$ for $x\leq0$ and $\varphi(x)=0$ for $x\geq1$.
For $\ell>0$ we define $\varphi_\ell(x)=\varphi(x-\ell)$.
Starting from~\eqref{eq:gf_def} we have
\begin{align*}
\frac{\,\mathrm{d}}{\,\mathrm{d} t} &\int_{0}^{+\infty} \big(x^k + x^K\big) \varphi_\ell(x)m(t,x) \,\mathrm{d} x \\
&= \int_{0}^{+\infty}\left(\big(kx^{k-1}+Kx^{K-1}\big)\varphi_\ell(x)+\big(x^k+x^K\big)\varphi'_\ell(x)\right)xm(t,x)\,\mathrm{d} x\\
&\qquad+2\int_{0}^{+\infty}\frac{B(x)}{x}m(t,x)\int_0^x\big(y^k+y^K\big)\varphi_\ell(y)\,\mathrm{d} y\,\mathrm{d} x\\
&\qquad\qquad-\int_{0}^{+\infty}\big(1+B(x)\big)\big(x^k+x^K\big)\varphi_\ell(x)m(t,x)\,\mathrm{d} x.
\end{align*}
Since $\varphi_\ell$ is non-increasing we get
\begin{align*}
\frac{\,\mathrm{d}}{\,\mathrm{d} t} &\int_{0}^{+\infty} \big(x^k + x^K\big) \varphi_\ell(x)m(t,x) \,\mathrm{d} x \\
&\leq\int_{0}^{+\infty}\big(kx^{k-1}+Kx^{K-1}\big)\varphi_\ell(x)xm(t,x)\,\mathrm{d} x\\
&\qquad+2\int_{0}^{+\infty}B(x)\left(\frac{x^{k-1}}{k+1}+\frac{x^{K-1}}{K+1}\right)\varphi_\ell(x)xm(t,x)\,\mathrm{d} x\\
&\qquad\qquad-\int_{0}^{+\infty}\big(1+B(x)\big)\big(x^{k-1}+x^{K-1}\big)\varphi_\ell(x)xm(t,x)\,\mathrm{d} x\\
&\leq -\frac{1}{2}(1-k)\int_{0}^{+\infty}(x^{k-1} + x^{K-1})\varphi_\ell(x) xm(t,x) \,\mathrm{d} x \\
&\quad+ \int_{0}^{+\infty} \left( c_1 B(x) x^{K-1}+ c_2x^{K-1} + c_3 B(x) x^{k-1} + c_4x^{k-1} \right)\varphi_\ell(x) xm(t,x) \,\mathrm{d} x
\end{align*}
where
\[ -1< c_1 : = \frac{1-K}{1+K}< 0, \, \,\, c_2:=K - \frac{k+1}{2}>0, \, \,\, c_3:= \frac{1-k}{1+k} > 0, \,\,\, c_4:=\frac{k-1}{2} <0 . \]
We define
\begin{equation}\label{Phi_lin}
\Phi(x) : = c_1 B(x)x^{K-1} + c_2x^{K-1} + c_3 B(x)x^{k-1}+ c_4 x^{k-1}.
\end{equation}
Due to Hypothesis \ref{asmp:gB}, the total fragmentation rate
$B \colon [0, + \infty) \to [0, + \infty) $ satisfies
$B(x) \to 0$ as $x \to 0$ and
$B(x) \to +\infty$ as $x \to +\infty$. Hence in
the latter expression the behaviour as $x \to +\infty$ is dominated
by the first term; thus $\Phi(x)$ will approach $-\infty$.
Similarly when $x \to 0$, the last term will dominate the behaviour
of $\Phi$, which is negative as well. Since $B$ is continuous we can
always bound ${\sup}_{x \geq 0} \, \Phi(x) \leq C_2 $ with
some positive quantity $C_2 >0.$ Therefore by denoting
$f(t,x) = xm(t,x)$ and $f_0(x)=x m_0(x)$ we obtain, since $\varphi_\ell\leq1$ and $\int f(t,x)\,\mathrm{d} x=\int f_0(x)\,\mathrm{d} x$,
\begin{align*}
\frac{\,\mathrm{d}}{\,\mathrm{d} t} \int_{0}^{+\infty}& (x^{k-1}+x^{K-1}) \varphi_\ell(x)f(t,x) \,\mathrm{d} x \\
& \leq -C_1 \int_{0}^{+\infty} (x^{k-1}+x^{K-1})\varphi_\ell(x) f(t,x) \,\mathrm{d} x + C_2 \int_{0}^{+\infty} f_0(x) \,\mathrm{d} x,
\end{align*}
where $C_1 = (1-k) /2 >0$. Then Grönwall's lemma implies
\begin{align*}
\int_{0}^{+\infty} V(x)\varphi_\ell(x) f(t,x) \,\mathrm{d} x & \leq e^{-C_1t} \int_{0}^{+\infty} V(x)\varphi_\ell(x) f_0(x) \,\mathrm{d} x + \bar{C } \int_{0}^{+\infty} f_0(x) \,\mathrm{d} x
\end{align*}
with $\bar{C} = C_2/C_1$.
Due to the monotone convergence theorem we deduce \eqref{eq:lyapunovBlin} by letting $\ell$ go to $+\infty$.
\end{proof}
\subsection{Sublinear growth rate close to $0$}
\label{subseq:sublingr}
In this section we assume that $\int_0^1 \frac 1g<+\infty$, which we
sometimes refer to as the case of \emph{sublinear growth rate at
$x=0$}.
\begin{lem}
\label{lem:lyapunov1}
We consider Equation
\eqref{eq:gfscaledgeneral} under Hypotheses \ref{asmp:k1},
\ref{asmp:gB}, and $\int_0^1 \frac 1g<+\infty$. We take $K>1+\xi$.
Then the following holds true for $C_1 = \lambda$ (the first
eigenvalue), some $C_2 > 0$, and any nonnegative measure solution $m = m(t,x)$:
\begin{equation} \label{lya1}
\frac{\,\mathrm{d}}{\,\mathrm{d} t} \int_{0}^{+\infty} x^K m(t,x) \,\mathrm{d} x
\leq -C_1 \int_{0}^{+\infty} x^K m(t,x) \,\mathrm{d} x
+ C_2 \int_{0}^{+\infty} \phi(x)m(t,x) \,\mathrm{d} x,
\end{equation}
for all $ t \geq 0$.
\end{lem}
\begin{proof}
For the sake of conciseness and clarity, we skip the truncation procedure here. But the same method as for Lemma~\ref{lyapunovBlin} can be used to make the calculations rigorous by using the truncation function $\varphi_\ell$.
We have
\begin{align*}
\frac{\,\mathrm{d}}{\,\mathrm{d} t} &\int_{0}^{+\infty} x^K m(t,x) \,\mathrm{d} x
\\&= -\int_{0}^{+\infty} x^K\frac{\partial}{\partial x} \left(g(x) m(t,x) \right) \,\mathrm{d} x - \int_{0}^{+\infty} x^K (B(x) +\lambda) m(t,x) \,\mathrm{d} x
\\&+\int_{0}^{+\infty} x^K\int_{x}^{+\infty} \frac{B(y)}{y} p\left(\frac{x}{y}\right) m(t,y) \,\mathrm{d} y \,\mathrm{d} x
\\&= -\lambda \int_{0}^{+\infty} x^K m(t,x) \,\mathrm{d} x + \int_{0}^{+\infty} \left( (p_K-1)x^{K} B(x) + K x^{K -1}g(x) \right) m(t,x) \,\mathrm{d} x.
\end{align*}
We define \[\Phi(x):= (p_K-1)x^{K} B(x) + K x^{K -1}g(x)\] and
notice that $\Phi(x) \leq C_2\phi(x)$ for all $x \geq 0$,
for some $C_2 >0$, due to Hypothesis \ref{asmp:gB} concerning the
behaviour of $xB(x)/ g(x)$ as $x \to +\infty$ and
$x \to 0$, and the fact that $\phi(0)>0$ since
$\int_0^1 \frac{1}{g}<+\infty$, which is a result of Theorem
\ref{thm:dualeigenfunction}.
\end{proof}
We now give a translation of this lemma in terms of $f = \phi m$, since this is needed in order to apply Harris's theorem to the evolution of $f$:
\begin{cor}
\label{cor:lyapunov1/gint}
We consider Equation
\eqref{eq:gfscaledgeneral} under Hypotheses \ref{asmp:k1},
\ref{asmp:gB}, and $\int_0^1 \frac 1g<+\infty$. For
$V(x) = 1+ \frac{x^K}{\phi(x)}$ where $K > 1 + \xi$ and
$f(t,x):=\phi(x)m(t,x)$ with $ f_0(x)=\phi(x) m_0(x)$, $\|f_0\|_V < +\infty$, there exist $C_1, \tilde{C} >0$ such that
for all $t\geq0$
\begin{equation}
\label{eq:lyapunov1}
\int_{0}^{+\infty} V(x) f(t,x) \,\mathrm{d} x \leq e^{-C_1t} \int_{0}^{+\infty} V(x) f_0(x) \,\mathrm{d} x + \tilde{C} \int_{0}^{+\infty} f_0(x) \,\mathrm{d} x.
\end{equation}
\end{cor}
\begin{proof} By adding $\phi(x)$ to the weight $x^K$ on both sides of \eqref{lya1} we obtain
\begin{align*}
\frac{\,\mathrm{d}}{\,\mathrm{d} t} &\int_{0}^{+\infty} x^K m(t,x) \,\mathrm{d} x = \frac{\,\mathrm{d}}{\,\mathrm{d} t} \int_{0}^{+\infty} (x^K+ \phi(x)) m(t,x) \,\mathrm{d} x \\
&\leq -C_1 \int_{0}^{+\infty} (x^K+\phi(x)) m(t,x) \,\mathrm{d} x
+ (C_1+C_2) \int_{0}^{+\infty} \phi(x) m(t,x) \,\mathrm{d} x.
\end{align*}
Therefore, we have for $f(t,x) = \phi(x) m(t,x)$:
\begin{multline*}
\frac{\,\mathrm{d}}{\,\mathrm{d} t} \int_{0}^{+\infty} \left(1+\frac{x^K}{\phi(x)}\right) f(t,x) \,\mathrm{d} x
\\ \leq -C_1 \int_{0}^{+\infty} \left(1+\frac{x^K}{\phi(x)}\right) f(t,x) \,\mathrm{d} x + (C_1 + C_2) \int_{0}^{+\infty} f_0(x) \,\mathrm{d} x,
\end{multline*} since $\int f(t,x)dx=\int f_0(x)dx$.
Grönwall's lemma then implies \eqref{eq:lyapunov1} with $\tilde{C} = 1+C_2/C_1$.
\end{proof}
\subsection{Superlinear growth rate close to $0$}
\label{subseq:superlingr}
Now we assume that $\int_0^1\frac 1g=+\infty$, which implies linear or superlinear behaviour of the growth rate close to $0$. This, of course, includes the case $g(x)=x$ from Section \ref{subseq:lingr}, but the general result we obtain now is slightly more restrictive. In the case
of exact linear growth, Lemma \ref{lyapunovBlin} is slightly more
precise.
\begin{lem}
\label{lem:lyapunov2}
We consider Equation \eqref{eq:gfscaledgeneral} under Hypotheses
\ref{asmp:k1}, \ref{asmp:gB}, and $\int_0^1\frac 1g=+\infty$. We
take $k<0$ and $K>1 + \xi$. Then the following holds true for any
nonnegative measure solution $m=m(t,x)$:
\begin{multline*}
\frac{\,\mathrm{d}}{\,\mathrm{d} t} \int_{0}^{+\infty} (x^k+x^K) m(t,x) \,\mathrm{d} x
\leq -C_1 \int_{0}^{+\infty} (x^k+x^K) m(t,x) \,\mathrm{d} x
+ C_2 \int_{0}^{+\infty} \phi(x)m(t,x) \,\mathrm{d} x,
\end{multline*}
for all $t \geq 0$, where $C_1 = \lambda >0$ and $C_2>0$ is some
constant independent of the solution $m$.
\end{lem}
\begin{proof}
Here again we skip the truncation procedure and refer to the proof of Lemma~\ref{lyapunovBlin} for the method which allows making the calculations rigorous.
We have
\begin{align*}
&\frac{\,\mathrm{d}}{\,\mathrm{d} t} \int_{0}^{+\infty} (x^k+x^K) m(t,x) \,\mathrm{d} x
\\&= -\int_{0}^{+\infty} (x^k+x^K)\frac{\partial}{\partial x} \left(g(x) m(t,x) \right) \,\mathrm{d} x - \int_{0}^{+\infty} (x^k+x^K) (B(x) +\lambda) m(t,x) \,\mathrm{d} x
\\& \quad + \int_{0}^{+\infty} \frac{B(y)}{y} m(t,y) \int_0^1 (y^kz^k+y^Kz^K) p\left(z\right) y \,\mathrm{d} z\,\mathrm{d} y
\\&= -\lambda \int_{0}^{+\infty} (x^k+x^K) m(t,x) \,\mathrm{d} x
\\&\quad+ \int_{0}^{+\infty} \left( (p_k-1)x^kB(x) + (p_K-1)x^{K} B(x) + k x^{k-1}g(x) + K x^{K -1}g(x) \right) m(t,x) \,\mathrm{d} x
\end{align*}
Similarly to previous proofs, we define
\[\Phi(x):= (p_k-1)x^kB(x) + (p_K-1)x^{K} B(x) + k x^{k-1}g(x)
+ K x^{K -1}g(x)\] and notice that
$\Phi(x) \leq C_2\phi(x)$ for all $x > 0$, for some
$C_2 >0$, due to Hypothesis \ref{asmp:gB} concerning the
behaviour of $xB(x)/g(x)$ as $x \to +\infty$ and
$x \to 0$, and the fact that $p_K-1<0$ and $k<0$.
\end{proof}
\begin{cor}
\label{cor:lyapunov1/gnonint}
We consider Equation \eqref{eq:gfscaledgeneral} under Hypotheses
\ref{asmp:k1}, \ref{asmp:gB} and $\int_0^1\frac 1g=+\infty$. For $V(x) = \frac{x^k+x^K}{\phi(x)}$
with $k<0$, $K > 1 + \xi$, and $f(t,x):=\phi(x)m(t,x)$ with $f_0(x)= \phi (x)m_0(x)$, $\|f_0\|_V < +\infty$, there exist
$C_1, \tilde{C} >0$ such that for all $t\geq0$:
\begin{equation}
\label{eq:lyapunov2}
\int_{0}^{+\infty} V(x) f(t,x) \,\mathrm{d} x \leq e^{-C_1t} \int_{0}^{+\infty} V(x) f_0(x) \,\mathrm{d} x + \tilde{C} \int_{0}^{+\infty} f_0(x) \,\mathrm{d} x.
\end{equation}
\end{cor}
\begin{proof}
The inequality in Lemma \ref{lem:lyapunov2} yields, for
$f(t,x) := \phi(x) m(t,x)$,
\begin{multline*}
\frac{\,\mathrm{d}}{\,\mathrm{d} t} \int_{0}^{+\infty} \frac{x^k+x^K}{\phi(x)} f(t,x) \,\mathrm{d} x
\leq -C_1 \int_{0}^{+\infty} \frac{x^k+x^K}{\phi(x)} f(t,x) \,\mathrm{d} x + (C_1 + C_2) \int_{0}^{+\infty} f_0(x) \,\mathrm{d} x,
\end{multline*} since $\int f(t,x)dx=\int f_0(x)dx$.
Then Grönwall's
lemma implies \eqref{eq:lyapunov2} with
$\tilde{C} = 1+C_2/C_1$.
\end{proof}
\section{Minorisation condition}
\label{sec:LowerBounds}
In this section, we show that Hypothesis \ref{hyp:localDoeblin} is
verified for the semigroup generated by rescaled
growth-fragmentation equation \eqref{eq:gfscaledgeneral}. We give the
proof in two parts where the uniform fragment distribution and the
equal mitosis are considered separately.
We start by recalling some known results on the solution of the
transport part of Equation \eqref{eq:gfscaledgeneral}. Consider the equation
\begin{equation} \label{eq:growth1}
\begin{aligned}
\frac{\partial }{\partial t}m(t,x) + \frac{\partial }{\partial x} (g(x) m(t,x)) &= -c(x) m(t,x), \qquad &&t,x > 0,\\
m(t,0) &= 0, \qquad &&t > 0,\\
m(0,x) &= n_0(x), \qquad &&x > 0,
\end{aligned}
\end{equation} which is the same as Equation \eqref{eq:gfscaledgeneral} without the positive part of the fragmentation operator.
We remark that Hypothesis \ref{asmp:gB} ensures that the characteristic ordinary differential equation
\begin{align}
\begin{split}
\label{eq:char-ode}
\frac{\,\mathrm{d}}{\,\mathrm{d} t} X_t (x_0) &= g(X_t(x_0)), \\
X_0(x_0) &= x_0,
\end{split}
\end{align}
has a unique solution, defined for $t \in [0,+\infty)$, for any
initial condition $x_0 > 0$. In fact, it is defined in some interval
$(t_*(x_0), +\infty)$, for some $t_*(x_0) < 0$. The solution can be
explicitly given in terms of $H^{-1}$, where
\begin{equation*}
H(x) := \int_1^x \frac{1}{g(y)} \,\mathrm{d} y, \qquad x \geq 0.
\end{equation*}
We notice that $H$ is strictly increasing with
$H_0 := H(0) = \underset{x \to 0}{\lim} \, H(x) < 0$ and
$\underset{x \to +\infty}{\lim} \, H(x) = +\infty$ (since $g$ grows
sublinearly as $x \to +\infty$), so that it is invertible as a map
from $(0,+\infty)$ to $(H_0, +\infty)$. (We allow $H_0 = -\infty$ if
$1/g$ is not integrable close to $x=0$.) It can easily be checked that
\begin{equation*}
\label{eq:char-solution}
X_t(x_0) = H^{-1} (t + H(x_0))
\qquad \text{for $x_0 > 0$ and $t > H_0 - H(x_0)$},
\end{equation*}
so that the maximal time interval on which the solution of
\eqref{eq:char-ode} is defined is precisely $(H_0-H(x_0),
+\infty)$. Since it will be convenient later, we define
\begin{equation*}
X_t(0) := \lim_{x_0 \to 0} X_t(x_0) =
\begin{cases}
0 &\qquad \text{if $H_0 = -\infty$,}
\\
H^{-1}(t + H_0) &\qquad \text{if $H_0 \in (-\infty, 0)$.}
\end{cases}
\end{equation*}
This reflects the fact that the characteristics take a very long time
to escape from $0$ when $1/g$ is not integrable close to $0$; while
they escape in finite time if $1/g$ is integrable close to $0$.
For each $t \geq 0$, we have thus defined the \emph{flow map}
$X_t \colon (0,+\infty) \to (X_t(0),+\infty)$, which is strictly
increasing. For negative times, we may consider
$X_{-t} \colon (X_t(0),+\infty) \to (0,+\infty)$ (where $t >
0$). Of course, $X_{-t} = (X_t)^{-1}$.
If $n_0$ is a nonnegative measure, it is well known that the unique
measure solution to Equation \eqref{eq:growth1} is given by
\begin{equation}
\label{eq:growth-sol-form1}
\begin{aligned}
m(t, x) &= X_t \# n_0(x)
\exp{\left( -\int_0^t c(X_{-\tau}(x)) \,\mathrm{d} \tau \right)},
\qquad &&t \geq 0,\ x > X_t(0),
\\
m(t,x) &= 0, \qquad &&t \geq 0,\ x \leq X_t(0),
\end{aligned}
\end{equation}
where we abuse notation by evaluating the measures $m(t,\cdot)$ and
$X_t \# n_0$ at a point $x > 0$. For a Borel measurable map
$X \colon (0,+\infty) \to (0,+\infty)$, the expression $X \# n_0$ denotes
the \emph{transport}, or \emph{push forward}, of the measure $n_0$ by
the map $X$, defined by duality through
\begin{equation*}
\int_0^\infty \varphi(x) X \# n_0(x) \,\mathrm{d} x
:= \int_0^\infty \varphi(X(y)) n_0(y) \,\mathrm{d} y
\end{equation*}
for all continuous, compactly supported
$\varphi \colon (0,+\infty) \to \mathbb{R}$. We use the notation $\mathcal{T}_t$
for this flow map:
\begin{equation} \label{T_t}
\mathcal{T}_t n_0(x) := X_t \# n_0(x), \qquad \text{for all } t \geq 0,
\end{equation}
so $\mathcal{T}_t$ is the semigroup associated to transport equation
\eqref{eq:growth1}.
If additionally $n_0$ is a function
and $X$ has a left inverse $X^{-1}\colon (a,b) \to (0,+\infty)$, one has
\begin{equation*}
X \# n_0(x) =
\begin{cases}
n_0(X^{-1}(x))\, \Big|\frac{\,\mathrm{d}}{\,\mathrm{d} x} X^{-1}(x)\Big|
\quad & \text{if $x \in (a,b)$,}
\\
0
\quad & \text{otherwise.}
\end{cases}
\end{equation*}
Using this for the solution to \eqref{eq:growth1}, if $n_0$ is a
function we may write $m$ in the equivalent form
\begin{equation}
\label{eq:growth-sol-form2}
m(t, x) = n_0(X_{-t}(x)) \frac{\,\mathrm{d}}{\,\mathrm{d} x} X_{-t}(x)
\exp{\left( -\int_0^t c(X_{-\tau}(x)) \,\mathrm{d} \tau \right)}
\end{equation}
when $t \geq 0 $ and $x > X_t(0)$, and $m(t,x) = 0$
otherwise. Using that $Y_t(x) := \frac{\,\mathrm{d}}{\,\mathrm{d} x} X_t(x)$ satisfies
$\frac{\,\mathrm{d}}{\,\mathrm{d} t} Y_t(x) = g'(X_t(x)) Y_t(x)$, we note for later that
\begin{equation}
\label{eq:growth-sol-form3}
\frac{\,\mathrm{d}}{\,\mathrm{d} x} X_{-t}(x) = \exp{\left( -\int_0^t g'(X_{-\tau}(x))\,\mathrm{d} \tau \right)},
\qquad t \geq 0,\ x > X_t(0).
\end{equation}
\subsection{Uniform fragment distribution}
Let us consider the case of uniform fragment distribution $p(z)=2$,
corresponding to the fragmentation kernel of the form $\kappa (x,y) = \frac{2}{x} B(x) \mathbbm{1}_{\{0 \leq x \leq y\}}$. The growth-fragmentation equation in this case is widely studied and depending on some assumptions made on growth and total division rates,
existence (in some cases exact values) of eigenelements are known. The
rescaled growth-fragmentation equation in this case becomes
\begin{equation}
\label{eq:gfscaledp=2}
\begin{aligned}
\frac{\partial }{\partial t}m + \frac{\partial }{\partial x} (g(x) m) &= 2\int_{x}^{+\infty} \frac{B(y)}{y} m(t,y) \,\mathrm{d} y
- (B(x) + \lambda ) m, \quad &&t,x \geq 0,
\\ m(t,0) &= 0, \quad &&t > 0,
\\ m(0,x) &= n_0(x), \quad &&x > 0,
\end{aligned}
\end{equation}
where $m = m(t,x)$ whenever variables are not explicitly written. If
we consider a linear growth $g(x) = g_0x$ and a power like total
division $ B(x) = b_0x^{\gamma}$ with $\gamma >0$, and $g_0, b_0 >0$,
the Perron eigenvalue and the corresponding dual eigenfunction are
given by
\begin{align*}
\lambda = g_0 \quad \text{ and } \quad \phi(x) = \frac{x}{\int y N(y) \,\mathrm{d} y}.
\end{align*}
In this case, eigenelements can be computed explicitly (see for
example \cite{DG09}):
\begin{equation*}
\lambda = g_0, \qquad
N(x) = \left(\frac{b_0}{\gamma g_0}\right)^{1/\gamma}
\frac{\gamma}{\Gamma \big(\frac{1}{\gamma}\big)}
\exp \left(- \frac{1}{\gamma}\frac{b_0}{g_0} x^\gamma\right),
\qquad
\phi(x) = \left(\frac{b_0}{\gamma g_0}\right)^{1/\gamma}
\frac{ \Gamma \big(\frac{1}{\gamma}\big)}
{\Gamma \big(\frac{2}{\gamma}\big)} x.
\end{equation*}
Moreover, in \cite{BCG13}, the authors give the asymptotics of the
profile $N$ and accurate bounds on the dual eigenfunction $\phi$ in a
more general form of the growth-fragmentation equation where growth
and total division rates behave like a power law for large and small
$x$.
\begin{lem}[Lower bound for the uniform fragment distribution]
\label{lem:doeblinuniformfrag}
Assume Hypotheses \ref{asmp:k1} and \ref{asmp:gB} hold true with a constant
distribution of fragments $p(z) = 2$ for $z \in (0,1]$. Let
$(\mathcal{S}_t)_{t \geq 0}$ be the linear semigroup associated to
Equation \eqref{eq:gfscaledp=2}. For all $0 < \eta < \theta$ given,
there exists $t_0 >0$ such that for all $t > t_0$ and
$x_0 \in (\eta,\theta]$ it holds that
\begin{equation*}
\mathcal{S}_{t} \,\mathrm{d}elta_{x_0}(x) \geq C(\eta, \theta, t)\qquad \text{for all $x \in I_t$},
\end{equation*}
where $I_t$ is an open interval which depends on $\eta$,
the time $t$, and for some quantity $C = C(\eta, \theta, t)$
depending only on $\eta$, $\theta$ and $t$. If in addition we assume
that
\begin{equation*}
\int_0^1 \frac{1}{g(x)} \,\mathrm{d} x < +\infty,
\end{equation*}
then the above result also holds when taking $\eta = 0$.
\end{lem}
\begin{proof}
Recall that $(\mathcal{T}_t)_{t \geq 0}$ is the semigroup associated to the transport equation
\begin{equation*}
\frac{\partial }{\partial t}m(t,x) + \frac{\partial }{\partial x} (g(x)m(t,x)) + c(x) m(t,x) =0,
\end{equation*} where $c(x) = B(x) + \lambda$. By Duhamel's formula we have
\begin{equation*}
\label{duhamel}
\mathcal{S}_t n_0(x) = m(t,x) = \mathcal{T}_t n_0(x) + \int_{0}^{t} \mathcal{T}_{t-\tau} (\mathcal{A}(\tau, .))(x) \,\mathrm{d} \tau,
\end{equation*}
where
$\mathcal{A}(t,x) := 2 \int_{x}^{+\infty} \frac{B(y)}{y} m(t,y)
\,\mathrm{d} y$. Fix $0 \leq \eta < \theta$, and take any
$x_0 \in (\eta, \theta]$.
If $n_0 = \delta_{x_0}$, a simple
bound gives
\begin{equation*}
\mathcal{S}_t \delta_{x_0} \geq \mathcal{T}_t \delta_{x_0}
= X_t \# \delta_{x_0} \exp{\left( -\int_0^t c(X_{t-\tau}(x_0)) \,\mathrm{d} \tau \right)},
\end{equation*}
where we have used the expression of $\mathcal{T}_t$ given in
\eqref{eq:growth-sol-form1} and the fact that the support of
$X_t \# \delta_{x_0}$ is the single point $\{X_t(x_0)\}$. By
Hypothesis \ref{asmp:gB} (in particular since $B$ is
continuous on $[0,X_t(\theta)]$), for some $C_1 = C_1(\theta, t)$
which is increasing in $t$, we have
\begin{equation*}
c(x) = B(x) + \lambda \leq C_1
\quad \text{for all $x \in (0, X_t(\theta)]$}.
\end{equation*}
We deduce that
\begin{equation} \label{secondbound}
\mathcal{S}_t \delta_{x_0} \geq X_t \# \delta_{x_0} e^{-C_1 t}
= \delta_{X_t(x_0)} e^{-C_1 t}.
\end{equation}
Using this we obtain
\begin{equation*}
\mathcal{A}(t,x) \geq 2 e^{-C_1 t} \frac{B(X_t(x_0))}{X_t(x_0)}
\quad \text{for all $t > 0$ and $x < X_t(x_0)$.}
\end{equation*}
We use that there is some $x_B > 0$ for which $B$ is bounded
below by a positive quantity on any interval of the form
$[x_B, R]$. There is some $t_B > 0$ such that for $t > t_B$ we
have $X_t(x_0) > x_B$ for all $x_0 > \eta$ {(for this to hold,
notice we may take $\eta = 0$ in the case that
$\int_0^1 1/g < +\infty$, but we need
$\eta > 0$ otherwise)}. Hence, for some
$C_2 = C_2(\eta, \theta, t)$ which is decreasing in $t$, we obtain
\begin{equation*}
\mathcal{A}(t,x) \geq
C_2 e^{-C_1 t}
\qquad \text{for all $t > t_B$ and $x < X_t(x_0)$.}
\end{equation*}
Take now $t > t_B$, which will stay fixed until the end of the
proof. The previous bound shows that
\begin{equation*}
\mathcal{A}(\tau,x) \geq
C_2(\eta,\theta, \tau) e^{-C_1(\theta, \tau) \tau}
\geq
C_2(\eta,\theta, t) e^{-C_1(\theta, t) \tau}
=:
\tilde{C_2} e^{-\tilde{C_1} \tau}
\end{equation*}
{for all $t > t_B$,\ $t_B < \tau < t$ and all
$x < X_\tau(x_0)$.} As a consequence, using
\eqref{eq:growth-sol-form2} and \eqref{eq:growth-sol-form3},
\begin{equation*}
\mathcal{T}_{t-\tau} \mathcal{A}(\tau, x)
\geq \tilde{C_2} e^{-\tilde{C_1} \tau}
\exp{\left( -\int_0^{t-\tau} c(X_{-s}(x)) \,\mathrm{d} s \right)}
\exp{\left( -\int_0^{t-\tau} g'(X_{-s}(x))\,\mathrm{d} s \right)}
\end{equation*}
for all $t_B < \tau < t$ and $X_{t-\tau}(0) < x <
X_t(x_0)$. Since $X_{-s}(x) \leq X_t(x_0)$ in this range, we
can bound this by
\begin{equation*}
\mathcal{T}_{t-\tau} \mathcal{A}(\tau, x)
\geq \tilde{C_2} e^{-2 \tilde{C_1} t}
\exp{\left( -\int_0^{t-\tau} g'(X_{-s}(x))\,\mathrm{d} s \right)},
\end{equation*}
again for all $t_B < \tau < t$ and
$X_{t-\tau}(0) < x < X_t(x_0)$. In order to find a lower bound
for the last exponential we restrict to a smaller $x$
interval. Since the bound holds for all $x$ with
\begin{equation*}
X_{t-\tau}(0) < x < X_t(x_0),
\end{equation*}
it holds in particular for all $x$ with
\begin{equation}
\label{eq:It-interval}
X_{t-t_B}(\eta) < x < X_t(\eta).
\end{equation}
Again this is a point where we need to take $\eta > 0$ in the
case $\int_0^1 1/g = +\infty$, since otherwise this gives an
empty range of $x$. In the case $\int_0^1 1/g < +\infty$,
$\eta = 0$ is allowed. In this range, the quantity $X_{-s}(x)$
inside the exponential satisfies
\begin{equation*}
X_{\tau-t_B}(\eta) \leq X_{-s}(x) \leq X_t(\eta)
\end{equation*}
Choose $\delta > 0$ such that $t_B + \delta < t$. Then for all
$x$ satisfying \eqref{eq:It-interval} and all $\tau \in (t_B +
\delta, t)$ we have
\begin{equation*}
X_{\delta}(\eta) \leq X_{-s}(x) \leq X_t(\eta).
\end{equation*}
Using that $g'(X) \leq C_3$ for all
$X \in \left [ X_{\delta}(\eta), X_t(\eta) \right ]$ we have
\begin{equation*}
\mathcal{T}_{t-\tau} \mathcal{A}(\tau, x)
\geq \tilde{C_2} e^{-\tilde{C_1} \tau} e^{-C_3 (t-\tau)}
\geq \tilde{C_2} e^{-C_4 t}
\end{equation*}
for all $x$ satisfying \eqref{eq:It-interval} and all
$\tau \in (t_B + \delta, t)$. A final integration gives, for
$x$ in the same interval,
\begin{equation*}
\int_{0}^{t} \mathcal{T}_{t-\tau} (\mathcal{A}(\tau, \cdot))(x) \,\mathrm{d}\tau
\geq \tilde{C_2} e^{-C_4 t} \int_{t_B+\delta}^{t} \,\mathrm{d}\tau
= \tilde{C_2} e^{-C_4 t} (t-t_B-\delta).
\end{equation*}
Taking $t_0:=t_B$ gives the result.
\end{proof}
\subsection{Equal mitosis}
We now consider the fragment distribution
$p(z) = 2 \delta_{\frac{1}{2}}(z)$ which describes the process of equal mitosis, in which cells of size $x$ split into two equal daughter
cells of size $x/2$. In Equation \eqref{eq:gfscaledgeneral}, we have then
$\mathcal{A}(t,x) := 4 B(2x) m(t,2x)$ and the rescaled
growth-fragmentation equation takes the form
\begin{equation}
\label{eq:gfscaledmitosis}
\begin{aligned}
\frac{\partial }{\partial t}m(t,x) + \frac{\partial }{\partial x} (g(x) m(t,x))
&= 4B(2x) m(t,2x) - (B(x) + \lambda ) m(t,x), &&t,x \geq 0,
\\ m(t,0) &= 0, \qquad \qquad &&t > 0,
\\ m(0,x) &= n_0(x), \qquad &&x > 0.
\end{aligned}
\end{equation}
The case where $g$ and $B$ are constant was the subject of numerous
works in the past, most notably~\cite{BCGMZ13,CMP10,HW89,MS16,PR05,vBALZ}.
For $g(x)=1$ and $B(x)=1$, eigenelements are
given by
\[\lambda = 1, \qquad
N(x) = \sum_{n=0}^{+\infty} (-1)^n \alpha_n e^{-2^{n+1}x},
\qquad
\phi(x) \equiv 1.
\]
with $\alpha_n=\frac{2}{2^n-1}\alpha_{n-1}$ and $\alpha_0>0$ a suitable normalization constant,
and the solution $m(t,x)$ converges exponentially fast to the universal profile $N(x)$, which vanishes as $x\to0$ and $x\to+\infty$.
However, when a linear growth rate $g(x)= x$ is considered
Equation \eqref{eq:gfscaledmitosis} exhibits oscillatory behaviour in
the long time. This is because instead of a dominant real eigenvalue,
there are nonzero imaginary eigenvalues, so that there exists a set of
dominant eigenvalues. This type of periodic long time behaviour was
first observed in \cite{DHT84} and then it was proved
in \cite{GN88} by using the theory of positive semigroups combined
with spectral analysis to obtain the convergence to a semigroup of
rotations. Since the method relies on some compactness arguments, the
authors considered the equation in a compact subset of $(0,+\infty)$.
Recently in~\cite{GM19}, the authors proved the oscillatory
behaviour in the framework of measure solutions for general division rates on
$(0,+\infty)$. The proof relies on a general relative
entropy argument combined with the use of Harris's theorem on discrete sub-problems.
It provides an explicit rate of convergence in weighted total variation norm.
Here we consider a sublinear growth rate and a
more general division rate than those so far considered in the
literature. We exclude of course the case $g(x)=x$, for which we know
the lower bound (and the exponential convergence) does not hold.
We first need a technical lemma which gives an expression
for the time integration of a measure moving in time:
\begin{lem}
\label{lem:time-integral-of-dirac}
Let $t > 0$ and $F \colon [0,t] \to \mathbb{R}$ an injective, differentiable
function. Then
\begin{equation*}
\int_{0}^{t} \delta_{F(\tau)} (x) \,\mathrm{d} \tau =\left|\left(F^{-1}\right)'(x)\right| \mathbbm{1}_{\{\min(F(0),F(t)) \leq x \leq \max(F(0),F(t))\}}.
\end{equation*}
\end{lem}
\begin{proof}
Integrating against a smooth test function $\varphi(x)$ we obtain
\begin{align*}
\begin{split}
\int_{0}^{+\infty} \varphi(x) \int_{0}^{t} \delta_{F(\tau)} (x) \,\mathrm{d} \tau \,\mathrm{d} x
&= \int_{0}^{t} \int_{0}^{+\infty} \varphi(x) \delta_{F(\tau)} (x) \,\mathrm{d} x \,\mathrm{d} \tau
\\&= \int_{0}^{t} \varphi (F(\tau)) \,\mathrm{d} \tau = \int_{F(0)}^{F(t)} \varphi(y) \left(F^{-1}\right)'(y) \,\mathrm{d} y.
\end{split}
\end{align*}
by using the change of variable $y = F(\tau)$.
\end{proof}
The following result will ensure a certain \emph{sublinearity} of the
characteristic flow $X_t$ which we will need later:
\begin{lem}
\label{lem:flow-sublinear}
Assume that the growth rate $g\colon (0,+\infty) \to (0,+\infty)$ is
locally Lipschitz and satisfies
\begin{equation*}
\omega g(x) < g(\omega x)
\qquad \text{for all $x > 0$ and $\omega \in (0,1)$.}
\end{equation*}
Then for any $t > 0$ the characteristic flow $X_t$ satisfies
\begin{equation*}
\omega X_t(x) < X_t (\omega x),
\qquad \text{for all $x > 0$ and $\omega \in (0,1)$.}
\end{equation*}
\end{lem}
\begin{proof}
Call $h_1(t) := \omega X_t(x)$ and $h_2(t) := X_t(\omega x)$. The
second one satisfies the ODE
\begin{equation*}
h_2'(t) = g(h_2(t)),
\end{equation*}
while the first one satisfies
\begin{equation*}
h_1'(t) = \omega g(X_t(x)) < g (\omega X_t(x)) = g(h_1(t)).
\end{equation*}
Since they have the same initial condition, this differential
inequality implies $h_1(t) < h_2(t)$ for all $t > 0$.
\end{proof}
Our main lower bound for the mitosis case is the following:
\begin{lem}[Lower bound for equal mitosis]
\label{lem:doeblinmitosis}
Assume Hypotheses \ref{asmp:k1}, \ref{asmp:gB}, \ref{asmp:gp} hold true
with the mitosis kernel $p(z) = 2\delta_{\frac{1}{2}}(z).$ Let
$(\mathcal{S}_t)_{t \geq 0}$ be the semigroup associated to Equation
\eqref{eq:gfscaledmitosis}. For any $\theta > 0$ there exists
$t_0 = t_0(\theta) >0$ such that for all $t > t_0$ and
$x_0 \in (0, \theta]$ it holds that
\[
\mathcal{S}_{t} \delta_{x_0}(x) \geq C (t, \theta)
\qquad \text{for all $x \in I_t$},
\]
where $I_t$ is an open interval which depends on time $t$, and for
some quantity $C=C(t, \theta)$ depending only on $t$ and $\theta$.
\end{lem}
\begin{proof}
Fix $\theta > 0$ and take any $x_0 \in (0,\theta]$. We follow the
same strategy as in the proof of Lemma \ref{lem:doeblinuniformfrag}. Here the only different part is $\mathcal{A}(t,x)$. We consider the
semigroup $(\mathcal{T}_t)_{t \geq 0}$ defined as in \eqref{T_t} and
$(\mathcal{S}_t)_{t \geq 0}$ defined as the semigroup associated to
\eqref{eq:gfscaledmitosis} with
$\mathcal{A}(t,x) = 4 B(2x) m(t,2x)$. Using \eqref{secondbound} we have
\[
\mathcal{T}_t \delta_{x_0} (2x)
\geq
X_t \# \delta_{x_0} (2x) e^{-C_1t}
=
\frac{1}{2}\delta_{\frac{1}{2} X_t \left(x_0 \right)} (x)
e^{-C_1t},
\]
for $C_1 = C_1(\theta, t)$, increasing in $t$. Hence we obtain
\[
\mathcal{A} (t,x) \geq
2 e^{-C_1 t} B \left( X_t \left(x_0\right) \right)
\delta_{\frac{1}{2} X_t \left(x_0 \right)} (x)
\quad \text{for all } t >0.
\]
We know that there exists some $x_B > 0$ for which $B$ is bounded below
by a positive quantity in each interval of the form $[x_B, R]$. Take
$t_B > 0$ such that for $t > t_B$ we have
$X_t \left(x_0 \right) > x_B$ for all $x_0 > 0$. Hence, for some
$C_2 = C_2(\theta, t) > 0$, decreasing in $t$,
\begin{equation*}
\mathcal{A}(t,x) \geq C_2 e^{-C_1 t}
\delta_{\frac{1}{2} X_t \left(x_0 \right)} (x)
\qquad \text{for all $t > t_B$.}
\end{equation*}
Fix now any $t > t_B$. For $t_B < \tau < t$ we have
\begin{multline*}
\mathcal{A}(\tau,x)
\geq
C_2(\theta, \tau) e^{-C_1(\theta, \tau) \tau} \delta_{\frac{1}{2} X_\tau \left(x_0 \right)} (x) \geq
C_2(\theta, t) e^{-C_1(\theta, t) t} \delta_{\frac{1}{2} X_\tau \left(x_0 \right)} (x)
=:
\tilde{C_2} e^{-\tilde{C_1} t} \delta_{\frac{1}{2} X_\tau \left(x_0 \right)} (x).
\end{multline*}
Hence using \eqref{eq:growth-sol-form1} we have
\begin{align*}
\begin{split}
\mathcal{T}_{t-\tau} \mathcal{A}(\tau, x)
&\geq
\tilde{C_2} e^{-\tilde{C_1} t}
\delta_{ X_{t-\tau} \left( \frac{1}{2} X_\tau (x_0) \right)} (x)
\exp{\left( -\int_0^{t-\tau} c(X_{-s}(x))\,\mathrm{d} s \right)}
\\
&\geq \tilde{C_2} e^{-2\tilde{C_1} t} \delta_{ X_{t-\tau} \left( \frac{1}{2} X_\tau (x_0) \right)} (x),
\end{split}
\end{align*}
for all $\tau \in (t_B, t)$. Define $F( \tau ) : = X_{t-\tau} \left(
\frac{1}{2} X_\tau (x_0) \right)$, and notice that it is a
strictly decreasing function, since Lemma \ref{lem:flow-sublinear}
ensures that for $\tau_1 < \tau_2$
\begin{equation*}
F(\tau_2)
= X_{t-{\tau_2}} \left( \frac{1}{2} X_{\tau_2} (x_0) \right)
< X_{t - \tau_2} X_{\tau_2 - \tau_1}
\left( \frac{1}{2} X_{\tau_1} (x_0) \right)
= F(\tau_1).
\end{equation*}
By Lemma \ref{lem:time-integral-of-dirac} we
obtain
\begin{align*}
\begin{split}
\int_{0}^{t} \mathcal{T}_{t-\tau} \mathcal{A}(\tau, x) \,\mathrm{d} \tau
&\geq
\int_{t_B}^{t} \mathcal{T}_{t-\tau} \mathcal{A}(\tau, x) \,\mathrm{d} \tau
\geq
\tilde{C_2} e^{-2 \tilde{C_1} t} \int_{t_B}^{t} \delta_{ X_{t-\tau} \left(
\frac{1}{2} X_\tau (x_0) \right)} (x) \,\mathrm{d} \tau
\\
&\geq \tilde{C_2} e^{-2 \tilde{C_1} t} \left|\left(F^{-1}\right)'(x)\right| \mathbbm{1}_{\mathcal{I}_{x_0}}
\end{split}
\end{align*}
where we define
\begin{equation*}
\label{interval}
\mathcal{I}_{x_0} :=
\left[
\frac12 X_t \left (x_0 \right ),
\
X_{t-t_B}\left(\frac{1}{2} X_{t_B}(x_0) \right)
\right ].
\end{equation*}
Again by Lemma \ref{lem:flow-sublinear} we see that this interval is
nonempty. Since we need a bound which is independent of $x_0$, we
consider the intersection of all these intervals as $x_0$ moves in
the interval $(0, \theta)$. That intersection is
\begin{equation*}
\mathcal{I}_{t} :=
\left[
\frac12 X_t \left (\theta \right ),
\
X_{t-t_B}\left(\frac{1}{2} X_{t_B}(0) \right)
\right ].
\end{equation*}
Condition \eqref{eq:H-1-power} shows that this interval is
nonempty for $t$ large enough, since
\begin{equation*}
\frac{X_t \left (\theta \right )}
{X_{t-t_B}\left(\frac{1}{2} X_{t_B}(0) \right)}
=
\frac{H^{-1} (t + \theta)}
{H^{-1}\left(t - t_B + \frac{1}{2} X_{t_B}(0) \right) }
\to 1
\qquad
\text{as $t \to +\infty$.}
\end{equation*}
This gives the result.
\end{proof}
\section{Proof of the main result}
\label{sec:proof-main}
We conclude by giving the proof of Theorem \ref{thm:main}. It is a
direct application of Harris's Theorem \ref{thm:Harris}. Hypotheses \ref{hyp:Lyapunov} and \ref{hyp:localDoeblin} need to be verified. We
already verified Hypothesis \ref{hyp:Lyapunov} (Lyapunov condition) in
Section \ref{sec:lyapunov} (see the corollary given in each case); in
fact, we have proved that given any $t_0 > 0$ we can satisfy
Hypothesis~\ref{hyp:Lyapunov} for any $t \geq t_0$, with constants
$\gamma$, $K$ \emph{which are independent of $t$} (since we can always
take $\gamma := e^{-C_1 t_0}$, $K := \tilde{C}$).
Regarding Hypothesis~\ref{hyp:localDoeblin}, the lower bounds we
obtained in Section \ref{sec:LowerBounds} are for $m(t,x)$ which is a
solution to Equation \eqref{eq:gfscaledgeneral}. However we need to satisfy the
minorisation condition for $f(t,x) = \phi(x) m(t,x)$ since the
equation on $f$ conserves mass; thus the associated semigroup is
Markovian, and we may apply Harris's theorem to it.
The equation satisfied by $f$ is
\begin{equation}
\begin{aligned}
\label{eq:gfconserv}
\frac{\partial }{\partial t}f(t,x) + \phi(x)\frac{\partial }{\partial x}
&\left( \frac{g(x)}{\phi(x)} f(t,x) \right)
+ (B(x) + \lambda) f(t,x)
\\
&= \phi(x) \int_{x}^{+ \infty} \frac{B(y)}{y} p\left(
\frac{x}{y}\right) f(t,y) \,\mathrm{d} y, \qquad &&t,x \geq 0,\\
f(t,0) &= 0, \qquad \qquad &&t > 0,\\
f(0,x) &= \phi(x) n_0(x), \qquad &&x > 0.
\end{aligned}
\end{equation}
We define $(\mathcal{F}_t)_{t \geq 0}$ as the semigroup associated to Equation
\eqref{eq:gfconserv}, or alternatively by the relationship
\begin{equation*}
\mathcal{F}_t (\phi n_0) := \phi \mathcal{S}_t n_0,
\end{equation*}
for any nonnegative measure $n_0$ such that $\phi n_0$ is a
finite measure on $(0,+\infty)$.
\begin{lem}[Minorisation condition for $f(t,x)$]
\label{lem:doeblingeneral}
We assume Hypotheses \ref{asmp:k1}, \ref{asmp:p}, \ref{asmp:gB} and
\ref{asmp:gp} hold true. Let $(\mathcal{F}_t)_{t \geq 0}$ be the
semigroup associated to Equation \eqref{eq:gfconserv}. For any
$0 < \eta < \theta$ there exists $t_0 = t_0(\eta, \theta) >0$ such that for
all $t > t_0$ and $x_0 \in [\eta, \theta]$ it holds that
\[ \mathcal{F}_{t} \delta_{x_0}(x) \geq \breve{C} (\eta, \theta, t)
\qquad \text{for all $x \in I_t$},
\]
where $I_t$ is an open interval which depends on time $t$, and for
some quantity $\breve{C} = \breve{C}(\eta, \theta,t)$ depending only
on $\eta$, $\theta$ and $t$. If in addition we assume that
\begin{equation*}
\int_0^1 \frac{1}{g(x)} \,\mathrm{d} x < +\infty,
\end{equation*}
then the above result also holds when taking $\eta = 0$.
\end{lem}
\begin{proof}
Let $(\mathcal{S}_t)_{t \geq 0}$ and $(\mathcal{F}_t)_{t \geq 0}$ be
the semigroups associated to Equations \eqref{eq:gfscaledgeneral} and
\eqref{eq:gfconserv} respectively. Under the conditions of Lemma
\ref{lem:doeblinuniformfrag} we have a lower bound for
$\mathcal{S}_{t} \delta_{x_0}(x) \geq C (\eta, \theta, t)$. It
immediately translates to a lower bound on $\mathcal{F}_t$ in all
cases:
\begin{enumerate}
\item If $\int_0^1 \frac{1}{g(x)} \,\mathrm{d} x < +\infty$, we know from
\cite{BCG13} that $\phi(x)$ is bounded
in each interval of the form $(0, \theta]$ {(since it is
continuous and tends to a positive constant at
$x=0$)}.
\item If $\int_0^1 \frac{1}{g(x)} \,\mathrm{d} x = +\infty$, then since
$\phi(x)$ is continuous there exist constants
$\hat{C}_1(\eta, \theta)$, $\hat{C}_2(\eta, \theta) > 0$ such that
$\hat{C}_1 \leq \phi(y) \leq \hat{C}_2$ for all
$y \in [\eta, \theta]$.
\end{enumerate}
On the other hand, under the conditions of Lemma
\ref{lem:doeblinmitosis} we know again that $\phi(x)$ is bounded
above and below by positive constants in each interval of the form
$(0, \theta]$.
Therefore we obtain for $x_0 \in [\eta,\theta]$:
\[ \mathcal{F}_t \delta_{x_0}(x)
= \frac{\phi(x)}{\phi(x_0)} \mathcal{S}_t \delta_{x_0} (x)
\geq
\frac{\hat{C}_1(\eta, \theta)}{\hat{C}_2(\eta, \theta)}
C(\eta, \theta, t) := \breve{C}(\eta, \theta, t),
\]
allowing $\eta=0$ if $\int_0^1 1/g < +\infty$.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:main}]
As remarked above, the semigroup $(\mathcal{F}_t)_{t \geq 0}$
satisfies the Lyapunov condition in Hypothesis~\ref{hyp:Lyapunov} in
all cases, for $t \geq 1$, with a weight $V$ and constants $\gamma$,
$K$ which are independent of $t$. In order to satisfy
Hypothesis~\ref{hyp:localDoeblin} it is enough then to find any time
$t \geq 1$ for which we have a uniform lower bound whenever the initial
condition is a delta function supported on a region of the form
\begin{equation*}
\mathcal{C} := \left \{x > 0 \mid V(x) \leq R \right \}
\end{equation*}
for some $R > 2K / (1- \gamma)$. Lemma \ref{lem:doeblingeneral}
gives this in all cases. Notice that in the cases in which the lower
bound is only available for $x_0 \in [\eta, \theta]$ with $\eta >
0$, the function $V$ we give in Section \ref{sec:lyapunov} is
unbounded at $x=0$, and thus the region $\mathcal{C}$ is contained
in an interval of that form.
\end{proof}
\paragraph{Explicit calculations for the self-similar fragmentation case.}
We recall that the so-called self-similar fragmentation equation corresponds to a linear growth rate \mbox{$g(x)=x$}, a monomial total fragmentation rate $B(x)=x^b$, $b>0$, and a self-similar kernel (here we take the homogeneous self-similar kernel $p(z)\equiv2$).
In that case, all the constants appearing in Harris's theorem can be quantified.
This is due to the explicit expression $\phi(x)=x$ of the dual eigenfunction when $g(x)=x$.
For the computations we choose for instance the parameters $k=0$ and $K=2$, which correspond to the Lyapunov function $V(x)=(x^k+x^K)/\phi(x)=1/x+x$.
We start with Hypothesis~\ref{hyp:Lyapunov}.
Using that $B(x)=x^b$ we can make the proof of Lemma~\ref{lyapunovBlin} more quantitative.
Indeed the function $\Phi$ defined in~\eqref{Phi_lin} reads in the present case
\[\Phi(x)=-\frac13x^{b+1}+\frac32x+x^{b-1}-\frac12x^{-1}.\]
Treating separately the cases $x\leq1$, $x\geq1$, and $b\leq2$, $b\geq2$, we can check that
\[\Phi(x)\leq-\frac13x^{b+1}+\frac32x+x^{b-1}\leq5\Big(\frac{15}{2}\Big)^{\frac1b+\frac b2}\]
for all $x>0$.
So Hypothesis~\ref{hyp:Lyapunov} is verified for any $t_0>0$ with the constants
\[\gamma=e^{-\frac{t_0}{2}}\qquad\text{and}\qquad K=10\Big(\frac{15}{2}\Big)^{\frac1b+\frac b2}.\]
We now turn to Hypothesis~\ref{hyp:localDoeblin}.
We choose
\[R=\frac{4K}{1-\gamma}\]
and we notice that since $V(x)=1/x+x$
\[\mathcal C=\left \{ x:V(x)\leq R \right \}\subset[1/R,R].\]
For $\phi(x)=x$ and $p(z)\equiv2$, Equation~\eqref{eq:gfconserv} reads
\[\frac{\partial }{\partial t}f(t,x) + \frac{\partial }{\partial x}\left( xf(t,x) \right) + B(x) f(t,x) = 2 \int_{x}^{+ \infty} B(y)f(t,y)\frac{x}{y} \,\mathrm{d} y\]
and we can prove directly on this equation, proceeding similarly as in Lemma~\ref{lem:doeblinuniformfrag}, that for any $t_0>0$ and all $x_0\in[1/R,R]$
\[\mathcal F_{t_0}\delta_{x_0}\geq \alpha\nu\]
with
\[\nu(\,\mathrm{d} y)=\frac{2e^{-2t_0}}{R}\mathbbm{1}_{[0,Re^{t_0}]}(y)y\,\mathrm{d} y\qquad\text{and}\qquad \alpha=R^{b+3}t_0\exp\Big(-2R^b \frac{e^{b t_0}}{b}\Big).\]
We are now in position to apply Harris's theorem.
Choosing in Theorem~\ref{thm:Harris}
\[\alpha_0=\frac\alpha2\qquad\text{and}\qquad\gamma_0=\gamma+\frac{2K}R\]
we obtain
\[\bar\alpha=\max\left\{1-\frac\alpha2,\frac{1-\gamma+\frac{1+\gamma}{2}\alpha}{1-\gamma+\alpha}\right\}.\]
Choosing $t_0=2\log2$ we get
\[\gamma=\frac12,\quad R=80\Big(\frac{15}{2}\Big)^{\frac1b+\frac b2},\quad\alpha=2\log2 R^{b+3}e^{-2(4R)^b/b}\]
and
\[\bar\alpha=\max\left\{1-\frac\alpha2,1-\frac{\alpha}{2(1+2\alpha)}\right\}=1-\frac{\alpha}{2(1+2\alpha)}.\]
This proves that we can choose $\rho$ as in~\eqref{rho_num}.
\end{document} |
\begin{document}
\title{Quantum heat engine with a quadratically coupled optomechanical system}
\author{M. Tahir Naseem}
\affiliation{Department of Physics, Ko\c{c} University, 34450 Sariyer, Istanbul TURKEY}
\author{\"{O}zg\"{u}r E. M\"{u}stecapl{\i}o\u{g}lu}
\email{[email protected]}
\affiliation{Department of Physics, Ko\c{c} University, 34450 Sariyer, Istanbul TURKEY}
\date{\today}
\begin{abstract}
We propose a quantum heat engine based on a quadratically coupled optomechanical system. The optical component of the system is driven periodically with an incoherent thermal drive, which induces periodic oscillations in the mechanical component.
Under the action of the quadratic optomechanical interaction, the mechanical mode evolves from an initial thermal state to a thermal-squeezed
steady state, as verified by calculating the Wigner functions. The dynamics of the system is identified as an
effective four-stroke Otto cycle. We investigated the performance of the engine by evaluating the dissipated power, the maximum power under a load, and the maximum extractable work. It is found that the engine operating with quadratic optomechanics is more powerful than the one
operating with linear optomechanics. The effect is explained by the presence of squeezing in the quantum state of the mechanical mode.
\end{abstract}
\maketitle
\section{I.\, Introduction}\label{sec:intro}
Quantum heat engine (QHE) is a term typically used
to describe a machine that can harness work out of thermal resources using a quantum working
substance~\cite{HE-def,Kos-HE-Rev}. QHEs have attracted much attention in the last few decades~\cite{HE1, HE2, HE3, HE4, HE5, HE6, HE7, HE8, HE9, HE10, HE11, HE12, HE13, HE14, HE15, HE16, HE17, HE18, HE19, HE20, HE21, HE22, HE23, HE24, HE25, HE26, HE27, HE28,OM-HE1, OM-HE2, OM-HE3, OM-HE4, OM-HE5} and some experimental demonstrations have been reported~\cite{Eilon2019, Ronzani2018, Maslennikov2019}. Recently, another class of quantum machines which can convert
useful energy out of non-equilibrium reservoirs, in particular squeezed thermal noise, has been
theoretically proposed~\cite{PhysRevE.93.052120} and experimentally observed~\cite{Togan2017}. While advantages of
quantum reservoirs on the engine performance are shown to be significant~\cite{Corr1, Corr2, Corr3, Corr4, Corr5, Corr6, Umit-OM},
it is not clear if additional
complexity and extra energy cost of
preparing such quantum correlations would reduce their supremacy over classical resources or not. Following a more orthodox approach here,
we ask if we can see benefits of profound quantum states, in particular squeezing, of a working system to harvest work out of classical heat baths.
For that aim, we consider a system known for its capability of generating squeezing, namely quadratically coupled optomechanical
system as our working substance~\cite{gen-quad}.
Optomechanical working substance is a natural proposal to investigate quantum thermodynamics of QHEs. It has both the
steam-like and piston-like components which are the optical and the mechanical subsystems, respectively. On the other hand, all the existing
proposals of optomechanical QHEs are limited to linear coupling~\cite{OM-HE1, OM-HE2, OM-HE3, OM-HE4, OM-HE5,Umit-OM}. Linear coupling
describes the radiation pressure induced displacement of the mechanical mode, which in terms of quantum states yield a coherent thermal
state. This state is in fact a close analog of a classical state and yields only a marginal difference if the engine harvests the classical resources
stochastically~\cite{Umit-OM}. It has been recently argued that the mechanical mode can be externally pumped with a squeezed drive.
In Ref.~\cite{Ghosh12156}, a quantum heat engine is proposed based on a two-level system (TLS) as the working fluid that simultaneously interacts with the cold and hot baths, in addition, it is also coupled with a cavity that plays the role of the piston. It is reported that, when the quantized piston mode is subject to a non-linear (quadratic) drive, it evolves into a thermal-squeezed state. The work capacity of the piston is considerably enhanced for the quadratic drive as compared to the case when the piston mode is subject to linear external drive.
Here, we
consider quadratic optomechanical coupling which can generate squeezing without the additional complexity or energy cost of any external squeezed drive. The key difference
between the model in Ref.~\cite{Ghosh12156} and our heat engine is that in
their model the engine takes energy from an external squeezed
drive, that generates quadratic interaction. Here, squeezing is induced by quadratic optomechanical interaction and engine takes energy from external incoherent thermal drives.
There is another scheme for a QHE based on an optomechanical system~\cite{OM-HE4}. For the work extraction from the system, the initial state of the mechanical mode needs to be in a so-called thermodynamically non-passive state~\cite{nonpass1}. A non-passive state is a one from which work can be extracted unitarily until it becomes passive~\cite{pusz1978, Lenard1978, Allahverdyan2004, Palma2016, Brown2016, Skrzypczyk2015, Binder2015, Hovhannisyan2013, Klimovsky2013, Alicki2013, Felix2015, Niedenzu2016, Vinjanampathy2016, Goold2016, Niedenzu2018}. Non-passive states can also be regarded as quantum batteries~\cite{Alicki2013, Binder2015} or quantum flywheels~\cite{Levy2016}. The maximum amount of work that can be extracted from a non-passive state by means of a cyclic
unitary transformation is called ergotropy~\cite{Allahverdyan2004}. On contrary, passive states have zero ergotropy. In our model, the initial state of the mechanical mode is completely passive. By computing the Wigner functions, we verify that the optical mode remains in a thermal state, and the mechanical mode evolves to a thermal-squeezed state, which has finite ergotropy. In order to compare the thermodynamic behavior of the proposed engine with the classical engine cycles, we plot the mean energy versus the frequency of the optical mode modified by the mechanical feedback. This leads to the identification of an effective Otto engine cycle.
In addition, we compare the linearly and quadratically coupled optomechanical QHEs~\cite{Umit-OM} using three figures of merit,
namely the maximum work capacity, the dissipated power, and the power under load.
Rest of the paper is organized as follows. In Sec.~\ref{sec:quantumModel}, we give the quantum model based on the quadratic optomechanical coupling between the optical and the mechanical resonators. In Sec.~\ref{sec:quantumResults}, we calculate the Wigner functions for piston mode and identify the effective Otto cycle in our engine. In Sec.~\ref{sec:performance}, we calculate and compare the figure of merits of the output powers for the linear and the quadratic optomechanical coupling. Finally, we conclude our discussion in Sec.~\ref{sec:conclusions}.
\section{II.\, The Model}
\label{sec:quantumModel}
The schematic diagram of our heat engine is shown in Fig.~\ref{fig:fig1}. It is based on an optical cavity that contains a movable membrane, and they are coupled via the quadratic optomechanical coupling. The strength of this single-photon coupling is denoted by $g$, and the optical (mechanical) resonator has a frequency $\omega_\text{a}$ ($\omega_\text{b}$), such that $\omega_\text{a}\gg\omega_\text{b}$. We assume that the quadratic optomechanical coupling has negative values throughout this paper~\cite{Seok_2013, Seok_2014}. In our model the cavity-mode is working fluid and mechanical resonator plays the role of piston. The coupling between the optical and mechanical modes can be expressed as~\cite{NoriQuad}
\begin{equation}\label{eq:model}
\hat{H}_{\text{sys}}=\omega_\text{a}\hat{a}^{\dagger}\hat{a}+\omega_\text{b}\hat{b}^{\dagger}\hat{b}+g\hat{a}^{\dagger}\hat{a}(\hat{b}+\hat{b}^{\dagger})^2,
\end{equation}
\begin{figure}
\caption{(Colour online) Schematic diagram of the quantum heat engine composed of an optical cavity and a mechanical membrane that interacts via quadratic optomechanical coupling $g$. The optical resonator has the high frequency (HF) $\omega_{\text{a}
\label{fig:fig1}
\end{figure}
\begin{figure*}
\caption{(Colour online) (a)-(d) The Wigner functions in the $p,q$ field quadrature phase space of the piston mode plotted at (a) $\omega_{\text{b}
\label{fig:fig2a}
\label{fig:fig2b}
\label{fig:fig2c}
\label{fig:fig2d}
\label{fig:fig2}
\end{figure*}
here and in the rest of the paper we take $\hbar=1$, moreover, $\hat{a}$~($\hat{a}^{\dagger}$) and $\hat{b}$~($\hat{b}^{\dagger}$) are the annihilation (creation) operators for the optical and mechanical
modes, respectively. We assume that the optical and mechanical resonators are coupled to two independent thermal baths at temperature $T_{\text{a}}$ and $T_{\text{b}}$, respectively. In addition, the optical resonator is driven by another quasi-thermal periodic drive with power spectral density $S_\text{h}$. The temperatures of the thermal baths and periodic drive can be determined by,
\begin{eqnarray}
\bar{n}_\text{a}&=&\frac{1}{\exp{(\omega_\text{a}/T_\text{a})}-1},\\
\bar{n}_\text{b}&=&\frac{1}{\exp{(\omega_\text{b}/T_\text{b})}-1},\\
\bar{n}_\text{h}&=&\frac{1}{\exp{(\omega_\text{a}/T_\text{h})}-1}.
\end{eqnarray}
We take $k_\text{B}=1$. Here, $T_\text{h}$ is the temperature of periodic drive on optical resonator. We consider that $T_\text{h}>T_\text{a},T_\text{b}$. Furthermore, $\bar{n}_\text{a}$ and $\bar{n}_\text{b}$ are the mean number of excitations in the baths for optical and mechanical resonators, respectively, while the mean number of excitations for the thermal drive at temperature $T_h$ is given by $\bar{n}_\text{h}$.
The dynamics of the system can be described by the master equation ~\cite{masterEq},
\begin{eqnarray}\label{eq:master}
\dot{\hat{\rho}}&=&-i[\hat{H}_{\text{sys}},\hat{\rho}] \\ \nonumber
&+&\kappa_\text{a}(\bar{n}_\text{a}+1)D[\hat{a}]+\kappa_\text{a}\bar{n}_\text{a}D[\hat{a}^{\dagger}]\\ \nonumber
&+&\kappa_\text{b}(\bar{n}_\text{b}+1)D[\hat{b}]+\kappa_\text{b}\bar{n}_\text{b}D[\hat{b}^{\dagger}]\\ \nonumber
&+&\kappa_{\text{h}}(t)(\bar{n}_{\text{h}}+1)D[\hat{a}]+\kappa_\text{h}(t)\bar{n}_{\text{h}}D[\hat{a}^{\dagger}],
\end{eqnarray}
where, $\kappa_\text{a}$ and $\kappa_\text{b}$ are coupling constants of optical and mechanical resonators with their respective thermal baths. The coupling of the optical resonator with the additional thermal drive is described by periodic time-dependent coupling coefficient $\kappa_\text{h}(t)$. $D[\hat{\alpha}]:=(1/2)(2\hat{\alpha}\hat{\rho}\hat{\alpha}^{\dagger}-\hat{\alpha}^{\dagger}\hat{\alpha}\hat{\rho}-\hat{\rho}\hat{\alpha}^{\dagger}\hat{\alpha})$ is the Lindblad dissipator superoperator with $\hat{\alpha}=\hat{a},\hat{b}$.
\section{III.\, Nonpassivity of the Piston Mode}\label{sec:quantumResults}
The stability condition for the quadratic optomechanical system with the membrane-in-middle dictates that $(\omega_{\text{b}}+4\bar{n}g)>0$~\cite{Liao_2013}, where $\bar{n}$ is the mean number of photons inside the cavity. This requires $\bar{n}_\text{h}<0.80$, in the parameter regime we consider here. In order to reduce the number of control parameters for convenience in the numerical simulations, we assume identical mean number of excitations $\bar n_\text{a} = \bar n_\text{b} := \bar n_\text{c} = 0.01$, in the thermal baths. This is possible if $T_\text{a} / T_\text{b} = \omega_\text{a} / \omega_\text{b}$, which implies that we have $T_\text{a}\gg T_\text{b}$. We emphasize that this is not a requirement for the operation of our engine. Accordingly, the temperatures of the baths are $T_\text{a} \sim 104$ mK and $T_\text{b}\sim 5$ mK. Thereupon, thermal baths have the hierarchy of the temperatures $T_\text{h}>T_\text{a}>T_\text{b}$. The thermal periodic drive acting on the optical resonator has a temporal profile of square wave $\kappa_\text{h}(t):=\kappa_\text{h}s(t)$.
For heating and cooling stages the square wave $s(t)=1$ and $s(t)=0$, respectively. If the external thermal pulse is on, the optical mode heats to the temperature of the thermal drive, and when the pulse is off, this mode cools to the temperature $T_{\text{a}}$. In contrast, the mechanical mode is always coupled to a cold bath at temperature $T_{\text{b}}$. The heating and cooling stages each last a time of $\pi/\omega_\text{b}$.
Fig.~\ref{fig:fig2} shows the Wigner functions of the reduced density matrix $\rho_\text{b}=\text{Tr}_\text{a}[\rho (t)]$ of the mechanical mode at different values of scaled time $\omega_{\text{b}}t$. With $\bar n_\text{h}=0.45$, and at the start of the engine operation at $\omega_{\text{b}}t=0$, the initial state of the piston (mechanical) mode is thermal as shown in Fig.~\ref{fig:fig2a}. The state of the mechanical resonator evolves from thermal (passive) to a thermal-squeezed (non-passive) state as shown in Figs.~\ref{fig:fig2b}-\ref{fig:fig2d}. However, during the evolution, the squeezing in the piston first increases (Fig.~\ref{fig:fig2b}) and then it decreases till system reaches at steady-state (Fig.~\ref{fig:fig2d}). The decrease in the squeezing of the mechanical resonator is due to the fact that thermal noise and strong decoherence present in the system weakens the quantum correlations ~\cite{gen-quad}.
This change in the state of the mechanical resonator is due to the periodic thermal drive and quadratic optomechanical interaction. We also observed that for sufficiently small values of $\bar n_\text{h}$ the initial state of the piston mode remains thermal which has no work content. However, for higher values of $\bar n_\text{h}$ the initial state of the mechanical resonator evolves to a thermal-squeezed state.
The Wigner functions exhibit larger widths with increasing $\bar n_\text{h}$ conforming to the increasing fluctuations. The Wigner functions are entirely positive, conforming to the fully classical dynamics associated with the mixture of thermal states character of the piston mode. On the other hand, mechanical and optical modes are still quantum correlated, which contribute to the dynamics of the $\langle \hat{n}_\text{b}\rangle$. This can be seen from the equation of motion for $\langle \hat{n}_\text{b}\rangle$ given in Appendix. We like to point out here that, if one consider linear optomechancial interaction in which the interaction term is $\hat{H}_{I}= g\hat{a}^{\dagger}\hat{a}(\hat{b}+\hat{b}^{\dagger})$, the piston (mechanical) mode evolves from thermal to a coherent-thermal (non-passive) state ~\cite{OM-HE3,Umit-OM}.
\subsection{A.\, Effective Otto engine cycle}
To identify the effective Otto engine cycle in our model, we define the effective frequency $\omega_\text{eff}:= \omega_\text{a} + g q^2$ of the working fluid (optical resonator), where $\hat{q}=\hat{b}+\hat{b}^{\dagger}$ is the position operator of the mechanical resonator. This effective frequency can be considered as the change in the optical mode frequency associated with variations in the position of the mechanical resonator. Accordingly, we can also define the effective mean energy $U_\text{a} = \omega_\text{eff}\langle \hat{n}_\text{a}\rangle$ of the optical resonator mode, and $\hat{n}_\text{a}=\hat{a}^{\dagger}\hat{a}$ the photon number operator of the optical resonator. In the factorization of the effective energy, we have ignored the correlations between $\langle \hat{n}_\text{a}\rangle$ and $\langle\hat{q}^2\rangle$. In order to identify the engine cycle, we plot the effective mean energy $U_\text{a}$ of the working fluid against effective frequency $\omega_\text{eff}$, for $\bar n_\text{h}= 0.125$, as shown in Fig.~\ref{fig:fig3}. We like to emphasize here is that all the results presented from here onwards are at steady-state of the system unless otherwise mentioned.
Fig.~\ref{fig:fig3a} shows a four-stage engine cycle; the first stage of our engine cycle is isochoric heating of the optical resonator under the action of the thermal pulse. Fig.~\ref{fig:fig3a} shows this stage, which is indicated by the arrow from point A to B in the figure at $\omega_\text{eff} \sim 1.292$. During this stage, the effective frequency $\omega_\text{eff}$ of the optical resonator remains constant and the periodic thermal pulse is suddenly switched on. The optical resonator receives incoherent energy and thermalizes quickly under the action of the noise pulse as compared to the mechanical resonator, which cannot follow the thermalization of the optical mode. The mean excitation number of the optical resonator reaches the steady-state value $\langle \hat n_{\text{a}}\rangle^{ss}$ $\sim$ $0.0675$ for $\bar{n}_{\text{h}}=0.125$. This can be found by writing the rate equation for $\langle \hat n_{\text{a}}\rangle$ (Appendix) and finding its steady-state solution; $\langle \hat n_{\text{a}}\rangle^{ss} = ( \bar n_{\text{c}} + \bar n_{\text{h}})/2$.
The second stage in our engine cycle is the adiabatic expansion, which is indicated by the arrow from point B to C' in Fig.~\ref{fig:fig3a}. During this stage of the cycle, the thermal pulse remains active, however, due to strong dissipation in the system, there is a slight decrease in the mean effective energy $U_{a}$. The effective frequency $\omega_\text{eff}$ decreases to $\sim$ 1.245 and the displacement of the mechanical resonator from mean position increases. The entropy of the optical resonator remains constant as shown in Fig.~\ref{fig:fig3b}, the optical resonator is in thermal state and its entropy can be calculated by
\begin{eqnarray}
S_a=(1+\langle \hat n_a\rangle)\ln{(1+\langle \hat n_a\rangle)}-\langle \hat n_a\rangle\ln{(\langle \hat n_a\rangle)}.
\end{eqnarray}
During B to C' (Fig.~\ref{fig:fig3}) of the cycle, under the action of the thermal noise pulse the state of the piston starts evolving from a thermal state to a thermal-squeezed state, and the piston converts heat into potential energy that is to be harvested. From C' to C there is a transitional stage which cannot be identified with a standard thermodynamic process. The transitional stages in our effective Otto cycle are due to the presence of the inner friction in the system. The inner friction appears in a quantum heat engine when the interacting part of the Hamiltonian does not commute with the non-interacting part. Due to this, the heat engine cannot follow the truly adiabatic strokes in the cycle and dissipates useful energy~\cite{Zambrini_2015}.
The third stage of our heat engine cycle is isochoric cooling, that is indicated by the arrow from points C to D at $\omega_\text{eff}$ $\sim$ $1.245$. During this stage, the heat pulse is suddenly turned off, and the mean excitation number quickly drops to $\langle \hat n_{\text{a}}\rangle^{ss}=\bar n_{\text{c}}/2$, the optical mode still remains in a thermal state. The strong dissipation in the system also helps during the cooling stage. The last stage of the cycle is adiabatic compression of optical mode, in which the effective frequency increases to $\omega_\text{eff}$ $\sim$ $1.41$. Accordingly, the mean position of the mechanical resonator decreases, the entropy remains constant and the periodic thermal drive remains inactive during this stage.
This final stage is denoted by D to A' in Fig.~\ref{fig:fig3b}. There is another transitional stage from A' to A that completes the engine cycle.
\begin{figure}
\caption{(Colour online) (a) The dependence of the mean effective energy $U_{\text{a}}$ of the working fluid on the effective frequency $\omega_{\text{eff}}$, for $\bar{n}_{\text{h}}=0.125$, showing the effective Otto engine cycle. (b) The entropy $S_a$ of the optical resonator during the cycle.}
\label{fig:fig3a}
\label{fig:fig3b}
\label{fig:fig3}
\end{figure}
\section{IV.\, Performance of the engine}\label{sec:performance}
Our engine can be described by the Otto-cycle excluding the transitional stages in the cycle.
The transitional stages of the engine do not strongly affect the temperature-entropy ($T$-$S$) cycle which is shown in Fig.~\ref{fig:fig4a}. This ($T$-$S$) cycle looks similar to standard Otto engine cycle. Here we introduce an effective temperature, as the optical resonator remains in the thermal state the effective temperature can be given by
\begin{eqnarray}
T_{\text{eff}}=\omega_{\text{eff}}/\ln{(1+1/\langle\hat n_a\rangle)}.
\end{eqnarray}
The shape of the $T$-$S$ cycle of our four-stage engine is similar to experimentally obtained cycle for single-atom heat engine proposed in ~\cite{SingleAtom}, and for nanomechanical Otto engine driven by squeezed reservoir ~\cite{OttoShape}.
{\it Area of $T$-$S$ cycle}: The area of the effective $T$-$S$ curve defines the useful energy content which is stored in the thermal-squeezed state of the mechanical resonator and not dissipated as heat. The piston mode undergoes coherent oscillations due to the cyclic work output of the effective Otto engine. This can cause an ever-increasing oscillation amplitude of the piston mode; however, it is balanced by the friction effect of the cold baths attached to the mechanical resonator. The amplitude of piston mode oscillations can be determined using the methods in circuit QED~\cite{masterEq}. The area of the $T$-$S$ cycle can be considered as a figure of merit for the potential work output from the working fluid.
The shape of the cycle in Fig.~\ref{fig:fig4a} can be approximated by a trapezoid, and we can estimate the net output work by $W_a\sim2.7\times 10^{-2}\hbar\omega_a\sim 1.7\times 10^{-25}$ joules. The power can be calculated if we divide the work output by the heating pulse period $2\pi/\omega_b=2$ ns, which is the cycle time of our engine. The power from the working fluid for the parameters we considered is $P_a\sim 8.9\times10^{-17}$ W. The input heat taken by the working fluid can also be calculated from Fig.~\ref{fig:fig4a}, and we find $Q_{\text{in}}\sim 0.30\hbar\omega_a$. Considering these values of work output and heat intake, the efficiency of our engine becomes $\eta=W_a/Q_{\text{in}}\sim 0.09\%$. We note that similar values of work output and efficiency can be found from the $\langle\hat n_a\rangle$-$\omega_{\text{eff}}$ cycle diagram, which is similar to Fig.~\ref{fig:fig3}. Moreover, the work output increases with the increasing values of $\bar n_h$, but we cannot increase this indefinitely. The stability condition on the quadratic optomechanical model limits the indefinite increase of $\bar n_h$.
\begin{figure*}
\caption{(Colour online) (a) $T$-$S$ diagram of the optical mode for $\bar{n}_{\text{h}}=0.125$. (b) Dissipated internal power as a function of $\bar{n}_{\text{h}}$, (c) dissipated power under load as a function of the external load $\kappa_{\text{L}}$, and (d) work capacity $\Delta F$, for both the linear and quadratic optomechanical coupling models.}
\label{fig:fig4a}
\label{fig:fig4b}
\label{fig:fig4c}
\label{fig:fig4d}
\label{fig:fig4}
\end{figure*}
{\it Dissipated internal power (DIP)}: Alternatively, in order to estimate the power of the heat engine another figure of merit, called dissipated internal power, can be calculated, which is given by ~\cite{OM-HE3}
\begin{eqnarray}\label{eq:intpow}
P &=& - \text{Tr}{\omega_\text{b}\hat{n}_\text{b}\kappa_\text{b}[(\bar{n}_\text{b}+1)D_{\hat{b}}[\hat{\rho}]+\bar{n}_\text{b}D_{\hat{b}^\dagger}[\hat{\rho}]]},\\ \nonumber
&=& \omega_\text{b}\kappa_\text{b}(\langle\hat{n}_\text{b}\rangle - \bar{n}_\text{c}).
\end{eqnarray}
This describes the net energy flux dissipated by the piston mode into its environment. The qualitative behavior of the DIP is the same as that of the mean excitation number of the mechanical resonator, which is oscillatory around some mean value. For the given system parameters, the maximum DIP is obtained at the maximum value of the mean number of excitations of the mechanical resonator;
$P_\text{max}$ $\sim$ $\langle\hat{n}_\text{b}\rangle_\text{max}$. DIP is plotted as a function of $\bar{n}_\text{h}$ in Fig.~\ref{fig:fig4b}, for both linear and quadratic optomechanical couplings. The qualitative behavior of DIP is same for both linear and quadratic coupling, and for very small values of $\bar{n}_\text{h}$, the difference between the dissipated powers is small for the linear and quadratic models. The reason for low power at small $\bar{n}_\text{h}$ is due lack of coherence building and the small amount of squeezing of the mechanical mode in linear and quadratic coupling (cf. Fig.~\ref{fig:fig3}), respectively. As we increase $\bar{n}_\text{h}$ the squeezing in the mechanical mode increases and correspondingly dissipated power has more useful work content than the incoherent energy or heat.
We get more dissipated power for the quadratic optomechanical coupling based heat engine, than for a linear coupling model with the same system parameters, this is shown in Fig.~\ref{fig:fig4b}. For small values of $\bar{n}_\text{h}$, the difference in the dissipated power for two models is small, but for the sufficiently high values of $\bar{n}_\text{h}$ this difference becomes pronounced.
{\it Power under load }: Another way of estimating the power of our engine is the dissipated power under load. When a load is attached to the mechanical resonator, the power dissipated in its presence is termed the dissipated power under load ($P_\text{L}$)~\cite{OM-HE3}. The external load attached to the mechanical resonator creates additional damping along with the friction introduced by the environment of the mechanical resonator. Accordingly, the dynamics of the system changes and we replace $\kappa_\text{b}\to \kappa_\text{b} + \kappa_\text{L} $. The expression for $P_\text{L}$ is similar to Eq.~(\ref{eq:intpow}) and is given by
\begin{eqnarray}\label{eq:extpow}
P_{\text{L}} &=& - \text{Tr}{\omega_\text{L}\hat{n}_\text{b}\kappa_\text{L}[(\bar{n}_\text{b}+1)D_{\hat{b}}[\tilde{\rho}]+\bar{n}_\text{b}D_{\hat{b}^\dagger}[\tilde{\rho}]]},\\ \nonumber
&=& \omega_\text{b}\kappa_\text{L}(\langle\hat{n}_\text{b}\rangle - \bar{n}_\text{c}).
\end{eqnarray}
Here $\tilde{\rho}$ is obtained by substituting $\kappa_\text{b}\to \kappa_\text{b} + \kappa_\text{L}$ in Eq.~(\ref{eq:master}). It is clear from Eq.~(\ref{eq:extpow}) that if there is no load $\kappa_\text{L} = 0$, or when $\kappa_\text{L}\to\infty$ (very heavy load under which the thermal machine won't work), the dissipated power under load becomes zero. Moreover, there is an optimal value of the external load $\kappa_\text{L}$ for which we get maximum power $P_\text{L}^{*} = \kappa_\text{L}^{*} P_\text{L}$. This can be used as a potential figure of merit for a quantum heat engine working under an external load~\cite{OM-HE3}. Fig.~\ref{fig:fig4c} shows the dissipated power under load $P_\text{L}$ as a function of external load $\kappa_\text{L}$. As predicted, when there is no external load $\kappa_\text{L}$ = $0$ and for $\kappa_\text{L}$ $\to$ $\infty$, the power dissipated under load is zero. There exists an optimal value of the load ($\kappa_{\text{L}}^{*}$) that yields maximum power. Again, the power for the quadratic coupling based model is greater than for the linear coupling, and this difference is largest for the optimal value of load $\kappa_\text{L}^{*}$. We also note that the squeezing in the mechanical resonator decreases as we increase the external load $\kappa_\text{L}$, and the state of the piston mode becomes completely passive in the limit $\kappa_\text{L}$ $\to$ $\infty$.
{\it Work capacity }: Another figure of merit for the work extraction from a quantum heat engine is based on the non-passivity of the piston mode. This describes the maximum extractable work from the non-equilibrium steady state of the mechanical resonator. The upper bound of the maximum extractable work for a given quantum state $\rho$ that is subject to a given Hamiltonian $H$ can be given by~\cite{Esposito2011, Horodecki2013},
\begin{eqnarray}\label{eq:workcap}
W^{\text{max}}\leq T S(\rho||\rho^{\text{G}}(H)),
\end{eqnarray}
where $\rho^{\text{G}}(H)$ is the Gibbs state, and $S(\rho||\rho^{\text{G}}(H))$ is the relative entropy between the quantum state $\rho$ and the Gibbs state, which can be described as: $S(\rho||\rho^{\text{G}}(H))= \text{Tr}[\rho \text{log}(\rho)-\rho \text{log}(\rho^{\text{G}})]$. We can rewrite the Eq.~(\ref{eq:workcap}) as ~\cite{OM-HE3},
\begin{eqnarray}\label{eq:FreeEng}
W^{\text{max}}\leq F(\rho)-F(\rho^{\text{G}}(H))=\Delta F.
\end{eqnarray}
Here $F(\rho)$ is the free energy of the non-equilibrium steady state. In our model, this can be determined by the difference in the mean energy of the mechanical resonator and the Von Neumann entropy of the steady state of the system; $F(\rho)= \text{Tr}[\rho\hat{H}_{\text{b}}]- K_{\text{b}}T_{\text{b}} S(\rho)$.
In addition, $\hat{H}_{\text{b}}$ is the Hamiltonian of the mechanical resonator.
In our heat engine, although the initial state of the piston is thermal, however, under the action of quadratic optomechanical interaction it evolves to a thermal-squeezed state. Likewise, the optical resonator works on the piston, consequently the energy is stored as extractable work in it, and state of the piston becomes non-passive. This extractable work can be calculated by the difference in the free energies as given in Eq.~(\ref{eq:FreeEng}). We plot this $\Delta F$ in Fig.~\ref{fig:fig4d} for both linear and quadratic optomechanical coupling based models. Again, the work capacity of the piston in case of quadratic interaction is higher than for linear coupling. The reason for this increase in the work capacity is due to the ability of the squeezed state to store work. We like to emphasize here that, in the calculation of power $P_{a}$ (Fig.~\ref{fig:fig4a}), we have ignored the correlations between the mean photon number of the optical resonator and position of the piston mode; $\langle\hat{n}_a \hat{q}^2\rangle=\langle\hat{n}_a\rangle \langle\hat{q}^2\rangle$. On contrary, this factorization is not performed while calculating the dissipated internal power, power under load and work capacity, presented in Figs.~\ref{fig:fig4b}-\ref{fig:fig4d}. In addition, we calculate the work and power of the engine at steady-state, the engine passes through a large number of transient Otto cycles to reach the steady-state. During the transient regime, the piston mode has more squeezing as shown in the Figs.~\ref{fig:fig2b} and \ref{fig:fig2c} as compared to steady-state squeezing (Fig.~\ref{fig:fig2d}). The decrease in squeezing of the piston mode is due to the presence of strong dissipation in the sideband-unresolved regime of optomechanics; $\omega_{\text{b}}\ll\kappa_{\text{a}}$. To calculate the work of a particular cycle during the transient regime, one has to perform energy measurement on the system after the completion of that cycle. 
This will kill the quantum correlations between the optical and mechanical components of our system~\cite{PhysRevLett.118.050601}. If we calculate the work of each cycle by using the method given in Ref.~\cite{PhysRevLett.118.050601}, the output work will be different than the calculated work at steady-state reported in Fig.~\ref{fig:fig4}. This is due to the fact that in Figs.~\ref{fig:fig4b}-\ref{fig:fig4d}, the quantum correlations are present, and these correlations will not play a role if one uses the projective measurement method presented in Ref.~\cite{PhysRevLett.118.050601}.
Finally, we present some remarks about the parameters of our quadratic optomechanical heat engine for the experimental realization. The parameter regime we used, $\kappa_{\text{b}}<\omega_{\text{b}}<\kappa_{\text{a}}$, can be realized in a planar silicon photonic crystal cavity~\cite{Oskar_2015}, or in circuit QED by mapping the quadratic optomechanical coupling onto the superconducting electrical circuit system~\cite{NoriQuad}. The key challenge is to realize the single-photon quadratic optomechanical coupling regime in which the mechanical resonator frequency becomes comparable with the quadratic coupling. In our work, by taking into account the stability of the system we considered $g<\omega_{\text{b}}$ for the other system parameters given in Fig.~\ref{fig:fig2}. Experimentally the single-photon strong coupling regime in the quadratic optomechanical system has not been achieved so far. However, recent experimental advances in this field may make it possible to achieve this regime in the future. In a planar photonic crystal cavity, the single-photon coupling strength can be enhanced from a few Hz to 1 kHz~\cite{Kalaee_2016} or several hundred kHz~\cite{Oskar_2015}. Moreover, in Ref.~\cite{PhysRevA.85.053832} the quadratic optomechanical coupling strength $g$ has been estimated in the MHz regime. There are several theoretical proposals that also exploit the single-photon quadratic optomechanical coupling regime~\cite{Liao_2013, Liao2014, PhysRevA.92.023811, PhysRevA.96.013860, PhysRevA.99.013804}.
\section{V.\, Conclusions}\label{sec:conclusions}
In conclusion, we proposed and examined a quantum heat engine based on a general quadratic coupled optomechanical system. In our model, the working fluid mode (optical) is driven incoherently with a quasi-thermal drive, and it interacts with the quantized piston mode via quadratic optomechanical interaction. Accordingly, the piston evolves from an initial thermal state to a thermal-squeezed state. These states belong to the class of so-called thermodynamically non-passive states, as work can be extracted from such states. We verified the thermal-squeezed state of the piston mode by numerically calculating the Wigner functions. Thermodynamical properties of the heat engine are investigated by plotting the effective mean energy for different values of effective frequency of the working fluid mode. We identified an effective Otto cycle, and estimated the extractable work by calculating the area of effective $T$-$S$ cycle diagram, which ignores the quantum correlations between the optical and mechanical components, as one figure of merit.
In addition, we calculated the internal dissipated power, dissipated power under load, and work capacity of the piston, which are sensitive to quantum correlations. We reported that all these figures of merit show higher work output for the quadratic interaction relative to the linear optomechanical interaction.
\section{Appendix}\label{sec:Appendix}
The equations of motions for the relevant thermodynamical observables can be determined using Eq.~(\ref{eq:master}) and given by
\begin{eqnarray}\label{Eq:rateEqs}
\frac{d}{dt}\langle\hat{n}_{a}\rangle &=& A - B\langle\hat{n}_{a}\rangle, \nonumber \\
\frac{d}{dt}\langle\hat{n}_{b}\rangle &=& \kappa_{b}(\bar{n}_{b} - \langle\hat{n}_{b}\rangle)+g\langle\hat{n}_{a}(\hat{q}\hat{p}+\hat{p}\hat{q})\rangle, \nonumber \\
\frac{d}{dt}\langle\hat{n}_{a}(\hat{q}\hat{p}+\hat{p}\hat{q})\rangle &=& A - B\langle\hat{n}_{a}(\hat{q}\hat{p}+\hat{p}\hat{q})\rangle - (\omega_{b}- i\kappa_{b})\nonumber \\ &\times & \langle\hat{n}_{a}(\hat{q}^2-\hat{p}^2)\rangle + 8ig\langle\hat{n}_{a}^2\hat{q}^2\rangle - 4i\kappa_{b}\langle\hat{n}_{a}\hat{b}^{\dagger 2}\rangle, \nonumber \\
\frac{d}{dt}\langle\hat{n}_{a}\hat{q}^2\rangle &=& A- B\langle\hat{n}_{a}\hat{q}^2\rangle + (\omega_{b}- \frac{i}{2}\kappa_{b})\langle\hat{n}_{a}(\hat{q}\hat{p}+\hat{p}\hat{q})\rangle \nonumber \\
&+& 2\kappa_{b}(\bar{n}_{b}-\langle\hat{n}_{a}\hat{n}_{b}\rangle + \langle\hat{n}_{a}\hat{b}^{\dagger 2}\rangle), \nonumber \\
\frac{d}{dt}\langle\hat{n}_{a}\hat{p}^2\rangle &=& A- B\langle\hat{n}_{a}\hat{p}^2\rangle - (\omega_{b}+ \frac{i}{2}\kappa_{b})\langle\hat{n}_{a}(\hat{q}\hat{p}+\hat{p}\hat{q})\rangle \nonumber \\
&+& 2\kappa_{b}(\bar{n}_{b}-\langle\hat{n}_{a}\hat{n}_{b}\rangle + \langle\hat{n}_{a}\hat{b}^{\dagger 2}\rangle) \nonumber \\
&+& 4g \langle\hat{n}_{a}^2(\hat{q}\hat{p}+\hat{p}\hat{q})\rangle, \nonumber \\
\frac{d}{dt}\langle\hat{n}_{a}\hat{n}_{b}\rangle &=& A - B\langle\hat{n}_{a}\hat{n}_{b}\rangle +\kappa_{b}(\bar{n}_{b}-\langle\hat{n}_{a}\hat{n}_{b}\rangle)\nonumber \\
&+&2g\langle\hat{n}_{a}^2(\hat{q}\hat{p}+\hat{p}\hat{q})\rangle, \nonumber \\
\frac{d}{dt}\langle\hat{n}_{a}\hat{b}^{\dagger 2}\rangle &=& A + (2i(\omega_{b}+2g)-B-\kappa_{b}\bar{n}_{b})\langle\hat{n}_{a}\hat{b}^{\dagger 2}\rangle \nonumber \\
&-& 2ig(2\langle\hat{n}_{a}\hat{n}_{b}\rangle + \langle\hat{n}_{a}\rangle).
\end{eqnarray}
Here $A=\kappa_{a}\bar{n}_{a}+\kappa_{h}\bar{n}_{h}$, $B=\kappa_{a}+\kappa_{h}$, $\hat{q}=(\hat{b}+\hat{b}^{\dagger})$ and $\hat{p}=i(\hat{b}^{\dagger}-\hat{b})$. The figures of merit used to evaluate the performance of the engine are presented in Figs.~\ref{fig:fig4b}-\ref{fig:fig4d}; these are numerically evaluated and depend on the mean excitation $\langle\hat{n}_{b}\rangle$. During the dynamics of the heat engine, the optical and mechanical modes are quantum correlated, which can be verified from the equation of motion for $\langle\hat{n}_{b}\rangle$. Although the equations of motion presented in~(\ref{Eq:rateEqs}) do not form a closed set, they can still be solved by the method of weakening the correlations that allows for factorizing the optical and mechanical mode operators at the higher order of equations in the hierarchy~\cite{Bonifacio_1975, Andreev_1977}. We see that the dynamics of $\langle\hat{n}_{b}\rangle$ depends on 4-operator quantum correlations between optical and mechanical subsystems, $\langle\hat{n}_{a}\hat{q}\hat{p}\rangle$. From the hierarchy of equations of motion, it can be noted that the evolution of the 4-operator quantum correlation $\langle\hat{n}_{a}\hat{q}\hat{p}\rangle$ depends on mechanical quadratic squeezing explicitly under the factorization approximation applied to 6-operator correlations such as $\langle\hat{n}_{a}^2\hat{q}^2\rangle$. We remark that a similar set of equations including classical thermal white noise drives based upon Langevin dynamics can be written and the engine operation influenced by the classical correlations could be obtained. The quantum correlations in the case of the standard linear optomechanical model are stronger than classical ones and hence yield a more powerful engine when it is harvesting work in the quantum mechanical cycle instead of the stochastic one~\cite{Umit-OM}. The same conclusion, with even further enhancement by quantum squeezing, applies here, too.
\begin{thebibliography}{83}
\makeatletter
\providecommand \@ifxundefined [1]{
\@ifx{#1\undefined}
}
\providecommand \@ifnum [1]{
\ifnum #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \@ifx [1]{
\ifx #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \natexlab [1]{#1}
\providecommand \enquote [1]{``#1''}
\providecommand \bibnamefont [1]{#1}
\providecommand \bibfnamefont [1]{#1}
\providecommand \citenamefont [1]{#1}
\providecommand \href@noop [0]{\@secondoftwo}
\providecommand \href [0]{\begingroup \@sanitize@url \@href}
\providecommand \@href[1]{\@@startlink{#1}\@@href}
\providecommand \@@href[1]{\endgroup#1\@@endlink}
\providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode
`\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax}
\providecommand \@@startlink[1]{}
\providecommand \@@endlink[0]{}
\providecommand \url [0]{\begingroup\@sanitize@url \@url }
\providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }}
\providecommand \urlprefix [0]{URL }
\providecommand \Eprint [0]{\href }
\providecommand \doibase [0]{http://dx.doi.org/}
\providecommand \selectlanguage [0]{\@gobble}
\providecommand \bibinfo [0]{\@secondoftwo}
\providecommand \bibfield [0]{\@secondoftwo}
\providecommand \translation [1]{[#1]}
\providecommand \BibitemOpen [0]{}
\providecommand \bibitemStop [0]{}
\providecommand \bibitemNoStop [0]{.\EOS\space}
\providecommand \EOS [0]{\spacefactor3000\relax}
\providecommand \BibitemShut [1]{\csname bibitem#1\endcsname}
\let\auto@bib@innerbib\@empty
\bibitem [{\citenamefont {Kosloff}\ and\ \citenamefont {Levy}(2014)}]{HE-def}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Ronnie}\ \bibnamefont
{Kosloff}}\ and\ \bibinfo {author} {\bibfnamefont {Amikam}\ \bibnamefont
{Levy}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Quantum heat
engines and refrigerators: Continuous devices},}\ }\href {\doibase
10.1146/annurev-physchem-040513-103724} {\bibfield {journal} {\bibinfo
{journal} {Ann. Rev. Phys. Chem.}\ }\textbf {\bibinfo {volume}
{65}},\ \bibinfo {pages} {365--393} (\bibinfo {year} {2014})},\ \bibinfo
{note} {pMID: 24689798},\ \BibitemShut
{NoStop}
\bibitem [{\citenamefont {Friedenberger}\ and\ \citenamefont
{Lutz}(2017{\natexlab{a}})}]{Kos-HE-Rev}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Alexander}\
\bibnamefont {Friedenberger}}\ and\ \bibinfo {author} {\bibfnamefont {Eric}\
\bibnamefont {Lutz}},\ }\bibfield {title} {\enquote {\bibinfo {title} {When
is a quantum heat engine quantum?}}\ }\href
{http://stacks.iop.org/0295-5075/120/i=1/a=10002} {\bibfield {journal}
{\bibinfo {journal} {Eurphys. Lett.}\ }\textbf {\bibinfo {volume} {120}},\ \bibinfo
{pages} {10002} (\bibinfo {year} {2017}{\natexlab{a}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Kieu}(2004)}]{HE1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Tien~D.}\ \bibnamefont
{Kieu}},\ }\bibfield {title} {\enquote {\bibinfo {title} {The second law,
maxwell's demon, and work derivable from quantum heat engines},}\ }\href
{\doibase 10.1103/PhysRevLett.93.140403} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {93}},\ \bibinfo
{pages} {140403} (\bibinfo {year} {2004})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Quan}\ \emph {et~al.}(2005)\citenamefont {Quan},
\citenamefont {Zhang},\ and\ \citenamefont {Sun}}]{HE2}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {H.~T.}\ \bibnamefont
{Quan}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Zhang}}, \ and\
\bibinfo {author} {\bibfnamefont {C.~P.}\ \bibnamefont {Sun}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Quantum heat engine with multilevel
quantum systems},}\ }\href {\doibase 10.1103/PhysRevE.72.056110} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. E}\ }\textbf {\bibinfo {volume}
{72}},\ \bibinfo {pages} {056110} (\bibinfo {year} {2005})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Tonner}\ and\ \citenamefont {Mahler}(2005)}]{HE3}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Friedemann}\
\bibnamefont {Tonner}}\ and\ \bibinfo {author} {\bibfnamefont {G\"unter}\
\bibnamefont {Mahler}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Autonomous quantum thermodynamic machines},}\ }\href {\doibase
10.1103/PhysRevE.72.066118} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. E}\ }\textbf {\bibinfo {volume} {72}},\ \bibinfo {pages} {066118}
(\bibinfo {year} {2005})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Quan}\ \emph {et~al.}(2007)\citenamefont {Quan},
\citenamefont {Liu}, \citenamefont {Sun},\ and\ \citenamefont {Nori}}]{HE4}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {H.~T.}\ \bibnamefont
{Quan}}, \bibinfo {author} {\bibfnamefont {Yu-xi}\ \bibnamefont {Liu}},
\bibinfo {author} {\bibfnamefont {C.~P.}\ \bibnamefont {Sun}}, \ and\
\bibinfo {author} {\bibfnamefont {Franco}\ \bibnamefont {Nori}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Quantum thermodynamic cycles and quantum
heat engines},}\ }\href {\doibase 10.1103/PhysRevE.76.031105} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. E}\ }\textbf {\bibinfo {volume}
{76}},\ \bibinfo {pages} {031105} (\bibinfo {year} {2007})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Henrich}\ \emph {et~al.}(2007)\citenamefont
{Henrich}, \citenamefont {Mahler},\ and\ \citenamefont {Michel}}]{HE5}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Markus~J.}\
\bibnamefont {Henrich}}, \bibinfo {author} {\bibfnamefont {G\"unter}\
\bibnamefont {Mahler}}, \ and\ \bibinfo {author} {\bibfnamefont {Mathias}\
\bibnamefont {Michel}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Driven spin systems as quantum thermodynamic machines: Fundamental
limits},}\ }\href {\doibase 10.1103/PhysRevE.75.051118} {\bibfield {journal}
{\bibinfo {journal} {Phys. Rev. E}\ }\textbf {\bibinfo {volume} {75}},\
\bibinfo {pages} {051118} (\bibinfo {year} {2007})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Scully}(2010)}]{HE6}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Marlan~O.}\
\bibnamefont {Scully}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Quantum photocell: Using quantum coherence to reduce radiative recombination
and increase efficiency},}\ }\href {\doibase 10.1103/PhysRevLett.104.207701}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf
{\bibinfo {volume} {104}},\ \bibinfo {pages} {207701} (\bibinfo {year}
{2010})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Scully}\ \emph
{et~al.}(2011{\natexlab{a}})\citenamefont {Scully}, \citenamefont {Chapin},
\citenamefont {Dorfman}, \citenamefont {Kim},\ and\ \citenamefont
{Svidzinsky}}]{HE7}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Marlan~O.}\
\bibnamefont {Scully}}, \bibinfo {author} {\bibfnamefont {Kimberly~R.}\
\bibnamefont {Chapin}}, \bibinfo {author} {\bibfnamefont {Konstantin~E.}\
\bibnamefont {Dorfman}}, \bibinfo {author} {\bibfnamefont {Moochan~Barnabas}\
\bibnamefont {Kim}}, \ and\ \bibinfo {author} {\bibfnamefont {Anatoly}\
\bibnamefont {Svidzinsky}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Quantum heat engine power can be increased by noise-induced coherence},}\
}\href {\doibase 10.1073/pnas.1110234108} {\bibfield {journal} {\bibinfo
{journal} {Proc. Natl. Acad. Sci.}\ }\textbf
{\bibinfo {volume} {108}},\ \bibinfo {pages} {15097--15100} (\bibinfo {year}
{2011}{\natexlab{a}})},\ \BibitemShut {NoStop}
\bibitem [{\citenamefont {Abah}\ \emph {et~al.}(2012)\citenamefont {Abah},
\citenamefont {Ro\ss{}nagel}, \citenamefont {Jacob}, \citenamefont {Deffner},
\citenamefont {Schmidt-Kaler}, \citenamefont {Singer},\ and\ \citenamefont
{Lutz}}]{HE8}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {O.}~\bibnamefont
{Abah}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Ro\ss{}nagel}},
\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Jacob}}, \bibinfo
{author} {\bibfnamefont {S.}~\bibnamefont {Deffner}}, \bibinfo {author}
{\bibfnamefont {F.}~\bibnamefont {Schmidt-Kaler}}, \bibinfo {author}
{\bibfnamefont {K.}~\bibnamefont {Singer}}, \ and\ \bibinfo {author}
{\bibfnamefont {E.}~\bibnamefont {Lutz}},\ }\bibfield {title} {\enquote
{\bibinfo {title} {Single-ion heat engine at maximum power},}\ }\href
{\doibase 10.1103/PhysRevLett.109.203006} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {109}},\ \bibinfo
{pages} {203006} (\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Brunner}\ \emph {et~al.}(2012)\citenamefont
{Brunner}, \citenamefont {Linden}, \citenamefont {Popescu},\ and\
\citenamefont {Skrzypczyk}}]{HE9}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Nicolas}\ \bibnamefont
{Brunner}}, \bibinfo {author} {\bibfnamefont {Noah}\ \bibnamefont {Linden}},
\bibinfo {author} {\bibfnamefont {Sandu}\ \bibnamefont {Popescu}}, \ and\
\bibinfo {author} {\bibfnamefont {Paul}\ \bibnamefont {Skrzypczyk}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Virtual qubits, virtual
temperatures, and the foundations of thermodynamics},}\ }\href {\doibase
10.1103/PhysRevE.85.051117} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. E}\ }\textbf {\bibinfo {volume} {85}},\ \bibinfo {pages} {051117}
(\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Anders}\ and\ \citenamefont
{Giovannetti}(2013)}]{HE10}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Janet}\ \bibnamefont
{Anders}}\ and\ \bibinfo {author} {\bibfnamefont {Vittorio}\ \bibnamefont
{Giovannetti}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Thermodynamics of discrete quantum processes},}\ }\href
{http://stacks.iop.org/1367-2630/15/i=3/a=033022} {\bibfield {journal}
{\bibinfo {journal} {New J. Phys.}\ }\textbf {\bibinfo {volume} {15}},\
\bibinfo {pages} {033022} (\bibinfo {year} {2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Bergenfeldt}\ \emph {et~al.}(2014)\citenamefont
{Bergenfeldt}, \citenamefont {Samuelsson}, \citenamefont {Sothmann},
\citenamefont {Flindt},\ and\ \citenamefont {B\"uttiker}}]{HE11}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Christian}\
\bibnamefont {Bergenfeldt}}, \bibinfo {author} {\bibfnamefont {Peter}\
\bibnamefont {Samuelsson}}, \bibinfo {author} {\bibfnamefont {Bj\"orn}\
\bibnamefont {Sothmann}}, \bibinfo {author} {\bibfnamefont {Christian}\
\bibnamefont {Flindt}}, \ and\ \bibinfo {author} {\bibfnamefont {Markus}\
\bibnamefont {B\"uttiker}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Hybrid microwave-cavity heat engine},}\ }\href {\doibase
10.1103/PhysRevLett.112.076803} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {112}},\ \bibinfo {pages}
{076803} (\bibinfo {year} {2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Uzdin}\ and\ \citenamefont {Kosloff}(2014)}]{HE12}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Raam}\ \bibnamefont
{Uzdin}}\ and\ \bibinfo {author} {\bibfnamefont {Ronnie}\ \bibnamefont
{Kosloff}},\ }\bibfield {title} {\enquote {\bibinfo {title} {The multilevel
four-stroke swap engine and its environment},}\ }\href
{http://stacks.iop.org/1367-2630/16/i=9/a=095003} {\bibfield {journal}
{\bibinfo {journal} {New J. Phys.}\ }\textbf {\bibinfo {volume} {16}},\
\bibinfo {pages} {095003} (\bibinfo {year} {2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Altintas}\ \emph {et~al.}(2014)\citenamefont
{Altintas}, \citenamefont {Hardal},\ and\ \citenamefont {M\"ustecapl\ifmmode
\imath \else \i \fi{}o\ifmmode~\breve{g}\else \u{g}\fi{}lu}}]{HE13}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Ferdi}\ \bibnamefont
{Altintas}}, \bibinfo {author} {\bibfnamefont {Ali \"U.~C.}\ \bibnamefont
{Hardal}}, \ and\ \bibinfo {author} {\bibfnamefont {\"Ozg\"ur~E.}\
\bibnamefont {M\"ustecapl\ifmmode \imath \else \i
\fi{}o\ifmmode~\breve{g}\else \u{g}\fi{}lu}},\ }\bibfield {title} {\enquote
{\bibinfo {title} {Quantum correlated heat engine with spin squeezing},}\
}\href {\doibase 10.1103/PhysRevE.90.032102} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. E}\ }\textbf {\bibinfo {volume} {90}},\ \bibinfo
{pages} {032102} (\bibinfo {year} {2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Ro\ss{}nagel}\ \emph {et~al.}(2014)\citenamefont
{Ro\ss{}nagel}, \citenamefont {Abah}, \citenamefont {Schmidt-Kaler},
\citenamefont {Singer},\ and\ \citenamefont {Lutz}}]{HE14}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Ro\ss{}nagel}}, \bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Abah}},
\bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Schmidt-Kaler}}, \bibinfo
{author} {\bibfnamefont {K.}~\bibnamefont {Singer}}, \ and\ \bibinfo {author}
{\bibfnamefont {E.}~\bibnamefont {Lutz}},\ }\bibfield {title} {\enquote
{\bibinfo {title} {Nanoscale heat engine beyond the Carnot limit},}\ }\href
{\doibase 10.1103/PhysRevLett.112.030602} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {112}},\ \bibinfo
{pages} {030602} (\bibinfo {year} {2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Plastina}\ \emph {et~al.}(2014)\citenamefont
{Plastina}, \citenamefont {Alecce}, \citenamefont {Apollaro}, \citenamefont
{Falcone}, \citenamefont {Francica}, \citenamefont {Galve}, \citenamefont
{Lo~Gullo},\ and\ \citenamefont {Zambrini}}]{HE15}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {F.}~\bibnamefont
{Plastina}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Alecce}},
\bibinfo {author} {\bibfnamefont {T.~J.~G.}\ \bibnamefont {Apollaro}},
\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Falcone}}, \bibinfo
{author} {\bibfnamefont {G.}~\bibnamefont {Francica}}, \bibinfo {author}
{\bibfnamefont {F.}~\bibnamefont {Galve}}, \bibinfo {author} {\bibfnamefont
{N.}~\bibnamefont {Lo~Gullo}}, \ and\ \bibinfo {author} {\bibfnamefont
{R.}~\bibnamefont {Zambrini}},\ }\bibfield {title} {\enquote {\bibinfo
{title} {Irreversible work and inner friction in quantum thermodynamic
processes},}\ }\href {\doibase 10.1103/PhysRevLett.113.260601} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo
{volume} {113}},\ \bibinfo {pages} {260601} (\bibinfo {year}
{2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {del~Campo}\ \emph {et~al.}(2014)\citenamefont
{del~Campo}, \citenamefont {Goold},\ and\ \citenamefont {Paternostro}}]{HE16}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}\ \bibnamefont
{del~Campo}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Goold}}, \
and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Paternostro}},\
}\bibfield {title}
{\enquote {\bibinfo {title} {More bang for your buck: Super-adiabatic quantum
engines},}\ }\href {http://dx.doi.org/10.1038/srep06208} {\bibfield
{journal} {\bibinfo {journal} {Sci. Rep.}\ }\textbf {\bibinfo {volume}
{4}},\ \bibinfo {pages} {6208} (\bibinfo {year} {2014})},\ \bibinfo {note}
{article}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Altintas}\ \emph {et~al.}(2015)\citenamefont
{Altintas}, \citenamefont {Hardal},\ and\ \citenamefont {M\"ustecapl\ifmmode
\imath \else \i \fi{}o\ifmmode~\breve{g}\else \u{g}\fi{}lu}}]{HE17}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Ferdi}\ \bibnamefont
{Altintas}}, \bibinfo {author} {\bibfnamefont {Ali \"U.~C.}\ \bibnamefont
{Hardal}}, \ and\ \bibinfo {author} {\bibfnamefont {\"Ozg\"ur~E.}\
\bibnamefont {M\"ustecapl\ifmmode \imath \else \i
\fi{}o\ifmmode~\breve{g}\else \u{g}\fi{}lu}},\ }\bibfield {title} {\enquote
{\bibinfo {title} {Rabi model as a quantum coherent heat engine: From quantum
biology to superconducting circuits},}\ }\href {\doibase
10.1103/PhysRevA.91.023816} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {91}},\ \bibinfo {pages} {023816}
(\bibinfo {year} {2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Ivanchenko}(2015)}]{HE18}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {E.~A.}\ \bibnamefont
{Ivanchenko}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Quantum Otto
cycle efficiency on coupled qudits},}\ }\href {\doibase
10.1103/PhysRevE.92.032124} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. E}\ }\textbf {\bibinfo {volume} {92}},\ \bibinfo {pages} {032124}
(\bibinfo {year} {2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Perarnau-Llobet}\ \emph {et~al.}(2015)\citenamefont
{Perarnau-Llobet}, \citenamefont {Hovhannisyan}, \citenamefont {Huber},
\citenamefont {Skrzypczyk}, \citenamefont {Brunner},\ and\ \citenamefont
{Ac\'{\i}n}}]{HE19}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Mart\'{\i}}\
\bibnamefont {Perarnau-Llobet}}, \bibinfo {author} {\bibfnamefont {Karen~V.}\
\bibnamefont {Hovhannisyan}}, \bibinfo {author} {\bibfnamefont {Marcus}\
\bibnamefont {Huber}}, \bibinfo {author} {\bibfnamefont {Paul}\ \bibnamefont
{Skrzypczyk}}, \bibinfo {author} {\bibfnamefont {Nicolas}\ \bibnamefont
{Brunner}}, \ and\ \bibinfo {author} {\bibfnamefont {Antonio}\ \bibnamefont
{Ac\'{\i}n}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Extractable
work from correlations},}\ }\href {\doibase 10.1103/PhysRevX.5.041011}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. X}\ }\textbf {\bibinfo
{volume} {5}},\ \bibinfo {pages} {041011} (\bibinfo {year}
{2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Hardal}\ and\ \citenamefont
{M{\"u}stecaplioglu}(2015)}]{HE20}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Ali {\"U}.~C.}\
\bibnamefont {Hardal}}\ and\ \bibinfo {author} {\bibfnamefont
{{\"O}zg{\"u}r~E.}\ \bibnamefont {M{\"u}stecaplioglu}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Superradiant quantum heat engine},}\ }\href
{http://dx.doi.org/10.1038/srep12953} {\bibfield {journal} {\bibinfo
{journal} {Sci. Rep.}\ }\textbf {\bibinfo {volume} {5}},\ \bibinfo {pages}
{12953} (\bibinfo {year} {2015})},\ \bibinfo {note} {article}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Manzano}\ \emph
{et~al.}(2016{\natexlab{a}})\citenamefont {Manzano}, \citenamefont {Galve},
\citenamefont {Zambrini},\ and\ \citenamefont {Parrondo}}]{HE21}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Gonzalo}\ \bibnamefont
{Manzano}}, \bibinfo {author} {\bibfnamefont {Fernando}\ \bibnamefont
{Galve}}, \bibinfo {author} {\bibfnamefont {Roberta}\ \bibnamefont
{Zambrini}}, \ and\ \bibinfo {author} {\bibfnamefont {Juan M.~R.}\
\bibnamefont {Parrondo}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Entropy production and thermodynamic power of the squeezed thermal
reservoir},}\ }\href {\doibase 10.1103/PhysRevE.93.052120} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. E}\ }\textbf {\bibinfo {volume}
{93}},\ \bibinfo {pages} {052120} (\bibinfo {year}
{2016}{\natexlab{a}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {T\"urkpen\ifmmode~\mbox{\c{c}}\else \c{c}\fi{}e}\
and\ \citenamefont {M\"ustecapl\ifmmode \imath \else \i
\fi{}o\ifmmode~\breve{g}\else \u{g}\fi{}lu}(2016)}]{HE22}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Deniz}\ \bibnamefont
{T\"urkpen\ifmmode~\mbox{\c{c}}\else \c{c}\fi{}e}}\ and\ \bibinfo {author}
{\bibfnamefont {\"Ozg\"ur~E.}\ \bibnamefont {M\"ustecapl\ifmmode \imath \else
\i \fi{}o\ifmmode~\breve{g}\else \u{g}\fi{}lu}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Quantum fuel with multilevel atomic coherence
for ultrahigh specific work in a photonic Carnot engine},}\ }\href {\doibase
10.1103/PhysRevE.93.012145} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. E}\ }\textbf {\bibinfo {volume} {93}},\ \bibinfo {pages} {012145}
(\bibinfo {year} {2016})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Campisi}\ and\ \citenamefont {Fazio}(2016)}]{HE23}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Michele}\ \bibnamefont
{Campisi}}\ and\ \bibinfo {author} {\bibfnamefont {Rosario}\ \bibnamefont
{Fazio}},\ }\bibfield {title} {\enquote {\bibinfo {title} {The power of a
critical heat engine},}\ }\href {http://dx.doi.org/10.1038/ncomms11895}
{\bibfield {journal} {\bibinfo {journal} {Nat. Commun.}\ }\textbf {\bibinfo
{volume} {7}},\ \bibinfo {pages} {11895} (\bibinfo {year} {2016})},\ \bibinfo
{note} {article}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Dag}\ \emph {et~al.}(2016)\citenamefont {Dag},
\citenamefont {Niedenzu}, \citenamefont {M\"ustecapl\ifmmode \imath \else \i
\fi{}o\ifmmode~\breve{g}\else \u{g}\fi{}lu},\ and\ \citenamefont
{Kurizki}}]{HE24}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Ceren~B.}\
\bibnamefont {Dag}}, \bibinfo {author} {\bibfnamefont {Wolfgang}\
\bibnamefont {Niedenzu}}, \bibinfo {author} {\bibfnamefont {\"Ozg\"ur~E.}\
\bibnamefont {M\"ustecapl\ifmmode \imath \else \i
\fi{}o\ifmmode~\breve{g}\else \u{g}\fi{}lu}}, \ and\ \bibinfo {author}
{\bibfnamefont {Gershon}\ \bibnamefont {Kurizki}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Multiatom quantum coherences in micromasers as
fuel for thermal and nonthermal machines},}\ }\href {\doibase
10.3390/e18070244} {\bibfield {journal} {\bibinfo {journal} {Entropy}\
}\textbf {\bibinfo {volume} {18}} (\bibinfo {year} {2016}),\
10.3390/e18070244}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Roulet}\ \emph {et~al.}(2017)\citenamefont {Roulet},
\citenamefont {Nimmrichter}, \citenamefont {Arrazola}, \citenamefont {Seah},\
and\ \citenamefont {Scarani}}]{HE25}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Alexandre}\
\bibnamefont {Roulet}}, \bibinfo {author} {\bibfnamefont {Stefan}\
\bibnamefont {Nimmrichter}}, \bibinfo {author} {\bibfnamefont {Juan~Miguel}\
\bibnamefont {Arrazola}}, \bibinfo {author} {\bibfnamefont {Stella}\
\bibnamefont {Seah}}, \ and\ \bibinfo {author} {\bibfnamefont {Valerio}\
\bibnamefont {Scarani}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Autonomous rotor heat engine},}\ }\href {\doibase
10.1103/PhysRevE.95.062131} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. E}\ }\textbf {\bibinfo {volume} {95}},\ \bibinfo {pages} {062131}
(\bibinfo {year} {2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Hofer}\ \emph {et~al.}(2017)\citenamefont {Hofer},
\citenamefont {Brask}, \citenamefont {Perarnau-Llobet},\ and\ \citenamefont
{Brunner}}]{HE26}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Patrick~P.}\
\bibnamefont {Hofer}}, \bibinfo {author} {\bibfnamefont {Jonatan~Bohr}\
\bibnamefont {Brask}}, \bibinfo {author} {\bibfnamefont {Mart\'{\i}}\
\bibnamefont {Perarnau-Llobet}}, \ and\ \bibinfo {author} {\bibfnamefont
{Nicolas}\ \bibnamefont {Brunner}},\ }\bibfield {title} {\enquote {\bibinfo
{title} {Quantum thermal machine as a thermometer},}\ }\href {\doibase
10.1103/PhysRevLett.119.090603} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {119}},\ \bibinfo {pages}
{090603} (\bibinfo {year} {2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Song}\ \emph {et~al.}(2016)\citenamefont {Song},
\citenamefont {Singh}, \citenamefont {Zhang}, \citenamefont {Zhang},\ and\
\citenamefont {Meystre}}]{HE27}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Qiao}\ \bibnamefont
{Song}}, \bibinfo {author} {\bibfnamefont {Swati}\ \bibnamefont {Singh}},
\bibinfo {author} {\bibfnamefont {Keye}\ \bibnamefont {Zhang}}, \bibinfo
{author} {\bibfnamefont {Weiping}\ \bibnamefont {Zhang}}, \ and\ \bibinfo
{author} {\bibfnamefont {Pierre}\ \bibnamefont {Meystre}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {One qubit and one photon: The simplest
polaritonic heat engine},}\ }\href {\doibase 10.1103/PhysRevA.94.063852}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo
{volume} {94}},\ \bibinfo {pages} {063852} (\bibinfo {year}
{2016})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Friedenberger}\ and\ \citenamefont
{Lutz}(2017{\natexlab{b}})}]{HE28}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Alexander}\
\bibnamefont {Friedenberger}}\ and\ \bibinfo {author} {\bibfnamefont {Eric}\
\bibnamefont {Lutz}},\ }\bibfield {title} {\enquote {\bibinfo {title} {When
is a quantum heat engine quantum?}}\ }\href
{http://stacks.iop.org/0295-5075/120/i=1/a=10002} {\bibfield {journal}
{\bibinfo {journal} {Europhys. Lett.}\ }\textbf {\bibinfo {volume} {120}},\ \bibinfo
{pages} {10002} (\bibinfo {year} {2017}{\natexlab{b}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Zhang}\ \emph
{et~al.}(2014{\natexlab{a}})\citenamefont {Zhang}, \citenamefont {Bariani},\
and\ \citenamefont {Meystre}}]{OM-HE1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Keye}\ \bibnamefont
{Zhang}}, \bibinfo {author} {\bibfnamefont {Francesco}\ \bibnamefont
{Bariani}}, \ and\ \bibinfo {author} {\bibfnamefont {Pierre}\ \bibnamefont
{Meystre}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Quantum
optomechanical heat engine},}\ }\href {\doibase
10.1103/PhysRevLett.112.150602} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {112}},\ \bibinfo {pages}
{150602} (\bibinfo {year} {2014}{\natexlab{a}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Zhang}\ \emph
{et~al.}(2014{\natexlab{b}})\citenamefont {Zhang}, \citenamefont {Bariani},\
and\ \citenamefont {Meystre}}]{OM-HE2}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Keye}\ \bibnamefont
{Zhang}}, \bibinfo {author} {\bibfnamefont {Francesco}\ \bibnamefont
{Bariani}}, \ and\ \bibinfo {author} {\bibfnamefont {Pierre}\ \bibnamefont
{Meystre}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Theory of an
optomechanical quantum heat engine},}\ }\href {\doibase
10.1103/PhysRevA.90.023819} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {90}},\ \bibinfo {pages} {023819}
(\bibinfo {year} {2014}{\natexlab{b}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Mari}\ \emph {et~al.}(2015)\citenamefont {Mari},
\citenamefont {Farace},\ and\ \citenamefont {Giovannetti}}]{OM-HE3}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A}~\bibnamefont
{Mari}}, \bibinfo {author} {\bibfnamefont {A}~\bibnamefont {Farace}}, \ and\
\bibinfo {author} {\bibfnamefont {V}~\bibnamefont {Giovannetti}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Quantum optomechanical piston engines
powered by heat},}\ }\href {http://stacks.iop.org/0953-4075/48/i=17/a=175501}
{\bibfield {journal} {\bibinfo {journal} {J. Phys. B}\ }\textbf {\bibinfo
{volume} {48}},\ \bibinfo {pages} {175501} (\bibinfo {year}
{2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Gelbwaser-Klimovsky}\ and\ \citenamefont
{Kurizki}(2015)}]{OM-HE4}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont
{Gelbwaser-Klimovsky}}\ and\
\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Kurizki}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Work extraction from heat-powered quantized optomechanical
setups},}\ }\href {https://www.nature.com/articles/srep07809}
{\bibfield {journal} {\bibinfo {journal} {Sci. Rep.}\ }\textbf {\bibinfo
{volume} {5}},\ \bibinfo {pages} {7809} (\bibinfo {year}
{2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Zhang}\ and\ \citenamefont {Zhang}(2017)}]{OM-HE5}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont
{Keye}\ \bibnamefont {Zhang}}\ and\ \bibinfo {author} {\bibfnamefont
{Weiping}\ \bibnamefont {Zhang}},\ }\bibfield {title} {\enquote {\bibinfo
{title} {Quantum optomechanical straight-twin engine},}\ }\href {\doibase
10.1103/PhysRevA.95.053870} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {95}},\ \bibinfo {pages} {053870}
(\bibinfo {year} {2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Klatzow}\ \emph {et~al.}(2019)\citenamefont
{Klatzow}, \citenamefont {Becker}, \citenamefont {Ledingham}, \citenamefont
{Weinzetl}, \citenamefont {Kaczmarek}, \citenamefont {Saunders},
\citenamefont {Nunn}, \citenamefont {Walmsley}, \citenamefont {Uzdin},\ and\
\citenamefont {Poem}}]{Eilon2019}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {James}\ \bibnamefont
{Klatzow}}, \bibinfo {author} {\bibfnamefont {Jonas~N.}\ \bibnamefont
{Becker}}, \bibinfo {author} {\bibfnamefont {Patrick~M.}\ \bibnamefont
{Ledingham}}, \bibinfo {author} {\bibfnamefont {Christian}\ \bibnamefont
{Weinzetl}}, \bibinfo {author} {\bibfnamefont {Krzysztof~T.}\ \bibnamefont
{Kaczmarek}}, \bibinfo {author} {\bibfnamefont {Dylan~J.}\ \bibnamefont
{Saunders}}, \bibinfo {author} {\bibfnamefont {Joshua}\ \bibnamefont {Nunn}},
\bibinfo {author} {\bibfnamefont {Ian~A.}\ \bibnamefont {Walmsley}}, \bibinfo
{author} {\bibfnamefont {Raam}\ \bibnamefont {Uzdin}}, \ and\ \bibinfo
{author} {\bibfnamefont {Eilon}\ \bibnamefont {Poem}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Experimental demonstration of quantum effects in
the operation of microscopic heat engines},}\ }\href {\doibase
10.1103/PhysRevLett.122.110601} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {122}},\ \bibinfo {pages}
{110601} (\bibinfo {year} {2019})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Ronzani}\ \emph {et~al.}(2018)\citenamefont
{Ronzani}, \citenamefont {Karimi}, \citenamefont {Senior}, \citenamefont
{Chang}, \citenamefont {Peltonen}, \citenamefont {Chen},\ and\ \citenamefont
{Pekola}}]{Ronzani2018}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Alberto}\ \bibnamefont
{Ronzani}}, \bibinfo {author} {\bibfnamefont {Bayan}\ \bibnamefont {Karimi}},
\bibinfo {author} {\bibfnamefont {Jorden}\ \bibnamefont {Senior}}, \bibinfo
{author} {\bibfnamefont {Yu-Cheng}\ \bibnamefont {Chang}}, \bibinfo {author}
{\bibfnamefont {Joonas~T.}\ \bibnamefont {Peltonen}}, \bibinfo {author}
{\bibfnamefont {ChiiDong}\ \bibnamefont {Chen}}, \ and\ \bibinfo {author}
{\bibfnamefont {Jukka~P.}\ \bibnamefont {Pekola}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Tunable photonic heat transport in a quantum
heat valve},}\ }\href {\doibase 10.1038/s41567-018-0199-4} {\bibfield
{journal} {\bibinfo {journal} {Nat. Phys.}\ }\textbf {\bibinfo {volume}
{14}},\ \bibinfo {pages} {991--995} (\bibinfo {year} {2018})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Maslennikov}\ \emph {et~al.}(2019)\citenamefont
{Maslennikov}, \citenamefont {Ding}, \citenamefont {Habl{\"u}tzel},
\citenamefont {Gan}, \citenamefont {Roulet}, \citenamefont {Nimmrichter},
\citenamefont {Dai}, \citenamefont {Scarani},\ and\ \citenamefont
{Matsukevich}}]{Maslennikov2019}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Gleb}\ \bibnamefont
{Maslennikov}}, \bibinfo {author} {\bibfnamefont {Shiqian}\ \bibnamefont
{Ding}}, \bibinfo {author} {\bibfnamefont {Roland}\ \bibnamefont
{Habl{\"u}tzel}}, \bibinfo {author} {\bibfnamefont {Jaren}\ \bibnamefont
{Gan}}, \bibinfo {author} {\bibfnamefont {Alexandre}\ \bibnamefont {Roulet}},
\bibinfo {author} {\bibfnamefont {Stefan}\ \bibnamefont {Nimmrichter}},
\bibinfo {author} {\bibfnamefont {Jibo}\ \bibnamefont {Dai}}, \bibinfo
{author} {\bibfnamefont {Valerio}\ \bibnamefont {Scarani}}, \ and\ \bibinfo
{author} {\bibfnamefont {Dzmitry}\ \bibnamefont {Matsukevich}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Quantum absorption refrigerator with
trapped ions},}\ }\href {\doibase 10.1038/s41467-018-08090-0} {\bibfield
{journal} {\bibinfo {journal} {Nat. Commun.}\ }\textbf {\bibinfo {volume}
{10}},\ \bibinfo {pages} {202} (\bibinfo {year} {2019})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Manzano}\ \emph
{et~al.}(2016{\natexlab{b}})\citenamefont {Manzano}, \citenamefont {Galve},
\citenamefont {Zambrini},\ and\ \citenamefont
{Parrondo}}]{PhysRevE.93.052120}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Gonzalo}\ \bibnamefont
{Manzano}}, \bibinfo {author} {\bibfnamefont {Fernando}\ \bibnamefont
{Galve}}, \bibinfo {author} {\bibfnamefont {Roberta}\ \bibnamefont
{Zambrini}}, \ and\ \bibinfo {author} {\bibfnamefont {Juan M.~R.}\
\bibnamefont {Parrondo}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Entropy production and thermodynamic power of the squeezed thermal
reservoir},}\ }\href {\doibase 10.1103/PhysRevE.93.052120} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. E}\ }\textbf {\bibinfo {volume}
{93}},\ \bibinfo {pages} {052120} (\bibinfo {year}
{2016}{\natexlab{b}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Klaers}\ \emph
{et~al.}(2017{\natexlab{a}})\citenamefont {Klaers}, \citenamefont {Faelt},
\citenamefont {Imamoglu},\ and\ \citenamefont {Togan}}]{Togan2017}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Jan}\ \bibnamefont
{Klaers}}, \bibinfo {author} {\bibfnamefont {Stefan}\ \bibnamefont {Faelt}},
\bibinfo {author} {\bibfnamefont {Atac}\ \bibnamefont {Imamoglu}}, \ and\
\bibinfo {author} {\bibfnamefont {Emre}\ \bibnamefont {Togan}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Squeezed thermal reservoirs as a
resource for a nanomechanical engine beyond the Carnot limit},}\ }\href
{\doibase 10.1103/PhysRevX.7.031044} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. X}\ }\textbf {\bibinfo {volume} {7}},\ \bibinfo {pages}
{031044} (\bibinfo {year} {2017}{\natexlab{a}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Scully}\ \emph {et~al.}(2003)\citenamefont {Scully},
\citenamefont {Zubairy}, \citenamefont {Agarwal},\ and\ \citenamefont
{Walther}}]{Corr1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Marlan~O.}\
\bibnamefont {Scully}}, \bibinfo {author} {\bibfnamefont {M.~Suhail}\
\bibnamefont {Zubairy}}, \bibinfo {author} {\bibfnamefont {Girish~S.}\
\bibnamefont {Agarwal}}, \ and\ \bibinfo {author} {\bibfnamefont {Herbert}\
\bibnamefont {Walther}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Extracting work from a single heat bath via vanishing quantum coherence},}\
}\href {\doibase 10.1126/science.1078955} {\bibfield {journal} {\bibinfo
{journal} {Science}\ }\textbf {\bibinfo {volume} {299}},\ \bibinfo {pages}
{862--864} (\bibinfo {year} {2003})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Scully}\ \emph
{et~al.}(2011{\natexlab{b}})\citenamefont {Scully}, \citenamefont {Chapin},
\citenamefont {Dorfman}, \citenamefont {Kim},\ and\ \citenamefont
{Svidzinsky}}]{Corr2}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Marlan~O.}\
\bibnamefont {Scully}}, \bibinfo {author} {\bibfnamefont {Kimberly~R.}\
\bibnamefont {Chapin}}, \bibinfo {author} {\bibfnamefont {Konstantin~E.}\
\bibnamefont {Dorfman}}, \bibinfo {author} {\bibfnamefont {Moochan~Barnabas}\
\bibnamefont {Kim}}, \ and\ \bibinfo {author} {\bibfnamefont {Anatoly}\
\bibnamefont {Svidzinsky}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Quantum heat engine power can be increased by noise-induced coherence},}\
}\href {\doibase 10.1073/pnas.1110234108} {\bibfield {journal} {\bibinfo
{journal} {Proc. Natl. Acad. Sci.}\ }\textbf {\bibinfo {volume} {108}},\
\bibinfo {pages} {15097--15100} (\bibinfo {year}
{2011}{\natexlab{b}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Harbola}\ \emph {et~al.}(2012)\citenamefont
{Harbola}, \citenamefont {Rahav},\ and\ \citenamefont {Mukamel}}]{Corr3}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Upendra}\ \bibnamefont
{Harbola}}, \bibinfo {author} {\bibfnamefont {Saar}\ \bibnamefont {Rahav}}, \
and\ \bibinfo {author} {\bibfnamefont {Shaul}\ \bibnamefont {Mukamel}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Quantum heat engines: A
thermodynamic analysis of power and efficiency},}\ }\href
{http://stacks.iop.org/0295-5075/99/i=5/a=50005} {\bibfield {journal}
{\bibinfo {journal} {Europhys. Lett.}\ }\textbf {\bibinfo {volume} {99}},\ \bibinfo
{pages} {50005} (\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Dillenschneider}\ and\ \citenamefont
{Lutz}(2009)}]{Corr4}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont
{Dillenschneider}}\ and\ \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont
{Lutz}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Energetics of
quantum correlations},}\ }\href
{http://stacks.iop.org/0295-5075/88/i=5/a=50003} {\bibfield {journal}
{\bibinfo {journal} {Europhys. Lett.}\ }\textbf {\bibinfo {volume} {88}},\ \bibinfo
{pages} {50003} (\bibinfo {year} {2009})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Park}\ \emph {et~al.}(2013)\citenamefont {Park},
\citenamefont {Kim}, \citenamefont {Sagawa},\ and\ \citenamefont
{Kim}}]{Corr5}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Jung~Jun}\
\bibnamefont {Park}}, \bibinfo {author} {\bibfnamefont {Kang-Hwan}\
\bibnamefont {Kim}}, \bibinfo {author} {\bibfnamefont {Takahiro}\
\bibnamefont {Sagawa}}, \ and\ \bibinfo {author} {\bibfnamefont {Sang~Wook}\
\bibnamefont {Kim}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Heat
engine driven by purely quantum information},}\ }\href {\doibase
10.1103/PhysRevLett.111.230402} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {111}},\ \bibinfo {pages}
{230402} (\bibinfo {year} {2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Brunner}\ \emph {et~al.}(2014)\citenamefont
{Brunner}, \citenamefont {Huber}, \citenamefont {Linden}, \citenamefont
{Popescu}, \citenamefont {Silva},\ and\ \citenamefont {Skrzypczyk}}]{Corr6}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Nicolas}\ \bibnamefont
{Brunner}}, \bibinfo {author} {\bibfnamefont {Marcus}\ \bibnamefont {Huber}},
\bibinfo {author} {\bibfnamefont {Noah}\ \bibnamefont {Linden}}, \bibinfo
{author} {\bibfnamefont {Sandu}\ \bibnamefont {Popescu}}, \bibinfo {author}
{\bibfnamefont {Ralph}\ \bibnamefont {Silva}}, \ and\ \bibinfo {author}
{\bibfnamefont {Paul}\ \bibnamefont {Skrzypczyk}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Entanglement enhances cooling in microscopic
quantum refrigerators},}\ }\href {\doibase 10.1103/PhysRevE.89.032115}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. E}\ }\textbf {\bibinfo
{volume} {89}},\ \bibinfo {pages} {032115} (\bibinfo {year}
{2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Hardal}\ \emph {et~al.}(2017)\citenamefont {Hardal},
\citenamefont {Aslan}, \citenamefont {Wilson},\ and\ \citenamefont
{M\"ustecapl\ifmmode \imath \else \i \fi{}o\ifmmode~\breve{g}\else
\u{g}\fi{}lu}}]{Umit-OM}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Ali \"U.~C.}\
\bibnamefont {Hardal}}, \bibinfo {author} {\bibfnamefont {Nur}\ \bibnamefont
{Aslan}}, \bibinfo {author} {\bibfnamefont {C.~M.}\ \bibnamefont {Wilson}}, \
and\ \bibinfo {author} {\bibfnamefont {\"Ozg\"ur~E.}\ \bibnamefont
{M\"ustecapl\ifmmode \imath \else \i \fi{}o\ifmmode~\breve{g}\else
\u{g}\fi{}lu}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Quantum
heat engine with coupled superconducting resonators},}\ }\href {\doibase
10.1103/PhysRevE.96.062120} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. E}\ }\textbf {\bibinfo {volume} {96}},\ \bibinfo {pages} {062120}
(\bibinfo {year} {2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Shi}\ and\ \citenamefont
{Bhattacharya}(2013)}]{gen-quad}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {H.}~\bibnamefont
{Shi}}\ and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Bhattacharya}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Quantum
mechanical study of a generic quadratically coupled optomechanical system},}\
}\href {\doibase 10.1103/PhysRevA.87.043829} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {87}},\ \bibinfo
{pages} {043829} (\bibinfo {year} {2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Ghosh}\ \emph {et~al.}(2017)\citenamefont {Ghosh},
\citenamefont {Latune}, \citenamefont {Davidovich},\ and\ \citenamefont
{Kurizki}}]{Ghosh12156}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Ghosh}}, \bibinfo {author} {\bibfnamefont {C.~L.}\ \bibnamefont {Latune}},
\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Davidovich}}, \ and\
\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Kurizki}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Catalysis of heat-to-work conversion in
quantum machines},}\ }\href {\doibase 10.1073/pnas.1711381114} {\bibfield
{journal} {\bibinfo {journal} {Proc. Natl. Acad. Sci.}\ }\textbf {\bibinfo
{volume} {114}},\ \bibinfo {pages} {12156--12161} (\bibinfo {year}
{2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Ghosh}\ \emph {et~al.}(2019)\citenamefont {Ghosh},
\citenamefont {Mukherjee}, \citenamefont {Niedenzu},\ and\ \citenamefont
{Kurizki}}]{nonpass1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Arnab}\ \bibnamefont
{Ghosh}}, \bibinfo {author} {\bibfnamefont {Victor}\ \bibnamefont
{Mukherjee}}, \bibinfo {author} {\bibfnamefont {Wolfgang}\ \bibnamefont
{Niedenzu}}, \ and\ \bibinfo {author} {\bibfnamefont {Gershon}\ \bibnamefont
{Kurizki}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Are quantum
thermodynamic machines better than their classical counterparts?}}\ }\href
{\doibase 10.1140/epjst/e2019-800060-7} {\bibfield {journal} {\bibinfo
{journal} {Eur. Phys. J. Spec. Top.}\ }\textbf {\bibinfo {volume} {227}},\
\bibinfo {pages} {2043--2051} (\bibinfo {year} {2019})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Pusz}\ and\ \citenamefont
{Woronowicz}(1978)}]{pusz1978}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {W.}~\bibnamefont
{Pusz}}\ and\ \bibinfo {author} {\bibfnamefont {S.~L.}\ \bibnamefont
{Woronowicz}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Passive
states and KMS states for general quantum systems},}\ }\href
{https://projecteuclid.org:443/euclid.cmp/1103901491} {\bibfield {journal}
{\bibinfo {journal} {Comm. Math. Phys.}\ }\textbf {\bibinfo {volume} {58}},\
\bibinfo {pages} {273--290} (\bibinfo {year} {1978})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Lenard}(1978)}]{Lenard1978}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Lenard}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Thermodynamical
proof of the Gibbs formula for elementary quantum systems},}\ }\href
{\doibase 10.1007/BF01011769} {\bibfield {journal} {\bibinfo {journal} {J.
Stat. Phys.}\ }\textbf {\bibinfo {volume} {19}},\ \bibinfo {pages} {575--586}
(\bibinfo {year} {1978})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Allahverdyan}\ \emph {et~al.}(2004)\citenamefont
{Allahverdyan}, \citenamefont {Balian},\ and\ \citenamefont
{Nieuwenhuizen}}]{Allahverdyan2004}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~E.}\ \bibnamefont
{Allahverdyan}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont
{Balian}}, \ and\ \bibinfo {author} {\bibfnamefont {Th.~M.}\ \bibnamefont
{Nieuwenhuizen}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Maximal
work extraction from finite quantum systems},}\ }\href
{http://stacks.iop.org/0295-5075/67/i=4/a=565} {\bibfield {journal}
{\bibinfo {journal} {Europhys. Lett.}\ }\textbf {\bibinfo {volume} {67}},\ \bibinfo
{pages} {565} (\bibinfo {year} {2004})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {De~Palma}\ \emph {et~al.}(2016)\citenamefont
{De~Palma}, \citenamefont {Mari}, \citenamefont {Lloyd},\ and\ \citenamefont
{Giovannetti}}]{Palma2016}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Giacomo}\ \bibnamefont
{De~Palma}}, \bibinfo {author} {\bibfnamefont {Andrea}\ \bibnamefont {Mari}},
\bibinfo {author} {\bibfnamefont {Seth}\ \bibnamefont {Lloyd}}, \ and\
\bibinfo {author} {\bibfnamefont {Vittorio}\ \bibnamefont {Giovannetti}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Passive states as optimal
inputs for single-jump lossy quantum channels},}\ }\href {\doibase
10.1103/PhysRevA.93.062328} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {93}},\ \bibinfo {pages} {062328}
(\bibinfo {year} {2016})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Brown}\ \emph {et~al.}(2016)\citenamefont {Brown},
\citenamefont {Friis},\ and\ \citenamefont {Huber}}]{Brown2016}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Eric~G}\ \bibnamefont
{Brown}}, \bibinfo {author} {\bibfnamefont {Nicolai}\ \bibnamefont {Friis}},
\ and\ \bibinfo {author} {\bibfnamefont {Marcus}\ \bibnamefont {Huber}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Passivity and practical work
extraction using gaussian operations},}\ }\href
{http://stacks.iop.org/1367-2630/18/i=11/a=113028} {\bibfield {journal}
{\bibinfo {journal} {New J. Phys.}\ }\textbf {\bibinfo {volume} {18}},\
\bibinfo {pages} {113028} (\bibinfo {year} {2016})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Skrzypczyk}\ \emph {et~al.}(2015)\citenamefont
{Skrzypczyk}, \citenamefont {Silva},\ and\ \citenamefont
{Brunner}}]{Skrzypczyk2015}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Paul}\ \bibnamefont
{Skrzypczyk}}, \bibinfo {author} {\bibfnamefont {Ralph}\ \bibnamefont
{Silva}}, \ and\ \bibinfo {author} {\bibfnamefont {Nicolas}\ \bibnamefont
{Brunner}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Passivity,
complete passivity, and virtual temperatures},}\ }\href {\doibase
10.1103/PhysRevE.91.052133} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. E}\ }\textbf {\bibinfo {volume} {91}},\ \bibinfo {pages} {052133}
(\bibinfo {year} {2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Binder}\ \emph
{et~al.}(2015{\natexlab{a}})\citenamefont {Binder}, \citenamefont
{Vinjanampathy}, \citenamefont {Modi},\ and\ \citenamefont
{Goold}}]{Binder2015}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Felix~C}\ \bibnamefont
{Binder}}, \bibinfo {author} {\bibfnamefont {Sai}\ \bibnamefont
{Vinjanampathy}}, \bibinfo {author} {\bibfnamefont {Kavan}\ \bibnamefont
{Modi}}, \ and\ \bibinfo {author} {\bibfnamefont {John}\ \bibnamefont
{Goold}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Quantacell:
powerful charging of quantum batteries},}\ }\href
{http://stacks.iop.org/1367-2630/17/i=7/a=075015} {\bibfield {journal}
{\bibinfo {journal} {New J. Phys.}\ }\textbf {\bibinfo {volume} {17}},\
\bibinfo {pages} {075015} (\bibinfo {year} {2015}{\natexlab{a}})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Hovhannisyan}\ \emph {et~al.}(2013)\citenamefont
{Hovhannisyan}, \citenamefont {Perarnau-Llobet}, \citenamefont {Huber},\ and\
\citenamefont {Ac\'{\i}n}}]{Hovhannisyan2013}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Karen~V.}\
\bibnamefont {Hovhannisyan}}, \bibinfo {author} {\bibfnamefont {Mart\'{\i}}\
\bibnamefont {Perarnau-Llobet}}, \bibinfo {author} {\bibfnamefont {Marcus}\
\bibnamefont {Huber}}, \ and\ \bibinfo {author} {\bibfnamefont {Antonio}\
\bibnamefont {Ac\'{\i}n}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Entanglement generation is not necessary for optimal work extraction},}\
}\href {\doibase 10.1103/PhysRevLett.111.240401} {\bibfield {journal}
{\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {111}},\
\bibinfo {pages} {240401} (\bibinfo {year} {2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Gelbwaser-Klimovsky}\ \emph
{et~al.}(2013)\citenamefont {Gelbwaser-Klimovsky}, \citenamefont {Alicki},\
and\ \citenamefont {Kurizki}}]{Klimovsky2013}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont
{Gelbwaser-Klimovsky}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont
{Alicki}}, \ and\ \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Kurizki}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Work and energy
gain of heat-pumped quantized amplifiers},}\ }\href
{http://stacks.iop.org/0295-5075/103/i=6/a=60005} {\bibfield {journal}
{\bibinfo {journal} {Europhys. Lett.}\ }\textbf {\bibinfo {volume} {103}},\ \bibinfo
{pages} {60005} (\bibinfo {year} {2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Alicki}\ and\ \citenamefont
{Fannes}(2013)}]{Alicki2013}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Robert}\ \bibnamefont
{Alicki}}\ and\ \bibinfo {author} {\bibfnamefont {Mark}\ \bibnamefont
{Fannes}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Entanglement
boost for extractable work from ensembles of quantum batteries},}\ }\href
{\doibase 10.1103/PhysRevE.87.042123} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. E}\ }\textbf {\bibinfo {volume} {87}},\ \bibinfo
{pages} {042123} (\bibinfo {year} {2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Binder}\ \emph
{et~al.}(2015{\natexlab{b}})\citenamefont {Binder}, \citenamefont
{Vinjanampathy}, \citenamefont {Modi},\ and\ \citenamefont
{Goold}}]{Felix2015}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Felix}\ \bibnamefont
{Binder}}, \bibinfo {author} {\bibfnamefont {Sai}\ \bibnamefont
{Vinjanampathy}}, \bibinfo {author} {\bibfnamefont {Kavan}\ \bibnamefont
{Modi}}, \ and\ \bibinfo {author} {\bibfnamefont {John}\ \bibnamefont
{Goold}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Quantum
thermodynamics of general quantum processes},}\ }\href {\doibase
10.1103/PhysRevE.91.032119} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. E}\ }\textbf {\bibinfo {volume} {91}},\ \bibinfo {pages} {032119}
(\bibinfo {year} {2015}{\natexlab{b}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Niedenzu}\ \emph {et~al.}(2016)\citenamefont
{Niedenzu}, \citenamefont {Gelbwaser-Klimovsky}, \citenamefont {Kofman},\
and\ \citenamefont {Kurizki}}]{Niedenzu2016}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Wolfgang}\
\bibnamefont {Niedenzu}}, \bibinfo {author} {\bibfnamefont {David}\
\bibnamefont {Gelbwaser-Klimovsky}}, \bibinfo {author} {\bibfnamefont
{Abraham~G}\ \bibnamefont {Kofman}}, \ and\ \bibinfo {author} {\bibfnamefont
{Gershon}\ \bibnamefont {Kurizki}},\ }\bibfield {title} {\enquote {\bibinfo
{title} {On the operation of machines powered by quantum non-thermal
baths},}\ }\href {http://stacks.iop.org/1367-2630/18/i=8/a=083012} {\bibfield
{journal} {\bibinfo {journal} {New J. Phys.}\ }\textbf {\bibinfo {volume}
{18}},\ \bibinfo {pages} {083012} (\bibinfo {year} {2016})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Vinjanampathy}\ and\ \citenamefont
{Anders}(2016)}]{Vinjanampathy2016}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Sai}\ \bibnamefont
{Vinjanampathy}}\ and\ \bibinfo {author} {\bibfnamefont {Janet}\ \bibnamefont
{Anders}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Quantum
thermodynamics},}\ }\href {\doibase 10.1080/00107514.2016.1201896} {\bibfield
{journal} {\bibinfo {journal} {Contemp. Phys.}\ }\textbf {\bibinfo {volume}
{57}},\ \bibinfo {pages} {545--579} (\bibinfo {year} {2016})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Goold}\ \emph {et~al.}(2016)\citenamefont {Goold},
\citenamefont {Huber}, \citenamefont {Riera}, \citenamefont {del Rio},\ and\
\citenamefont {Skrzypczyk}}]{Goold2016}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {John}\ \bibnamefont
{Goold}}, \bibinfo {author} {\bibfnamefont {Marcus}\ \bibnamefont {Huber}},
\bibinfo {author} {\bibfnamefont {Arnau}\ \bibnamefont {Riera}}, \bibinfo
{author} {\bibfnamefont {Lídia}\ \bibnamefont {del Rio}}, \ and\ \bibinfo
{author} {\bibfnamefont {Paul}\ \bibnamefont {Skrzypczyk}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {The role of quantum information in
thermodynamics—a topical review},}\ }\href
{http://stacks.iop.org/1751-8121/49/i=14/a=143001} {\bibfield {journal}
{\bibinfo {journal} {J. Phys. A}\ }\textbf {\bibinfo {volume} {49}},\
\bibinfo {pages} {143001} (\bibinfo {year} {2016})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Niedenzu}\ \emph {et~al.}(2018)\citenamefont
{Niedenzu}, \citenamefont {Mukherjee}, \citenamefont {Ghosh}, \citenamefont
{Kofman},\ and\ \citenamefont {Kurizki}}]{Niedenzu2018}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Wolfgang}\
\bibnamefont {Niedenzu}}, \bibinfo {author} {\bibfnamefont {Victor}\
\bibnamefont {Mukherjee}}, \bibinfo {author} {\bibfnamefont {Arnab}\
\bibnamefont {Ghosh}}, \bibinfo {author} {\bibfnamefont {Abraham~G.}\
\bibnamefont {Kofman}}, \ and\ \bibinfo {author} {\bibfnamefont {Gershon}\
\bibnamefont {Kurizki}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Quantum engine efficiency bound beyond the second law of thermodynamics},}\
}\href {\doibase 10.1038/s41467-017-01991-6} {\bibfield {journal} {\bibinfo
{journal} {Nat. Commun.}\ }\textbf {\bibinfo {volume} {9}},\ \bibinfo {pages}
{165} (\bibinfo {year} {2018})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Levy}\ \emph {et~al.}(2016)\citenamefont {Levy},
\citenamefont {Di\'osi},\ and\ \citenamefont {Kosloff}}]{Levy2016}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Amikam}\ \bibnamefont
{Levy}}, \bibinfo {author} {\bibfnamefont {Lajos}\ \bibnamefont {Di\'osi}}, \
and\ \bibinfo {author} {\bibfnamefont {Ronnie}\ \bibnamefont {Kosloff}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Quantum flywheel},}\ }\href
{\doibase 10.1103/PhysRevA.93.052119} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {93}},\ \bibinfo
{pages} {052119} (\bibinfo {year} {2016})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Seok}\ \emph {et~al.}(2013)\citenamefont {Seok},
\citenamefont {Buchmann}, \citenamefont {Wright},\ and\ \citenamefont
{Meystre}}]{Seok_2013}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {H.}~\bibnamefont
{Seok}}, \bibinfo {author} {\bibfnamefont {L.~F.}\ \bibnamefont {Buchmann}},
\bibinfo {author} {\bibfnamefont {E.~M.}\ \bibnamefont {Wright}}, \ and\
\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Meystre}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Multimode strong-coupling quantum
optomechanics},}\ }\href {\doibase 10.1103/PhysRevA.88.063850} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume}
{88}},\ \bibinfo {pages} {063850} (\bibinfo {year} {2013})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Seok}\ \emph {et~al.}(2014)\citenamefont {Seok},
\citenamefont {Wright},\ and\ \citenamefont {Meystre}}]{Seok_2014}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {H.}~\bibnamefont
{Seok}}, \bibinfo {author} {\bibfnamefont {E.~M.}\ \bibnamefont {Wright}}, \
and\ \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Meystre}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Dynamic stabilization of an
optomechanical oscillator},}\ }\href {\doibase 10.1103/PhysRevA.90.043840}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo
{volume} {90}},\ \bibinfo {pages} {043840} (\bibinfo {year}
{2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Kim}\ \emph {et~al.}(2015)\citenamefont {Kim},
\citenamefont {Johansson},\ and\ \citenamefont {Nori}}]{NoriQuad}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Eun-jong}\
\bibnamefont {Kim}}, \bibinfo {author} {\bibfnamefont {J.~R.}\ \bibnamefont
{Johansson}}, \ and\ \bibinfo {author} {\bibfnamefont {Franco}\ \bibnamefont
{Nori}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Circuit analog of
quadratic optomechanics},}\ }\href {\doibase 10.1103/PhysRevA.91.033835}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo
{volume} {91}},\ \bibinfo {pages} {033835} (\bibinfo {year}
{2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Fink}\ \emph {et~al.}(2010)\citenamefont {Fink},
\citenamefont {Steffen}, \citenamefont {Studer}, \citenamefont {Bishop},
\citenamefont {Baur}, \citenamefont {Bianchetti}, \citenamefont {Bozyigit},
\citenamefont {Lang}, \citenamefont {Filipp}, \citenamefont {Leek},\ and\
\citenamefont {Wallraff}}]{masterEq}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~M.}\ \bibnamefont
{Fink}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Steffen}},
\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Studer}}, \bibinfo
{author} {\bibfnamefont {Lev~S.}\ \bibnamefont {Bishop}}, \bibinfo {author}
{\bibfnamefont {M.}~\bibnamefont {Baur}}, \bibinfo {author} {\bibfnamefont
{R.}~\bibnamefont {Bianchetti}}, \bibinfo {author} {\bibfnamefont
{D.}~\bibnamefont {Bozyigit}}, \bibinfo {author} {\bibfnamefont
{C.}~\bibnamefont {Lang}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Filipp}}, \bibinfo {author} {\bibfnamefont {P.~J.}\ \bibnamefont {Leek}}, \
and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Wallraff}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Quantum-to-classical
transition in cavity quantum electrodynamics},}\ }\href {\doibase
10.1103/PhysRevLett.105.163601} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {105}},\ \bibinfo {pages}
{163601} (\bibinfo {year} {2010})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Liao}\ and\ \citenamefont {Nori}(2013)}]{Liao_2013}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Jie-Qiao}\
\bibnamefont {Liao}}\ and\ \bibinfo {author} {\bibfnamefont {Franco}\
\bibnamefont {Nori}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Photon blockade in quadratically coupled optomechanical systems},}\ }\href
{\doibase 10.1103/PhysRevA.88.023853} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {88}},\ \bibinfo
{pages} {023853} (\bibinfo {year} {2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Ro{\ss}nagel}\ \emph {et~al.}(2016)\citenamefont
{Ro{\ss}nagel}, \citenamefont {Dawkins}, \citenamefont {Tolazzi},
\citenamefont {Abah}, \citenamefont {Lutz}, \citenamefont {Schmidt-Kaler},\
and\ \citenamefont {Singer}}]{SingleAtom}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Johannes}\
\bibnamefont {Ro{\ss}nagel}}, \bibinfo {author} {\bibfnamefont {Samuel~T.}\
\bibnamefont {Dawkins}}, \bibinfo {author} {\bibfnamefont {Karl~N.}\
\bibnamefont {Tolazzi}}, \bibinfo {author} {\bibfnamefont {Obinna}\
\bibnamefont {Abah}}, \bibinfo {author} {\bibfnamefont {Eric}\ \bibnamefont
{Lutz}}, \bibinfo {author} {\bibfnamefont {Ferdinand}\ \bibnamefont
{Schmidt-Kaler}}, \ and\ \bibinfo {author} {\bibfnamefont {Kilian}\
\bibnamefont {Singer}},\ }\bibfield {title} {\enquote {\bibinfo {title} {A
single-atom heat engine},}\ }\href {\doibase 10.1126/science.aad6320}
{\bibfield {journal} {\bibinfo {journal} {Science}\ }\textbf {\bibinfo
{volume} {352}},\ \bibinfo {pages} {325--329} (\bibinfo {year}
{2016})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Klaers}\ \emph
{et~al.}(2017{\natexlab{b}})\citenamefont {Klaers}, \citenamefont {Faelt},
\citenamefont {Imamoglu},\ and\ \citenamefont {Togan}}]{OttoShape}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Jan}\ \bibnamefont
{Klaers}}, \bibinfo {author} {\bibfnamefont {Stefan}\ \bibnamefont {Faelt}},
\bibinfo {author} {\bibfnamefont {Atac}\ \bibnamefont {Imamoglu}}, \ and\
\bibinfo {author} {\bibfnamefont {Emre}\ \bibnamefont {Togan}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Squeezed thermal reservoirs as a
resource for a nanomechanical engine beyond the Carnot limit},}\ }\href
{\doibase 10.1103/PhysRevX.7.031044} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. X}\ }\textbf {\bibinfo {volume} {7}},\ \bibinfo {pages}
{031044} (\bibinfo {year} {2017}{\natexlab{b}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Esposito}\ and\ \citenamefont {den
Broeck}(2011)}]{Esposito2011}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Esposito}}\ and\ \bibinfo {author} {\bibfnamefont {C.~Van}\ \bibnamefont
{den Broeck}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Second law
and Landauer principle far from equilibrium},}\ }\href
{http://stacks.iop.org/0295-5075/95/i=4/a=40004} {\bibfield {journal}
{\bibinfo {journal} {Europhys. Lett.}\ }\textbf {\bibinfo {volume} {95}},\ \bibinfo
{pages} {40004} (\bibinfo {year} {2011})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Horodecki}\ and\ \citenamefont
{Oppenheim}(2013)}]{Horodecki2013}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Michal}\ \bibnamefont
{Horodecki}}\ and\ \bibinfo {author} {\bibfnamefont {Jonathan}\ \bibnamefont
{Oppenheim}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Fundamental
limitations for quantum and nanoscale thermodynamics},}\ }\href
{http://dx.doi.org/10.1038/ncomms3059} {\bibfield {journal} {\bibinfo
{journal} {Nat. Commun.}\ }\textbf {\bibinfo {volume} {4}},\ \bibinfo {pages}
{2059} (\bibinfo {year} {2013})},\ \bibinfo {note} {article}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Watanabe}\ \emph {et~al.}(2017)\citenamefont
{Watanabe}, \citenamefont {Venkatesh}, \citenamefont {Talkner},\ and\
\citenamefont {del Campo}}]{PhysRevLett.118.050601}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Gentaro}\ \bibnamefont
{Watanabe}}, \bibinfo {author} {\bibfnamefont {B.~Prasanna}\ \bibnamefont
{Venkatesh}}, \bibinfo {author} {\bibfnamefont {Peter}\ \bibnamefont
{Talkner}}, \ and\ \bibinfo {author} {\bibfnamefont {Adolfo}\ \bibnamefont
{del Campo}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Quantum
performance of thermal machines over many cycles},}\ }\href {\doibase
10.1103/PhysRevLett.118.050601} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {118}},\ \bibinfo {pages}
{050601} (\bibinfo {year} {2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Para\"{\i}so}\ \emph {et~al.}(2015)\citenamefont
{Para\"{\i}so}, \citenamefont {Kalaee}, \citenamefont {Zang}, \citenamefont
{Pfeifer}, \citenamefont {Marquardt},\ and\ \citenamefont
{Painter}}]{Oskar_2015}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Taofiq~K.}\
\bibnamefont {Para\"{\i}so}}, \bibinfo {author} {\bibfnamefont {Mahmoud}\
\bibnamefont {Kalaee}}, \bibinfo {author} {\bibfnamefont {Leyun}\
\bibnamefont {Zang}}, \bibinfo {author} {\bibfnamefont {Hannes}\ \bibnamefont
{Pfeifer}}, \bibinfo {author} {\bibfnamefont {Florian}\ \bibnamefont
{Marquardt}}, \ and\ \bibinfo {author} {\bibfnamefont {Oskar}\ \bibnamefont
{Painter}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Position-squared coupling in a tunable photonic crystal optomechanical
cavity},}\ }\href {\doibase 10.1103/PhysRevX.5.041024} {\bibfield {journal}
{\bibinfo {journal} {Phys. Rev. X}\ }\textbf {\bibinfo {volume} {5}},\
\bibinfo {pages} {041024} (\bibinfo {year} {2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Kalaee}\ \emph {et~al.}(2016)\citenamefont {Kalaee},
\citenamefont {Para\"{i}so}, \citenamefont {Pfeifer},\ and\ \citenamefont
{Painter}}]{Kalaee_2016}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Kalaee}}, \bibinfo {author} {\bibfnamefont {T.~K.}\ \bibnamefont
{Para\"{i}so}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont
{Pfeifer}}, \ and\ \bibinfo {author} {\bibfnamefont {O.}~\bibnamefont
{Painter}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Design of a
quasi-2D photonic crystal optomechanical cavity with tunable, large
$x^2$-coupling},}\ }\href {\doibase 10.1364/OE.24.021308} {\bibfield {journal}
{\bibinfo {journal} {Opt. Express}\ }\textbf {\bibinfo {volume} {24}},\
\bibinfo {pages} {21308--21328} (\bibinfo {year} {2016})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Li}\ \emph {et~al.}(2012)\citenamefont {Li},
\citenamefont {Liu}, \citenamefont {Yi}, \citenamefont {Zou}, \citenamefont
{Ren},\ and\ \citenamefont {Xiao}}]{PhysRevA.85.053832}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Hao-Kun}\ \bibnamefont
{Li}}, \bibinfo {author} {\bibfnamefont {Yong-Chun}\ \bibnamefont {Liu}},
\bibinfo {author} {\bibfnamefont {Xu}~\bibnamefont {Yi}}, \bibinfo {author}
{\bibfnamefont {Chang-Ling}\ \bibnamefont {Zou}}, \bibinfo {author}
{\bibfnamefont {Xue-Xin}\ \bibnamefont {Ren}}, \ and\ \bibinfo {author}
{\bibfnamefont {Yun-Feng}\ \bibnamefont {Xiao}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Proposal for a near-field optomechanical system
with enhanced linear and quadratic coupling},}\ }\href {\doibase
10.1103/PhysRevA.85.053832} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {85}},\ \bibinfo {pages} {053832}
(\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Liao}\ and\ \citenamefont {Nori}(2014)}]{Liao2014}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Jie-Qiao}\
\bibnamefont {Liao}}\ and\ \bibinfo {author} {\bibfnamefont {Franco}\
\bibnamefont {Nori}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Single-photon quadratic optomechanics},}\ }\href
{https://doi.org/10.1038/srep06302} {\bibfield {journal} {\bibinfo
{journal} {Scientific Reports}\ }\textbf {\bibinfo {volume} {4}},\ \bibinfo
{pages} {6302} (\bibinfo {year} {2014})},\ \bibinfo {note}
{article}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Gu}\ \emph {et~al.}(2015)\citenamefont {Gu},
\citenamefont {Yi}, \citenamefont {Sun},\ and\ \citenamefont
{Xu}}]{PhysRevA.92.023811}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Wen-ju}\ \bibnamefont
{Gu}}, \bibinfo {author} {\bibfnamefont {Zhen}\ \bibnamefont {Yi}}, \bibinfo
{author} {\bibfnamefont {Li-hui}\ \bibnamefont {Sun}}, \ and\ \bibinfo
{author} {\bibfnamefont {Da-hai}\ \bibnamefont {Xu}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Mechanical cooling in single-photon
optomechanics with quadratic nonlinearity},}\ }\href {\doibase
10.1103/PhysRevA.92.023811} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {92}},\ \bibinfo {pages} {023811}
(\bibinfo {year} {2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Qiao}(2017)}]{PhysRevA.96.013860}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Lei}\ \bibnamefont
{Qiao}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Single-photon
transport through a waveguide coupling to a quadratic optomechanical
system},}\ }\href {\doibase 10.1103/PhysRevA.96.013860} {\bibfield {journal}
{\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {96}},\
\bibinfo {pages} {013860} (\bibinfo {year} {2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Zheng}\ \emph {et~al.}(2019)\citenamefont {Zheng},
\citenamefont {Yin}, \citenamefont {Bin}, \citenamefont {L\"u},\ and\
\citenamefont {Wu}}]{PhysRevA.99.013804}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Li-Li}\ \bibnamefont
{Zheng}}, \bibinfo {author} {\bibfnamefont {Tai-Shuang}\ \bibnamefont {Yin}},
\bibinfo {author} {\bibfnamefont {Qian}\ \bibnamefont {Bin}}, \bibinfo
{author} {\bibfnamefont {Xin-You}\ \bibnamefont {L\"u}}, \ and\ \bibinfo
{author} {\bibfnamefont {Ying}\ \bibnamefont {Wu}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Single-photon-induced phonon blockade in a
hybrid spin-optomechanical system},}\ }\href {\doibase
10.1103/PhysRevA.99.013804} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {99}},\ \bibinfo {pages} {013804}
(\bibinfo {year} {2019})}\BibitemShut {NoStop}
\end{thebibliography}
\end{document}
\begin{document}
\title{An Order-Invariant and Interpretable Hierarchical Dilated Convolution Neural Network for Chemical Fault Detection and Diagnosis}
\author{Mengxuan Li,
Peng Peng, \IEEEmembership{Member, IEEE,}
Min Wang, \IEEEmembership{Member, IEEE,}
Hongwei Wang
\thanks{*This work was supported by the National Key R\&D Program of China under Grant 2020YFB1707803.(Corresponding author: Peng Peng and Hongwei Wang.)}
\thanks{
Mengxuan Li is with the College of Computer Science and Technology in Zhejiang University, Hangzhou, 310013, China. (E-mail: [email protected]).
Peng Peng and Hongwei Wang are with Zhejiang University and the University of Illinois Urbana–Champaign Institute, Haining, 314400, China. (E-mail: [email protected], [email protected]).
Min Wang is with the School of Automation Engineering, University of Electronic Science and Technology of China, Chengdu 611731, China, (e-mail: [email protected]).
}
\thanks{This work has been submitted to the IEEE Transactions on Automation Science and Engineering for possible publication. Copyright may be transferred without notice, after which this version may no longer be accessible.}
}
\maketitle
\begin{abstract}
Fault detection and diagnosis is significant for reducing maintenance costs and improving health and safety in chemical processes. Convolution neural network (CNN) is a popular deep learning algorithm with many successful applications in chemical fault detection and diagnosis tasks. However, convolution layers in CNN are very sensitive to the order of features, which can lead to instability in the processing of tabular data. An optimal order of features results in better performance of CNN models, but it is expensive to seek such an optimal order. In addition, because of the encapsulation mechanism of feature extraction, most CNN models are opaque and have poor interpretability, thus failing to identify root-cause features without human supervision. These difficulties inevitably limit the performance and credibility of CNN methods. In this paper, we propose an order-invariant and interpretable hierarchical dilated convolution neural network (HDLCNN), which is composed of feature clustering, dilated convolution and the Shapley additive explanations (SHAP) method. The novelty of HDLCNN lies in its capability of processing tabular data with features of arbitrary order without seeking the optimal order, due to the ability to agglomerate correlated features of feature clustering and the large receptive field of dilated convolution. Then, the proposed method provides interpretability by including the SHAP values to quantify feature contribution. Therefore, the root-cause features can be identified as the features with the highest contribution. Computational experiments are conducted on the Tennessee Eastman chemical process benchmark dataset. Compared with the other methods, the proposed HDLCNN-SHAP method achieves better performance on processing tabular data with features of arbitrary order, detecting faults, and identifying the root-cause features.
\end{abstract}
\renewcommand{\abstractname}{Note to Practitioners}
\begin{abstract}
This paper was motivated by the problem of fault detection and diagnosis to process multiple variables and identify the root-cause features in real chemical processes. In this case, the order of features affects the fault detection performance and the precise root-cause feature is required to be identified to avoid the same faults. This paper presents a novel order-invariant and interpretable framework for fault detection and root cause analysis in real chemical processes to process data with features of arbitrary order, thus reducing the burden on users. It utilizes the collected historical data for training and requires no human supervision. The newly collected data is automatically classified into a normal or fault type. Once a fault happens, the corresponding root-cause feature is identified without any prior knowledge. In our future work, we plan to focus on the faults with multiple root-cause features. In addition, the incomplete dataset and simultaneous-fault diagnosis are worthy of investigation.
\end{abstract}
\begin{IEEEkeywords}
Fault Diagnosis, Deep Learning, Dilated Convolution Neural Network, Interpretability
\end{IEEEkeywords}
\IEEEpeerreviewmaketitle
\section{Introduction}
\IEEEPARstart{W}{ith} the advent of Industry 4.0, chemical processes become more intelligent and automatic. This trend has raised the urgent need for detecting anomalies and diagnosing faults efficiently and correctly. Chemical faults result in chemical contamination, potential explosion, and other serious chemical hazards, and thus intelligent fault diagnosis methods are required to find the underlying causes of the faults. Current fault detection and diagnosis methods can be classified into two categories: model-based and data-based. Model-based methods are less accurate with higher level of complexity as they depend on the modeling of complex physical and chemical processes. Therefore, data-based methods have become increasingly popular recently. Among these methods, deep learning methods have been used widely and achieved electrifying performance in fault detection and diagnosis problems \cite{xie2021intelligent}.
In particular, convolution neural network (CNN) is one of the most representative deep learning architectures based on convolution calculations. The main advantage of CNN is that it automatically detects the important features without any human supervision. Moreover, it can easily process high-dimensional data by sharing convolution kernels. Currently, some work has been done to show the potential of utilizing CNN to detect faults in chemical processes. For example, Wang \emph{et al.} proposed a feature fusion fault diagnosis method using a normalized CNN for complex chemical processes \cite{wang2021fault}. Huang \emph{et al.} introduced a novel fault diagnosis method that consists of sliding window processing and a CNN model \cite{huang2022novel}. However, applying the current CNN models in real chemical processes is still challenging. On the one hand, CNN models rely on convolution operation to extract information within each size-fixed convolution kernel, leading to the result that only the information among adjacent features can be extracted. Different from images, the data of chemical processes involves multiple variables through the time domain, which is considered as tabular data. Thus the order of these variables determines the extracted information through kernels, resulting in instability of processing tabular data by convolution layers. On the other hand, the existing CNN methods only provide classification results while the analysis of the root-cause features is lacking. Because of the encapsulation mechanism of feature extraction, most CNN models are opaque without any knowledge of the internal working principles, thus users have no information on feature contribution to the prediction. Without analyzing the underlying root-cause features, the same faults will repeat and result in serious consequences.
In this paper, we propose an order-invariant and interpretable hierarchical dilated convolution neural network (HDLCNN) composed of feature clustering, dilated convolution and the Shapley additive explanations (SHAP) method to process tabular data with features of arbitrary order and obtain credible root-cause features. Dilated convolution, a variant of CNN that expands the kernel by inserting holes between the kernel elements, is utilized in our method \cite{yu2017dilated}. It is adopted to increase the receptive field size without increasing the number of model parameters. Since the receptive field is the corresponding region in the input that determines a unit in a certain layer in the network, dilated convolution with a larger receptive field has the capability of extracting global features thus weakening the impact of the order of features. To further eliminate the effects of the order, we utilize a feature clustering method to agglomerate highly correlated features before convolution layers. A hierarchical clustering method is applied since it builds a hierarchy of clusters and is not affected by the input order of data. In addition, a major difficulty to apply CNN methods in real chemical processes is to get credible fault detection results and obtain exact root-cause features. This means that interpretability, the degree to which a human can understand the model's result, is vital for humans to trust the decisions made by complex models. To solve this problem, we apply the SHAP method to interpret the complex black box model, which is a method to explain individual predictions based on the game theoretically optimal Shapley values \cite{lundberg2017unified-SHAP}. Compared with other interpretability methods, it has the advantages of a solid theoretical foundation in game theory and intuitive visualization based on the original data. Also, it is model-agnostic while providing both local and global interpretability.
Therefore, we utilize the SHAP method to provide interpretability by computing the SHAP values to quantify feature contribution and then obtaining the root-cause features.
The main contributions of this article are as follows:
\begin{itemize}
\item A dilated convolution based order-invariant classifier, namely HDLCNN, is developed to solve chemical fault detection and diagnosis. Dilated convolution is a variant of CNN with larger receptive field, enabling the proposed method to extract more information within a size-fixed convolution kernel thus achieving better performance on processing tabular data with features of arbitrary order.
\item Hierarchical clustering algorithm is applied to agglomerate highly correlated features before the convolution layers to further weaken the effect of the order of features. As a data pre-processing method, hierarchical clustering treats each input as a separate cluster and then sequentially merges similar clusters thus being unaffected on the order.
\item SHAP method is applied to provide credible and visual interpretability of the classification results from HDLCNN. The computed SHAP values quantify feature contribution and are utilized to obtain the root-cause features.
\item The experimental results on the Tennessee Eastman (TE) chemical process benchmark dataset demonstrate that, in contrast to the existing methods, the proposed HDLCNN-SHAP method achieves better performance in the key operations of processing tabular data with features of arbitrary order, detecting faults, and identifying the root-cause features.
\end{itemize}
The rest of this paper is organized as follows. The related work is introduced and the motivation of this work is described in Section \ref{background}. The proposed order-invariant and interpretable chemical fault detection and diagnosis method is shown in Section \ref{method}. The experiments of our proposed method based on the TE dataset are introduced in Section \ref{experiment}. And Section \ref{conclusion} summarizes this paper.
\section{Background Theory and Motivation}
\label{background}
\subsection{Related Work}
Till now, researchers have shown the effectiveness of applying CNN for chemical fault detection and diagnosis. For example, Chadha \emph{et al.} proposed a 1-D CNN model to extract meaningful features from the time series sequence \cite{chadha2019time}. Wang \emph{et al.} proposed a fault diagnosis method using deep learning multi-model fusion based on CNN and long short-term memory (LSTM) \cite{wang2020intelligent}. Gu \emph{et al.} proposed an incremental CNN model to detect faults in a real chemical industrial process \cite{gu2021imbalance}. He \emph{et al.} proposed a multi-block temporal convolutional network to learn the temporal-correlated features \cite{he2021multiblock-mbtcn}. However, these methods mainly focus on extracting temporal features and ignore the effect of the order of features. Zhong \emph{et al.} discussed the impact of the arrangement order of features on fault diagnosis and used an enumeration method to find the optimal order \cite{zhong2019novel-optimal-order}. However, it is time-consuming to find the optimal order and the problem will be exacerbated since chemical processes involve multiple variables. Therefore, it is necessary to design an effective network that is less affected by the order of features. In this paper, we propose an order-invariant fault detection method based on hierarchical clustering and dilated convolution. The proposed HDLCNN method can process chemical tabular data with arbitrary feature order and provide accurate fault classification results.
Another limitation of the current CNN methods is that only the classification results are provided without an explanation of the results. Considered as black box systems, the CNN models produce useful fault classification results without revealing any information about their internal workings. To avoid the black box problem, interpretability methods have aroused the interest of researchers, which aim to help humans readily understand the reasoning behind predictions made by the complex models. Interpretability methods can be divided into two categories: ante-hoc interpretability and post-hoc interpretability. The former one refers to designing interpretable models to directly visualize and interpret the internal information while the latter one refers to applying interpretation methods after model training. Generally, researchers prefer to use an ante-hoc interpretable Bayesian network (BN) to recognize the root-cause features in chemical processes. BN is a probabilistic graphical model based on random variables and the corresponding conditional probability. It can be used to identify the propagation probability among measurable variables to determine the root-cause features. Liu \emph{et al.} proposed a strong relevant mechanism Bayesian network by combining mechanism correlation analysis and process state transition to identify the unmonitored root-cause features \cite{liu2022fault-bn}. Liu \emph{et al.} proposed a multi-state BN to recognize a node into multiple states \cite{liu2022optimized-bn}. However, it is expensive to design a BN model since it requires prior knowledge and expert rules. Moreover, no universally acknowledged method has been developed for constructing networks from raw data. On the contrary, CNN models provide accurate classification results without human supervision.
Therefore, utilizing suitable post-hoc interpretability methods to explain the internal parameters of CNN models is a possible solution to visualize the feature contribution and thus identify the root-cause features. In this paper, we propose an interpretable fault diagnosis method based on the SHAP method. The root-cause features are obtained with SHAP values, thus greatly improving the practicability of fault diagnosis methods in real chemical processes.
\begin{figure}
\caption{The tabular data with $n$ features and $m$ time duration. Highly correlated features are closer in the optimal order.
}
\label{fig_tabular}
\end{figure}
\subsection{Motivation of This Work}
\begin{figure}
\caption{The correlation coefficients of the 22 features in the TE dataset.}
\label{fig_corr}
\end{figure}
Fault detection and diagnosis technologies are vital in real chemical processes since they aim to discover the faults in the early stage and thus reduce maintenance costs. For this task, the current CNN methods only focus on extracting temporal features in multi-variable chemical processes. However, chemical processes have tabular data involving both time and feature domains. Similar to the effect of the pixel positions in images, the order of features in tabular data also affects the classification results, since it determines the information extracted within a size-fixed convolution kernel. Although deep CNN models can achieve global receptive field and extract order-invariant information ultimately, they are computationally expensive and inefficient for chemical data. Therefore, we aim to design a shallow and order-invariant CNN model. Fig. \ref{fig_tabular} shows an example of tabular data with $n$ features and $m$ time duration. Each row represents a feature and the order of these rows determines the information extracted within a kernel. The left part shows the raw data with original order of features, and the right part shows the optimal order by changing the order of features to make highly correlated features closer. We seek the optimal order since it results in the best performance of CNN model. To further explain this problem, we conduct some experiments on the TE dataset to analyze the correlation of these features. The TE dataset contains 22 continuous process features and the corresponding correlation coefficients are shown in Fig. \ref{fig_corr}. From Fig. \ref{fig_corr}, we can see that there are 14 pairs of features with correlation coefficients greater than 0.7. Therefore, utilizing convolution kernels to extract features of the original order will lose the information related to the correlation of features, since the closely related variables are not considered effectively.
Zhong \emph{et al.} proved the impact of the order led on model performance and devoted much efforts to find the optimal order \cite{zhong2019novel-optimal-order}. However, using enumeration method to select the optimal order is inefficient. An alternative solution is to design a model which is less affected by the order of features and can process arbitrary order effectively. In this paper, we propose an order-invariant HDLCNN model based on feature clustering and dilated convolution to extract features with arbitrary order. Dilated convolution is utilized to extract information involving more variables due to its larger receptive field. In addition, as a data pre-processing method, feature clustering is used to further agglomerate correlated features before training the dilated convolution model. More details are described in Section \ref{method}. This design enables the proposed model to effectively process the tabular data with features of arbitrary order thus no longer necessary to seek the optimal order.
On the other hand, chemical processes have a very high risk of serious incident consequences by handling and processing materials under hazardous conditions. Therefore, once a fault is detected, it is necessary to analyze the corresponding root causes to identify the underlying issues and avoid the same faults. Generally, researchers design ante-hoc interpretable BN models to identify root-cause features. But these methods require prior knowledge and expert rules, leading to much manual intervention for real applications. On the contrary, post-hoc interpretability methods analyze the complex models after training and require no prior knowledge, and thus can be combined with opaque CNN models to leverage their strengths of automatically extracting the important features. In particular, specific post-hoc interpretability methods are proposed to explain the CNN-based models and make them more transparent. Zhou \emph{et al.} utilized global average pooling layers in CNN models to generate class activation maps (CAM), which indicate the discriminative regions used by the CNNs for prediction \cite{zhou2016learning-cam}. Further, Selvaraju \emph{et al.} extended the CAM method to any CNN-based differentiable architecture by using the gradients of targets flowing into the final convolution layer, namely gradient-weighted CAM (Grad-CAM) \cite{selvaraju2017grad-cam}. However, these CAM-based methods only produce a coarse localization map highlighting the important regions \cite{selvaraju2017grad-cam} while fault diagnosis requires pixel-level explanations and precise localization of the correct root-cause feature. Therefore, these model-specific interpretability methods are not satisfactory. In contrast, model-agnostic methods are more flexible and independent of the underlying machine learning model.
For example, partial dependence plot (PDP) \cite{friedman2001greedy-pdp} and individual conditional expectation plot (ICEP) \cite{goldstein2015peeking-icep} are designed to display the effect of a feature on the prediction. However, they assume the independence of each feature and fail to process multiple features simultaneously, thus they are inappropriate for chemical processes. Besides, Ribeiro \emph{et al.} proposed a technique that explains individual predictions by training local surrogate models to approximate the predictions of the underlying black box model, namely local interpretable model-agnostic explanations (LIME) \cite{ribeiro2016should-LIME}. But it also ignores the correlation between features and only provides local explanations. In contrast, SHAP provides both local and global explanations by computing the contribution of each feature to the corresponding prediction \cite{lundberg2017unified-SHAP}. Also, it considers the interaction effect after obtaining the individual feature effects. Therefore, we apply the SHAP method to improve the interpretability of our CNN-based model. The visualization of feature contribution and analysis of root-cause features based on SHAP values are shown in Section \ref{experiment}.
\subsection{Dilated Convolution}
CNN is a representative deep learning model which has been widely used in different fields. It takes the raw data, trains the model, then extracts the features automatically for better classification. Although increasing the depth of CNN models can achieve larger receptive field size and higher performance, the number of parameters will greatly increase. With this consideration, dilated convolution is proposed. The key idea of dilated CNN (DLCNN) is to maintain the high resolution of feature maps and enlarge the receptive field size in CNN \cite{yu2017dilated}. It expands the kernel by inserting holes among original elements, thus enlarging the receptive field. Compared with traditional CNN, it involves a hyper-parameter named dilation rate which indicates a spacing between the non-zero values in a kernel. Fig. \ref{fig_CNN_dilated} shows the comparison of CNN and DLCNN. Theoretically, CNN can be seen as a DLCNN with dilation rate $r = 1$ and the normal convolution calculation is as follows:
\begin{equation}
Y[i,j] = \sum\nolimits_{m+n=i}\sum\nolimits_{p+q=j}H[m,n] \cdot X[p,q]
\label{CNN_equ}
\end{equation}
where $Y$, $H$ and $X$ are the 2-D output, filter and input respectively. A sample of DLCNN is introduced in Fig. \ref{fig_CNN_dilated} (b). With a dilation rate $r$, ($r - 1$) data points will be skipped in the process of convolution. The dilated convolution calculation is defined as follows:
\begin{equation}
Y_r[i,j] = \sum\nolimits_{rm+n=i}\sum\nolimits_{rp+q=j}H[m,n] \cdot X[p,q]
\label{DCNN_equ}
\end{equation}
With a dilation rate $r$, this method offers a larger receptive field at the same computational cost. While the number of weights in the kernel is unchanged, they are no longer applied to spatially adjacent samples. Define $c_{l}$ as the receptive field size of the feature map $y_{l}$ at the layer $l$ in a DLCNN. Then, the receptive field size of layer $l$ can be computed as follows:
\begin{equation}
c_{l} = s_{l+1} \cdot c_{l+1} + [(r_{l+1}(h_{l+1} - 1) + 1) - s_{l+1}]
\label{DCNN_receptice_equ}
\end{equation}
where $s_{l}$ refers to the stride and $h_{l}$ indicates the kernel size. It is obvious that the receptive field increases linearly as the dilation rate increases. This enables models to have a larger receptive field with the same number of parameters and computation costs.
\begin{figure}
\caption{The comparison of CNN and DLCNN.}
\label{fig_CNN_dilated}
\end{figure}
\begin{figure*}
\caption{The overall architecture of the proposed order-invariant and interpretable HDLCNN-SHAP method for chemical fault detection and diagnosis.}
\label{fig_arch}
\end{figure*}
\subsection{SHAP Method}
In this paper, we utilize the SHAP method \cite{lundberg2017unified-SHAP} to provide interpretability and obtain the root-cause features, which is based on shapley values \cite{shapley1997value-shapely} and game theory \cite{vstrumbelj2014explaining-game-theory}. It computes the shapley values for each feature of the data samples and these values indicate the contribution that the feature generates in the prediction. More specifically, a shapley value is the average marginal contribution of a feature among all possible coalitions \cite{shapley1997value-shapely}. Consider a simple linear model:
\begin{equation}
\hat{f}(x)=\beta_{0}+\beta_{1} x_{1}+\ldots+\beta_{p} x_{p}
\end{equation}
where $x$ is a data sample with $p$ features and each $x_i$ is a feature value. $\beta_i$ is the weight of the feature $i$. The contribution $\phi_i$ of the feature $i$ on the prediction $\hat{f}(x)$ can be computed as follows:
\begin{equation}
\phi_{i}(\hat{f})=\beta_{i} x_{i}-E\left(\beta_{i} X_{i}\right)=\beta_{i} x_{i}-\beta_{i} E\left(X_{i}\right)
\end{equation}
where $E\left(\beta_{i} X_{i}\right)$ is the mean effect estimate for the feature $i$. Then, the contribution is the difference between the feature effect and the average effect.
On this basis, SHAP defines an explanation model $g$ based on the additivity property of shapley values as follows:
\begin{equation}
\label{SHAP_equ}
g\left(z^{\prime}\right)=\phi_{0}+\sum_{i=1}^{M} \phi_{i} z_{i}^{\prime}
\end{equation}
where $z^{\prime} \in \{0, 1\}^M$ is a binary coalition vector of features, $M$ is the number of input features and $\phi_i$ is the contribution of the feature $i$. Assuming a model takes a feature subset $S$ as input and outputs a prediction $Y_{S}\left(x_{S}\right)$, the shapley value $\phi_i$ is computed as follows:
\begin{equation}
\phi_{i}=\sum_{S \subseteq F \backslash\{i\}}\frac{|S| !(|F|-|S|-1) !}{|F| !}\cdot\boldsymbol{Y}
\end{equation}
\begin{equation}
\boldsymbol{Y}=Y_{S \cup\{i\}}\left(x_{S \cup\{i\}}\right)-Y_{S}\left(x_{S}\right)
\end{equation}
where $S$ is a subset of the features and $F$ is the set of all features \cite{lundberg2017unified-SHAP}.
\section{METHODOLOGY}
\label{method}
In this paper, we propose an order-invariant and interpretable fault diagnosis method, namely HDLCNN-SHAP, based on feature clustering, dilated convolution and the SHAP method for chemical fault detection and diagnosis. The proposed method mainly contains two parts: a hierarchical dilated convolution model and an explainer based on the SHAP method. The input data is first pre-processed by the feature clustering method and then fed into the hierarchical dilated convolution model to provide classification results. Then the trained model is seen as a black box and we apply the SHAP method to interpret the model performance. The overall architecture is shown in Fig. \ref{fig_arch}. In the following subsections, we will introduce the main flow of our method in detail.
\subsection{Data Pre-processing}
\label{feature clusetering}
As shown in Fig. \ref{fig_corr}, different features may be strongly correlated, which requires the ability to extract the hidden information of the relevance among these features. In this case, it is difficult for a convolution layer to extract enough information within size-fixed kernels. Instead of seeking the optimal order, we cluster the correlated features before training the dilated convolution model. As a data pre-processing step, we apply hierarchical clustering to divide the features into two categories based on relevance. Compared with other clustering methods, hierarchical clustering is easy to understand and implement. More importantly, the clustering results are not affected by the input order of data. Assume that we have a set of training samples: $\boldsymbol{X}=\left\{\boldsymbol{x}_{\mathbf{1}}, \boldsymbol{x}_{\mathbf{2}}, \boldsymbol{x}_{\mathbf{3}}, \ldots, \boldsymbol{x}_{\boldsymbol{n}}\right\}$, where $\boldsymbol{x}_{\boldsymbol{i}} \in \boldsymbol{R}^{\boldsymbol{p}}$. Based on it, we build a set of the $\boldsymbol{p}$ features: $\boldsymbol{F}=\left\{\boldsymbol{f}_{\mathbf{1}}, \boldsymbol{f}_{\mathbf{2}}, \boldsymbol{f}_{\mathbf{3}}, \ldots, \boldsymbol{f}_{\boldsymbol{p}}\right\}$, where $\boldsymbol{f}_{\boldsymbol{i}} \in \boldsymbol{R}^{\boldsymbol{n}}$. Then we divide these $\boldsymbol{p}$ features into two categories based on the hierarchical clustering algorithm. It mainly contains the following three steps:
\begin{enumerate}
\item Each feature $\boldsymbol{f}_{\boldsymbol{i}}$ is treated as a single cluster. Then we compute the euclidean distance $d(\boldsymbol{f}_{\boldsymbol{i}}, \boldsymbol{f}_{\boldsymbol{j}})$ between two clusters $\boldsymbol{f}_{\boldsymbol{i}}$ and $\boldsymbol{f}_{\boldsymbol{j}}$.
\item The two closest clusters $\boldsymbol{f}_{\boldsymbol{i}}, \boldsymbol{f}_{\boldsymbol{j}}$ are merged into a single cluster $\boldsymbol{f}_{\boldsymbol{s}}$. Then $\boldsymbol{f}_{\boldsymbol{i}}, \boldsymbol{f}_{\boldsymbol{j}}$ are removed and $\boldsymbol{f}_{\boldsymbol{s}}$ is added.
\item Iterate the previous step until there is only one cluster remaining.
\end{enumerate}
At each iteration, the distance matrix is updated to reflect the distance of the newly formed cluster $\boldsymbol{f}_{\boldsymbol{s}}$ with the remaining clusters. We use the Ward variance minimization algorithm to calculate the distance mentioned above. The distance between the newly formed cluster $\boldsymbol{f}_{\boldsymbol{s}}$ and any remaining cluster $\boldsymbol{f}_{\boldsymbol{t}}$ is defined as:
\begin{equation}
d^{\boldsymbol{*}}(\boldsymbol{f}_{\boldsymbol{s}}, \boldsymbol{f}_{\boldsymbol{t}})=\sqrt{d_1(\boldsymbol{f}_{\boldsymbol{s}}, \boldsymbol{f}_{\boldsymbol{t}}) + d_2(\boldsymbol{f}_{\boldsymbol{s}}, \boldsymbol{f}_{\boldsymbol{t}}) - d_3(\boldsymbol{f}_{\boldsymbol{s}}, \boldsymbol{f}_{\boldsymbol{t}})}
\end{equation}
\begin{equation}
d_1(\boldsymbol{f}_{\boldsymbol{s}}, \boldsymbol{f}_{\boldsymbol{t}}) = \frac{|\boldsymbol{f}_{\boldsymbol{t}}|+|\boldsymbol{f}_{\boldsymbol{i}}|}{T} d(\boldsymbol{f}_{\boldsymbol{t}}, \boldsymbol{f}_{\boldsymbol{i}})^{2}
\end{equation}
\begin{equation}
d_2(\boldsymbol{f}_{\boldsymbol{s}}, \boldsymbol{f}_{\boldsymbol{t}}) = \frac{|\boldsymbol{f}_{\boldsymbol{t}}|+|\boldsymbol{f}_{\boldsymbol{j}}|}{T} d(\boldsymbol{f}_{\boldsymbol{t}}, \boldsymbol{f}_{\boldsymbol{j}})^{2}
\end{equation}
\begin{equation}
d_3(\boldsymbol{f}_{\boldsymbol{s}}, \boldsymbol{f}_{\boldsymbol{t}}) = \frac{|\boldsymbol{f}_{\boldsymbol{t}}|}{T} d(\boldsymbol{f}_{\boldsymbol{i}}, \boldsymbol{f}_{\boldsymbol{j}})^{2}
\end{equation}
where $T=|\boldsymbol{f}_{\boldsymbol{t}}|+|\boldsymbol{f}_{\boldsymbol{i}}|+|\boldsymbol{f}_{\boldsymbol{j}}|$. Finally, we obtain two sets of features $\boldsymbol{F}_{\mathbf{1}}, \boldsymbol{F}_{\mathbf{2}}$ and each of them contains features with high correlation coefficients. The original data is reordered based on $\boldsymbol{F}_{\mathbf{1}}$ and $\boldsymbol{F}_{\mathbf{2}}$. For each sample $\boldsymbol{x}_{\boldsymbol{i}}$, the processed sample $\boldsymbol{x}_{\boldsymbol{i}}^{\boldsymbol{\prime}}$ contains $\boldsymbol{p}$ features: $\boldsymbol{F}^{\boldsymbol{\prime}}=\left\{\boldsymbol{f}_{\mathbf{1}}^{\boldsymbol{\prime}}, \ldots, \boldsymbol{f}_{\mathbf{m}}^{\boldsymbol{\prime}}, \boldsymbol{f}_{\mathbf{m+1}}^{\boldsymbol{\prime}}, \ldots, \boldsymbol{f}_{\boldsymbol{p}}^{\boldsymbol{\prime}}\right\}$, where $\left\{\boldsymbol{f}_{\mathbf{1}}^{\boldsymbol{\prime}}, \ldots, \boldsymbol{f}_{\boldsymbol{m}}^{\boldsymbol{\prime}}\right\}$ belongs to $\boldsymbol{F}_{\mathbf{1}}$ and $\left\{\boldsymbol{f}_{\mathbf{m+1}}^{\boldsymbol{\prime}}, \ldots, \boldsymbol{f}_{\boldsymbol{p}}^{\boldsymbol{\prime}}\right\}$ belongs to $\boldsymbol{F}_{\mathbf{2}}$.
\begin{figure*}
\caption{Details of the proposed order-invariant hierarchical model based on dilated convolution.}
\label{fig_model}
\end{figure*}
\subsection{Hierarchical Dilated Convolution Model}
\label{HDLCNN}
After feature clustering, the processed data is then fed into the dilated convolution layers for feature extraction. To explain the model structure clearly, we take the data from TE dataset as input and define the processed input data as a set $\boldsymbol{D}=\left\{\boldsymbol{d}_{\mathbf{1}}, \boldsymbol{d}_{\mathbf{2}}, \boldsymbol{d}_{\mathbf{3}}, \ldots, \boldsymbol{d}_{\boldsymbol{n}}\right\}$, where $\boldsymbol{d}_{\boldsymbol{i}} \in \boldsymbol{R}^{\boldsymbol{22\times 20}}$. Each data sample $\boldsymbol{d}_{\boldsymbol{i}}$ has 22 features and 20 time duration. The details are shown in Fig. \ref{fig_model} and the procedures of the model are summarized as follows.
\begin{enumerate}
\item The size of the processed data is $(N\times1\times22\times20)$ and we divide the 22 features into two segments. Since the processed data is reordered in the previous step, each segment contains highly correlated features belonging to the same cluster. Each segment has a size of $(N\times1\times11\times20)$.
\item The segmented data is processed by a dilated convolution layer with dilation rate $r=2$. Then the hidden information about feature correlation is extracted locally and the size of the extracted features is $(N\times16\times7\times16)$.
\item The extracted features are concatenated to obtain the entire information about two feature clusters. The size of the concatenated features is $(N\times16\times14\times16)$.
\item The concatenated features are then processed by a dilated convolution layer with dilation rate $r=2$. This step can further extract the global information and the size of the new extracted features is $(N\times32\times10\times12)$.
\item A max pooling layer is applied to help prevent over-fitting and reduce the computational cost. Now the size of the extracted features is $(N\times32\times5\times6)$.
\item The extracted features are flattened to couple information that exists vertically and horizontally. The output data of the fully connected layer has a size of $(N\times960)$.
\item A linear layer is used to change the dimensionality of the data. Then a softmax activation function is applied to impart non-linearity into the model and output the probability distributions of the possible classes (11 in this case). The size of the final output is $(N\times11)$.
\end{enumerate}
Finally, we obtain the classification results and a trained model that requires explanation. The performance of this model is described in Section \ref{experiment} and the interpretability method is introduced in the following subsection.
\subsection{Deep SHAP Explainer}
To interpret the order-invariant hierarchical model mentioned in Section \ref{HDLCNN}, we apply an explainer based on the SHAP method which combines deep learning important features (DeepLIFT) \cite{shrikumar2017learning-deeplift} and shapley values to leverage extra knowledge about the properties of deep neural networks to improve computational performance \cite{lundberg2017unified-SHAP}.
DeepLIFT is an algorithm to compute the feature importance of the input with a given output based on back-propagation \cite{shrikumar2017learning-deeplift}. This method uses a summation-to-delta property to compute the contribution scores $C_{\Delta x_{i} \Delta y}$ for each input $x_{i}$:
\begin{equation}
\sum_{i=1}^{n} C_{\Delta x_{i} \Delta y}=\Delta y
\end{equation}
where $y$ is the model output, $\Delta y=y-y^{0}$, $\Delta x_{i}=x_{i}-x^{0}$, $x^{0}$ refers to the reference input, and $y^{0}$ represents the reference output. Compared with Equation \ref{SHAP_equ}, if we define $\phi_{i}=C_{\Delta x_{i} \Delta y}$ and $\phi_{0}=y^{0}$, then DeepLIFT approximates SHAP values for linear models.
Deep SHAP takes DeepLIFT as a compositional approximation of SHAP values and recursively passes the multipliers of DeepLIFT backwards through the network \cite{lundberg2017unified-SHAP}. Deep SHAP explainer can effectively achieve linearization by combining the SHAP values computed for smaller components into SHAP values for the whole model. Therefore, we can quantify the contribution of each feature from each data sample to obtain local explanation. Based on the feature contribution of each data sample, we further interpret the model globally by calculating the average value of data samples for each feature, which can be mathematically described as follows:
\begin{equation}
\Phi_{i} = \frac{1}{n}\sum_{j=1}^{n} \phi_{i}(x_j)
\end{equation}
where $\phi_{i}(x_j)$ refers to the contribution of the feature $i$ of the input $x_j$. Finally, we identify the root-cause feature $\boldsymbol{\gamma}$ as the one with the highest contribution.
\begin{equation}
\boldsymbol{\gamma}=\arg\max(\Phi_{i})
\end{equation}
\subsection{The Entire Fault Detection and Diagnosis Procedure}
Based on the description above, the entire order-invariant and interpretable fault detection and diagnosis procedure via feature clustering, hierarchical dilated convolution model and SHAP method mainly consists of six steps:
\begin{enumerate}
\item Collecting the monitored variables via sensors in chemical processes. Obtaining the training set from the collected samples and normalizing the data.
\item Processing the training samples based on hierarchical clustering method. The features are reordered according to the clustering results and the highly correlated features are closer in the processed data.
\item Training the hierarchical dilated convolution model with the processed data. Storing the model parameters for later usage.
\item Acquiring online samples and processing them in the same way as mentioned above.
\item Restoring the trained model and classifying the new sample into a normal or fault type.
\item If a fault happens, identifying the corresponding root-cause feature based on the SHAP method. Feature contribution is computed and the one with the highest contribution is considered as the root-cause feature.
\end{enumerate}
\begin{table*}[]
\centering
\caption{Binary Fault detection accuracy of the selected 10 faults}
\label{binary-accuracy}
\setlength{\tabcolsep}{1.49mm}{
\begin{tabular}{|c|cc|cc|cc|cc|cc|c|c|c|c|c|c|}
\hline
\multirow{2}{*}{Fault ID} & \multicolumn{2}{c|}{PCA} & \multicolumn{2}{c|}{KPCA} & \multicolumn{2}{c|}{KDPCA} & \multicolumn{2}{c|}{KDICA} & \multicolumn{2}{c|}{MLPP} & \multirow{2}{*}{DSAE} & \multirow{2}{*}{VS-SVDD} & \multirow{2}{*}{MBTCN} & \multirow{2}{*}{CNN} & \multirow{2}{*}{DLCNN} & \multirow{2}{*}{HDLCNN} \\ \cline{2-11}
& \multicolumn{1}{c|}{SPE} & T$^{2}$ & \multicolumn{1}{c|}{SPE} & T$^{2}$
& \multicolumn{1}{c|}{SPE} & T$^{2}$ & \multicolumn{1}{c|}{SPE} & T$^{2}$ & \multicolumn{1}{c|}{SPE} & T$^{2}$ & & & & & & \\ \hline
1 & \multicolumn{1}{c|}{99.5} & 99.1 & \multicolumn{1}{c|}{100.0} & 99.3 & \multicolumn{1}{c|}{100.0} & 99.5 & \multicolumn{1}{c|}{100.0} & 100.0 & \multicolumn{1}{c|}{99.7} & 100.0 & 99.3 & 99.0 & 100.0 & 99.4 & 98.8 & 100.0 \\ \hline
2 & \multicolumn{1}{c|}{98.4} & 98.5 & \multicolumn{1}{c|}{99.0} & 95.3 & \multicolumn{1}{c|}{99.1} & 98.3 & \multicolumn{1}{c|}{98.5} & 98.8 & \multicolumn{1}{c|}{98.9} & 99.8 & 96.8 & 98.0 & 99.0 & 99.1 & 99.1 & 98.7 \\ \hline
3 & \multicolumn{1}{c|}{0.6} & 3.6 & \multicolumn{1}{c|}{6.8} & 9.0 & \multicolumn{1}{c|}{9.6} & 4.4 & \multicolumn{1}{c|}{19.4} & 19.8 & \multicolumn{1}{c|}{23.8} & 39.6 & 67.4 & 42.0 & 85.4 & 85.7 & 92.4 & 97.6 \\ \hline
8 & \multicolumn{1}{c|}{96.8} & 97.4 & \multicolumn{1}{c|}{97.9} & 97.4 & \multicolumn{1}{c|}{97.8} & 97.6 & \multicolumn{1}{c|}{97.8} & 99.4 & \multicolumn{1}{c|}{100.0} & 98.7 & 87.0 & 98.0 & 89.0 & 96.7 & 97.7 & 95.9 \\ \hline
10 & \multicolumn{1}{c|}{15.4} & 36.7 & \multicolumn{1}{c|}{52.5} & 48.6 & \multicolumn{1}{c|}{63.5} & 42.6 & \multicolumn{1}{c|}{80.6} & 92.9 & \multicolumn{1}{c|}{71.3} & 94.2 & 68.3 & 73.0 & 86.6 & 93.7 & 93.9 & 95.6 \\ \hline
11 & \multicolumn{1}{c|}{63.8} & 41.4 & \multicolumn{1}{c|}{77.6} & 51.0 & \multicolumn{1}{c|}{91.0} & 33.6 & \multicolumn{1}{c|}{81.4} & 90.3 & \multicolumn{1}{c|}{93.6} & 95.6 & 81.3 & 98.0 & 99.5 & 93.4 & 93.0 & 94.5 \\ \hline
12 & \multicolumn{1}{c|}{92.5} & 98.5 & \multicolumn{1}{c|}{98.5} & 98.9 & \multicolumn{1}{c|}{99.1} & 99.1 & \multicolumn{1}{c|}{99.7} & 100.0 & \multicolumn{1}{c|}{99.6} & 100.0 & 94.1 & 100.0 & 96.5 & 81.7 & 82.7 & 90.7 \\ \hline
13 & \multicolumn{1}{c|}{95.0} & 94.3 & \multicolumn{1}{c|}{95.2} & 94.3 & \multicolumn{1}{c|}{95.4} & 96.3 & \multicolumn{1}{c|}{95.9} & 95.9 & \multicolumn{1}{c|}{96.5} & 91.2 & 78.1 & 95.0 & 95.6 & 96.3 & 96.8 & 96.9 \\ \hline
14 & \multicolumn{1}{c|}{99.9} & 98.8 & \multicolumn{1}{c|}{100.0} & 99.6 & \multicolumn{1}{c|}{100.0} & 99.9 & \multicolumn{1}{c|}{100.0} & 100.0 & \multicolumn{1}{c|}{100.0} & 100.0 & 99.6 & 100.0 & 100.0 & 100.0 & 100.0 & 100.0 \\ \hline
20 & \multicolumn{1}{c|}{42.3} & 34.0 & \multicolumn{1}{c|}{59.8} & 49.1 & \multicolumn{1}{c|}{66.8} & 51.5 & \multicolumn{1}{c|}{72.7} & 83.9 & \multicolumn{1}{c|}{86.7} & 93.6 & 78.6 & 78.0 & 90.1 & 97.2 & 97.3 & 96.4 \\ \hline
Average & \multicolumn{1}{c|}{70.4} & 70.2 & \multicolumn{1}{c|}{78.7} & 74.2 & \multicolumn{1}{c|}{82.2} & 72.3 & \multicolumn{1}{c|}{84.6} & 88.1 & \multicolumn{1}{c|}{87.0} & 91.3 & 85.1 & 88.1 & 94.2 & 94.3 & 95.2 & \textbf{96.6} \\ \hline
\end{tabular}}
\end{table*}
\section{EXPERIMENT STUDY}
\label{experiment}
In this paper, we use the TE dataset to verify the effectiveness of the proposed method. It simulates actual chemical processes and is widely used as a benchmark in chemical fault detection and diagnosis \cite{amin2018process-te-1-15,peng2021towards-te-peng,yu2015nonlinear-te-10}. In total, there are 21 types of faults and 22 continuously measured variables in this dataset. For the training set, it has 980 samples including 500 samples in the normal case and 480 samples in the case of failure for each fault type. For the test set, it has 960 samples including 160 normal samples and 800 fault samples for each fault type.
Details are described in the following subsections.
\begin{figure}
\caption{The feature clustering dendrogram of the 22 features in TE dataset.}
\label{fig_cluster}
\end{figure}
\subsection{Experiment Setup}
The downloaded TE dataset has a sampling period of 180 seconds, leading to few data samples for training and test. Therefore, current CNN methods use simulation model to generate more data samples for feature extraction. Similarly, we refer to the simulation method from \cite{peng2020cost} on MATLAB to obtain more data for classification. The sampling period is set to 36 seconds (100 samples/h). The simulator runs for 48 hours in the normal state, then 4800 training normal samples are collected. For each fault type, the simulator runs for 48 hours to collect 4800 training fault samples. For the testing data of each fault, the simulator runs for 8 hours in the normal state at the beginning to collect 800 test normal samples. Then a fault disturbance is introduced and the simulator continues to run for 40 hours to collect 4000 test fault samples. Next, the collected data is processed in the range [0,1] to eliminate the adverse effects caused by singular data. To extract the features in both spatial and temporal domains, each data sample is reshaped into a 2-D array with 22 features and 20 time duration.
To demonstrate the performance of our proposed model on chemical fault detection and diagnosis, we select Fault 1, Fault 2, Fault 3, Fault 8, Fault 10, Fault 11, Fault 12, Fault 13, Fault 14, and Fault 20 for binary and multi-class fault detection and diagnosis. Fault 10 and Fault 11 are chosen for root cause analysis since their root-cause features are proven and widely used. The corresponding true root-cause features are X(18) and X(9) \cite{amin2018process-te-1-15, yu2015nonlinear-te-10}.
\subsection{Feature Clustering}
As shown in Fig. \ref{fig_tabular}, highly correlated features are closer in the optimal order. Although it is hard to obtain the optimal order, we can cluster the features with higher correlation to achieve a similar effect. As described in Section \ref{feature clusetering}, we divide the 22 features into two categories based on the correlation. The corresponding hierarchical clustering dendrogram is shown in Fig. \ref{fig_cluster}. We can see that the first category includes 11 features which are X(1), X(2), X(3), X(4), X(8), X(9), X(12), X(14), X(15), X(17) and X(19). The second category also includes 11 features which are X(5), X(6), X(7), X(10), X(11), X(13), X(16), X(18), X(20), X(21) and X(22). Referring to the correlation coefficients of the 22 features shown in Fig. \ref{fig_corr}, we can see that the highly correlated features are classified into the same cluster.
\begin{figure}
\caption{The t-SNE embedding of the extracted features of our proposed HDLCNN model.}
\label{fig_tsne}
\end{figure}
\subsection{Contrast and Ablation Experiments}
Firstly, we evaluate the proposed method for binary fault detection and diagnosis. In this case, only one fault is considered at a time. To show the efficiency, the proposed HDLCNN model is compared with the existing data-driven methods including principal component analysis (PCA), kernel principal component analysis (KPCA), integrated kernel dynamic principal component analysis (KDPCA), kernel dynamic independent component analysis (KDICA) \cite{fan2014fault-comparison}, modified locality preserving projection (MLPP) \cite{shah2022modified-mlpp}, denoising sparse autoencoder (DSAE) \cite{peng2021towards-te-peng}, variable selection and support vector data description (VS-SVDD) \cite{cai2022relevant-RVS-SVDD} and multi-block temporal convolutional network (MBTCN) \cite{he2021multiblock-mbtcn}. As shown in Table. \ref{binary-accuracy}, our proposed model results in the highest average fault detection rate, which is marked bold. The following experiment results are marked in a similar way. As ablation experiments, we compare CNN, DLCNN and HDLCNN. CNN is the baseline model with traditional convolution layers and DLCNN contains dilated convolution layers without hierarchical feature clustering. HDLCNN is our proposed method involving both hierarchical feature clustering and dilated convolution layers. It is obvious that the average fault detection accuracy of DLCNN is increased by 1.0\% compared to CNN, which shows the effect of dilated convolution. In addition, hierarchical feature clustering is proved instrumental since the average fault detection accuracy of HDLCNN is increased by 1.5\% compared to DLCNN.
\begin{table}[]
\centering
\caption{Multi-class Fault detection accuracy of the selected 10 faults}
\setlength{\tabcolsep}{3.2mm}{
\begin{tabular}{|c|c|c|c|c|c|}
\hline
Fault ID & PCA & DSAE & CNN & DLCNN & HDLCNN \\ \hline
0 & 5.1 & 51.3 & 85.3 & 92.1 & 99.6 \\ \hline
1 & 51.8 & 98.4 & 99.2 & 99.7 & 97.7 \\ \hline
2 & 78.5 & 97.8 & 94.6 & 95.0 & 97.2 \\ \hline
3 & 25.9 & 14.1 & 94.4 & 100.0 & 100.0 \\ \hline
8 & 4.1 & 44.8 & 89.6 & 93.0 & 94.5 \\ \hline
10 & 8.6 & 36.4 & 95.3 & 99.4 & 98.8 \\ \hline
11 & 16.4 & 45.5 & 90.3 & 93.1 & 93.0 \\ \hline
12 & 12.2 & 68.4 & 73.4 & 95.5 & 93.9 \\ \hline
13 & 45.4 & 23.8 & 92.2 & 75.2 & 93.3 \\ \hline
14 & 50.8 & 97.9 & 100.0 & 100.0 & 100.0 \\ \hline
20 & 41.5 & 72.4 & 89.9 & 91.3 & 92.6 \\ \hline
Average & 30.9 & 59.2 & 91.3 & 94.0 & \textbf{96.4} \\ \hline
\end{tabular}}
\label{table_11}
\end{table}
\begin{table}[]
\centering
\caption{Accuracy of CNN, DLCNN and HDLCNN}
\label{acc-te}
\setlength{\tabcolsep}{4mm}{
\begin{tabular}{|c|c|c|c|}
\hline
& CNN & DLCNN & HDLCNN \\ \hline
Close-correlated Order & 91.3 & 94.0 & 96.4 \\ \hline
Separate-correlated Order & 85.8 & 91.8 & 96.0 \\ \hline
Difference & 5.5 & 2.2 & 0.4 \\ \hline
\end{tabular}}
\end{table}
Then, to explore the performance of the proposed method for multi-class fault detection and diagnosis, we combine the normal case with the selected 10 types of faults. As shown in Fig. \ref{fig_tsne}, the extracted features of HDLCNN are visualized. Specifically, we utilize t-SNE method to reduce the dimension of the features to 2, and then plot them by class. It is obvious that the embedding of the extracted features belonging to different classes are separate. Therefore, it is unsurprising that the softmax layer can get accurate classification results. PCA, DSAE, CNN and DLCNN are selected as comparison methods. As shown in Table. \ref{table_11}, the proposed model achieves the highest average fault detection rate. Similarly, we consider CNN, DLCNN and HDLCNN as ablation experiments. Due to the dilated convolution, the average fault detection accuracy of DLCNN is increased by 3.0\% compared to CNN. And with hierarchical feature clustering, the average fault detection accuracy of HDLCNN is increased by 2.6\% compared to DLCNN.
\subsection{Ablation Experiments of Sensitivity to Feature Order}
To demonstrate the ability of our proposed HDLCNN model to process tabular data with features of arbitrary order, we find the separate-correlated order of features and compare to the close-correlated order. More specifically, we consider the separate-correlated order as [X(21), X(8), X(4), X(3), X(15), X(16), X(2), X(6), X(20), X(13), X(17), X(18), X(9), X(1), X(7), X(10), X(5), X(14), X(19), X(11), X(12), X(22)], in which highly correlated features are separated. And the close-correlated order is formed in an opposite way. In this case, the average multi-class fault detection accuracy of CNN, DLCNN and HDLCNN is shown in Table. \ref{acc-te}. We see that the performance of CNN and DLCNN is influenced by the order of the features and the differences are 5.5\% and 2.2\% respectively. This proves that dilated convolution can extract more information and weaken the effect of the feature order. Further, HDLCNN is order-invariant and the difference is only 0.4\%, which confirms the effect of hierarchical feature clustering. In a word, the ablation experiments demonstrate that our proposed method can effectively process tabular data with features of arbitrary order without seeking the optimal order. The confusion matrices obtained by HDLCNN are illustrated in Fig. \ref{fig_confusion_matrix}.
\begin{figure}
\caption{The confusion matrices of HDLCNN.}
\label{fig_confusion_matrix}
\end{figure}
\begin{figure}
\caption{The visualization of SHAP values of a single sample of Fault 10 and 11.}
\label{shap_fig_root_cause}
\end{figure}
\begin{figure}
\caption{The heatmap of a single sample of Fault 10 and 11.}
\label{heatmap_fig_root_cause}
\end{figure}
\subsection{Local and Global Explanation}
We further analyze the feature contribution and obtain the root-cause features based on SHAP values. Fault 10 and Fault 11 are selected for root cause analysis, and the corresponding true root-cause features are X(18) and X(9), respectively. For Fault 10, the stripper temperature (X(18)) is directly affected because of the random variation of temperature in feed C \cite{yu2015nonlinear-te-10}. And for Fault 11, the random variation in reactor cooling water inlet temperature results in abnormal behaviour of the reactor temperature (X(9)) \cite{amin2018process-te-1-15}.
\begin{figure}
\caption{The average feature importance of Fault 10 and 11.}
\label{fig_root_cause}
\end{figure}
First, local explanation is provided to indicate the feature contribution to the prediction from a single sample. Fig. \ref{shap_fig_root_cause} shows the visualization of the SHAP values of a single sample. The left column indicates the gray image of the sample, the middle column shows the SHAP values of classifying this sample to the normal case, and the right column shows the SHAP values of classifying this sample to the case of failure. Red pixels indicate high SHAP values and blue pixels denote low SHAP values. High SHAP value means great feature contribution of this sample to be classified to the corresponding type of fault. The corresponding heatmaps are shown in Fig. \ref{heatmap_fig_root_cause}. It is obvious that the most important features are X(18) and X(9) for Fault 10 and Fault 11 respectively, which are also the true root-cause features.
Then, global explanation is described to show the feature contribution of the overall dataset. We compute the average feature contribution for each sample and consider the feature with highest importance as the corresponding root-cause feature. Fig. \ref{fig_root_cause} shows the average feature importance and the features with highest importance are marked red. We see that the most important features are X(18) and X(9) for Fault 10 and Fault 11 respectively, which are also the true root-cause features. Fig. \ref{summary_fig_root_cause} shows the relationship between the measured feature values and the corresponding SHAP values. For Fault 10, high measured values of X(18) clearly correspond to high SHAP values, which means high stripper temperature (X(18)) may be the main cause of the failure. On the contrary, for Fault 11, we see that low measured values of X(9) mainly correspond to high SHAP values, which reminds us to pay attention to the reduction of reactor temperature (X(9)).
\begin{figure}
\caption{The SHAP values of the top 10 features of Fault 10 and 11.}
\label{summary_fig_root_cause}
\end{figure}
\section{Conclusion}
\label{conclusion}
In this paper, we propose an order-invariant and interpretable HDLCNN-SHAP method for chemical fault detection and diagnosis based on feature clustering, dilated convolution and SHAP method. The ability to detect faults and obtain the root-cause features is essential for fault detection and diagnosis methods in real chemical processes. Compared with the existing methods, our proposed method can effectively process tabular data with features of arbitrary order without seeking the optimal order. In addition, root-cause features are precisely identified without any human supervision. The proposed method is evaluated on a simulation dataset based on an actual chemical process. Experimental results show that the proposed method achieves better performance for both binary and multi-class fault detection and diagnosis compared with other popular data-driven methods. Moreover, the proposed method is order-invariant, which results in insensitivity to the order of the features. Local and global explanation are further described to obtain the root-cause features. In our future work, we will focus on more practical and complex fault detection problems. Simultaneous-fault diagnosis is a common problem in real applications and the problem of faults with multiple root-cause features is consequential as well. Besides, incomplete and high-dimensional datasets are worthy of investigation for solving fault detection problems in real-world chemical processes.
\ifCLASSOPTIONcaptionsoff
\fi
\end{document} |
\begin{document}
\title{1-Overlap Cycles for Steiner Triple Systems}
\begin{abstract}
A number of applications of Steiner triple systems (e.g. disk erasure codes) exist that require a special ordering of its blocks. Universal cycles, introduced by Chung, Diaconis, and Graham in 1992, and Gray codes are examples of listing elements of a combinatorial family in a specific manner, and Godbole invented the following generalization of these in 2010. 1-overlap cycles require a set of strings to be ordered so that the last letter of one string is the first letter of the next. In this paper, we prove the existence of 1-overlap cycles for automorphism free Steiner triple systems of each possible order. Since Steiner triple systems have the property that each block can be represented uniquely by a pair of points, these 1-overlap cycles can be compressed by omitting non-overlap points to produce rank two universal cycles on such designs, expanding on the results of Dewar.
\end{abstract}
\textbf{Keywords:} Overlap cycles, universal cycles, Gray codes.
\textbf{MSC Classifications:} 68R15, 05B05.
\section{Introduction}
Steiner triple systems, or $(v,3,1)$-designs, appear in many interesting applications. They can be viewed as set systems, combinatorial designs, hypergraphs, or many other types of structures. A Steiner triple system of order $v$, or STS($v$), is a pair $(X, \mathcal{B})$ with $|X|=v$, where $\mathcal{B}$ is a set of triples, or blocks, from $X$. The set $\mathcal{B}$ has the property that every pair of points from $X$ appears in exactly one triple in $\mathcal{B}$. It has been completely determined for which values $v$ such a set system can exist.
\begin{thm}\label{STS}
\emph{\cite{Kirkman}} There exists an STS($v$) if and only if $v \equiv 1, 3 \pmod 6$.
\end{thm}
Several authors have considered ordering blocks in Steiner triple systems in specific ways. For example, in \cite{Disks}, the authors consider such an ordering to construct erasure codes. In \cite{Dewar}, Dewar uses a modified universal cycle structure to list the blocks and points within each block of these designs in an organized manner. \textbf{Universal cycles}, or \textbf{ucycles}, are a type of cyclic Gray code in which a string $a_1a_2 \ldots a_n$ may follow string $b_1 b_2 \ldots b_n$ if and only if $a_{i} = b_{i+1}$ for all $i \in \{1,2, \ldots , n-1\}$. That is, the two substrings $a_1 a_2 \ldots a_{n-1}$ and $b_2 b_3 \ldots b_{n}$ are identical \cite{CDG}. We can think of this as an $n-1$ overlap between the strings.
Because any two blocks in a Steiner triple system can share at most one point in common, finding a ucycle over the blocks of an STS is clearly impossible - they would need to overlap in two points. To remedy this problem, Dewar introduces a modified ucycle structure. A \textbf{rank two universal cycle} is a ucycle on a block design in which each block is represented by just two of its elements. Since any pair of points appears in exactly one block of an STS, they completely identify a unique block in the triple system. Dewar constructs rank two ucycles for the special class of cyclic Steiner triple systems. A \textbf{cyclic} design has automorphism group containing the cyclic group of order $v$, isomorphic to $\mathbb{Z}_v$, as a subgroup. Since this subgroup contains the automorphism $\pi: i \mapsto i+1 \pmod v$, this implies that we can partition the blocks into classes so that within one class each block can be obtained from any other by repeated applications of $\pi$ on the block elements.
\begin{thm}\label{Dewar}
\emph{(\cite{Dewar}, p. 200)} Every cyclic STS$(v)$ with $v \neq 3$ admits a ucycle of rank two.
\end{thm}
While this result allows us to write the list of blocks as a modified ucycle, it is not easy to recover the design from a given ucycle. Given just two points of a block, the only way to recover the missing point is to have a lookup table at hand, which (depending on applications) may defeat the purpose of creating a compact listing. For this reason, we consider overlap cycles.
Overlap cycles were first introduced in \cite{Godbole} for binary and $m$-ary strings. To extend this concept to Steiner triple systems, let $(X, \mathcal{B})$ be an STS($v$). An \textbf{$s$-overlap cycle} (or $s$-\textbf{ocycle}) on $(X, \mathcal{B})$ is an ordered listing of the blocks in $\mathcal{B}$ so that the last $s$ points in one block are the first $s$ points of its successor in the listing. In the case of triple systems, a 2-ocycle is a ucycle, which as previously discussed cannot be formed on any STS. Hence we consider 1-ocycles.
When writing out ocycles, we can list the sequence fully or we can choose to omit points that do not appear as an overlap (\textbf{hidden} points). When we omit the hidden points, we say that the cycle is written in \textbf{compressed form}. Using this concept, we can view Dewar's rank two ucycles as compressed 1-ocycles and easily obtain the following corollary to Theorem \ref{Dewar}.
\begin{cor}\label{CDewar}
Every cyclic STS$(v)$ with $v \neq 3$ admits a 1-ocycle.
\end{cor}
It is a well-known result that there exists a cyclic STS($v$) for every $v \equiv 1, 3 \pmod 6$ except $v = 9$ (See \cite{TripleSystems}, Theorem 7.3). In order to further differentiate our results from Dewar's, we will consider a different class of Steiner triple systems, namely automorphism free (AF) Steiner triple systems, and prove the following result using recursive constructions. However, the constructions used herein may be utilized with various base cases for different (and perhaps not automorphism free) Steiner triple systems.
\begin{result}\label{main}
For every $v \equiv 1, 3 \pmod 6$ with $v \geq 15$, there exists an AF STS$(v)$ with a 1-ocycle.
\end{result}
We also include two other direct constructions of 1-ocycles for a Steiner triple system of each order (Results \ref{OC3m6} and \ref{OC1m6}), as well as another recursive construction (Result \ref{OCPC}). While Dewar constructs rank two ucycles for all cyclic designs of each order, these direct constructions may be a simpler method of finding an ocycle when any STS($v$) will do.
In this paper, we will begin with a review of some recursive constructions of automorphism free Steiner triple systems in Section 2 and show 1-ocycle constructions that correspond. Some of the larger but necessary base cases for these constructions may be found in the appendix. Section 3 discusses similar results for other STS constructions and their corresponding 1-ocycle constructions. As a future direction, it would be interesting to consider these structures over other types of designs, such as Steiner quadruple systems (see \cite{OCSQS}).
\section{Constructions of AF Steiner Triple Systems and 1-Ocycles}
\subsection{Recursive Constructions of AF Steiner Triple Systems}
The first construction produces an AF STS($2v+1$) from an AF STS($v$).
\begin{const}\label{C2v+1}
Given $(X, \mathcal{A})$, an STS($v$) with $v \geq 15$ and with $X$ identified with $\mathbb{Z}_v$, construct a new design $(Y, \mathcal{B})$ with points: $$Y = (\mathbb{Z}_2 \times \mathbb{Z}_v )\cup\{\infty\} $$ and blocks:
\begin{enumerate}
\item $\{(1,a), (1,b), (1,c)\}$ with $\{a,b,c\} \in \mathcal{A}$,
\item $\left\{ (0,x), (0,y), \left(1, \frac{x+y}{2} \right) \right\}$ with $\{x,y\} \subset X$, and
\item $\{(0,x), (1,x), \infty\}$ with $x \in X$.
\end{enumerate}
\end{const}
The following theorem from \cite{AFSTS} proves that Construction \ref{C2v+1} is correct.
\begin{thm}\label{2v+1}
\emph{\cite{AFSTS}}. If $(X, \mathcal{A})$ is an STS($v$), then $(Y, \mathcal{B})$ is an STS($2v+1$). In particular, if $(X, \mathcal{A})$ is AF, then $(Y, \mathcal{B})$ is AF.
\end{thm}
The second construction produces an AF STS($2v+7$) from an AF STS($v$).
\begin{const}\label{C2v+7}
Given $(X, \mathcal{A})$, an STS($v$) with $v \geq 15$, and with $X$ identified with $\mathbb{Z}_v$, construct a new design $(Y, \mathcal{B})$ with points: $$Y = (\mathbb{Z}_2 \times \mathbb{Z}_v) \cup \{ \infty_i \mid |i| \leq 3\}.$$ Fix $(Z, \mathcal{C})$ as some STS(7) on the points $\{-3, -2, -1, 0, 1, 2, 3\}$. The blocks in our new design are as follows:
\begin{enumerate}
\item $\{(1,i), (1,j), (1,k)\}$ with $\{i,j,k\} \in \mathcal{A}$,
\item $\{ \infty_i , \infty_j , \infty_k\}$ with $\{i,j,k\} \in \mathcal{C}$,
\item $\{(0,x), (0, x+2), (0,x+6)\}$ with $x \in \mathbb{Z}_v$,
\item $\{(0,x), (1,x+y), (0, x+2y)\}$ with $\{x, y\} \subset \mathbb{Z}_v$ and $|y|>3$, and
\item $\{\infty_i, (1,j), (0, i+j)\}$ with $|i| \leq 3$ and $j \in \mathbb{Z}_v$.
\end{enumerate}
\end{const}
The following theorem from \cite{AFSTS} proves that Construction \ref{C2v+7} is correct.
\begin{thm}\label{2v+7}
\emph{\cite{AFSTS}}. If $(X, \mathcal{A})$ is an STS($v$), then $(Y, \mathcal{B})$ is an STS($2v+7$). In particular, if $(X, \mathcal{A})$ is AF, then $(Y, \mathcal{B})$ is AF.
\end{thm}
\subsection{Base Cases}\label{BaseC}
The recursive constructions given in the previous subsection require six base cases in order to construct recursively an AF STS($v$) for every $v \equiv 1, 3 \pmod 6$ with $v \geq 15$. These base cases are STS($v$)'s for $v = 15, 19, 21,25,27,33$. We also provide 1-ocycles for a non-cyclic STS($v$) for $v = 9,13$. We include a 1-ocycle for the cyclic STS(7) as it is used in the second recursive construction. See the appendix for all cases with $v \geq 19$.
\\
\\
$\mathbf{v=7:}$ We use the cyclic $(7,3,1)$-design and produce the 1-ocycle: $$\underline{2},1, \underline{0}, 3,
\underline{4}, 2,
\underline{5}, 0,
\underline{6},4,\underline{1},5,\underline{3},6,\underline{2}$$ or, since each
pair appears in exactly one block, we may omit the non-overlap points to write it in compressed form as:
$$(2,0,4,5,6,1,3,2).$$
$\mathbf{v=9:}$ We use the non-cyclic design (from \cite{SmallSTS}) and produce the 1-ocycle:
$$\underline{0},1, \underline{2}, 8,
\underline{5}, 3,
\underline{4}, 1,
\underline{7},8,\underline{6},3,\underline{0},4,\underline{8},1,\underline{3},7,
\underline{2},4,\underline{6},1,\underline{5},7,\underline{0}$$ or in compressed form:
$$(0,2,5,4,7,6,4,8,3,2,6,5).$$
\\
\\
$\mathbf{v=13:}$ We use the non-cyclic design (from \cite{AFSTS}) and produce the 1-ocycle:
$$\underline{1}, 2, \underline{0}, 9, \underline{10}, 12, \underline{1}, 3, \underline{5}, 7, \underline{11}, 9, \underline{6}, 7, \underline{12}, 8, \underline{4}, 9, \underline{5}, 10, \underline{8}, 6, \underline{3}, 11, \underline{10}, 7, \underline{4},$$
$$\underline{4}, 0, \underline{3}, 7, \underline{2}, 5, \underline{12}, 3, \underline{9}, 8, \underline{2}, 10, \underline{6}, 5, \underline{0}, 12, \underline{11}, 2, \underline{4}, 6, \underline{1}, 9, \underline{7}, 0, \underline{8}, 11, \underline{1}.$$
\\
\\
$\mathbf{v=15:}$ We use the AF design (from \cite{SmallSTS}) and produce the 1-ocycle:
$$\begin{array}{c|c|c|c|c}
210 & 807 & 5b7 & da4 & b94 \\
0a9 & 73c & 742 & 4e5 & 48c \\
971 & ce1 & 2dc & 52a & c95 \\
153 & 1db & cb0 & a7e & 58d \\
304 & b82 & 0de & eb6 & d76 \\
461 & 236 & e83 & 6ca & 689 \\
1a8 & 605 & 39d & a3b & 9e2
\end{array}$$
\subsection{Recursive Constructions of 1-Overlap Cycles}
\begin{result}\label{OC2v+1}
If there exists an AF STS$(v)$ with a 1-ocycle, then there exists an AF STS$(2v+1)$ with a 1-ocycle when $v \geq 15$.
\end{result}
\begin{proof}
Using Construction \ref{C2v+1}, we construct an overlap cycle for $(Y, \mathcal{B})$ as follows. We will construct a 1-overlap cycle for triples of type (1), and then for the triples of types (2) and (3), and finally show that this sequence may be joined with the sequence for triples of type (1).
\\
\\
\textbf{Step 1: Triples of type (1):} Let $O$ be a 1-ocycle on $\mathcal{A}$. Define $\{1\} \oplus O$ to be the cycle obtained by preceding each point in $O$ with a 1, i.e. each point becomes an ordered pair with first coordinate 1. Then $\{1\} \oplus O$ is a 1-ocycle for the set of triples of type (1).
\\
\\
\textbf{Step 2: Triples of type (2):} We first define the \textbf{difference} of the triple to be the smaller of $x-y$ and $y-x$ (modulo $v$). Then we partition the set of triples of type (2) depending on their difference $d$. This creates an equivalence relation on the set of triples of type (2). We will construct 1-ocycles for each equivalence class separately.
\begin{description}
\item[$\mathbf{d=1}$:] We have the overlap cycle (in compressed form, with hidden elements removed): $$(0,0),(0,1),(0,2), \ldots , (0,v-1), (0,0).$$
\item[$\mathbf{d \geq 3}$:] We follow the same procedure as for $d=1$ by beginning with point $(0,0)$ and moving to point $(0, d)$, then $(0, 2d)$, and so on. We note however that if $d \mid v$ the procedure will not produce a cycle that covers all triples. However, when this happens we can repeat the process beginning with the first triple that remains unused. In this manner, we will obtain several disjoint ocycles, and every triple of type (2) with the given difference $d$ will be covered by one of these cycles.
\end{description}
Note that for difference $d=1$, every point of type $(0,x)$ for $x \in X$ appears as an overlap point. Thus for all overlap cycles associated with $d \geq 3$, we can join them to the cycle for $d=1$. We reserve the triples corresponding to $d=2$ to include with the triples of type (3).
\\
\\
\textbf{Step 3: Triples of type (3):} We construct an ocycle to include triples of type (2) with $d=2$ and triples of type (3) as follows. First, connect pairs of triples of the form: $$\begin{array}{ccc} (1,x+1) & (0,x) & (0,x+2) \\ \\ & \hbox{and} &\\ \\ (0, x+2) & \infty & (1,x+2) \end{array}$$ Then we may use all of these pairs to form the ocycle: $$\underline{(1,1)}, (0,0), \underline{(0,2)}, \infty, \underline{(1,2)}, (0,1), \underline{(0,3)}, \infty, \underline{(1,3)}, \ldots , \underline{(1,0)}, (0,v-1), \underline{(0,1)}, \infty, \underline{(1,1)}.$$ This cycle accounts for $v$ of these pairs of triples, and since no triple is covered twice it must cover all triples of type (2) with $d=2$ and all triples of type (3).
\\
\\
To connect all of the constructed cycles, we note that the cycle created in Step 3 contains the points $(0,x)$ and $(1,x)$ for every $x \in X$ as an overlap. Thus we can connect the cycle created in Step 1 to this cycle, as well as the cycle created in Step 2. This produces one long 1-ocycle that covers all triples.
\end{proof}
\begin{result}\label{OC2v+7}
If there exists an AF STS$(v)$ with a 1-ocycle, then there exists an AF STS$(2v+7)$ with a 1-ocycle, when $v \geq 15$.
\end{result}
\begin{proof}
Using Construction \ref{C2v+7}, we will find ocycles for subsets of blocks, and show that they can be combined to form one long ocycle for the entire design.
\\
\\
\textbf{Step 1: Triples of type (1):} Let $O$ be a 1-ocycle on $\mathcal{A}$. Then $\{1\} \oplus \mathcal{A}$ also has a 1-ocycle, given by $\{1\} \oplus O$.
\\
\\
\textbf{Step 2: Triples of type (3):} We construct one long cycle: $$\underline{(0,0)}, (0,6), \underline{(0,2)}, (0,8), \underline{(0,4)}, \ldots , \underline{(0, v-1)}, (0,5), \underline{(0,1)}, \ldots , \underline{(0,v-2)}, (0,4), \underline{(0,0)}.$$ Note that since $v$ must always be odd, we see the point $(0,x)$ for every $x \in \mathbb{Z}_v$ as an overlap point in this cycle.
\\
\\
\textbf{Step 3: Triples of type (4) with $|y|> 4$:} We start by creating the ocycle (in compressed form, with hidden elements removed): $$(0,0), (0,2y), (0,4y), (0,6y), \ldots , (0,0).$$ This cycle contains all triples of type (4) associated with a particular $y$, since $v$ must be odd. Note also that in each cycle, all of the points $(0,x)$ for every $x \in \mathbb{Z}_v$ appear as overlaps. Thus we can connect all of these cycles for each choice of $y$ with $|y| > 4$.
\\
\\
\textbf{Step 4: Triples of type (4) with $|y| = 4$ and type (5) with $i = -3$:} We begin by pairing up blocks as follows so as to partition $\mathcal{B}$: $$\begin{array}{ccc} \{ (0,x), & (0,x+8), & (1,x+4)\} \\ \\ & \hbox{and} & \\ \\ \{(1,x+4), & \infty_{-3}, & (0,x+1)\} \end{array}$$ We can connect up these pairs in order, starting with the pair that begins $(0,0)$ and then moving to the pair that begins $(0,1)$, and so on. We will eventually end with the pair starting $(0,v-1)$, which ends with the point $(0,0)$. Thus we have an overlap cycle. Note that in this cycle the points $(0,x)$ and $(1,x)$ appear as overlap points for every $x \in \mathbb{Z}_v$.
\\
\\
\textbf{Step 5: Triples of type (2):} The triples of type (2) correspond to an STS(7). We have shown in Section \ref{BaseC} that a 1-ocycle exists for the unique STS(7). We will use the cycle from Step 4 to join the triples of type (2). If we break the cycle from Step 4 between the blocks $$\begin{array}{cccc} \{(0,v-8), & (0,0), & (1,v-4)\} & \hbox{and} \\ \{(1,v-4), & \infty_{-3}, & (0,v-7)\} \end{array}$$ and also between the blocks $$\begin{array}{cccc} \{(1,3), & \infty_{-3}, & (0,0)\} & \hbox{and} \\ \{(0,0), & (0,8), & (1,4)\} \end{array}$$ then we now have two 1-overlap paths: $$\underline{(0,0)}, (0,8), \underline{(1,4)}, \ldots , \underline{(0,v-8)}, (0,0), \underline{(1,v-4)}$$ $$\hbox{and}$$ $$\underline{(1,v-4)}, \infty_{-3}, \underline{(0,v-7)}, \ldots , \underline{(1,3)}, \infty_{-3}, \underline{(0,0)}.$$ We can swap the order of the last two elements in the first path, and swap the order of the first two and the order of the last two elements in the second path to obtain the following two 1-ocycles: $$\underline{(0,0)}, (0,8), \underline{(1,4)}, \ldots , \underline{(0,v-8)}, (1,v-4), \underline{(0,0)}$$ $$\hbox{and}$$ $$\underline{\infty_{-3}}, (1,v-4), \underline{(0,v-7)}, \ldots , \underline{(1,3)}, (0,0), \underline{\infty_{-3}}.$$ Now we have $\infty_{-3}$ as an overlap point in the second cycle and so we can join this ocycle to the STS(7) ocycle (which contains every point $\infty_i$ as an overlap point).
\\
\\
\textbf{Step 6: Triples of type (5) with $i \neq -3$:} We construct three separate ocycles as follows. For $k \in \{-2,0,2\}$, construct the cycle: $$(0,0), (1,k), (0,1), (1,k+1), \ldots$$ When $k=-2$, this covers all triples of type $$\{(0,x), (1,x-2), \infty_2\} \hbox{ and } \{(0,x), (1,x-3), \infty_3\}.$$ When $k=0$, this covers all triples of type $$\{(0,x), (1,x), \infty_0\} \hbox{ and } \{(0,x), (1,x-1), \infty_1\}.$$ When $k=2$, this covers all triples of type $$\{(0,x), (1,x+2), \infty_{-2}\} \hbox{ and } \{(0,x), (1,x+1), \infty_{-1}\}.$$ These three cycles cover all triples of type (5) with $i \neq -3$.
\\
\\
The cycles from Steps 2, 3, 5, and 6 all contain the point $(0,x)$ for every $x \in \mathbb{Z}_v $, and so can be connected. The cycles from Steps 1, 5, and 6 all contain the point $(1,x)$ for every $x \in \mathbb{Z}_v \setminus \{v-4\}$, and so can be connected. Since the cycle from Step 5 appears in both cases, these two long cycles can also be connected. Thus, all triples are contained in one of the connected cycles, and so we have a 1-ocycle that covers all blocks.
\end{proof}
We are now ready to prove Result \ref{main}.
\begin{proof}[Proof of Result \ref{main}]
We proceed by induction on $n$. For $n=15,19,21,25,27,33$, we have shown ocycles in Section \ref{BaseC} and the appendix.
For $n \geq 37$ and $n \equiv 1 \pmod {12}$, there exists $v \equiv 3 \pmod 6$ with $n = 2v+7$. Note that $n \geq 37$ implies that $v \geq 15$. Thus we use Result \ref{OC2v+7} to find the STS($n$).
For $n \geq 39$ and $n \equiv 3 \pmod {12}$, there exists $v \equiv 1 \pmod 6$ with $n = 2v+1$. Note that $n \geq 39$ implies that $v \geq 19$. Thus we use Result \ref{OC2v+1} to find the STS($n$).
For $n \geq 31$ and $n \equiv 7 \pmod {12}$, there exists $v \equiv 3 \pmod 6$ with $n = 2v+1$. Note that $n \geq 31$ implies that $v \geq 15$, so we use Result \ref{OC2v+1} to find the STS($n$).
For $n \geq 45$ and $n \equiv 9 \pmod {12}$, there exists $v \equiv 1 \pmod 6$ with $n = 2v+7$. Note that $n \geq 45$ implies that $v \geq 19$, so we use Result \ref{OC2v+7} to find the STS($n$).
\end{proof}
\begin{cor}
For every $n \geq 15$ with $n \equiv 1, 3\pmod 6$, there exists an AF STS$(n)$ with a rank two ucycle.
\end{cor}
\begin{proof}
Using Result \ref{main}, we construct an AF STS($n$) with a 1-ocycle. The 1-ocycle in compressed form is a rank two ucycle.
\end{proof}
\section{Other STS Constructions with Overlap Cycles}
In this section, we look at several other known constructions for Steiner triple systems, and show their corresponding 1-ocycle constructions.
\begin{const}\label{PC}
\emph{(See \cite{TripleSystems}, p. 39 - Direct Product)} Given $(X, \mathcal{A})$, an STS($u$) and $(Y, \mathcal{B})$, an STS($v$), identify $X$ with $\mathbb{Z}_u$ and $Y$ with $\mathbb{Z}_v$. We construct a new STS($uv$) $=(Z, \mathcal{C})$ with points: $$Z = \mathbb{Z}_u \times \mathbb{Z}_v$$ and blocks:
\begin{enumerate}
\item $\{(i,a), (i,b), (i,c)\}$ with $i \in \mathbb{Z}_u$ and $\{a,b,c\} \in \mathcal{B}$,
\item $\{(i,a), (j,a), (k,a)\}$ with $\{i,j,k\} \in \mathcal{A}$ and $a \in \mathbb{Z}_v$, and
\item $\{(i,a), (j,b), (k,c)\}$ with $\{i,j,k\} \in \mathcal{A}$ and $\{a,b,c\} \in \mathcal{B}$.
\end{enumerate}
\end{const}
The following theorem (see \cite{TripleSystems}) proves that Construction \ref{PC} is correct.
\begin{thm}
If $(X, \mathcal{A})$ is an STS$(u)$ and $(Y, \mathcal{B})$ is an STS$(v)$, then $(Z, \mathcal{C})$ is an STS$(uv)$.
\end{thm}
An interesting consequence of the direct product is the following theorem.
\begin{thm}
\emph{(See \cite{TripleSystems}, Lemma 7.12)} The automorphism group of the direct product of two triple systems is the direct product of their automorphism groups.
\end{thm}
This theorem implies another method for constructing AF Steiner triple systems that admit ocycles. Beginning with two AF Steiner triple systems with corresponding 1-ocycles, we can use the following result to construct a 1-ocycle on their direct product.
\begin{result}\label{OCPC}
If there exists an STS$(u)$ with a 1-overlap cycle and an STS$(v)$ with a 1-overlap cycle, then there exists an STS$(uv)$ with a 1-ocycle.
\end{result}
\begin{proof}
Let $(X, \mathcal{A})$ be an STS($u$) and $(Y, \mathcal{B})$ be an STS($v$) that admit 1-ocycles $O(\mathcal{A})$ and $O(\mathcal{B})$, respectively. We construct an STS($uv$) using Construction \ref{PC} (the direct product). For each $i \in \mathbb{Z}_u$, we have a 1-ocycle covering the triples of type 1, namely $i \oplus O(\mathcal{B})$. Similarly, for each $a \in \mathbb{Z}_v$, we have a 1-ocycle covering the triples of type 2: $O(\mathcal{A}) \oplus a$. Lastly, for each $A=\{i,j,k\} \in \mathcal{A}$ and each $B =\{a,b,c\} \in \mathcal{B}$, we can construct the following 1-ocycle:
$$\begin{array}{ccc}
\{(i,a), & (j,b), & (k,c)\} \\
\{(k,c), & (j,a), & (i,b)\} \\
\{(i,b) ,& (j,c), & (k,a)\} \\
\{(k,a) ,& (j,b), & (i,c)\} \\
\{(i,c) ,& (j,a), & (k,b)\} \\
\{(k,b) ,& (j,c), & (i,a)\}
\end{array}$$
To connect cycles covering triples of types (1) and (2), we connect wherever possible. Starting with $0 \oplus O(\mathcal{B})$, we connect all cycles over triples of type (2). Then, starting with an arbitrary, already connected, cycle over triples of type (2), we repeat the process by adding cycles over triples of type (1) wherever possible. We continue this process of extending our cycle until we no longer are able to add any more cycles.
We will always be able to continue to connect cycles, except when all cycles are connected, or:
\begin{enumerate}
\item there exists $i \in \mathbb{Z}_u$ that never appears as an overlap point in $O(\mathcal{A})$, and/or,
\item there exists $a \in \mathbb{Z}_v$ that never appears as an overlap point in $O(\mathcal{B})$.
\end{enumerate}
If we have both cases, then we choose a block $A \in \mathcal{A}$ containing $i$, say $A = \{i,j,k\}$, and a block $B \in \mathcal{B}$ containing $a$, i.e. $B = \{a,b,c\}$. Then we arrange the cycle covering the triples from $A \times B$ to begin with $(i,a)$. Since two points of $A$ must appear as overlap points in $O(\mathcal{A})$ and $i$ is not one of them, we must have that $j$ and $k$ are overlap points in $O(\mathcal{A})$. Similarly, $b$ and $c$ must be overlap points in $O(\mathcal{B})$. Thus we can connect the cycle for $A \times B$ to the cycles $k \oplus O(\mathcal{B})$ (at point $(k,c)$) and $O(\mathcal{A}) \oplus c$ (at point $(k,c)$ as well). Note that since each block can only contain one hidden element, this process will never use a block from $\mathcal{A}$ or $\mathcal{B}$ more than once. If only case (1) or case (2) holds (but not both), this process is repeated with an arbitrary choice of block from $\mathcal{B}$.
\end{proof}
\begin{const}\label{3m6}
\emph{(\cite{Bose}, Bose Construction)} Suppose that $n \equiv 3 \pmod 6$; then $n=3m$ for some $m$ odd. The point set is made up of three copies of the integers modulo $m$. Formally: $$X = \mathbb{Z}_3 \times \mathbb{Z}_m.$$ Blocks are of two types:
\begin{enumerate}
\item $\{(a,i), (a,j), (a+1,k)\}$ with $i+j = 2k$ for each $a \in \mathbb{Z}_3$
\item $\{(0,i), (1,i), (2,i)\}$ for each $i \in \mathbb{Z}_m$
\end{enumerate}
\end{const}
The following theorem from \cite{Bose} proves that Construction \ref{3m6} is correct.
\begin{thm}
\emph{\cite{Bose}} If $n \equiv 3 \pmod 6$, there exists an STS$(n)$.
\end{thm}
\begin{result}\label{OC3m6}
For $n \equiv 3 \pmod 6$ with $n>3$, there exists an STS$(n)$ that admits a 1-ocycle.
\end{result}
\begin{proof}
We will use Construction \ref{3m6} to create an STS($n$), then construct 1-ocycles to cover each type of triples, and finally show how to connect them into one large cycle. First, note that we have $m\geq 3$ since $n>3$, and so there exists at least three triples of each kind.
\\
\\
\textbf{Step 1: Triples of type (1) with $a=1$:} Define the value $\min \{i-j ,j-i \}$, where subtraction is done in the group $\mathbb{Z}_m$, to be the \textbf{distance} for the triple $\{(1,i), (1,j), (2,k)\}$. Partition the blocks of type (1) into classes so that the blocks $\{(1,i), (1,j), (2,k)\}$ and $\{(1,r), (1,s), (2,t)\}$ are in the same class if and only if they have the same distance. This defines an equivalence relation on the set of blocks of type (1) with $\frac{m-1}{2}$ different equivalence classes. Create a cycle using the set of blocks having the form $\{(1,i), (1,i+1), (2,i+\frac{m-1}{2}+1)\}$ as shown below: $$(1,0) (1,1) (1,2) \cdots (1,m-1) (1,0)$$ in compressed form. Create similar (possibly shorter) cycles using the blocks within each of the other equivalence classes. This creates at least one, if not several disjoint, cycles for each equivalence class. Since the first cycle created (using blocks with distance 1) has every point $(1,i)$ as an overlap point, we can combine all of these cycles to make one long cycle.
\\
\\
\textbf{Step 2: Triples of type (1) with $a=2$:}
Repeat as in Step 1. We pay careful attention to attach the cycle for distance 2 blocks at the point $(2,0)$. Note that this is possible since distance 2 also creates one long cycle covering the entire equivalence class, as $m$ must be odd. Now we may be assured that the cycle corresponding to distance 2 does not have any cycles attached at the overlap point $(2,1)$ between the blocks $\{(2,m-1), (0,0), (2,1)\}$ and $\{(2,1), (0,2), (2,3)\}$. Then, when we have combined all blocks of type (1) with $a=2$ to make a cycle, we convert the cycle to a string by cutting it between these two blocks and then reversing the order of the last two points. In other words, we now have a string that begins with $(2,1) (0,2) (2,3)$ and ends with $(2,m-1) (2,1) (0,0)$.
\\
\\
\textbf{Step 3: Triples of type (2) and (1) with $a=0$:} Repeat as in Step 1 excluding the equivalence class with distance 2. For these excluded blocks, we partition the set of blocks of type (1) and (2) into sets of size two by grouping together: $$\{(0,i), (2,i), (1,i)\} \hbox{ and } \{(1,i), (0,i-1), (0,i+1)\}.$$ Clearly the blocks in each set of size two can form a 1-overlap string, and then we can combine each of these strings to obtain a 1-ocycle of the form: $$\underline{(0,1)} (2,1) \underline{(1,1)} (0,0) \underline{(0,2)} \cdots \underline{(0,i)} (2,i) \underline{(1,i)} (0,i-1) \underline{(0,i+1)} \cdots \underline{(0,0)} (2,0) \underline{(1,0)} (0,m-1) \underline{(0,1)}$$ $$\hbox{or}$$ $$(0,1) (1,1) (0,2) (1,2) \cdots (0,i) (1,i) (0,i+1) (1,i+1) \cdots (0,0) (1,0) (0,1)$$ in compressed form.
\\
\\
\textbf{Step 4: Combining the triples from Step 1 and Step 3} Since the cycles created in Step 1 and Step 3 both contain every point $(1,i)$ as an overlap point, we can combine these two cycles. More importantly, we have a choice of where to combine the cycles, since we have at least two choices for an overlap point $(1,i)$. We choose to combine the two cycles at an overlap point other than $(1,1)$. Then, we can create a string from this cycle by cutting the cycle between the blocks $\{(0,1), (2,1), (1,1)\}$ and $\{(1,1), (0,0), (0,2)\}$ (from cycle from Step 3), and reversing the order of the first two and the last two elements. In other words, we now have a string that begins with $\{(2,1), (0,1), (1,1)\}$ and ends with $\{(1,1), (0,2), (0,0)\}$.
\\
\\
To create our final 1-ocycle, we recall that our string from Step 2 also begins with the point $(2,1)$ and ends with the point $(0,0)$, and so we can combine these two strings into one large cycle by reversing the order of the string from Step 4.
\end{proof}
\begin{const}\label{1m6}
\emph{(\cite{Skolem}, Skolem Construction)} If $n \equiv 1 \pmod 6$, then $n=6t+1$ for some $t \in \mathbb{Z}$. We define the point set as $$Y = (\mathbb{Z}_{2t} \times \mathbb{Z}_3) \cup \{ \infty\}.$$ Then we define three types of blocks:
\begin{enumerate}
\item $ A_x = \{(x,0), (x,1), (x,2)\}$ for $0 \leq x \leq t-1$.
\item $B_{x,y,i} = \{(x,i), (y,i), (x \circ y, i+1)\}$ for each $x , y \in \mathbb{Z}_{2t}$ with $x<y$ and each $i \in \mathbb{Z}_3$, and where $x \circ y = \pi(x+y \pmod {2t})$ and $$\pi(z) = \left\{ \begin{array}{ll} z/2, & \hbox{ if } z \hbox{ is even,} \\ (z+2t-1)/2, & \hbox{ if } z \hbox{ is odd.} \end{array}\right.$$
\item $C_{x,i} = \{\infty, (x+t,i), (x,i+1)\}$ for each $0 \leq x \leq t-1$ and $i \in \mathbb{Z}_3$.
\end{enumerate}
\end{const}
The following theorem from \cite{Skolem} proves that Construction \ref{1m6} is correct.
\begin{thm}
If $n \equiv 1 \pmod 6$, then there is an STS$(n)$.
\end{thm}
\begin{result}\label{OC1m6}
For $n \equiv 1 \pmod 6$ with $n>1$, there exists an STS$(n)$ that admits a 1-overlap cycle.
\end{result}
\begin{proof}
We will use Construction \ref{1m6} to construct an STS($n$), then show how to construct disjoint cycles for most triples of type (2), then disjoint cycles for triples of types (1) and (3), and finally show how to combine them to make one large 1-ocycle containing all triples.
\\
\\
\textbf{Step 1: Triples of type (2):} The triples of type (2) can be partitioned based on the pair $\{(x,i), (y,i)\}$. Similar to Result \ref{OC3m6}, we define the \textbf{distance} of the triple to be the smaller of $x-y$ and $y-x$ (modulo $2t$). Then, we can partition the set of triples of type (2) into classes that share the same distance for each distance $k<t$. Following the method from Result \ref{OC3m6} (Step 1), we can create disjoint cycles that contain all of these triples. Note that the triples corresponding to distance 1 make one long cycle for each second coordinate. This cycle is (in compressed form): $$(0,i) (1,i) (2,i) \ldots (2t-1,i) \hbox{ for } i \in \mathbb{Z}_3.$$ These cycles contain every point from $\mathbb{Z}_{2t} \times \{i\}$ as an overlap point, and so we can hook up all of them to make three long cycles---one for each $i \in \mathbb{Z}_3$. These cycles cover all triples of type (2) except those with distance $t$.
\\
\\
\textbf{Step 2: Triples of types (1), (2), (3):} We begin by partitioning the triples of type (3) into classes that contain the following blocks: $$\{\infty, (x+t,0), (x,1)\}, \{\infty, (x+t,1), (x,2)\}, \{\infty, (x+t,2), (x,0)\}.$$ Note that no other triples of type (3) contain any points with $x$ or $x+t$ as a first coordinate. This set of blocks has a corresponding triple of type (1): $$\{(x,0), (x,1), (x,2)\}.$$ It also has a corresponding triple of type (2) with distance $t$: $$\{(x,i),(x+t,i), (x \circ (x+t), i+1)\} \hbox{ for } i \in \mathbb{Z}_3.$$ Using these blocks and defining $y = x+t$, we create the following cycle: $$\begin{array}{ccccccccccccccc} \underline{x2} & (x\circ y)0 & \underline{y2} & x0 & \underline{\infty} & x1 & \underline{y0} & (x \circ y)1 & \underline{x0} & x2 & \underline{x1} & (x \circ y)2 & \underline{y1} & \infty & \underline{x2} \end{array}$$ $$\hbox{or}$$ $$ \begin{array}{cccccccccc} x2 & y2 & \infty & y0 & x0 & x1 & y1 & x2 \end{array}$$ in compressed form. This creates a set of disjoint cycles that cover all of the remaining triples.
\\
\\
To combine all of our cycles and create our final 1-ocycle, we note that the cycles from Step 2 each have at least one overlap point of the form $(x,1)$ with $x \in \mathbb{Z}_t$, and so we can hook these cycles all up to the cycle from Step 1 that corresponds to $i=1$. Also, each of the cycles from Step 2 also have overlap points $(x,i)$ corresponding to $i=1,2$ and $x \in \mathbb{Z}_t$, and so we can connect the remaining two cycles from Step 1.
\end{proof}
We can now use the direct constructions to prove the existence of an STS($v$) that admits a 1-ocycle for every $v \equiv 1, 3 \pmod 6$.
\begin{thm}\label{easymain}
For every $v \equiv 1, 3 \pmod 6$, there exists an STS($v$) that admits a 1-ocycle.
\end{thm}
\begin{proof}
For $n \equiv 3 \pmod 6$ with $n \geq 9$, we apply Result \ref{OC3m6} to obtain the desired system. For $n \equiv 1 \pmod 6$ with $n \geq 7$, we apply Result \ref{OC1m6} to obtain the desired system.
\end{proof}
\begin{cor}
For every $n \geq 7$ with $n \equiv 1, 3\pmod 6$, there exists an STS$(n)$ with a rank two ucycle.
\end{cor}
\begin{proof}
Using Theorem \ref{easymain}, we construct an STS($n$) with a 1-ocycle. The 1-ocycle in compressed form is a rank two ucycle.
\end{proof}
\begin{appendix}
\section{Appendix}
Included in this appendix are the necessary base cases for Constructions \ref{C2v+1} and \ref{C2v+7}.
\\
\\
$\mathbf{v=19:}$ We use the AF design (from \cite{STS19}) and produce the 1-ocycle:
$$\begin{array}{l|l|l|l|l|l}
1,2,3 & 6,15,8 & 15,17,10 & 16,11,14 & 19,1,18 & 17,2,19 \\
3,5,6 & 8,1,9 & 10,5,14 & 14,17,7 & 18,2,16 & 19,14,8 \\
6,2,4 & 9,2,11 & 14,6,9 & 7,15,9 & 16,3,19 & 8,16,7 \\
4,10,13 & 11,5,13 & 9,19,13 & 9,12,18 & 19,15,4 & 7,4,3 \\
13,1,12 & 13,8,17 & 13,18,7 & 18,15,11 & 4,8,12 & 3,17,18 \\
12,2,14 & 17,9,5 & 7,12,11 & 11,1,10 & 12,15,3 & 18,8,5 \\
14,1,15 & 5,12,19 & 11,4,17 & 10,2,8 & 3,13,14 & 5,4,1 \\
15,13,2 & 19,11,6 & 17,12,6 & 8,11,3 & 14,18,4 \\
2,5,7 & 6,13,16 & 6,18,10 & 3,9,10 & 4,9,16\\
7,1,6 & 16,5,15 & 10,12,16 & 10,7,19 & 16,1,17
\end{array}$$
\\
\\
$\mathbf{v=21:}$ We use the AF design (from \cite{AFSTS}) to produce the 1-ocycle:
$$\begin{array}{c|c|c|c|c|c|c}
00, \infty_1 , 11 & 05 , \infty_1 , 16 & 12 , 14 , 08 & 16 , 18 , 03 & 01 , 10 , 15 & 05 , 10 , 14 & 05,03,04 \\
11 , \infty_0 , 01 & 16 , \infty_0 , 06 & 08 , \infty_2 , 11 & 03 , \infty_2 , 15 & 15 , 12 , 18 & 14 , 13 , 06 & 04, 01, 07 \\
01 , \infty_1 , 12 & 06 , \infty_1 , 17 & 11 , 13 , 17 & 15 , 17 , 02 & 18 , 10 , 02 & 06 , 11 , 15 & 07, 08, 06 \\
12 , \infty_0 , 02 & 17 , \infty_0 , 07 & 07 , \infty_2 , 10 & 02 , \infty_2 , 14 & 02 ,11 , 16 & 15 , 14 , 07 & 06, 03, 00 \\
02 , \infty_1 , 13 & 07 , \infty_1 , 18 & 10 , 12 , 06 & 14 , 16 , 01 & 16 , 13 , 10 & 07 , 12 , 16 & 00, 04, 08 \\
13 , \infty_0 , 03 & 18 , \infty_0 , 08 & 06 , \infty_2 , 18 & 01 , \infty_2 , 13 & 10 , 11 , 03 & 16 , 15 , 08 & 08, 01, 03 \\
03 , \infty_1 , 14 & 08 , \infty_1 , 10 & 18 , 11 , 05 & 13 , 15 , 00 & 03 , 17 , 12 & 08 , 13 , 17 & 03, 07, 02 \\
14 , \infty_0 , 04 & 10 , 00 , \infty_0 & 05 , \infty_2 , 17 & 00 , 18 , 14 & 12 , 11 , 04 & 17 , 16 , 00 & 02, 04, 06 \\
04 , \infty_1 , 15 & \infty_0 , \infty_1 , \infty_2 & 17 , 10 , 04 & 14 , 11 , 17 & 04 , 18 , 13 & 00,01, 02 & 06, 01, 05 \\
15 , \infty_0 , 05 & \infty_2 , 00 , 12 & 04 , \infty_2 , 16 & 17 , 18 , 01 & 13 , 12 , 05 & 02, 08, 05 & 05, 07, 00
\end{array}$$
$\mathbf{v=25:}$ We use the AF design (from \cite{AFSTS}) to produce the 1-ocycle:
$$\begin{array}{c|c|c|c|c|c|c}
\infty_1, 00, 11 & 18, \infty_0, 08 & 06, \infty_3, 10 & 04, \infty_5, 11 & 07, 08, 06 & 13, 16, 14 & 14, \infty_6, 06 \\
11, \infty_0, 01 & 08, \infty_1, 10 & 10, \infty_2, 07 & 11, \infty_4, 05 & 06, 03, 00 & 14, 17, 15 & 06, 01, 15 \\
01, \infty_1, 12 & 10, \infty_0, 00 & 07, \infty_3, 11 & 05, \infty_5, 12 & 00, 04, 08 & 15, 18, 16 & 15, \infty_6, 07 \\
12, \infty_0, 02 & 00, \infty_3, 13 & 11, \infty_2, 08 & 12, \infty_4, 06 & 08, 01, 03 & 16, 10, 17 & 07, 02, 16 \\
02, \infty_1, 13 & 13, \infty_2, 01 & 08, \infty_3, 12 & 06, \infty_5, 13 & 03, 07, 02 & 17, 11, 18 & 16, \infty_6, 08 \\
13, \infty_0, 03 & 01, \infty_3, 14 & 12, \infty_2, 00 & 13, \infty_4, 07 & 02, 04, 06 & 18, 12, 10 & 08, 03, 17 \\
03, \infty_1, 14 & 14, \infty_2, 02 & 00, \infty_5, 16 & 07, \infty_5, 14 & 06, 01, 05 & 10, \infty_6, 02 & 17, 00, \infty_6 \\
14, \infty_0, 04 & 02, \infty_3, 15 & 16, \infty_4, 01 & 14, \infty_4, 08 & 05, 07, 00 & 02, 06, 11 & \infty_6, \infty_2, \infty_0 \\
04, \infty_1, 15 & 15, \infty_2, 03 & 01, \infty_5, 17 & 08, \infty_5, 15 & 00, 04, 18 & 11, \infty_6, 03 & \infty_0, \infty_3, \infty_1 \\
15, \infty_0, 05 & 03, \infty_3, 16 & 17, \infty_4, 02 & 15, \infty_4, 00 & 18, \infty_6, 01 & 03, 07, 12 & \infty_1, \infty_4, \infty_2 \\
05, \infty_1, 16 & 16, \infty_2, 04 & 02, \infty_5, 18 & 00, 01, 02 & 01, 05, 10 & 12, \infty_6, 04 & \infty_2, \infty_5, \infty_3 \\
16, \infty_0, 06 & 04, \infty_3, 17 & 18, \infty_4, 03 & 02, 08, 05 & 10, 13, 11 & 04, 08, 13 & \infty_3, \infty_6, \infty_4 \\
06, \infty_1, 17 & 17, \infty_2, 05 & 03, \infty_5, 10 & 05, 03, 04 & 11, 14, 12 & 13, \infty_6, 05 & \infty_4, \infty_0, \infty_5 \\
17, \infty_0, 07 & 05, \infty_3, 18 & 10, \infty_4, 04 & 04, 01, 07 & 12, 15, 13 & 05, 00, 14 & \infty_5, \infty_6, \infty_1 \\
07, \infty_1, 18 & 18, \infty_2, 06 &
\end{array}$$
\\
\\
$\mathbf{v=27:}$ We use the AF design (from \cite{AFSTS}) to produce the 1-ocycle:
$$\hspace{-7mm}\begin{array}{c|c|c|c|c|c}
00, \infty, 10 & 19, 18, 12 & 07, \infty, 17 & 04, 00, 12 & 01, 06, 1(10) & 0(11), 03, 17 \\
10, 0(12), 01 & 12, 1(10), 16 & 17, 06, 08 & 12, 0(12), 05 & 1(10), 05, 02 & 17, 02, 0(12) \\
01, \infty, 11 & 16, 15, 10 & 08, \infty, 18 & 05, 01, 13 & 02, 07, 1(11) & 0(12), 04, 18 \\
11, 12, 10 & 10, 1(12), 1(11) & 18, 07, 09 & 13, 00, 06 & 1(11), 06, 03 & 18, 03, 00 \\
10, 19, 1(10) & 1(11), 12, 14 & 09, \infty, 19 & 06, 02, 14 & 03, 08, 1(12) & 00, 07, 01 \\
1(10), 1(12), 11 & 14, 16, 11 & 19, 08, 0(10) & 14, 01, 07 & 1(12), 07, 04 & 01, 08, 02 \\
11, 13, 15 & 11, 19, 17 & 0(10), \infty, 1(10) & 07, 03, 15 & 04, 09, 10 & 02, 09, 03 \\
15, 17, 1(11) & 17, 10, 18 & 1(10), 09, 0(11) & 15, 02, 08 & 10, 08, 05 & 03, 0(10), 04 \\
1(11), 19, 16 & 18, 1(11), 11 & 0(11), \infty, 1(11) & 08, 04, 16 & 05, 0(10), 11 & 04, 0(11), 05 \\
16, 17, 1(12) & 11, 00, 02 & 1(11), 0(10), 0(12) & 16, 03, 09 & 11, 09, 06 & 05, 0(12), 06 \\
1(12), 18, 14 & 02, \infty, 12 & 0(12), \infty, 1(12) & 09, 05, 17 & 06, 0(11), 12 & 06, 00, 07 \\
14, 19, 15 & 12, 01, 03 & 1(12), 0(11), 00 & 17, 04, 0(10) & 12, 0(10), 07 & 07, 01, 08 \\
15, 1(10), 18 & 03, \infty, 13 & 00, 09, 1(11) & 0(10), 06, 18 & 07, 0(12), 13 & 08, 02, 09 \\
18, 16, 13 & 13, 02, 04 & 1(11), 08, 01 & 18, 05, 0(11) & 13, 0(11), 08 & 09, 03, 0(10) \\
13, 1(11), 1(10) & 04, \infty, 14 & 01, 0(10), 1(12) & 0(11), 07, 19 & 08, 00, 14 & 0(10), 04, 0(11) \\
1(10), 17, 14 & 14, 03, 05 & 1(12), 09, 02 & 19, 06, 0(12) & 14, 0(12), 09 & 0(11), 05, 0(12) \\
14, 10, 13 & 05, \infty, 15 & 02, 0(11), 10 & 0(12), 08, 1(10) & 09, 01, 15 & 0(12), 06, 00 \\
13, 17, 12 & 15, 04, 06 & 10, 0(10), 03 & 1(10), 07, 00 & 15, 00, 0(10) & \\
12, 15, 1(12) & 06, \infty, 16 & 03, 0(12), 11 & 00, 05, 19 & 0(10), 02, 16 & \\
1(12), 13, 19 & 16, 05, 07 & 11, 0(11), 04 & 19, 04, 01 & 16, 01, 0(11) &
\end{array}$$
$\mathbf{v=33:}$ We use the AF design (from \cite{AFSTS}) to produce the 1-ocycle:
$$\begin{array}{c|c|c|c}
13, \infty_1, 02 & 1(13), 16, 0(14) & 1(13), 11, 08 & 03, 09, 0(13) \\
02, \infty_2, 15 & 0(14), \infty_0, 1(14) & 08, 15, 1(14) & 0(13), 0(10), 04\\
15, \infty_1, 04 & 1(14), 14, 19 & 1(14), 12, 09 & 04, 0(14), 05\\
04, \infty_2, 17 & 19, 1(10), 0(14) & 09, 16, 10 & 05, 02, 0(10) \\
17, \infty_1, 06 & 0(14), 18, 1(12) & 10, 0(10), 13 & 0(10), 07, 0(14) \\
06, \infty_2, 19 & 1(12), 15, 0(13) & 13, 1(10), 0(11) & 0(14), 0(11), 06\\
19, \infty_1, 08 & 0(13), 17, 1(11) & 0(11), 15, 19 & 06, 0(12), 0(10) \\
08, \infty_2, 1(11) & 1(11), 14, 0(12) & 19, 12, 0(10) & 0(10), 03, 0(11) \\
1(11), \infty_1, 0(10) & 0(12), 16, 1(10) & 0(10), 14, 18 & 0(11), 09, 04 \\
0(10), \infty_2, 1(13) & 1(10), \infty_0, 0(10) & 18, 11, 09 & 04, 08, 0(12) \\
1(13), \infty_1, 0(12) & 0(10), 17, 11 & 09, 13, 17 & 0(12), 09, 05 \\
0(12), \infty_2, 10 & 11, 14, 0(11) & 17, 10, 08 & 05, 08, 0(13) \\
10, \infty_1, 0(14) & 0(11), \infty_0, 1(11) & 08, 12, 16 & 0(13), 07, 06 \\
0(14), \infty_2, 12 & 1(11), 11, 16 & 16, 1(14), 07 & 06, 08, 09 \\
12, \infty_1, 01 & 16, 17, 0(11) & 07, 11, 15 & 09, 0(14), 02 \\
01, \infty_2, 14 & 0(11), 18, 12 & 15, 1(13), 06 & 02, 1(11), 10 \\
14, \infty_1, 03 & 12, 15, 0(12) & 06, 10, 14 & 10, 0(13), 12\\
03, \infty_2, 16 & 0(12), 19, 13 & 14, 1(12), 05 & 12, 00, 14 \\
16, \infty_1, 05 & 13, 16, 0(13) & 05, 1(14), 13 & 14, 02, 16 \\
05, \infty_2, 18 & 0(13), 1(10), 14 & 13, 1(11), 04 & 16, 04, 18 \\
18, \infty_1, 07 & 14, 17, 0(14) & 04, 1(13), 12 & 18, 06, 1(10) \\
07, \infty_2, 1(10) & 0(14), 1(11), 15 & 12, 1(10), 03 & 1(10), 08, 1(12) \\
1(10), \infty_1, 09 & 15, 18, 00 & 03, 1(12), 11 & 1(12), 1(14), 0(10) \\
09, \infty_2, 1(12) & 00, 1(12), 16 & 11, 19, 02 & 0(10), 15, 16 \\
1(12), \infty_1, 0(11) & 16, 19, 01 & 02, 01, 00 & 16, \infty_0, 06 \\
0(11), \infty_2, 1(14) & 01, 1(13), 17 & 00, 0(10), 09 & 06, 11, 12 \\
1(14), \infty_1, 0(13) & 17, 1(10), 02 & 09, 07, 01 & 12, \infty_0, 02 \\
0(13), \infty_2, 11 & 02, 1(14), 18 & 01, 05, 03 & 02, 1(12), 1(13) \\
11,00, \infty_1 & 18, 1(11), 03 & 03, 00, 04 & 1(13), \infty_0, 0(13) \\
\infty_1, \infty_0, \infty_2 & 03, \infty_0, 13 & 04, 06, 01 & 0(13), 18, 19 \\
\infty_2, 00,13 & 13, 18, 1(13) & 01, 0(10), 08 & 19, \infty_0, 09 \\
13, 01, 15 & 1(13), 1(14), 03 & 08, 00, 07 & 09, 14, 15 \\
15, 03, 17 & 03, 10, 19 & 07, 03, 0(12) & 15, \infty_0, 05 \\
17, 05, 19 & 19, 1(12), 04 & 0(12), 0(14), 01 & 05, 10, 11 \\
19, 07, 1(11) & 04, 11, 1(10) & 01, 0(13), 0(11) & 11, \infty_0, 01 \\
1(11), 09, 1(13) & 1(10), 1(13), 05 & 0(11), 08, 02 & 01, 1(11), 1(12) \\
1(13), 0(11), 10 & 05, 12, 1(11) & 02, 03, 06 & 1(12), \infty_0, 0(12) \\
10, 19, 01 & 1(11), 1(14), 06 & 06, 00, 05 & 0(12), 17, 18 \\
01, 1(10), 1(14) & 06, 13, 1(12) & 05, 0(11), 07 & 18, \infty_0, 08 \\
1(14), 17, 00 & 1(12), 10, 07 & 07, 04, 02 & 08, 13, 14 \\
00, \infty_0, 10 & 07, \infty_0, 17 & 02, 0(13), 0(12) & 14, \infty_0, 04 \\
10, 15, 1(10) & 17, 1(12), 12 & 0(12), 0(11), 00 & 04, 10, 1(14) \\
1(10), 1(11), 00 & 12, 13, 07 & 00, 0(14), 0(13) & 1(14), 0(12), 11\\
00, 19, 1(13) & 07, 14, 1(13) & 0(13), 08, 03 & 11, 0(14), 13
\end{array}$$
\end{appendix}
\end{document} |
\begin{document}
\begin{center}
{\bf EXTREMALITY OF CONVEX SETS WITH SOME APPLICATIONS}\\[3ex]
BORIS S. MORDUKHOVICH\footnote{Corresponding author. Department of Mathematics, Wayne State University, Detroit, MI 48202, USA ([email protected]) and Peoples' Friendship University of Russia, Moscow 117198, Russia. Email: [email protected], phone: (734)369-3675, fax: (313)577-7596. Research of this author was partly supported by the National Science Foundation under grants DMS-1007132 and DMS-1512846 and by the Air Force Office of Scientific Research under grant \#15RT0462.} and NGUYEN MAU NAM\footnote{Fariborz Maseeh Department of Mathematics and Statistics, Portland State University, Portland, OR 97207, USA. Email: [email protected]. Research of this author was partly supported by the National Science Foundation under grant \#1411817.}.\\[3ex]
{\bf Dedicated to the memory of Jonathan Michael Borwein}
\end{center}
\small{\bf Abstract:} In this paper we introduce an enhanced notion of extremal systems for sets in locally convex topological vector spaces and obtain efficient conditions for set extremality in the convex case. Then we apply this machinery to deriving new calculus results on intersection rules for normal cones to convex sets and on infimal convolutions of support functions.\\[1ex]
\noindent {\bf Keywords:} Convex and variational analysis, extremal systems of sets, normals to convex sets, normal intersection rules, support functions, infimal convolutions
\newtheorem{Theorem}{Theorem}[section]
\newtheorem{Proposition}[Theorem]{Proposition}
\newtheorem{Remark}[Theorem]{Remark}
\newtheorem{Lemma}[Theorem]{Lemma}
\newtheorem{Corollary}[Theorem]{Corollary}
\newtheorem{Definition}[Theorem]{Definition}
\newtheorem{Example}[Theorem]{Example}
\renewcommand{\theequation}{\thesection.\arabic{equation}}
\normalsize\vspace*{-0.2in}
\section{Introduction}\vspace*{-0.1in}
{\em Convex analysis} has been well recognized as an important area of mathematics with numerous applications to optimization, control, economics, and many other disciplines. We refer the reader to the fundamental monographs \cite{bc,bl,HU,r,z} and the bibliographies therein for various aspects of convex analysis and its applications. Jon Borwein, who unexpectedly passed away on August 2, 2016, made pivotal contributions to these and related fields of Applied Mathematics, among other areas of his fantastic creative activity.
Methods and constructions of convex analysis play also a decisive role in the study of nonconvex functions and sets by using certain convexification procedures. In particular, calculus and applications of Clarke's generalized gradients for nonconvex functions \cite{c} is based on appropriate convexifications and employing techniques and results of convex analysis.
Besides this, other ideas have been developed in the study and applications of nonconvex functions, sets, and set-valued mappings in the framework of {\em variational analysis}, which employs variational/optimization principles married to perturbation and approximation techniques; see the books \cite{BZ,m-book1,RockWets-VA} for extended expositions in finite and infinite dimensions. Powerful tools, results, and applications of variational analysis have been obtained by using the {\em dual-space geometric approach} \cite{m-book1} based on the {\em extremal principle} (a geometric variational principle) for systems of sets. This approach produces first a {\em full calculus} of generalized normals to nonconvex sets and then applies it to establish comprehensive calculus rules for related subgradients of extended-real-valued functions and coderivatives of set-valued mappings. Needless to say that well-developed calculus of generalized differentiation is an unavoidable requirement and the key for various applications.
Addressing generally nonconvex objects, results of variational analysis contain corresponding convex facts as their particular cases. However, basic variational techniques involving limiting procedures do not fully capture advantages from the presence of convexity. Indeed, the major calculus results of \cite{m-book1} hold in {\em Asplund} spaces (i.e., such Banach spaces where every separable subspace has a separable dual) and the {\em closedness} of sets (epigraphs for extended-real-valued function, graphs for set-valued mappings) is a standing assumption.
The major goal of this paper is to investigate a counterpart of the variational geometric approach to the study of convex sets in locally convex topological vector (LCTV) spaces without any completeness and closedness assumptions. Based on an enhanced notion of {\em set extremality}, which is a global version of the corresponding local concept largely developed and applied in \cite{m-book1} while occurring to be particularly useful in the convex setting mainly exploited here, this approach allows us to obtain the basic intersection rule for normals to convex sets under a new qualification condition. The same approach also allows us to derive new calculus results for support functions of convex set intersections in general LCTV spaces. Note that these results can be used to obtain major calculus rules of generalized differentiation and Fenchel conjugates for extended-real-valued convex functions; cf.\ our previous publications \cite{bmn,bmn1} for some versions in finite dimensions.
The rest of the paper is organized as follows. In Section~2 we introduce the aforementioned version of set extremality, establish its relationships with the separation property for convex sets, and derive various extremality conditions. The obtained results are applied in Section~3 to get the normal cone representation for convex set intersections under a new qualification condition. In Section~4 this approach is employed to represent the support function of set intersections via the infimal convolution of supports to intersection components.
For simplicity of presentation we suppose, unless otherwise stated, that all the spaces under consideration are {\em normed linear} spaces. The reader can check that the results obtained below in this setting hold true in the LCTV space generality.
The notation used throughout the paper is standard in the areas of functional, convex, and variational analysis; cf.\ \cite{m-book1,r,RockWets-VA,z}. Recall that the closed ball centered at $\bar{x}$ with radius $r>0$ is denoted by $\Bbb B(\bar{x};r)$ while the closed unit ball of the space $X$ in question and its topological dual $X^*$ are denoted by $\Bbb B$ and $\Bbb B^*$, respectively, if no confusion arises. Given a convex set $\Omega\subset X$, we write $\Bbb R^+(\Omega):=\{tv\in X|\;t\in\Bbb R_+,\;v\in\Omega\}$, where $\Bbb R_+$ signifies the collection of positive numbers, and use the symbol $\overline\Omega$ for the topological closure of $\Omega$. Finally, we recall the notation for the (algebraic) {\em core} of a set:
\begin{equation}\label{core-def}
\mbox{\rm core}\,\Omega:=\big\{x\in\Omega\big|\;\forall\,v\in X\;\exists\,\gamma>0\;\mbox{\rm such that }\;x+tv\in\Omega\;\mbox{\rm whenever }\;|t|<\gamma\big\}.
\end{equation}
In what follow we deal with {\em extended-real-valued} functions $f\mbox{\rm co}lon X\tauo\Bar{\R}:=(-\mbox{\rm int}\,nfty,\mbox{\rm int}\,nfty]$ and assume that are {\em proper}, i.e., $\mbox{\rm dom}\, f:=\{x\mbox{\rm int}\,n X|\;f(x)<\mbox{\rm int}\,nfty\}\ne\emptyset$.\vspace*{-0.2in}
\section{Extremal Systems of Sets}
\setcounter{equation}{0}\vspace*{-0.1in}
We start this section with the definition of extremality for set systems, which is inspired by the notion of local set extremality in variational analysis (see \cite[Definition~2.1]{m-book1}) while having some special features that are beneficial for convex sets. In particular, we do not require that the sets have a common point.\vspace*{-0.1in}
\begin{Definition}{\bf(set extremality).}\label{ext-sys} We say that two nonempty sets $\Omega_1,\Omega_2\subset X$ form an {\sc extremal system} if for any $\varepsilon>0$ there exists $a\in X$ such that
\begin{equation}\label{setex}
\|a\|\le\varepsilon\;\;\mbox{\rm and }\;(\Omega_1+a)\cap\Omega_2=\emptyset.
\end{equation}
\end{Definition}\vspace*{-0.05in}
Observe similarly to \cite{m-book1} that the notion of set extremality introduced in Definition~\ref{ext-sys} covers (global) optimal solutions to problems of constrained optimization with scalar, vector, and set-valued objectives, various equilibrium concepts arising in operations research, mechanics, and economic modeling, etc. Furthermore, the set extremality naturally arises in deriving calculus rules of generalized differentiation in variational analysis. In particular, we are going to demonstrate this below in our device of the normal cone intersection rule and the support function representation for convex set intersections presented in the paper.
Given a convex set $\Omega\subset X$ with $\bar{x}\in\Omega$, the {\em normal cone} to $\Omega$ at $\bar{x}$ is
\begin{equation}\label{nor}
N(\bar{x};\Omega):=\big\{x^*\in X^*\big|\;\langle x^*,x-\bar{x}\rangle\le 0\;\;\mbox{\rm for all }\;x\in\Omega\big\}.
\end{equation}
The following underlying result establishes a useful characterization of set extremality and shows that, in the case of convex sets, extremality is closely related to while being different from the conventional convex separation:
\begin{equation}\label{sep}
\sup_{x\in\Omega_1}\langle x^*,x\rangle\le\inf_{x\in\Omega_2}\langle x^*,x\rangle\;\;\mbox{\rm for some }\;x^*\ne 0.
\end{equation}
Note that if $\Omega_1, \Omega_2$ are convex sets such that $\bar{x}\in\Omega_1\cap\Omega_2$, then \eqref{sep} is equivalent to
\begin{eqnarray}\label{ep}
N(\bar{x};\Omega_1)\cap\big(-N(\bar{x};\Omega_2)\big)\ne\{0\}.
\end{eqnarray}\vspace*{-0.35in}
\begin{Theorem}{\bf(set extremality and separation).}\label{extremal principle} Let $\Omega_1,\Omega_2\subset X$ be nonempty sets. Then the following assertions are fulfilled:
{\bf(i)} The sets $\Omega_1$ and $\Omega_2$ form an extremal system if and only if $0\notin{\rm int}(\Omega_1-\Omega_2)$. Furthermore, the extremality of $\Omega_1,\Omega_2$ implies that $({\rm int}\,\Omega_1)\cap\Omega_2=\emptyset$ and likewise $({\rm int}\,\Omega_2)\cap\Omega_1=\emptyset$.
{\bf(ii)} If $\Omega_1,\Omega_2$ are convex and form an extremal system and if ${\rm int}(\Omega_1-\Omega_2)\ne\emptyset$, then the separation property \eqref{sep} holds.
{\bf (iii)} The separation property \eqref{sep} always implies the set extremality \eqref{setex}, without imposing either the convexity of $\Omega_1,\Omega_2$ or the condition ${\rm int}(\Omega_1-\Omega_2)\ne\emptyset$ as in {\rm(ii)}.
\end{Theorem}\vspace*{-0.1in}
{\bf Proof.} To verify the extremality characterization in (i), suppose first that the sets $\Omega_1,\Omega_2$ form an extremal system while the condition $0\notin{\rm int}(\Omega_1-\Omega_2)$ fails. Then there is $r>0$ such that $\Bbb B(0;r)\subset\Omega_1-\Omega_2$. Put $\varepsilon:=r$ and observe that $-a\in\Omega_1-\Omega_2$ for any $a\in X$ with $\|a\|\le\varepsilon$, which gives us $(\Omega_1+a)\cap\Omega_2\ne\emptyset$ and thus contradicts \eqref{setex}. To justify the converse implication in (i), suppose that $0\notin{\rm int}(\Omega_1-\Omega_2)$. Then for any $\varepsilon>0$ we get
$$
\Bbb B(0;\varepsilon)\cap\big(X\setminus(\Omega_1-\Omega_2)\big)\ne\emptyset,
$$
which tells us that there is $a\in X$ such that $\|a\|<\varepsilon$ and $-a\in\Omega_1-\Omega_2$, i.e., \eqref{setex} holds. It remains to show in (i) that the extremality of $\Omega_1,\Omega_2$ yields $({\rm int}\,\Omega_1)\cap\Omega_2=\emptyset$. Assuming the contrary, take $x\in{\rm int}\,\Omega_1$ with $x\in\Omega_2$ and find $\varepsilon>0$ such that $x-a\in\Omega_1$ for any $a\in X$ with $\|a\|<\varepsilon$. This clearly contradicts \eqref{setex} and thus completes the proof of (i).
Next we verify (ii). Consider the two convex sets $\Lambda_1:=\Omega_1-\Omega_2$ and $\Lambda_2:=\{0\}$ in $X$. By the extremality of $\Omega_1,\Omega_2$ we have due to (i) that $({\rm int}\,\Lambda_1)\cap\Lambda_2=\emptyset$, where $\mbox{\rm int}\,\Lambda_1\ne\emptyset$ by the assumption in (ii). The classical separation theorem applied to $\Lambda_1,\Lambda_2$ tells us that $\sup_{x\in\Omega_1-\Omega_2}\langle x^*,x\rangle\le 0$ for some $x^*\ne 0$, which is clearly equivalent to \eqref{sep}. Thus assertion (ii) is justified.
To prove the final assertion (iii), take $x^*\ne 0$ from \eqref{sep} and find $c\in X$ such that $\langle x^*,c\rangle>0$. For any $\varepsilon>0$ we can select $a:=-c/k$ satisfying $\|a\|<\varepsilon$ when $k\in I\!\!N$ is sufficiently large. Let us show that \eqref{setex} holds with this vector $a$. If it is not the case, then there exists $\hat x\in\Omega_2$ such that $\hat x-a\in\Omega_1$. By the separation property \eqref{sep} we have
$$
\langle x^*,\hat x-a\rangle\le\sup_{x\in\Omega_1}\langle x^*,x\rangle\le\inf_{x\in\Omega_2}\langle x^*,x\rangle\le\langle x^*,\hat x\rangle,
$$
which gives us by the above construction of $a\in X$ that
$$
\langle x^*,\hat x\rangle-\langle x^*,a\rangle=\langle x^*,\hat x\rangle+\frac{1}{k}\langle x^*,c\rangle\le\langle x^*,\hat x\rangle,
$$
and therefore $\langle x^*,c\rangle\le 0$. It contradicts the choice of $c\in X$ and hence justifies assertion (iii) while completing in this way the proof of the theorem. $
\square$\vspace*{-0.1in}
\begin{Corollary}{\bf (sufficient conditions for extremality of convex sets).}\label{int-ext} Let $\Omega_1,\Omega_2$ be nonempty convex sets of $X$ satisfying the conditions $\mbox{\rm int}\,\Omega_1\ne\emptyset$ and $(\mbox{\rm int}\,\Omega_1)\cap\Omega_2=\emptyset$. Then the sets $\Omega_1$ and $\Omega_2$ form an extremal system. Furthermore, we have $\mbox{\rm int}(\Omega_1-\Omega_2)\ne\emptyset$.
\end{Corollary}\vspace*{-0.1in}
{\bf Proof.} It is well known that the assumptions imposed in the corollary ensure the separation property for convex sets. Thus the set extremality of $\Omega_1,\Omega_2$ follows from Theorem~\ref{extremal principle}(iii). To verify the last assertion of the corollary, take any $\bar{x}\in{\rm int}\,\Omega_1$ and find $r>0$ such that ${\rm int}\,\Bbb B(\bar{x};r)\subset\Omega_1$. Then for any fixed point $x\in\Omega_2$ we have
$$
V:={\rm int}\,\Bbb B(\bar{x};r)-x\subset\Omega_1-\Omega_2,
$$
and thus $\mbox{\rm int}(\Omega_1-\Omega_2)\ne\emptyset$ because $V$ is a nonempty open subset of $X$. $
\square$\vspace*{-0.1in}
\begin{Remark}{\bf (on the extremal principle).}\label{ext-prin} {\rm Condition \eqref{ep} is known to hold, under the name of the (exact) {\em extremal principle}, for locally extremal points of nonconvex sets. In \cite[Theorem~2.22]{m-book1} it is derived for closed subsets of Asplund spaces with the replacement of \eqref{nor} by the basic/limiting normal cone of Mordukhovich, which reduces to \eqref{nor} for convex sets. Besides the Asplund space requirement, the aforementioned result of \cite{m-book1} imposes the {\em sequential normal compactness} (SNC) assumption on one of the sets $\Omega_1,\Omega_2$. This property is satisfied for convex sets under the interiority assumption of Corollary~\ref{int-ext}; see \cite[Proposition~1.25]{m-book1}. Furthermore, in the case of closed convex sets in Banach spaces the SNC property offers significant advantages for the validity of \eqref{ep} in comparison with the interiority condition due to the SNC characterization from \cite[Theorem~1.21]{m-book1}: a closed convex set $\Omega$ with nonempty relative interior (i.e., the interior of it with respect to its span) is SNC at every $\bar{x}\in\Omega$ if and only if the closure of the span of $\Omega$ is of finite codimension. A similar characterization has been obtained in \cite[Theorem~2.5]{blm} for the more restrictive Borwein-Str\'ojwas' {\em compactly epi-Lipschitzian} (CEL) property \cite{BS} of closed convex sets in normed spaces. Note that the CEL and SNC properties may not agree even for closed convex cones in nonseparable Asplund spaces; see \cite{fm} for comprehensive results and examples.}
\end{Remark}\vspace*{-0.05in}
As established in Theorem~\ref{extremal principle}(ii), the set extremality in \eqref{setex} implies the separation property \eqref{sep} and its equivalent form \eqref{ep} whenever $\bar{x}\in\Omega_1\cap\Omega_2$ under the {\em nonempty difference interior} ${\rm int}(\Omega_1-\Omega_2)\ne\emptyset$ for arbitrary convex sets $\Omega_1,\Omega_2$ in LCTV spaces. Could we relax this assumption? The next theorem shows that it can be done, for {\em closed} convex subsets of {\em Banach} spaces, in both {\em approximate} and {\em exact} forms of the {\em convex extremal principle}. Furthermore, the results obtained therein justify that both of these forms are {\em characterizations} of the convex set extremality under the SNC property of one of the sets involved without imposing any interiority assumption on them or their difference.
To proceed, recall first the definition of the SNC property used below for convex sets; compare it with a nonconvex counterpart from \cite[Definition~1.20]{m-book1}. A subset $\Omega\subset X$ of a Banach space is {\em SNC} at $\bar{x}\in\Omega$ if for any sequence $\{(x_k,x^*_k)\}_{k\in I\!\!N}\subset X\times X^*$ we have
\begin{equation}\label{snc}
\big[x^*_k\in N(x_k;\Omega),\;x_k\in\Omega,\;x_k\to\bar{x},\;x^*_k\stackrel{w^*}{\to}0\big]\Longrightarrow\|x^*_k\|\to 0\;\;\mbox{\rm as }\;k\to\infty,
\end{equation}
where the normal cone is taken from \eqref{nor}, and where the symbol $\stackrel{w^*}{\to}$ signifies the {\em sequential} convergence in the weak$^*$ topology of $X^*$. We have already mentioned in Remark~\ref{ext-prin} the explicit description of the SNC property for closed convex sets with nonempty relative interiors in Banach spaces given in \cite[Theorem~1.21]{m-book1}. Assertion (ii) of the next theorem employs SNC \eqref{snc} for furnishing the limiting procedure in general Banach spaces.\vspace*{-0.1in}
\begin{Theorem}{\bf(approximate and exact versions of the convex extremal principle in Banach spaces).}\label{convex-ep} Let $\Omega_1$ and $\Omega_2$ be closed convex subsets of a Banach space $X$, and let $\bar{x}$ be any common point of $\Omega_1,\Omega_2$. Consider the following assertions:
{\bf (i)} The sets $\Omega_i$, $i=1,2$, form an extremal system in $X$.
{\bf (ii)} For each $\varepsilon>0$ we have:
\begin{eqnarray}\label{ep1}
\exists\,x_{i\varepsilon}\in\Bbb B(\bar{x};\varepsilon)\cap\Omega_i,\;\exists\,x^*_i\in N(x_{i\varepsilon};\Omega_i)+\varepsilon\Bbb B^*\;\;\mbox{\rm with }\;x^*_1+x^*_2=0,\;\|x^*_1\|=\|x^*_2\|=1.
\end{eqnarray}
{\bf (iii)} The equivalent properties \eqref{sep} and \eqref{ep} are satisfied.
Then we always have the implication {\rm(i)}$\Longrightarrow${\rm(ii)}. Furthermore, all the properties in {\rm(i)}--{\rm(iii)} are equivalent if in addition
either $\Omega_1$ or $\Omega_2$ is $SNC$ at $\bar{x}$.
\end{Theorem}\vspace*{-0.1in}
{\bf Proof.} Let us begin with verifying (i)$\Longrightarrow$(ii). It follows from the extremality condition that for any $\varepsilon>0$ there exists $a\in X$ such that
\begin{equation*}
\|a\|\le\varepsilon^2\;\;\mbox{\rm and }\;(\Omega_1+a)\cap\Omega_2=\emptyset.
\end{equation*}
Define the convex, lower semicontinuous, and bounded from below function $f\colon X^2\to\Bar{\R}$ by
\begin{equation}\label{ext1}
f(x_1,x_2):=\|x_1-x_2+a\|+\delta\big((x_1,x_2);\Omega_1\times\Omega_2\big),\quad(x_1,x_2)\in X^2,
\end{equation}
via the indicator function of the closed set $\Omega_1\times\Omega_2$. It follows from \eqref{setex} that $f(x_1,x_2)>0$ on $X\times X$ and $f(\bar{x},\bar{x})=\|a\|\le\varepsilon^2$ for any $\bar{x}\in\Omega_1\cap\Omega_2$. Applying to \eqref{ext1} the Ekeland variational principle (see, e.g., \cite[Theorem~2.26(i)]{m-book1}), we find a pair $(x_{1\varepsilon},x_{2\varepsilon})\in\Omega_1\times\Omega_2$ satisfying $\|x_{1\varepsilon}-\bar{x}\|\le\varepsilon$, $\|x_{2\varepsilon}-\bar{x}\|\le\varepsilon$, and
\begin{equation*}
f(x_{1\varepsilon},x_{2\varepsilon})\le f(x_1,x_2)+\varepsilon\big(\|x_1-x_{1\varepsilon}\|+\|x_2-x_{2\varepsilon}\|\big)\;\;\mbox{\rm for all }\;(x_1,x_2)\in X^2.
\end{equation*}
The latter means that the function $\varphi(x_1,x_2):=f(x_1,x_2)+\varepsilon\big(\|x_1-x_{1\varepsilon}\|+\|x_2-x_{2\varepsilon}\|\big)$ attains its minimum on $X^2$ at $(x_{1\varepsilon},x_{2\varepsilon})$ with $\|x_{1\varepsilon}-x_{2\varepsilon}+a\|\ne 0$. Thus the generalized Fermat rule tells us that $0\in\partial\varphi(x_{1\varepsilon},x_{2\varepsilon})$. Taking into account the summation structure of $f$ in \eqref{ext1}, we apply to its subdifferential the classical Moreau-Rockafellar theorem that allows us to find---by standard subdifferentiation of the norm and indicator functions---such dual elements $x^*_{i\varepsilon}\in N(x_{i\varepsilon};\Omega_i)+\varepsilon\Bbb B^*$ for $i=1,2$ that all the conditions in \eqref{ep1} are satisfied. This justifies assertion (ii) of the theorem.
We verify next the validity of (ii)$\Longrightarrow$(iii) by furnishing the passage to the limit in \eqref{ep1} as $\varepsilon\downarrow 0$ with the help of the SNC property of, say, the set $\Omega_1$ at $\bar{x}$. Take a sequence $\varepsilon_k\downarrow 0$ as $k\to\infty$ and find by \eqref{ep1} the corresponding septuples $(x_{1k},x_{2k},x^*_k,x^*_{1k},x^*_{2k},e^*_{1k},e^*_{2k})$ so that $x_{1k}\to\bar{x}$, $x_{2k}\to\bar{x}$ as $k\to\infty$, and
\begin{equation}\label{ep2}
x^*_k=x^*_{1k}+\varepsilon_ke^*_{1k},\;x^*_k=-x^*_{2k}+\varepsilon_k e^*_{2k},\;\|x^*_k\|=1,\;x^*_{ik}\in N(x_{ik};\Omega_i),\;e^*_{ik}\in\Bbb B^*
\end{equation}
for all $k\in I\!\!N$ and $i=1,2$. The classical Banach--Alaoglu theorem of functional analysis tells us that for any Banach space $X$ the sequence of triples $(x^*_k,e^*_{1k},e^*_{2k})$ contains a {\em subnet} converging to some $(x^*,e^*_1,e^*_2)\in\Bbb B^*\times\Bbb B^*\times\Bbb B^*$ in the weak$^*$ topology of $X^*$. It follows from \eqref{ep2} and definition \eqref{nor} of the normal cone to convex sets that the corresponding subnets of $\{(x^*_{1k},x^*_{2k})\}$ converge in the latter topology to some pair $(x^*_1,x^*_2)\in X^*\times X^*$ satisfying $x^*_1=-x^*_2=x^*$ and $x^*_i\in N(\bar{x};\Omega_i)$ for $i=1,2$.
To justify (iii), it remains to show that we can always find $x^*\ne 0$ in this way provided that $\Omega_1$ is SNC at $\bar{x}$. Assuming the contrary, let us first check that $\{x^*_{1k}\}$ converges to zero in the weak$^*$ topology. If it is not the case, there is $z\in X$ such that the numerical sequence $\{\langle x^*_{1k},z\rangle\}$ does not converge to zero. Fix $w\in\Omega_1$ and for each $k\in I\!\!N$ consider the set
\begin{equation}\label{Vk}
V_k:=\big\{z^*\in X^*\big|\;|\langle z^*,w-\bar{x}\rangle-\langle x^*_1,w-\bar{x}\rangle|<1/k,\;|\langle z^*,z\rangle-\langle x^*_1,z\rangle|<1/k\big\},
\end{equation}
which is a neighborhood of $x^*_1$ in the weak$^*$ topology of $X^*$. By extracting numerical subsequences in \eqref{Vk}, suppose without loss of generality that
\begin{equation*}
\langle x^*_{1k},w-\bar{x}\rangle\to\langle x^*_1,w-\bar{x}\rangle\;\;\mbox{\rm and }\;\langle x^*_{1k},z\rangle\to\langle x^*_1,z\rangle\;\;\mbox{\rm as }\;k\to\infty.
\end{equation*}
Remembering that $x^*_{1k}\in N(x_{1k};\Omega_1)$ by \eqref{ep2} gives us the estimate
\begin{equation}\label{xk}
\langle x^*_{1k},w-\bar{x}\rangle=\langle x^*_{1k},w-x_{1k}\rangle+\langle x^*_{1k},x_{1k}-\bar{x}\rangle\le\langle x^*_{1k},x_{1k}-\bar{x}\rangle,\quad k\in I\!\!N.
\end{equation}
Note that $\langle x^*_{1k},w-\bar{x}\rangle\to\langle x^*_1,w-\bar{x}\rangle$ and $|\langle x^*_{1k},x_{1k}-\bar{x}\rangle|\le\|x^*_{1k}\|\cdot\|x_{1k}-\bar{x}\|\to 0$ as $k\to\infty$ by the boundedness of $\{x^*_{1k}\}$ in \eqref{ep2}. Passing now to the limit in \eqref{xk} tells us that $\langle x^*_1,w-\bar{x}\rangle\le 0$ and so $x^*_1\in N(\bar{x};\Omega_1)$. It follows from \eqref{ep2} that $-x_1^*\in N(\bar{x};\Omega_2)$ and thus $x_1^*\in N(\bar{x};\Omega_1)\cap(-N(\bar{x};\Omega_2))=\{0\}$, which contradicts the imposed assumption that $\langle x^*_{1k},z\rangle\not\to 0$. Therefore the sequence $\{x^*_{1k}\}$ converges to zero in the weak$^*$ topology of $X^*$, which implies its sequential convergence
$x^*_{1k}\xrightarrow{w^*}0$ as well. By the assumed SNC property of $\Omega_1$ at $\bar{x}$ we conclude that $x^*_{1k}\xrightarrow{\|\cdot\|}0$ while yielding $x^*_k \xrightarrow{\|\cdot\|}0$. This surely contradicts \eqref{ep2} and thus ends the proof of implication (ii)$\Longrightarrow$(iii).
To check finally the equivalence assertion in (iii), observe that the separation property \eqref{sep} ensures by Theorem~\ref{extremal principle}(iii) that the sets $\Omega_1,\Omega_2$ form an extremal system in $X$, i.e., assertion (i) holds. Since implication (i)$\Longrightarrow$(ii) has been verified above, this readily justifies the claimed equivalences in (iii) and thus completes the proof of the theorem. $
\square$
As an immediate consequence of the (convex) approximate extremal principle in Theorem~\ref{convex-ep}(ii), we obtain the celebrated Bishop-Phelps theorem for closed convex sets in general Banach spaces; see \cite[Theorem~3.18]{ph}. Recall that $\bar{x}\in\Omega$ is a {\em support point} of $\Omega\subset X$ if there is $0\ne x^*\in X^*$ such that the function $x\mapsto\langle x^*,x\rangle$ attains its supremum on $\Omega$ at $\bar{x}$.\vspace*{-0.1in}
\begin{Corollary}{\bf (Bishop-Phelps theorem).}\label{bp} Let $\Omega$ be a nonempty, closed, and convex subset of a Banach space $X$. Then the support points of $\Omega$ are dense on the boundary of $\Omega$.
\end{Corollary}\vspace*{-0.1in}
{\bf Proof.} It is obvious from \eqref{setex} and the definition of boundary points that for any boundary point $\bar{x}$ of $\Omega$, the sets $\Omega_1:=\{\bar{x}\}$ and $\Omega_2:=\Omega$ form an extremal system in $X$. Then the result follows from \eqref{ep1} and the normal cone structure in \eqref{nor}.$
\square$
Note that a geometric approach involving the approximate extremality conditions \eqref{ep1} at points nearby may be useful for applications to the so-called {\em sequential convex subdifferential calculus} initiated by Attouch-Baillon-Th\'era \cite{abt} and Thibault \cite{thib} in different frameworks and then developed in other publications. Likewise it can be applied as a geometric device of coderivative and conjugate calculus rules, which is our intention in the future research.\vspace*{-0.2in}
\section{Normal Cone Intersection Rule}
\setcounter{equation}{0}\vspace*{-0.1in}
In this section we employ the set extremality and the results of Theorem~\ref{extremal principle} to obtain the exact intersection rule for the normal cone \eqref{nor} under a new qualification condition.
The following theorem justifies a precise representation of the normal cone $N(\bar{x};\Omega_1\cap\Omega_2)$ via normals to each sets $\Omega_1$ and $\Omega_2$ under the new qualification condition \eqref{qc} depending on $\bar{x}$, which is weaker than the standard interiority condition in LCTV spaces. For convenience we refer to \eqref{qc} as to the {\em bounded extremality condition}.\vspace*{-0.1in}
\begin{Theorem}{\bf (intersection rule).}\label{nir} Let $\Omega_1,\Omega_2\subset X$ be convex, and let $\bar{x}\in\Omega_1\cap\Omega_2$. Suppose that there exists a bounded convex neighborhood $V$ of $\bar{x}$ such that
\begin{equation}\label{qc}
0\in\mbox{\rm int}\big(\Omega_1-(\Omega_2\cap V)\big).
\end{equation}
Then we have the normal cone intersection rule
\begin{equation}\label{ni}
N(\bar{x};\Omega_1\cap\Omega_2)=N(\bar{x};\Omega_1)+N(\bar{x};\Omega_2).
\end{equation}
\end{Theorem}\vspace*{-0.1in}
{\bf Proof.} To verify \eqref{ni} under the qualification condition \eqref{qc}, denote $A:=\Omega_1$ and $B:=\Omega_2\cap V$ and observe that $0\in\mbox{\rm int}(A-B)$ and $B$ is bounded. Fixing an arbitrary normal $x^*\in N(\bar{x};A\cap B)$, we get by \eqref{nor} that $\langle x^*,x-\bar{x}\rangle\le 0$ for all $x\in A\cap B$. Consider the sets
\begin{equation}\label{theta}
\Theta_1:= A\times[0,\infty)\;\;\mbox{\rm and }\;\Theta_2:=\big\{(x,\mu)\in X\times\Bbb R\big|\;x\in B,\;\mu\le\langle x^*,x-\bar{x}\rangle\big\}.
\end{equation}
It follows from the constructions of $\Theta_1$ and $\Theta_2$ that for any $\alpha>0$ we have
\begin{equation*}
\big(\Theta_1+(0,\alpha)\big)\cap\Theta_2=\emptyset,
\end{equation*}
and thus these sets form an {\em extremal system} by Definition~\ref{ext-sys}. Employing Theorem~\ref{extremal principle}(i) tells us that $0\notin\mbox{\rm int}(\Theta_1-\Theta_2)$. To check next that $\mbox{\rm int}(\Theta_1-\Theta_2)\ne\emptyset$, take $r>0$ such that $U:=\Bbb B(0;r)\subset A-B$. The boundedness of the set $B$ allows us to choose $\bar{\lambda}\in\Bbb R$ satisfying
\begin{equation}\label{lambda}
\bar{\lambda}\ge\sup_{x\in B}\langle-x^*,x-\bar{x}\rangle.
\end{equation}
Then we get ${\rm int}(\Theta_1-\Theta_2)\ne\emptyset$ by showing that $U\times(\bar{\lambda},\infty)\subset\Theta_1-\Theta_2$. To verify the latter, fix any $(x,\lambda)\in U\times(\bar{\lambda},\infty)$ for which we clearly have $x\in U\subset A-B$ and $\lambda>\bar{\lambda}$, and so $x=w_1-w_2$ with some $w_1\in A$ and $w_2\in B$. This implies in turn the representation
$$
(x,\lambda)=(w_1,\lambda-\bar\lambda)-(w_2,-\bar\lambda).
$$
Further, it follows from $\lambda-\bar\lambda>0$ that $(w_1,\lambda-\bar\lambda)\in\Theta_1$, and we deduce from \eqref{theta} and \eqref{lambda} that $(w_2,-\bar\lambda)\in\Theta_2$, which shows that $\mbox{\rm int}(\Theta_1-\Theta_2)\ne\emptyset$. Applying now Theorem~\ref{extremal principle}(ii) to the sets $\Theta_1,\Theta_2$ in \eqref{theta} gives us $y^*\in X^*$ and $\gamma\in\Bbb R$ such that $(y^*,\gamma)\ne(0,0)$ and
\begin{equation}\label{convexseparation}
\langle y^*,x\rangle +\lambda_1\gamma\le\langle y^*,y\rangle+\lambda_2\gamma\;\;\mbox{\rm whenever }\;(x,\lambda_1)\in\Theta_1,\;(y,\lambda_2)\in\Theta_2.
\end{equation}
Using \eqref{convexseparation} with $(\bar{x},1)\in\Theta_1$ and $(\bar{x},0)\in\Theta_2$ yields $\gamma\le 0$. Supposing $\gamma=0$, we get
$$
\langle y^*,x\rangle\le\langle y^*,y\rangle\;\;\mbox{\rm for all }\;x\in A,\;y\in B.
$$
Since $U\subset A-B$, it readily produces $y^*=0$, a contradiction, which shows that $\gamma<0$. Employing next \eqref{convexseparation} with $(x,0)\in\Theta_1$ for $x\in A$ and $(\bar{x},0)\in\Theta_2$ tells us that
\begin{equation*}
\langle y^*,x\rangle\le\langle y^*,\bar{x}\rangle\;\;\mbox{\rm for all }\;x\in A,\;\;\mbox{\rm and so }\;y^*\in N(\bar{x};A).
\end{equation*}
Using finally \eqref{convexseparation} with $(\bar{x},0)\in\Theta_1$ and $(y,\langle x^*,y-\bar{x}\rangle)\in\Theta_2$ for $y\in B$ implies that
\begin{equation*}
\langle y^*,\bar{x}\rangle\le\langle y^*,y\rangle+\gamma\langle x^*,y-\bar{x}\rangle\;\;\mbox{\rm for all }\;y\in B.
\end{equation*}
Dividing both sides of the obtained inequality by $\gamma<0$, we arrive at
\begin{equation*}
\langle x^*+y^*/\gamma,y-\bar{x}\rangle\le 0\;\;\mbox{\rm for all }\;y\in B,
\end{equation*}
which verifies by \eqref{nor} the validity of the inclusions
$$
x^*\in-y^*/\gamma+N(\bar{x};B)\subset N(\bar{x};A)+N(\bar{x};B)
$$
and thus shows that $N(\bar{x};A\cap B)\subset N(\bar{x};A)+N(\bar{x};B)$. The opposite inclusion therein is trivial, and so we get the equality $N(\bar{x};A\cap B)= N(\bar{x};A)+N(\bar{x};B)$. Since $N(\bar{x};A\cap B)=N(\bar{x};\Omega_1\cap\Omega_2)$ and $N(\bar{x};B)=N(\bar{x};\Omega_2)$, it justifies \eqref{ni} and completes the proof.
$
\square$\vspace*{-0.1in}
\begin{Remark}{\bf (comparing qualification conditions for the normal intersection formula).}\label{qc-comp} {\rm We have the following useful observations:
{\bf (i)} It is easy to see that, if one of the sets $\Omega_1,\Omega_2$ is bounded, the introduced qualification condition \eqref{qc} reduces to the {\em difference interiority condition}
\begin{equation}\label{dqc}
0\in{\rm int}(\Omega_1-\Omega_2).
\end{equation}
Furthermore, \eqref{qc} surely holds under the validity of the {\em classical interiority condition} $\Omega_1\cap({\rm int}\,\Omega_2)\ne\emptyset$, which is the only condition previously known to us that ensures the validity of the intersection formula \eqref{ni} in the general LCTV (or even normed) space setting. Indeed, if the latter condition is satisfied, take $u\in\Omega_1\cap({\rm int}\,\Omega_2)$ and $\gamma>0$ such that $u+\gamma\Bbb B\subset\Omega_2$. Then we choose $r>0$ with $u+\gamma\Bbb B\subset\Omega_2\cap\Bbb B(\bar{x};r)$. Thus $\gamma\Bbb B\subset\Omega_1-(\Omega_2\cap\Bbb B(\bar{x};r))$ and so $0\in\mbox{\rm int}(\Omega_1-(\Omega_2\cap V))$, where $V:=\Bbb B(\bar{x};r)$.
As the following simple example shows, the bounded extremality condition \eqref{qc} may be weaker than the classical interiority condition even in $\Bbb R^2$. Indeed, consider the convex sets
$$
\Omega_1:=\Bbb R\times[0,\infty)\;\;\mbox{\rm and }\;\Omega_2:=\{0\}\times\Bbb R
$$
for which $\Omega_1\cap({\rm int}\,\Omega_2)=\emptyset$, while the conditions $0\in{\rm int}(\Omega_1-\Omega_2)$ and \eqref{qc} hold.
{\bf (ii)} If $X$ is {\em Banach} and both sets $\Omega_1,\Omega_2$ are {\em closed} with $\mbox{\rm int}(\Omega_1-\Omega_2)\ne\emptyset$, the difference interiority condition \eqref{dqc} reduces to Rockafellar's {\em core qualification condition} $0\in{\rm core}(\Omega_1-\Omega_2)$ introduced in \cite{r1}. This follows from the equivalence
\begin{equation}\label{core-int}
\big[0\in{\rm core}(\Omega_1-\Omega_2)\big]\Longleftrightarrow\big[0\in{\rm int}(\Omega_1-\Omega_2)\big]
\end{equation}
valid in this case. Indeed, the implication ``$\Longleftarrow$" in \eqref{core-int} is obvious due to $\mbox{\rm int}\,\Omega\subset{\rm core}\,\Omega$ for any set. To verify the opposite implication in \eqref{core-int}, recall that $\mbox{\rm int}\,\Omega={\rm core}\,\Omega$ for closed convex subsets of Banach spaces by \cite[Theorem~4.1.8]{BZ}. Using now the well-known fact that $\mbox{\rm int}\,\overline\Omega=\mbox{\rm int}\,\Omega$ for convex sets with nonempty interiors yields
$$
0\in\mbox{\rm core}\big(\overline{\Omega_1-\Omega_2}\big)=\mbox{\rm int}\big(\overline{\Omega_1-\Omega_2}\big)=\mbox{\rm int}\big(\Omega_1-\Omega_2\big).
$$
Note that the core qualification condition is superseded in the same setting by the requirement that $\Bbb R^+(\Omega_1-\Omega_2)\subset X$ is a closed subspace, which is known as the {\em Attouch-Br\'ezis regularity condition} established in \cite{AB} with the usage of convex duality and the fundamental Banach-Dieudonn\'e-Krein-\v Smulian theorem in general Banach spaces.}
\end{Remark}\vspace*{-0.1in}
The next proposition shows that the core condition $0\in{\rm core}(\Omega_1-\Omega_2)$ implies the extremality one \eqref{qc} for closed subsets of reflexive Banach spaces {\em provided that} ${\rm int}(\Omega_1-\Omega_2)\ne\emptyset$. Thus the extremality approach of Theorem~\ref{nir} offers in this setting a simplified proof of the intersection formula in comparison with those known in the literature.\vspace*{-0.1in}
\begin{Proposition}{\bf (bounded extremality condition in reflexive spaces).}\label{intersection rule reflexive} The qualification condition \eqref{qc} holds at any $\bar{x}\in\Omega_1\cap\Omega_2$ if $X$ is a reflexive Banach space and $\Omega_1,\Omega_2\subset X$ are closed convex sets such that ${\rm int}(\Omega_1-\Omega_2)\ne\emptyset$ and $0\in\mbox{\rm core}(\Omega_1-\Omega_2)$.
\end{Proposition}\vspace*{-0.1in}
{\bf Proof.} Fix any number $r>0$ and show that
\begin{equation}\label{cl}
0\in\mbox{\rm core}\big(\Omega_1\cap\Bbb B(\bar{x};r)-\Omega_2\cap\Bbb B(\bar{x};r)\big).
\end{equation}
Indeed, the assumption ${\rm int}(\Omega_1-\Omega_2)\ne\emptyset$ allows us to find $\gamma>0$ such that $\gamma\Bbb B\subset\Omega_1-\Omega_2$. For any $x\in X$ denote $u:=\frac{\gamma}{\|x\|+1}x\in\gamma\Bbb B$ and get $u=w_1-w_2$ with $w_i\in\Omega_i$ for $i=1,2$. Hence there is a constant $\bar{\gamma}>0$ depending on $x$ and $r$ for which
$$
t\max\big\{\|w_1-\bar{x}\|,\|w_2-\bar{x}\|\big\}<r\;\mbox{ whenever }\;0<t<\bar{\gamma}.
$$
This readily justifies the relationships
\begin{equation*}
tu=tw_1-tw_2=\big(\bar{x}+t(w_1-\bar{x})\big)-\big(\bar{x}+t(w_2-\bar{x})\big)\in\big(\Omega_1\cap\Bbb B(\bar{x};r)\big)-\big(\Omega_2\cap\Bbb B(\bar{x};r)\big)
\end{equation*}
for all $0<t<\bar{\gamma}$ and thus establishes the claimed inclusion \eqref{cl} by the core definition \eqref{core-def}.
Since $X$ is reflexive and the sets $\Omegamega_i\cap\Bbb B(\bar{x};r)$, $i=1,2$, are closed and bounded in $X$, they are weakly sequentially compact in this space. This implies that their difference $\big(\Omegamega_1\cap\Bbb B(\bar{x};r)\big)-\big(\Omegamega_2\cap\Bbb B(\bar{x};r)\big)$ is closed in $X$. Then we get by \cite[Theorem~4.1.8]{BZ} that
$$
0\in\mbox{\rm core}\big(\Omega_1\cap\Bbb B(\bar{x};r)-\Omega_2\cap\Bbb B(\bar{x};r)\big)=\mbox{\rm int}\big(\Omega_1\cap\Bbb B(\bar{x};r)-\Omega_2\cap\Bbb B(\bar{x};r)\big)\subset{\rm int}(\Omega_1-\Omega_2),
$$
which verifies \eqref{qc} and thus completes the proof of the proposition. $
\square$\vspace*{-0.2in}
\section{Support Functions for Set Intersections}
\setcounter{equation}{0}\vspace*{-0.1in}
In this section we derive a precise representation of support functions for convex set intersections via the infimal convolution of the support functions to the intersection components under the {\em difference interiority condition} \eqref{dqc}. This result under \eqref{dqc} seems to be new in the literature on convex analysis in LCTV (and also in normed) spaces; see Remark~\ref{rem-supp} for more discussions. Furthermore, we present a novel geometric device for results of this type by employing set extremality and the normal intersection rule obtained above.
Recall that the {\em support function} of a nonempty set $\Omega\subset X$ is given by
\begin{equation}\label{supp}
\sigma_\Omega(x^*):=\sup\big\{\langle x^*,x\rangle\big|\;x\in\Omega\big\},\quad x^*\in X^*.
\end{equation}
The {\em infimal convolution} of two functions $f,g\colon X\to\Bar{\R}$ is
\begin{equation}\label{ic}
(f\oplus g)(x):=\inf\big\{f(x_1)+g(x_2)\big|\;x_1+x_2=x\big\}=\inf\big\{f(u)+g(x-u)\big|\;u\in X\big\}.
\end{equation}\vspace*{-0.35in}
\begin{Theorem}{\bf(support functions for set intersections via infimal convolutions).}\label{sigma intersection rule} Let the sets $\Omega_1,\Omega_2\subset X$ be nonempty and convex, and let one of them be bounded. Then the difference interiority condition \eqref{dqc} ensures the representation
\begin{equation}\label{convol}
(\sigma_{\Omega_1\cap\Omega_2})(x^*)=(\sigma_{\Omega_1}\oplus\sigma_{\Omega_2})(x^*)\;\;\mbox{\rm for all }\;x^*\in X^*.
\end{equation}
Moreover, for any $x^*\in\mbox{\rm dom}\,(\sigma_{\Omega_1\cap\Omega_2})$ there are $x^*_1,x^*_2\in X^*$ such that $x^*=x^*_1+x^*_2$ and
\begin{equation}\label{convol1}
(\sigma_{\Omega_1\cap\Omega_2})(x^*)=\sigma_{\Omega_1}(x^*_1)+\sigma_{\Omega_2}(x^*_2).
\end{equation}
\end{Theorem}\vspace*{-0.1in}
{\bf Proof.} First we check that the inequality ``$\le$'' in \eqref{convol} holds in the general setting. Fix any $x^*\in X^*$ and pick $x^*_1,x^*_2\in X^*$ such that $x^*=x^*_1+x^*_2$. Then it follows from \eqref{supp} that
\begin{equation*}
\langle x^*,x\rangle=\langle x^*_1,x\rangle +\langle x^*_2,x\rangle\le\sigma_{\Omega_1}(x^*_1)+\sigma_{\Omega_2}(x^*_2)\;\;\mbox{\rm whenever }\;x\in\Omega_1\cap\Omega_2.
\end{equation*}
Taking the infimum on the right-hand side above with respect to all $x^*_1,x^*_2\in X^*$ satisfying $x^*_1+x^*_2=x^*$ gives us by definition \eqref{ic} of the infimal convolution that
\begin{equation*}
\langle x^*,x\rangle\le(\sigma_{\Omega_1}\oplus\sigma_{\Omega_2})(x^*).
\end{equation*}
This verifies the inequality ``$\le$'' in \eqref{convol} by taking the supremum on the left-hand side therein with respect to $x\in\Omega_1\cap\Omega_2$.
To justify further the opposite inequality in \eqref{convol} under the validity of \eqref{dqc}, suppose that $\Omega_2$ is bounded. It suffices to consider the case where $x^*\in\mbox{\rm dom}\,(\sigma_{\Omega_1\cap\Omega_2})$ and prove the inequality ``$\le$'' in \eqref{convol1}; then the one in \eqref{convol} and both statements of the theorem follow.
To proceed, denote $\alpha:=(\sigma_{\Omega_1\cap\Omega_2})(x^*)\in\Bbb R$, for which we clearly have $\langle x^*,x\rangle-\alpha\le 0$ whenever $x\in\Omega_1\cap\Omega_2$, and then construct the two nonempty convex subsets of $X\times\Bbb R$ by
\begin{equation}\label{Theta}
\Theta_1:=\Omega_1\times[0,\infty)\;\;\mbox{\rm and }\;\Theta_2:=\big\{(x,\lambda)\in X\times\Bbb R\big|\;x\in\Omega_2,\;\lambda\le\langle x^*,x\rangle-\alpha\big\}.
\end{equation}
Observe that the sets $\Theta_1,\Theta_2$ form an {\em extremal system}. Indeed, it follows from the choice of $\alpha$ and the construction in \eqref{Theta} that
for any $\gamma>0$ we have
\begin{equation*}
\big(\Theta_1+(0,\gamma)\big)\cap\Theta_2=\emptyset.
\end{equation*}
Then Theorem~\ref{extremal principle}(i) tells us that $0\notin\mbox{\rm int}(\Theta_1-\Theta_2)$. Arguing similarly to the proof of Theorem~\ref{nir}, we see that the condition $\mbox{\rm int}(\Theta_1-\Theta_2)\ne\emptyset$ holds for the sets in \eqref{Theta}. Thus Theorem~\ref{extremal principle}(ii) allows us to find a pair
$(y^*,\beta)\ne(0,0)$ such that
\begin{equation}\label{sep-conv}
\langle y^*, x\rangle+\lambda_1\beta\le\langle y^*,y\rangle+\lambda_2\beta\;\;\mbox{\rm whenever }\;(x,\lambda_1)\in\Theta_1,\;(y,\lambda_2)\in\Theta_2.
\end{equation}
Choosing $(\bar{x},1)\in\Theta_1$ and $(\bar{x},0)\in\Theta_2$ in \eqref{sep-conv} shows that $\beta\le 0$. If $\beta=0$, then
\begin{equation*}
\langle y^*,x\rangle\le\langle y^*,y\rangle\;\;\mbox{\rm for all }\;x\in\Omega_1,\;y\in\Omega_2.
\end{equation*}
By ${\rm int}(\Omega_1-\Omega_2)\ne\emptyset$ this yields $y^*=0$, a contradiction justifying the negativity of $\beta$ in \eqref{sep-conv}. Take now $(x,0)\in\Theta_1$ and $(y,\langle x^*,y\rangle-\alpha)\in\Theta_2$ in \eqref{sep-conv} and then get
\begin{equation*}
\langle y^*,x\rangle\le\langle y^*,y\rangle+\beta(\langle x^*,y\rangle-\alpha),
\end{equation*}
which can be equivalently rewritten (due to $\beta<0$) as
\begin{equation*}
\alpha\ge\big\langle y^*/\beta+x^*,y\big\rangle+\big\langle-y^*/\beta,x\big\rangle\;\;\mbox{\rm for all }\;x\in\Omega_1,\;y\in\Omega_2.
\end{equation*}
Denoting $x^*_1:=y^*/\beta+x^*$ and $x^*_2:=-y^*/\beta$, we have $x^*_1+x^*_2=x^*$ and $\langle x^*_1,x\rangle +\langle x^*_2,y\rangle\le\alpha$ for all
$x\in\Omega_1$ and $y\in\Omega_2$. This shows that
\begin{equation*}
\sigma_{\Omega_1}(x^*_1)+\sigma_{\Omega_2}(x^*_2)\le\alpha=\sigma_{\Omega_1\cap\Omega_2}(x^*)
\end{equation*}
and thus completes the proof of the theorem.$
\square$\vspace*{-0.1in}
\begin{Remark}{\bf(comparison with Fenchel duality).}\langlebel{rem-supp}
{\rm Since the qualification condition \eqref{qc} used in Theorem~\ref{nir} is equivalent to \eqref{dqc} employed in Theorem~\ref{sigma intersection rule} when one of the sets $\Omega_1,\Omega_2$ is bounded, all the comments given in Remark~\ref{qc-comp} are applied here. On the other hand, there is a remarkable feature of the calculus rules for support functions presented in Theorem~\ref{sigma intersection rule}, which does not have analogs in the setting of Theorem~\ref{nir} and should be specially commented. Namely, the support function \eqref{supp} is the {\em Fenchel conjugate}
\begin{equation*}
f^*(x^*):=\sup\big\{\langle x^*,x\rangle-f(x)\big|\;x\in X\big\},\quad x^*\in X^*,
\end{equation*}
of the indicator function $f(x):=\delta(x;\Omega)$ of a given set $\Omega\subset X$, and hence a well-developed {\em conjugate calculus} can be applied to establish representations \eqref{convol} and \eqref{convol1}; see, e.g., the books \cite{Ra,r1,s,z} and the references therein. However, it seems to us that such an approach from Fenchel duality misses the specific results of Theorem~\ref{sigma intersection rule} derived for the support function under the qualification condition \eqref{dqc} in general LCTV spaces. Observe also that, in contrast to analytical schemes usually applied to deriving conjugate calculus and then deducing results of the type of Theorems~\ref{nir} and \ref{sigma intersection rule} from them, we develop here a {\em geometric approach} in the other direction based on {\em set extremality}.}
\end{Remark}
\small
\end{document}